1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * mm/mmap.c |
4 | * |
5 | * Written by obz. |
6 | * |
7 | * Address space accounting code <alan@lxorguk.ukuu.org.uk> |
8 | */ |
9 | |
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
11 | |
12 | #include <linux/kernel.h> |
13 | #include <linux/slab.h> |
14 | #include <linux/backing-dev.h> |
15 | #include <linux/mm.h> |
16 | #include <linux/mm_inline.h> |
17 | #include <linux/shm.h> |
18 | #include <linux/mman.h> |
19 | #include <linux/pagemap.h> |
20 | #include <linux/swap.h> |
21 | #include <linux/syscalls.h> |
22 | #include <linux/capability.h> |
23 | #include <linux/init.h> |
24 | #include <linux/file.h> |
25 | #include <linux/fs.h> |
26 | #include <linux/personality.h> |
27 | #include <linux/security.h> |
28 | #include <linux/hugetlb.h> |
29 | #include <linux/shmem_fs.h> |
30 | #include <linux/profile.h> |
31 | #include <linux/export.h> |
32 | #include <linux/mount.h> |
33 | #include <linux/mempolicy.h> |
34 | #include <linux/rmap.h> |
35 | #include <linux/mmu_notifier.h> |
36 | #include <linux/mmdebug.h> |
37 | #include <linux/perf_event.h> |
38 | #include <linux/audit.h> |
39 | #include <linux/khugepaged.h> |
40 | #include <linux/uprobes.h> |
41 | #include <linux/notifier.h> |
42 | #include <linux/memory.h> |
43 | #include <linux/printk.h> |
44 | #include <linux/userfaultfd_k.h> |
45 | #include <linux/moduleparam.h> |
46 | #include <linux/pkeys.h> |
47 | #include <linux/oom.h> |
48 | #include <linux/sched/mm.h> |
49 | #include <linux/ksm.h> |
50 | #include <linux/memfd.h> |
51 | |
52 | #include <linux/uaccess.h> |
53 | #include <asm/cacheflush.h> |
54 | #include <asm/tlb.h> |
55 | #include <asm/mmu_context.h> |
56 | |
57 | #define CREATE_TRACE_POINTS |
58 | #include <trace/events/mmap.h> |
59 | |
60 | #include "internal.h" |
61 | |
62 | #ifndef arch_mmap_check |
63 | #define arch_mmap_check(addr, len, flags) (0) |
64 | #endif |
65 | |
66 | #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS |
67 | const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN; |
68 | int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX; |
69 | int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS; |
70 | #endif |
71 | #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS |
72 | const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN; |
73 | const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX; |
74 | int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS; |
75 | #endif |
76 | |
77 | static bool ignore_rlimit_data; |
78 | core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); |
79 | |
80 | /* Update vma->vm_page_prot to reflect vma->vm_flags. */ |
81 | void vma_set_page_prot(struct vm_area_struct *vma) |
82 | { |
83 | unsigned long vm_flags = vma->vm_flags; |
84 | pgprot_t vm_page_prot; |
85 | |
86 | vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); |
87 | if (vma_wants_writenotify(vma, vm_page_prot)) { |
88 | vm_flags &= ~VM_SHARED; |
89 | vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags); |
90 | } |
91 | /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */ |
92 | WRITE_ONCE(vma->vm_page_prot, vm_page_prot); |
93 | } |
94 | |
95 | /* |
96 | * check_brk_limits() - Use the platform-specific range check and verify mlock |
97 | * limits. |
98 | * @addr: The address to check |
99 | * @len: The size of increase. |
100 | * |
101 | * Return: 0 on success. |
102 | */ |
103 | static int check_brk_limits(unsigned long addr, unsigned long len) |
104 | { |
105 | unsigned long mapped_addr; |
106 | |
107 | mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); |
108 | if (IS_ERR_VALUE(mapped_addr)) |
109 | return mapped_addr; |
110 | |
111 | return mlock_future_ok(current->mm, current->mm->def_flags, len) |
112 | ? 0 : -EAGAIN; |
113 | } |
114 | |
115 | SYSCALL_DEFINE1(brk, unsigned long, brk) |
116 | { |
117 | unsigned long newbrk, oldbrk, origbrk; |
118 | struct mm_struct *mm = current->mm; |
119 | struct vm_area_struct *brkvma, *next = NULL; |
120 | unsigned long min_brk; |
121 | bool populate = false; |
122 | LIST_HEAD(uf); |
123 | struct vma_iterator vmi; |
124 | |
125 | if (mmap_write_lock_killable(mm)) |
126 | return -EINTR; |
127 | |
128 | origbrk = mm->brk; |
129 | |
130 | #ifdef CONFIG_COMPAT_BRK |
131 | /* |
132 | * CONFIG_COMPAT_BRK can still be overridden by setting |
133 | * randomize_va_space to 2, which will still cause mm->start_brk |
134 | * to be arbitrarily shifted |
135 | */ |
136 | if (current->brk_randomized) |
137 | min_brk = mm->start_brk; |
138 | else |
139 | min_brk = mm->end_data; |
140 | #else |
141 | min_brk = mm->start_brk; |
142 | #endif |
143 | if (brk < min_brk) |
144 | goto out; |
145 | |
146 | /* |
147 | * Check against rlimit here. If this check is done later after the test |
148 | * of oldbrk with newbrk then it can escape the test and let the data |
149 | segment grow beyond its set limit in the case where the limit is |
150 | * not page aligned -Ram Gupta |
151 | */ |
152 | if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, |
153 | mm->end_data, mm->start_data)) |
154 | goto out; |
155 | |
156 | newbrk = PAGE_ALIGN(brk); |
157 | oldbrk = PAGE_ALIGN(mm->brk); |
158 | if (oldbrk == newbrk) { |
159 | mm->brk = brk; |
160 | goto success; |
161 | } |
162 | |
163 | /* Always allow shrinking brk. */ |
164 | if (brk <= mm->brk) { |
165 | /* Search one past newbrk */ |
166 | vma_iter_init(&vmi, mm, newbrk); |
167 | brkvma = vma_find(&vmi, oldbrk); |
168 | if (!brkvma || brkvma->vm_start >= oldbrk) |
169 | goto out; /* mapping intersects with an existing non-brk vma. */ |
170 | /* |
171 | * mm->brk must be protected by write mmap_lock. |
172 | * do_vmi_align_munmap() will drop the lock on success, so |
173 | * update it before calling do_vma_munmap(). |
174 | */ |
175 | mm->brk = brk; |
176 | if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf, |
177 | /* unlock = */ true)) |
178 | goto out; |
179 | |
180 | goto success_unlocked; |
181 | } |
182 | |
183 | if (check_brk_limits(oldbrk, newbrk - oldbrk)) |
184 | goto out; |
185 | |
186 | /* |
187 | * Only check if the next VMA is within the stack_guard_gap of the |
188 | * expansion area |
189 | */ |
190 | vma_iter_init(&vmi, mm, oldbrk); |
191 | next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap); |
192 | if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) |
193 | goto out; |
194 | |
195 | brkvma = vma_prev_limit(&vmi, mm->start_brk); |
196 | /* Ok, looks good - let it rip. */ |
197 | if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0) |
198 | goto out; |
199 | |
200 | mm->brk = brk; |
201 | if (mm->def_flags & VM_LOCKED) |
202 | populate = true; |
203 | |
204 | success: |
205 | mmap_write_unlock(mm); |
206 | success_unlocked: |
207 | userfaultfd_unmap_complete(mm, &uf); |
208 | if (populate) |
209 | mm_populate(oldbrk, newbrk - oldbrk); |
210 | return brk; |
211 | |
212 | out: |
213 | mm->brk = origbrk; |
214 | mmap_write_unlock(mm); |
215 | return origbrk; |
216 | } |
217 | |
218 | /* |
219 | * If a hint addr is less than mmap_min_addr, change the hint to be as |
220 | * low as possible but still greater than mmap_min_addr |
221 | */ |
222 | static inline unsigned long round_hint_to_min(unsigned long hint) |
223 | { |
224 | hint &= PAGE_MASK; |
225 | if (((void *)hint != NULL) && |
226 | (hint < mmap_min_addr)) |
227 | return PAGE_ALIGN(mmap_min_addr); |
228 | return hint; |
229 | } |
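/*
 * Illustrative example (editorial addition, not part of the original source;
 * assumes a 4 KiB page size and an mmap_min_addr of 64 KiB == 0x10000): a
 * hint of 0x1234 is first masked down to 0x1000, and since that is still
 * below mmap_min_addr the function returns PAGE_ALIGN(0x10000) == 0x10000
 * instead. A hint of 0 is left untouched, so callers asking for "anywhere"
 * still get an unconstrained search.
 */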
230 | |
231 | bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, |
232 | unsigned long bytes) |
233 | { |
234 | unsigned long locked_pages, limit_pages; |
235 | |
236 | if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) |
237 | return true; |
238 | |
239 | locked_pages = bytes >> PAGE_SHIFT; |
240 | locked_pages += mm->locked_vm; |
241 | |
242 | limit_pages = rlimit(RLIMIT_MEMLOCK); |
243 | limit_pages >>= PAGE_SHIFT; |
244 | |
245 | return locked_pages <= limit_pages; |
246 | } |
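/*
 * Worked example (editorial addition, assuming 4 KiB pages): with
 * RLIMIT_MEMLOCK set to 64 KiB, limit_pages is 64K >> 12 = 16. A request to
 * lock 8 more pages while mm->locked_vm is already 10 gives
 * locked_pages = 18 > 16, so the function returns false and callers such as
 * do_mmap() fail with -EAGAIN - unless the task has CAP_IPC_LOCK or the
 * mapping is not VM_LOCKED, in which case the check passes unconditionally.
 */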
247 | |
248 | static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) |
249 | { |
250 | if (S_ISREG(inode->i_mode)) |
251 | return MAX_LFS_FILESIZE; |
252 | |
253 | if (S_ISBLK(inode->i_mode)) |
254 | return MAX_LFS_FILESIZE; |
255 | |
256 | if (S_ISSOCK(inode->i_mode)) |
257 | return MAX_LFS_FILESIZE; |
258 | |
259 | /* Special "we do even unsigned file positions" case */ |
260 | if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET) |
261 | return 0; |
262 | |
263 | /* Yes, random drivers might want more. But I'm tired of buggy drivers */ |
264 | return ULONG_MAX; |
265 | } |
266 | |
267 | static inline bool file_mmap_ok(struct file *file, struct inode *inode, |
268 | unsigned long pgoff, unsigned long len) |
269 | { |
270 | u64 maxsize = file_mmap_size_max(file, inode); |
271 | |
272 | if (maxsize && len > maxsize) |
273 | return false; |
274 | maxsize -= len; |
275 | if (pgoff > maxsize >> PAGE_SHIFT) |
276 | return false; |
277 | return true; |
278 | } |
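/*
 * Illustrative reading (editorial addition, assuming 4 KiB pages): for a
 * regular file maxsize is MAX_LFS_FILESIZE, so the request is rejected only
 * when @len alone exceeds the maximum file size, or when @pgoff is large
 * enough that pgoff plus len >> PAGE_SHIFT would run past
 * maxsize >> PAGE_SHIFT. Taken together the two checks keep the byte range
 * (pgoff << PAGE_SHIFT) + len from wrapping past the file size limit.
 */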
279 | |
280 | /** |
281 | * do_mmap() - Perform a userland memory mapping into the current process |
282 | * address space of length @len with protection bits @prot, mmap flags @flags |
283 | * (from which VMA flags will be inferred), and any additional VMA flags to |
284 | * apply @vm_flags. If this is a file-backed mapping then the file is specified |
285 | * in @file and page offset into the file via @pgoff. |
286 | * |
287 | * This function does not perform security checks on the file and assumes, if |
288 | * @uf is non-NULL, the caller has provided a list head to track unmap events |
289 | * for userfaultfd @uf. |
290 | * |
291 | * It also simply indicates whether memory population is required by setting |
292 | * @populate, which must be non-NULL, expecting the caller to actually perform |
293 | * this task itself if appropriate. |
294 | * |
295 | * This function will invoke architecture-specific (and if provided and |
296 | * relevant, file system-specific) logic to determine the most appropriate |
297 | * unmapped area in which to place the mapping if not MAP_FIXED. |
298 | * |
299 | * Callers which require userland mmap() behaviour should invoke vm_mmap(), |
300 | * which is also exported for module use. |
301 | * |
302 | * Callers which require this behaviour minus the security checks, userfaultfd |
303 | * and populate handling, and which manage the mmap write lock themselves, |
304 | * should call this function. |
305 | * |
306 | * Note that the returned address may reside within a merged VMA if an |
307 | * appropriate merge were to take place, so it doesn't necessarily specify the |
308 | * start of a VMA, rather only the start of a valid mapped range of length |
309 | * @len bytes, rounded up to a multiple of the page size. |
310 | * |
311 | * The caller must write-lock current->mm->mmap_lock. |
312 | * |
313 | * @file: An optional struct file pointer describing the file which is to be |
314 | * mapped, if a file-backed mapping. |
315 | * @addr: If non-zero, hints at (or if @flags has MAP_FIXED set, specifies) the |
316 | * address at which to perform this mapping. See mmap (2) for details. Must be |
317 | * page-aligned. |
318 | * @len: The length of the mapping. Will be page-aligned and must be at least 1 |
319 | * page in size. |
320 | * @prot: Protection bits describing access required to the mapping. See mmap |
321 | * (2) for details. |
322 | * @flags: Flags specifying how the mapping should be performed, see mmap (2) |
323 | * for details. |
324 | * @vm_flags: VMA flags which should be set by default, or 0 otherwise. |
325 | * @pgoff: Page offset into the @file if file-backed, should be 0 otherwise. |
326 | * @populate: A pointer to a value which will be set to 0 if no population of |
327 | * the range is required, or the number of bytes to populate if it is. Must be |
328 | * non-NULL. See mmap (2) for details as to under what circumstances population |
329 | * of the range occurs. |
330 | * @uf: An optional pointer to a list head to track userfaultfd unmap events |
331 | * should unmapping events arise. If provided, it is up to the caller to manage |
332 | * this. |
333 | * |
334 | * Returns: Either an error, or the address at which the requested mapping has |
335 | * been performed. |
336 | */ |
337 | unsigned long do_mmap(struct file *file, unsigned long addr, |
338 | unsigned long len, unsigned long prot, |
339 | unsigned long flags, vm_flags_t vm_flags, |
340 | unsigned long pgoff, unsigned long *populate, |
341 | struct list_head *uf) |
342 | { |
343 | struct mm_struct *mm = current->mm; |
344 | int pkey = 0; |
345 | |
346 | *populate = 0; |
347 | |
348 | mmap_assert_write_locked(mm); |
349 | |
350 | if (!len) |
351 | return -EINVAL; |
352 | |
353 | /* |
354 | * Does the application expect PROT_READ to imply PROT_EXEC? |
355 | * |
356 | * (the exception is when the underlying filesystem is noexec |
357 | * mounted, in which case we don't add PROT_EXEC.) |
358 | */ |
359 | if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) |
360 | if (!(file && path_noexec(&file->f_path))) |
361 | prot |= PROT_EXEC; |
362 | |
363 | /* force arch specific MAP_FIXED handling in get_unmapped_area */ |
364 | if (flags & MAP_FIXED_NOREPLACE) |
365 | flags |= MAP_FIXED; |
366 | |
367 | if (!(flags & MAP_FIXED)) |
368 | addr = round_hint_to_min(addr); |
369 | |
370 | /* Careful about overflows.. */ |
371 | len = PAGE_ALIGN(len); |
372 | if (!len) |
373 | return -ENOMEM; |
374 | |
375 | /* offset overflow? */ |
376 | if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) |
377 | return -EOVERFLOW; |
378 | |
379 | /* Too many mappings? */ |
380 | if (mm->map_count > sysctl_max_map_count) |
381 | return -ENOMEM; |
382 | |
383 | /* |
384 | * addr is returned from get_unmapped_area, |
385 | * There are two cases: |
386 | * 1> MAP_FIXED == false |
387 | * unallocated memory, no need to check sealing. |
388 | * 2> MAP_FIXED == true |
389 | * sealing is checked inside mmap_region when |
390 | * do_vmi_munmap is called. |
391 | */ |
392 | |
393 | if (prot == PROT_EXEC) { |
394 | pkey = execute_only_pkey(mm); |
395 | if (pkey < 0) |
396 | pkey = 0; |
397 | } |
398 | |
399 | /* Do simple checking here so the lower-level routines won't have |
400 | * to. We assume access permissions have been handled by the open |
401 | * of the memory object, so we don't do any here. |
402 | */ |
403 | vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) | |
404 | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; |
405 | |
406 | /* Obtain the address to map to. We verify (or select) it and ensure |
407 | * that it represents a valid section of the address space. |
408 | */ |
409 | addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags); |
410 | if (IS_ERR_VALUE(addr)) |
411 | return addr; |
412 | |
413 | if (flags & MAP_FIXED_NOREPLACE) { |
414 | if (find_vma_intersection(mm, addr, addr + len)) |
415 | return -EEXIST; |
416 | } |
417 | |
418 | if (flags & MAP_LOCKED) |
419 | if (!can_do_mlock()) |
420 | return -EPERM; |
421 | |
422 | if (!mlock_future_ok(mm, vm_flags, len)) |
423 | return -EAGAIN; |
424 | |
425 | if (file) { |
426 | struct inode *inode = file_inode(file); |
427 | unsigned long flags_mask; |
428 | int err; |
429 | |
430 | if (!file_mmap_ok(file, inode, pgoff, len)) |
431 | return -EOVERFLOW; |
432 | |
433 | flags_mask = LEGACY_MAP_MASK; |
434 | if (file->f_op->fop_flags & FOP_MMAP_SYNC) |
435 | flags_mask |= MAP_SYNC; |
436 | |
437 | switch (flags & MAP_TYPE) { |
438 | case MAP_SHARED: |
439 | /* |
440 | * Force use of MAP_SHARED_VALIDATE with non-legacy |
441 | * flags. E.g. MAP_SYNC is dangerous to use with |
442 | * MAP_SHARED as you don't know which consistency model |
443 | * you will get. We silently ignore unsupported flags |
444 | * with MAP_SHARED to preserve backward compatibility. |
445 | */ |
446 | flags &= LEGACY_MAP_MASK; |
447 | fallthrough; |
448 | case MAP_SHARED_VALIDATE: |
449 | if (flags & ~flags_mask) |
450 | return -EOPNOTSUPP; |
451 | if (prot & PROT_WRITE) { |
452 | if (!(file->f_mode & FMODE_WRITE)) |
453 | return -EACCES; |
454 | if (IS_SWAPFILE(file->f_mapping->host)) |
455 | return -ETXTBSY; |
456 | } |
457 | |
458 | /* |
459 | * Make sure we don't allow writing to an append-only |
460 | * file.. |
461 | */ |
462 | if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) |
463 | return -EACCES; |
464 | |
465 | vm_flags |= VM_SHARED | VM_MAYSHARE; |
466 | if (!(file->f_mode & FMODE_WRITE)) |
467 | vm_flags &= ~(VM_MAYWRITE | VM_SHARED); |
468 | fallthrough; |
469 | case MAP_PRIVATE: |
470 | if (!(file->f_mode & FMODE_READ)) |
471 | return -EACCES; |
472 | if (path_noexec(&file->f_path)) { |
473 | if (vm_flags & VM_EXEC) |
474 | return -EPERM; |
475 | vm_flags &= ~VM_MAYEXEC; |
476 | } |
477 | |
478 | if (!file_has_valid_mmap_hooks(file)) |
479 | return -ENODEV; |
480 | if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) |
481 | return -EINVAL; |
482 | break; |
483 | |
484 | default: |
485 | return -EINVAL; |
486 | } |
487 | |
488 | /* |
489 | * Check to see if we are violating any seals and update VMA |
490 | * flags if necessary to avoid future seal violations. |
491 | */ |
492 | err = memfd_check_seals_mmap(file, &vm_flags); |
493 | if (err) |
494 | return (unsigned long)err; |
495 | } else { |
496 | switch (flags & MAP_TYPE) { |
497 | case MAP_SHARED: |
498 | if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) |
499 | return -EINVAL; |
500 | /* |
501 | * Ignore pgoff. |
502 | */ |
503 | pgoff = 0; |
504 | vm_flags |= VM_SHARED | VM_MAYSHARE; |
505 | break; |
506 | case MAP_DROPPABLE: |
507 | if (VM_DROPPABLE == VM_NONE) |
508 | return -ENOTSUPP; |
509 | /* |
510 | * A locked or stack area makes no sense to be droppable. |
511 | * |
512 | * Also, since droppable pages can just go away at any time |
513 | * it makes no sense to copy them on fork or dump them. |
514 | * |
515 | * And don't attempt to combine with hugetlb for now. |
516 | */ |
517 | if (flags & (MAP_LOCKED | MAP_HUGETLB)) |
518 | return -EINVAL; |
519 | if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) |
520 | return -EINVAL; |
521 | |
522 | vm_flags |= VM_DROPPABLE; |
523 | |
524 | /* |
525 | * If the pages can be dropped, then it doesn't make |
526 | * sense to reserve them. |
527 | */ |
528 | vm_flags |= VM_NORESERVE; |
529 | |
530 | /* |
531 | * Likewise, they're volatile enough that they |
532 | * shouldn't survive forks or coredumps. |
533 | */ |
534 | vm_flags |= VM_WIPEONFORK | VM_DONTDUMP; |
535 | fallthrough; |
536 | case MAP_PRIVATE: |
537 | /* |
538 | * Set pgoff according to addr for anon_vma. |
539 | */ |
540 | pgoff = addr >> PAGE_SHIFT; |
541 | break; |
542 | default: |
543 | return -EINVAL; |
544 | } |
545 | } |
546 | |
547 | /* |
548 | * Set 'VM_NORESERVE' if we should not account for the |
549 | * memory use of this mapping. |
550 | */ |
551 | if (flags & MAP_NORESERVE) { |
552 | /* We honor MAP_NORESERVE if allowed to overcommit */ |
553 | if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) |
554 | vm_flags |= VM_NORESERVE; |
555 | |
556 | /* hugetlb applies strict overcommit unless MAP_NORESERVE */ |
557 | if (file && is_file_hugepages(file)) |
558 | vm_flags |= VM_NORESERVE; |
559 | } |
560 | |
561 | addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); |
562 | if (!IS_ERR_VALUE(addr) && |
563 | ((vm_flags & VM_LOCKED) || |
564 | (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) |
565 | *populate = len; |
566 | return addr; |
567 | } |
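/*
 * Illustrative call path (editorial sketch, not taken from this file): most
 * in-kernel users reach do_mmap() through vm_mmap()/vm_mmap_pgoff(), which
 * perform the security_mmap_file() check, take the mmap write lock, call
 * do_mmap(), and then run mm_populate() if *populate was set, e.g.:
 *
 *	addr = vm_mmap(file, 0, len, PROT_READ, MAP_PRIVATE, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;
 *
 * Callers that already hold the mmap write lock call do_mmap() directly, as
 * remap_file_pages() does later in this file.
 */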
568 | |
569 | unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, |
570 | unsigned long prot, unsigned long flags, |
571 | unsigned long fd, unsigned long pgoff) |
572 | { |
573 | struct file *file = NULL; |
574 | unsigned long retval; |
575 | |
576 | if (!(flags & MAP_ANONYMOUS)) { |
577 | audit_mmap_fd(fd, flags); |
578 | file = fget(fd); |
579 | if (!file) |
580 | return -EBADF; |
581 | if (is_file_hugepages(file)) { |
582 | len = ALIGN(len, huge_page_size(hstate_file(file))); |
583 | } else if (unlikely(flags & MAP_HUGETLB)) { |
584 | retval = -EINVAL; |
585 | goto out_fput; |
586 | } |
587 | } else if (flags & MAP_HUGETLB) { |
588 | struct hstate *hs; |
589 | |
590 | hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); |
591 | if (!hs) |
592 | return -EINVAL; |
593 | |
594 | len = ALIGN(len, huge_page_size(hs)); |
595 | /* |
596 | * VM_NORESERVE is used because the reservations will be |
597 | * taken when vm_ops->mmap() is called |
598 | */ |
599 | file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, |
600 | VM_NORESERVE, |
601 | HUGETLB_ANONHUGE_INODE, |
602 | (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); |
603 | if (IS_ERR(file)) |
604 | return PTR_ERR(file); |
605 | } |
606 | |
607 | retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); |
608 | out_fput: |
609 | if (file) |
610 | fput(file); |
611 | return retval; |
612 | } |
613 | |
614 | SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, |
615 | unsigned long, prot, unsigned long, flags, |
616 | unsigned long, fd, unsigned long, pgoff) |
617 | { |
618 | return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); |
619 | } |
620 | |
621 | #ifdef __ARCH_WANT_SYS_OLD_MMAP |
622 | struct mmap_arg_struct { |
623 | unsigned long addr; |
624 | unsigned long len; |
625 | unsigned long prot; |
626 | unsigned long flags; |
627 | unsigned long fd; |
628 | unsigned long offset; |
629 | }; |
630 | |
631 | SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) |
632 | { |
633 | struct mmap_arg_struct a; |
634 | |
635 | if (copy_from_user(&a, arg, sizeof(a))) |
636 | return -EFAULT; |
637 | if (offset_in_page(a.offset)) |
638 | return -EINVAL; |
639 | |
640 | return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, |
641 | a.offset >> PAGE_SHIFT); |
642 | } |
643 | #endif /* __ARCH_WANT_SYS_OLD_MMAP */ |
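/*
 * Example of the difference from mmap_pgoff (editorial addition, assuming
 * 4 KiB pages): the legacy argument structure carries a byte offset, so an
 * offset of 8192 is converted to pgoff 2 by the PAGE_SHIFT shift above,
 * while an offset of 4097 fails the offset_in_page() check with -EINVAL
 * because it is not page-aligned.
 */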
644 | |
645 | /* |
646 | * Determine if the allocation needs to ensure that there is no |
647 | * existing mapping within its guard gaps, for use as start_gap. |
648 | */ |
649 | static inline unsigned long stack_guard_placement(vm_flags_t vm_flags) |
650 | { |
651 | if (vm_flags & VM_SHADOW_STACK) |
652 | return PAGE_SIZE; |
653 | |
654 | return 0; |
655 | } |
656 | |
657 | /* |
658 | * Search for an unmapped address range. |
659 | * |
660 | * We are looking for a range that: |
661 | * - does not intersect with any VMA; |
662 | * - is contained within the [low_limit, high_limit) interval; |
663 | * - is at least the desired size. |
664 | * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) |
665 | */ |
666 | unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info) |
667 | { |
668 | unsigned long addr; |
669 | |
670 | if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) |
671 | addr = unmapped_area_topdown(info); |
672 | else |
673 | addr = unmapped_area(info); |
674 | |
675 | trace_vm_unmapped_area(addr, info); |
676 | return addr; |
677 | } |
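/*
 * Illustrative constraint (editorial addition): per the rule above, the
 * returned address satisfies
 * (addr & align_mask) == (align_offset & align_mask). So, assuming 2 MiB
 * PMDs and info->align_offset == 0, setting info->align_mask = PMD_SIZE - 1
 * yields a 2 MiB-aligned address, which is the kind of placement the THP
 * path in __get_unmapped_area() relies on.
 */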
678 | |
679 | /* Get an address range which is currently unmapped. |
680 | * For shmat() with addr=0. |
681 | * |
682 | * Ugly calling convention alert: |
683 | * Return value with the low bits set means error value, |
684 | * ie |
685 | * if (ret & ~PAGE_MASK) |
686 | * error = ret; |
687 | * |
688 | * This function "knows" that -ENOMEM has the bits set. |
689 | */ |
690 | unsigned long |
691 | generic_get_unmapped_area(struct file *filp, unsigned long addr, |
692 | unsigned long len, unsigned long pgoff, |
693 | unsigned long flags, vm_flags_t vm_flags) |
694 | { |
695 | struct mm_struct *mm = current->mm; |
696 | struct vm_area_struct *vma, *prev; |
697 | struct vm_unmapped_area_info info = {}; |
698 | const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); |
699 | |
700 | if (len > mmap_end - mmap_min_addr) |
701 | return -ENOMEM; |
702 | |
703 | if (flags & MAP_FIXED) |
704 | return addr; |
705 | |
706 | if (addr) { |
707 | addr = PAGE_ALIGN(addr); |
708 | vma = find_vma_prev(mm, addr, &prev); |
709 | if (mmap_end - len >= addr && addr >= mmap_min_addr && |
710 | (!vma || addr + len <= vm_start_gap(vma)) && |
711 | (!prev || addr >= vm_end_gap(prev))) |
712 | return addr; |
713 | } |
714 | |
715 | info.length = len; |
716 | info.low_limit = mm->mmap_base; |
717 | info.high_limit = mmap_end; |
718 | info.start_gap = stack_guard_placement(vm_flags); |
719 | if (filp && is_file_hugepages(filp)) |
720 | info.align_mask = huge_page_mask_align(filp); |
721 | return vm_unmapped_area(&info); |
722 | } |
723 | |
724 | #ifndef HAVE_ARCH_UNMAPPED_AREA |
725 | unsigned long |
726 | arch_get_unmapped_area(struct file *filp, unsigned long addr, |
727 | unsigned long len, unsigned long pgoff, |
728 | unsigned long flags, vm_flags_t vm_flags) |
729 | { |
730 | return generic_get_unmapped_area(filp, addr, len, pgoff, flags, |
731 | vm_flags); |
732 | } |
733 | #endif |
734 | |
735 | /* |
736 | * This mmap-allocator allocates new areas top-down from below the |
737 | * stack's low limit (the base): |
738 | */ |
739 | unsigned long |
740 | generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, |
741 | unsigned long len, unsigned long pgoff, |
742 | unsigned long flags, vm_flags_t vm_flags) |
743 | { |
744 | struct vm_area_struct *vma, *prev; |
745 | struct mm_struct *mm = current->mm; |
746 | struct vm_unmapped_area_info info = {}; |
747 | const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); |
748 | |
749 | /* requested length too big for entire address space */ |
750 | if (len > mmap_end - mmap_min_addr) |
751 | return -ENOMEM; |
752 | |
753 | if (flags & MAP_FIXED) |
754 | return addr; |
755 | |
756 | /* requesting a specific address */ |
757 | if (addr) { |
758 | addr = PAGE_ALIGN(addr); |
759 | vma = find_vma_prev(mm, addr, &prev); |
760 | if (mmap_end - len >= addr && addr >= mmap_min_addr && |
761 | (!vma || addr + len <= vm_start_gap(vma)) && |
762 | (!prev || addr >= vm_end_gap(prev))) |
763 | return addr; |
764 | } |
765 | |
766 | info.flags = VM_UNMAPPED_AREA_TOPDOWN; |
767 | info.length = len; |
768 | info.low_limit = PAGE_SIZE; |
769 | info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); |
770 | info.start_gap = stack_guard_placement(vm_flags); |
771 | if (filp && is_file_hugepages(filp)) |
772 | info.align_mask = huge_page_mask_align(filp); |
773 | addr = vm_unmapped_area(&info); |
774 | |
775 | /* |
776 | * A failed mmap() very likely causes application failure, |
777 | * so fall back to the bottom-up function here. This scenario |
778 | * can happen with large stack limits and large mmap() |
779 | * allocations. |
780 | */ |
781 | if (offset_in_page(addr)) { |
782 | VM_BUG_ON(addr != -ENOMEM); |
783 | info.flags = 0; |
784 | info.low_limit = TASK_UNMAPPED_BASE; |
785 | info.high_limit = mmap_end; |
786 | addr = vm_unmapped_area(&info); |
787 | } |
788 | |
789 | return addr; |
790 | } |
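/*
 * Editorial note on the fallback above: a top-down search can run out of
 * room when mmap_base has been pulled far down (for example by a very large
 * RLIMIT_STACK or a large randomisation offset) even though plenty of space
 * remains above TASK_UNMAPPED_BASE, so a failed top-down search is retried
 * bottom-up before the allocation finally fails with -ENOMEM.
 */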
791 | |
792 | #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN |
793 | unsigned long |
794 | arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, |
795 | unsigned long len, unsigned long pgoff, |
796 | unsigned long flags, vm_flags_t vm_flags) |
797 | { |
798 | return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags, |
799 | vm_flags); |
800 | } |
801 | #endif |
802 | |
803 | unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp, |
804 | unsigned long addr, unsigned long len, |
805 | unsigned long pgoff, unsigned long flags, |
806 | vm_flags_t vm_flags) |
807 | { |
808 | if (test_bit(MMF_TOPDOWN, &mm->flags)) |
809 | return arch_get_unmapped_area_topdown(filp, addr, len, pgoff, |
810 | flags, vm_flags); |
811 | return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags); |
812 | } |
813 | |
814 | unsigned long |
815 | __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, |
816 | unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) |
817 | { |
818 | unsigned long (*get_area)(struct file *, unsigned long, |
819 | unsigned long, unsigned long, unsigned long) |
820 | = NULL; |
821 | |
822 | unsigned long error = arch_mmap_check(addr, len, flags); |
823 | if (error) |
824 | return error; |
825 | |
826 | /* Careful about overflows.. */ |
827 | if (len > TASK_SIZE) |
828 | return -ENOMEM; |
829 | |
830 | if (file) { |
831 | if (file->f_op->get_unmapped_area) |
832 | get_area = file->f_op->get_unmapped_area; |
833 | } else if (flags & MAP_SHARED) { |
834 | /* |
835 | * mmap_region() will call shmem_zero_setup() to create a file, |
836 | * so use shmem's get_unmapped_area in case it can be huge. |
837 | */ |
838 | get_area = shmem_get_unmapped_area; |
839 | } |
840 | |
841 | /* Always treat pgoff as zero for anonymous memory. */ |
842 | if (!file) |
843 | pgoff = 0; |
844 | |
845 | if (get_area) { |
846 | addr = get_area(file, addr, len, pgoff, flags); |
847 | } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && !file |
848 | && !addr /* no hint */ |
849 | && IS_ALIGNED(len, PMD_SIZE)) { |
850 | /* Ensures that larger anonymous mappings are THP aligned. */ |
851 | addr = thp_get_unmapped_area_vmflags(file, addr, len, |
852 | pgoff, flags, vm_flags); |
853 | } else { |
854 | addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len, |
855 | pgoff, flags, vm_flags); |
856 | } |
857 | if (IS_ERR_VALUE(addr)) |
858 | return addr; |
859 | |
860 | if (addr > TASK_SIZE - len) |
861 | return -ENOMEM; |
862 | if (offset_in_page(addr)) |
863 | return -EINVAL; |
864 | |
865 | error = security_mmap_addr(addr); |
866 | return error ? error : addr; |
867 | } |
868 | |
869 | unsigned long |
870 | mm_get_unmapped_area(struct mm_struct *mm, struct file *file, |
871 | unsigned long addr, unsigned long len, |
872 | unsigned long pgoff, unsigned long flags) |
873 | { |
874 | if (test_bit(MMF_TOPDOWN, &mm->flags)) |
875 | return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags, 0); |
876 | return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0); |
877 | } |
878 | EXPORT_SYMBOL(mm_get_unmapped_area); |
879 | |
880 | /** |
881 | * find_vma_intersection() - Look up the first VMA which intersects the interval |
882 | * @mm: The process address space. |
883 | * @start_addr: The inclusive start user address. |
884 | * @end_addr: The exclusive end user address. |
885 | * |
886 | * Returns: The first VMA within the provided range, %NULL otherwise. Assumes |
887 | * start_addr < end_addr. |
888 | */ |
889 | struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, |
890 | unsigned long start_addr, |
891 | unsigned long end_addr) |
892 | { |
893 | unsigned long index = start_addr; |
894 | |
895 | mmap_assert_locked(mm); |
896 | return mt_find(&mm->mm_mt, &index, end_addr - 1); |
897 | } |
898 | EXPORT_SYMBOL(find_vma_intersection); |
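/*
 * Usage sketch (editorial addition): the MAP_FIXED_NOREPLACE path in
 * do_mmap() above is a canonical caller - it refuses the request with
 * -EEXIST when find_vma_intersection(mm, addr, addr + len) returns a VMA,
 * i.e. when any part of [addr, addr + len) is already mapped.
 */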
899 | |
900 | /** |
901 | * find_vma() - Find the VMA for a given address, or the next VMA. |
902 | * @mm: The mm_struct to check |
903 | * @addr: The address |
904 | * |
905 | * Returns: The VMA associated with addr, or the next VMA. |
906 | * May return %NULL in the case of no VMA at addr or above. |
907 | */ |
908 | struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) |
909 | { |
910 | unsigned long index = addr; |
911 | |
912 | mmap_assert_locked(mm); |
913 | return mt_find(&mm->mm_mt, &index, ULONG_MAX); |
914 | } |
915 | EXPORT_SYMBOL(find_vma); |
916 | |
917 | /** |
918 | * find_vma_prev() - Find the VMA for a given address, or the next vma and |
919 | * set %pprev to the previous VMA, if any. |
920 | * @mm: The mm_struct to check |
921 | * @addr: The address |
922 | * @pprev: The pointer to set to the previous VMA |
923 | * |
924 | * Note that RCU lock is missing here since the external mmap_lock() is used |
925 | * instead. |
926 | * |
927 | * Returns: The VMA associated with @addr, or the next vma. |
928 | * May return %NULL in the case of no vma at addr or above. |
929 | */ |
930 | struct vm_area_struct * |
931 | find_vma_prev(struct mm_struct *mm, unsigned long addr, |
932 | struct vm_area_struct **pprev) |
933 | { |
934 | struct vm_area_struct *vma; |
935 | VMA_ITERATOR(vmi, mm, addr); |
936 | |
937 | vma = vma_iter_load(&vmi); |
938 | *pprev = vma_prev(&vmi); |
939 | if (!vma) |
940 | vma = vma_next(&vmi); |
941 | return vma; |
942 | } |
943 | |
944 | /* enforced gap between the expanding stack and other mappings. */ |
945 | unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; |
946 | |
947 | static int __init cmdline_parse_stack_guard_gap(char *p) |
948 | { |
949 | unsigned long val; |
950 | char *endptr; |
951 | |
952 | val = simple_strtoul(p, &endptr, 10); |
953 | if (!*endptr) |
954 | stack_guard_gap = val << PAGE_SHIFT; |
955 | |
956 | return 1; |
957 | } |
958 | __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); |
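/*
 * Example (editorial addition, assuming 4 KiB pages): the default of 256
 * pages is a 1 MiB guard gap; booting with stack_guard_gap=512 doubles that
 * to 2 MiB, and stack_guard_gap=1 shrinks it to a single page.
 */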
959 | |
960 | #ifdef CONFIG_STACK_GROWSUP |
961 | int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) |
962 | { |
963 | return expand_upwards(vma, address); |
964 | } |
965 | |
966 | struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) |
967 | { |
968 | struct vm_area_struct *vma, *prev; |
969 | |
970 | addr &= PAGE_MASK; |
971 | vma = find_vma_prev(mm, addr, &prev); |
972 | if (vma && (vma->vm_start <= addr)) |
973 | return vma; |
974 | if (!prev) |
975 | return NULL; |
976 | if (expand_stack_locked(prev, addr)) |
977 | return NULL; |
978 | if (prev->vm_flags & VM_LOCKED) |
979 | populate_vma_page_range(prev, addr, prev->vm_end, NULL); |
980 | return prev; |
981 | } |
982 | #else |
983 | int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) |
984 | { |
985 | return expand_downwards(vma, address); |
986 | } |
987 | |
988 | struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) |
989 | { |
990 | struct vm_area_struct *vma; |
991 | unsigned long start; |
992 | |
993 | addr &= PAGE_MASK; |
994 | vma = find_vma(mm, addr); |
995 | if (!vma) |
996 | return NULL; |
997 | if (vma->vm_start <= addr) |
998 | return vma; |
999 | start = vma->vm_start; |
1000 | if (expand_stack_locked(vma, addr)) |
1001 | return NULL; |
1002 | if (vma->vm_flags & VM_LOCKED) |
1003 | populate_vma_page_range(vma, addr, start, NULL); |
1004 | return vma; |
1005 | } |
1006 | #endif |
1007 | |
1008 | #if defined(CONFIG_STACK_GROWSUP) |
1009 | |
1010 | #define vma_expand_up(vma,addr) expand_upwards(vma, addr) |
1011 | #define vma_expand_down(vma, addr) (-EFAULT) |
1012 | |
1013 | #else |
1014 | |
1015 | #define vma_expand_up(vma,addr) (-EFAULT) |
1016 | #define vma_expand_down(vma, addr) expand_downwards(vma, addr) |
1017 | |
1018 | #endif |
1019 | |
1020 | /* |
1021 | * expand_stack(): legacy interface for page faulting. Don't use unless |
1022 | * you have to. |
1023 | * |
1024 | * This is called with the mm locked for reading, drops the lock, takes |
1025 | * the lock for writing, tries to look up a vma again, expands it if |
1026 | * necessary, and downgrades the lock to reading again. |
1027 | * |
1028 | * If no vma is found or it can't be expanded, it returns NULL and has |
1029 | * dropped the lock. |
1030 | */ |
1031 | struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) |
1032 | { |
1033 | struct vm_area_struct *vma, *prev; |
1034 | |
1035 | mmap_read_unlock(mm); |
1036 | if (mmap_write_lock_killable(mm)) |
1037 | return NULL; |
1038 | |
1039 | vma = find_vma_prev(mm, addr, &prev); |
1040 | if (vma && vma->vm_start <= addr) |
1041 | goto success; |
1042 | |
1043 | if (prev && !vma_expand_up(prev, addr)) { |
1044 | vma = prev; |
1045 | goto success; |
1046 | } |
1047 | |
1048 | if (vma && !vma_expand_down(vma, addr)) |
1049 | goto success; |
1050 | |
1051 | mmap_write_unlock(mm); |
1052 | return NULL; |
1053 | |
1054 | success: |
1055 | mmap_write_downgrade(mm); |
1056 | return vma; |
1057 | } |
1058 | |
1059 | /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls. |
1060 | * @mm: The mm_struct |
1061 | * @start: The start address to munmap |
1062 | * @len: The length to be munmapped. |
1063 | * @uf: The userfaultfd list_head |
1064 | * |
1065 | * Return: 0 on success, error otherwise. |
1066 | */ |
1067 | int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, |
1068 | struct list_head *uf) |
1069 | { |
1070 | VMA_ITERATOR(vmi, mm, start); |
1071 | |
1072 | return do_vmi_munmap(&vmi, mm, start, len, uf, false); |
1073 | } |
1074 | |
1075 | int vm_munmap(unsigned long start, size_t len) |
1076 | { |
1077 | return __vm_munmap(start, len, false); |
1078 | } |
1079 | EXPORT_SYMBOL(vm_munmap); |
1080 | |
1081 | SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) |
1082 | { |
1083 | addr = untagged_addr(addr); |
1084 | return __vm_munmap(addr, len, true); |
1085 | } |
1086 | |
1087 | |
1088 | /* |
1089 | * Emulation of deprecated remap_file_pages() syscall. |
1090 | */ |
1091 | SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, |
1092 | unsigned long, prot, unsigned long, pgoff, unsigned long, flags) |
1093 | { |
1094 | |
1095 | struct mm_struct *mm = current->mm; |
1096 | struct vm_area_struct *vma; |
1097 | unsigned long populate = 0; |
1098 | unsigned long ret = -EINVAL; |
1099 | struct file *file; |
1100 | vm_flags_t vm_flags; |
1101 | |
1102 | pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n", |
1103 | current->comm, current->pid); |
1104 | |
1105 | if (prot) |
1106 | return ret; |
1107 | start = start & PAGE_MASK; |
1108 | size = size & PAGE_MASK; |
1109 | |
1110 | if (start + size <= start) |
1111 | return ret; |
1112 | |
1113 | /* Does pgoff wrap? */ |
1114 | if (pgoff + (size >> PAGE_SHIFT) < pgoff) |
1115 | return ret; |
1116 | |
1117 | if (mmap_read_lock_killable(mm)) |
1118 | return -EINTR; |
1119 | |
1120 | /* |
1121 | * Look up VMA under read lock first so we can perform the security |
1122 | * without holding locks (which can be problematic). We reacquire a |
1123 | * write lock later and check nothing changed underneath us. |
1124 | */ |
1125 | vma = vma_lookup(mm, start); |
1126 | |
1127 | if (!vma || !(vma->vm_flags & VM_SHARED)) { |
1128 | mmap_read_unlock(mm); |
1129 | return -EINVAL; |
1130 | } |
1131 | |
1132 | prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; |
1133 | prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; |
1134 | prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; |
1135 | |
1136 | flags &= MAP_NONBLOCK; |
1137 | flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; |
1138 | if (vma->vm_flags & VM_LOCKED) |
1139 | flags |= MAP_LOCKED; |
1140 | |
1141 | /* Save vm_flags used to calculate prot and flags, and recheck later. */ |
1142 | vm_flags = vma->vm_flags; |
1143 | file = get_file(vma->vm_file); |
1144 | |
1145 | mmap_read_unlock(mm); |
1146 | |
1147 | /* Call outside mmap_lock to be consistent with other callers. */ |
1148 | ret = security_mmap_file(file, prot, flags); |
1149 | if (ret) { |
1150 | fput(file); |
1151 | return ret; |
1152 | } |
1153 | |
1154 | ret = -EINVAL; |
1155 | |
1156 | /* OK security check passed, take write lock + let it rip. */ |
1157 | if (mmap_write_lock_killable(mm)) { |
1158 | fput(file); |
1159 | return -EINTR; |
1160 | } |
1161 | |
1162 | vma = vma_lookup(mm, start); |
1163 | |
1164 | if (!vma) |
1165 | goto out; |
1166 | |
1167 | /* Make sure things didn't change under us. */ |
1168 | if (vma->vm_flags != vm_flags) |
1169 | goto out; |
1170 | if (vma->vm_file != file) |
1171 | goto out; |
1172 | |
1173 | if (start + size > vma->vm_end) { |
1174 | VMA_ITERATOR(vmi, mm, vma->vm_end); |
1175 | struct vm_area_struct *next, *prev = vma; |
1176 | |
1177 | for_each_vma_range(vmi, next, start + size) { |
1178 | /* hole between vmas ? */ |
1179 | if (next->vm_start != prev->vm_end) |
1180 | goto out; |
1181 | |
1182 | if (next->vm_file != vma->vm_file) |
1183 | goto out; |
1184 | |
1185 | if (next->vm_flags != vma->vm_flags) |
1186 | goto out; |
1187 | |
1188 | if (start + size <= next->vm_end) |
1189 | break; |
1190 | |
1191 | prev = next; |
1192 | } |
1193 | |
1194 | if (!next) |
1195 | goto out; |
1196 | } |
1197 | |
1198 | ret = do_mmap(vma->vm_file, start, size, |
1199 | prot, flags, 0, pgoff, &populate, NULL); |
1200 | out: |
1201 | mmap_write_unlock(mm); |
1202 | fput(file); |
1203 | if (populate) |
1204 | mm_populate(ret, populate); |
1205 | if (!IS_ERR_VALUE(ret)) |
1206 | ret = 0; |
1207 | return ret; |
1208 | } |
1209 | |
1210 | int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) |
1211 | { |
1212 | struct mm_struct *mm = current->mm; |
1213 | struct vm_area_struct *vma = NULL; |
1214 | unsigned long len; |
1215 | int ret; |
1216 | bool populate; |
1217 | LIST_HEAD(uf); |
1218 | VMA_ITERATOR(vmi, mm, addr); |
1219 | |
1220 | len = PAGE_ALIGN(request); |
1221 | if (len < request) |
1222 | return -ENOMEM; |
1223 | if (!len) |
1224 | return 0; |
1225 | |
1226 | /* Until we need other flags, refuse anything except VM_EXEC. */ |
1227 | if ((flags & (~VM_EXEC)) != 0) |
1228 | return -EINVAL; |
1229 | |
1230 | if (mmap_write_lock_killable(mm)) |
1231 | return -EINTR; |
1232 | |
1233 | ret = check_brk_limits(addr, len); |
1234 | if (ret) |
1235 | goto limits_failed; |
1236 | |
1237 | ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0); |
1238 | if (ret) |
1239 | goto munmap_failed; |
1240 | |
1241 | vma = vma_prev(&vmi); |
1242 | ret = do_brk_flags(&vmi, vma, addr, len, flags); |
1243 | populate = ((mm->def_flags & VM_LOCKED) != 0); |
1244 | mmap_write_unlock(mm); |
1245 | userfaultfd_unmap_complete(mm, &uf); |
1246 | if (populate && !ret) |
1247 | mm_populate(addr, len); |
1248 | return ret; |
1249 | |
1250 | munmap_failed: |
1251 | limits_failed: |
1252 | mmap_write_unlock(mm); |
1253 | return ret; |
1254 | } |
1255 | EXPORT_SYMBOL(vm_brk_flags); |
1256 | |
1257 | /* Release all mmaps. */ |
1258 | void exit_mmap(struct mm_struct *mm) |
1259 | { |
1260 | struct mmu_gather tlb; |
1261 | struct vm_area_struct *vma; |
1262 | unsigned long nr_accounted = 0; |
1263 | VMA_ITERATOR(vmi, mm, 0); |
1264 | int count = 0; |
1265 | |
1266 | /* mm's last user has gone, and it's about to be pulled down */ |
1267 | mmu_notifier_release(mm); |
1268 | |
1269 | mmap_read_lock(mm); |
1270 | arch_exit_mmap(mm); |
1271 | |
1272 | vma = vma_next(&vmi); |
1273 | if (!vma || unlikely(xa_is_zero(vma))) { |
1274 | /* Can happen if dup_mmap() received an OOM */ |
1275 | mmap_read_unlock(mm); |
1276 | mmap_write_lock(mm); |
1277 | goto destroy; |
1278 | } |
1279 | |
1280 | flush_cache_mm(mm); |
1281 | tlb_gather_mmu_fullmm(&tlb, mm); |
1282 | /* update_hiwater_rss(mm) here? but nobody should be looking */ |
1283 | /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ |
1284 | unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false); |
1285 | mmap_read_unlock(mm); |
1286 | |
1287 | /* |
1288 | * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper |
1289 | * because the memory has been already freed. |
1290 | */ |
1291 | set_bit(MMF_OOM_SKIP, &mm->flags); |
1292 | mmap_write_lock(mm); |
1293 | mt_clear_in_rcu(&mm->mm_mt); |
1294 | vma_iter_set(&vmi, vma->vm_end); |
1295 | free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS, |
1296 | USER_PGTABLES_CEILING, true); |
1297 | tlb_finish_mmu(&tlb); |
1298 | |
1299 | /* |
1300 | * Walk the list again, actually closing and freeing it, with preemption |
1301 | * enabled, without holding any MM locks besides the unreachable |
1302 | * mmap_write_lock. |
1303 | */ |
1304 | vma_iter_set(&vmi, vma->vm_end); |
1305 | do { |
1306 | if (vma->vm_flags & VM_ACCOUNT) |
1307 | nr_accounted += vma_pages(vma); |
1308 | vma_mark_detached(vma); |
1309 | remove_vma(vma); |
1310 | count++; |
1311 | cond_resched(); |
1312 | vma = vma_next(&vmi); |
1313 | } while (vma && likely(!xa_is_zero(vma))); |
1314 | |
1315 | BUG_ON(count != mm->map_count); |
1316 | |
1317 | trace_exit_mmap(mm); |
1318 | destroy: |
1319 | __mt_destroy(&mm->mm_mt); |
1320 | mmap_write_unlock(mm); |
1321 | vm_unacct_memory(nr_accounted); |
1322 | } |
1323 | |
1324 | /* |
1325 | * Return true if the calling process may expand its vm space by the passed |
1326 | * number of pages |
1327 | */ |
1328 | bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) |
1329 | { |
1330 | if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) |
1331 | return false; |
1332 | |
1333 | if (is_data_mapping(flags) && |
1334 | mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { |
1335 | /* Workaround for Valgrind */ |
1336 | if (rlimit(RLIMIT_DATA) == 0 && |
1337 | mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) |
1338 | return true; |
1339 | |
1340 | pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n", |
1341 | current->comm, current->pid, |
1342 | (mm->data_vm + npages) << PAGE_SHIFT, |
1343 | rlimit(RLIMIT_DATA), |
1344 | ignore_rlimit_data ? "": " or use boot option ignore_rlimit_data"); |
1345 | |
1346 | if (!ignore_rlimit_data) |
1347 | return false; |
1348 | } |
1349 | |
1350 | return true; |
1351 | } |
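/*
 * Worked example (editorial addition, assuming 4 KiB pages): with RLIMIT_AS
 * set to 1 GiB the address-space budget is 1 GiB >> 12 = 262144 pages, so a
 * request whose npages would push mm->total_vm past 262144 returns false and
 * the caller typically fails with -ENOMEM; the RLIMIT_DATA branch applies
 * the same arithmetic to data mappings only.
 */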
1352 | |
1353 | void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) |
1354 | { |
1355 | WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages); |
1356 | |
1357 | if (is_exec_mapping(flags)) |
1358 | mm->exec_vm += npages; |
1359 | else if (is_stack_mapping(flags)) |
1360 | mm->stack_vm += npages; |
1361 | else if (is_data_mapping(flags)) |
1362 | mm->data_vm += npages; |
1363 | } |
1364 | |
1365 | static vm_fault_t special_mapping_fault(struct vm_fault *vmf); |
1366 | |
1367 | /* |
1368 | * Close hook, called for unmap() and on the old vma for mremap(). |
1369 | * |
1370 | * Having a close hook prevents vma merging regardless of flags. |
1371 | */ |
1372 | static void special_mapping_close(struct vm_area_struct *vma) |
1373 | { |
1374 | const struct vm_special_mapping *sm = vma->vm_private_data; |
1375 | |
1376 | if (sm->close) |
1377 | sm->close(sm, vma); |
1378 | } |
1379 | |
1380 | static const char *special_mapping_name(struct vm_area_struct *vma) |
1381 | { |
1382 | return ((struct vm_special_mapping *)vma->vm_private_data)->name; |
1383 | } |
1384 | |
1385 | static int special_mapping_mremap(struct vm_area_struct *new_vma) |
1386 | { |
1387 | struct vm_special_mapping *sm = new_vma->vm_private_data; |
1388 | |
1389 | if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) |
1390 | return -EFAULT; |
1391 | |
1392 | if (sm->mremap) |
1393 | return sm->mremap(sm, new_vma); |
1394 | |
1395 | return 0; |
1396 | } |
1397 | |
1398 | static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr) |
1399 | { |
1400 | /* |
1401 | * Forbid splitting special mappings - kernel has expectations over |
1402 | * the number of pages in mapping. Together with VM_DONTEXPAND |
1403 | * the size of vma should stay the same over the special mapping's |
1404 | * lifetime. |
1405 | */ |
1406 | return -EINVAL; |
1407 | } |
1408 | |
1409 | static const struct vm_operations_struct special_mapping_vmops = { |
1410 | .close = special_mapping_close, |
1411 | .fault = special_mapping_fault, |
1412 | .mremap = special_mapping_mremap, |
1413 | .name = special_mapping_name, |
1414 | /* vDSO code relies that VVAR can't be accessed remotely */ |
1415 | .access = NULL, |
1416 | .may_split = special_mapping_split, |
1417 | }; |
1418 | |
1419 | static vm_fault_t special_mapping_fault(struct vm_fault *vmf) |
1420 | { |
1421 | struct vm_area_struct *vma = vmf->vma; |
1422 | pgoff_t pgoff; |
1423 | struct page **pages; |
1424 | struct vm_special_mapping *sm = vma->vm_private_data; |
1425 | |
1426 | if (sm->fault) |
1427 | return sm->fault(sm, vmf->vma, vmf); |
1428 | |
1429 | pages = sm->pages; |
1430 | |
1431 | for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) |
1432 | pgoff--; |
1433 | |
1434 | if (*pages) { |
1435 | struct page *page = *pages; |
1436 | get_page(page); |
1437 | vmf->page = page; |
1438 | return 0; |
1439 | } |
1440 | |
1441 | return VM_FAULT_SIGBUS; |
1442 | } |
1443 | |
1444 | static struct vm_area_struct *__install_special_mapping( |
1445 | struct mm_struct *mm, |
1446 | unsigned long addr, unsigned long len, |
1447 | unsigned long vm_flags, void *priv, |
1448 | const struct vm_operations_struct *ops) |
1449 | { |
1450 | int ret; |
1451 | struct vm_area_struct *vma; |
1452 | |
1453 | vma = vm_area_alloc(mm); |
1454 | if (unlikely(vma == NULL)) |
1455 | return ERR_PTR(-ENOMEM); |
1456 | |
1457 | vma_set_range(vma, addr, addr + len, 0); |
1458 | vm_flags_init(vma, (vm_flags | mm->def_flags | |
1459 | VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK); |
1460 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); |
1461 | |
1462 | vma->vm_ops = ops; |
1463 | vma->vm_private_data = priv; |
1464 | |
1465 | ret = insert_vm_struct(mm, vma); |
1466 | if (ret) |
1467 | goto out; |
1468 | |
1469 | vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); |
1470 | |
1471 | perf_event_mmap(vma); |
1472 | |
1473 | return vma; |
1474 | |
1475 | out: |
1476 | vm_area_free(vma); |
1477 | return ERR_PTR(ret); |
1478 | } |
1479 | |
1480 | bool vma_is_special_mapping(const struct vm_area_struct *vma, |
1481 | const struct vm_special_mapping *sm) |
1482 | { |
1483 | return vma->vm_private_data == sm && |
1484 | vma->vm_ops == &special_mapping_vmops; |
1485 | } |
1486 | |
1487 | /* |
1488 | * Called with mm->mmap_lock held for writing. |
1489 | * Insert a new vma covering the given region, with the given flags. |
1490 | * Its pages are supplied by the given array of struct page *. |
1491 | * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. |
1492 | * The region past the last page supplied will always produce SIGBUS. |
1493 | * The array pointer and the pages it points to are assumed to stay alive |
1494 | * for as long as this mapping might exist. |
1495 | */ |
1496 | struct vm_area_struct *_install_special_mapping( |
1497 | struct mm_struct *mm, |
1498 | unsigned long addr, unsigned long len, |
1499 | unsigned long vm_flags, const struct vm_special_mapping *spec) |
1500 | { |
1501 | return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, |
1502 | &special_mapping_vmops); |
1503 | } |
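/*
 * Usage sketch (editorial addition; all names below are made up for
 * illustration, not taken from this file): an architecture installing a
 * one-page vDSO-style mapping might do something like:
 *
 *	static struct page *example_pages[2];	// one real page, NULL-terminated
 *	static const struct vm_special_mapping example_spec = {
 *		.name  = "[example]",
 *		.pages = example_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD, &example_spec);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * Faults past the pages supplied in the NULL-terminated array raise SIGBUS,
 * as described in the comment above.
 */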
1504 | |
1505 | #ifdef CONFIG_SYSCTL |
1506 | #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ |
1507 | defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) |
1508 | int sysctl_legacy_va_layout; |
1509 | #endif |
1510 | |
1511 | static const struct ctl_table mmap_table[] = { |
1512 | { |
1513 | .procname = "max_map_count", |
1514 | .data = &sysctl_max_map_count, |
1515 | .maxlen = sizeof(sysctl_max_map_count), |
1516 | .mode = 0644, |
1517 | .proc_handler = proc_dointvec_minmax, |
1518 | .extra1 = SYSCTL_ZERO, |
1519 | }, |
1520 | #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ |
1521 | defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) |
1522 | { |
1523 | .procname = "legacy_va_layout", |
1524 | .data = &sysctl_legacy_va_layout, |
1525 | .maxlen = sizeof(sysctl_legacy_va_layout), |
1526 | .mode = 0644, |
1527 | .proc_handler = proc_dointvec_minmax, |
1528 | .extra1 = SYSCTL_ZERO, |
1529 | }, |
1530 | #endif |
1531 | #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS |
1532 | { |
1533 | .procname = "mmap_rnd_bits", |
1534 | .data = &mmap_rnd_bits, |
1535 | .maxlen = sizeof(mmap_rnd_bits), |
1536 | .mode = 0600, |
1537 | .proc_handler = proc_dointvec_minmax, |
1538 | .extra1 = (void *)&mmap_rnd_bits_min, |
1539 | .extra2 = (void *)&mmap_rnd_bits_max, |
1540 | }, |
1541 | #endif |
1542 | #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS |
1543 | { |
1544 | .procname = "mmap_rnd_compat_bits", |
1545 | .data = &mmap_rnd_compat_bits, |
1546 | .maxlen = sizeof(mmap_rnd_compat_bits), |
1547 | .mode = 0600, |
1548 | .proc_handler = proc_dointvec_minmax, |
1549 | .extra1 = (void *)&mmap_rnd_compat_bits_min, |
1550 | .extra2 = (void *)&mmap_rnd_compat_bits_max, |
1551 | }, |
1552 | #endif |
1553 | }; |
1554 | #endif /* CONFIG_SYSCTL */ |
1555 | |
1556 | /* |
1557 | * initialise the percpu counter for VM, initialise VMA state. |
1558 | */ |
1559 | void __init mmap_init(void) |
1560 | { |
1561 | int ret; |
1562 | |
1563 | ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); |
1564 | VM_BUG_ON(ret); |
1565 | #ifdef CONFIG_SYSCTL |
1566 | register_sysctl_init("vm", mmap_table); |
1567 | #endif |
1568 | vma_state_init(); |
1569 | } |
1570 | |
1571 | /* |
1572 | * Initialise sysctl_user_reserve_kbytes. |
1573 | * |
1574 | * This is intended to prevent a user from starting a single memory hogging |
1575 | * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER |
1576 | * mode. |
1577 | * |
1578 | * The default value is min(3% of free memory, 128MB) |
1579 | * 128MB is enough to recover with sshd/login, bash, and top/kill. |
1580 | */ |
1581 | static int init_user_reserve(void) |
1582 | { |
1583 | unsigned long free_kbytes; |
1584 | |
1585 | free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); |
1586 | |
1587 | sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K); |
1588 | return 0; |
1589 | } |
1590 | subsys_initcall(init_user_reserve); |
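/*
 * Worked example (editorial addition): with roughly 8 GiB free
 * (free_kbytes ~= 8388608), free_kbytes / 32 is about 262144 kB (256 MiB),
 * so the reserve is clamped to the 128 MiB cap; on a machine with only
 * 1 GiB free, 3% is about 32 MiB and that smaller value is used instead.
 */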
1591 | |
1592 | /* |
1593 | * Initialise sysctl_admin_reserve_kbytes. |
1594 | * |
1595 | * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin |
1596 | * to log in and kill a memory hogging process. |
1597 | * |
1598 | * Systems with more than 256MB will reserve 8MB, enough to recover |
1599 | * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will |
1600 | * only reserve 3% of free pages by default. |
1601 | */ |
1602 | static int init_admin_reserve(void) |
1603 | { |
1604 | unsigned long free_kbytes; |
1605 | |
1606 | free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); |
1607 | |
1608 | sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K); |
1609 | return 0; |
1610 | } |
1611 | subsys_initcall(init_admin_reserve); |
1612 | |
1613 | /* |
1614 | * Reinitialise user and admin reserves if memory is added or removed. |
1615 | * |
1616 | * The default user reserve max is 128MB, and the default max for the |
1617 | * admin reserve is 8MB. These are usually, but not always, enough to |
1618 | * enable recovery from a memory hogging process using login/sshd, a shell, |
1619 | * and tools like top. It may make sense to increase or even disable the |
1620 | * reserve depending on the existence of swap or variations in the recovery |
1621 | * tools. So, the admin may have changed them. |
1622 | * |
1623 | * If memory is added and the reserves have been eliminated or increased above |
1624 | * the default max, then we'll trust the admin. |
1625 | * |
1626 | * If memory is removed and there isn't enough free memory, then we |
1627 | * need to reset the reserves. |
1628 | * |
1629 | * Otherwise keep the reserve set by the admin. |
1630 | */ |
1631 | static int reserve_mem_notifier(struct notifier_block *nb, |
1632 | unsigned long action, void *data) |
1633 | { |
1634 | unsigned long tmp, free_kbytes; |
1635 | |
1636 | switch (action) { |
1637 | case MEM_ONLINE: |
1638 | /* Default max is 128MB. Leave alone if modified by operator. */ |
1639 | tmp = sysctl_user_reserve_kbytes; |
1640 | if (tmp > 0 && tmp < SZ_128K) |
1641 | init_user_reserve(); |
1642 | |
1643 | /* Default max is 8MB. Leave alone if modified by operator. */ |
1644 | tmp = sysctl_admin_reserve_kbytes; |
1645 | if (tmp > 0 && tmp < SZ_8K) |
1646 | init_admin_reserve(); |
1647 | |
1648 | break; |
1649 | case MEM_OFFLINE: |
1650 | free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); |
1651 | |
1652 | if (sysctl_user_reserve_kbytes > free_kbytes) { |
1653 | init_user_reserve(); |
1654 | pr_info("vm.user_reserve_kbytes reset to %lu\n", |
1655 | sysctl_user_reserve_kbytes); |
1656 | } |
1657 | |
1658 | if (sysctl_admin_reserve_kbytes > free_kbytes) { |
1659 | init_admin_reserve(); |
1660 | pr_info("vm.admin_reserve_kbytes reset to %lu\n", |
1661 | sysctl_admin_reserve_kbytes); |
1662 | } |
1663 | break; |
1664 | default: |
1665 | break; |
1666 | } |
1667 | return NOTIFY_OK; |
1668 | } |

static int __meminit init_reserve_notifier(void)
{
	if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
		pr_err("Failed registering memory add/remove notifier for admin reserve\n");

	return 0;
}
subsys_initcall(init_reserve_notifier);
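
/*
 * Usage sketch (editorial addition; the tuning interface lives outside this
 * file): the reserves initialised above are exposed as the
 * vm.user_reserve_kbytes and vm.admin_reserve_kbytes sysctls, so an
 * administrator relying on swap or a lighter recovery toolchain might
 * override them, e.g.:
 *
 *	sysctl vm.user_reserve_kbytes=0
 *	sysctl vm.admin_reserve_kbytes=262144
 *
 * As the comment above reserve_mem_notifier() explains, values that have
 * been eliminated (0) or raised above the default maxima are trusted and
 * left alone when memory is hotplugged in.
 */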

/*
 * Obtain a read lock on mm->mmap_lock. If the specified address is below the
 * start of the VMA, the intent is to perform a write, and the VMA is a
 * downward-growing stack, then attempt to expand the stack to contain it.
 *
 * This function is intended only for obtaining an argument page from an ELF
 * image, and is almost certainly NOT what you want to use for any other
 * purpose.
 *
 * IMPORTANT - VMA fields are accessed without an mmap lock being held, so the
 * VMA referenced must not be linked in any user-visible tree, i.e. it must be a
 * new VMA being mapped.
 *
 * The function assumes that addr is either contained within the VMA or below
 * it, and makes no attempt to validate this value beyond that.
 *
 * Returns true if the read lock was obtained and the stack was expanded where
 * necessary; false if the stack expansion failed.
 *
 * On stack expansion the function temporarily acquires an mmap write lock
 * before downgrading it.
 */
bool mmap_read_lock_maybe_expand(struct mm_struct *mm,
				 struct vm_area_struct *new_vma,
				 unsigned long addr, bool write)
{
	if (!write || addr >= new_vma->vm_start) {
		mmap_read_lock(mm);
		return true;
	}

	if (!(new_vma->vm_flags & VM_GROWSDOWN))
		return false;

	mmap_write_lock(mm);
	if (expand_downwards(new_vma, addr)) {
		mmap_write_unlock(mm);
		return false;
	}

	mmap_write_downgrade(mm);
	return true;
}
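
/*
 * Illustrative caller pattern (editorial sketch, not lifted from a real call
 * site): the helper either takes mmap_lock for read directly or, after a
 * successful stack expansion, downgrades its temporary write lock, so the
 * caller always pairs it with mmap_read_unlock():
 *
 *	if (!mmap_read_lock_maybe_expand(mm, vma, addr, write))
 *		return -EFAULT;
 *	... look up the argument page under the read lock ...
 *	mmap_read_unlock(mm);
 */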

__latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp;
	int retval;
	unsigned long charge = 0;
	LIST_HEAD(uf);
	VMA_ITERATOR(vmi, mm, 0);

	if (mmap_write_lock_killable(oldmm))
		return -EINTR;
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	dup_mm_exe_file(mm, oldmm);

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	/* Use __mt_dup() to efficiently build an identical maple tree. */
	retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL);
	if (unlikely(retval))
		goto out;

	mt_clear_in_rcu(vmi.mas.tree);
	for_each_vma(vmi, mpnt) {
		struct file *file;

		vma_start_write(mpnt);
		if (mpnt->vm_flags & VM_DONTCOPY) {
			retval = vma_iter_clear_gfp(&vmi, mpnt->vm_start,
						    mpnt->vm_end, GFP_KERNEL);
			if (retval)
				goto loop_out;

			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		/*
		 * Don't duplicate many vmas if we've been oom-killed (for
		 * example)
		 */
		if (fatal_signal_pending(current)) {
			retval = -EINTR;
			goto loop_out;
		}
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}

		tmp = vm_area_dup(mpnt);
		if (!tmp)
			goto fail_nomem;
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (tmp->vm_flags & VM_WIPEONFORK) {
			/*
			 * VM_WIPEONFORK gets a clean slate in the child.
			 * Don't prepare anon_vma until fault since we don't
			 * copy page for current vma.
			 */
			tmp->anon_vma = NULL;
		} else if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		vm_flags_clear(tmp, VM_LOCKED_MASK);
		/*
		 * Copy/update hugetlb private vma information.
		 */
		if (is_vm_hugetlb_page(tmp))
			hugetlb_dup_vma_private(tmp);

		/*
		 * Link the vma into the MT. After using __mt_dup(), memory
		 * allocation is not necessary here, so it cannot fail.
		 */
		vma_iter_bulk_store(&vmi, tmp);

		mm->map_count++;

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		file = tmp->vm_file;
		if (file) {
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			i_mmap_lock_write(mapping);
			if (vma_is_shared_maywrite(tmp))
				mapping_allow_writable(mapping);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
						       &mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		if (!(tmp->vm_flags & VM_WIPEONFORK))
			retval = copy_page_range(tmp, mpnt);

		if (retval) {
			mpnt = vma_next(&vmi);
			goto loop_out;
		}
	}
	/* a new mm has just been created */
	retval = arch_dup_mmap(oldmm, mm);
loop_out:
	vma_iter_free(&vmi);
	if (!retval) {
		mt_set_in_rcu(vmi.mas.tree);
		ksm_fork(mm, oldmm);
		khugepaged_fork(mm, oldmm);
	} else {
		/*
		 * The entire maple tree has already been duplicated. If the
		 * mmap duplication fails, mark the failure point with
		 * XA_ZERO_ENTRY. In exit_mmap(), if this marker is encountered,
		 * stop releasing VMAs that have not been duplicated after this
		 * point.
		 */
		if (mpnt) {
			mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
			mas_store(&vmi.mas, XA_ZERO_ENTRY);
			/* Avoid OOM iterating a broken tree */
			set_bit(MMF_OOM_SKIP, &mm->flags);
		}
		/*
		 * The mm_struct is going to exit, but the locks will be dropped
		 * first. Setting the mm_struct as unstable is advisable as it
		 * is not fully initialised.
		 */
		set_bit(MMF_UNSTABLE, &mm->flags);
	}
out:
	mmap_write_unlock(mm);
	flush_tlb_mm(oldmm);
	mmap_write_unlock(oldmm);
	if (!retval)
		dup_userfaultfd_complete(&uf);
	else
		dup_userfaultfd_fail(&uf);
	return retval;

fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	vm_area_free(tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto loop_out;
}
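
/*
 * Consumer-side sketch (editorial assumption based on the XA_ZERO_ENTRY
 * comment in the error path above): teardown code walking the child's tree
 * after a failed dup_mmap() is expected to treat the zero entry as a stop
 * marker, along the lines of:
 *
 *	vma = vma_next(&vmi);
 *	if (!vma || xa_is_zero(vma))
 *		goto done;	/- nothing beyond this point was duplicated -/
 */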