1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * mm/mmap.c |
4 | * |
5 | * Written by obz. |
6 | * |
7 | * Address space accounting code <alan@lxorguk.ukuu.org.uk> |
8 | */ |
9 | |
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
11 | |
12 | #include <linux/kernel.h> |
13 | #include <linux/slab.h> |
14 | #include <linux/backing-dev.h> |
15 | #include <linux/mm.h> |
16 | #include <linux/mm_inline.h> |
17 | #include <linux/shm.h> |
18 | #include <linux/mman.h> |
19 | #include <linux/pagemap.h> |
20 | #include <linux/swap.h> |
21 | #include <linux/syscalls.h> |
22 | #include <linux/capability.h> |
23 | #include <linux/init.h> |
24 | #include <linux/file.h> |
25 | #include <linux/fs.h> |
26 | #include <linux/personality.h> |
27 | #include <linux/security.h> |
28 | #include <linux/hugetlb.h> |
29 | #include <linux/shmem_fs.h> |
30 | #include <linux/profile.h> |
31 | #include <linux/export.h> |
32 | #include <linux/mount.h> |
33 | #include <linux/mempolicy.h> |
34 | #include <linux/rmap.h> |
35 | #include <linux/mmu_notifier.h> |
36 | #include <linux/mmdebug.h> |
37 | #include <linux/perf_event.h> |
38 | #include <linux/audit.h> |
39 | #include <linux/khugepaged.h> |
40 | #include <linux/uprobes.h> |
41 | #include <linux/notifier.h> |
42 | #include <linux/memory.h> |
43 | #include <linux/printk.h> |
44 | #include <linux/userfaultfd_k.h> |
45 | #include <linux/moduleparam.h> |
46 | #include <linux/pkeys.h> |
47 | #include <linux/oom.h> |
48 | #include <linux/sched/mm.h> |
49 | #include <linux/ksm.h> |
50 | |
51 | #include <linux/uaccess.h> |
52 | #include <asm/cacheflush.h> |
53 | #include <asm/tlb.h> |
54 | #include <asm/mmu_context.h> |
55 | |
56 | #define CREATE_TRACE_POINTS |
57 | #include <trace/events/mmap.h> |
58 | |
59 | #include "internal.h" |
60 | |
61 | #ifndef arch_mmap_check |
62 | #define arch_mmap_check(addr, len, flags) (0) |
63 | #endif |
64 | |
65 | #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS |
66 | const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN; |
67 | const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX; |
68 | int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS; |
69 | #endif |
70 | #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS |
71 | const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN; |
72 | const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX; |
73 | int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS; |
74 | #endif |
75 | |
76 | static bool ignore_rlimit_data; |
77 | core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); |
78 | |
79 | static void unmap_region(struct mm_struct *mm, struct ma_state *mas, |
80 | struct vm_area_struct *vma, struct vm_area_struct *prev, |
81 | struct vm_area_struct *next, unsigned long start, |
82 | unsigned long end, unsigned long tree_end, bool mm_wr_locked); |
83 | |
84 | static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) |
85 | { |
return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
87 | } |
88 | |
89 | /* Update vma->vm_page_prot to reflect vma->vm_flags. */ |
90 | void vma_set_page_prot(struct vm_area_struct *vma) |
91 | { |
92 | unsigned long vm_flags = vma->vm_flags; |
93 | pgprot_t vm_page_prot; |
94 | |
vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
if (vma_wants_writenotify(vma, vm_page_prot)) {
vm_flags &= ~VM_SHARED;
vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
99 | } |
100 | /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */ |
101 | WRITE_ONCE(vma->vm_page_prot, vm_page_prot); |
102 | } |
103 | |
104 | /* |
105 | * Requires inode->i_mapping->i_mmap_rwsem |
106 | */ |
107 | static void __remove_shared_vm_struct(struct vm_area_struct *vma, |
108 | struct file *file, struct address_space *mapping) |
109 | { |
110 | if (vma_is_shared_maywrite(vma)) |
111 | mapping_unmap_writable(mapping); |
112 | |
113 | flush_dcache_mmap_lock(mapping); |
vma_interval_tree_remove(vma, &mapping->i_mmap);
115 | flush_dcache_mmap_unlock(mapping); |
116 | } |
117 | |
118 | /* |
119 | * Unlink a file-based vm structure from its interval tree, to hide |
120 | * vma from rmap and vmtruncate before freeing its page tables. |
121 | */ |
122 | void unlink_file_vma(struct vm_area_struct *vma) |
123 | { |
124 | struct file *file = vma->vm_file; |
125 | |
126 | if (file) { |
127 | struct address_space *mapping = file->f_mapping; |
128 | i_mmap_lock_write(mapping); |
129 | __remove_shared_vm_struct(vma, file, mapping); |
130 | i_mmap_unlock_write(mapping); |
131 | } |
132 | } |
133 | |
134 | /* |
135 | * Close a vm structure and free it. |
136 | */ |
137 | static void remove_vma(struct vm_area_struct *vma, bool unreachable) |
138 | { |
139 | might_sleep(); |
140 | if (vma->vm_ops && vma->vm_ops->close) |
141 | vma->vm_ops->close(vma); |
142 | if (vma->vm_file) |
143 | fput(vma->vm_file); |
144 | mpol_put(vma_policy(vma)); |
145 | if (unreachable) |
146 | __vm_area_free(vma); |
147 | else |
148 | vm_area_free(vma); |
149 | } |
150 | |
151 | static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi, |
152 | unsigned long min) |
153 | { |
return mas_prev(&vmi->mas, min);
155 | } |
156 | |
157 | /* |
158 | * check_brk_limits() - Use platform specific check of range & verify mlock |
159 | * limits. |
160 | * @addr: The address to check |
161 | * @len: The size of increase. |
162 | * |
163 | * Return: 0 on success. |
164 | */ |
165 | static int check_brk_limits(unsigned long addr, unsigned long len) |
166 | { |
167 | unsigned long mapped_addr; |
168 | |
169 | mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); |
170 | if (IS_ERR_VALUE(mapped_addr)) |
171 | return mapped_addr; |
172 | |
return mlock_future_ok(current->mm, current->mm->def_flags, len)
174 | ? 0 : -EAGAIN; |
175 | } |
176 | static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma, |
177 | unsigned long addr, unsigned long request, unsigned long flags); |
178 | SYSCALL_DEFINE1(brk, unsigned long, brk) |
179 | { |
180 | unsigned long newbrk, oldbrk, origbrk; |
181 | struct mm_struct *mm = current->mm; |
182 | struct vm_area_struct *brkvma, *next = NULL; |
183 | unsigned long min_brk; |
184 | bool populate = false; |
185 | LIST_HEAD(uf); |
186 | struct vma_iterator vmi; |
187 | |
188 | if (mmap_write_lock_killable(mm)) |
189 | return -EINTR; |
190 | |
191 | origbrk = mm->brk; |
192 | |
193 | #ifdef CONFIG_COMPAT_BRK |
194 | /* |
195 | * CONFIG_COMPAT_BRK can still be overridden by setting |
196 | * randomize_va_space to 2, which will still cause mm->start_brk |
197 | * to be arbitrarily shifted |
198 | */ |
199 | if (current->brk_randomized) |
200 | min_brk = mm->start_brk; |
201 | else |
202 | min_brk = mm->end_data; |
203 | #else |
204 | min_brk = mm->start_brk; |
205 | #endif |
206 | if (brk < min_brk) |
207 | goto out; |
208 | |
209 | /* |
210 | * Check against rlimit here. If this check is done later after the test |
211 | * of oldbrk with newbrk then it can escape the test and let the data |
 * segment grow beyond its set limit in the case where the limit is
213 | * not page aligned -Ram Gupta |
214 | */ |
if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
mm->end_data, mm->start_data))
217 | goto out; |
218 | |
219 | newbrk = PAGE_ALIGN(brk); |
220 | oldbrk = PAGE_ALIGN(mm->brk); |
221 | if (oldbrk == newbrk) { |
222 | mm->brk = brk; |
223 | goto success; |
224 | } |
225 | |
226 | /* Always allow shrinking brk. */ |
227 | if (brk <= mm->brk) { |
228 | /* Search one past newbrk */ |
vma_iter_init(&vmi, mm, newbrk);
brkvma = vma_find(&vmi, oldbrk);
231 | if (!brkvma || brkvma->vm_start >= oldbrk) |
232 | goto out; /* mapping intersects with an existing non-brk vma. */ |
233 | /* |
234 | * mm->brk must be protected by write mmap_lock. |
235 | * do_vma_munmap() will drop the lock on success, so update it |
236 | * before calling do_vma_munmap(). |
237 | */ |
238 | mm->brk = brk; |
if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
240 | goto out; |
241 | |
242 | goto success_unlocked; |
243 | } |
244 | |
if (check_brk_limits(oldbrk, newbrk - oldbrk))
246 | goto out; |
247 | |
248 | /* |
249 | * Only check if the next VMA is within the stack_guard_gap of the |
250 | * expansion area |
251 | */ |
vma_iter_init(&vmi, mm, oldbrk);
next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
255 | goto out; |
256 | |
brkvma = vma_prev_limit(&vmi, mm->start_brk);
/* Ok, looks good - let it rip. */
if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
260 | goto out; |
261 | |
262 | mm->brk = brk; |
263 | if (mm->def_flags & VM_LOCKED) |
264 | populate = true; |
265 | |
266 | success: |
267 | mmap_write_unlock(mm); |
268 | success_unlocked: |
userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(oldbrk, newbrk - oldbrk);
272 | return brk; |
273 | |
274 | out: |
275 | mm->brk = origbrk; |
276 | mmap_write_unlock(mm); |
277 | return origbrk; |
278 | } |
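/*
 * Userspace allocators typically drive this syscall through sbrk(): for
 * example, glibc's sbrk(increment) issues brk(old_break + increment) and
 * compares the returned break with the requested one. Note that on failure
 * the syscall simply returns the unchanged break (origbrk above) rather
 * than an errno value.
 */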
279 | |
280 | #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) |
281 | static void validate_mm(struct mm_struct *mm) |
282 | { |
283 | int bug = 0; |
284 | int i = 0; |
285 | struct vm_area_struct *vma; |
286 | VMA_ITERATOR(vmi, mm, 0); |
287 | |
mt_validate(&mm->mm_mt);
289 | for_each_vma(vmi, vma) { |
290 | #ifdef CONFIG_DEBUG_VM_RB |
291 | struct anon_vma *anon_vma = vma->anon_vma; |
292 | struct anon_vma_chain *avc; |
293 | #endif |
294 | unsigned long vmi_start, vmi_end; |
295 | bool warn = 0; |
296 | |
vmi_start = vma_iter_addr(&vmi);
vmi_end = vma_iter_end(&vmi);
299 | if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm)) |
300 | warn = 1; |
301 | |
302 | if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm)) |
303 | warn = 1; |
304 | |
305 | if (warn) { |
pr_emerg("issue in %s\n", current->comm);
dump_stack();
dump_vma(vma);
pr_emerg("tree range: %px start %lx end %lx\n", vma,
vmi_start, vmi_end - 1);
vma_iter_dump_tree(&vmi);
312 | } |
313 | |
314 | #ifdef CONFIG_DEBUG_VM_RB |
315 | if (anon_vma) { |
316 | anon_vma_lock_read(anon_vma); |
317 | list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) |
anon_vma_interval_tree_verify(avc);
319 | anon_vma_unlock_read(anon_vma); |
320 | } |
321 | #endif |
322 | i++; |
323 | } |
324 | if (i != mm->map_count) { |
pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
326 | bug = 1; |
327 | } |
328 | VM_BUG_ON_MM(bug, mm); |
329 | } |
330 | |
331 | #else /* !CONFIG_DEBUG_VM_MAPLE_TREE */ |
332 | #define validate_mm(mm) do { } while (0) |
333 | #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ |
334 | |
335 | /* |
336 | * vma has some anon_vma assigned, and is already inserted on that |
337 | * anon_vma's interval trees. |
338 | * |
339 | * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the |
340 | * vma must be removed from the anon_vma's interval trees using |
341 | * anon_vma_interval_tree_pre_update_vma(). |
342 | * |
343 | * After the update, the vma will be reinserted using |
344 | * anon_vma_interval_tree_post_update_vma(). |
345 | * |
346 | * The entire update must be protected by exclusive mmap_lock and by |
347 | * the root anon_vma's mutex. |
348 | */ |
349 | static inline void |
350 | anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) |
351 | { |
352 | struct anon_vma_chain *avc; |
353 | |
354 | list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) |
anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
356 | } |
357 | |
358 | static inline void |
359 | anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) |
360 | { |
361 | struct anon_vma_chain *avc; |
362 | |
363 | list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) |
anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
365 | } |
366 | |
367 | static unsigned long count_vma_pages_range(struct mm_struct *mm, |
368 | unsigned long addr, unsigned long end) |
369 | { |
370 | VMA_ITERATOR(vmi, mm, addr); |
371 | struct vm_area_struct *vma; |
372 | unsigned long nr_pages = 0; |
373 | |
374 | for_each_vma_range(vmi, vma, end) { |
375 | unsigned long vm_start = max(addr, vma->vm_start); |
376 | unsigned long vm_end = min(end, vma->vm_end); |
377 | |
378 | nr_pages += PHYS_PFN(vm_end - vm_start); |
379 | } |
380 | |
381 | return nr_pages; |
382 | } |
383 | |
384 | static void __vma_link_file(struct vm_area_struct *vma, |
385 | struct address_space *mapping) |
386 | { |
387 | if (vma_is_shared_maywrite(vma)) |
388 | mapping_allow_writable(mapping); |
389 | |
390 | flush_dcache_mmap_lock(mapping); |
vma_interval_tree_insert(vma, &mapping->i_mmap);
392 | flush_dcache_mmap_unlock(mapping); |
393 | } |
394 | |
395 | static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) |
396 | { |
397 | VMA_ITERATOR(vmi, mm, 0); |
398 | struct address_space *mapping = NULL; |
399 | |
vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
if (vma_iter_prealloc(&vmi, vma))
402 | return -ENOMEM; |
403 | |
404 | vma_start_write(vma); |
405 | |
vma_iter_store(&vmi, vma);
407 | |
408 | if (vma->vm_file) { |
409 | mapping = vma->vm_file->f_mapping; |
410 | i_mmap_lock_write(mapping); |
411 | __vma_link_file(vma, mapping); |
412 | i_mmap_unlock_write(mapping); |
413 | } |
414 | |
415 | mm->map_count++; |
416 | validate_mm(mm); |
417 | return 0; |
418 | } |
419 | |
420 | /* |
421 | * init_multi_vma_prep() - Initializer for struct vma_prepare |
422 | * @vp: The vma_prepare struct |
423 | * @vma: The vma that will be altered once locked |
424 | * @next: The next vma if it is to be adjusted |
425 | * @remove: The first vma to be removed |
426 | * @remove2: The second vma to be removed |
427 | */ |
428 | static inline void init_multi_vma_prep(struct vma_prepare *vp, |
429 | struct vm_area_struct *vma, struct vm_area_struct *next, |
430 | struct vm_area_struct *remove, struct vm_area_struct *remove2) |
431 | { |
432 | memset(vp, 0, sizeof(struct vma_prepare)); |
433 | vp->vma = vma; |
434 | vp->anon_vma = vma->anon_vma; |
435 | vp->remove = remove; |
436 | vp->remove2 = remove2; |
437 | vp->adj_next = next; |
438 | if (!vp->anon_vma && next) |
439 | vp->anon_vma = next->anon_vma; |
440 | |
441 | vp->file = vma->vm_file; |
442 | if (vp->file) |
443 | vp->mapping = vma->vm_file->f_mapping; |
444 | |
445 | } |
446 | |
447 | /* |
448 | * init_vma_prep() - Initializer wrapper for vma_prepare struct |
449 | * @vp: The vma_prepare struct |
450 | * @vma: The vma that will be altered once locked |
451 | */ |
452 | static inline void init_vma_prep(struct vma_prepare *vp, |
453 | struct vm_area_struct *vma) |
454 | { |
455 | init_multi_vma_prep(vp, vma, NULL, NULL, NULL); |
456 | } |
457 | |
458 | |
459 | /* |
460 | * vma_prepare() - Helper function for handling locking VMAs prior to altering |
461 | * @vp: The initialized vma_prepare struct |
462 | */ |
463 | static inline void vma_prepare(struct vma_prepare *vp) |
464 | { |
465 | if (vp->file) { |
uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);

if (vp->adj_next)
uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
vp->adj_next->vm_end);

i_mmap_lock_write(vp->mapping);
473 | if (vp->insert && vp->insert->vm_file) { |
474 | /* |
475 | * Put into interval tree now, so instantiated pages |
476 | * are visible to arm/parisc __flush_dcache_page |
477 | * throughout; but we cannot insert into address |
478 | * space until vma start or end is updated. |
479 | */ |
__vma_link_file(vp->insert,
vp->insert->vm_file->f_mapping);
482 | } |
483 | } |
484 | |
485 | if (vp->anon_vma) { |
anon_vma_lock_write(vp->anon_vma);
anon_vma_interval_tree_pre_update_vma(vp->vma);
if (vp->adj_next)
anon_vma_interval_tree_pre_update_vma(vp->adj_next);
490 | } |
491 | |
492 | if (vp->file) { |
flush_dcache_mmap_lock(vp->mapping);
vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
if (vp->adj_next)
vma_interval_tree_remove(vp->adj_next,
&vp->mapping->i_mmap);
498 | } |
499 | |
500 | } |
501 | |
502 | /* |
 * vma_complete() - Helper function for handling the unlocking after altering VMAs,
504 | * or for inserting a VMA. |
505 | * |
506 | * @vp: The vma_prepare struct |
507 | * @vmi: The vma iterator |
508 | * @mm: The mm_struct |
509 | */ |
510 | static inline void vma_complete(struct vma_prepare *vp, |
511 | struct vma_iterator *vmi, struct mm_struct *mm) |
512 | { |
513 | if (vp->file) { |
if (vp->adj_next)
vma_interval_tree_insert(vp->adj_next,
&vp->mapping->i_mmap);
vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
flush_dcache_mmap_unlock(vp->mapping);
519 | } |
520 | |
521 | if (vp->remove && vp->file) { |
__remove_shared_vm_struct(vp->remove, vp->file, vp->mapping);
if (vp->remove2)
__remove_shared_vm_struct(vp->remove2, vp->file,
vp->mapping);
526 | } else if (vp->insert) { |
527 | /* |
528 | * split_vma has split insert from vma, and needs |
529 | * us to insert it before dropping the locks |
530 | * (it may either follow vma or precede it). |
531 | */ |
vma_iter_store(vmi, vp->insert);
533 | mm->map_count++; |
534 | } |
535 | |
536 | if (vp->anon_vma) { |
anon_vma_interval_tree_post_update_vma(vp->vma);
if (vp->adj_next)
anon_vma_interval_tree_post_update_vma(vp->adj_next);
anon_vma_unlock_write(vp->anon_vma);
541 | } |
542 | |
543 | if (vp->file) { |
i_mmap_unlock_write(vp->mapping);
uprobe_mmap(vp->vma);

if (vp->adj_next)
uprobe_mmap(vp->adj_next);
549 | } |
550 | |
551 | if (vp->remove) { |
552 | again: |
vma_mark_detached(vp->remove, true);
if (vp->file) {
uprobe_munmap(vp->remove, vp->remove->vm_start,
vp->remove->vm_end);
fput(vp->file);
}
if (vp->remove->anon_vma)
anon_vma_merge(vp->vma, vp->remove);
561 | mm->map_count--; |
562 | mpol_put(vma_policy(vp->remove)); |
563 | if (!vp->remove2) |
564 | WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end); |
565 | vm_area_free(vp->remove); |
566 | |
567 | /* |
568 | * In mprotect's case 6 (see comments on vma_merge), |
569 | * we are removing both mid and next vmas |
570 | */ |
571 | if (vp->remove2) { |
572 | vp->remove = vp->remove2; |
573 | vp->remove2 = NULL; |
574 | goto again; |
575 | } |
576 | } |
577 | if (vp->insert && vp->file) |
uprobe_mmap(vp->insert);
579 | validate_mm(mm); |
580 | } |
581 | |
582 | /* |
583 | * dup_anon_vma() - Helper function to duplicate anon_vma |
584 | * @dst: The destination VMA |
585 | * @src: The source VMA |
586 | * @dup: Pointer to the destination VMA when successful. |
587 | * |
588 | * Returns: 0 on success. |
589 | */ |
590 | static inline int dup_anon_vma(struct vm_area_struct *dst, |
591 | struct vm_area_struct *src, struct vm_area_struct **dup) |
592 | { |
593 | /* |
594 | * Easily overlooked: when mprotect shifts the boundary, make sure the |
595 | * expanding vma has anon_vma set if the shrinking vma had, to cover any |
596 | * anon pages imported. |
597 | */ |
598 | if (src->anon_vma && !dst->anon_vma) { |
599 | int ret; |
600 | |
vma_assert_write_locked(dst);
602 | dst->anon_vma = src->anon_vma; |
603 | ret = anon_vma_clone(dst, src); |
604 | if (ret) |
605 | return ret; |
606 | |
607 | *dup = dst; |
608 | } |
609 | |
610 | return 0; |
611 | } |
612 | |
613 | /* |
614 | * vma_expand - Expand an existing VMA |
615 | * |
616 | * @vmi: The vma iterator |
617 | * @vma: The vma to expand |
618 | * @start: The start of the vma |
619 | * @end: The exclusive end of the vma |
620 | * @pgoff: The page offset of vma |
 * @next: The vma following @vma, if @vma may expand over it.
622 | * |
623 | * Expand @vma to @start and @end. Can expand off the start and end. Will |
624 | * expand over @next if it's different from @vma and @end == @next->vm_end. |
625 | * Checking if the @vma can expand and merge with @next needs to be handled by |
626 | * the caller. |
627 | * |
628 | * Returns: 0 on success |
629 | */ |
630 | int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, |
631 | unsigned long start, unsigned long end, pgoff_t pgoff, |
632 | struct vm_area_struct *next) |
633 | { |
634 | struct vm_area_struct *anon_dup = NULL; |
635 | bool remove_next = false; |
636 | struct vma_prepare vp; |
637 | |
638 | vma_start_write(vma); |
639 | if (next && (vma != next) && (end == next->vm_end)) { |
640 | int ret; |
641 | |
642 | remove_next = true; |
vma_start_write(next);
ret = dup_anon_vma(vma, next, &anon_dup);
645 | if (ret) |
646 | return ret; |
647 | } |
648 | |
init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
650 | /* Not merging but overwriting any part of next is not handled. */ |
651 | VM_WARN_ON(next && !vp.remove && |
652 | next != vma && end > next->vm_start); |
653 | /* Only handles expanding */ |
654 | VM_WARN_ON(vma->vm_start < start || vma->vm_end > end); |
655 | |
656 | /* Note: vma iterator must be pointing to 'start' */ |
vma_iter_config(vmi, start, end);
658 | if (vma_iter_prealloc(vmi, vma)) |
659 | goto nomem; |
660 | |
vma_prepare(&vp);
vma_adjust_trans_huge(vma, start, end, 0);
663 | vma->vm_start = start; |
664 | vma->vm_end = end; |
665 | vma->vm_pgoff = pgoff; |
666 | vma_iter_store(vmi, vma); |
667 | |
vma_complete(&vp, vmi, vma->vm_mm);
669 | return 0; |
670 | |
671 | nomem: |
672 | if (anon_dup) |
673 | unlink_anon_vmas(anon_dup); |
674 | return -ENOMEM; |
675 | } |
676 | |
677 | /* |
678 | * vma_shrink() - Reduce an existing VMAs memory area |
679 | * @vmi: The vma iterator |
680 | * @vma: The VMA to modify |
681 | * @start: The new start |
682 | * @end: The new end |
683 | * |
684 | * Returns: 0 on success, -ENOMEM otherwise |
685 | */ |
686 | int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, |
687 | unsigned long start, unsigned long end, pgoff_t pgoff) |
688 | { |
689 | struct vma_prepare vp; |
690 | |
691 | WARN_ON((vma->vm_start != start) && (vma->vm_end != end)); |
692 | |
693 | if (vma->vm_start < start) |
vma_iter_config(vmi, vma->vm_start, start);
else
vma_iter_config(vmi, end, vma->vm_end);
697 | |
698 | if (vma_iter_prealloc(vmi, NULL)) |
699 | return -ENOMEM; |
700 | |
701 | vma_start_write(vma); |
702 | |
init_vma_prep(&vp, vma);
vma_prepare(&vp);
vma_adjust_trans_huge(vma, start, end, 0);
706 | |
707 | vma_iter_clear(vmi); |
708 | vma->vm_start = start; |
709 | vma->vm_end = end; |
710 | vma->vm_pgoff = pgoff; |
vma_complete(&vp, vmi, vma->vm_mm);
712 | return 0; |
713 | } |
714 | |
715 | /* |
716 | * If the vma has a ->close operation then the driver probably needs to release |
717 | * per-vma resources, so we don't attempt to merge those if the caller indicates |
718 | * the current vma may be removed as part of the merge. |
719 | */ |
720 | static inline bool is_mergeable_vma(struct vm_area_struct *vma, |
721 | struct file *file, unsigned long vm_flags, |
722 | struct vm_userfaultfd_ctx vm_userfaultfd_ctx, |
723 | struct anon_vma_name *anon_name, bool may_remove_vma) |
724 | { |
725 | /* |
726 | * VM_SOFTDIRTY should not prevent from VMA merging, if we |
727 | * match the flags but dirty bit -- the caller should mark |
728 | * merged VMA as dirty. If dirty bit won't be excluded from |
729 | * comparison, we increase pressure on the memory system forcing |
730 | * the kernel to generate new VMAs when old one could be |
731 | * extended instead. |
732 | */ |
733 | if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) |
734 | return false; |
735 | if (vma->vm_file != file) |
736 | return false; |
737 | if (may_remove_vma && vma->vm_ops && vma->vm_ops->close) |
738 | return false; |
if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
return false;
if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
742 | return false; |
743 | return true; |
744 | } |
745 | |
746 | static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1, |
747 | struct anon_vma *anon_vma2, struct vm_area_struct *vma) |
748 | { |
749 | /* |
 * The list_is_singular() test is to avoid merging VMAs cloned from
 * parents, which can improve scalability by reducing contention on the
 * anon_vma lock.
752 | */ |
if ((!anon_vma1 || !anon_vma2) && (!vma ||
list_is_singular(&vma->anon_vma_chain)))
755 | return true; |
756 | return anon_vma1 == anon_vma2; |
757 | } |
758 | |
759 | /* |
760 | * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) |
761 | * in front of (at a lower virtual address and file offset than) the vma. |
762 | * |
763 | * We cannot merge two vmas if they have differently assigned (non-NULL) |
764 | * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. |
765 | * |
766 | * We don't check here for the merged mmap wrapping around the end of pagecache |
767 | * indices (16TB on ia32) because do_mmap() does not permit mmap's which |
768 | * wrap, nor mmaps which cover the final page at index -1UL. |
769 | * |
770 | * We assume the vma may be removed as part of the merge. |
771 | */ |
772 | static bool |
773 | can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, |
774 | struct anon_vma *anon_vma, struct file *file, |
775 | pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, |
776 | struct anon_vma_name *anon_name) |
777 | { |
if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
780 | if (vma->vm_pgoff == vm_pgoff) |
781 | return true; |
782 | } |
783 | return false; |
784 | } |
785 | |
786 | /* |
787 | * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) |
788 | * beyond (at a higher virtual address and file offset than) the vma. |
789 | * |
790 | * We cannot merge two vmas if they have differently assigned (non-NULL) |
791 | * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. |
792 | * |
793 | * We assume that vma is not removed as part of the merge. |
794 | */ |
795 | static bool |
796 | can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, |
797 | struct anon_vma *anon_vma, struct file *file, |
798 | pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, |
799 | struct anon_vma_name *anon_name) |
800 | { |
if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
803 | pgoff_t vm_pglen; |
804 | vm_pglen = vma_pages(vma); |
805 | if (vma->vm_pgoff + vm_pglen == vm_pgoff) |
806 | return true; |
807 | } |
808 | return false; |
809 | } |
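/*
 * For example, a vma mapping file pages [0, 4) can be merged with a new
 * mapping that starts at vma->vm_end and has vm_pgoff == 4: the check
 * vma->vm_pgoff + vma_pages(vma) == vm_pgoff (0 + 4 == 4) confirms the
 * file offsets remain contiguous across the combined range.
 */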
810 | |
811 | /* |
812 | * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name), |
813 | * figure out whether that can be merged with its predecessor or its |
814 | * successor. Or both (it neatly fills a hole). |
815 | * |
816 | * In most cases - when called for mmap, brk or mremap - [addr,end) is |
817 | * certain not to be mapped by the time vma_merge is called; but when |
818 | * called for mprotect, it is certain to be already mapped (either at |
819 | * an offset within prev, or at the start of next), and the flags of |
820 | * this area are about to be changed to vm_flags - and the no-change |
821 | * case has already been eliminated. |
822 | * |
823 | * The following mprotect cases have to be considered, where **** is |
824 | * the area passed down from mprotect_fixup, never extending beyond one |
825 | * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts |
826 | * at the same address as **** and is of the same or larger span, and |
827 | * NNNN the next vma after ****: |
828 | * |
829 | * **** **** **** |
830 | * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPCCCCCC |
831 | * cannot merge might become might become |
832 | * PPNNNNNNNNNN PPPPPPPPPPCC |
833 | * mmap, brk or case 4 below case 5 below |
834 | * mremap move: |
835 | * **** **** |
836 | * PPPP NNNN PPPPCCCCNNNN |
837 | * might become might become |
838 | * PPPPPPPPPPPP 1 or PPPPPPPPPPPP 6 or |
839 | * PPPPPPPPNNNN 2 or PPPPPPPPNNNN 7 or |
840 | * PPPPNNNNNNNN 3 PPPPNNNNNNNN 8 |
841 | * |
842 | * It is important for case 8 that the vma CCCC overlapping the |
 * region **** is never going to be extended over NNNN. Instead NNNN must
844 | * be extended in region **** and CCCC must be removed. This way in |
845 | * all cases where vma_merge succeeds, the moment vma_merge drops the |
846 | * rmap_locks, the properties of the merged vma will be already |
847 | * correct for the whole merged range. Some of those properties like |
848 | * vm_page_prot/vm_flags may be accessed by rmap_walks and they must |
849 | * be correct for the whole merged range immediately after the |
850 | * rmap_locks are released. Otherwise if NNNN would be removed and |
851 | * CCCC would be extended over the NNNN range, remove_migration_ptes |
852 | * or other rmap walkers (if working on addresses beyond the "end" |
853 | * parameter) may establish ptes with the wrong permissions of CCCC |
854 | * instead of the right permissions of NNNN. |
855 | * |
856 | * In the code below: |
857 | * PPPP is represented by *prev |
858 | * CCCC is represented by *curr or not represented at all (NULL) |
859 | * NNNN is represented by *next or not represented at all (NULL) |
860 | * **** is not represented - it will be merged and the vma containing the |
861 | * area is returned, or the function will return NULL |
862 | */ |
863 | static struct vm_area_struct |
864 | *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm, |
865 | struct vm_area_struct *prev, unsigned long addr, unsigned long end, |
866 | unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, |
867 | pgoff_t pgoff, struct mempolicy *policy, |
868 | struct vm_userfaultfd_ctx vm_userfaultfd_ctx, |
869 | struct anon_vma_name *anon_name) |
870 | { |
871 | struct vm_area_struct *curr, *next, *res; |
872 | struct vm_area_struct *vma, *adjust, *remove, *remove2; |
873 | struct vm_area_struct *anon_dup = NULL; |
874 | struct vma_prepare vp; |
875 | pgoff_t vma_pgoff; |
876 | int err = 0; |
877 | bool merge_prev = false; |
878 | bool merge_next = false; |
879 | bool vma_expanded = false; |
880 | unsigned long vma_start = addr; |
881 | unsigned long vma_end = end; |
882 | pgoff_t pglen = (end - addr) >> PAGE_SHIFT; |
883 | long adj_start = 0; |
884 | |
885 | /* |
886 | * We later require that vma->vm_flags == vm_flags, |
887 | * so this tests vma->vm_flags & VM_SPECIAL, too. |
888 | */ |
889 | if (vm_flags & VM_SPECIAL) |
890 | return NULL; |
891 | |
892 | /* Does the input range span an existing VMA? (cases 5 - 8) */ |
curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);
894 | |
895 | if (!curr || /* cases 1 - 4 */ |
896 | end == curr->vm_end) /* cases 6 - 8, adjacent VMA */ |
next = vma_lookup(mm, end);
898 | else |
899 | next = NULL; /* case 5 */ |
900 | |
901 | if (prev) { |
902 | vma_start = prev->vm_start; |
903 | vma_pgoff = prev->vm_pgoff; |
904 | |
905 | /* Can we merge the predecessor? */ |
if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
    && can_vma_merge_after(prev, vm_flags, anon_vma, file,
    pgoff, vm_userfaultfd_ctx, anon_name)) {
909 | merge_prev = true; |
910 | vma_prev(vmi); |
911 | } |
912 | } |
913 | |
914 | /* Can we merge the successor? */ |
if (next && mpol_equal(policy, vma_policy(next)) &&
    can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
917 | vm_userfaultfd_ctx, anon_name)) { |
918 | merge_next = true; |
919 | } |
920 | |
921 | /* Verify some invariant that must be enforced by the caller. */ |
922 | VM_WARN_ON(prev && addr <= prev->vm_start); |
923 | VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end)); |
924 | VM_WARN_ON(addr >= end); |
925 | |
926 | if (!merge_prev && !merge_next) |
927 | return NULL; /* Not mergeable. */ |
928 | |
929 | if (merge_prev) |
vma_start_write(prev);
931 | |
932 | res = vma = prev; |
933 | remove = remove2 = adjust = NULL; |
934 | |
935 | /* Can we merge both the predecessor and the successor? */ |
936 | if (merge_prev && merge_next && |
    is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
vma_start_write(next);
939 | remove = next; /* case 1 */ |
940 | vma_end = next->vm_end; |
err = dup_anon_vma(prev, next, &anon_dup);
if (curr) { /* case 6 */
vma_start_write(curr);
944 | remove = curr; |
945 | remove2 = next; |
946 | /* |
947 | * Note that the dup_anon_vma below cannot overwrite err |
948 | * since the first caller would do nothing unless next |
949 | * has an anon_vma. |
950 | */ |
951 | if (!next->anon_vma) |
err = dup_anon_vma(prev, curr, &anon_dup);
953 | } |
954 | } else if (merge_prev) { /* case 2 */ |
955 | if (curr) { |
vma_start_write(curr);
err = dup_anon_vma(prev, curr, &anon_dup);
958 | if (end == curr->vm_end) { /* case 7 */ |
959 | remove = curr; |
960 | } else { /* case 5 */ |
961 | adjust = curr; |
962 | adj_start = (end - curr->vm_start); |
963 | } |
964 | } |
965 | } else { /* merge_next */ |
vma_start_write(next);
res = next;
if (prev && addr < prev->vm_end) { /* case 4 */
vma_start_write(prev);
vma_end = addr;
adjust = next;
adj_start = -(prev->vm_end - addr);
err = dup_anon_vma(next, prev, &anon_dup);
974 | } else { |
975 | /* |
976 | * Note that cases 3 and 8 are the ONLY ones where prev |
977 | * is permitted to be (but is not necessarily) NULL. |
978 | */ |
979 | vma = next; /* case 3 */ |
980 | vma_start = addr; |
981 | vma_end = next->vm_end; |
982 | vma_pgoff = next->vm_pgoff - pglen; |
983 | if (curr) { /* case 8 */ |
984 | vma_pgoff = curr->vm_pgoff; |
vma_start_write(curr);
remove = curr;
err = dup_anon_vma(next, curr, &anon_dup);
988 | } |
989 | } |
990 | } |
991 | |
992 | /* Error in anon_vma clone. */ |
993 | if (err) |
994 | goto anon_vma_fail; |
995 | |
996 | if (vma_start < vma->vm_start || vma_end > vma->vm_end) |
997 | vma_expanded = true; |
998 | |
999 | if (vma_expanded) { |
vma_iter_config(vmi, vma_start, vma_end);
} else {
vma_iter_config(vmi, adjust->vm_start + adj_start,
adjust->vm_end);
1004 | } |
1005 | |
1006 | if (vma_iter_prealloc(vmi, vma)) |
1007 | goto prealloc_fail; |
1008 | |
init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
1010 | VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && |
1011 | vp.anon_vma != adjust->anon_vma); |
1012 | |
vma_prepare(&vp);
vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
1015 | |
1016 | vma->vm_start = vma_start; |
1017 | vma->vm_end = vma_end; |
1018 | vma->vm_pgoff = vma_pgoff; |
1019 | |
1020 | if (vma_expanded) |
1021 | vma_iter_store(vmi, vma); |
1022 | |
1023 | if (adj_start) { |
1024 | adjust->vm_start += adj_start; |
1025 | adjust->vm_pgoff += adj_start >> PAGE_SHIFT; |
1026 | if (adj_start < 0) { |
1027 | WARN_ON(vma_expanded); |
vma_iter_store(vmi, next);
1029 | } |
1030 | } |
1031 | |
vma_complete(&vp, vmi, mm);
khugepaged_enter_vma(res, vm_flags);
1034 | return res; |
1035 | |
1036 | prealloc_fail: |
1037 | if (anon_dup) |
1038 | unlink_anon_vmas(anon_dup); |
1039 | |
1040 | anon_vma_fail: |
1041 | vma_iter_set(vmi, addr); |
1042 | vma_iter_load(vmi); |
1043 | return NULL; |
1044 | } |
1045 | |
1046 | /* |
1047 | * Rough compatibility check to quickly see if it's even worth looking |
1048 | * at sharing an anon_vma. |
1049 | * |
1050 | * They need to have the same vm_file, and the flags can only differ |
1051 | * in things that mprotect may change. |
1052 | * |
1053 | * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that |
1054 | * we can merge the two vma's. For example, we refuse to merge a vma if |
1055 | * there is a vm_ops->close() function, because that indicates that the |
1056 | * driver is doing some kind of reference counting. But that doesn't |
1057 | * really matter for the anon_vma sharing case. |
1058 | */ |
1059 | static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) |
1060 | { |
1061 | return a->vm_end == b->vm_start && |
1062 | mpol_equal(vma_policy(a), vma_policy(b)) && |
1063 | a->vm_file == b->vm_file && |
1064 | !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) && |
1065 | b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); |
1066 | } |
1067 | |
1068 | /* |
1069 | * Do some basic sanity checking to see if we can re-use the anon_vma |
1070 | * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be |
1071 | * the same as 'old', the other will be the new one that is trying |
1072 | * to share the anon_vma. |
1073 | * |
1074 | * NOTE! This runs with mmap_lock held for reading, so it is possible that |
1075 | * the anon_vma of 'old' is concurrently in the process of being set up |
1076 | * by another page fault trying to merge _that_. But that's ok: if it |
1077 | * is being set up, that automatically means that it will be a singleton |
1078 | * acceptable for merging, so we can do all of this optimistically. But |
1079 | * we do that READ_ONCE() to make sure that we never re-load the pointer. |
1080 | * |
1081 | * IOW: that the "list_is_singular()" test on the anon_vma_chain only |
1082 | * matters for the 'stable anon_vma' case (ie the thing we want to avoid |
1083 | * is to return an anon_vma that is "complex" due to having gone through |
1084 | * a fork). |
1085 | * |
1086 | * We also make sure that the two vma's are compatible (adjacent, |
1087 | * and with the same memory policies). That's all stable, even with just |
1088 | * a read lock on the mmap_lock. |
1089 | */ |
1090 | static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) |
1091 | { |
1092 | if (anon_vma_compatible(a, b)) { |
1093 | struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); |
1094 | |
if (anon_vma && list_is_singular(&old->anon_vma_chain))
1096 | return anon_vma; |
1097 | } |
1098 | return NULL; |
1099 | } |
1100 | |
1101 | /* |
1102 | * find_mergeable_anon_vma is used by anon_vma_prepare, to check |
1103 | * neighbouring vmas for a suitable anon_vma, before it goes off |
1104 | * to allocate a new anon_vma. It checks because a repetitive |
1105 | * sequence of mprotects and faults may otherwise lead to distinct |
1106 | * anon_vmas being allocated, preventing vma merge in subsequent |
1107 | * mprotect. |
1108 | */ |
1109 | struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) |
1110 | { |
1111 | MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end); |
1112 | struct anon_vma *anon_vma = NULL; |
1113 | struct vm_area_struct *prev, *next; |
1114 | |
1115 | /* Try next first. */ |
next = mas_walk(&mas);
if (next) {
anon_vma = reusable_anon_vma(next, vma, next);
1119 | if (anon_vma) |
1120 | return anon_vma; |
1121 | } |
1122 | |
prev = mas_prev(&mas, 0);
VM_BUG_ON_VMA(prev != vma, vma);
prev = mas_prev(&mas, 0);
/* Try prev next. */
if (prev)
anon_vma = reusable_anon_vma(prev, prev, vma);
1129 | |
1130 | /* |
1131 | * We might reach here with anon_vma == NULL if we can't find |
1132 | * any reusable anon_vma. |
1133 | * There's no absolute need to look only at touching neighbours: |
1134 | * we could search further afield for "compatible" anon_vmas. |
1135 | * But it would probably just be a waste of time searching, |
1136 | * or lead to too many vmas hanging off the same anon_vma. |
1137 | * We're trying to allow mprotect remerging later on, |
1138 | * not trying to minimize memory used for anon_vmas. |
1139 | */ |
1140 | return anon_vma; |
1141 | } |
1142 | |
1143 | /* |
 * If a hint addr is less than mmap_min_addr, change the hint to be as
 * low as possible but still greater than mmap_min_addr.
1146 | */ |
1147 | static inline unsigned long round_hint_to_min(unsigned long hint) |
1148 | { |
1149 | hint &= PAGE_MASK; |
1150 | if (((void *)hint != NULL) && |
1151 | (hint < mmap_min_addr)) |
1152 | return PAGE_ALIGN(mmap_min_addr); |
1153 | return hint; |
1154 | } |
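/*
 * For example, with mmap_min_addr set to 0x10000 (64KiB, a common distro
 * default), a non-NULL hint of 0x1234 is masked to 0x1000 and then bumped
 * to 0x10000, while a NULL hint is returned unchanged so the allocator is
 * still free to choose any address.
 */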
1155 | |
1156 | bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, |
1157 | unsigned long bytes) |
1158 | { |
1159 | unsigned long locked_pages, limit_pages; |
1160 | |
1161 | if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) |
1162 | return true; |
1163 | |
1164 | locked_pages = bytes >> PAGE_SHIFT; |
1165 | locked_pages += mm->locked_vm; |
1166 | |
1167 | limit_pages = rlimit(RLIMIT_MEMLOCK); |
1168 | limit_pages >>= PAGE_SHIFT; |
1169 | |
1170 | return locked_pages <= limit_pages; |
1171 | } |
1172 | |
1173 | static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) |
1174 | { |
1175 | if (S_ISREG(inode->i_mode)) |
1176 | return MAX_LFS_FILESIZE; |
1177 | |
1178 | if (S_ISBLK(inode->i_mode)) |
1179 | return MAX_LFS_FILESIZE; |
1180 | |
1181 | if (S_ISSOCK(inode->i_mode)) |
1182 | return MAX_LFS_FILESIZE; |
1183 | |
1184 | /* Special "we do even unsigned file positions" case */ |
1185 | if (file->f_mode & FMODE_UNSIGNED_OFFSET) |
1186 | return 0; |
1187 | |
1188 | /* Yes, random drivers might want more. But I'm tired of buggy drivers */ |
1189 | return ULONG_MAX; |
1190 | } |
1191 | |
1192 | static inline bool file_mmap_ok(struct file *file, struct inode *inode, |
1193 | unsigned long pgoff, unsigned long len) |
1194 | { |
1195 | u64 maxsize = file_mmap_size_max(file, inode); |
1196 | |
1197 | if (maxsize && len > maxsize) |
1198 | return false; |
1199 | maxsize -= len; |
1200 | if (pgoff > maxsize >> PAGE_SHIFT) |
1201 | return false; |
1202 | return true; |
1203 | } |
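/*
 * Taken together, these checks reject mappings whose file window of
 * roughly (pgoff << PAGE_SHIFT) + len bytes would extend past the maximum
 * size file_mmap_size_max() reports for this kind of file.
 */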
1204 | |
1205 | /* |
1206 | * The caller must write-lock current->mm->mmap_lock. |
1207 | */ |
1208 | unsigned long do_mmap(struct file *file, unsigned long addr, |
1209 | unsigned long len, unsigned long prot, |
1210 | unsigned long flags, vm_flags_t vm_flags, |
1211 | unsigned long pgoff, unsigned long *populate, |
1212 | struct list_head *uf) |
1213 | { |
1214 | struct mm_struct *mm = current->mm; |
1215 | int pkey = 0; |
1216 | |
1217 | *populate = 0; |
1218 | |
1219 | if (!len) |
1220 | return -EINVAL; |
1221 | |
1222 | /* |
1223 | * Does the application expect PROT_READ to imply PROT_EXEC? |
1224 | * |
1225 | * (the exception is when the underlying filesystem is noexec |
1226 | * mounted, in which case we don't add PROT_EXEC.) |
1227 | */ |
1228 | if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) |
if (!(file && path_noexec(&file->f_path)))
1230 | prot |= PROT_EXEC; |
1231 | |
1232 | /* force arch specific MAP_FIXED handling in get_unmapped_area */ |
1233 | if (flags & MAP_FIXED_NOREPLACE) |
1234 | flags |= MAP_FIXED; |
1235 | |
1236 | if (!(flags & MAP_FIXED)) |
addr = round_hint_to_min(addr);
1238 | |
1239 | /* Careful about overflows.. */ |
1240 | len = PAGE_ALIGN(len); |
1241 | if (!len) |
1242 | return -ENOMEM; |
1243 | |
1244 | /* offset overflow? */ |
1245 | if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) |
1246 | return -EOVERFLOW; |
1247 | |
1248 | /* Too many mappings? */ |
1249 | if (mm->map_count > sysctl_max_map_count) |
1250 | return -ENOMEM; |
1251 | |
1252 | /* Obtain the address to map to. we verify (or select) it and ensure |
1253 | * that it represents a valid section of the address space. |
1254 | */ |
1255 | addr = get_unmapped_area(file, addr, len, pgoff, flags); |
1256 | if (IS_ERR_VALUE(addr)) |
1257 | return addr; |
1258 | |
1259 | if (flags & MAP_FIXED_NOREPLACE) { |
if (find_vma_intersection(mm, addr, addr + len))
1261 | return -EEXIST; |
1262 | } |
1263 | |
1264 | if (prot == PROT_EXEC) { |
1265 | pkey = execute_only_pkey(mm); |
1266 | if (pkey < 0) |
1267 | pkey = 0; |
1268 | } |
1269 | |
1270 | /* Do simple checking here so the lower-level routines won't have |
1271 | * to. we assume access permissions have been handled by the open |
1272 | * of the memory object, so we don't do any here. |
1273 | */ |
1274 | vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | |
1275 | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; |
1276 | |
1277 | if (flags & MAP_LOCKED) |
1278 | if (!can_do_mlock()) |
1279 | return -EPERM; |
1280 | |
if (!mlock_future_ok(mm, vm_flags, len))
1282 | return -EAGAIN; |
1283 | |
1284 | if (file) { |
struct inode *inode = file_inode(file);
1286 | unsigned long flags_mask; |
1287 | |
1288 | if (!file_mmap_ok(file, inode, pgoff, len)) |
1289 | return -EOVERFLOW; |
1290 | |
1291 | flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags; |
1292 | |
1293 | switch (flags & MAP_TYPE) { |
1294 | case MAP_SHARED: |
1295 | /* |
1296 | * Force use of MAP_SHARED_VALIDATE with non-legacy |
1297 | * flags. E.g. MAP_SYNC is dangerous to use with |
1298 | * MAP_SHARED as you don't know which consistency model |
1299 | * you will get. We silently ignore unsupported flags |
1300 | * with MAP_SHARED to preserve backward compatibility. |
1301 | */ |
1302 | flags &= LEGACY_MAP_MASK; |
1303 | fallthrough; |
1304 | case MAP_SHARED_VALIDATE: |
1305 | if (flags & ~flags_mask) |
1306 | return -EOPNOTSUPP; |
1307 | if (prot & PROT_WRITE) { |
1308 | if (!(file->f_mode & FMODE_WRITE)) |
1309 | return -EACCES; |
1310 | if (IS_SWAPFILE(file->f_mapping->host)) |
1311 | return -ETXTBSY; |
1312 | } |
1313 | |
1314 | /* |
1315 | * Make sure we don't allow writing to an append-only |
1316 | * file.. |
1317 | */ |
1318 | if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) |
1319 | return -EACCES; |
1320 | |
1321 | vm_flags |= VM_SHARED | VM_MAYSHARE; |
1322 | if (!(file->f_mode & FMODE_WRITE)) |
1323 | vm_flags &= ~(VM_MAYWRITE | VM_SHARED); |
1324 | fallthrough; |
1325 | case MAP_PRIVATE: |
1326 | if (!(file->f_mode & FMODE_READ)) |
1327 | return -EACCES; |
if (path_noexec(&file->f_path)) {
1329 | if (vm_flags & VM_EXEC) |
1330 | return -EPERM; |
1331 | vm_flags &= ~VM_MAYEXEC; |
1332 | } |
1333 | |
1334 | if (!file->f_op->mmap) |
1335 | return -ENODEV; |
1336 | if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) |
1337 | return -EINVAL; |
1338 | break; |
1339 | |
1340 | default: |
1341 | return -EINVAL; |
1342 | } |
1343 | } else { |
1344 | switch (flags & MAP_TYPE) { |
1345 | case MAP_SHARED: |
1346 | if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) |
1347 | return -EINVAL; |
1348 | /* |
1349 | * Ignore pgoff. |
1350 | */ |
1351 | pgoff = 0; |
1352 | vm_flags |= VM_SHARED | VM_MAYSHARE; |
1353 | break; |
1354 | case MAP_PRIVATE: |
1355 | /* |
1356 | * Set pgoff according to addr for anon_vma. |
1357 | */ |
1358 | pgoff = addr >> PAGE_SHIFT; |
1359 | break; |
1360 | default: |
1361 | return -EINVAL; |
1362 | } |
1363 | } |
1364 | |
1365 | /* |
1366 | * Set 'VM_NORESERVE' if we should not account for the |
1367 | * memory use of this mapping. |
1368 | */ |
1369 | if (flags & MAP_NORESERVE) { |
1370 | /* We honor MAP_NORESERVE if allowed to overcommit */ |
1371 | if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) |
1372 | vm_flags |= VM_NORESERVE; |
1373 | |
1374 | /* hugetlb applies strict overcommit unless MAP_NORESERVE */ |
1375 | if (file && is_file_hugepages(file)) |
1376 | vm_flags |= VM_NORESERVE; |
1377 | } |
1378 | |
1379 | addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); |
1380 | if (!IS_ERR_VALUE(addr) && |
1381 | ((vm_flags & VM_LOCKED) || |
1382 | (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) |
1383 | *populate = len; |
1384 | return addr; |
1385 | } |
1386 | |
1387 | unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, |
1388 | unsigned long prot, unsigned long flags, |
1389 | unsigned long fd, unsigned long pgoff) |
1390 | { |
1391 | struct file *file = NULL; |
1392 | unsigned long retval; |
1393 | |
1394 | if (!(flags & MAP_ANONYMOUS)) { |
1395 | audit_mmap_fd(fd, flags); |
1396 | file = fget(fd); |
1397 | if (!file) |
1398 | return -EBADF; |
1399 | if (is_file_hugepages(file)) { |
1400 | len = ALIGN(len, huge_page_size(hstate_file(file))); |
1401 | } else if (unlikely(flags & MAP_HUGETLB)) { |
1402 | retval = -EINVAL; |
1403 | goto out_fput; |
1404 | } |
1405 | } else if (flags & MAP_HUGETLB) { |
1406 | struct hstate *hs; |
1407 | |
hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1409 | if (!hs) |
1410 | return -EINVAL; |
1411 | |
1412 | len = ALIGN(len, huge_page_size(hs)); |
1413 | /* |
1414 | * VM_NORESERVE is used because the reservations will be |
1415 | * taken when vm_ops->mmap() is called |
1416 | */ |
file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
VM_NORESERVE,
HUGETLB_ANONHUGE_INODE,
(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
if (IS_ERR(file))
return PTR_ERR(file);
1423 | } |
1424 | |
1425 | retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); |
1426 | out_fput: |
1427 | if (file) |
1428 | fput(file); |
1429 | return retval; |
1430 | } |
1431 | |
1432 | SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, |
1433 | unsigned long, prot, unsigned long, flags, |
1434 | unsigned long, fd, unsigned long, pgoff) |
1435 | { |
1436 | return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); |
1437 | } |
1438 | |
1439 | #ifdef __ARCH_WANT_SYS_OLD_MMAP |
1440 | struct mmap_arg_struct { |
1441 | unsigned long addr; |
1442 | unsigned long len; |
1443 | unsigned long prot; |
1444 | unsigned long flags; |
1445 | unsigned long fd; |
1446 | unsigned long offset; |
1447 | }; |
1448 | |
1449 | SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) |
1450 | { |
1451 | struct mmap_arg_struct a; |
1452 | |
1453 | if (copy_from_user(&a, arg, sizeof(a))) |
1454 | return -EFAULT; |
1455 | if (offset_in_page(a.offset)) |
1456 | return -EINVAL; |
1457 | |
1458 | return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, |
1459 | a.offset >> PAGE_SHIFT); |
1460 | } |
1461 | #endif /* __ARCH_WANT_SYS_OLD_MMAP */ |
1462 | |
1463 | static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops) |
1464 | { |
1465 | return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite); |
1466 | } |
1467 | |
1468 | static bool vma_is_shared_writable(struct vm_area_struct *vma) |
1469 | { |
1470 | return (vma->vm_flags & (VM_WRITE | VM_SHARED)) == |
1471 | (VM_WRITE | VM_SHARED); |
1472 | } |
1473 | |
1474 | static bool vma_fs_can_writeback(struct vm_area_struct *vma) |
1475 | { |
1476 | /* No managed pages to writeback. */ |
1477 | if (vma->vm_flags & VM_PFNMAP) |
1478 | return false; |
1479 | |
1480 | return vma->vm_file && vma->vm_file->f_mapping && |
mapping_can_writeback(vma->vm_file->f_mapping);
1482 | } |
1483 | |
1484 | /* |
1485 | * Does this VMA require the underlying folios to have their dirty state |
1486 | * tracked? |
1487 | */ |
1488 | bool vma_needs_dirty_tracking(struct vm_area_struct *vma) |
1489 | { |
1490 | /* Only shared, writable VMAs require dirty tracking. */ |
1491 | if (!vma_is_shared_writable(vma)) |
1492 | return false; |
1493 | |
1494 | /* Does the filesystem need to be notified? */ |
if (vm_ops_needs_writenotify(vma->vm_ops))
1496 | return true; |
1497 | |
1498 | /* |
1499 | * Even if the filesystem doesn't indicate a need for writenotify, if it |
1500 | * can writeback, dirty tracking is still required. |
1501 | */ |
1502 | return vma_fs_can_writeback(vma); |
1503 | } |
1504 | |
1505 | /* |
1506 | * Some shared mappings will want the pages marked read-only |
1507 | * to track write events. If so, we'll downgrade vm_page_prot |
1508 | * to the private version (using protection_map[] without the |
1509 | * VM_SHARED bit). |
1510 | */ |
1511 | int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) |
1512 | { |
1513 | /* If it was private or non-writable, the write bit is already clear */ |
1514 | if (!vma_is_shared_writable(vma)) |
1515 | return 0; |
1516 | |
1517 | /* The backer wishes to know when pages are first written to? */ |
if (vm_ops_needs_writenotify(vma->vm_ops))
1519 | return 1; |
1520 | |
1521 | /* The open routine did something to the protections that pgprot_modify |
1522 | * won't preserve? */ |
1523 | if (pgprot_val(vm_page_prot) != |
1524 | pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags))) |
1525 | return 0; |
1526 | |
1527 | /* |
1528 | * Do we need to track softdirty? hugetlb does not support softdirty |
1529 | * tracking yet. |
1530 | */ |
1531 | if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma)) |
1532 | return 1; |
1533 | |
1534 | /* Do we need write faults for uffd-wp tracking? */ |
1535 | if (userfaultfd_wp(vma)) |
1536 | return 1; |
1537 | |
1538 | /* Can the mapping track the dirty pages? */ |
1539 | return vma_fs_can_writeback(vma); |
1540 | } |
1541 | |
1542 | /* |
1543 | * We account for memory if it's a private writeable mapping, |
1544 | * not hugepages and VM_NORESERVE wasn't set. |
1545 | */ |
1546 | static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) |
1547 | { |
1548 | /* |
1549 | * hugetlb has its own accounting separate from the core VM |
1550 | * VM_HUGETLB may not be set yet so we cannot check for that flag. |
1551 | */ |
1552 | if (file && is_file_hugepages(file)) |
1553 | return 0; |
1554 | |
1555 | return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; |
1556 | } |
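/*
 * For example, a MAP_PRIVATE | PROT_WRITE anonymous mapping (VM_WRITE set,
 * VM_SHARED and VM_NORESERVE clear) is charged against the commit limit,
 * while shared, read-only or VM_NORESERVE mappings are not.
 */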
1557 | |
1558 | /** |
1559 | * unmapped_area() - Find an area between the low_limit and the high_limit with |
1560 | * the correct alignment and offset, all from @info. Note: current->mm is used |
1561 | * for the search. |
1562 | * |
1563 | * @info: The unmapped area information including the range [low_limit - |
1564 | * high_limit), the alignment offset and mask. |
1565 | * |
1566 | * Return: A memory address or -ENOMEM. |
1567 | */ |
1568 | static unsigned long unmapped_area(struct vm_unmapped_area_info *info) |
1569 | { |
1570 | unsigned long length, gap; |
1571 | unsigned long low_limit, high_limit; |
1572 | struct vm_area_struct *tmp; |
1573 | |
1574 | MA_STATE(mas, ¤t->mm->mm_mt, 0, 0); |
1575 | |
1576 | /* Adjust search length to account for worst case alignment overhead */ |
1577 | length = info->length + info->align_mask; |
1578 | if (length < info->length) |
1579 | return -ENOMEM; |
1580 | |
1581 | low_limit = info->low_limit; |
1582 | if (low_limit < mmap_min_addr) |
1583 | low_limit = mmap_min_addr; |
1584 | high_limit = info->high_limit; |
1585 | retry: |
if (mas_empty_area(&mas, low_limit, high_limit - 1, length))
1587 | return -ENOMEM; |
1588 | |
1589 | gap = mas.index; |
1590 | gap += (info->align_offset - gap) & info->align_mask; |
tmp = mas_next(&mas, ULONG_MAX);
if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
if (vm_start_gap(tmp) < gap + length - 1) {
low_limit = tmp->vm_end;
mas_reset(&mas);
goto retry;
}
} else {
tmp = mas_prev(&mas, 0);
if (tmp && vm_end_gap(tmp) > gap) {
low_limit = vm_end_gap(tmp);
mas_reset(&mas);
goto retry;
1604 | } |
1605 | } |
1606 | |
1607 | return gap; |
1608 | } |
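/*
 * Worked example (editor's note) of the alignment fix-up in unmapped_area():
 * with align_mask == 0x1ff000 (2MiB alignment on 4KiB pages), align_offset == 0
 * and a gap found at 0x7f1234567000, the adjustment adds
 * (0 - 0x7f1234567000) & 0x1ff000 == 0x99000, moving the candidate address to
 * 0x7f1234600000, the next 2MiB boundary inside the gap.
 */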
1609 | |
1610 | /** |
1611 | * unmapped_area_topdown() - Find an area between the low_limit and the |
1612 | * high_limit with the correct alignment and offset at the highest available |
1613 | * address, all from @info. Note: current->mm is used for the search. |
1614 | * |
1615 | * @info: The unmapped area information including the range [low_limit - |
1616 | * high_limit), the alignment offset and mask. |
1617 | * |
1618 | * Return: A memory address or -ENOMEM. |
1619 | */ |
1620 | static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) |
1621 | { |
1622 | unsigned long length, gap, gap_end; |
1623 | unsigned long low_limit, high_limit; |
1624 | struct vm_area_struct *tmp; |
1625 | |
1626 | 	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1627 | /* Adjust search length to account for worst case alignment overhead */ |
1628 | length = info->length + info->align_mask; |
1629 | if (length < info->length) |
1630 | return -ENOMEM; |
1631 | |
1632 | low_limit = info->low_limit; |
1633 | if (low_limit < mmap_min_addr) |
1634 | low_limit = mmap_min_addr; |
1635 | high_limit = info->high_limit; |
1636 | retry: |
1637 | if (mas_empty_area_rev(mas: &mas, min: low_limit, max: high_limit - 1, size: length)) |
1638 | return -ENOMEM; |
1639 | |
1640 | gap = mas.last + 1 - info->length; |
1641 | gap -= (gap - info->align_offset) & info->align_mask; |
1642 | gap_end = mas.last; |
1643 | tmp = mas_next(mas: &mas, ULONG_MAX); |
1644 | if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ |
1645 | if (vm_start_gap(vma: tmp) <= gap_end) { |
1646 | high_limit = vm_start_gap(vma: tmp); |
1647 | mas_reset(mas: &mas); |
1648 | goto retry; |
1649 | } |
1650 | } else { |
1651 | tmp = mas_prev(mas: &mas, min: 0); |
1652 | if (tmp && vm_end_gap(vma: tmp) > gap) { |
1653 | high_limit = tmp->vm_start; |
1654 | mas_reset(mas: &mas); |
1655 | goto retry; |
1656 | } |
1657 | } |
1658 | |
1659 | return gap; |
1660 | } |
1661 | |
1662 | /* |
1663 | * Search for an unmapped address range. |
1664 | * |
1665 | * We are looking for a range that: |
1666 | * - does not intersect with any VMA; |
1667 | * - is contained within the [low_limit, high_limit) interval; |
1668 | * - is at least the desired size. |
1669 | * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) |
1670 | */ |
1671 | unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info) |
1672 | { |
1673 | unsigned long addr; |
1674 | |
1675 | if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) |
1676 | addr = unmapped_area_topdown(info); |
1677 | else |
1678 | addr = unmapped_area(info); |
1679 | |
1680 | trace_vm_unmapped_area(addr, info); |
1681 | return addr; |
1682 | } |
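/*
 * Usage sketch (editor's addition, not from the original file): how a
 * ->get_unmapped_area() implementation might ask for a 2MiB-aligned range.
 * The SZ_2M constant and the bottom-up limits are assumptions chosen for the
 * example only:
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.flags = 0;					// bottom-up search
 *	info.length = len;
 *	info.low_limit = current->mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = PAGE_MASK & (SZ_2M - 1);	// start on a 2MiB boundary
 *	info.align_offset = 0;
 *	addr = vm_unmapped_area(&info);
 *	if (offset_in_page(addr))			// low bits set => error value
 *		return addr;				// typically -ENOMEM
 */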
1683 | |
1684 | /* Get an address range which is currently unmapped. |
1685 | * For shmat() with addr=0. |
1686 | * |
1687 | * Ugly calling convention alert: |
1688 | * Return value with the low bits set means error value, |
1689 | * ie |
1690 | * if (ret & ~PAGE_MASK) |
1691 | * error = ret; |
1692 | * |
1693 | * This function "knows" that -ENOMEM has the bits set. |
1694 | */ |
1695 | unsigned long |
1696 | generic_get_unmapped_area(struct file *filp, unsigned long addr, |
1697 | unsigned long len, unsigned long pgoff, |
1698 | unsigned long flags) |
1699 | { |
1700 | struct mm_struct *mm = current->mm; |
1701 | struct vm_area_struct *vma, *prev; |
1702 | struct vm_unmapped_area_info info; |
1703 | const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); |
1704 | |
1705 | if (len > mmap_end - mmap_min_addr) |
1706 | return -ENOMEM; |
1707 | |
1708 | if (flags & MAP_FIXED) |
1709 | return addr; |
1710 | |
1711 | if (addr) { |
1712 | addr = PAGE_ALIGN(addr); |
1713 | vma = find_vma_prev(mm, addr, pprev: &prev); |
1714 | if (mmap_end - len >= addr && addr >= mmap_min_addr && |
1715 | (!vma || addr + len <= vm_start_gap(vma)) && |
1716 | (!prev || addr >= vm_end_gap(vma: prev))) |
1717 | return addr; |
1718 | } |
1719 | |
1720 | info.flags = 0; |
1721 | info.length = len; |
1722 | info.low_limit = mm->mmap_base; |
1723 | info.high_limit = mmap_end; |
1724 | info.align_mask = 0; |
1725 | info.align_offset = 0; |
1726 | return vm_unmapped_area(info: &info); |
1727 | } |
1728 | |
1729 | #ifndef HAVE_ARCH_UNMAPPED_AREA |
1730 | unsigned long |
1731 | arch_get_unmapped_area(struct file *filp, unsigned long addr, |
1732 | unsigned long len, unsigned long pgoff, |
1733 | unsigned long flags) |
1734 | { |
1735 | return generic_get_unmapped_area(filp, addr, len, pgoff, flags); |
1736 | } |
1737 | #endif |
1738 | |
1739 | /* |
1740 | * This mmap-allocator allocates new areas top-down from below the |
1741 | * stack's low limit (the base): |
1742 | */ |
1743 | unsigned long |
1744 | generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, |
1745 | unsigned long len, unsigned long pgoff, |
1746 | unsigned long flags) |
1747 | { |
1748 | struct vm_area_struct *vma, *prev; |
1749 | struct mm_struct *mm = current->mm; |
1750 | struct vm_unmapped_area_info info; |
1751 | const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); |
1752 | |
1753 | /* requested length too big for entire address space */ |
1754 | if (len > mmap_end - mmap_min_addr) |
1755 | return -ENOMEM; |
1756 | |
1757 | if (flags & MAP_FIXED) |
1758 | return addr; |
1759 | |
1760 | /* requesting a specific address */ |
1761 | if (addr) { |
1762 | addr = PAGE_ALIGN(addr); |
1763 | vma = find_vma_prev(mm, addr, pprev: &prev); |
1764 | if (mmap_end - len >= addr && addr >= mmap_min_addr && |
1765 | (!vma || addr + len <= vm_start_gap(vma)) && |
1766 | (!prev || addr >= vm_end_gap(vma: prev))) |
1767 | return addr; |
1768 | } |
1769 | |
1770 | info.flags = VM_UNMAPPED_AREA_TOPDOWN; |
1771 | info.length = len; |
1772 | info.low_limit = PAGE_SIZE; |
1773 | info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); |
1774 | info.align_mask = 0; |
1775 | info.align_offset = 0; |
1776 | addr = vm_unmapped_area(info: &info); |
1777 | |
1778 | /* |
1779 | * A failed mmap() very likely causes application failure, |
1780 | * so fall back to the bottom-up function here. This scenario |
1781 | * can happen with large stack limits and large mmap() |
1782 | * allocations. |
1783 | */ |
1784 | if (offset_in_page(addr)) { |
1785 | VM_BUG_ON(addr != -ENOMEM); |
1786 | info.flags = 0; |
1787 | info.low_limit = TASK_UNMAPPED_BASE; |
1788 | info.high_limit = mmap_end; |
1789 | addr = vm_unmapped_area(info: &info); |
1790 | } |
1791 | |
1792 | return addr; |
1793 | } |
1794 | |
1795 | #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN |
1796 | unsigned long |
1797 | arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, |
1798 | unsigned long len, unsigned long pgoff, |
1799 | unsigned long flags) |
1800 | { |
1801 | return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); |
1802 | } |
1803 | #endif |
1804 | |
1805 | unsigned long |
1806 | get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, |
1807 | unsigned long pgoff, unsigned long flags) |
1808 | { |
1809 | unsigned long (*get_area)(struct file *, unsigned long, |
1810 | unsigned long, unsigned long, unsigned long); |
1811 | |
1812 | unsigned long error = arch_mmap_check(addr, len, flags); |
1813 | if (error) |
1814 | return error; |
1815 | |
1816 | /* Careful about overflows.. */ |
1817 | if (len > TASK_SIZE) |
1818 | return -ENOMEM; |
1819 | |
1820 | get_area = current->mm->get_unmapped_area; |
1821 | if (file) { |
1822 | if (file->f_op->get_unmapped_area) |
1823 | get_area = file->f_op->get_unmapped_area; |
1824 | } else if (flags & MAP_SHARED) { |
1825 | /* |
1826 | * mmap_region() will call shmem_zero_setup() to create a file, |
1827 | * so use shmem's get_unmapped_area in case it can be huge. |
1828 | * do_mmap() will clear pgoff, so match alignment. |
1829 | */ |
1830 | pgoff = 0; |
1831 | get_area = shmem_get_unmapped_area; |
1832 | } |
1833 | |
1834 | addr = get_area(file, addr, len, pgoff, flags); |
1835 | if (IS_ERR_VALUE(addr)) |
1836 | return addr; |
1837 | |
1838 | if (addr > TASK_SIZE - len) |
1839 | return -ENOMEM; |
1840 | if (offset_in_page(addr)) |
1841 | return -EINVAL; |
1842 | |
1843 | error = security_mmap_addr(addr); |
1844 | return error ? error : addr; |
1845 | } |
1846 | |
1847 | EXPORT_SYMBOL(get_unmapped_area); |
1848 | |
1849 | /** |
1850 | * find_vma_intersection() - Look up the first VMA which intersects the interval |
1851 | * @mm: The process address space. |
1852 | * @start_addr: The inclusive start user address. |
1853 | * @end_addr: The exclusive end user address. |
1854 | * |
1855 | * Returns: The first VMA within the provided range, %NULL otherwise. Assumes |
1856 | * start_addr < end_addr. |
1857 | */ |
1858 | struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, |
1859 | unsigned long start_addr, |
1860 | unsigned long end_addr) |
1861 | { |
1862 | unsigned long index = start_addr; |
1863 | |
1864 | mmap_assert_locked(mm); |
1865 | return mt_find(mt: &mm->mm_mt, index: &index, max: end_addr - 1); |
1866 | } |
1867 | EXPORT_SYMBOL(find_vma_intersection); |
1868 | |
1869 | /** |
1870 | * find_vma() - Find the VMA for a given address, or the next VMA. |
1871 | * @mm: The mm_struct to check |
1872 | * @addr: The address |
1873 | * |
1874 | * Returns: The VMA associated with addr, or the next VMA. |
1875 | * May return %NULL in the case of no VMA at addr or above. |
1876 | */ |
1877 | struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) |
1878 | { |
1879 | unsigned long index = addr; |
1880 | |
1881 | mmap_assert_locked(mm); |
1882 | return mt_find(mt: &mm->mm_mt, index: &index, ULONG_MAX); |
1883 | } |
1884 | EXPORT_SYMBOL(find_vma); |
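/*
 * Usage sketch (editor's addition): find_vma() may return a VMA that starts
 * above @addr, so callers that need an "is addr mapped" test must also check
 * vm_start (or use vma_lookup(), which does this for them):
 *
 *	mmap_read_lock(mm);
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		;	// addr lies inside vma
 *	else
 *		;	// addr is unmapped; vma, if any, begins above addr
 *	mmap_read_unlock(mm);
 */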
1885 | |
1886 | /** |
1887 | * find_vma_prev() - Find the VMA for a given address, or the next vma and |
1888 | * set %pprev to the previous VMA, if any. |
1889 | * @mm: The mm_struct to check |
1890 | * @addr: The address |
1891 | * @pprev: The pointer to set to the previous VMA |
1892 | * |
1893 | * Note that RCU lock is missing here since the external mmap_lock() is used |
1894 | * instead. |
1895 | * |
1896 | * Returns: The VMA associated with @addr, or the next vma. |
1897 | * May return %NULL in the case of no vma at addr or above. |
1898 | */ |
1899 | struct vm_area_struct * |
1900 | find_vma_prev(struct mm_struct *mm, unsigned long addr, |
1901 | struct vm_area_struct **pprev) |
1902 | { |
1903 | struct vm_area_struct *vma; |
1904 | MA_STATE(mas, &mm->mm_mt, addr, addr); |
1905 | |
1906 | vma = mas_walk(mas: &mas); |
1907 | *pprev = mas_prev(mas: &mas, min: 0); |
1908 | if (!vma) |
1909 | vma = mas_next(mas: &mas, ULONG_MAX); |
1910 | return vma; |
1911 | } |
1912 | |
1913 | /* |
1914 | * Verify that the stack growth is acceptable and |
1915 | * update accounting. This is shared with both the |
1916 | * grow-up and grow-down cases. |
1917 | */ |
1918 | static int acct_stack_growth(struct vm_area_struct *vma, |
1919 | unsigned long size, unsigned long grow) |
1920 | { |
1921 | struct mm_struct *mm = vma->vm_mm; |
1922 | unsigned long new_start; |
1923 | |
1924 | /* address space limit tests */ |
1925 | if (!may_expand_vm(mm, vma->vm_flags, npages: grow)) |
1926 | return -ENOMEM; |
1927 | |
1928 | /* Stack limit test */ |
1929 | if (size > rlimit(RLIMIT_STACK)) |
1930 | return -ENOMEM; |
1931 | |
1932 | /* mlock limit tests */ |
1933 | if (!mlock_future_ok(mm, flags: vma->vm_flags, bytes: grow << PAGE_SHIFT)) |
1934 | return -ENOMEM; |
1935 | |
1936 | /* Check to ensure the stack will not grow into a hugetlb-only region */ |
1937 | new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : |
1938 | vma->vm_end - size; |
1939 | if (is_hugepage_only_range(mm: vma->vm_mm, addr: new_start, len: size)) |
1940 | return -EFAULT; |
1941 | |
1942 | /* |
1943 | * Overcommit.. This must be the final test, as it will |
1944 | * update security statistics. |
1945 | */ |
1946 | if (security_vm_enough_memory_mm(mm, pages: grow)) |
1947 | return -ENOMEM; |
1948 | |
1949 | return 0; |
1950 | } |
1951 | |
1952 | #if defined(CONFIG_STACK_GROWSUP) |
1953 | /* |
1954 | * PA-RISC uses this for its stack. |
1955 | * vma is the last one with address > vma->vm_end. Have to extend vma. |
1956 | */ |
1957 | static int expand_upwards(struct vm_area_struct *vma, unsigned long address) |
1958 | { |
1959 | struct mm_struct *mm = vma->vm_mm; |
1960 | struct vm_area_struct *next; |
1961 | unsigned long gap_addr; |
1962 | int error = 0; |
1963 | MA_STATE(mas, &mm->mm_mt, vma->vm_start, address); |
1964 | |
1965 | if (!(vma->vm_flags & VM_GROWSUP)) |
1966 | return -EFAULT; |
1967 | |
1968 | /* Guard against exceeding limits of the address space. */ |
1969 | address &= PAGE_MASK; |
1970 | if (address >= (TASK_SIZE & PAGE_MASK)) |
1971 | return -ENOMEM; |
1972 | address += PAGE_SIZE; |
1973 | |
1974 | /* Enforce stack_guard_gap */ |
1975 | gap_addr = address + stack_guard_gap; |
1976 | |
1977 | /* Guard against overflow */ |
1978 | if (gap_addr < address || gap_addr > TASK_SIZE) |
1979 | gap_addr = TASK_SIZE; |
1980 | |
1981 | next = find_vma_intersection(mm, vma->vm_end, gap_addr); |
1982 | if (next && vma_is_accessible(next)) { |
1983 | if (!(next->vm_flags & VM_GROWSUP)) |
1984 | return -ENOMEM; |
1985 | /* Check that both stack segments have the same anon_vma? */ |
1986 | } |
1987 | |
1988 | if (next) |
1989 | mas_prev_range(&mas, address); |
1990 | |
1991 | __mas_set_range(&mas, vma->vm_start, address - 1); |
1992 | if (mas_preallocate(&mas, vma, GFP_KERNEL)) |
1993 | return -ENOMEM; |
1994 | |
1995 | /* We must make sure the anon_vma is allocated. */ |
1996 | if (unlikely(anon_vma_prepare(vma))) { |
1997 | mas_destroy(&mas); |
1998 | return -ENOMEM; |
1999 | } |
2000 | |
2001 | /* Lock the VMA before expanding to prevent concurrent page faults */ |
2002 | vma_start_write(vma); |
2003 | /* |
2004 | * vma->vm_start/vm_end cannot change under us because the caller |
2005 | * is required to hold the mmap_lock in read mode. We need the |
2006 | * anon_vma lock to serialize against concurrent expand_stacks. |
2007 | */ |
2008 | anon_vma_lock_write(vma->anon_vma); |
2009 | |
2010 | /* Somebody else might have raced and expanded it already */ |
2011 | if (address > vma->vm_end) { |
2012 | unsigned long size, grow; |
2013 | |
2014 | size = address - vma->vm_start; |
2015 | grow = (address - vma->vm_end) >> PAGE_SHIFT; |
2016 | |
2017 | error = -ENOMEM; |
2018 | if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { |
2019 | error = acct_stack_growth(vma, size, grow); |
2020 | if (!error) { |
2021 | /* |
2022 | * We only hold a shared mmap_lock lock here, so |
2023 | * we need to protect against concurrent vma |
2024 | * expansions. anon_vma_lock_write() doesn't |
2025 | * help here, as we don't guarantee that all |
2026 | * growable vmas in a mm share the same root |
2027 | * anon vma. So, we reuse mm->page_table_lock |
2028 | * to guard against concurrent vma expansions. |
2029 | */ |
2030 | spin_lock(&mm->page_table_lock); |
2031 | if (vma->vm_flags & VM_LOCKED) |
2032 | mm->locked_vm += grow; |
2033 | vm_stat_account(mm, vma->vm_flags, grow); |
2034 | anon_vma_interval_tree_pre_update_vma(vma); |
2035 | vma->vm_end = address; |
2036 | /* Overwrite old entry in mtree. */ |
2037 | mas_store_prealloc(&mas, vma); |
2038 | anon_vma_interval_tree_post_update_vma(vma); |
2039 | spin_unlock(&mm->page_table_lock); |
2040 | |
2041 | perf_event_mmap(vma); |
2042 | } |
2043 | } |
2044 | } |
2045 | anon_vma_unlock_write(vma->anon_vma); |
2046 | khugepaged_enter_vma(vma, vma->vm_flags); |
2047 | mas_destroy(&mas); |
2048 | validate_mm(mm); |
2049 | return error; |
2050 | } |
2051 | #endif /* CONFIG_STACK_GROWSUP */ |
2052 | |
2053 | /* |
2054 | * vma is the first one with address < vma->vm_start. Have to extend vma. |
2055 | * mmap_lock held for writing. |
2056 | */ |
2057 | int expand_downwards(struct vm_area_struct *vma, unsigned long address) |
2058 | { |
2059 | struct mm_struct *mm = vma->vm_mm; |
2060 | MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start); |
2061 | struct vm_area_struct *prev; |
2062 | int error = 0; |
2063 | |
2064 | if (!(vma->vm_flags & VM_GROWSDOWN)) |
2065 | return -EFAULT; |
2066 | |
2067 | address &= PAGE_MASK; |
2068 | if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) |
2069 | return -EPERM; |
2070 | |
2071 | /* Enforce stack_guard_gap */ |
2072 | prev = mas_prev(mas: &mas, min: 0); |
2073 | /* Check that both stack segments have the same anon_vma? */ |
2074 | if (prev) { |
2075 | if (!(prev->vm_flags & VM_GROWSDOWN) && |
2076 | vma_is_accessible(vma: prev) && |
2077 | (address - prev->vm_end < stack_guard_gap)) |
2078 | return -ENOMEM; |
2079 | } |
2080 | |
2081 | if (prev) |
2082 | mas_next_range(mas: &mas, max: vma->vm_start); |
2083 | |
2084 | __mas_set_range(mas: &mas, start: address, last: vma->vm_end - 1); |
2085 | if (mas_preallocate(mas: &mas, entry: vma, GFP_KERNEL)) |
2086 | return -ENOMEM; |
2087 | |
2088 | /* We must make sure the anon_vma is allocated. */ |
2089 | if (unlikely(anon_vma_prepare(vma))) { |
2090 | mas_destroy(mas: &mas); |
2091 | return -ENOMEM; |
2092 | } |
2093 | |
2094 | /* Lock the VMA before expanding to prevent concurrent page faults */ |
2095 | vma_start_write(vma); |
2096 | /* |
2097 | * vma->vm_start/vm_end cannot change under us because the caller |
2098 | * is required to hold the mmap_lock in read mode. We need the |
2099 | * anon_vma lock to serialize against concurrent expand_stacks. |
2100 | */ |
2101 | anon_vma_lock_write(anon_vma: vma->anon_vma); |
2102 | |
2103 | /* Somebody else might have raced and expanded it already */ |
2104 | if (address < vma->vm_start) { |
2105 | unsigned long size, grow; |
2106 | |
2107 | size = vma->vm_end - address; |
2108 | grow = (vma->vm_start - address) >> PAGE_SHIFT; |
2109 | |
2110 | error = -ENOMEM; |
2111 | if (grow <= vma->vm_pgoff) { |
2112 | error = acct_stack_growth(vma, size, grow); |
2113 | if (!error) { |
2114 | /* |
2115 | * We only hold a shared mmap_lock lock here, so |
2116 | * we need to protect against concurrent vma |
2117 | * expansions. anon_vma_lock_write() doesn't |
2118 | * help here, as we don't guarantee that all |
2119 | * growable vmas in a mm share the same root |
2120 | * anon vma. So, we reuse mm->page_table_lock |
2121 | * to guard against concurrent vma expansions. |
2122 | */ |
2123 | spin_lock(lock: &mm->page_table_lock); |
2124 | if (vma->vm_flags & VM_LOCKED) |
2125 | mm->locked_vm += grow; |
2126 | vm_stat_account(mm, vma->vm_flags, npages: grow); |
2127 | anon_vma_interval_tree_pre_update_vma(vma); |
2128 | vma->vm_start = address; |
2129 | vma->vm_pgoff -= grow; |
2130 | /* Overwrite old entry in mtree. */ |
2131 | mas_store_prealloc(mas: &mas, entry: vma); |
2132 | anon_vma_interval_tree_post_update_vma(vma); |
2133 | spin_unlock(lock: &mm->page_table_lock); |
2134 | |
2135 | perf_event_mmap(vma); |
2136 | } |
2137 | } |
2138 | } |
2139 | anon_vma_unlock_write(anon_vma: vma->anon_vma); |
2140 | khugepaged_enter_vma(vma, vm_flags: vma->vm_flags); |
2141 | mas_destroy(mas: &mas); |
2142 | validate_mm(mm); |
2143 | return error; |
2144 | } |
2145 | |
2146 | /* enforced gap between the expanding stack and other mappings. */ |
2147 | unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; |
2148 | |
2149 | static int __init cmdline_parse_stack_guard_gap(char *p) |
2150 | { |
2151 | unsigned long val; |
2152 | char *endptr; |
2153 | |
2154 | val = simple_strtoul(p, &endptr, 10); |
2155 | if (!*endptr) |
2156 | stack_guard_gap = val << PAGE_SHIFT; |
2157 | |
2158 | return 1; |
2159 | } |
2160 | __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
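/*
 * Worked example (editor's note): the boot parameter is given in pages, so
 * with 4KiB pages the default of 256 above is a 1MiB guard gap, and booting
 * with "stack_guard_gap=1024" gives 1024 << 12 == 4MiB.
 */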
2161 | |
2162 | #ifdef CONFIG_STACK_GROWSUP |
2163 | int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) |
2164 | { |
2165 | return expand_upwards(vma, address); |
2166 | } |
2167 | |
2168 | struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) |
2169 | { |
2170 | struct vm_area_struct *vma, *prev; |
2171 | |
2172 | addr &= PAGE_MASK; |
2173 | vma = find_vma_prev(mm, addr, &prev); |
2174 | if (vma && (vma->vm_start <= addr)) |
2175 | return vma; |
2176 | if (!prev) |
2177 | return NULL; |
2178 | if (expand_stack_locked(prev, addr)) |
2179 | return NULL; |
2180 | if (prev->vm_flags & VM_LOCKED) |
2181 | populate_vma_page_range(prev, addr, prev->vm_end, NULL); |
2182 | return prev; |
2183 | } |
2184 | #else |
2185 | int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) |
2186 | { |
2187 | return expand_downwards(vma, address); |
2188 | } |
2189 | |
2190 | struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) |
2191 | { |
2192 | struct vm_area_struct *vma; |
2193 | unsigned long start; |
2194 | |
2195 | addr &= PAGE_MASK; |
2196 | vma = find_vma(mm, addr); |
2197 | if (!vma) |
2198 | return NULL; |
2199 | if (vma->vm_start <= addr) |
2200 | return vma; |
2201 | start = vma->vm_start; |
2202 | if (expand_stack_locked(vma, address: addr)) |
2203 | return NULL; |
2204 | if (vma->vm_flags & VM_LOCKED) |
2205 | populate_vma_page_range(vma, start: addr, end: start, NULL); |
2206 | return vma; |
2207 | } |
2208 | #endif |
2209 | |
2210 | /* |
2211 | * IA64 has some horrid mapping rules: it can expand both up and down, |
2212 | * but with various special rules. |
2213 | * |
2214 | * We'll get rid of this architecture eventually, so the ugliness is |
2215 | * temporary. |
2216 | */ |
2217 | #ifdef CONFIG_IA64 |
2218 | static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr) |
2219 | { |
2220 | return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) && |
2221 | REGION_OFFSET(addr) < RGN_MAP_LIMIT; |
2222 | } |
2223 | |
2224 | /* |
2225 | * IA64 stacks grow down, but there's a special register backing store |
2226 | * that can grow up. Only sequentially, though, so the new address must |
2227 | * match vm_end. |
2228 | */ |
2229 | static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr) |
2230 | { |
2231 | if (!vma_expand_ok(vma, addr)) |
2232 | return -EFAULT; |
2233 | if (vma->vm_end != (addr & PAGE_MASK)) |
2234 | return -EFAULT; |
2235 | return expand_upwards(vma, addr); |
2236 | } |
2237 | |
2238 | static inline bool vma_expand_down(struct vm_area_struct *vma, unsigned long addr) |
2239 | { |
2240 | if (!vma_expand_ok(vma, addr)) |
2241 | return -EFAULT; |
2242 | return expand_downwards(vma, addr); |
2243 | } |
2244 | |
2245 | #elif defined(CONFIG_STACK_GROWSUP) |
2246 | |
2247 | #define vma_expand_up(vma,addr) expand_upwards(vma, addr) |
2248 | #define vma_expand_down(vma, addr) (-EFAULT) |
2249 | |
2250 | #else |
2251 | |
2252 | #define vma_expand_up(vma,addr) (-EFAULT) |
2253 | #define vma_expand_down(vma, addr) expand_downwards(vma, addr) |
2254 | |
2255 | #endif |
2256 | |
2257 | /* |
2258 | * expand_stack(): legacy interface for page faulting. Don't use unless |
2259 | * you have to. |
2260 | * |
2261 | * This is called with the mm locked for reading, drops the lock, takes |
2262 | * the lock for writing, tries to look up a vma again, expands it if |
2263 | * necessary, and downgrades the lock to reading again. |
2264 | * |
2265 | * If no vma is found or it can't be expanded, it returns NULL and has |
2266 | * dropped the lock. |
2267 | */ |
2268 | struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) |
2269 | { |
2270 | struct vm_area_struct *vma, *prev; |
2271 | |
2272 | mmap_read_unlock(mm); |
2273 | if (mmap_write_lock_killable(mm)) |
2274 | return NULL; |
2275 | |
2276 | vma = find_vma_prev(mm, addr, pprev: &prev); |
2277 | if (vma && vma->vm_start <= addr) |
2278 | goto success; |
2279 | |
2280 | if (prev && !vma_expand_up(prev, addr)) { |
2281 | vma = prev; |
2282 | goto success; |
2283 | } |
2284 | |
2285 | if (vma && !vma_expand_down(vma, addr)) |
2286 | goto success; |
2287 | |
2288 | mmap_write_unlock(mm); |
2289 | return NULL; |
2290 | |
2291 | success: |
2292 | mmap_write_downgrade(mm); |
2293 | return vma; |
2294 | } |
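/*
 * Usage sketch (editor's addition) for the legacy interface above, as seen
 * from a fault path that starts out holding only the read lock:
 *
 *	mmap_read_lock(mm);
 *	...
 *	vma = expand_stack(mm, addr);	// drops mmap_lock, retakes it for write,
 *					// then downgrades it back to read
 *	if (!vma)
 *		return -EFAULT;		// mmap_lock is no longer held
 *	// mmap_lock is held for reading again and vma now covers addr
 */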
2295 | |
2296 | /* |
2297 |  * Ok - the memory areas we should free are on a maple tree, so release them
2298 |  * and do the vma updates.
2299 | * |
2300 | * Called with the mm semaphore held. |
2301 | */ |
2302 | static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas) |
2303 | { |
2304 | unsigned long nr_accounted = 0; |
2305 | struct vm_area_struct *vma; |
2306 | |
2307 | /* Update high watermark before we lower total_vm */ |
2308 | update_hiwater_vm(mm); |
2309 | mas_for_each(mas, vma, ULONG_MAX) { |
2310 | long nrpages = vma_pages(vma); |
2311 | |
2312 | if (vma->vm_flags & VM_ACCOUNT) |
2313 | nr_accounted += nrpages; |
2314 | vm_stat_account(mm, vma->vm_flags, npages: -nrpages); |
2315 | remove_vma(vma, unreachable: false); |
2316 | } |
2317 | vm_unacct_memory(pages: nr_accounted); |
2318 | } |
2319 | |
2320 | /* |
2321 | * Get rid of page table information in the indicated region. |
2322 | * |
2323 | * Called with the mm semaphore held. |
2324 | */ |
2325 | static void unmap_region(struct mm_struct *mm, struct ma_state *mas, |
2326 | struct vm_area_struct *vma, struct vm_area_struct *prev, |
2327 | struct vm_area_struct *next, unsigned long start, |
2328 | unsigned long end, unsigned long tree_end, bool mm_wr_locked) |
2329 | { |
2330 | struct mmu_gather tlb; |
2331 | unsigned long mt_start = mas->index; |
2332 | |
2333 | lru_add_drain(); |
2334 | tlb_gather_mmu(tlb: &tlb, mm); |
2335 | update_hiwater_rss(mm); |
2336 | unmap_vmas(tlb: &tlb, mas, start_vma: vma, start, end, tree_end, mm_wr_locked); |
2337 | mas_set(mas, index: mt_start); |
2338 | free_pgtables(tlb: &tlb, mas, start_vma: vma, floor: prev ? prev->vm_end : FIRST_USER_ADDRESS, |
2339 | ceiling: next ? next->vm_start : USER_PGTABLES_CEILING, |
2340 | mm_wr_locked); |
2341 | tlb_finish_mmu(tlb: &tlb); |
2342 | } |
2343 | |
2344 | /* |
2345 | * __split_vma() bypasses sysctl_max_map_count checking. We use this where it |
2346 | * has already been checked or doesn't make sense to fail. |
2347 | * VMA Iterator will point to the end VMA. |
2348 | */ |
2349 | static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, |
2350 | unsigned long addr, int new_below) |
2351 | { |
2352 | struct vma_prepare vp; |
2353 | struct vm_area_struct *new; |
2354 | int err; |
2355 | |
2356 | WARN_ON(vma->vm_start >= addr); |
2357 | WARN_ON(vma->vm_end <= addr); |
2358 | |
2359 | if (vma->vm_ops && vma->vm_ops->may_split) { |
2360 | err = vma->vm_ops->may_split(vma, addr); |
2361 | if (err) |
2362 | return err; |
2363 | } |
2364 | |
2365 | new = vm_area_dup(vma); |
2366 | if (!new) |
2367 | return -ENOMEM; |
2368 | |
2369 | if (new_below) { |
2370 | new->vm_end = addr; |
2371 | } else { |
2372 | new->vm_start = addr; |
2373 | new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); |
2374 | } |
2375 | |
2376 | err = -ENOMEM; |
2377 | vma_iter_config(vmi, index: new->vm_start, last: new->vm_end); |
2378 | if (vma_iter_prealloc(vmi, vma: new)) |
2379 | goto out_free_vma; |
2380 | |
2381 | err = vma_dup_policy(src: vma, dst: new); |
2382 | if (err) |
2383 | goto out_free_vmi; |
2384 | |
2385 | err = anon_vma_clone(new, vma); |
2386 | if (err) |
2387 | goto out_free_mpol; |
2388 | |
2389 | if (new->vm_file) |
2390 | get_file(f: new->vm_file); |
2391 | |
2392 | if (new->vm_ops && new->vm_ops->open) |
2393 | new->vm_ops->open(new); |
2394 | |
2395 | vma_start_write(vma); |
2396 | vma_start_write(vma: new); |
2397 | |
2398 | init_vma_prep(vp: &vp, vma); |
2399 | vp.insert = new; |
2400 | vma_prepare(vp: &vp); |
2401 | vma_adjust_trans_huge(vma, start: vma->vm_start, end: addr, adjust_next: 0); |
2402 | |
2403 | if (new_below) { |
2404 | vma->vm_start = addr; |
2405 | vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT; |
2406 | } else { |
2407 | vma->vm_end = addr; |
2408 | } |
2409 | |
2410 | /* vma_complete stores the new vma */ |
2411 | vma_complete(vp: &vp, vmi, mm: vma->vm_mm); |
2412 | |
2413 | /* Success. */ |
2414 | if (new_below) |
2415 | vma_next(vmi); |
2416 | return 0; |
2417 | |
2418 | out_free_mpol: |
2419 | mpol_put(vma_policy(new)); |
2420 | out_free_vmi: |
2421 | vma_iter_free(vmi); |
2422 | out_free_vma: |
2423 | vm_area_free(new); |
2424 | return err; |
2425 | } |
2426 | |
2427 | /* |
2428 | * Split a vma into two pieces at address 'addr', a new vma is allocated |
2429 | * either for the first part or the tail. |
2430 | */ |
2431 | static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, |
2432 | unsigned long addr, int new_below) |
2433 | { |
2434 | if (vma->vm_mm->map_count >= sysctl_max_map_count) |
2435 | return -ENOMEM; |
2436 | |
2437 | return __split_vma(vmi, vma, addr, new_below); |
2438 | } |
2439 | |
2440 | /* |
2441 | * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd |
2442 | * context and anonymous VMA name within the range [start, end). |
2443 | * |
2444 | * As a result, we might be able to merge the newly modified VMA range with an |
2445 | * adjacent VMA with identical properties. |
2446 | * |
2447 | * If no merge is possible and the range does not span the entirety of the VMA, |
2448 | * we then need to split the VMA to accommodate the change. |
2449 | * |
2450 | * The function returns either the merged VMA, the original VMA if a split was |
2451 | * required instead, or an error if the split failed. |
2452 | */ |
2453 | struct vm_area_struct *vma_modify(struct vma_iterator *vmi, |
2454 | struct vm_area_struct *prev, |
2455 | struct vm_area_struct *vma, |
2456 | unsigned long start, unsigned long end, |
2457 | unsigned long vm_flags, |
2458 | struct mempolicy *policy, |
2459 | struct vm_userfaultfd_ctx uffd_ctx, |
2460 | struct anon_vma_name *anon_name) |
2461 | { |
2462 | pgoff_t pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); |
2463 | struct vm_area_struct *merged; |
2464 | |
2465 | merged = vma_merge(vmi, mm: vma->vm_mm, prev, addr: start, end, vm_flags, |
2466 | anon_vma: vma->anon_vma, file: vma->vm_file, pgoff, policy, |
2467 | vm_userfaultfd_ctx: uffd_ctx, anon_name); |
2468 | if (merged) |
2469 | return merged; |
2470 | |
2471 | if (vma->vm_start < start) { |
2472 | int err = split_vma(vmi, vma, addr: start, new_below: 1); |
2473 | |
2474 | if (err) |
2475 | return ERR_PTR(error: err); |
2476 | } |
2477 | |
2478 | if (vma->vm_end > end) { |
2479 | int err = split_vma(vmi, vma, addr: end, new_below: 0); |
2480 | |
2481 | if (err) |
2482 | return ERR_PTR(error: err); |
2483 | } |
2484 | |
2485 | return vma; |
2486 | } |
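/*
 * Usage sketch (editor's addition; the variable names are illustrative): an
 * mprotect-style caller changing the flags of [start, end) would typically do
 * something like:
 *
 *	vma = vma_modify(&vmi, prev, vma, start, end, new_flags,
 *			 vma_policy(vma), vma->vm_userfaultfd_ctx,
 *			 anon_vma_name(vma));
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	// vma now covers the range to be modified (possibly merged with
 *	// a compatible neighbour)
 */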
2487 | |
2488 | /* |
2489 | * Attempt to merge a newly mapped VMA with those adjacent to it. The caller |
2490 | * must ensure that [start, end) does not overlap any existing VMA. |
2491 | */ |
2492 | static struct vm_area_struct |
2493 | *vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev, |
2494 | struct vm_area_struct *vma, unsigned long start, |
2495 | unsigned long end, pgoff_t pgoff) |
2496 | { |
2497 | return vma_merge(vmi, mm: vma->vm_mm, prev, addr: start, end, vm_flags: vma->vm_flags, |
2498 | anon_vma: vma->anon_vma, file: vma->vm_file, pgoff, vma_policy(vma), |
2499 | vm_userfaultfd_ctx: vma->vm_userfaultfd_ctx, anon_name: anon_vma_name(vma)); |
2500 | } |
2501 | |
2502 | /* |
2503 | * Expand vma by delta bytes, potentially merging with an immediately adjacent |
2504 | * VMA with identical properties. |
2505 | */ |
2506 | struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi, |
2507 | struct vm_area_struct *vma, |
2508 | unsigned long delta) |
2509 | { |
2510 | pgoff_t pgoff = vma->vm_pgoff + vma_pages(vma); |
2511 | |
2512 | /* vma is specified as prev, so case 1 or 2 will apply. */ |
2513 | return vma_merge(vmi, mm: vma->vm_mm, prev: vma, addr: vma->vm_end, end: vma->vm_end + delta, |
2514 | vm_flags: vma->vm_flags, anon_vma: vma->anon_vma, file: vma->vm_file, pgoff, |
2515 | vma_policy(vma), vm_userfaultfd_ctx: vma->vm_userfaultfd_ctx, |
2516 | anon_name: anon_vma_name(vma)); |
2517 | } |
2518 | |
2519 | /* |
2520 | * do_vmi_align_munmap() - munmap the aligned region from @start to @end. |
2521 | * @vmi: The vma iterator |
2522 | * @vma: The starting vm_area_struct |
2523 | * @mm: The mm_struct |
2524 | * @start: The aligned start address to munmap. |
2525 | * @end: The aligned end address to munmap. |
2526 | * @uf: The userfaultfd list_head |
2527 | * @unlock: Set to true to drop the mmap_lock. unlocking only happens on |
2528 | * success. |
2529 | * |
2530 | * Return: 0 on success and drops the lock if so directed, error and leaves the |
2531 | * lock held otherwise. |
2532 | */ |
2533 | static int |
2534 | do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, |
2535 | struct mm_struct *mm, unsigned long start, |
2536 | unsigned long end, struct list_head *uf, bool unlock) |
2537 | { |
2538 | struct vm_area_struct *prev, *next = NULL; |
2539 | struct maple_tree mt_detach; |
2540 | int count = 0; |
2541 | int error = -ENOMEM; |
2542 | unsigned long locked_vm = 0; |
2543 | MA_STATE(mas_detach, &mt_detach, 0, 0); |
2544 | mt_init_flags(mt: &mt_detach, flags: vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK); |
2545 | mt_on_stack(mt_detach); |
2546 | |
2547 | /* |
2548 | * If we need to split any vma, do it now to save pain later. |
2549 | * |
2550 | * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially |
2551 | * unmapped vm_area_struct will remain in use: so lower split_vma |
2552 | * places tmp vma above, and higher split_vma places tmp vma below. |
2553 | */ |
2554 | |
2555 | /* Does it split the first one? */ |
2556 | if (start > vma->vm_start) { |
2557 | |
2558 | /* |
2559 | * Make sure that map_count on return from munmap() will |
2560 | * not exceed its limit; but let map_count go just above |
2561 | * its limit temporarily, to help free resources as expected. |
2562 | */ |
2563 | if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) |
2564 | goto map_count_exceeded; |
2565 | |
2566 | error = __split_vma(vmi, vma, addr: start, new_below: 1); |
2567 | if (error) |
2568 | goto start_split_failed; |
2569 | } |
2570 | |
2571 | /* |
2572 | * Detach a range of VMAs from the mm. Using next as a temp variable as |
2573 | * it is always overwritten. |
2574 | */ |
2575 | next = vma; |
2576 | do { |
2577 | /* Does it split the end? */ |
2578 | if (next->vm_end > end) { |
2579 | error = __split_vma(vmi, vma: next, addr: end, new_below: 0); |
2580 | if (error) |
2581 | goto end_split_failed; |
2582 | } |
2583 | vma_start_write(vma: next); |
2584 | mas_set(mas: &mas_detach, index: count); |
2585 | error = mas_store_gfp(mas: &mas_detach, entry: next, GFP_KERNEL); |
2586 | if (error) |
2587 | goto munmap_gather_failed; |
2588 | vma_mark_detached(vma: next, detached: true); |
2589 | if (next->vm_flags & VM_LOCKED) |
2590 | locked_vm += vma_pages(vma: next); |
2591 | |
2592 | count++; |
2593 | if (unlikely(uf)) { |
2594 | /* |
2595 | * If userfaultfd_unmap_prep returns an error the vmas |
2596 | * will remain split, but userland will get a |
2597 | * highly unexpected error anyway. This is no |
2598 | * different than the case where the first of the two |
2599 | * __split_vma fails, but we don't undo the first |
2600 | 			 * split, even though we could. This failure is unlikely
2601 | 			 * enough that it's not worth optimizing for.
2602 | */ |
2603 | error = userfaultfd_unmap_prep(vma: next, start, end, uf); |
2604 | |
2605 | if (error) |
2606 | goto userfaultfd_error; |
2607 | } |
2608 | #ifdef CONFIG_DEBUG_VM_MAPLE_TREE |
2609 | BUG_ON(next->vm_start < start); |
2610 | BUG_ON(next->vm_start > end); |
2611 | #endif |
2612 | } for_each_vma_range(*vmi, next, end); |
2613 | |
2614 | #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) |
2615 | /* Make sure no VMAs are about to be lost. */ |
2616 | { |
2617 | MA_STATE(test, &mt_detach, 0, 0); |
2618 | struct vm_area_struct *vma_mas, *vma_test; |
2619 | int test_count = 0; |
2620 | |
2621 | vma_iter_set(vmi, addr: start); |
2622 | rcu_read_lock(); |
2623 | vma_test = mas_find(mas: &test, max: count - 1); |
2624 | for_each_vma_range(*vmi, vma_mas, end) { |
2625 | BUG_ON(vma_mas != vma_test); |
2626 | test_count++; |
2627 | vma_test = mas_next(mas: &test, max: count - 1); |
2628 | } |
2629 | rcu_read_unlock(); |
2630 | BUG_ON(count != test_count); |
2631 | } |
2632 | #endif |
2633 | |
2634 | while (vma_iter_addr(vmi) > start) |
2635 | vma_iter_prev_range(vmi); |
2636 | |
2637 | error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL); |
2638 | if (error) |
2639 | goto clear_tree_failed; |
2640 | |
2641 | /* Point of no return */ |
2642 | mm->locked_vm -= locked_vm; |
2643 | mm->map_count -= count; |
2644 | if (unlock) |
2645 | mmap_write_downgrade(mm); |
2646 | |
2647 | prev = vma_iter_prev_range(vmi); |
2648 | next = vma_next(vmi); |
2649 | if (next) |
2650 | vma_iter_prev_range(vmi); |
2651 | |
2652 | /* |
2653 | * We can free page tables without write-locking mmap_lock because VMAs |
2654 | * were isolated before we downgraded mmap_lock. |
2655 | */ |
2656 | mas_set(mas: &mas_detach, index: 1); |
2657 | unmap_region(mm, mas: &mas_detach, vma, prev, next, start, end, tree_end: count, |
2658 | mm_wr_locked: !unlock); |
2659 | /* Statistics and freeing VMAs */ |
2660 | mas_set(mas: &mas_detach, index: 0); |
2661 | remove_mt(mm, mas: &mas_detach); |
2662 | validate_mm(mm); |
2663 | if (unlock) |
2664 | mmap_read_unlock(mm); |
2665 | |
2666 | __mt_destroy(mt: &mt_detach); |
2667 | return 0; |
2668 | |
2669 | clear_tree_failed: |
2670 | userfaultfd_error: |
2671 | munmap_gather_failed: |
2672 | end_split_failed: |
2673 | mas_set(mas: &mas_detach, index: 0); |
2674 | mas_for_each(&mas_detach, next, end) |
2675 | vma_mark_detached(vma: next, detached: false); |
2676 | |
2677 | __mt_destroy(mt: &mt_detach); |
2678 | start_split_failed: |
2679 | map_count_exceeded: |
2680 | validate_mm(mm); |
2681 | return error; |
2682 | } |
2683 | |
2684 | /* |
2685 | * do_vmi_munmap() - munmap a given range. |
2686 | * @vmi: The vma iterator |
2687 | * @mm: The mm_struct |
2688 | * @start: The start address to munmap |
2689 | * @len: The length of the range to munmap |
2690 | * @uf: The userfaultfd list_head |
2691 | * @unlock: set to true if the user wants to drop the mmap_lock on success |
2692 | * |
2693 |  * This function takes a @vmi that is either pointing to the previous VMA or set
2694 | * to MA_START and sets it up to remove the mapping(s). The @len will be |
2695 |  * aligned and any arch_unmap work will be performed.
2696 | * |
2697 | * Return: 0 on success and drops the lock if so directed, error and leaves the |
2698 | * lock held otherwise. |
2699 | */ |
2700 | int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, |
2701 | unsigned long start, size_t len, struct list_head *uf, |
2702 | bool unlock) |
2703 | { |
2704 | unsigned long end; |
2705 | struct vm_area_struct *vma; |
2706 | |
2707 | if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) |
2708 | return -EINVAL; |
2709 | |
2710 | end = start + PAGE_ALIGN(len); |
2711 | if (end == start) |
2712 | return -EINVAL; |
2713 | |
2714 | /* arch_unmap() might do unmaps itself. */ |
2715 | arch_unmap(mm, start, end); |
2716 | |
2717 | /* Find the first overlapping VMA */ |
2718 | vma = vma_find(vmi, max: end); |
2719 | if (!vma) { |
2720 | if (unlock) |
2721 | mmap_write_unlock(mm); |
2722 | return 0; |
2723 | } |
2724 | |
2725 | return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); |
2726 | } |
2727 | |
2728 | /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls. |
2729 | * @mm: The mm_struct |
2730 | * @start: The start address to munmap |
2731 | * @len: The length to be munmapped. |
2732 | * @uf: The userfaultfd list_head |
2733 | * |
2734 | * Return: 0 on success, error otherwise. |
2735 | */ |
2736 | int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, |
2737 | struct list_head *uf) |
2738 | { |
2739 | VMA_ITERATOR(vmi, mm, start); |
2740 | |
2741 | return do_vmi_munmap(vmi: &vmi, mm, start, len, uf, unlock: false); |
2742 | } |
2743 | |
2744 | unsigned long mmap_region(struct file *file, unsigned long addr, |
2745 | unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, |
2746 | struct list_head *uf) |
2747 | { |
2748 | struct mm_struct *mm = current->mm; |
2749 | struct vm_area_struct *vma = NULL; |
2750 | struct vm_area_struct *next, *prev, *merge; |
2751 | pgoff_t pglen = len >> PAGE_SHIFT; |
2752 | unsigned long charged = 0; |
2753 | unsigned long end = addr + len; |
2754 | unsigned long merge_start = addr, merge_end = end; |
2755 | bool writable_file_mapping = false; |
2756 | pgoff_t vm_pgoff; |
2757 | int error; |
2758 | VMA_ITERATOR(vmi, mm, addr); |
2759 | |
2760 | /* Check against address space limit. */ |
2761 | if (!may_expand_vm(mm, vm_flags, npages: len >> PAGE_SHIFT)) { |
2762 | unsigned long nr_pages; |
2763 | |
2764 | /* |
2765 | * MAP_FIXED may remove pages of mappings that intersects with |
2766 | * requested mapping. Account for the pages it would unmap. |
2767 | */ |
2768 | nr_pages = count_vma_pages_range(mm, addr, end); |
2769 | |
2770 | if (!may_expand_vm(mm, vm_flags, |
2771 | npages: (len >> PAGE_SHIFT) - nr_pages)) |
2772 | return -ENOMEM; |
2773 | } |
2774 | |
2775 | /* Unmap any existing mapping in the area */ |
2776 | if (do_vmi_munmap(vmi: &vmi, mm, start: addr, len, uf, unlock: false)) |
2777 | return -ENOMEM; |
2778 | |
2779 | /* |
2780 | * Private writable mapping: check memory availability |
2781 | */ |
2782 | if (accountable_mapping(file, vm_flags)) { |
2783 | charged = len >> PAGE_SHIFT; |
2784 | if (security_vm_enough_memory_mm(mm, pages: charged)) |
2785 | return -ENOMEM; |
2786 | vm_flags |= VM_ACCOUNT; |
2787 | } |
2788 | |
2789 | next = vma_next(vmi: &vmi); |
2790 | prev = vma_prev(vmi: &vmi); |
2791 | if (vm_flags & VM_SPECIAL) { |
2792 | if (prev) |
2793 | vma_iter_next_range(vmi: &vmi); |
2794 | goto cannot_expand; |
2795 | } |
2796 | |
2797 | /* Attempt to expand an old mapping */ |
2798 | /* Check next */ |
2799 | if (next && next->vm_start == end && !vma_policy(next) && |
2800 | can_vma_merge_before(vma: next, vm_flags, NULL, file, vm_pgoff: pgoff+pglen, |
2801 | NULL_VM_UFFD_CTX, NULL)) { |
2802 | merge_end = next->vm_end; |
2803 | vma = next; |
2804 | vm_pgoff = next->vm_pgoff - pglen; |
2805 | } |
2806 | |
2807 | /* Check prev */ |
2808 | if (prev && prev->vm_end == addr && !vma_policy(prev) && |
2809 | (vma ? can_vma_merge_after(vma: prev, vm_flags, anon_vma: vma->anon_vma, file, |
2810 | vm_pgoff: pgoff, vm_userfaultfd_ctx: vma->vm_userfaultfd_ctx, NULL) : |
2811 | can_vma_merge_after(vma: prev, vm_flags, NULL, file, vm_pgoff: pgoff, |
2812 | NULL_VM_UFFD_CTX, NULL))) { |
2813 | merge_start = prev->vm_start; |
2814 | vma = prev; |
2815 | vm_pgoff = prev->vm_pgoff; |
2816 | } else if (prev) { |
2817 | vma_iter_next_range(vmi: &vmi); |
2818 | } |
2819 | |
2820 | /* Actually expand, if possible */ |
2821 | if (vma && |
2822 | !vma_expand(vmi: &vmi, vma, start: merge_start, end: merge_end, pgoff: vm_pgoff, next)) { |
2823 | khugepaged_enter_vma(vma, vm_flags); |
2824 | goto expanded; |
2825 | } |
2826 | |
2827 | if (vma == prev) |
2828 | vma_iter_set(vmi: &vmi, addr); |
2829 | cannot_expand: |
2830 | |
2831 | /* |
2832 | * Determine the object being mapped and call the appropriate |
2833 | 	 * specific mapper. The address has already been validated but not
2834 | 	 * unmapped; any overlapping mappings have already been removed.
2835 | */ |
2836 | vma = vm_area_alloc(mm); |
2837 | if (!vma) { |
2838 | error = -ENOMEM; |
2839 | goto unacct_error; |
2840 | } |
2841 | |
2842 | vma_iter_config(vmi: &vmi, index: addr, last: end); |
2843 | vma->vm_start = addr; |
2844 | vma->vm_end = end; |
2845 | vm_flags_init(vma, flags: vm_flags); |
2846 | vma->vm_page_prot = vm_get_page_prot(vm_flags); |
2847 | vma->vm_pgoff = pgoff; |
2848 | |
2849 | if (file) { |
2850 | vma->vm_file = get_file(f: file); |
2851 | error = call_mmap(file, vma); |
2852 | if (error) |
2853 | goto unmap_and_free_vma; |
2854 | |
2855 | if (vma_is_shared_maywrite(vma)) { |
2856 | error = mapping_map_writable(mapping: file->f_mapping); |
2857 | if (error) |
2858 | goto close_and_free_vma; |
2859 | |
2860 | writable_file_mapping = true; |
2861 | } |
2862 | |
2863 | /* |
2864 | * Expansion is handled above, merging is handled below. |
2865 | * Drivers should not alter the address of the VMA. |
2866 | */ |
2867 | error = -EINVAL; |
2868 | if (WARN_ON((addr != vma->vm_start))) |
2869 | goto close_and_free_vma; |
2870 | |
2871 | vma_iter_config(vmi: &vmi, index: addr, last: end); |
2872 | /* |
2873 | * If vm_flags changed after call_mmap(), we should try merge |
2874 | * vma again as we may succeed this time. |
2875 | */ |
2876 | if (unlikely(vm_flags != vma->vm_flags && prev)) { |
2877 | merge = vma_merge_new_vma(vmi: &vmi, prev, vma, |
2878 | start: vma->vm_start, end: vma->vm_end, |
2879 | pgoff: vma->vm_pgoff); |
2880 | if (merge) { |
2881 | /* |
2882 | * ->mmap() can change vma->vm_file and fput |
2883 | * the original file. So fput the vma->vm_file |
2884 | * here or we would add an extra fput for file |
2885 | * and cause general protection fault |
2886 | * ultimately. |
2887 | */ |
2888 | fput(vma->vm_file); |
2889 | vm_area_free(vma); |
2890 | vma = merge; |
2891 | /* Update vm_flags to pick up the change. */ |
2892 | vm_flags = vma->vm_flags; |
2893 | goto unmap_writable; |
2894 | } |
2895 | } |
2896 | |
2897 | vm_flags = vma->vm_flags; |
2898 | } else if (vm_flags & VM_SHARED) { |
2899 | error = shmem_zero_setup(vma); |
2900 | if (error) |
2901 | goto free_vma; |
2902 | } else { |
2903 | vma_set_anonymous(vma); |
2904 | } |
2905 | |
2906 | if (map_deny_write_exec(vma, vm_flags: vma->vm_flags)) { |
2907 | error = -EACCES; |
2908 | goto close_and_free_vma; |
2909 | } |
2910 | |
2911 | /* Allow architectures to sanity-check the vm_flags */ |
2912 | error = -EINVAL; |
2913 | if (!arch_validate_flags(flags: vma->vm_flags)) |
2914 | goto close_and_free_vma; |
2915 | |
2916 | error = -ENOMEM; |
2917 | if (vma_iter_prealloc(vmi: &vmi, vma)) |
2918 | goto close_and_free_vma; |
2919 | |
2920 | /* Lock the VMA since it is modified after insertion into VMA tree */ |
2921 | vma_start_write(vma); |
2922 | vma_iter_store(vmi: &vmi, vma); |
2923 | mm->map_count++; |
2924 | if (vma->vm_file) { |
2925 | i_mmap_lock_write(mapping: vma->vm_file->f_mapping); |
2926 | if (vma_is_shared_maywrite(vma)) |
2927 | mapping_allow_writable(mapping: vma->vm_file->f_mapping); |
2928 | |
2929 | flush_dcache_mmap_lock(mapping: vma->vm_file->f_mapping); |
2930 | vma_interval_tree_insert(node: vma, root: &vma->vm_file->f_mapping->i_mmap); |
2931 | flush_dcache_mmap_unlock(mapping: vma->vm_file->f_mapping); |
2932 | i_mmap_unlock_write(mapping: vma->vm_file->f_mapping); |
2933 | } |
2934 | |
2935 | /* |
2936 | 	 * vma_merge() calls khugepaged_enter_vma() as well; the call
2937 | 	 * below covers the non-merge case.
2938 | */ |
2939 | khugepaged_enter_vma(vma, vm_flags: vma->vm_flags); |
2940 | |
2941 | /* Once vma denies write, undo our temporary denial count */ |
2942 | unmap_writable: |
2943 | if (writable_file_mapping) |
2944 | mapping_unmap_writable(mapping: file->f_mapping); |
2945 | file = vma->vm_file; |
2946 | ksm_add_vma(vma); |
2947 | expanded: |
2948 | perf_event_mmap(vma); |
2949 | |
2950 | vm_stat_account(mm, vm_flags, npages: len >> PAGE_SHIFT); |
2951 | if (vm_flags & VM_LOCKED) { |
2952 | if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || |
2953 | is_vm_hugetlb_page(vma) || |
2954 | vma == get_gate_vma(current->mm)) |
2955 | vm_flags_clear(vma, VM_LOCKED_MASK); |
2956 | else |
2957 | mm->locked_vm += (len >> PAGE_SHIFT); |
2958 | } |
2959 | |
2960 | if (file) |
2961 | uprobe_mmap(vma); |
2962 | |
2963 | /* |
2964 | 	 * A new (or expanded) vma always gets the soft-dirty status.
2965 | 	 * Otherwise the user-space soft-dirty page tracker would not be
2966 | 	 * able to distinguish a situation where a vma area is unmapped
2967 | 	 * and then a new one is mapped in place (which must be treated
2968 | 	 * as a completely new data area).
2969 | */ |
2970 | vm_flags_set(vma, VM_SOFTDIRTY); |
2971 | |
2972 | vma_set_page_prot(vma); |
2973 | |
2974 | validate_mm(mm); |
2975 | return addr; |
2976 | |
2977 | close_and_free_vma: |
2978 | if (file && vma->vm_ops && vma->vm_ops->close) |
2979 | vma->vm_ops->close(vma); |
2980 | |
2981 | if (file || vma->vm_file) { |
2982 | unmap_and_free_vma: |
2983 | fput(vma->vm_file); |
2984 | vma->vm_file = NULL; |
2985 | |
2986 | vma_iter_set(vmi: &vmi, addr: vma->vm_end); |
2987 | /* Undo any partial mapping done by a device driver. */ |
2988 | unmap_region(mm, mas: &vmi.mas, vma, prev, next, start: vma->vm_start, |
2989 | end: vma->vm_end, tree_end: vma->vm_end, mm_wr_locked: true); |
2990 | } |
2991 | if (writable_file_mapping) |
2992 | mapping_unmap_writable(mapping: file->f_mapping); |
2993 | free_vma: |
2994 | vm_area_free(vma); |
2995 | unacct_error: |
2996 | if (charged) |
2997 | vm_unacct_memory(pages: charged); |
2998 | validate_mm(mm); |
2999 | return error; |
3000 | } |
3001 | |
3002 | static int __vm_munmap(unsigned long start, size_t len, bool unlock) |
3003 | { |
3004 | int ret; |
3005 | struct mm_struct *mm = current->mm; |
3006 | LIST_HEAD(uf); |
3007 | VMA_ITERATOR(vmi, mm, start); |
3008 | |
3009 | if (mmap_write_lock_killable(mm)) |
3010 | return -EINTR; |
3011 | |
3012 | ret = do_vmi_munmap(vmi: &vmi, mm, start, len, uf: &uf, unlock); |
3013 | if (ret || !unlock) |
3014 | mmap_write_unlock(mm); |
3015 | |
3016 | userfaultfd_unmap_complete(mm, uf: &uf); |
3017 | return ret; |
3018 | } |
3019 | |
3020 | int vm_munmap(unsigned long start, size_t len) |
3021 | { |
3022 | return __vm_munmap(start, len, unlock: false); |
3023 | } |
3024 | EXPORT_SYMBOL(vm_munmap); |
3025 | |
3026 | SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) |
3027 | { |
3028 | addr = untagged_addr(addr); |
3029 | return __vm_munmap(start: addr, len, unlock: true); |
3030 | } |
3031 | |
3032 | |
3033 | /* |
3034 | * Emulation of deprecated remap_file_pages() syscall. |
3035 | */ |
3036 | SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, |
3037 | unsigned long, prot, unsigned long, pgoff, unsigned long, flags) |
3038 | { |
3039 | |
3040 | struct mm_struct *mm = current->mm; |
3041 | struct vm_area_struct *vma; |
3042 | unsigned long populate = 0; |
3043 | unsigned long ret = -EINVAL; |
3044 | struct file *file; |
3045 | |
3046 | 	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
3047 | current->comm, current->pid); |
3048 | |
3049 | if (prot) |
3050 | return ret; |
3051 | start = start & PAGE_MASK; |
3052 | size = size & PAGE_MASK; |
3053 | |
3054 | if (start + size <= start) |
3055 | return ret; |
3056 | |
3057 | /* Does pgoff wrap? */ |
3058 | if (pgoff + (size >> PAGE_SHIFT) < pgoff) |
3059 | return ret; |
3060 | |
3061 | if (mmap_write_lock_killable(mm)) |
3062 | return -EINTR; |
3063 | |
3064 | vma = vma_lookup(mm, addr: start); |
3065 | |
3066 | if (!vma || !(vma->vm_flags & VM_SHARED)) |
3067 | goto out; |
3068 | |
3069 | if (start + size > vma->vm_end) { |
3070 | VMA_ITERATOR(vmi, mm, vma->vm_end); |
3071 | struct vm_area_struct *next, *prev = vma; |
3072 | |
3073 | for_each_vma_range(vmi, next, start + size) { |
3074 | /* hole between vmas ? */ |
3075 | if (next->vm_start != prev->vm_end) |
3076 | goto out; |
3077 | |
3078 | if (next->vm_file != vma->vm_file) |
3079 | goto out; |
3080 | |
3081 | if (next->vm_flags != vma->vm_flags) |
3082 | goto out; |
3083 | |
3084 | if (start + size <= next->vm_end) |
3085 | break; |
3086 | |
3087 | prev = next; |
3088 | } |
3089 | |
3090 | if (!next) |
3091 | goto out; |
3092 | } |
3093 | |
3094 | prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; |
3095 | prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; |
3096 | prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; |
3097 | |
3098 | flags &= MAP_NONBLOCK; |
3099 | flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; |
3100 | if (vma->vm_flags & VM_LOCKED) |
3101 | flags |= MAP_LOCKED; |
3102 | |
3103 | file = get_file(f: vma->vm_file); |
3104 | ret = do_mmap(file: vma->vm_file, addr: start, len: size, |
3105 | prot, flags, vm_flags: 0, pgoff, populate: &populate, NULL); |
3106 | fput(file); |
3107 | out: |
3108 | mmap_write_unlock(mm); |
3109 | if (populate) |
3110 | mm_populate(addr: ret, len: populate); |
3111 | if (!IS_ERR_VALUE(ret)) |
3112 | ret = 0; |
3113 | return ret; |
3114 | } |
3115 | |
3116 | /* |
3117 | * do_vma_munmap() - Unmap a full or partial vma. |
3118 | * @vmi: The vma iterator pointing at the vma |
3119 | * @vma: The first vma to be munmapped |
3120 | * @start: the start of the address to unmap |
3121 | * @end: The end of the address to unmap |
3122 | * @uf: The userfaultfd list_head |
3123 | * @unlock: Drop the lock on success |
3124 | * |
3125 |  * Unmaps a VMA mapping when the vma iterator is already in position.
3126 |  * Does not handle alignment.
3127 |  *
3128 |  * Return: 0 on success and drops the lock if so directed; returns an error on
3129 |  * failure and leaves the lock held.
3130 | */ |
3131 | int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, |
3132 | unsigned long start, unsigned long end, struct list_head *uf, |
3133 | bool unlock) |
3134 | { |
3135 | struct mm_struct *mm = vma->vm_mm; |
3136 | |
3137 | arch_unmap(mm, start, end); |
3138 | return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); |
3139 | } |
3140 | |
3141 | /* |
3142 | * do_brk_flags() - Increase the brk vma if the flags match. |
3143 | * @vmi: The vma iterator |
3144 | * @addr: The start address |
3145 | * @len: The length of the increase |
3146 | * @vma: The vma, |
3147 | * @flags: The VMA Flags |
3148 | * |
3149 | * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags |
3150 | * do not match then create a new anonymous VMA. Eventually we may be able to |
3151 | * do some brk-specific accounting here. |
3152 | */ |
3153 | static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, |
3154 | unsigned long addr, unsigned long len, unsigned long flags) |
3155 | { |
3156 | struct mm_struct *mm = current->mm; |
3157 | struct vma_prepare vp; |
3158 | |
3159 | /* |
3160 | * Check against address space limits by the changed size |
3161 | * Note: This happens *after* clearing old mappings in some code paths. |
3162 | */ |
3163 | flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; |
3164 | if (!may_expand_vm(mm, flags, npages: len >> PAGE_SHIFT)) |
3165 | return -ENOMEM; |
3166 | |
3167 | if (mm->map_count > sysctl_max_map_count) |
3168 | return -ENOMEM; |
3169 | |
3170 | if (security_vm_enough_memory_mm(mm, pages: len >> PAGE_SHIFT)) |
3171 | return -ENOMEM; |
3172 | |
3173 | /* |
3174 | * Expand the existing vma if possible; Note that singular lists do not |
3175 | * occur after forking, so the expand will only happen on new VMAs. |
3176 | */ |
3177 | if (vma && vma->vm_end == addr && !vma_policy(vma) && |
3178 | can_vma_merge_after(vma, vm_flags: flags, NULL, NULL, |
3179 | vm_pgoff: addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) { |
3180 | vma_iter_config(vmi, index: vma->vm_start, last: addr + len); |
3181 | if (vma_iter_prealloc(vmi, vma)) |
3182 | goto unacct_fail; |
3183 | |
3184 | vma_start_write(vma); |
3185 | |
3186 | init_vma_prep(vp: &vp, vma); |
3187 | vma_prepare(vp: &vp); |
3188 | vma_adjust_trans_huge(vma, start: vma->vm_start, end: addr + len, adjust_next: 0); |
3189 | vma->vm_end = addr + len; |
3190 | vm_flags_set(vma, VM_SOFTDIRTY); |
3191 | vma_iter_store(vmi, vma); |
3192 | |
3193 | vma_complete(vp: &vp, vmi, mm); |
3194 | khugepaged_enter_vma(vma, vm_flags: flags); |
3195 | goto out; |
3196 | } |
3197 | |
3198 | if (vma) |
3199 | vma_iter_next_range(vmi); |
3200 | /* create a vma struct for an anonymous mapping */ |
3201 | vma = vm_area_alloc(mm); |
3202 | if (!vma) |
3203 | goto unacct_fail; |
3204 | |
3205 | vma_set_anonymous(vma); |
3206 | vma->vm_start = addr; |
3207 | vma->vm_end = addr + len; |
3208 | vma->vm_pgoff = addr >> PAGE_SHIFT; |
3209 | vm_flags_init(vma, flags); |
3210 | vma->vm_page_prot = vm_get_page_prot(vm_flags: flags); |
3211 | vma_start_write(vma); |
3212 | if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) |
3213 | goto mas_store_fail; |
3214 | |
3215 | mm->map_count++; |
3216 | validate_mm(mm); |
3217 | ksm_add_vma(vma); |
3218 | out: |
3219 | perf_event_mmap(vma); |
3220 | mm->total_vm += len >> PAGE_SHIFT; |
3221 | mm->data_vm += len >> PAGE_SHIFT; |
3222 | if (flags & VM_LOCKED) |
3223 | mm->locked_vm += (len >> PAGE_SHIFT); |
3224 | vm_flags_set(vma, VM_SOFTDIRTY); |
3225 | return 0; |
3226 | |
3227 | mas_store_fail: |
3228 | vm_area_free(vma); |
3229 | unacct_fail: |
	vm_unacct_memory(len >> PAGE_SHIFT);
3231 | return -ENOMEM; |
3232 | } |
3233 | |
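/*
 * vm_brk_flags() - brk()-style anonymous mapping of a page-aligned range.
 * @addr: The start address
 * @request: The requested length; page-aligned internally
 * @flags: VMA flags; only VM_EXEC is currently accepted
 *
 * Takes mmap_lock for writing, unmaps whatever currently occupies the range,
 * then extends or creates an anonymous VMA via do_brk_flags().  The range is
 * populated immediately when the mm defaults to VM_LOCKED.
 */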
3234 | int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) |
3235 | { |
3236 | struct mm_struct *mm = current->mm; |
3237 | struct vm_area_struct *vma = NULL; |
3238 | unsigned long len; |
3239 | int ret; |
3240 | bool populate; |
3241 | LIST_HEAD(uf); |
3242 | VMA_ITERATOR(vmi, mm, addr); |
3243 | |
3244 | len = PAGE_ALIGN(request); |
3245 | if (len < request) |
3246 | return -ENOMEM; |
3247 | if (!len) |
3248 | return 0; |
3249 | |
3250 | /* Until we need other flags, refuse anything except VM_EXEC. */ |
3251 | if ((flags & (~VM_EXEC)) != 0) |
3252 | return -EINVAL; |
3253 | |
3254 | if (mmap_write_lock_killable(mm)) |
3255 | return -EINTR; |
3256 | |
3257 | ret = check_brk_limits(addr, len); |
3258 | if (ret) |
3259 | goto limits_failed; |
3260 | |
	ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
	if (ret)
		goto munmap_failed;

	vma = vma_prev(&vmi);
	ret = do_brk_flags(&vmi, vma, addr, len, flags);
	populate = ((mm->def_flags & VM_LOCKED) != 0);
	mmap_write_unlock(mm);
	userfaultfd_unmap_complete(mm, &uf);
3270 | if (populate && !ret) |
3271 | mm_populate(addr, len); |
3272 | return ret; |
3273 | |
3274 | munmap_failed: |
3275 | limits_failed: |
3276 | mmap_write_unlock(mm); |
3277 | return ret; |
3278 | } |
3279 | EXPORT_SYMBOL(vm_brk_flags); |
3280 | |
3281 | /* Release all mmaps. */ |
3282 | void exit_mmap(struct mm_struct *mm) |
3283 | { |
3284 | struct mmu_gather tlb; |
3285 | struct vm_area_struct *vma; |
3286 | unsigned long nr_accounted = 0; |
3287 | MA_STATE(mas, &mm->mm_mt, 0, 0); |
3288 | int count = 0; |
3289 | |
	/* mm's last user has gone, and it's about to be pulled down */
3291 | mmu_notifier_release(mm); |
3292 | |
3293 | mmap_read_lock(mm); |
3294 | arch_exit_mmap(mm); |
3295 | |
	vma = mas_find(&mas, ULONG_MAX);
3297 | if (!vma) { |
3298 | /* Can happen if dup_mmap() received an OOM */ |
3299 | mmap_read_unlock(mm); |
3300 | return; |
3301 | } |
3302 | |
3303 | lru_add_drain(); |
3304 | flush_cache_mm(mm); |
	tlb_gather_mmu_fullmm(&tlb, mm);
	/* update_hiwater_rss(mm) here? but nobody should be looking */
	/* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
	unmap_vmas(&tlb, &mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
3309 | mmap_read_unlock(mm); |
3310 | |
3311 | /* |
3312 | * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper |
3313 | * because the memory has been already freed. |
3314 | */ |
	set_bit(MMF_OOM_SKIP, &mm->flags);
	mmap_write_lock(mm);
	mt_clear_in_rcu(&mm->mm_mt);
	mas_set(&mas, vma->vm_end);
	free_pgtables(&tlb, &mas, vma, FIRST_USER_ADDRESS,
		      USER_PGTABLES_CEILING, true);
	tlb_finish_mmu(&tlb);
3322 | |
3323 | /* |
3324 | * Walk the list again, actually closing and freeing it, with preemption |
3325 | * enabled, without holding any MM locks besides the unreachable |
3326 | * mmap_write_lock. |
3327 | */ |
	mas_set(&mas, vma->vm_end);
	do {
		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += vma_pages(vma);
		remove_vma(vma, true);
		count++;
		cond_resched();
	} while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
3336 | |
3337 | BUG_ON(count != mm->map_count); |
3338 | |
3339 | trace_exit_mmap(mm); |
	__mt_destroy(&mm->mm_mt);
3341 | mmap_write_unlock(mm); |
	vm_unacct_memory(nr_accounted);
3343 | } |
3344 | |
/* Insert vm structure into the mm's VMA tree, sorted by address,
 * and into the inode's i_mmap tree. If vm_file is non-NULL
 * then i_mmap_rwsem is taken here.
 */
3349 | int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) |
3350 | { |
3351 | unsigned long charged = vma_pages(vma); |
3352 | |
3354 | if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) |
3355 | return -ENOMEM; |
3356 | |
	if ((vma->vm_flags & VM_ACCOUNT) &&
	     security_vm_enough_memory_mm(mm, charged))
3359 | return -ENOMEM; |
3360 | |
3361 | /* |
3362 | * The vm_pgoff of a purely anonymous vma should be irrelevant |
3363 | * until its first write fault, when page's anon_vma and index |
3364 | * are set. But now set the vm_pgoff it will almost certainly |
3365 | * end up with (unless mremap moves it elsewhere before that |
	 * first write fault), so /proc/pid/maps tells a consistent story.
3367 | * |
3368 | * By setting it to reflect the virtual start address of the |
3369 | * vma, merges and splits can happen in a seamless way, just |
3370 | * using the existing file pgoff checks and manipulations. |
3371 | * Similarly in do_mmap and in do_brk_flags. |
3372 | */ |
3373 | if (vma_is_anonymous(vma)) { |
3374 | BUG_ON(vma->anon_vma); |
3375 | vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; |
3376 | } |
3377 | |
3378 | if (vma_link(mm, vma)) { |
3379 | if (vma->vm_flags & VM_ACCOUNT) |
			vm_unacct_memory(charged);
3381 | return -ENOMEM; |
3382 | } |
3383 | |
3384 | return 0; |
3385 | } |
3386 | |
3387 | /* |
3388 | * Copy the vma structure to a new location in the same mm, |
3389 | * prior to moving page table entries, to effect an mremap move. |
3390 | */ |
3391 | struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, |
3392 | unsigned long addr, unsigned long len, pgoff_t pgoff, |
3393 | bool *need_rmap_locks) |
3394 | { |
3395 | struct vm_area_struct *vma = *vmap; |
3396 | unsigned long vma_start = vma->vm_start; |
3397 | struct mm_struct *mm = vma->vm_mm; |
3398 | struct vm_area_struct *new_vma, *prev; |
3399 | bool faulted_in_anon_vma = true; |
3400 | VMA_ITERATOR(vmi, mm, addr); |
3401 | |
3402 | /* |
3403 | * If anonymous vma has not yet been faulted, update new pgoff |
3404 | * to match new location, to increase its chance of merging. |
3405 | */ |
3406 | if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { |
3407 | pgoff = addr >> PAGE_SHIFT; |
3408 | faulted_in_anon_vma = false; |
3409 | } |
3410 | |
	new_vma = find_vma_prev(mm, addr, &prev);
3412 | if (new_vma && new_vma->vm_start < addr + len) |
3413 | return NULL; /* should never get here */ |
3414 | |
	new_vma = vma_merge_new_vma(&vmi, prev, vma, addr, addr + len, pgoff);
3416 | if (new_vma) { |
3417 | /* |
3418 | * Source vma may have been merged into new_vma |
3419 | */ |
3420 | if (unlikely(vma_start >= new_vma->vm_start && |
3421 | vma_start < new_vma->vm_end)) { |
3422 | /* |
3423 | * The only way we can get a vma_merge with |
3424 | * self during an mremap is if the vma hasn't |
3425 | * been faulted in yet and we were allowed to |
3426 | * reset the dst vma->vm_pgoff to the |
3427 | * destination address of the mremap to allow |
3428 | * the merge to happen. mremap must change the |
3429 | * vm_pgoff linearity between src and dst vmas |
3430 | * (in turn preventing a vma_merge) to be |
3431 | * safe. It is only safe to keep the vm_pgoff |
3432 | * linear if there are no pages mapped yet. |
3433 | */ |
3434 | VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); |
3435 | *vmap = vma = new_vma; |
3436 | } |
3437 | *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); |
3438 | } else { |
3439 | new_vma = vm_area_dup(vma); |
3440 | if (!new_vma) |
3441 | goto out; |
3442 | new_vma->vm_start = addr; |
3443 | new_vma->vm_end = addr + len; |
3444 | new_vma->vm_pgoff = pgoff; |
		if (vma_dup_policy(vma, new_vma))
			goto out_free_vma;
		if (anon_vma_clone(new_vma, vma))
			goto out_free_mempol;
		if (new_vma->vm_file)
			get_file(new_vma->vm_file);
		if (new_vma->vm_ops && new_vma->vm_ops->open)
			new_vma->vm_ops->open(new_vma);
		if (vma_link(mm, new_vma))
			goto out_vma_link;
3454 | goto out_vma_link; |
3455 | *need_rmap_locks = false; |
3456 | } |
3457 | return new_vma; |
3458 | |
3459 | out_vma_link: |
3460 | if (new_vma->vm_ops && new_vma->vm_ops->close) |
3461 | new_vma->vm_ops->close(new_vma); |
3462 | |
3463 | if (new_vma->vm_file) |
3464 | fput(new_vma->vm_file); |
3465 | |
3466 | unlink_anon_vmas(new_vma); |
3467 | out_free_mempol: |
3468 | mpol_put(vma_policy(new_vma)); |
3469 | out_free_vma: |
3470 | vm_area_free(new_vma); |
3471 | out: |
3472 | return NULL; |
3473 | } |
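
/*
 * A minimal caller sketch for copy_vma() (an assumption, modelled on
 * move_vma() in mm/mremap.c): the destination VMA is created first and the
 * page tables are only moved afterwards, honouring the need_rmap_locks hint
 * returned here:
 *
 *	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
 *			   &need_rmap_locks);
 *	if (!new_vma)
 *		return -ENOMEM;
 */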
3474 | |
3475 | /* |
3476 | * Return true if the calling process may expand its vm space by the passed |
3477 | * number of pages |
3478 | */ |
3479 | bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) |
3480 | { |
3481 | if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) |
3482 | return false; |
3483 | |
3484 | if (is_data_mapping(flags) && |
3485 | mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { |
3486 | /* Workaround for Valgrind */ |
3487 | if (rlimit(RLIMIT_DATA) == 0 && |
3488 | mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) |
3489 | return true; |
3490 | |
		pr_warn_once("%s (%d): VmData %lu exceeds data ulimit %lu. Update limits%s.\n",
			     current->comm, current->pid,
			     (mm->data_vm + npages) << PAGE_SHIFT,
			     rlimit(RLIMIT_DATA),
			     ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
3496 | |
3497 | if (!ignore_rlimit_data) |
3498 | return false; |
3499 | } |
3500 | |
3501 | return true; |
3502 | } |
3503 | |
3504 | void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) |
3505 | { |
3506 | WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages); |
3507 | |
3508 | if (is_exec_mapping(flags)) |
3509 | mm->exec_vm += npages; |
3510 | else if (is_stack_mapping(flags)) |
3511 | mm->stack_vm += npages; |
3512 | else if (is_data_mapping(flags)) |
3513 | mm->data_vm += npages; |
3514 | } |
3515 | |
3516 | static vm_fault_t special_mapping_fault(struct vm_fault *vmf); |
3517 | |
3518 | /* |
3519 | * Having a close hook prevents vma merging regardless of flags. |
3520 | */ |
3521 | static void special_mapping_close(struct vm_area_struct *vma) |
3522 | { |
3523 | } |
3524 | |
3525 | static const char *special_mapping_name(struct vm_area_struct *vma) |
3526 | { |
3527 | return ((struct vm_special_mapping *)vma->vm_private_data)->name; |
3528 | } |
3529 | |
3530 | static int special_mapping_mremap(struct vm_area_struct *new_vma) |
3531 | { |
3532 | struct vm_special_mapping *sm = new_vma->vm_private_data; |
3533 | |
3534 | if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) |
3535 | return -EFAULT; |
3536 | |
3537 | if (sm->mremap) |
3538 | return sm->mremap(sm, new_vma); |
3539 | |
3540 | return 0; |
3541 | } |
3542 | |
3543 | static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr) |
3544 | { |
3545 | /* |
	 * Forbid splitting special mappings - the kernel has expectations about
	 * the number of pages in the mapping. Together with VM_DONTEXPAND
	 * the size of the vma should stay the same over the special mapping's
	 * lifetime.
3550 | */ |
3551 | return -EINVAL; |
3552 | } |
3553 | |
3554 | static const struct vm_operations_struct special_mapping_vmops = { |
3555 | .close = special_mapping_close, |
3556 | .fault = special_mapping_fault, |
3557 | .mremap = special_mapping_mremap, |
3558 | .name = special_mapping_name, |
	/* The vDSO code relies on VVAR not being accessible remotely */
3560 | .access = NULL, |
3561 | .may_split = special_mapping_split, |
3562 | }; |
3563 | |
3564 | static const struct vm_operations_struct legacy_special_mapping_vmops = { |
3565 | .close = special_mapping_close, |
3566 | .fault = special_mapping_fault, |
3567 | }; |
3568 | |
3569 | static vm_fault_t special_mapping_fault(struct vm_fault *vmf) |
3570 | { |
3571 | struct vm_area_struct *vma = vmf->vma; |
3572 | pgoff_t pgoff; |
3573 | struct page **pages; |
3574 | |
3575 | if (vma->vm_ops == &legacy_special_mapping_vmops) { |
3576 | pages = vma->vm_private_data; |
3577 | } else { |
3578 | struct vm_special_mapping *sm = vma->vm_private_data; |
3579 | |
3580 | if (sm->fault) |
3581 | return sm->fault(sm, vmf->vma, vmf); |
3582 | |
3583 | pages = sm->pages; |
3584 | } |
3585 | |
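	/*
	 * Walk the NULL-terminated pages array until the faulting page
	 * offset is reached; running off the end means the offset is past
	 * the last supplied page and must raise SIGBUS.
	 */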
3586 | for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) |
3587 | pgoff--; |
3588 | |
3589 | if (*pages) { |
3590 | struct page *page = *pages; |
3591 | get_page(page); |
3592 | vmf->page = page; |
3593 | return 0; |
3594 | } |
3595 | |
3596 | return VM_FAULT_SIGBUS; |
3597 | } |
3598 | |
3599 | static struct vm_area_struct *__install_special_mapping( |
3600 | struct mm_struct *mm, |
3601 | unsigned long addr, unsigned long len, |
3602 | unsigned long vm_flags, void *priv, |
3603 | const struct vm_operations_struct *ops) |
3604 | { |
3605 | int ret; |
3606 | struct vm_area_struct *vma; |
3607 | |
3608 | vma = vm_area_alloc(mm); |
3609 | if (unlikely(vma == NULL)) |
		return ERR_PTR(-ENOMEM);
3611 | |
3612 | vma->vm_start = addr; |
3613 | vma->vm_end = addr + len; |
3614 | |
	vm_flags_init(vma, (vm_flags | mm->def_flags |
		      VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3618 | |
3619 | vma->vm_ops = ops; |
3620 | vma->vm_private_data = priv; |
3621 | |
3622 | ret = insert_vm_struct(mm, vma); |
3623 | if (ret) |
3624 | goto out; |
3625 | |
	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
3627 | |
3628 | perf_event_mmap(vma); |
3629 | |
3630 | return vma; |
3631 | |
3632 | out: |
3633 | vm_area_free(vma); |
	return ERR_PTR(ret);
3635 | } |
3636 | |
3637 | bool vma_is_special_mapping(const struct vm_area_struct *vma, |
3638 | const struct vm_special_mapping *sm) |
3639 | { |
3640 | return vma->vm_private_data == sm && |
3641 | (vma->vm_ops == &special_mapping_vmops || |
3642 | vma->vm_ops == &legacy_special_mapping_vmops); |
3643 | } |
3644 | |
3645 | /* |
3646 | * Called with mm->mmap_lock held for writing. |
3647 | * Insert a new vma covering the given region, with the given flags. |
3648 | * Its pages are supplied by the given array of struct page *. |
3649 | * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. |
3650 | * The region past the last page supplied will always produce SIGBUS. |
3651 | * The array pointer and the pages it points to are assumed to stay alive |
3652 | * for as long as this mapping might exist. |
3653 | */ |
3654 | struct vm_area_struct *_install_special_mapping( |
3655 | struct mm_struct *mm, |
3656 | unsigned long addr, unsigned long len, |
3657 | unsigned long vm_flags, const struct vm_special_mapping *spec) |
3658 | { |
	return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
					 &special_mapping_vmops);
3661 | } |
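
/*
 * A minimal usage sketch for _install_special_mapping() (an assumption,
 * loosely modelled on how architectures map their vDSO); vdso_pages, the
 * name and the flags are illustrative only:
 *
 *	static struct page *vdso_pages[2];	(one page + NULL terminator)
 *	static const struct vm_special_mapping vdso_mapping = {
 *		.name = "[vdso]",
 *		.pages = vdso_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_EXEC | VM_MAYREAD |
 *				       VM_MAYEXEC, &vdso_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */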
3662 | |
3663 | int install_special_mapping(struct mm_struct *mm, |
3664 | unsigned long addr, unsigned long len, |
3665 | unsigned long vm_flags, struct page **pages) |
3666 | { |
	struct vm_area_struct *vma = __install_special_mapping(
		mm, addr, len, vm_flags, (void *)pages,
		&legacy_special_mapping_vmops);

	return PTR_ERR_OR_ZERO(vma);
3672 | } |
3673 | |
3674 | static DEFINE_MUTEX(mm_all_locks_mutex); |
3675 | |
3676 | static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) |
3677 | { |
3678 | if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { |
3679 | /* |
3680 | * The LSB of head.next can't change from under us |
3681 | * because we hold the mm_all_locks_mutex. |
3682 | */ |
3683 | down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); |
3684 | /* |
3685 | * We can safely modify head.next after taking the |
3686 | * anon_vma->root->rwsem. If some other vma in this mm shares |
3687 | * the same anon_vma we won't take it again. |
3688 | * |
3689 | * No need of atomic instructions here, head.next |
3690 | * can't change from under us thanks to the |
3691 | * anon_vma->root->rwsem. |
3692 | */ |
3693 | if (__test_and_set_bit(0, (unsigned long *) |
3694 | &anon_vma->root->rb_root.rb_root.rb_node)) |
3695 | BUG(); |
3696 | } |
3697 | } |
3698 | |
3699 | static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) |
3700 | { |
3701 | if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { |
3702 | /* |
3703 | * AS_MM_ALL_LOCKS can't change from under us because |
3704 | * we hold the mm_all_locks_mutex. |
3705 | * |
3706 | * Operations on ->flags have to be atomic because |
3707 | * even if AS_MM_ALL_LOCKS is stable thanks to the |
3708 | * mm_all_locks_mutex, there may be other cpus |
3709 | * changing other bitflags in parallel to us. |
3710 | */ |
		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
3712 | BUG(); |
3713 | down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); |
3714 | } |
3715 | } |
3716 | |
3717 | /* |
3718 | * This operation locks against the VM for all pte/vma/mm related |
3719 | * operations that could ever happen on a certain mm. This includes |
3720 | * vmtruncate, try_to_unmap, and all page faults. |
3721 | * |
3722 | * The caller must take the mmap_lock in write mode before calling |
3723 | * mm_take_all_locks(). The caller isn't allowed to release the |
3724 | * mmap_lock until mm_drop_all_locks() returns. |
3725 | * |
3726 | * mmap_lock in write mode is required in order to block all operations |
3727 | * that could modify pagetables and free pages without need of |
3728 | * altering the vma layout. It's also needed in write mode to avoid new |
3729 | * anon_vmas to be associated with existing vmas. |
3730 | * |
3731 | * A single task can't take more than one mm_take_all_locks() in a row |
3732 | * or it would deadlock. |
3733 | * |
 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
 * mapping->flags avoid taking the same lock twice, if more than one
 * vma in this mm is backed by the same anon_vma or address_space.
 *
 * We take locks in the following order, according to the comment at the
 * beginning of mm/rmap.c:
 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
 *   hugetlb mapping);
 * - all vmas marked locked
 * - all i_mmap_rwsem locks;
 * - all anon_vma->rwsems
 *
 * We can take all locks within these types randomly because the VM code
 * doesn't nest them and we're protected from parallel mm_take_all_locks() by
 * mm_all_locks_mutex.
 *
 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
 * that may have to take thousands of locks.
3752 | * |
3753 | * mm_take_all_locks() can fail if it's interrupted by signals. |
3754 | */ |
3755 | int mm_take_all_locks(struct mm_struct *mm) |
3756 | { |
3757 | struct vm_area_struct *vma; |
3758 | struct anon_vma_chain *avc; |
3759 | MA_STATE(mas, &mm->mm_mt, 0, 0); |
3760 | |
3761 | mmap_assert_write_locked(mm); |
3762 | |
3763 | mutex_lock(&mm_all_locks_mutex); |
3764 | |
3765 | /* |
3766 | * vma_start_write() does not have a complement in mm_drop_all_locks() |
3767 | * because vma_start_write() is always asymmetrical; it marks a VMA as |
3768 | * being written to until mmap_write_unlock() or mmap_write_downgrade() |
3769 | * is reached. |
3770 | */ |
3771 | mas_for_each(&mas, vma, ULONG_MAX) { |
3772 | if (signal_pending(current)) |
3773 | goto out_unlock; |
3774 | vma_start_write(vma); |
3775 | } |
3776 | |
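	/* i_mmap_rwsem of hugetlbfs-backed mappings is taken first. */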
	mas_set(&mas, 0);
	mas_for_each(&mas, vma, ULONG_MAX) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
		    is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
3784 | } |
3785 | |
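	/* Then i_mmap_rwsem of all other file-backed mappings. */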
	mas_set(&mas, 0);
	mas_for_each(&mas, vma, ULONG_MAX) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
		    !is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
3793 | } |
3794 | |
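	/* Finally, every anon_vma rwsem reachable from this mm. */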
	mas_set(&mas, 0);
	mas_for_each(&mas, vma, ULONG_MAX) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_lock_anon_vma(mm, avc->anon_vma);
3802 | } |
3803 | |
3804 | return 0; |
3805 | |
3806 | out_unlock: |
3807 | mm_drop_all_locks(mm); |
3808 | return -EINTR; |
3809 | } |
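
/*
 * A minimal caller sketch (an assumption, modelled on mmu_notifier
 * registration): the write-locked mmap_lock brackets the whole sequence,
 * and -EINTR from mm_take_all_locks() simply aborts the update:
 *
 *	mmap_write_lock(mm);
 *	ret = mm_take_all_locks(mm);
 *	if (!ret) {
 *		... update state that must not race with faults or rmap ...
 *		mm_drop_all_locks(mm);
 *	}
 *	mmap_write_unlock(mm);
 */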
3810 | |
3811 | static void vm_unlock_anon_vma(struct anon_vma *anon_vma) |
3812 | { |
3813 | if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { |
3814 | /* |
3815 | * The LSB of head.next can't change to 0 from under |
3816 | * us because we hold the mm_all_locks_mutex. |
3817 | * |
3818 | * We must however clear the bitflag before unlocking |
3819 | * the vma so the users using the anon_vma->rb_root will |
3820 | * never see our bitflag. |
3821 | * |
3822 | * No need of atomic instructions here, head.next |
3823 | * can't change from under us until we release the |
3824 | * anon_vma->root->rwsem. |
3825 | */ |
3826 | if (!__test_and_clear_bit(0, (unsigned long *) |
3827 | &anon_vma->root->rb_root.rb_root.rb_node)) |
3828 | BUG(); |
3829 | anon_vma_unlock_write(anon_vma); |
3830 | } |
3831 | } |
3832 | |
3833 | static void vm_unlock_mapping(struct address_space *mapping) |
3834 | { |
3835 | if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { |
3836 | /* |
3837 | * AS_MM_ALL_LOCKS can't change to 0 from under us |
3838 | * because we hold the mm_all_locks_mutex. |
3839 | */ |
3840 | i_mmap_unlock_write(mapping); |
		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
					&mapping->flags))
3843 | BUG(); |
3844 | } |
3845 | } |
3846 | |
3847 | /* |
3848 | * The mmap_lock cannot be released by the caller until |
3849 | * mm_drop_all_locks() returns. |
3850 | */ |
3851 | void mm_drop_all_locks(struct mm_struct *mm) |
3852 | { |
3853 | struct vm_area_struct *vma; |
3854 | struct anon_vma_chain *avc; |
3855 | MA_STATE(mas, &mm->mm_mt, 0, 0); |
3856 | |
3857 | mmap_assert_write_locked(mm); |
3858 | BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); |
3859 | |
3860 | mas_for_each(&mas, vma, ULONG_MAX) { |
3861 | if (vma->anon_vma) |
3862 | list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) |
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
3866 | } |
3867 | |
	mutex_unlock(&mm_all_locks_mutex);
3869 | } |
3870 | |
3871 | /* |
3872 | * initialise the percpu counter for VM |
3873 | */ |
3874 | void __init mmap_init(void) |
3875 | { |
3876 | int ret; |
3877 | |
3878 | ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); |
3879 | VM_BUG_ON(ret); |
3880 | } |
3881 | |
3882 | /* |
3883 | * Initialise sysctl_user_reserve_kbytes. |
3884 | * |
3885 | * This is intended to prevent a user from starting a single memory hogging |
3886 | * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER |
3887 | * mode. |
3888 | * |
 * The default value is min(3% of free memory, 128MB).
3890 | * 128MB is enough to recover with sshd/login, bash, and top/kill. |
3891 | */ |
3892 | static int init_user_reserve(void) |
3893 | { |
3894 | unsigned long free_kbytes; |
3895 | |
3896 | free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); |
3897 | |
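	/* free_kbytes / 32 is roughly 3%; 1UL << 17 kilobytes is 128MB. */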
3898 | sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); |
3899 | return 0; |
3900 | } |
3901 | subsys_initcall(init_user_reserve); |
3902 | |
3903 | /* |
3904 | * Initialise sysctl_admin_reserve_kbytes. |
3905 | * |
3906 | * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin |
3907 | * to log in and kill a memory hogging process. |
3908 | * |
3909 | * Systems with more than 256MB will reserve 8MB, enough to recover |
3910 | * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will |
3911 | * only reserve 3% of free pages by default. |
3912 | */ |
3913 | static int init_admin_reserve(void) |
3914 | { |
3915 | unsigned long free_kbytes; |
3916 | |
3917 | free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); |
3918 | |
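	/* free_kbytes / 32 is roughly 3%; 1UL << 13 kilobytes is 8MB. */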
3919 | sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); |
3920 | return 0; |
3921 | } |
3922 | subsys_initcall(init_admin_reserve); |
3923 | |
3924 | /* |
 * Reinitialise user and admin reserves if memory is added or removed.
3926 | * |
3927 | * The default user reserve max is 128MB, and the default max for the |
3928 | * admin reserve is 8MB. These are usually, but not always, enough to |
3929 | * enable recovery from a memory hogging process using login/sshd, a shell, |
3930 | * and tools like top. It may make sense to increase or even disable the |
3931 | * reserve depending on the existence of swap or variations in the recovery |
3932 | * tools. So, the admin may have changed them. |
3933 | * |
3934 | * If memory is added and the reserves have been eliminated or increased above |
3935 | * the default max, then we'll trust the admin. |
3936 | * |
3937 | * If memory is removed and there isn't enough free memory, then we |
3938 | * need to reset the reserves. |
3939 | * |
3940 | * Otherwise keep the reserve set by the admin. |
3941 | */ |
3942 | static int reserve_mem_notifier(struct notifier_block *nb, |
3943 | unsigned long action, void *data) |
3944 | { |
3945 | unsigned long tmp, free_kbytes; |
3946 | |
3947 | switch (action) { |
3948 | case MEM_ONLINE: |
3949 | /* Default max is 128MB. Leave alone if modified by operator. */ |
3950 | tmp = sysctl_user_reserve_kbytes; |
3951 | if (0 < tmp && tmp < (1UL << 17)) |
3952 | init_user_reserve(); |
3953 | |
3954 | /* Default max is 8MB. Leave alone if modified by operator. */ |
3955 | tmp = sysctl_admin_reserve_kbytes; |
3956 | if (0 < tmp && tmp < (1UL << 13)) |
3957 | init_admin_reserve(); |
3958 | |
3959 | break; |
3960 | case MEM_OFFLINE: |
3961 | free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); |
3962 | |
3963 | if (sysctl_user_reserve_kbytes > free_kbytes) { |
3964 | init_user_reserve(); |
			pr_info("vm.user_reserve_kbytes reset to %lu\n",
				sysctl_user_reserve_kbytes);
3967 | } |
3968 | |
3969 | if (sysctl_admin_reserve_kbytes > free_kbytes) { |
3970 | init_admin_reserve(); |
			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
				sysctl_admin_reserve_kbytes);
3973 | } |
3974 | break; |
3975 | default: |
3976 | break; |
3977 | } |
3978 | return NOTIFY_OK; |
3979 | } |
3980 | |
3981 | static int __meminit init_reserve_notifier(void) |
3982 | { |
3983 | if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI)) |
		pr_err("Failed registering memory add/remove notifier for admin reserve\n");
3985 | |
3986 | return 0; |
3987 | } |
3988 | subsys_initcall(init_reserve_notifier); |
3989 | |