1 | /* |
2 | * mm/rmap.c - physical to virtual reverse mappings |
3 | * |
4 | * Copyright 2001, Rik van Riel <riel@conectiva.com.br> |
5 | * Released under the General Public License (GPL). |
6 | * |
7 | * Simple, low overhead reverse mapping scheme. |
8 | * Please try to keep this thing as modular as possible. |
9 | * |
10 | * Provides methods for unmapping each kind of mapped page: |
11 | * the anon methods track anonymous pages, and |
12 | * the file methods track pages belonging to an inode. |
13 | * |
14 | * Original design by Rik van Riel <riel@conectiva.com.br> 2001 |
15 | * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004 |
16 | * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004 |
17 | * Contributions by Hugh Dickins 2003, 2004 |
18 | */ |
19 | |
20 | /* |
21 | * Lock ordering in mm: |
22 | * |
23 | * inode->i_rwsem (while writing or truncating, not reading or faulting) |
24 | * mm->mmap_lock |
25 | * mapping->invalidate_lock (in filemap_fault) |
26 | * page->flags PG_locked (lock_page) |
27 | * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below) |
28 | * vma_start_write |
29 | * mapping->i_mmap_rwsem |
30 | * anon_vma->rwsem |
31 | * mm->page_table_lock or pte_lock |
32 | * swap_lock (in swap_duplicate, swap_info_get) |
33 | * mmlist_lock (in mmput, drain_mmlist and others) |
34 | * mapping->private_lock (in block_dirty_folio) |
35 | * folio_lock_memcg move_lock (in block_dirty_folio) |
36 | * i_pages lock (widely used) |
37 | * lruvec->lru_lock (in folio_lruvec_lock_irq) |
38 | * inode->i_lock (in set_page_dirty's __mark_inode_dirty) |
39 | * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) |
40 | * sb_lock (within inode_lock in fs/fs-writeback.c) |
41 | * i_pages lock (widely used, in set_page_dirty, |
42 | * in arch-dependent flush_dcache_mmap_lock, |
43 | * within bdi.wb->list_lock in __sync_single_inode) |
44 | * |
45 | * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon) |
46 | * ->tasklist_lock |
47 | * pte map lock |
48 | * |
49 | * hugetlbfs PageHuge() pages take locks in this order: |
50 | * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) |
51 | * vma_lock (hugetlb specific lock for pmd_sharing) |
52 | * mapping->i_mmap_rwsem (also used for hugetlb pmd sharing) |
53 | * page->flags PG_locked (lock_page) |
54 | */ |
55 | |
56 | #include <linux/mm.h> |
57 | #include <linux/sched/mm.h> |
58 | #include <linux/sched/task.h> |
59 | #include <linux/pagemap.h> |
60 | #include <linux/swap.h> |
61 | #include <linux/swapops.h> |
62 | #include <linux/slab.h> |
63 | #include <linux/init.h> |
64 | #include <linux/ksm.h> |
65 | #include <linux/rmap.h> |
66 | #include <linux/rcupdate.h> |
67 | #include <linux/export.h> |
68 | #include <linux/memcontrol.h> |
69 | #include <linux/mmu_notifier.h> |
70 | #include <linux/migrate.h> |
71 | #include <linux/hugetlb.h> |
72 | #include <linux/huge_mm.h> |
73 | #include <linux/backing-dev.h> |
74 | #include <linux/page_idle.h> |
75 | #include <linux/memremap.h> |
76 | #include <linux/userfaultfd_k.h> |
77 | #include <linux/mm_inline.h> |
78 | |
79 | #include <asm/tlbflush.h> |
80 | |
81 | #define CREATE_TRACE_POINTS |
82 | #include <trace/events/tlb.h> |
83 | #include <trace/events/migrate.h> |
84 | |
85 | #include "internal.h" |
86 | |
87 | static struct kmem_cache *anon_vma_cachep; |
88 | static struct kmem_cache *anon_vma_chain_cachep; |
89 | |
90 | static inline struct anon_vma *anon_vma_alloc(void) |
91 | { |
92 | struct anon_vma *anon_vma; |
93 | |
94 | anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); |
95 | if (anon_vma) { |
96 | atomic_set(&anon_vma->refcount, 1); |
97 | anon_vma->num_children = 0; |
98 | anon_vma->num_active_vmas = 0; |
99 | anon_vma->parent = anon_vma; |
100 | /* |
101 | * Initialise the anon_vma root to point to itself. If called |
102 | * from fork, the root will be reset to the parent's anon_vma. |
103 | */ |
104 | anon_vma->root = anon_vma; |
105 | } |
106 | |
107 | return anon_vma; |
108 | } |
109 | |
110 | static inline void anon_vma_free(struct anon_vma *anon_vma) |
111 | { |
112 | VM_BUG_ON(atomic_read(&anon_vma->refcount)); |
113 | |
114 | /* |
115 | * Synchronize against folio_lock_anon_vma_read() such that |
116 | * we can safely hold the lock without the anon_vma getting |
117 | * freed. |
118 | * |
119 | * Relies on the full mb implied by the atomic_dec_and_test() from |
120 | * put_anon_vma() against the acquire barrier implied by |
121 | * down_read_trylock() from folio_lock_anon_vma_read(). This orders: |
122 | * |
123 | * folio_lock_anon_vma_read() VS put_anon_vma() |
124 | * down_read_trylock() atomic_dec_and_test() |
125 | * LOCK MB |
126 | * atomic_read() rwsem_is_locked() |
127 | * |
128 | * LOCK should suffice since the actual taking of the lock must |
129 | * happen _before_ what follows. |
130 | */ |
131 | might_sleep(); |
132 | if (rwsem_is_locked(&anon_vma->root->rwsem)) { |
133 | anon_vma_lock_write(anon_vma); |
134 | anon_vma_unlock_write(anon_vma); |
135 | } |
136 | |
137 | kmem_cache_free(anon_vma_cachep, anon_vma); |
138 | } |
139 | |
140 | static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) |
141 | { |
142 | return kmem_cache_alloc(anon_vma_chain_cachep, gfp); |
143 | } |
144 | |
145 | static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) |
146 | { |
147 | kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); |
148 | } |
149 | |
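/*
 * Link an anon_vma_chain into both structures it spans: the VMA's
 * anon_vma_chain list and the anon_vma's interval tree, so that rmap
 * walks over the anon_vma can find this VMA.
 */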
150 | static void anon_vma_chain_link(struct vm_area_struct *vma, |
151 | struct anon_vma_chain *avc, |
152 | struct anon_vma *anon_vma) |
153 | { |
154 | avc->vma = vma; |
155 | avc->anon_vma = anon_vma; |
156 | list_add(&avc->same_vma, &vma->anon_vma_chain); |
157 | anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); |
158 | } |
159 | |
160 | /** |
161 | * __anon_vma_prepare - attach an anon_vma to a memory region |
162 | * @vma: the memory region in question |
163 | * |
164 | * This makes sure the memory mapping described by 'vma' has |
165 | * an 'anon_vma' attached to it, so that we can associate the |
166 | * anonymous pages mapped into it with that anon_vma. |
167 | * |
168 | * The common case will be that we already have one, which |
169 | * is handled inline by anon_vma_prepare(). But if |
170 | * not we either need to find an adjacent mapping that we |
171 | * can re-use the anon_vma from (very common when the only |
172 | * reason for splitting a vma has been mprotect()), or we |
173 | * allocate a new one. |
174 | * |
175 | * Anon-vma allocations are very subtle, because we may have |
176 | * optimistically looked up an anon_vma in folio_lock_anon_vma_read() |
177 | * and that may actually touch the rwsem even in the newly |
178 | * allocated vma (it depends on RCU to make sure that the |
179 | * anon_vma isn't actually destroyed). |
180 | * |
181 | * As a result, we need to do proper anon_vma locking even |
182 | * for the new allocation. At the same time, we do not want |
183 | * to do any locking for the common case of already having |
184 | * an anon_vma. |
185 | * |
186 | * This must be called with the mmap_lock held for reading. |
187 | */ |
188 | int __anon_vma_prepare(struct vm_area_struct *vma) |
189 | { |
190 | struct mm_struct *mm = vma->vm_mm; |
191 | struct anon_vma *anon_vma, *allocated; |
192 | struct anon_vma_chain *avc; |
193 | |
194 | might_sleep(); |
195 | |
196 | avc = anon_vma_chain_alloc(GFP_KERNEL); |
197 | if (!avc) |
198 | goto out_enomem; |
199 | |
200 | anon_vma = find_mergeable_anon_vma(vma); |
201 | allocated = NULL; |
202 | if (!anon_vma) { |
203 | anon_vma = anon_vma_alloc(); |
204 | if (unlikely(!anon_vma)) |
205 | goto out_enomem_free_avc; |
206 | anon_vma->num_children++; /* self-parent link for new root */ |
207 | allocated = anon_vma; |
208 | } |
209 | |
210 | anon_vma_lock_write(anon_vma); |
211 | /* page_table_lock to protect against threads */ |
212 | spin_lock(&mm->page_table_lock); |
213 | if (likely(!vma->anon_vma)) { |
214 | vma->anon_vma = anon_vma; |
215 | anon_vma_chain_link(vma, avc, anon_vma); |
216 | anon_vma->num_active_vmas++; |
217 | allocated = NULL; |
218 | avc = NULL; |
219 | } |
220 | spin_unlock(&mm->page_table_lock); |
221 | anon_vma_unlock_write(anon_vma); |
222 | |
223 | if (unlikely(allocated)) |
224 | put_anon_vma(allocated); |
225 | if (unlikely(avc)) |
226 | anon_vma_chain_free(avc); |
227 | |
228 | return 0; |
229 | |
230 | out_enomem_free_avc: |
231 | anon_vma_chain_free(avc); |
232 | out_enomem: |
233 | return -ENOMEM; |
234 | } |
235 | |
236 | /* |
237 | * This is a useful helper function for locking the anon_vma root as |
238 | * we traverse the vma->anon_vma_chain, looping over anon_vma's that |
239 | * have the same vma. |
240 | * |
241 | * Such anon_vma's should have the same root, so you'd expect to see |
242 | * just a single mutex_lock for the whole traversal. |
243 | */ |
244 | static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) |
245 | { |
246 | struct anon_vma *new_root = anon_vma->root; |
247 | if (new_root != root) { |
248 | if (WARN_ON_ONCE(root)) |
249 | up_write(&root->rwsem); |
250 | root = new_root; |
251 | down_write(&root->rwsem); |
252 | } |
253 | return root; |
254 | } |
255 | |
256 | static inline void unlock_anon_vma_root(struct anon_vma *root) |
257 | { |
258 | if (root) |
259 | up_write(&root->rwsem); |
260 | } |
261 | |
262 | /* |
263 | * Attach the anon_vmas from src to dst. |
264 | * Returns 0 on success, -ENOMEM on failure. |
265 | * |
266 | * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(), |
267 | * copy_vma() and anon_vma_fork(). The first four want an exact copy of src, |
268 | * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to |
269 | * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before |
270 | * the call, we can identify this case by checking (!dst->anon_vma && |
271 | * src->anon_vma). |
272 | * |
273 | * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find |
274 | * and reuse existing anon_vma which has no vmas and only one child anon_vma. |
275 | * This prevents the anon_vma hierarchy from degrading into an endless linear |
276 | * chain when a task forks constantly. On the other hand, an anon_vma with |
277 | * more than one child isn't reused even if there is no live vma, so the |
278 | * rmap walker has a good chance of avoiding a scan of the whole hierarchy |
279 | * when it searches for where a page is mapped. |
280 | */ |
281 | int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) |
282 | { |
283 | struct anon_vma_chain *avc, *pavc; |
284 | struct anon_vma *root = NULL; |
285 | |
286 | list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { |
287 | struct anon_vma *anon_vma; |
288 | |
289 | avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); |
290 | if (unlikely(!avc)) { |
291 | unlock_anon_vma_root(root); |
292 | root = NULL; |
293 | avc = anon_vma_chain_alloc(GFP_KERNEL); |
294 | if (!avc) |
295 | goto enomem_failure; |
296 | } |
297 | anon_vma = pavc->anon_vma; |
298 | root = lock_anon_vma_root(root, anon_vma); |
299 | anon_vma_chain_link(dst, avc, anon_vma); |
300 | |
301 | /* |
302 | * Reuse existing anon_vma if it has no vma and only one |
303 | * anon_vma child. |
304 | * |
305 | * Root anon_vma is never reused: |
306 | * it has self-parent reference and at least one child. |
307 | */ |
308 | if (!dst->anon_vma && src->anon_vma && |
309 | anon_vma->num_children < 2 && |
310 | anon_vma->num_active_vmas == 0) |
311 | dst->anon_vma = anon_vma; |
312 | } |
313 | if (dst->anon_vma) |
314 | dst->anon_vma->num_active_vmas++; |
315 | unlock_anon_vma_root(root); |
316 | return 0; |
317 | |
318 | enomem_failure: |
319 | /* |
320 | * dst->anon_vma is dropped here otherwise its num_active_vmas can |
321 | * be incorrectly decremented in unlink_anon_vmas(). |
322 | * We can safely do this because callers of anon_vma_clone() don't care |
323 | * about dst->anon_vma if anon_vma_clone() failed. |
324 | */ |
325 | dst->anon_vma = NULL; |
326 | unlink_anon_vmas(dst); |
327 | return -ENOMEM; |
328 | } |
329 | |
330 | /* |
331 | * Attach vma to its own anon_vma, as well as to the anon_vmas that |
332 | * the corresponding VMA in the parent process is attached to. |
333 | * Returns 0 on success, non-zero on failure. |
334 | */ |
335 | int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) |
336 | { |
337 | struct anon_vma_chain *avc; |
338 | struct anon_vma *anon_vma; |
339 | int error; |
340 | |
341 | /* Don't bother if the parent process has no anon_vma here. */ |
342 | if (!pvma->anon_vma) |
343 | return 0; |
344 | |
345 | /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ |
346 | vma->anon_vma = NULL; |
347 | |
348 | /* |
349 | * First, attach the new VMA to the parent VMA's anon_vmas, |
350 | * so rmap can find non-COWed pages in child processes. |
351 | */ |
352 | error = anon_vma_clone(vma, pvma); |
353 | if (error) |
354 | return error; |
355 | |
356 | /* An existing anon_vma has been reused, all done then. */ |
357 | if (vma->anon_vma) |
358 | return 0; |
359 | |
360 | /* Then add our own anon_vma. */ |
361 | anon_vma = anon_vma_alloc(); |
362 | if (!anon_vma) |
363 | goto out_error; |
364 | anon_vma->num_active_vmas++; |
365 | avc = anon_vma_chain_alloc(GFP_KERNEL); |
366 | if (!avc) |
367 | goto out_error_free_anon_vma; |
368 | |
369 | /* |
370 | * The root anon_vma's rwsem is the lock actually used when we |
371 | * lock any of the anon_vmas in this anon_vma tree. |
372 | */ |
373 | anon_vma->root = pvma->anon_vma->root; |
374 | anon_vma->parent = pvma->anon_vma; |
375 | /* |
376 | * With refcounts, an anon_vma can stay around longer than the |
377 | * process it belongs to. The root anon_vma needs to be pinned until |
378 | * this anon_vma is freed, because the lock lives in the root. |
379 | */ |
380 | get_anon_vma(anon_vma->root); |
381 | /* Mark this anon_vma as the one where our new (COWed) pages go. */ |
382 | vma->anon_vma = anon_vma; |
383 | anon_vma_lock_write(anon_vma); |
384 | anon_vma_chain_link(vma, avc, anon_vma); |
385 | anon_vma->parent->num_children++; |
386 | anon_vma_unlock_write(anon_vma); |
387 | |
388 | return 0; |
389 | |
390 | out_error_free_anon_vma: |
391 | put_anon_vma(anon_vma); |
392 | out_error: |
393 | unlink_anon_vmas(vma); |
394 | return -ENOMEM; |
395 | } |
396 | |
397 | void unlink_anon_vmas(struct vm_area_struct *vma) |
398 | { |
399 | struct anon_vma_chain *avc, *next; |
400 | struct anon_vma *root = NULL; |
401 | |
402 | /* |
403 | * Unlink each anon_vma chained to the VMA. This list is ordered |
404 | * from newest to oldest, ensuring the root anon_vma gets freed last. |
405 | */ |
406 | list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { |
407 | struct anon_vma *anon_vma = avc->anon_vma; |
408 | |
409 | root = lock_anon_vma_root(root, anon_vma); |
410 | anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); |
411 | |
412 | /* |
413 | * Leave empty anon_vmas on the list - we'll need |
414 | * to free them outside the lock. |
415 | */ |
416 | if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { |
417 | anon_vma->parent->num_children--; |
418 | continue; |
419 | } |
420 | |
421 | list_del(&avc->same_vma); |
422 | anon_vma_chain_free(avc); |
423 | } |
424 | if (vma->anon_vma) { |
425 | vma->anon_vma->num_active_vmas--; |
426 | |
427 | /* |
428 | * The vma is still needed after unlink; a new anon_vma will be prepared |
429 | * when a fault is next handled. |
430 | */ |
431 | vma->anon_vma = NULL; |
432 | } |
433 | unlock_anon_vma_root(root); |
434 | |
435 | /* |
436 | * Iterate the list once more, it now only contains empty and unlinked |
437 | * anon_vmas, destroy them. Could not do before due to __put_anon_vma() |
438 | * needing to write-acquire the anon_vma->root->rwsem. |
439 | */ |
440 | list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { |
441 | struct anon_vma *anon_vma = avc->anon_vma; |
442 | |
443 | VM_WARN_ON(anon_vma->num_children); |
444 | VM_WARN_ON(anon_vma->num_active_vmas); |
445 | put_anon_vma(anon_vma); |
446 | |
447 | list_del(&avc->same_vma); |
448 | anon_vma_chain_free(avc); |
449 | } |
450 | } |
451 | |
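/*
 * Slab constructor for anon_vma objects: initialise the rwsem, the
 * refcount and the interval tree root of each object.
 */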
452 | static void anon_vma_ctor(void *data) |
453 | { |
454 | struct anon_vma *anon_vma = data; |
455 | |
456 | init_rwsem(&anon_vma->rwsem); |
457 | atomic_set(&anon_vma->refcount, 0); |
458 | anon_vma->rb_root = RB_ROOT_CACHED; |
459 | } |
460 | |
461 | void __init anon_vma_init(void) |
462 | { |
463 | anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), |
464 | 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, |
465 | anon_vma_ctor); |
466 | anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, |
467 | SLAB_PANIC|SLAB_ACCOUNT); |
468 | } |
469 | |
470 | /* |
471 | * Getting a lock on a stable anon_vma from a page off the LRU is tricky! |
472 | * |
473 | * Since there is no serialization whatsoever against page_remove_rmap(), |
474 | * the best this function can do is return a refcount increased anon_vma |
475 | * that might have been relevant to this page. |
476 | * |
477 | * The page might have been remapped to a different anon_vma or the anon_vma |
478 | * returned may already be freed (and even reused). |
479 | * |
480 | * In case it was remapped to a different anon_vma, the new anon_vma will be a |
481 | * child of the old anon_vma, and the anon_vma lifetime rules will therefore |
482 | * ensure that any anon_vma obtained from the page will still be valid for as |
483 | * long as we observe page_mapped() [ hence all those page_mapped() tests ]. |
484 | * |
485 | * All users of this function must be very careful when walking the anon_vma |
486 | * chain and verify that the page in question is indeed mapped in it |
487 | * [ something equivalent to page_mapped_in_vma() ]. |
488 | * |
489 | * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from |
490 | * page_remove_rmap() that the anon_vma pointer from page->mapping is valid |
491 | * if there is a mapcount, we can dereference the anon_vma after observing |
492 | * those. |
493 | */ |
494 | struct anon_vma *folio_get_anon_vma(struct folio *folio) |
495 | { |
496 | struct anon_vma *anon_vma = NULL; |
497 | unsigned long anon_mapping; |
498 | |
499 | rcu_read_lock(); |
500 | anon_mapping = (unsigned long)READ_ONCE(folio->mapping); |
501 | if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) |
502 | goto out; |
503 | if (!folio_mapped(folio)) |
504 | goto out; |
505 | |
506 | anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); |
507 | if (!atomic_inc_not_zero(&anon_vma->refcount)) { |
508 | anon_vma = NULL; |
509 | goto out; |
510 | } |
511 | |
512 | /* |
513 | * If this folio is still mapped, then its anon_vma cannot have been |
514 | * freed. But if it has been unmapped, we have no security against the |
515 | * anon_vma structure being freed and reused (for another anon_vma: |
516 | * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() |
517 | * above cannot corrupt). |
518 | */ |
519 | if (!folio_mapped(folio)) { |
520 | rcu_read_unlock(); |
521 | put_anon_vma(anon_vma); |
522 | return NULL; |
523 | } |
524 | out: |
525 | rcu_read_unlock(); |
526 | |
527 | return anon_vma; |
528 | } |
529 | |
530 | /* |
531 | * Similar to folio_get_anon_vma() except it locks the anon_vma. |
532 | * |
533 | * It's a little more complex as it tries to keep the fast path to a single |
534 | * atomic op -- the trylock. If we fail the trylock, we fall back to getting a |
535 | * reference like with folio_get_anon_vma() and then block on the mutex |
536 | * on !rwc->try_lock case. |
537 | */ |
538 | struct anon_vma *folio_lock_anon_vma_read(struct folio *folio, |
539 | struct rmap_walk_control *rwc) |
540 | { |
541 | struct anon_vma *anon_vma = NULL; |
542 | struct anon_vma *root_anon_vma; |
543 | unsigned long anon_mapping; |
544 | |
545 | rcu_read_lock(); |
546 | anon_mapping = (unsigned long)READ_ONCE(folio->mapping); |
547 | if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) |
548 | goto out; |
549 | if (!folio_mapped(folio)) |
550 | goto out; |
551 | |
552 | anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); |
553 | root_anon_vma = READ_ONCE(anon_vma->root); |
554 | if (down_read_trylock(&root_anon_vma->rwsem)) { |
555 | /* |
556 | * If the folio is still mapped, then this anon_vma is still |
557 | * its anon_vma, and holding the mutex ensures that it will |
558 | * not go away, see anon_vma_free(). |
559 | */ |
560 | if (!folio_mapped(folio)) { |
561 | up_read(&root_anon_vma->rwsem); |
562 | anon_vma = NULL; |
563 | } |
564 | goto out; |
565 | } |
566 | |
567 | if (rwc && rwc->try_lock) { |
568 | anon_vma = NULL; |
569 | rwc->contended = true; |
570 | goto out; |
571 | } |
572 | |
573 | /* trylock failed, we got to sleep */ |
574 | if (!atomic_inc_not_zero(&anon_vma->refcount)) { |
575 | anon_vma = NULL; |
576 | goto out; |
577 | } |
578 | |
579 | if (!folio_mapped(folio)) { |
580 | rcu_read_unlock(); |
581 | put_anon_vma(anon_vma); |
582 | return NULL; |
583 | } |
584 | |
585 | /* we pinned the anon_vma, it's safe to sleep */ |
586 | rcu_read_unlock(); |
587 | anon_vma_lock_read(anon_vma); |
588 | |
589 | if (atomic_dec_and_test(&anon_vma->refcount)) { |
590 | /* |
591 | * Oops, we held the last refcount, release the lock |
592 | * and bail -- can't simply use put_anon_vma() because |
593 | * we'll deadlock on the anon_vma_lock_write() recursion. |
594 | */ |
595 | anon_vma_unlock_read(anon_vma); |
596 | __put_anon_vma(anon_vma); |
597 | anon_vma = NULL; |
598 | } |
599 | |
600 | return anon_vma; |
601 | |
602 | out: |
603 | rcu_read_unlock(); |
604 | return anon_vma; |
605 | } |
606 | |
607 | #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH |
608 | /* |
609 | * Flush TLB entries for recently unmapped pages from remote CPUs. It is |
610 | * important if a PTE was dirty when it was unmapped that it's flushed |
611 | * before any IO is initiated on the page to prevent lost writes. Similarly, |
612 | * it must be flushed before freeing to prevent data leakage. |
613 | */ |
614 | void try_to_unmap_flush(void) |
615 | { |
616 | struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; |
617 | |
618 | if (!tlb_ubc->flush_required) |
619 | return; |
620 | |
621 | arch_tlbbatch_flush(&tlb_ubc->arch); |
622 | tlb_ubc->flush_required = false; |
623 | tlb_ubc->writable = false; |
624 | } |
625 | |
626 | /* Flush iff there are potentially writable TLB entries that can race with IO */ |
627 | void try_to_unmap_flush_dirty(void) |
628 | { |
629 | struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; |
630 | |
631 | if (tlb_ubc->writable) |
632 | try_to_unmap_flush(); |
633 | } |
634 | |
635 | /* |
636 | * Bits 0-14 of mm->tlb_flush_batched record pending generations. |
637 | * Bits 16-30 of mm->tlb_flush_batched record flushed generations. |
638 | */ |
639 | #define TLB_FLUSH_BATCH_FLUSHED_SHIFT 16 |
640 | #define TLB_FLUSH_BATCH_PENDING_MASK \ |
641 | ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1) |
642 | #define TLB_FLUSH_BATCH_PENDING_LARGE \ |
643 | (TLB_FLUSH_BATCH_PENDING_MASK / 2) |
644 | |
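/*
 * Queue a deferred TLB flush for a PTE that is being cleared: record the
 * address in the current task's flush batch and bump the mm's pending
 * generation so flush_tlb_batched_pending() can detect outstanding flushes.
 */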
645 | static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, |
646 | unsigned long uaddr) |
647 | { |
648 | struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; |
649 | int batch; |
650 | bool writable = pte_dirty(pteval); |
651 | |
652 | if (!pte_accessible(mm, pteval)) |
653 | return; |
654 | |
655 | arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr); |
656 | tlb_ubc->flush_required = true; |
657 | |
658 | /* |
659 | * Ensure compiler does not re-order the setting of tlb_flush_batched |
660 | * before the PTE is cleared. |
661 | */ |
662 | barrier(); |
663 | batch = atomic_read(&mm->tlb_flush_batched); |
664 | retry: |
665 | if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) { |
666 | /* |
667 | * Prevent `pending' from catching up with `flushed' because of |
668 | * overflow. Reset `pending' and `flushed' to be 1 and 0 if |
669 | * `pending' becomes large. |
670 | */ |
671 | if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1)) |
672 | goto retry; |
673 | } else { |
674 | atomic_inc(&mm->tlb_flush_batched); |
675 | } |
676 | |
677 | /* |
678 | * If the PTE was dirty then it's best to assume it's writable. The |
679 | * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() |
680 | * before the page is queued for IO. |
681 | */ |
682 | if (writable) |
683 | tlb_ubc->writable = true; |
684 | } |
685 | |
686 | /* |
687 | * Returns true if the TLB flush should be deferred to the end of a batch of |
688 | * unmap operations to reduce IPIs. |
689 | */ |
690 | static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) |
691 | { |
692 | if (!(flags & TTU_BATCH_FLUSH)) |
693 | return false; |
694 | |
695 | return arch_tlbbatch_should_defer(mm); |
696 | } |
697 | |
698 | /* |
699 | * Reclaim unmaps pages under the PTL but does not flush the TLB prior to |
700 | * releasing the PTL if TLB flushes are batched. It's possible for a parallel |
701 | * operation such as mprotect or munmap to race between reclaim unmapping |
702 | * the page and flushing the page. If this race occurs, it potentially allows |
703 | * access to data via a stale TLB entry. Tracking all mm's that have TLB |
704 | * batching in flight would be expensive during reclaim so instead track |
705 | * whether TLB batching occurred in the past and if so then do a flush here |
706 | * if required. This will cost one additional flush per reclaim cycle paid |
707 | * by the first operation at risk such as mprotect and munmap. |
708 | * |
709 | * This must be called under the PTL so that an access to tlb_flush_batched |
710 | * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise |
711 | * via the PTL. |
712 | */ |
713 | void flush_tlb_batched_pending(struct mm_struct *mm) |
714 | { |
715 | int batch = atomic_read(&mm->tlb_flush_batched); |
716 | int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK; |
717 | int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT; |
718 | |
719 | if (pending != flushed) { |
720 | arch_flush_tlb_batched_pending(mm); |
721 | /* |
722 | * If new TLB flushes became pending while we were flushing, leave |
723 | * mm->tlb_flush_batched as is, so that those flushes are not lost. |
724 | */ |
725 | atomic_cmpxchg(&mm->tlb_flush_batched, batch, |
726 | pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT)); |
727 | } |
728 | } |
729 | #else |
730 | static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, |
731 | unsigned long uaddr) |
732 | { |
733 | } |
734 | |
735 | static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) |
736 | { |
737 | return false; |
738 | } |
739 | #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ |
740 | |
741 | /* |
742 | * At what user virtual address is page expected in vma? |
743 | * Caller should check the page is actually part of the vma. |
744 | */ |
745 | unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) |
746 | { |
747 | struct folio *folio = page_folio(page); |
748 | if (folio_test_anon(folio)) { |
749 | struct anon_vma *page__anon_vma = folio_anon_vma(folio); |
750 | /* |
751 | * Note: swapoff's unuse_vma() is more efficient with this |
752 | * check, and needs it to match anon_vma when KSM is active. |
753 | */ |
754 | if (!vma->anon_vma || !page__anon_vma || |
755 | vma->anon_vma->root != page__anon_vma->root) |
756 | return -EFAULT; |
757 | } else if (!vma->vm_file) { |
758 | return -EFAULT; |
759 | } else if (vma->vm_file->f_mapping != folio->mapping) { |
760 | return -EFAULT; |
761 | } |
762 | |
763 | return vma_address(page, vma); |
764 | } |
765 | |
766 | /* |
767 | * Returns the actual pmd_t* where we expect 'address' to be mapped from, or |
768 | * NULL if it doesn't exist. No guarantees / checks on what the pmd_t* |
769 | * represents. |
770 | */ |
771 | pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) |
772 | { |
773 | pgd_t *pgd; |
774 | p4d_t *p4d; |
775 | pud_t *pud; |
776 | pmd_t *pmd = NULL; |
777 | |
778 | pgd = pgd_offset(mm, address); |
779 | if (!pgd_present(*pgd)) |
780 | goto out; |
781 | |
782 | p4d = p4d_offset(pgd, address); |
783 | if (!p4d_present(*p4d)) |
784 | goto out; |
785 | |
786 | pud = pud_offset(p4d, address); |
787 | if (!pud_present(*pud)) |
788 | goto out; |
789 | |
790 | pmd = pmd_offset(pud, address); |
791 | out: |
792 | return pmd; |
793 | } |
794 | |
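/*
 * State shared between folio_referenced() and each folio_referenced_one()
 * call made during the rmap walk.
 */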
795 | struct folio_referenced_arg { |
796 | int mapcount; |
797 | int referenced; |
798 | unsigned long vm_flags; |
799 | struct mem_cgroup *memcg; |
800 | }; |
801 | |
802 | /* |
803 | * arg: folio_referenced_arg will be passed |
804 | */ |
805 | static bool folio_referenced_one(struct folio *folio, |
806 | struct vm_area_struct *vma, unsigned long address, void *arg) |
807 | { |
808 | struct folio_referenced_arg *pra = arg; |
809 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
810 | int referenced = 0; |
811 | unsigned long start = address, ptes = 0; |
812 | |
813 | while (page_vma_mapped_walk(&pvmw)) { |
814 | address = pvmw.address; |
815 | |
816 | if (vma->vm_flags & VM_LOCKED) { |
817 | if (!folio_test_large(folio) || !pvmw.pte) { |
818 | /* Restore the mlock which got missed */ |
819 | mlock_vma_folio(folio, vma); |
820 | page_vma_mapped_walk_done(&pvmw); |
821 | pra->vm_flags |= VM_LOCKED; |
822 | return false; /* To break the loop */ |
823 | } |
824 | /* |
825 | * A large folio that is fully mapped to the VMA is |
826 | * handled after the pvmw loop. |
827 | * |
828 | * A large folio that crosses VMA boundaries is left |
829 | * to page reclaim, but references to its pages that |
830 | * lie within the VM_LOCKED vma must be skipped: page |
831 | * reclaim should only count references to the pages |
832 | * outside the VM_LOCKED range. |
834 | */ |
835 | ptes++; |
836 | pra->mapcount--; |
837 | continue; |
838 | } |
839 | |
840 | if (pvmw.pte) { |
841 | if (lru_gen_enabled() && |
842 | pte_young(ptep_get(pvmw.pte))) { |
843 | lru_gen_look_around(&pvmw); |
844 | referenced++; |
845 | } |
846 | |
847 | if (ptep_clear_flush_young_notify(vma, address, |
848 | pvmw.pte)) |
849 | referenced++; |
850 | } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { |
851 | if (pmdp_clear_flush_young_notify(vma, address, |
852 | pvmw.pmd)) |
853 | referenced++; |
854 | } else { |
855 | /* unexpected pmd-mapped folio? */ |
856 | WARN_ON_ONCE(1); |
857 | } |
858 | |
859 | pra->mapcount--; |
860 | } |
861 | |
862 | if ((vma->vm_flags & VM_LOCKED) && |
863 | folio_test_large(folio) && |
864 | folio_within_vma(folio, vma)) { |
865 | unsigned long s_align, e_align; |
866 | |
867 | s_align = ALIGN_DOWN(start, PMD_SIZE); |
868 | e_align = ALIGN_DOWN(start + folio_size(folio) - 1, PMD_SIZE); |
869 | |
870 | /* folio doesn't cross a page table boundary and is fully mapped */ |
871 | if ((s_align == e_align) && (ptes == folio_nr_pages(folio))) { |
872 | /* Restore the mlock which got missed */ |
873 | mlock_vma_folio(folio, vma); |
874 | pra->vm_flags |= VM_LOCKED; |
875 | return false; /* To break the loop */ |
876 | } |
877 | } |
878 | |
879 | if (referenced) |
880 | folio_clear_idle(folio); |
881 | if (folio_test_clear_young(folio)) |
882 | referenced++; |
883 | |
884 | if (referenced) { |
885 | pra->referenced++; |
886 | pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; |
887 | } |
888 | |
889 | if (!pra->mapcount) |
890 | return false; /* To break the loop */ |
891 | |
892 | return true; |
893 | } |
894 | |
895 | static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) |
896 | { |
897 | struct folio_referenced_arg *pra = arg; |
898 | struct mem_cgroup *memcg = pra->memcg; |
899 | |
900 | /* |
901 | * Ignore references from this mapping if it has no recency. If the |
902 | * folio has been used in another mapping, we will catch it; if this |
903 | * other mapping is already gone, the unmap path will have set the |
904 | * referenced flag or activated the folio in zap_pte_range(). |
905 | */ |
906 | if (!vma_has_recency(vma)) |
907 | return true; |
908 | |
909 | /* |
910 | * If we are reclaiming on behalf of a cgroup, skip counting on behalf |
911 | * of references from different cgroups. |
912 | */ |
913 | if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) |
914 | return true; |
915 | |
916 | return false; |
917 | } |
918 | |
919 | /** |
920 | * folio_referenced() - Test if the folio was referenced. |
921 | * @folio: The folio to test. |
922 | * @is_locked: Caller holds lock on the folio. |
923 | * @memcg: target memory cgroup |
924 | * @vm_flags: A combination of all the vma->vm_flags which referenced the folio. |
925 | * |
926 | * Quick test_and_clear_referenced for all mappings of a folio, |
927 | * |
928 | * Return: The number of mappings which referenced the folio. Return -1 if |
929 | * the function bailed out due to rmap lock contention. |
930 | */ |
931 | int folio_referenced(struct folio *folio, int is_locked, |
932 | struct mem_cgroup *memcg, unsigned long *vm_flags) |
933 | { |
934 | int we_locked = 0; |
935 | struct folio_referenced_arg pra = { |
936 | .mapcount = folio_mapcount(folio), |
937 | .memcg = memcg, |
938 | }; |
939 | struct rmap_walk_control rwc = { |
940 | .rmap_one = folio_referenced_one, |
941 | .arg = (void *)&pra, |
942 | .anon_lock = folio_lock_anon_vma_read, |
943 | .try_lock = true, |
944 | .invalid_vma = invalid_folio_referenced_vma, |
945 | }; |
946 | |
947 | *vm_flags = 0; |
948 | if (!pra.mapcount) |
949 | return 0; |
950 | |
951 | if (!folio_raw_mapping(folio)) |
952 | return 0; |
953 | |
954 | if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) { |
955 | we_locked = folio_trylock(folio); |
956 | if (!we_locked) |
957 | return 1; |
958 | } |
959 | |
960 | rmap_walk(folio, &rwc); |
961 | *vm_flags = pra.vm_flags; |
962 | |
963 | if (we_locked) |
964 | folio_unlock(folio); |
965 | |
966 | return rwc.contended ? -1 : pra.referenced; |
967 | } |
968 | |
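/*
 * Walk the PTEs (or PMD) mapping the range described by @pvmw, write-protect
 * and clean any writable or dirty entries, and return how many were cleaned.
 */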
969 | static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) |
970 | { |
971 | int cleaned = 0; |
972 | struct vm_area_struct *vma = pvmw->vma; |
973 | struct mmu_notifier_range range; |
974 | unsigned long address = pvmw->address; |
975 | |
976 | /* |
977 | * We have to assume the worst case, i.e. pmd, for invalidation. Note that |
978 | * the folio cannot be freed from this function. |
979 | */ |
980 | mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0, |
981 | vma->vm_mm, address, vma_address_end(pvmw)); |
982 | mmu_notifier_invalidate_range_start(&range); |
983 | |
984 | while (page_vma_mapped_walk(pvmw)) { |
985 | int ret = 0; |
986 | |
987 | address = pvmw->address; |
988 | if (pvmw->pte) { |
989 | pte_t *pte = pvmw->pte; |
990 | pte_t entry = ptep_get(pte); |
991 | |
992 | if (!pte_dirty(entry) && !pte_write(entry)) |
993 | continue; |
994 | |
995 | flush_cache_page(vma, address, pte_pfn(entry)); |
996 | entry = ptep_clear_flush(vma, address, pte); |
997 | entry = pte_wrprotect(entry); |
998 | entry = pte_mkclean(entry); |
999 | set_pte_at(vma->vm_mm, address, pte, entry); |
1000 | ret = 1; |
1001 | } else { |
1002 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1003 | pmd_t *pmd = pvmw->pmd; |
1004 | pmd_t entry; |
1005 | |
1006 | if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) |
1007 | continue; |
1008 | |
1009 | flush_cache_range(vma, address, |
1010 | address + HPAGE_PMD_SIZE); |
1011 | entry = pmdp_invalidate(vma, address, pmd); |
1012 | entry = pmd_wrprotect(entry); |
1013 | entry = pmd_mkclean(entry); |
1014 | set_pmd_at(vma->vm_mm, address, pmd, entry); |
1015 | ret = 1; |
1016 | #else |
1017 | /* unexpected pmd-mapped folio? */ |
1018 | WARN_ON_ONCE(1); |
1019 | #endif |
1020 | } |
1021 | |
1022 | if (ret) |
1023 | cleaned++; |
1024 | } |
1025 | |
1026 | mmu_notifier_invalidate_range_end(&range); |
1027 | |
1028 | return cleaned; |
1029 | } |
1030 | |
1031 | static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, |
1032 | unsigned long address, void *arg) |
1033 | { |
1034 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); |
1035 | int *cleaned = arg; |
1036 | |
1037 | *cleaned += page_vma_mkclean_one(&pvmw); |
1038 | |
1039 | return true; |
1040 | } |
1041 | |
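/* rmap walk callback used by folio_mkclean(): skip VMAs that are not shared. */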
1042 | static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) |
1043 | { |
1044 | if (vma->vm_flags & VM_SHARED) |
1045 | return false; |
1046 | |
1047 | return true; |
1048 | } |
1049 | |
1050 | int folio_mkclean(struct folio *folio) |
1051 | { |
1052 | int cleaned = 0; |
1053 | struct address_space *mapping; |
1054 | struct rmap_walk_control rwc = { |
1055 | .arg = (void *)&cleaned, |
1056 | .rmap_one = page_mkclean_one, |
1057 | .invalid_vma = invalid_mkclean_vma, |
1058 | }; |
1059 | |
1060 | BUG_ON(!folio_test_locked(folio)); |
1061 | |
1062 | if (!folio_mapped(folio)) |
1063 | return 0; |
1064 | |
1065 | mapping = folio_mapping(folio); |
1066 | if (!mapping) |
1067 | return 0; |
1068 | |
1069 | rmap_walk(folio, &rwc); |
1070 | |
1071 | return cleaned; |
1072 | } |
1073 | EXPORT_SYMBOL_GPL(folio_mkclean); |
1074 | |
1075 | /** |
1076 | * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of |
1077 | * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff) |
1078 | * within the @vma of shared mappings. And since clean PTEs |
1079 | * should also be readonly, write protects them too. |
1080 | * @pfn: start pfn. |
1081 | * @nr_pages: number of physically contiguous pages starting with @pfn. |
1082 | * @pgoff: page offset that the @pfn mapped with. |
1083 | * @vma: vma that @pfn mapped within. |
1084 | * |
1085 | * Returns the number of cleaned PTEs (including PMDs). |
1086 | */ |
1087 | int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, |
1088 | struct vm_area_struct *vma) |
1089 | { |
1090 | struct page_vma_mapped_walk pvmw = { |
1091 | .pfn = pfn, |
1092 | .nr_pages = nr_pages, |
1093 | .pgoff = pgoff, |
1094 | .vma = vma, |
1095 | .flags = PVMW_SYNC, |
1096 | }; |
1097 | |
1098 | if (invalid_mkclean_vma(vma, NULL)) |
1099 | return 0; |
1100 | |
1101 | pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma); |
1102 | VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); |
1103 | |
1104 | return page_vma_mkclean_one(&pvmw); |
1105 | } |
1106 | |
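/*
 * Return the total number of times the folio is mapped: its entire mapcount
 * plus the per-page _mapcount of every page mapped by PTE.
 */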
1107 | int folio_total_mapcount(struct folio *folio) |
1108 | { |
1109 | int mapcount = folio_entire_mapcount(folio); |
1110 | int nr_pages; |
1111 | int i; |
1112 | |
1113 | /* In the common case, avoid the loop when no pages mapped by PTE */ |
1114 | if (folio_nr_pages_mapped(folio) == 0) |
1115 | return mapcount; |
1116 | /* |
1117 | * Add all the PTE mappings of those pages mapped by PTE. |
1118 | * Limit the loop to folio_nr_pages_mapped()? |
1119 | * Perhaps: given all the raciness, that may be a good or a bad idea. |
1120 | */ |
1121 | nr_pages = folio_nr_pages(folio); |
1122 | for (i = 0; i < nr_pages; i++) |
1123 | mapcount += atomic_read(&folio_page(folio, i)->_mapcount); |
1124 | |
1125 | /* But each of those _mapcounts was based on -1 */ |
1126 | mapcount += nr_pages; |
1127 | return mapcount; |
1128 | } |
1129 | |
1130 | /** |
1131 | * folio_move_anon_rmap - move a folio to our anon_vma |
1132 | * @folio: The folio to move to our anon_vma |
1133 | * @vma: The vma the folio belongs to |
1134 | * |
1135 | * When a folio belongs exclusively to one process after a COW event, |
1136 | * that folio can be moved into the anon_vma that belongs to just that |
1137 | * process, so the rmap code will not search the parent or sibling processes. |
1138 | */ |
1139 | void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma) |
1140 | { |
1141 | void *anon_vma = vma->anon_vma; |
1142 | |
1143 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
1144 | VM_BUG_ON_VMA(!anon_vma, vma); |
1145 | |
1146 | anon_vma += PAGE_MAPPING_ANON; |
1147 | /* |
1148 | * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written |
1149 | * simultaneously, so a concurrent reader (eg folio_referenced()'s |
1150 | * folio_test_anon()) will not see one without the other. |
1151 | */ |
1152 | WRITE_ONCE(folio->mapping, anon_vma); |
1153 | } |
1154 | |
1155 | /** |
1156 | * __folio_set_anon - set up a new anonymous rmap for a folio |
1157 | * @folio: The folio to set up the new anonymous rmap for. |
1158 | * @vma: VM area to add the folio to. |
1159 | * @address: User virtual address of the mapping |
1160 | * @exclusive: Whether the folio is exclusive to the process. |
1161 | */ |
1162 | static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, |
1163 | unsigned long address, bool exclusive) |
1164 | { |
1165 | struct anon_vma *anon_vma = vma->anon_vma; |
1166 | |
1167 | BUG_ON(!anon_vma); |
1168 | |
1169 | /* |
1170 | * If the folio isn't exclusive to this vma, we must use the _oldest_ |
1171 | * possible anon_vma for the folio mapping! |
1172 | */ |
1173 | if (!exclusive) |
1174 | anon_vma = anon_vma->root; |
1175 | |
1176 | /* |
1177 | * page_idle does a lockless/optimistic rmap scan on folio->mapping. |
1178 | * Make sure the compiler doesn't split the stores of anon_vma and |
1179 | * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code |
1180 | * could mistake the mapping for a struct address_space and crash. |
1181 | */ |
1182 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; |
1183 | WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); |
1184 | folio->index = linear_page_index(vma, address); |
1185 | } |
1186 | |
1187 | /** |
1188 | * __page_check_anon_rmap - sanity check anonymous rmap addition |
1189 | * @folio: The folio containing @page. |
1190 | * @page: the page to check the mapping of |
1191 | * @vma: the vm area in which the mapping is added |
1192 | * @address: the user virtual address mapped |
1193 | */ |
1194 | static void __page_check_anon_rmap(struct folio *folio, struct page *page, |
1195 | struct vm_area_struct *vma, unsigned long address) |
1196 | { |
1197 | /* |
1198 | * The page's anon-rmap details (mapping and index) are guaranteed to |
1199 | * be set up correctly at this point. |
1200 | * |
1201 | * We have exclusion against page_add_anon_rmap because the caller |
1202 | * always holds the page locked. |
1203 | * |
1204 | * We have exclusion against page_add_new_anon_rmap because those pages |
1205 | * are initially only visible via the pagetables, and the pte is locked |
1206 | * over the call to page_add_new_anon_rmap. |
1207 | */ |
1208 | VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, |
1209 | folio); |
1210 | VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), |
1211 | page); |
1212 | } |
1213 | |
1214 | /** |
1215 | * page_add_anon_rmap - add pte mapping to an anonymous page |
1216 | * @page: the page to add the mapping to |
1217 | * @vma: the vm area in which the mapping is added |
1218 | * @address: the user virtual address mapped |
1219 | * @flags: the rmap flags |
1220 | * |
1221 | * The caller needs to hold the pte lock, and the page must be locked in |
1222 | * the anon_vma case: to serialize mapping,index checking after setting, |
1223 | * and to ensure that PageAnon is not being upgraded racily to PageKsm |
1224 | * (but PageKsm is never downgraded to PageAnon). |
1225 | */ |
1226 | void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, |
1227 | unsigned long address, rmap_t flags) |
1228 | { |
1229 | struct folio *folio = page_folio(page); |
1230 | atomic_t *mapped = &folio->_nr_pages_mapped; |
1231 | int nr = 0, nr_pmdmapped = 0; |
1232 | bool compound = flags & RMAP_COMPOUND; |
1233 | bool first; |
1234 | |
1235 | /* Is page being mapped by PTE? Is this its first map to be added? */ |
1236 | if (likely(!compound)) { |
1237 | first = atomic_inc_and_test(&page->_mapcount); |
1238 | nr = first; |
1239 | if (first && folio_test_large(folio)) { |
1240 | nr = atomic_inc_return_relaxed(mapped); |
1241 | nr = (nr < COMPOUND_MAPPED); |
1242 | } |
1243 | } else if (folio_test_pmd_mappable(folio)) { |
1244 | /* That test is redundant: it's for safety or to optimize out */ |
1245 | |
1246 | first = atomic_inc_and_test(&folio->_entire_mapcount); |
1247 | if (first) { |
1248 | nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); |
1249 | if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { |
1250 | nr_pmdmapped = folio_nr_pages(folio); |
1251 | nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); |
1252 | /* Raced ahead of a remove and another add? */ |
1253 | if (unlikely(nr < 0)) |
1254 | nr = 0; |
1255 | } else { |
1256 | /* Raced ahead of a remove of COMPOUND_MAPPED */ |
1257 | nr = 0; |
1258 | } |
1259 | } |
1260 | } |
1261 | |
1262 | if (nr_pmdmapped) |
1263 | __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped); |
1264 | if (nr) |
1265 | __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); |
1266 | |
1267 | if (unlikely(!folio_test_anon(folio))) { |
1268 | VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); |
1269 | /* |
1270 | * For a PTE-mapped large folio, we only know that the single |
1271 | * PTE is exclusive. Further, __folio_set_anon() might not get |
1272 | * folio->index right when not given the address of the head |
1273 | * page. |
1274 | */ |
1275 | VM_WARN_ON_FOLIO(folio_test_large(folio) && !compound, folio); |
1276 | __folio_set_anon(folio, vma, address, |
1277 | !!(flags & RMAP_EXCLUSIVE)); |
1278 | } else if (likely(!folio_test_ksm(folio))) { |
1279 | __page_check_anon_rmap(folio, page, vma, address); |
1280 | } |
1281 | if (flags & RMAP_EXCLUSIVE) |
1282 | SetPageAnonExclusive(page); |
1283 | /* While PTE-mapping a THP we have a PMD and a PTE mapping. */ |
1284 | VM_WARN_ON_FOLIO((atomic_read(&page->_mapcount) > 0 || |
1285 | (folio_test_large(folio) && folio_entire_mapcount(folio) > 1)) && |
1286 | PageAnonExclusive(page), folio); |
1287 | |
1288 | /* |
1289 | * For a large folio, only mlock it if it's fully mapped to the VMA. It's |
1290 | * not easy to check that here, so only mlock normal 4K folios and leave |
1291 | * large folios to be handled by page reclaim. |
1293 | */ |
1294 | if (!folio_test_large(folio)) |
1295 | mlock_vma_folio(folio, vma); |
1296 | } |
1297 | |
1298 | /** |
1299 | * folio_add_new_anon_rmap - Add mapping to a new anonymous folio. |
1300 | * @folio: The folio to add the mapping to. |
1301 | * @vma: the vm area in which the mapping is added |
1302 | * @address: the user virtual address mapped |
1303 | * |
1304 | * Like page_add_anon_rmap() but must only be called on *new* folios. |
1305 | * This means the inc-and-test can be bypassed. |
1306 | * The folio does not have to be locked. |
1307 | * |
1308 | * If the folio is large, it is accounted as a THP. As the folio |
1309 | * is new, it's assumed to be mapped exclusively by a single process. |
1310 | */ |
1311 | void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, |
1312 | unsigned long address) |
1313 | { |
1314 | int nr; |
1315 | |
1316 | VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); |
1317 | __folio_set_swapbacked(folio); |
1318 | |
1319 | if (likely(!folio_test_pmd_mappable(folio))) { |
1320 | /* increment count (starts at -1) */ |
1321 | atomic_set(&folio->_mapcount, 0); |
1322 | nr = 1; |
1323 | } else { |
1324 | /* increment count (starts at -1) */ |
1325 | atomic_set(&folio->_entire_mapcount, 0); |
1326 | atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED); |
1327 | nr = folio_nr_pages(folio); |
1328 | __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr); |
1329 | } |
1330 | |
1331 | __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); |
1332 | __folio_set_anon(folio, vma, address, true); |
1333 | SetPageAnonExclusive(&folio->page); |
1334 | } |
1335 | |
1336 | /** |
1337 | * folio_add_file_rmap_range - add pte mapping to page range of a folio |
1338 | * @folio: The folio to add the mapping to |
1339 | * @page: The first page to add |
1340 | * @nr_pages: The number of pages which will be mapped |
1341 | * @vma: the vm area in which the mapping is added |
1342 | * @compound: charge the page as compound or small page |
1343 | * |
1344 | * The page range of folio is defined by [first_page, first_page + nr_pages) |
1345 | * |
1346 | * The caller needs to hold the pte lock. |
1347 | */ |
1348 | void folio_add_file_rmap_range(struct folio *folio, struct page *page, |
1349 | unsigned int nr_pages, struct vm_area_struct *vma, |
1350 | bool compound) |
1351 | { |
1352 | atomic_t *mapped = &folio->_nr_pages_mapped; |
1353 | unsigned int nr_pmdmapped = 0, first; |
1354 | int nr = 0; |
1355 | |
1356 | VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio); |
1357 | |
1358 | /* Is page being mapped by PTE? Is this its first map to be added? */ |
1359 | if (likely(!compound)) { |
1360 | do { |
1361 | first = atomic_inc_and_test(&page->_mapcount); |
1362 | if (first && folio_test_large(folio)) { |
1363 | first = atomic_inc_return_relaxed(mapped); |
1364 | first = (first < COMPOUND_MAPPED); |
1365 | } |
1366 | |
1367 | if (first) |
1368 | nr++; |
1369 | } while (page++, --nr_pages > 0); |
1370 | } else if (folio_test_pmd_mappable(folio)) { |
1371 | /* That test is redundant: it's for safety or to optimize out */ |
1372 | |
1373 | first = atomic_inc_and_test(&folio->_entire_mapcount); |
1374 | if (first) { |
1375 | nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); |
1376 | if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { |
1377 | nr_pmdmapped = folio_nr_pages(folio); |
1378 | nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); |
1379 | /* Raced ahead of a remove and another add? */ |
1380 | if (unlikely(nr < 0)) |
1381 | nr = 0; |
1382 | } else { |
1383 | /* Raced ahead of a remove of COMPOUND_MAPPED */ |
1384 | nr = 0; |
1385 | } |
1386 | } |
1387 | } |
1388 | |
1389 | if (nr_pmdmapped) |
1390 | __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ? |
1391 | NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped); |
1392 | if (nr) |
1393 | __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr); |
1394 | |
1395 | /* See comments in page_add_anon_rmap() */ |
1396 | if (!folio_test_large(folio)) |
1397 | mlock_vma_folio(folio, vma); |
1398 | } |
1399 | |
1400 | /** |
1401 | * page_add_file_rmap - add pte mapping to a file page |
1402 | * @page: the page to add the mapping to |
1403 | * @vma: the vm area in which the mapping is added |
1404 | * @compound: charge the page as compound or small page |
1405 | * |
1406 | * The caller needs to hold the pte lock. |
1407 | */ |
1408 | void page_add_file_rmap(struct page *page, struct vm_area_struct *vma, |
1409 | bool compound) |
1410 | { |
1411 | struct folio *folio = page_folio(page); |
1412 | unsigned int nr_pages; |
1413 | |
1414 | VM_WARN_ON_ONCE_PAGE(compound && !PageTransHuge(page), page); |
1415 | |
1416 | if (likely(!compound)) |
1417 | nr_pages = 1; |
1418 | else |
1419 | nr_pages = folio_nr_pages(folio); |
1420 | |
1421 | folio_add_file_rmap_range(folio, page, nr_pages, vma, compound); |
1422 | } |
1423 | |
1424 | /** |
1425 | * page_remove_rmap - take down pte mapping from a page |
1426 | * @page: page to remove mapping from |
1427 | * @vma: the vm area from which the mapping is removed |
1428 | * @compound: uncharge the page as compound or small page |
1429 | * |
1430 | * The caller needs to hold the pte lock. |
1431 | */ |
1432 | void page_remove_rmap(struct page *page, struct vm_area_struct *vma, |
1433 | bool compound) |
1434 | { |
1435 | struct folio *folio = page_folio(page); |
1436 | atomic_t *mapped = &folio->_nr_pages_mapped; |
1437 | int nr = 0, nr_pmdmapped = 0; |
1438 | bool last; |
1439 | enum node_stat_item idx; |
1440 | |
1441 | VM_BUG_ON_PAGE(compound && !PageHead(page), page); |
1442 | |
1443 | /* Hugetlb pages are not counted in NR_*MAPPED */ |
1444 | if (unlikely(folio_test_hugetlb(folio))) { |
1445 | /* hugetlb pages are always mapped with pmds */ |
1446 | atomic_dec(&folio->_entire_mapcount); |
1447 | return; |
1448 | } |
1449 | |
1450 | /* Is page being unmapped by PTE? Is this its last map to be removed? */ |
1451 | if (likely(!compound)) { |
1452 | last = atomic_add_negative(-1, &page->_mapcount); |
1453 | nr = last; |
1454 | if (last && folio_test_large(folio)) { |
1455 | nr = atomic_dec_return_relaxed(mapped); |
1456 | nr = (nr < COMPOUND_MAPPED); |
1457 | } |
1458 | } else if (folio_test_pmd_mappable(folio)) { |
1459 | /* That test is redundant: it's for safety or to optimize out */ |
1460 | |
1461 | last = atomic_add_negative(-1, &folio->_entire_mapcount); |
1462 | if (last) { |
1463 | nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped); |
1464 | if (likely(nr < COMPOUND_MAPPED)) { |
1465 | nr_pmdmapped = folio_nr_pages(folio); |
1466 | nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); |
1467 | /* Raced ahead of another remove and an add? */ |
1468 | if (unlikely(nr < 0)) |
1469 | nr = 0; |
1470 | } else { |
1471 | /* An add of COMPOUND_MAPPED raced ahead */ |
1472 | nr = 0; |
1473 | } |
1474 | } |
1475 | } |
1476 | |
1477 | if (nr_pmdmapped) { |
1478 | if (folio_test_anon(folio)) |
1479 | idx = NR_ANON_THPS; |
1480 | else if (folio_test_swapbacked(folio)) |
1481 | idx = NR_SHMEM_PMDMAPPED; |
1482 | else |
1483 | idx = NR_FILE_PMDMAPPED; |
1484 | __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped); |
1485 | } |
1486 | if (nr) { |
1487 | idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; |
1488 | __lruvec_stat_mod_folio(folio, idx, -nr); |
1489 | |
1490 | /* |
1491 | * Queue anon THP for deferred split if at least one |
1492 | * page of the folio is unmapped and at least one page |
1493 | * is still mapped. |
1494 | */ |
1495 | if (folio_test_pmd_mappable(folio) && folio_test_anon(folio)) |
1496 | if (!compound || nr < nr_pmdmapped) |
1497 | deferred_split_folio(folio); |
1498 | } |
1499 | |
1500 | /* |
1501 | * It would be tidy to reset folio_test_anon mapping when fully |
1502 | * unmapped, but that might overwrite a racing page_add_anon_rmap |
1503 | * which increments mapcount after us but sets mapping before us: |
1504 | * so leave the reset to free_pages_prepare, and remember that |
1505 | * it's only reliable while mapped. |
1506 | */ |
1507 | |
1508 | munlock_vma_folio(folio, vma); |
1509 | } |
1510 | |
1511 | /* |
1512 | * @arg: enum ttu_flags will be passed to this argument |
1513 | */ |
1514 | static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, |
1515 | unsigned long address, void *arg) |
1516 | { |
1517 | struct mm_struct *mm = vma->vm_mm; |
1518 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
1519 | pte_t pteval; |
1520 | struct page *subpage; |
1521 | bool anon_exclusive, ret = true; |
1522 | struct mmu_notifier_range range; |
1523 | enum ttu_flags flags = (enum ttu_flags)(long)arg; |
1524 | unsigned long pfn; |
1525 | unsigned long hsz = 0; |
1526 | |
1527 | /* |
1528 | * When racing against e.g. zap_pte_range() on another cpu, |
1529 | * in between its ptep_get_and_clear_full() and page_remove_rmap(), |
1530 | * try_to_unmap() may return before page_mapped() has become false, |
1531 | * if page table locking is skipped: use TTU_SYNC to wait for that. |
1532 | */ |
1533 | if (flags & TTU_SYNC) |
1534 | pvmw.flags = PVMW_SYNC; |
1535 | |
1536 | if (flags & TTU_SPLIT_HUGE_PMD) |
1537 | split_huge_pmd_address(vma, address, false, folio); |
1538 | |
1539 | /* |
1540 | * For THP, we have to assume the worst case, i.e. pmd, for invalidation. |
1541 | * For hugetlb, it could be much worse if we need to do pud |
1542 | * invalidation in the case of pmd sharing. |
1543 | * |
1544 | * Note that the folio cannot be freed in this function, as the caller of |
1545 | * try_to_unmap() must hold a reference on the folio. |
1546 | */ |
1547 | range.end = vma_address_end(&pvmw); |
1548 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, |
1549 | address, range.end); |
1550 | if (folio_test_hugetlb(folio)) { |
1551 | /* |
1552 | * If sharing is possible, start and end will be adjusted |
1553 | * accordingly. |
1554 | */ |
1555 | adjust_range_if_pmd_sharing_possible(vma, start: &range.start, |
1556 | end: &range.end); |
1557 | |
1558 | /* We need the huge page size for set_huge_pte_at() */ |
1559 | hsz = huge_page_size(h: hstate_vma(vma)); |
1560 | } |
1561 | mmu_notifier_invalidate_range_start(range: &range); |
1562 | |
1563 | while (page_vma_mapped_walk(pvmw: &pvmw)) { |
1564 | /* Unexpected PMD-mapped THP? */ |
1565 | VM_BUG_ON_FOLIO(!pvmw.pte, folio); |
1566 | |
1567 | /* |
1568 | * If the folio is in an mlock()d vma, we must not swap it out. |
1569 | */ |
1570 | if (!(flags & TTU_IGNORE_MLOCK) && |
1571 | (vma->vm_flags & VM_LOCKED)) { |
1572 | /* Restore the mlock which got missed */ |
1573 | if (!folio_test_large(folio)) |
1574 | mlock_vma_folio(folio, vma); |
1575 | page_vma_mapped_walk_done(pvmw: &pvmw); |
1576 | ret = false; |
1577 | break; |
1578 | } |
1579 | |
1580 | pfn = pte_pfn(pte: ptep_get(ptep: pvmw.pte)); |
1581 | subpage = folio_page(folio, pfn - folio_pfn(folio)); |
1582 | address = pvmw.address; |
1583 | anon_exclusive = folio_test_anon(folio) && |
1584 | PageAnonExclusive(page: subpage); |
1585 | |
1586 | if (folio_test_hugetlb(folio)) { |
1587 | bool anon = folio_test_anon(folio); |
1588 | |
1589 | /* |
			 * try_to_unmap() is only passed a hugetlb page when
			 * that hugetlb page is poisoned.
1592 | */ |
1593 | VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage); |
1594 | /* |
1595 | * huge_pmd_unshare may unmap an entire PMD page. |
1596 | * There is no way of knowing exactly which PMDs may |
1597 | * be cached for this mm, so we must flush them all. |
1598 | * start/end were already adjusted above to cover this |
1599 | * range. |
1600 | */ |
1601 | flush_cache_range(vma, start: range.start, end: range.end); |
1602 | |
1603 | /* |
1604 | * To call huge_pmd_unshare, i_mmap_rwsem must be |
1605 | * held in write mode. Caller needs to explicitly |
1606 | * do this outside rmap routines. |
1607 | * |
1608 | * We also must hold hugetlb vma_lock in write mode. |
1609 | * Lock order dictates acquiring vma_lock BEFORE |
1610 | * i_mmap_rwsem. We can only try lock here and fail |
1611 | * if unsuccessful. |
1612 | */ |
1613 | if (!anon) { |
1614 | VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); |
1615 | if (!hugetlb_vma_trylock_write(vma)) { |
1616 | page_vma_mapped_walk_done(pvmw: &pvmw); |
1617 | ret = false; |
1618 | break; |
1619 | } |
1620 | if (huge_pmd_unshare(mm, vma, addr: address, ptep: pvmw.pte)) { |
1621 | hugetlb_vma_unlock_write(vma); |
1622 | flush_tlb_range(vma, |
1623 | range.start, range.end); |
1624 | /* |
1625 | * The ref count of the PMD page was |
1626 | * dropped which is part of the way map |
1627 | * counting is done for shared PMDs. |
1628 | * Return 'true' here. When there is |
1629 | * no other sharing, huge_pmd_unshare |
1630 | * returns false and we will unmap the |
1631 | * actual page and drop map count |
1632 | * to zero. |
1633 | */ |
1634 | page_vma_mapped_walk_done(pvmw: &pvmw); |
1635 | break; |
1636 | } |
1637 | hugetlb_vma_unlock_write(vma); |
1638 | } |
1639 | pteval = huge_ptep_clear_flush(vma, addr: address, ptep: pvmw.pte); |
1640 | } else { |
1641 | flush_cache_page(vma, vmaddr: address, pfn); |
1642 | /* Nuke the page table entry. */ |
1643 | if (should_defer_flush(mm, flags)) { |
1644 | /* |
1645 | * We clear the PTE but do not flush so potentially |
1646 | * a remote CPU could still be writing to the folio. |
1647 | * If the entry was previously clean then the |
1648 | * architecture must guarantee that a clear->dirty |
1649 | * transition on a cached TLB entry is written through |
1650 | * and traps if the PTE is unmapped. |
1651 | */ |
1652 | pteval = ptep_get_and_clear(mm, addr: address, ptep: pvmw.pte); |
1653 | |
1654 | set_tlb_ubc_flush_pending(mm, pteval, uaddr: address); |
1655 | } else { |
1656 | pteval = ptep_clear_flush(vma, address, ptep: pvmw.pte); |
1657 | } |
1658 | } |
1659 | |
1660 | /* |
1661 | * Now the pte is cleared. If this pte was uffd-wp armed, |
1662 | * we may want to replace a none pte with a marker pte if |
1663 | * it's file-backed, so we don't lose the tracking info. |
1664 | */ |
1665 | pte_install_uffd_wp_if_needed(vma, addr: address, pte: pvmw.pte, pteval); |
1666 | |
1667 | /* Set the dirty flag on the folio now the pte is gone. */ |
1668 | if (pte_dirty(pte: pteval)) |
1669 | folio_mark_dirty(folio); |
1670 | |
1671 | /* Update high watermark before we lower rss */ |
1672 | update_hiwater_rss(mm); |
1673 | |
1674 | if (PageHWPoison(page: subpage) && (flags & TTU_HWPOISON)) { |
1675 | pteval = swp_entry_to_pte(entry: make_hwpoison_entry(page: subpage)); |
1676 | if (folio_test_hugetlb(folio)) { |
1677 | hugetlb_count_sub(l: folio_nr_pages(folio), mm); |
1678 | set_huge_pte_at(mm, addr: address, ptep: pvmw.pte, pte: pteval, |
1679 | sz: hsz); |
1680 | } else { |
1681 | dec_mm_counter(mm, member: mm_counter(page: &folio->page)); |
1682 | set_pte_at(mm, address, pvmw.pte, pteval); |
1683 | } |
1684 | |
1685 | } else if (pte_unused(pte: pteval) && !userfaultfd_armed(vma)) { |
1686 | /* |
1687 | * The guest indicated that the page content is of no |
			 * interest anymore. Simply discard the pte; vmscan
1689 | * will take care of the rest. |
1690 | * A future reference will then fault in a new zero |
1691 | * page. When userfaultfd is active, we must not drop |
1692 | * this page though, as its main user (postcopy |
1693 | * migration) will not expect userfaults on already |
1694 | * copied pages. |
1695 | */ |
1696 | dec_mm_counter(mm, member: mm_counter(page: &folio->page)); |
1697 | } else if (folio_test_anon(folio)) { |
1698 | swp_entry_t entry = page_swap_entry(page: subpage); |
1699 | pte_t swp_pte; |
1700 | /* |
1701 | * Store the swap location in the pte. |
1702 | * See handle_pte_fault() ... |
1703 | */ |
1704 | if (unlikely(folio_test_swapbacked(folio) != |
1705 | folio_test_swapcache(folio))) { |
1706 | WARN_ON_ONCE(1); |
1707 | ret = false; |
1708 | page_vma_mapped_walk_done(pvmw: &pvmw); |
1709 | break; |
1710 | } |
1711 | |
1712 | /* MADV_FREE page check */ |
1713 | if (!folio_test_swapbacked(folio)) { |
1714 | int ref_count, map_count; |
1715 | |
1716 | /* |
1717 | * Synchronize with gup_pte_range(): |
1718 | * - clear PTE; barrier; read refcount |
1719 | * - inc refcount; barrier; read PTE |
1720 | */ |
1721 | smp_mb(); |
1722 | |
1723 | ref_count = folio_ref_count(folio); |
1724 | map_count = folio_mapcount(folio); |
1725 | |
1726 | /* |
1727 | * Order reads for page refcount and dirty flag |
1728 | * (see comments in __remove_mapping()). |
1729 | */ |
1730 | smp_rmb(); |
1731 | |
1732 | /* |
1733 | * The only page refs must be one from isolation |
1734 | * plus the rmap(s) (dropped by discard:). |
1735 | */ |
1736 | if (ref_count == 1 + map_count && |
1737 | !folio_test_dirty(folio)) { |
1738 | dec_mm_counter(mm, member: MM_ANONPAGES); |
1739 | goto discard; |
1740 | } |
1741 | |
1742 | /* |
1743 | * If the folio was redirtied, it cannot be |
				 * discarded. Remap the page into the page table.
1745 | */ |
1746 | set_pte_at(mm, address, pvmw.pte, pteval); |
1747 | folio_set_swapbacked(folio); |
1748 | ret = false; |
1749 | page_vma_mapped_walk_done(pvmw: &pvmw); |
1750 | break; |
1751 | } |
1752 | |
1753 | if (swap_duplicate(entry) < 0) { |
1754 | set_pte_at(mm, address, pvmw.pte, pteval); |
1755 | ret = false; |
1756 | page_vma_mapped_walk_done(pvmw: &pvmw); |
1757 | break; |
1758 | } |
1759 | if (arch_unmap_one(mm, vma, addr: address, orig_pte: pteval) < 0) { |
1760 | swap_free(entry); |
1761 | set_pte_at(mm, address, pvmw.pte, pteval); |
1762 | ret = false; |
1763 | page_vma_mapped_walk_done(pvmw: &pvmw); |
1764 | break; |
1765 | } |
1766 | |
1767 | /* See page_try_share_anon_rmap(): clear PTE first. */ |
1768 | if (anon_exclusive && |
1769 | page_try_share_anon_rmap(page: subpage)) { |
1770 | swap_free(entry); |
1771 | set_pte_at(mm, address, pvmw.pte, pteval); |
1772 | ret = false; |
1773 | page_vma_mapped_walk_done(pvmw: &pvmw); |
1774 | break; |
1775 | } |
1776 | if (list_empty(head: &mm->mmlist)) { |
1777 | spin_lock(lock: &mmlist_lock); |
1778 | if (list_empty(head: &mm->mmlist)) |
1779 | list_add(new: &mm->mmlist, head: &init_mm.mmlist); |
1780 | spin_unlock(lock: &mmlist_lock); |
1781 | } |
1782 | dec_mm_counter(mm, member: MM_ANONPAGES); |
1783 | inc_mm_counter(mm, member: MM_SWAPENTS); |
1784 | swp_pte = swp_entry_to_pte(entry); |
1785 | if (anon_exclusive) |
1786 | swp_pte = pte_swp_mkexclusive(pte: swp_pte); |
1787 | if (pte_soft_dirty(pte: pteval)) |
1788 | swp_pte = pte_swp_mksoft_dirty(pte: swp_pte); |
1789 | if (pte_uffd_wp(pte: pteval)) |
1790 | swp_pte = pte_swp_mkuffd_wp(pte: swp_pte); |
1791 | set_pte_at(mm, address, pvmw.pte, swp_pte); |
1792 | } else { |
1793 | /* |
1794 | * This is a locked file-backed folio, |
1795 | * so it cannot be removed from the page |
1796 | * cache and replaced by a new folio before |
1797 | * mmu_notifier_invalidate_range_end, so no |
			 * concurrent thread can update its page table
1799 | * to point at a new folio while a device is |
1800 | * still using this folio. |
1801 | * |
1802 | * See Documentation/mm/mmu_notifier.rst |
1803 | */ |
1804 | dec_mm_counter(mm, member: mm_counter_file(page: &folio->page)); |
1805 | } |
1806 | discard: |
1807 | page_remove_rmap(page: subpage, vma, compound: folio_test_hugetlb(folio)); |
1808 | if (vma->vm_flags & VM_LOCKED) |
1809 | mlock_drain_local(); |
1810 | folio_put(folio); |
1811 | } |
1812 | |
1813 | mmu_notifier_invalidate_range_end(range: &range); |
1814 | |
1815 | return ret; |
1816 | } |
1817 | |
1818 | static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) |
1819 | { |
1820 | return vma_is_temporary_stack(vma); |
1821 | } |
1822 | |
1823 | static int folio_not_mapped(struct folio *folio) |
1824 | { |
1825 | return !folio_mapped(folio); |
1826 | } |
1827 | |
1828 | /** |
1829 | * try_to_unmap - Try to remove all page table mappings to a folio. |
1830 | * @folio: The folio to unmap. |
1831 | * @flags: action and flags |
1832 | * |
1833 | * Tries to remove all the page table entries which are mapping this |
1834 | * folio. It is the caller's responsibility to check if the folio is |
1835 | * still mapped if needed (use TTU_SYNC to prevent accounting races). |
1836 | * |
1837 | * Context: Caller must hold the folio lock. |
1838 | */ |
1839 | void try_to_unmap(struct folio *folio, enum ttu_flags flags) |
1840 | { |
1841 | struct rmap_walk_control rwc = { |
1842 | .rmap_one = try_to_unmap_one, |
1843 | .arg = (void *)flags, |
1844 | .done = folio_not_mapped, |
1845 | .anon_lock = folio_lock_anon_vma_read, |
1846 | }; |
1847 | |
1848 | if (flags & TTU_RMAP_LOCKED) |
1849 | rmap_walk_locked(folio, rwc: &rwc); |
1850 | else |
1851 | rmap_walk(folio, rwc: &rwc); |
1852 | } |
1853 | |
1854 | /* |
 * @arg: enum ttu_flags is passed in this argument.
 *
 * If TTU_SPLIT_HUGE_PMD is specified, any PMD mappings will be split into PTEs
1858 | * containing migration entries. |
1859 | */ |
1860 | static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, |
1861 | unsigned long address, void *arg) |
1862 | { |
1863 | struct mm_struct *mm = vma->vm_mm; |
1864 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
1865 | pte_t pteval; |
1866 | struct page *subpage; |
1867 | bool anon_exclusive, ret = true; |
1868 | struct mmu_notifier_range range; |
1869 | enum ttu_flags flags = (enum ttu_flags)(long)arg; |
1870 | unsigned long pfn; |
1871 | unsigned long hsz = 0; |
1872 | |
1873 | /* |
1874 | * When racing against e.g. zap_pte_range() on another cpu, |
1875 | * in between its ptep_get_and_clear_full() and page_remove_rmap(), |
1876 | * try_to_migrate() may return before page_mapped() has become false, |
1877 | * if page table locking is skipped: use TTU_SYNC to wait for that. |
1878 | */ |
1879 | if (flags & TTU_SYNC) |
1880 | pvmw.flags = PVMW_SYNC; |
1881 | |
1882 | /* |
1883 | * unmap_page() in mm/huge_memory.c is the only user of migration with |
1884 | * TTU_SPLIT_HUGE_PMD and it wants to freeze. |
1885 | */ |
1886 | if (flags & TTU_SPLIT_HUGE_PMD) |
1887 | split_huge_pmd_address(vma, address, freeze: true, folio); |
1888 | |
1889 | /* |
	 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
1891 | * For hugetlb, it could be much worse if we need to do pud |
1892 | * invalidation in the case of pmd sharing. |
1893 | * |
	 * Note that the folio cannot be freed in this function, as the caller
	 * of try_to_migrate() must hold a reference on the folio.
1896 | */ |
1897 | range.end = vma_address_end(pvmw: &pvmw); |
1898 | mmu_notifier_range_init(range: &range, event: MMU_NOTIFY_CLEAR, flags: 0, mm: vma->vm_mm, |
1899 | start: address, end: range.end); |
1900 | if (folio_test_hugetlb(folio)) { |
1901 | /* |
1902 | * If sharing is possible, start and end will be adjusted |
1903 | * accordingly. |
1904 | */ |
1905 | adjust_range_if_pmd_sharing_possible(vma, start: &range.start, |
1906 | end: &range.end); |
1907 | |
1908 | /* We need the huge page size for set_huge_pte_at() */ |
1909 | hsz = huge_page_size(h: hstate_vma(vma)); |
1910 | } |
1911 | mmu_notifier_invalidate_range_start(range: &range); |
1912 | |
1913 | while (page_vma_mapped_walk(pvmw: &pvmw)) { |
1914 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION |
1915 | /* PMD-mapped THP migration entry */ |
1916 | if (!pvmw.pte) { |
1917 | subpage = folio_page(folio, |
1918 | pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); |
1919 | VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || |
1920 | !folio_test_pmd_mappable(folio), folio); |
1921 | |
1922 | if (set_pmd_migration_entry(pvmw: &pvmw, page: subpage)) { |
1923 | ret = false; |
1924 | page_vma_mapped_walk_done(pvmw: &pvmw); |
1925 | break; |
1926 | } |
1927 | continue; |
1928 | } |
1929 | #endif |
1930 | |
1931 | /* Unexpected PMD-mapped THP? */ |
1932 | VM_BUG_ON_FOLIO(!pvmw.pte, folio); |
1933 | |
1934 | pfn = pte_pfn(pte: ptep_get(ptep: pvmw.pte)); |
1935 | |
1936 | if (folio_is_zone_device(folio)) { |
1937 | /* |
			 * Our PTE is a non-present device private entry and
1939 | * calculating the subpage as for the common case would |
1940 | * result in an invalid pointer. |
1941 | * |
1942 | * Since only PAGE_SIZE pages can currently be |
1943 | * migrated, just set it to page. This will need to be |
1944 | * changed when hugepage migrations to device private |
1945 | * memory are supported. |
1946 | */ |
1947 | VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); |
1948 | subpage = &folio->page; |
1949 | } else { |
1950 | subpage = folio_page(folio, pfn - folio_pfn(folio)); |
1951 | } |
1952 | address = pvmw.address; |
1953 | anon_exclusive = folio_test_anon(folio) && |
1954 | PageAnonExclusive(page: subpage); |
1955 | |
1956 | if (folio_test_hugetlb(folio)) { |
1957 | bool anon = folio_test_anon(folio); |
1958 | |
1959 | /* |
1960 | * huge_pmd_unshare may unmap an entire PMD page. |
1961 | * There is no way of knowing exactly which PMDs may |
1962 | * be cached for this mm, so we must flush them all. |
1963 | * start/end were already adjusted above to cover this |
1964 | * range. |
1965 | */ |
1966 | flush_cache_range(vma, start: range.start, end: range.end); |
1967 | |
1968 | /* |
1969 | * To call huge_pmd_unshare, i_mmap_rwsem must be |
1970 | * held in write mode. Caller needs to explicitly |
1971 | * do this outside rmap routines. |
1972 | * |
1973 | * We also must hold hugetlb vma_lock in write mode. |
1974 | * Lock order dictates acquiring vma_lock BEFORE |
1975 | * i_mmap_rwsem. We can only try lock here and |
1976 | * fail if unsuccessful. |
1977 | */ |
1978 | if (!anon) { |
1979 | VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); |
1980 | if (!hugetlb_vma_trylock_write(vma)) { |
1981 | page_vma_mapped_walk_done(pvmw: &pvmw); |
1982 | ret = false; |
1983 | break; |
1984 | } |
1985 | if (huge_pmd_unshare(mm, vma, addr: address, ptep: pvmw.pte)) { |
1986 | hugetlb_vma_unlock_write(vma); |
1987 | flush_tlb_range(vma, |
1988 | range.start, range.end); |
1989 | |
1990 | /* |
1991 | * The ref count of the PMD page was |
1992 | * dropped which is part of the way map |
1993 | * counting is done for shared PMDs. |
1994 | * Return 'true' here. When there is |
1995 | * no other sharing, huge_pmd_unshare |
1996 | * returns false and we will unmap the |
1997 | * actual page and drop map count |
1998 | * to zero. |
1999 | */ |
2000 | page_vma_mapped_walk_done(pvmw: &pvmw); |
2001 | break; |
2002 | } |
2003 | hugetlb_vma_unlock_write(vma); |
2004 | } |
2005 | /* Nuke the hugetlb page table entry */ |
2006 | pteval = huge_ptep_clear_flush(vma, addr: address, ptep: pvmw.pte); |
2007 | } else { |
2008 | flush_cache_page(vma, vmaddr: address, pfn); |
2009 | /* Nuke the page table entry. */ |
2010 | if (should_defer_flush(mm, flags)) { |
2011 | /* |
2012 | * We clear the PTE but do not flush so potentially |
2013 | * a remote CPU could still be writing to the folio. |
2014 | * If the entry was previously clean then the |
2015 | * architecture must guarantee that a clear->dirty |
2016 | * transition on a cached TLB entry is written through |
2017 | * and traps if the PTE is unmapped. |
2018 | */ |
2019 | pteval = ptep_get_and_clear(mm, addr: address, ptep: pvmw.pte); |
2020 | |
2021 | set_tlb_ubc_flush_pending(mm, pteval, uaddr: address); |
2022 | } else { |
2023 | pteval = ptep_clear_flush(vma, address, ptep: pvmw.pte); |
2024 | } |
2025 | } |
2026 | |
2027 | /* Set the dirty flag on the folio now the pte is gone. */ |
2028 | if (pte_dirty(pte: pteval)) |
2029 | folio_mark_dirty(folio); |
2030 | |
2031 | /* Update high watermark before we lower rss */ |
2032 | update_hiwater_rss(mm); |
2033 | |
2034 | if (folio_is_device_private(folio)) { |
2035 | unsigned long pfn = folio_pfn(folio); |
2036 | swp_entry_t entry; |
2037 | pte_t swp_pte; |
2038 | |
2039 | if (anon_exclusive) |
2040 | BUG_ON(page_try_share_anon_rmap(subpage)); |
2041 | |
2042 | /* |
2043 | * Store the pfn of the page in a special migration |
2044 | * pte. do_swap_page() will wait until the migration |
2045 | * pte is removed and then restart fault handling. |
2046 | */ |
2047 | entry = pte_to_swp_entry(pte: pteval); |
2048 | if (is_writable_device_private_entry(entry)) |
2049 | entry = make_writable_migration_entry(offset: pfn); |
2050 | else if (anon_exclusive) |
2051 | entry = make_readable_exclusive_migration_entry(offset: pfn); |
2052 | else |
2053 | entry = make_readable_migration_entry(offset: pfn); |
2054 | swp_pte = swp_entry_to_pte(entry); |
2055 | |
2056 | /* |
2057 | * pteval maps a zone device page and is therefore |
2058 | * a swap pte. |
2059 | */ |
2060 | if (pte_swp_soft_dirty(pte: pteval)) |
2061 | swp_pte = pte_swp_mksoft_dirty(pte: swp_pte); |
2062 | if (pte_swp_uffd_wp(pte: pteval)) |
2063 | swp_pte = pte_swp_mkuffd_wp(pte: swp_pte); |
2064 | set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); |
2065 | trace_set_migration_pte(addr: pvmw.address, pte: pte_val(pte: swp_pte), |
2066 | order: compound_order(page: &folio->page)); |
2067 | /* |
			 * No need to invalidate here; it will be synchronized
			 * against the special swap migration pte.
2070 | */ |
2071 | } else if (PageHWPoison(page: subpage)) { |
2072 | pteval = swp_entry_to_pte(entry: make_hwpoison_entry(page: subpage)); |
2073 | if (folio_test_hugetlb(folio)) { |
2074 | hugetlb_count_sub(l: folio_nr_pages(folio), mm); |
2075 | set_huge_pte_at(mm, addr: address, ptep: pvmw.pte, pte: pteval, |
2076 | sz: hsz); |
2077 | } else { |
2078 | dec_mm_counter(mm, member: mm_counter(page: &folio->page)); |
2079 | set_pte_at(mm, address, pvmw.pte, pteval); |
2080 | } |
2081 | |
2082 | } else if (pte_unused(pte: pteval) && !userfaultfd_armed(vma)) { |
2083 | /* |
2084 | * The guest indicated that the page content is of no |
			 * interest anymore. Simply discard the pte; vmscan
2086 | * will take care of the rest. |
2087 | * A future reference will then fault in a new zero |
2088 | * page. When userfaultfd is active, we must not drop |
2089 | * this page though, as its main user (postcopy |
2090 | * migration) will not expect userfaults on already |
2091 | * copied pages. |
2092 | */ |
2093 | dec_mm_counter(mm, member: mm_counter(page: &folio->page)); |
2094 | } else { |
2095 | swp_entry_t entry; |
2096 | pte_t swp_pte; |
2097 | |
2098 | if (arch_unmap_one(mm, vma, addr: address, orig_pte: pteval) < 0) { |
2099 | if (folio_test_hugetlb(folio)) |
2100 | set_huge_pte_at(mm, addr: address, ptep: pvmw.pte, |
2101 | pte: pteval, sz: hsz); |
2102 | else |
2103 | set_pte_at(mm, address, pvmw.pte, pteval); |
2104 | ret = false; |
2105 | page_vma_mapped_walk_done(pvmw: &pvmw); |
2106 | break; |
2107 | } |
2108 | VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && |
2109 | !anon_exclusive, subpage); |
2110 | |
2111 | /* See page_try_share_anon_rmap(): clear PTE first. */ |
2112 | if (anon_exclusive && |
2113 | page_try_share_anon_rmap(page: subpage)) { |
2114 | if (folio_test_hugetlb(folio)) |
2115 | set_huge_pte_at(mm, addr: address, ptep: pvmw.pte, |
2116 | pte: pteval, sz: hsz); |
2117 | else |
2118 | set_pte_at(mm, address, pvmw.pte, pteval); |
2119 | ret = false; |
2120 | page_vma_mapped_walk_done(pvmw: &pvmw); |
2121 | break; |
2122 | } |
2123 | |
2124 | /* |
2125 | * Store the pfn of the page in a special migration |
2126 | * pte. do_swap_page() will wait until the migration |
2127 | * pte is removed and then restart fault handling. |
2128 | */ |
2129 | if (pte_write(pte: pteval)) |
2130 | entry = make_writable_migration_entry( |
2131 | page_to_pfn(subpage)); |
2132 | else if (anon_exclusive) |
2133 | entry = make_readable_exclusive_migration_entry( |
2134 | page_to_pfn(subpage)); |
2135 | else |
2136 | entry = make_readable_migration_entry( |
2137 | page_to_pfn(subpage)); |
2138 | if (pte_young(pte: pteval)) |
2139 | entry = make_migration_entry_young(entry); |
2140 | if (pte_dirty(pte: pteval)) |
2141 | entry = make_migration_entry_dirty(entry); |
2142 | swp_pte = swp_entry_to_pte(entry); |
2143 | if (pte_soft_dirty(pte: pteval)) |
2144 | swp_pte = pte_swp_mksoft_dirty(pte: swp_pte); |
2145 | if (pte_uffd_wp(pte: pteval)) |
2146 | swp_pte = pte_swp_mkuffd_wp(pte: swp_pte); |
2147 | if (folio_test_hugetlb(folio)) |
2148 | set_huge_pte_at(mm, addr: address, ptep: pvmw.pte, pte: swp_pte, |
2149 | sz: hsz); |
2150 | else |
2151 | set_pte_at(mm, address, pvmw.pte, swp_pte); |
2152 | trace_set_migration_pte(addr: address, pte: pte_val(pte: swp_pte), |
2153 | order: compound_order(page: &folio->page)); |
2154 | /* |
			 * No need to invalidate here; it will be synchronized
			 * against the special swap migration pte.
2157 | */ |
2158 | } |
2159 | |
2160 | page_remove_rmap(page: subpage, vma, compound: folio_test_hugetlb(folio)); |
2161 | if (vma->vm_flags & VM_LOCKED) |
2162 | mlock_drain_local(); |
2163 | folio_put(folio); |
2164 | } |
2165 | |
2166 | mmu_notifier_invalidate_range_end(range: &range); |
2167 | |
2168 | return ret; |
2169 | } |
2170 | |
2171 | /** |
2172 | * try_to_migrate - try to replace all page table mappings with swap entries |
2173 | * @folio: the folio to replace page table entries for |
2174 | * @flags: action and flags |
2175 | * |
2176 | * Tries to remove all the page table entries which are mapping this folio and |
2177 | * replace them with special swap entries. Caller must hold the folio lock. |
2178 | */ |
2179 | void try_to_migrate(struct folio *folio, enum ttu_flags flags) |
2180 | { |
2181 | struct rmap_walk_control rwc = { |
2182 | .rmap_one = try_to_migrate_one, |
2183 | .arg = (void *)flags, |
2184 | .done = folio_not_mapped, |
2185 | .anon_lock = folio_lock_anon_vma_read, |
2186 | }; |
2187 | |
2188 | /* |
	 * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED,
	 * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags.
2191 | */ |
2192 | if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | |
2193 | TTU_SYNC | TTU_BATCH_FLUSH))) |
2194 | return; |
2195 | |
2196 | if (folio_is_zone_device(folio) && |
2197 | (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) |
2198 | return; |
2199 | |
2200 | /* |
	 * During exec, a temporary VMA is set up and later moved.
2202 | * The VMA is moved under the anon_vma lock but not the |
2203 | * page tables leading to a race where migration cannot |
2204 | * find the migration ptes. Rather than increasing the |
2205 | * locking requirements of exec(), migration skips |
2206 | * temporary VMAs until after exec() completes. |
2207 | */ |
2208 | if (!folio_test_ksm(folio) && folio_test_anon(folio)) |
2209 | rwc.invalid_vma = invalid_migration_vma; |
2210 | |
2211 | if (flags & TTU_RMAP_LOCKED) |
2212 | rmap_walk_locked(folio, rwc: &rwc); |
2213 | else |
2214 | rmap_walk(folio, rwc: &rwc); |
2215 | } |
2216 | |
2217 | #ifdef CONFIG_DEVICE_PRIVATE |
2218 | struct make_exclusive_args { |
2219 | struct mm_struct *mm; |
2220 | unsigned long address; |
2221 | void *owner; |
2222 | bool valid; |
2223 | }; |
2224 | |
2225 | static bool page_make_device_exclusive_one(struct folio *folio, |
2226 | struct vm_area_struct *vma, unsigned long address, void *priv) |
2227 | { |
2228 | struct mm_struct *mm = vma->vm_mm; |
2229 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
2230 | struct make_exclusive_args *args = priv; |
2231 | pte_t pteval; |
2232 | struct page *subpage; |
2233 | bool ret = true; |
2234 | struct mmu_notifier_range range; |
2235 | swp_entry_t entry; |
2236 | pte_t swp_pte; |
2237 | pte_t ptent; |
2238 | |
2239 | mmu_notifier_range_init_owner(range: &range, event: MMU_NOTIFY_EXCLUSIVE, flags: 0, |
2240 | mm: vma->vm_mm, start: address, min(vma->vm_end, |
2241 | address + folio_size(folio)), |
2242 | owner: args->owner); |
2243 | mmu_notifier_invalidate_range_start(range: &range); |
2244 | |
2245 | while (page_vma_mapped_walk(pvmw: &pvmw)) { |
2246 | /* Unexpected PMD-mapped THP? */ |
2247 | VM_BUG_ON_FOLIO(!pvmw.pte, folio); |
2248 | |
2249 | ptent = ptep_get(ptep: pvmw.pte); |
2250 | if (!pte_present(a: ptent)) { |
2251 | ret = false; |
2252 | page_vma_mapped_walk_done(pvmw: &pvmw); |
2253 | break; |
2254 | } |
2255 | |
2256 | subpage = folio_page(folio, |
2257 | pte_pfn(ptent) - folio_pfn(folio)); |
2258 | address = pvmw.address; |
2259 | |
2260 | /* Nuke the page table entry. */ |
2261 | flush_cache_page(vma, vmaddr: address, pfn: pte_pfn(pte: ptent)); |
2262 | pteval = ptep_clear_flush(vma, address, ptep: pvmw.pte); |
2263 | |
2264 | /* Set the dirty flag on the folio now the pte is gone. */ |
2265 | if (pte_dirty(pte: pteval)) |
2266 | folio_mark_dirty(folio); |
2267 | |
2268 | /* |
2269 | * Check that our target page is still mapped at the expected |
2270 | * address. |
2271 | */ |
2272 | if (args->mm == mm && args->address == address && |
2273 | pte_write(pte: pteval)) |
2274 | args->valid = true; |
2275 | |
2276 | /* |
		 * Store the pfn of the page in a special device-exclusive
		 * swap pte. On the next CPU fault this entry is replaced
		 * with the original mapping after calling MMU notifiers.
2280 | */ |
2281 | if (pte_write(pte: pteval)) |
2282 | entry = make_writable_device_exclusive_entry( |
2283 | page_to_pfn(subpage)); |
2284 | else |
2285 | entry = make_readable_device_exclusive_entry( |
2286 | page_to_pfn(subpage)); |
2287 | swp_pte = swp_entry_to_pte(entry); |
2288 | if (pte_soft_dirty(pte: pteval)) |
2289 | swp_pte = pte_swp_mksoft_dirty(pte: swp_pte); |
2290 | if (pte_uffd_wp(pte: pteval)) |
2291 | swp_pte = pte_swp_mkuffd_wp(pte: swp_pte); |
2292 | |
2293 | set_pte_at(mm, address, pvmw.pte, swp_pte); |
2294 | |
2295 | /* |
2296 | * There is a reference on the page for the swap entry which has |
		 * been removed, so we shouldn't take another.
2298 | */ |
2299 | page_remove_rmap(page: subpage, vma, compound: false); |
2300 | } |
2301 | |
2302 | mmu_notifier_invalidate_range_end(range: &range); |
2303 | |
2304 | return ret; |
2305 | } |
2306 | |
2307 | /** |
2308 | * folio_make_device_exclusive - Mark the folio exclusively owned by a device. |
2309 | * @folio: The folio to replace page table entries for. |
2310 | * @mm: The mm_struct where the folio is expected to be mapped. |
2311 | * @address: Address where the folio is expected to be mapped. |
2312 | * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks |
2313 | * |
2314 | * Tries to remove all the page table entries which are mapping this |
2315 | * folio and replace them with special device exclusive swap entries to |
2316 | * grant a device exclusive access to the folio. |
2317 | * |
2318 | * Context: Caller must hold the folio lock. |
2319 | * Return: false if the page is still mapped, or if it could not be unmapped |
2320 | * from the expected address. Otherwise returns true (success). |
2321 | */ |
2322 | static bool folio_make_device_exclusive(struct folio *folio, |
2323 | struct mm_struct *mm, unsigned long address, void *owner) |
2324 | { |
2325 | struct make_exclusive_args args = { |
2326 | .mm = mm, |
2327 | .address = address, |
2328 | .owner = owner, |
2329 | .valid = false, |
2330 | }; |
2331 | struct rmap_walk_control rwc = { |
2332 | .rmap_one = page_make_device_exclusive_one, |
2333 | .done = folio_not_mapped, |
2334 | .anon_lock = folio_lock_anon_vma_read, |
2335 | .arg = &args, |
2336 | }; |
2337 | |
2338 | /* |
2339 | * Restrict to anonymous folios for now to avoid potential writeback |
2340 | * issues. |
2341 | */ |
2342 | if (!folio_test_anon(folio)) |
2343 | return false; |
2344 | |
2345 | rmap_walk(folio, rwc: &rwc); |
2346 | |
2347 | return args.valid && !folio_mapcount(folio); |
2348 | } |
2349 | |
2350 | /** |
2351 | * make_device_exclusive_range() - Mark a range for exclusive use by a device |
2352 | * @mm: mm_struct of associated target process |
2353 | * @start: start of the region to mark for exclusive device access |
2354 | * @end: end address of region |
2355 | * @pages: returns the pages which were successfully marked for exclusive access |
2356 | * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering |
2357 | * |
2358 | * Returns: number of pages found in the range by GUP. A page is marked for |
2359 | * exclusive access only if the page pointer is non-NULL. |
2360 | * |
 * This function finds the ptes mapping page(s) in the given address range,
 * locks them, and replaces the mappings with special swap entries that prevent
 * userspace CPU access. On fault, these entries are replaced with the original
 * mapping after calling MMU notifiers.
2365 | * |
 * A driver using this to program access from a device must use an mmu notifier
 * critical section to hold a device-specific lock during programming. Once
 * programming is complete, it should drop the page lock and reference, after
 * which point CPU access to the page will revoke the exclusive access.
2370 | */ |
2371 | int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, |
2372 | unsigned long end, struct page **pages, |
2373 | void *owner) |
2374 | { |
2375 | long npages = (end - start) >> PAGE_SHIFT; |
2376 | long i; |
2377 | |
2378 | npages = get_user_pages_remote(mm, start, nr_pages: npages, |
2379 | gup_flags: FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, |
2380 | pages, NULL); |
2381 | if (npages < 0) |
2382 | return npages; |
2383 | |
2384 | for (i = 0; i < npages; i++, start += PAGE_SIZE) { |
2385 | struct folio *folio = page_folio(pages[i]); |
2386 | if (PageTail(page: pages[i]) || !folio_trylock(folio)) { |
2387 | folio_put(folio); |
2388 | pages[i] = NULL; |
2389 | continue; |
2390 | } |
2391 | |
2392 | if (!folio_make_device_exclusive(folio, mm, address: start, owner)) { |
2393 | folio_unlock(folio); |
2394 | folio_put(folio); |
2395 | pages[i] = NULL; |
2396 | } |
2397 | } |
2398 | |
2399 | return npages; |
2400 | } |
2401 | EXPORT_SYMBOL_GPL(make_device_exclusive_range); |
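
/*
 * Illustrative sketch, not part of the original file: the calling convention
 * a driver is expected to follow, per the kernel-doc above.  The helper
 * example_grant_device_access() and the owner cookie are hypothetical; a real
 * driver pairs this with an MMU notifier that filters MMU_NOTIFY_EXCLUSIVE
 * events carrying the same owner pointer.
 */
static __maybe_unused int example_grant_device_access(struct mm_struct *mm,
		unsigned long addr, void *owner)
{
	struct page *page = NULL;
	long npages;

	/* get_user_pages_remote() inside requires the mmap read lock. */
	mmap_read_lock(mm);
	npages = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
					     &page, owner);
	mmap_read_unlock(mm);
	if (npages < 0)
		return npages;

	/* A NULL slot means this page could not be made exclusive. */
	if (!page)
		return -EBUSY;

	/*
	 * Program the device mapping here, under a device-specific lock.
	 * Dropping the page lock and reference afterwards means the next
	 * CPU access to the range revokes the exclusive access.
	 */
	unlock_page(page);
	put_page(page);
	return 0;
}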
2402 | #endif |
2403 | |
2404 | void __put_anon_vma(struct anon_vma *anon_vma) |
2405 | { |
2406 | struct anon_vma *root = anon_vma->root; |
2407 | |
2408 | anon_vma_free(anon_vma); |
2409 | if (root != anon_vma && atomic_dec_and_test(v: &root->refcount)) |
2410 | anon_vma_free(anon_vma: root); |
2411 | } |
2412 | |
2413 | static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, |
2414 | struct rmap_walk_control *rwc) |
2415 | { |
2416 | struct anon_vma *anon_vma; |
2417 | |
2418 | if (rwc->anon_lock) |
2419 | return rwc->anon_lock(folio, rwc); |
2420 | |
2421 | /* |
2422 | * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() |
2423 | * because that depends on page_mapped(); but not all its usages |
2424 | * are holding mmap_lock. Users without mmap_lock are required to |
	 * take a reference count to prevent the anon_vma disappearing.
2426 | */ |
2427 | anon_vma = folio_anon_vma(folio); |
2428 | if (!anon_vma) |
2429 | return NULL; |
2430 | |
2431 | if (anon_vma_trylock_read(anon_vma)) |
2432 | goto out; |
2433 | |
2434 | if (rwc->try_lock) { |
2435 | anon_vma = NULL; |
2436 | rwc->contended = true; |
2437 | goto out; |
2438 | } |
2439 | |
2440 | anon_vma_lock_read(anon_vma); |
2441 | out: |
2442 | return anon_vma; |
2443 | } |
2444 | |
2445 | /* |
2446 | * rmap_walk_anon - do something to anonymous page using the object-based |
2447 | * rmap method |
2448 | * @folio: the folio to be handled |
2449 | * @rwc: control variable according to each walk type |
2450 | * @locked: caller holds relevant rmap lock |
2451 | * |
2452 | * Find all the mappings of a folio using the mapping pointer and the vma |
2453 | * chains contained in the anon_vma struct it points to. |
2454 | */ |
2455 | static void rmap_walk_anon(struct folio *folio, |
2456 | struct rmap_walk_control *rwc, bool locked) |
2457 | { |
2458 | struct anon_vma *anon_vma; |
2459 | pgoff_t pgoff_start, pgoff_end; |
2460 | struct anon_vma_chain *avc; |
2461 | |
2462 | if (locked) { |
2463 | anon_vma = folio_anon_vma(folio); |
		/* Did the anon_vma disappear under us? */
2465 | VM_BUG_ON_FOLIO(!anon_vma, folio); |
2466 | } else { |
2467 | anon_vma = rmap_walk_anon_lock(folio, rwc); |
2468 | } |
2469 | if (!anon_vma) |
2470 | return; |
2471 | |
2472 | pgoff_start = folio_pgoff(folio); |
2473 | pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; |
2474 | anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, |
2475 | pgoff_start, pgoff_end) { |
2476 | struct vm_area_struct *vma = avc->vma; |
2477 | unsigned long address = vma_address(page: &folio->page, vma); |
2478 | |
2479 | VM_BUG_ON_VMA(address == -EFAULT, vma); |
2480 | cond_resched(); |
2481 | |
2482 | if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) |
2483 | continue; |
2484 | |
2485 | if (!rwc->rmap_one(folio, vma, address, rwc->arg)) |
2486 | break; |
2487 | if (rwc->done && rwc->done(folio)) |
2488 | break; |
2489 | } |
2490 | |
2491 | if (!locked) |
2492 | anon_vma_unlock_read(anon_vma); |
2493 | } |
2494 | |
2495 | /* |
2496 | * rmap_walk_file - do something to file page using the object-based rmap method |
2497 | * @folio: the folio to be handled |
2498 | * @rwc: control variable according to each walk type |
2499 | * @locked: caller holds relevant rmap lock |
2500 | * |
2501 | * Find all the mappings of a folio using the mapping pointer and the vma chains |
2502 | * contained in the address_space struct it points to. |
2503 | */ |
2504 | static void rmap_walk_file(struct folio *folio, |
2505 | struct rmap_walk_control *rwc, bool locked) |
2506 | { |
2507 | struct address_space *mapping = folio_mapping(folio); |
2508 | pgoff_t pgoff_start, pgoff_end; |
2509 | struct vm_area_struct *vma; |
2510 | |
2511 | /* |
2512 | * The page lock not only makes sure that page->mapping cannot |
2513 | * suddenly be NULLified by truncation, it makes sure that the |
2514 | * structure at mapping cannot be freed and reused yet, |
2515 | * so we can safely take mapping->i_mmap_rwsem. |
2516 | */ |
2517 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
2518 | |
2519 | if (!mapping) |
2520 | return; |
2521 | |
2522 | pgoff_start = folio_pgoff(folio); |
2523 | pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; |
2524 | if (!locked) { |
2525 | if (i_mmap_trylock_read(mapping)) |
2526 | goto lookup; |
2527 | |
2528 | if (rwc->try_lock) { |
2529 | rwc->contended = true; |
2530 | return; |
2531 | } |
2532 | |
2533 | i_mmap_lock_read(mapping); |
2534 | } |
2535 | lookup: |
2536 | vma_interval_tree_foreach(vma, &mapping->i_mmap, |
2537 | pgoff_start, pgoff_end) { |
2538 | unsigned long address = vma_address(page: &folio->page, vma); |
2539 | |
2540 | VM_BUG_ON_VMA(address == -EFAULT, vma); |
2541 | cond_resched(); |
2542 | |
2543 | if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) |
2544 | continue; |
2545 | |
2546 | if (!rwc->rmap_one(folio, vma, address, rwc->arg)) |
2547 | goto done; |
2548 | if (rwc->done && rwc->done(folio)) |
2549 | goto done; |
2550 | } |
2551 | |
2552 | done: |
2553 | if (!locked) |
2554 | i_mmap_unlock_read(mapping); |
2555 | } |
2556 | |
2557 | void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) |
2558 | { |
2559 | if (unlikely(folio_test_ksm(folio))) |
2560 | rmap_walk_ksm(folio, rwc); |
2561 | else if (folio_test_anon(folio)) |
2562 | rmap_walk_anon(folio, rwc, locked: false); |
2563 | else |
2564 | rmap_walk_file(folio, rwc, locked: false); |
2565 | } |
2566 | |
2567 | /* Like rmap_walk, but caller holds relevant rmap lock */ |
2568 | void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) |
2569 | { |
2570 | /* no ksm support for now */ |
2571 | VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); |
2572 | if (folio_test_anon(folio)) |
2573 | rmap_walk_anon(folio, rwc, locked: true); |
2574 | else |
2575 | rmap_walk_file(folio, rwc, locked: true); |
2576 | } |
2577 | |
2578 | #ifdef CONFIG_HUGETLB_PAGE |
2579 | /* |
2580 | * The following two functions are for anonymous (private mapped) hugepages. |
2581 | * Unlike common anonymous pages, anonymous hugepages have no accounting code |
2582 | * and no lru code, because we handle hugepages differently from common pages. |
2583 | * |
2584 | * RMAP_COMPOUND is ignored. |
2585 | */ |
2586 | void hugepage_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, |
2587 | unsigned long address, rmap_t flags) |
2588 | { |
2589 | VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); |
2590 | |
2591 | atomic_inc(v: &folio->_entire_mapcount); |
2592 | if (flags & RMAP_EXCLUSIVE) |
2593 | SetPageAnonExclusive(&folio->page); |
2594 | VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 && |
2595 | PageAnonExclusive(&folio->page), folio); |
2596 | } |
2597 | |
2598 | void hugepage_add_new_anon_rmap(struct folio *folio, |
2599 | struct vm_area_struct *vma, unsigned long address) |
2600 | { |
2601 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); |
2602 | /* increment count (starts at -1) */ |
2603 | atomic_set(v: &folio->_entire_mapcount, i: 0); |
2604 | folio_clear_hugetlb_restore_reserve(folio); |
2605 | __folio_set_anon(folio, vma, address, exclusive: true); |
2606 | SetPageAnonExclusive(&folio->page); |
2607 | } |
2608 | #endif /* CONFIG_HUGETLB_PAGE */ |
2609 | |