// SPDX-License-Identifier: GPL-2.0
/*
 * Device Memory Migration functionality.
 *
 * Originally written by Jérôme Glisse.
 */
#include <linux/export.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swapops.h>
#include <asm/tlbflush.h>
#include "internal.h"

static int migrate_vma_collect_skip(unsigned long start,
				    unsigned long end,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = 0;
	}

	return 0;
}

static int migrate_vma_collect_hole(unsigned long start,
				    unsigned long end,
				    __always_unused int depth,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	/* Only allow populating anonymous memory. */
	if (!vma_is_anonymous(walk->vma))
		return migrate_vma_collect_skip(start, end, walk);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
		migrate->dst[migrate->npages] = 0;
		migrate->npages++;
		migrate->cpages++;
	}

	return 0;
}

static int migrate_vma_collect_pmd(pmd_t *pmdp,
				   unsigned long start,
				   unsigned long end,
				   struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start, unmapped = 0;
	spinlock_t *ptl;
	pte_t *ptep;

again:
	if (pmd_none(*pmdp))
		return migrate_vma_collect_hole(start, end, -1, walk);

	if (pmd_trans_huge(*pmdp)) {
		struct page *page;

		ptl = pmd_lock(mm, pmdp);
		if (unlikely(!pmd_trans_huge(*pmdp))) {
			spin_unlock(ptl);
			goto again;
		}

		page = pmd_page(*pmdp);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmdp, addr);
		} else {
			int ret;

			get_page(page);
			spin_unlock(ptl);
			if (unlikely(!trylock_page(page)))
				return migrate_vma_collect_skip(start, end,
								walk);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (ret)
				return migrate_vma_collect_skip(start, end,
								walk);
		}
	}

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	if (!ptep)
		goto again;
	arch_enter_lazy_mmu_mode();

	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		unsigned long mpfn = 0, pfn;
		struct page *page;
		swp_entry_t entry;
		pte_t pte;

		pte = ptep_get(ptep);

		if (pte_none(pte)) {
			if (vma_is_anonymous(vma)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
			}
			goto next;
		}

		if (!pte_present(pte)) {
			/*
			 * Only care about unaddressable device page special
			 * page table entries. Other special swap entries are
			 * not migratable, and we ignore regular swapped pages.
			 */
			entry = pte_to_swp_entry(pte);
			if (!is_device_private_entry(entry))
				goto next;

			page = pfn_swap_entry_to_page(entry);
			if (!(migrate->flags &
			      MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
			    page->pgmap->owner != migrate->pgmap_owner)
				goto next;

			mpfn = migrate_pfn(page_to_pfn(page)) |
					MIGRATE_PFN_MIGRATE;
			if (is_writable_device_private_entry(entry))
				mpfn |= MIGRATE_PFN_WRITE;
		} else {
			pfn = pte_pfn(pte);
			if (is_zero_pfn(pfn) &&
			    (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
				goto next;
			}
			page = vm_normal_page(migrate->vma, addr, pte);
			if (page && !is_zone_device_page(page) &&
			    !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
				goto next;
			else if (page && is_device_coherent_page(page) &&
			    (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
			     page->pgmap->owner != migrate->pgmap_owner))
				goto next;
			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
		}

		/* FIXME support THP */
		if (!page || !page->mapping || PageTransCompound(page)) {
			mpfn = 0;
			goto next;
		}

		/*
		 * By getting a reference on the page we pin it and that blocks
		 * any kind of migration. Side effect is that it "freezes" the
		 * pte.
		 *
		 * We drop this reference after isolating the page from the lru
		 * for non-device pages (device pages are not on the lru and
		 * thus can't be dropped from it).
		 */
		get_page(page);

		/*
		 * We rely on trylock_page() to avoid deadlock between
		 * concurrent migrations where each is waiting on the other's
		 * page lock. If we can't immediately lock the page we fail
		 * this migration as it is only best effort anyway.
		 *
		 * If we can lock the page it's safe to set up a migration
		 * entry now. In the common case where the page is mapped once
		 * in a single process setting up the migration entry now is an
		 * optimisation to avoid walking the rmap later with
		 * try_to_migrate().
		 */
		if (trylock_page(page)) {
			bool anon_exclusive;
			pte_t swp_pte;

			flush_cache_page(vma, addr, pte_pfn(pte));
			anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
			if (anon_exclusive) {
				pte = ptep_clear_flush(vma, addr, ptep);

				if (page_try_share_anon_rmap(page)) {
					set_pte_at(mm, addr, ptep, pte);
					unlock_page(page);
					put_page(page);
					mpfn = 0;
					goto next;
				}
			} else {
				pte = ptep_get_and_clear(mm, addr, ptep);
			}

			migrate->cpages++;

			/* Set the dirty flag on the folio now the pte is gone. */
			if (pte_dirty(pte))
				folio_mark_dirty(page_folio(page));

			/* Setup special migration page table entry */
			if (mpfn & MIGRATE_PFN_WRITE)
				entry = make_writable_migration_entry(
							page_to_pfn(page));
			else if (anon_exclusive)
				entry = make_readable_exclusive_migration_entry(
							page_to_pfn(page));
			else
				entry = make_readable_migration_entry(
							page_to_pfn(page));
			if (pte_present(pte)) {
				if (pte_young(pte))
					entry = make_migration_entry_young(entry);
				if (pte_dirty(pte))
					entry = make_migration_entry_dirty(entry);
			}
			swp_pte = swp_entry_to_pte(entry);
			if (pte_present(pte)) {
				if (pte_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			} else {
				if (pte_swp_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_swp_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			}
			set_pte_at(mm, addr, ptep, swp_pte);

			/*
			 * This is like a regular unmap: we remove the rmap and
			 * drop the page refcount. The page won't be freed, as
			 * we took a reference just above.
			 */
			page_remove_rmap(page, vma, false);
			put_page(page);

			if (pte_present(pte))
				unmapped++;
		} else {
			put_page(page);
			mpfn = 0;
		}

next:
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = mpfn;
	}

	/* Only flush the TLB if we actually modified any entries */
	if (unmapped)
		flush_tlb_range(walk->vma, start, end);

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(ptep - 1, ptl);

	return 0;
}

static const struct mm_walk_ops migrate_vma_walk_ops = {
	.pmd_entry		= migrate_vma_collect_pmd,
	.pte_hole		= migrate_vma_collect_hole,
	.walk_lock		= PGWALK_RDLOCK,
};

/*
 * migrate_vma_collect() - collect pages over a range of virtual addresses
 * @migrate: migrate struct containing all migration information
 *
 * This will walk the CPU page table. For each virtual address backed by a
 * valid page, it updates the src array and takes a reference on the page, in
 * order to pin the page until we lock it and unmap it.
 */
static void migrate_vma_collect(struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;

	/*
	 * Note that the pgmap_owner is passed to the mmu notifier callback so
	 * that the registered device driver can skip invalidating device
	 * private page mappings that won't be migrated.
	 */
	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
		migrate->vma->vm_mm, migrate->start, migrate->end,
		migrate->pgmap_owner);
	mmu_notifier_invalidate_range_start(&range);

	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
			&migrate_vma_walk_ops, migrate);

	mmu_notifier_invalidate_range_end(&range);
	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
}

/*
 * migrate_vma_check_page() - check if page is pinned or not
 * @page: struct page to check
 *
 * Pinned pages cannot be migrated. This is the same test as in
 * folio_migrate_mapping(), except that here we allow migration of a
 * ZONE_DEVICE page.
 */
static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
{
	/*
	 * One extra ref because the caller holds an extra reference, either
	 * from isolate_lru_page() for a regular page, or migrate_vma_collect()
	 * for a device page.
	 */
	int extra = 1 + (page == fault_page);

	/*
	 * FIXME support THP (transparent huge page), it is a bit more complex
	 * to check them than regular pages, because they can be mapped with a
	 * pmd or with a pte (split pte mapping).
	 */
	if (PageCompound(page))
		return false;

	/* Pages from ZONE_DEVICE have one extra reference */
	if (is_zone_device_page(page))
		extra++;

	/* For file-backed pages */
	if (page_mapping(page))
		extra += 1 + page_has_private(page);

	if ((page_count(page) - extra) > page_mapcount(page))
		return false;

	return true;
}
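
/*
 * A minimal worked example of the check above (illustrative values only, not
 * taken from any particular trace): consider an anonymous page, not in the
 * swap cache, whose sole mapping was already replaced with a migration entry
 * by migrate_vma_collect_pmd() and which the caller then isolated from the
 * LRU. At that point page_mapcount() == 0 and page_count() == 1 (the caller's
 * isolation reference), so with extra == 1 the test (1 - 1) > 0 is false and
 * the page is treated as migratable. If some other party also held a
 * reference, e.g. a pending GUP pin, page_count() would be 2 and (2 - 1) > 0
 * would flag the page as pinned, so it would be restored instead of migrated.
 */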

/*
 * Unmaps pages for migration. Returns number of source pfns marked as
 * migrating.
 */
static unsigned long migrate_device_unmap(unsigned long *src_pfns,
					  unsigned long npages,
					  struct page *fault_page)
{
	unsigned long i, restore = 0;
	bool allow_drain = true;
	unsigned long unmapped = 0;

	lru_add_drain();

	for (i = 0; i < npages; i++) {
		struct page *page = migrate_pfn_to_page(src_pfns[i]);
		struct folio *folio;

		if (!page) {
			if (src_pfns[i] & MIGRATE_PFN_MIGRATE)
				unmapped++;
			continue;
		}

		/* ZONE_DEVICE pages are not on LRU */
		if (!is_zone_device_page(page)) {
			if (!PageLRU(page) && allow_drain) {
				/* Drain CPU's lru cache */
				lru_add_drain_all();
				allow_drain = false;
			}

			if (!isolate_lru_page(page)) {
				src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
				restore++;
				continue;
			}

			/* Drop the reference we took in collect */
			put_page(page);
		}

		folio = page_folio(page);
		if (folio_mapped(folio))
			try_to_migrate(folio, 0);

		if (page_mapped(page) ||
		    !migrate_vma_check_page(page, fault_page)) {
			if (!is_zone_device_page(page)) {
				get_page(page);
				putback_lru_page(page);
			}

			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
			restore++;
			continue;
		}

		unmapped++;
	}

	for (i = 0; i < npages && restore; i++) {
		struct page *page = migrate_pfn_to_page(src_pfns[i]);
		struct folio *folio;

		if (!page || (src_pfns[i] & MIGRATE_PFN_MIGRATE))
			continue;

		folio = page_folio(page);
		remove_migration_ptes(folio, folio, false);

		src_pfns[i] = 0;
		folio_unlock(folio);
		folio_put(folio);
		restore--;
	}

	return unmapped;
}

/*
 * migrate_vma_unmap() - replace page mapping with special migration pte entry
 * @migrate: migrate struct containing all migration information
 *
 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
 * special migration pte entry and check whether each page is pinned. Pinned
 * pages are restored because we cannot migrate them.
 *
 * This is the last step before we call the device driver callback to allocate
 * destination memory and copy the contents of the original page over to the
 * new page.
 */
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
	migrate->cpages = migrate_device_unmap(migrate->src, migrate->npages,
					migrate->fault_page);
}

/**
 * migrate_vma_setup() - prepare to migrate a range of memory
 * @args: contains the vma, start, and pfns arrays for the migration
 *
 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
 * without an error.
 *
 * Prepare to migrate a virtual address range of memory by collecting all the
 * pages backing each virtual address in the range, saving them inside the
 * src array. Then lock those pages and unmap them. Once the pages are locked
 * and unmapped, check whether each page is pinned or not. Pages that aren't
 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
 * corresponding src array entry. Any pages that are pinned are then restored,
 * by remapping and unlocking them.
 *
 * The caller should then allocate destination memory and copy source memory to
 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flag set). Once these are allocated and copied, the caller must update each
 * corresponding entry in the dst array with the pfn value of the destination
 * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
 * lock_page().
 *
 * Note that the caller does not have to migrate all the pages that are marked
 * with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a
 * migration from device memory to system memory. If the caller cannot migrate
 * a device page back to system memory, then it must return VM_FAULT_SIGBUS,
 * which has severe consequences for the userspace process, so it must be
 * avoided if at all possible.
 *
 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
 * true) we set the MIGRATE_PFN_MIGRATE flag in the corresponding source array
 * entry, thus allowing the caller to allocate device memory for those unbacked
 * virtual addresses. For this the caller simply has to allocate device memory
 * and properly set the destination entry like for regular migration. Note that
 * this can still fail, and thus inside the device driver you must check if the
 * migration was successful for those entries after calling
 * migrate_vma_pages(), just like for regular migration.
 *
 * After that, the caller must call migrate_vma_pages() to go over each entry
 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
 * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID
 * flag set, then migrate_vma_pages() migrates the struct page information from
 * the source struct page to the destination struct page. If it fails to
 * migrate the struct page information, then it clears the MIGRATE_PFN_MIGRATE
 * flag in the src array.
 *
 * At this point all successfully migrated pages have an entry in the src
 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
 * array entry with MIGRATE_PFN_VALID flag set.
 *
 * Once migrate_vma_pages() returns the caller may inspect which pages were
 * successfully migrated, and which were not. Successfully migrated pages will
 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table after migrate_vma_pages() because
 * both destination and source page are still locked, and the mmap_lock is held
 * in read mode (hence no one can unmap the range being migrated).
 *
 * Once the caller is done cleaning up things and updating its page table (if
 * it chose to do so, this is not an obligation) it finally calls
 * migrate_vma_finalize() to update the CPU page table to point to new pages
 * for successfully migrated pages or otherwise restore the CPU page table to
 * point to the original source pages.
 */
int migrate_vma_setup(struct migrate_vma *args)
{
	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;

	args->start &= PAGE_MASK;
	args->end &= PAGE_MASK;
	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
		return -EINVAL;
	if (nr_pages <= 0)
		return -EINVAL;
	if (args->start < args->vma->vm_start ||
	    args->start >= args->vma->vm_end)
		return -EINVAL;
	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
		return -EINVAL;
	if (!args->src || !args->dst)
		return -EINVAL;
	if (args->fault_page && !is_device_private_page(args->fault_page))
		return -EINVAL;

	memset(args->src, 0, sizeof(*args->src) * nr_pages);
	args->cpages = 0;
	args->npages = 0;

	migrate_vma_collect(args);

	if (args->cpages)
		migrate_vma_unmap(args);

	/*
	 * At this point pages are locked and unmapped, and thus they have
	 * stable content and can safely be copied to destination memory that
	 * is allocated by the drivers.
	 */
	return 0;
}
EXPORT_SYMBOL(migrate_vma_setup);
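
/*
 * A minimal usage sketch of the flow described above, for a driver migrating
 * a single page of system memory into device private memory. This only
 * illustrates the call sequence; drv_alloc_device_page(), drv_copy_to_device(),
 * drv_clear_device_page(), drv_update_device_page_table() and drv_priv stand
 * in for whatever hypothetical device-specific helpers and owner cookie a real
 * driver would use, and most error handling is omitted:
 *
 *	unsigned long src_pfn = 0, dst_pfn = 0;
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= addr,
 *		.end		= addr + PAGE_SIZE,
 *		.src		= &src_pfn,
 *		.dst		= &dst_pfn,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *		.pgmap_owner	= drv_priv,
 *	};
 *	struct page *dpage = NULL;
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *
 *	if (src_pfn & MIGRATE_PFN_MIGRATE) {
 *		dpage = drv_alloc_device_page(drv_priv);
 *		if (dpage) {
 *			struct page *spage = migrate_pfn_to_page(src_pfn);
 *
 *			lock_page(dpage);
 *			if (spage)
 *				drv_copy_to_device(dpage, spage);
 *			else
 *				drv_clear_device_page(dpage);
 *			dst_pfn = migrate_pfn(page_to_pfn(dpage));
 *		}
 *	}
 *
 *	migrate_vma_pages(&args);
 *	if (src_pfn & MIGRATE_PFN_MIGRATE)
 *		drv_update_device_page_table(dpage, addr);
 *	migrate_vma_finalize(&args);
 */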

/*
 * This code closely matches the code in:
 *   __handle_mm_fault()
 *     handle_pte_fault()
 *       do_anonymous_page()
 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
 * private or coherent page.
 */
static void migrate_vma_insert_page(struct migrate_vma *migrate,
				    unsigned long addr,
				    struct page *page,
				    unsigned long *src)
{
	struct vm_area_struct *vma = migrate->vma;
	struct mm_struct *mm = vma->vm_mm;
	bool flush = false;
	spinlock_t *ptl;
	pte_t entry;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	pte_t orig_pte;

	/* Only allow populating anonymous memory */
	if (!vma_is_anonymous(vma))
		goto abort;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (!p4dp)
		goto abort;
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		goto abort;
	pmdp = pmd_alloc(mm, pudp, addr);
	if (!pmdp)
		goto abort;
	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
		goto abort;
	if (pte_alloc(mm, pmdp))
		goto abort;
	if (unlikely(anon_vma_prepare(vma)))
		goto abort;
	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
		goto abort;

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	if (is_device_private_page(page)) {
		swp_entry_t swp_entry;

		if (vma->vm_flags & VM_WRITE)
			swp_entry = make_writable_device_private_entry(
						page_to_pfn(page));
		else
			swp_entry = make_readable_device_private_entry(
						page_to_pfn(page));
		entry = swp_entry_to_pte(swp_entry);
	} else {
		if (is_zone_device_page(page) &&
		    !is_device_coherent_page(page)) {
			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
			goto abort;
		}
		entry = mk_pte(page, vma->vm_page_prot);
		if (vma->vm_flags & VM_WRITE)
			entry = pte_mkwrite(pte_mkdirty(entry), vma);
	}

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	if (!ptep)
		goto abort;
	orig_pte = ptep_get(ptep);

	if (check_stable_address_space(mm))
		goto unlock_abort;

	if (pte_present(orig_pte)) {
		unsigned long pfn = pte_pfn(orig_pte);

		if (!is_zero_pfn(pfn))
			goto unlock_abort;
		flush = true;
	} else if (!pte_none(orig_pte))
		goto unlock_abort;

	/*
	 * Check for userfaultfd but do not deliver the fault. Instead,
	 * just back off.
	 */
	if (userfaultfd_missing(vma))
		goto unlock_abort;

	inc_mm_counter(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, addr);
	if (!is_zone_device_page(page))
		lru_cache_add_inactive_or_unevictable(page, vma);
	get_page(page);

	if (flush) {
		flush_cache_page(vma, addr, pte_pfn(orig_pte));
		ptep_clear_flush(vma, addr, ptep);
		set_pte_at_notify(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	} else {
		/* No need to invalidate - it was non-present before */
		set_pte_at(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	}

	pte_unmap_unlock(ptep, ptl);
	*src = MIGRATE_PFN_MIGRATE;
	return;

unlock_abort:
	pte_unmap_unlock(ptep, ptl);
abort:
	*src &= ~MIGRATE_PFN_MIGRATE;
}

static void __migrate_device_pages(unsigned long *src_pfns,
				unsigned long *dst_pfns, unsigned long npages,
				struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;
	unsigned long i;
	bool notified = false;

	for (i = 0; i < npages; i++) {
		struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
		struct page *page = migrate_pfn_to_page(src_pfns[i]);
		struct address_space *mapping;
		int r;

		if (!newpage) {
			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (!page) {
			unsigned long addr;

			if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
				continue;

			/*
			 * The only time there is no vma is when called from
			 * migrate_device_coherent_page(). However this isn't
			 * called if the page could not be unmapped.
			 */
			VM_BUG_ON(!migrate);
			addr = migrate->start + i*PAGE_SIZE;
			if (!notified) {
				notified = true;

				mmu_notifier_range_init_owner(&range,
					MMU_NOTIFY_MIGRATE, 0,
					migrate->vma->vm_mm, addr, migrate->end,
					migrate->pgmap_owner);
				mmu_notifier_invalidate_range_start(&range);
			}
			migrate_vma_insert_page(migrate, addr, newpage,
						&src_pfns[i]);
			continue;
		}

		mapping = page_mapping(page);

		if (is_device_private_page(newpage) ||
		    is_device_coherent_page(newpage)) {
			if (mapping) {
				struct folio *folio;

				folio = page_folio(page);

				/*
				 * For now only support anonymous memory
				 * migrating to device private or coherent
				 * memory.
				 *
				 * Try to get rid of swap cache if possible.
				 */
				if (!folio_test_anon(folio) ||
				    !folio_free_swap(folio)) {
					src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
					continue;
				}
			}
		} else if (is_zone_device_page(newpage)) {
			/*
			 * Other types of ZONE_DEVICE page are not supported.
			 */
			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (migrate && migrate->fault_page == page)
			r = migrate_folio_extra(mapping, page_folio(newpage),
						page_folio(page),
						MIGRATE_SYNC_NO_COPY, 1);
		else
			r = migrate_folio(mapping, page_folio(newpage),
					page_folio(page), MIGRATE_SYNC_NO_COPY);
		if (r != MIGRATEPAGE_SUCCESS)
			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
	}

	if (notified)
		mmu_notifier_invalidate_range_end(&range);
}

/**
 * migrate_device_pages() - migrate meta-data from src page to dst page
 * @src_pfns: src_pfns returned from migrate_device_range()
 * @dst_pfns: array of pfns allocated by the driver to migrate memory to
 * @npages: number of pages in the range
 *
 * Equivalent to migrate_vma_pages(). This is called to migrate struct page
 * meta-data from source struct page to destination.
 */
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
			unsigned long npages)
{
	__migrate_device_pages(src_pfns, dst_pfns, npages, NULL);
}
EXPORT_SYMBOL(migrate_device_pages);

/**
 * migrate_vma_pages() - migrate meta-data from src page to dst page
 * @migrate: migrate struct containing all migration information
 *
 * This migrates struct page meta-data from source struct page to destination
 * struct page. This effectively finishes the migration from source page to the
 * destination page.
 */
void migrate_vma_pages(struct migrate_vma *migrate)
{
	__migrate_device_pages(migrate->src, migrate->dst, migrate->npages, migrate);
}
EXPORT_SYMBOL(migrate_vma_pages);

/*
 * migrate_device_finalize() - complete page migration
 * @src_pfns: src_pfns returned from migrate_device_range()
 * @dst_pfns: array of pfns allocated by the driver to migrate memory to
 * @npages: number of pages in the range
 *
 * Completes migration of the page by removing special migration entries.
 * Drivers must ensure copying of page data is complete and visible to the CPU
 * before calling this.
 */
void migrate_device_finalize(unsigned long *src_pfns,
			unsigned long *dst_pfns, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct folio *dst, *src;
		struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
		struct page *page = migrate_pfn_to_page(src_pfns[i]);

		if (!page) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			continue;
		}

		if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			newpage = page;
		}

		src = page_folio(page);
		dst = page_folio(newpage);
		remove_migration_ptes(src, dst, false);
		folio_unlock(src);

		if (is_zone_device_page(page))
			put_page(page);
		else
			putback_lru_page(page);

		if (newpage != page) {
			unlock_page(newpage);
			if (is_zone_device_page(newpage))
				put_page(newpage);
			else
				putback_lru_page(newpage);
		}
	}
}
EXPORT_SYMBOL(migrate_device_finalize);

/**
 * migrate_vma_finalize() - restore CPU page table entry
 * @migrate: migrate struct containing all migration information
 *
 * This replaces the special migration pte entry with either a mapping to the
 * new page if migration was successful for that page, or to the original page
 * otherwise.
 *
 * This also unlocks the pages and puts them back on the lru, or drops the
 * extra refcount, for device pages.
 */
void migrate_vma_finalize(struct migrate_vma *migrate)
{
	migrate_device_finalize(migrate->src, migrate->dst, migrate->npages);
}
EXPORT_SYMBOL(migrate_vma_finalize);

/**
 * migrate_device_range() - migrate device private pfns to normal memory.
 * @src_pfns: array large enough to hold migrating source device private pfns.
 * @start: starting pfn in the range to migrate.
 * @npages: number of pages to migrate.
 *
 * This is similar in concept to migrate_vma_setup() except that instead of
 * looking up pages based on virtual address mappings it operates on a range
 * of device pfns that should be migrated to system memory.
 *
 * This is useful when a driver needs to free device memory but doesn't know
 * the virtual mappings of every page that may be in device memory. For example
 * this is often the case when a driver is being unloaded or unbound from a
 * device.
 *
 * Like migrate_vma_setup() this function will take a reference and lock any
 * migrating pages that aren't free before unmapping them. Drivers may then
 * allocate destination pages and start copying data from the device to CPU
 * memory before calling migrate_device_pages().
 */
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
			unsigned long npages)
{
	unsigned long i, pfn;

	for (pfn = start, i = 0; i < npages; pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		if (!get_page_unless_zero(page)) {
			src_pfns[i] = 0;
			continue;
		}

		if (!trylock_page(page)) {
			src_pfns[i] = 0;
			put_page(page);
			continue;
		}

		src_pfns[i] = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
	}

	migrate_device_unmap(src_pfns, npages, NULL);

	return 0;
}
EXPORT_SYMBOL(migrate_device_range);
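
/*
 * A minimal sketch of how a driver might use migrate_device_range() when
 * tearing down a device memory region. drv_alloc_and_copy_to_system() is a
 * hypothetical helper that allocates a locked system page and copies the
 * device page's contents into it; dev_start_pfn and dev_npages describe the
 * device private resource being freed. Allocation failure handling and error
 * checks are omitted:
 *
 *	unsigned long *src = kcalloc(dev_npages, sizeof(*src), GFP_KERNEL);
 *	unsigned long *dst = kcalloc(dev_npages, sizeof(*dst), GFP_KERNEL);
 *	unsigned long i;
 *
 *	migrate_device_range(src, dev_start_pfn, dev_npages);
 *
 *	for (i = 0; i < dev_npages; i++) {
 *		struct page *spage = migrate_pfn_to_page(src[i]);
 *		struct page *dpage;
 *
 *		if (!spage || !(src[i] & MIGRATE_PFN_MIGRATE))
 *			continue;
 *		dpage = drv_alloc_and_copy_to_system(spage);
 *		if (dpage)
 *			dst[i] = migrate_pfn(page_to_pfn(dpage));
 *	}
 *
 *	migrate_device_pages(src, dst, dev_npages);
 *	migrate_device_finalize(src, dst, dev_npages);
 *	kfree(dst);
 *	kfree(src);
 */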

/*
 * Migrate a device coherent page back to normal memory. The caller should have
 * a reference on the page, which will be copied to the new page if migration
 * is successful, or dropped on failure.
 */
int migrate_device_coherent_page(struct page *page)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct page *dpage;

	WARN_ON_ONCE(PageCompound(page));

	lock_page(page);
	src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;

	/*
	 * We don't have a VMA and don't need to walk the page tables to find
	 * the source page. So call migrate_device_unmap() directly to unmap
	 * the page as migrate_vma_setup() will fail if args.vma == NULL.
	 */
	migrate_device_unmap(&src_pfn, 1, NULL);
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
		return -EBUSY;

	dpage = alloc_page(GFP_USER | __GFP_NOWARN);
	if (dpage) {
		lock_page(dpage);
		dst_pfn = migrate_pfn(page_to_pfn(dpage));
	}

	migrate_device_pages(&src_pfn, &dst_pfn, 1);
	if (src_pfn & MIGRATE_PFN_MIGRATE)
		copy_highpage(dpage, page);
	migrate_device_finalize(&src_pfn, &dst_pfn, 1);

	if (src_pfn & MIGRATE_PFN_MIGRATE)
		return 0;
	return -EBUSY;
}