1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/mm.h> |
3 | #include <linux/gfp.h> |
4 | #include <linux/hugetlb.h> |
5 | #include <asm/pgalloc.h> |
6 | #include <asm/tlb.h> |
7 | #include <asm/fixmap.h> |
8 | #include <asm/mtrr.h> |
9 | |
10 | #ifdef CONFIG_DYNAMIC_PHYSICAL_MASK |
11 | phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1; |
12 | EXPORT_SYMBOL(physical_mask); |
13 | #endif |
14 | |
15 | #ifdef CONFIG_HIGHPTE |
16 | #define PGTABLE_HIGHMEM __GFP_HIGHMEM |
17 | #else |
18 | #define PGTABLE_HIGHMEM 0 |
19 | #endif |
20 | |
21 | #ifndef CONFIG_PARAVIRT |
22 | static inline |
23 | void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table) |
24 | { |
25 | tlb_remove_page(tlb, table); |
26 | } |
27 | #endif |
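
/*
 * GFP mask for user PTE pages.  With CONFIG_HIGHPTE these pages may come
 * from highmem; setup_userpte() below can clear __GFP_HIGHMEM at boot time.
 */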
28 | |
29 | gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM; |
30 | |
31 | pgtable_t pte_alloc_one(struct mm_struct *mm) |
32 | { |
	return __pte_alloc_one(mm, __userpte_alloc_gfp);
34 | } |
35 | |
36 | static int __init setup_userpte(char *arg) |
37 | { |
38 | if (!arg) |
39 | return -EINVAL; |
40 | |
41 | /* |
42 | * "userpte=nohigh" disables allocation of user pagetables in |
43 | * high memory. |
44 | */ |
	if (strcmp(arg, "nohigh") == 0)
46 | __userpte_alloc_gfp &= ~__GFP_HIGHMEM; |
47 | else |
48 | return -EINVAL; |
49 | return 0; |
50 | } |
51 | early_param("userpte" , setup_userpte); |
52 | |
53 | void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) |
54 | { |
55 | pagetable_pte_dtor(page_ptdesc(pte)); |
56 | paravirt_release_pte(page_to_pfn(pte)); |
	paravirt_tlb_remove_table(tlb, pte);
58 | } |
59 | |
60 | #if CONFIG_PGTABLE_LEVELS > 2 |
61 | void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) |
62 | { |
	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
64 | paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); |
65 | /* |
66 | * NOTE! For PAE, any changes to the top page-directory-pointer-table |
67 | * entries need a full cr3 reload to flush. |
68 | */ |
69 | #ifdef CONFIG_X86_PAE |
70 | tlb->need_flush_all = 1; |
71 | #endif |
72 | pagetable_pmd_dtor(ptdesc); |
73 | paravirt_tlb_remove_table(tlb, ptdesc_page(ptdesc)); |
74 | } |
75 | |
76 | #if CONFIG_PGTABLE_LEVELS > 3 |
77 | void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) |
78 | { |
	struct ptdesc *ptdesc = virt_to_ptdesc(pud);
80 | |
81 | pagetable_pud_dtor(ptdesc); |
82 | paravirt_release_pud(__pa(pud) >> PAGE_SHIFT); |
83 | paravirt_tlb_remove_table(tlb, virt_to_page(pud)); |
84 | } |
85 | |
86 | #if CONFIG_PGTABLE_LEVELS > 4 |
87 | void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d) |
88 | { |
89 | paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT); |
90 | paravirt_tlb_remove_table(tlb, virt_to_page(p4d)); |
91 | } |
92 | #endif /* CONFIG_PGTABLE_LEVELS > 4 */ |
93 | #endif /* CONFIG_PGTABLE_LEVELS > 3 */ |
94 | #endif /* CONFIG_PGTABLE_LEVELS > 2 */ |
95 | |
96 | static inline void pgd_list_add(pgd_t *pgd) |
97 | { |
	struct ptdesc *ptdesc = virt_to_ptdesc(pgd);
99 | |
	list_add(&ptdesc->pt_list, &pgd_list);
101 | } |
102 | |
103 | static inline void pgd_list_del(pgd_t *pgd) |
104 | { |
	struct ptdesc *ptdesc = virt_to_ptdesc(pgd);
106 | |
	list_del(&ptdesc->pt_list);
108 | } |
109 | |
110 | #define UNSHARED_PTRS_PER_PGD \ |
111 | (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) |
112 | #define MAX_UNSHARED_PTRS_PER_PGD \ |
113 | max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD) |
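
/*
 * The macros above give the number of pgd entries that are private to each
 * pagetable: only the user portion (KERNEL_PGD_BOUNDARY entries) when the
 * kernel pmd is shared, otherwise all PTRS_PER_PGD entries.  The MAX_
 * variant is a compile-time constant used to size on-stack arrays below.
 */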
114 | |
115 | |
116 | static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) |
117 | { |
	virt_to_ptdesc(pgd)->pt_mm = mm;
119 | } |
120 | |
121 | struct mm_struct *pgd_page_get_mm(struct page *page) |
122 | { |
123 | return page_ptdesc(page)->pt_mm; |
124 | } |
125 | |
126 | static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) |
127 | { |
128 | /* If the pgd points to a shared pagetable level (either the |
129 | ptes in non-PAE, or shared PMD in PAE), then just copy the |
130 | references from swapper_pg_dir. */ |
131 | if (CONFIG_PGTABLE_LEVELS == 2 || |
132 | (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) || |
133 | CONFIG_PGTABLE_LEVELS >= 4) { |
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
135 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, |
136 | KERNEL_PGD_PTRS); |
137 | } |
138 | |
139 | /* list required to sync kernel mapping updates */ |
140 | if (!SHARED_KERNEL_PMD) { |
141 | pgd_set_mm(pgd, mm); |
142 | pgd_list_add(pgd); |
143 | } |
144 | } |
145 | |
146 | static void pgd_dtor(pgd_t *pgd) |
147 | { |
148 | if (SHARED_KERNEL_PMD) |
149 | return; |
150 | |
	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
154 | } |
155 | |
156 | /* |
157 | * List of all pgd's needed for non-PAE so it can invalidate entries |
158 | * in both cached and uncached pgd's; not needed for PAE since the |
159 | * kernel pmd is shared. If PAE were not to share the pmd a similar |
160 | * tactic would be needed. This is essentially codepath-based locking |
161 | * against pageattr.c; it is the unique case in which a valid change |
162 | * of kernel pagetables can't be lazily synchronized by vmalloc faults. |
163 | * vmalloc faults work because attached pagetables are never freed. |
164 | * -- nyc |
165 | */ |
166 | |
167 | #ifdef CONFIG_X86_PAE |
168 | /* |
169 | * In PAE mode, we need to do a cr3 reload (=tlb flush) when |
170 | * updating the top-level pagetable entries to guarantee the |
171 | * processor notices the update. Since this is expensive, and |
172 | * all 4 top-level entries are used almost immediately in a |
173 | * new process's life, we just pre-populate them here. |
174 | * |
175 | * Also, if we're in a paravirt environment where the kernel pmd is |
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
177 | * and initialize the kernel pmds here. |
178 | */ |
179 | #define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD |
180 | #define MAX_PREALLOCATED_PMDS MAX_UNSHARED_PTRS_PER_PGD |
181 | |
182 | /* |
183 | * We allocate separate PMDs for the kernel part of the user page-table |
184 | * when PTI is enabled. We need them to map the per-process LDT into the |
185 | * user-space page-table. |
186 | */ |
187 | #define PREALLOCATED_USER_PMDS (boot_cpu_has(X86_FEATURE_PTI) ? \ |
188 | KERNEL_PGD_PTRS : 0) |
189 | #define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS |
190 | |
191 | void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) |
192 | { |
193 | paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); |
194 | |
195 | /* Note: almost everything apart from _PAGE_PRESENT is |
196 | reserved at the pmd (PDPT) level. */ |
197 | set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT)); |
198 | |
199 | /* |
200 | * According to Intel App note "TLBs, Paging-Structure Caches, |
201 | * and Their Invalidation", April 2007, document 317080-001, |
202 | * section 8.1: in PAE mode we explicitly have to flush the |
203 | * TLB via cr3 if the top-level pgd is changed... |
204 | */ |
205 | flush_tlb_mm(mm); |
206 | } |
207 | #else /* !CONFIG_X86_PAE */ |
208 | |
209 | /* No need to prepopulate any pagetable entries in non-PAE modes. */ |
210 | #define PREALLOCATED_PMDS 0 |
211 | #define MAX_PREALLOCATED_PMDS 0 |
212 | #define PREALLOCATED_USER_PMDS 0 |
213 | #define MAX_PREALLOCATED_USER_PMDS 0 |
214 | #endif /* CONFIG_X86_PAE */ |
215 | |
216 | static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count) |
217 | { |
218 | int i; |
219 | struct ptdesc *ptdesc; |
220 | |
221 | for (i = 0; i < count; i++) |
222 | if (pmds[i]) { |
			ptdesc = virt_to_ptdesc(pmds[i]);

			pagetable_pmd_dtor(ptdesc);
			pagetable_free(ptdesc);
227 | mm_dec_nr_pmds(mm); |
228 | } |
229 | } |
230 | |
231 | static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count) |
232 | { |
233 | int i; |
234 | bool failed = false; |
235 | gfp_t gfp = GFP_PGTABLE_USER; |
236 | |
237 | if (mm == &init_mm) |
238 | gfp &= ~__GFP_ACCOUNT; |
239 | gfp &= ~__GFP_HIGHMEM; |
240 | |
241 | for (i = 0; i < count; i++) { |
242 | pmd_t *pmd = NULL; |
		struct ptdesc *ptdesc = pagetable_alloc(gfp, 0);
244 | |
245 | if (!ptdesc) |
246 | failed = true; |
247 | if (ptdesc && !pagetable_pmd_ctor(ptdesc)) { |
			pagetable_free(ptdesc);
249 | ptdesc = NULL; |
250 | failed = true; |
251 | } |
252 | if (ptdesc) { |
253 | mm_inc_nr_pmds(mm); |
			pmd = ptdesc_address(ptdesc);
255 | } |
256 | |
257 | pmds[i] = pmd; |
258 | } |
259 | |
260 | if (failed) { |
261 | free_pmds(mm, pmds, count); |
262 | return -ENOMEM; |
263 | } |
264 | |
265 | return 0; |
266 | } |
267 | |
268 | /* |
269 | * Mop up any pmd pages which may still be attached to the pgd. |
270 | * Normally they will be freed by munmap/exit_mmap, but any pmd we |
271 | * preallocate which never got a corresponding vma will need to be |
272 | * freed manually. |
273 | */ |
274 | static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp) |
275 | { |
276 | pgd_t pgd = *pgdp; |
277 | |
278 | if (pgd_val(pgd) != 0) { |
279 | pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); |
280 | |
281 | pgd_clear(pgdp); |
282 | |
		paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
284 | pmd_free(mm, pmd); |
285 | mm_dec_nr_pmds(mm); |
286 | } |
287 | } |
288 | |
289 | static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp) |
290 | { |
291 | int i; |
292 | |
293 | for (i = 0; i < PREALLOCATED_PMDS; i++) |
		mop_up_one_pmd(mm, &pgdp[i]);
295 | |
296 | #ifdef CONFIG_PAGE_TABLE_ISOLATION |
297 | |
298 | if (!boot_cpu_has(X86_FEATURE_PTI)) |
299 | return; |
300 | |
301 | pgdp = kernel_to_user_pgdp(pgdp); |
302 | |
303 | for (i = 0; i < PREALLOCATED_USER_PMDS; i++) |
		mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
305 | #endif |
306 | } |
307 | |
308 | static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) |
309 | { |
310 | p4d_t *p4d; |
311 | pud_t *pud; |
312 | int i; |
313 | |
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
316 | |
317 | for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) { |
318 | pmd_t *pmd = pmds[i]; |
319 | |
320 | if (i >= KERNEL_PGD_BOUNDARY) |
321 | memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]), |
322 | sizeof(pmd_t) * PTRS_PER_PMD); |
323 | |
324 | pud_populate(mm, pud, pmd); |
325 | } |
326 | } |
327 | |
328 | #ifdef CONFIG_PAGE_TABLE_ISOLATION |
329 | static void pgd_prepopulate_user_pmd(struct mm_struct *mm, |
330 | pgd_t *k_pgd, pmd_t *pmds[]) |
331 | { |
332 | pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir); |
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
334 | p4d_t *u_p4d; |
335 | pud_t *u_pud; |
336 | int i; |
337 | |
	u_p4d = p4d_offset(u_pgd, 0);
	u_pud = pud_offset(u_p4d, 0);
340 | |
341 | s_pgd += KERNEL_PGD_BOUNDARY; |
342 | u_pud += KERNEL_PGD_BOUNDARY; |
343 | |
344 | for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) { |
345 | pmd_t *pmd = pmds[i]; |
346 | |
347 | memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd), |
348 | sizeof(pmd_t) * PTRS_PER_PMD); |
349 | |
		pud_populate(mm, u_pud, pmd);
351 | } |
352 | |
353 | } |
354 | #else |
355 | static void pgd_prepopulate_user_pmd(struct mm_struct *mm, |
356 | pgd_t *k_pgd, pmd_t *pmds[]) |
357 | { |
358 | } |
359 | #endif |
360 | /* |
361 | * Xen paravirt assumes pgd table should be in one page. 64 bit kernel also |
362 | * assumes that pgd should be in one page. |
363 | * |
364 | * But kernel with PAE paging that is not running as a Xen domain |
365 | * only needs to allocate 32 bytes for pgd instead of one page. |
366 | */ |
367 | #ifdef CONFIG_X86_PAE |
368 | |
369 | #include <linux/slab.h> |
370 | |
371 | #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) |
372 | #define PGD_ALIGN 32 |
373 | |
374 | static struct kmem_cache *pgd_cache; |
375 | |
376 | void __init pgtable_cache_init(void) |
377 | { |
378 | /* |
379 | * When PAE kernel is running as a Xen domain, it does not use |
380 | * shared kernel pmd. And this requires a whole page for pgd. |
381 | */ |
382 | if (!SHARED_KERNEL_PMD) |
383 | return; |
384 | |
385 | /* |
386 | * when PAE kernel is not running as a Xen domain, it uses |
387 | * shared kernel pmd. Shared kernel pmd does not require a whole |
 * page for pgd. We can just allocate 32 bytes for each pgd.
389 | * During boot time, we create a 32-byte slab for pgd table allocation. |
390 | */ |
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
392 | SLAB_PANIC, NULL); |
393 | } |
394 | |
395 | static inline pgd_t *_pgd_alloc(void) |
396 | { |
397 | /* |
398 | * If no SHARED_KERNEL_PMD, PAE kernel is running as a Xen domain. |
399 | * We allocate one page for pgd. |
400 | */ |
401 | if (!SHARED_KERNEL_PMD) |
402 | return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER, |
403 | PGD_ALLOCATION_ORDER); |
404 | |
405 | /* |
406 | * Now PAE kernel is not running as a Xen domain. We can allocate |
407 | * a 32-byte slab for pgd to save memory space. |
408 | */ |
409 | return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER); |
410 | } |
411 | |
412 | static inline void _pgd_free(pgd_t *pgd) |
413 | { |
414 | if (!SHARED_KERNEL_PMD) |
415 | free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER); |
416 | else |
417 | kmem_cache_free(pgd_cache, pgd); |
418 | } |
419 | #else |
420 | |
421 | static inline pgd_t *_pgd_alloc(void) |
422 | { |
423 | return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER, |
424 | PGD_ALLOCATION_ORDER); |
425 | } |
426 | |
427 | static inline void _pgd_free(pgd_t *pgd) |
428 | { |
	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
430 | } |
431 | #endif /* CONFIG_X86_PAE */ |
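
/*
 * pgd_alloc() ordering: allocate the pgd page, preallocate the kernel and
 * (with PTI) user pmds, run the paravirt hook, and only then, under
 * pgd_lock, wire the pmds up via pgd_ctor() so that walkers of pgd_list
 * never see a partially populated pgd.  The error labels unwind in reverse.
 */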
432 | |
433 | pgd_t *pgd_alloc(struct mm_struct *mm) |
434 | { |
435 | pgd_t *pgd; |
436 | pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS]; |
437 | pmd_t *pmds[MAX_PREALLOCATED_PMDS]; |
438 | |
439 | pgd = _pgd_alloc(); |
440 | |
441 | if (pgd == NULL) |
442 | goto out; |
443 | |
444 | mm->pgd = pgd; |
445 | |
446 | if (sizeof(pmds) != 0 && |
447 | preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0) |
448 | goto out_free_pgd; |
449 | |
450 | if (sizeof(u_pmds) != 0 && |
	    preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
452 | goto out_free_pmds; |
453 | |
454 | if (paravirt_pgd_alloc(mm) != 0) |
455 | goto out_free_user_pmds; |
456 | |
457 | /* |
458 | * Make sure that pre-populating the pmds is atomic with |
459 | * respect to anything walking the pgd_list, so that they |
460 | * never see a partially populated pgd. |
461 | */ |
	spin_lock(&pgd_lock);
463 | |
464 | pgd_ctor(mm, pgd); |
465 | if (sizeof(pmds) != 0) |
466 | pgd_prepopulate_pmd(mm, pgd, pmds); |
467 | |
468 | if (sizeof(u_pmds) != 0) |
		pgd_prepopulate_user_pmd(mm, pgd, u_pmds);
470 | |
	spin_unlock(&pgd_lock);
472 | |
473 | return pgd; |
474 | |
475 | out_free_user_pmds: |
476 | if (sizeof(u_pmds) != 0) |
		free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
478 | out_free_pmds: |
479 | if (sizeof(pmds) != 0) |
480 | free_pmds(mm, pmds, PREALLOCATED_PMDS); |
481 | out_free_pgd: |
482 | _pgd_free(pgd); |
483 | out: |
484 | return NULL; |
485 | } |
486 | |
487 | void pgd_free(struct mm_struct *mm, pgd_t *pgd) |
488 | { |
	pgd_mop_up_pmds(mm, pgd);
490 | pgd_dtor(pgd); |
491 | paravirt_pgd_free(mm, pgd); |
492 | _pgd_free(pgd); |
493 | } |
494 | |
495 | /* |
496 | * Used to set accessed or dirty bits in the page table entries |
497 | * on other architectures. On x86, the accessed and dirty bits |
498 | * are tracked by hardware. However, do_wp_page calls this function |
499 | * to also make the pte writeable at the same time the dirty bit is |
500 | * set. In that case we do actually need to write the PTE. |
501 | */ |
502 | int ptep_set_access_flags(struct vm_area_struct *vma, |
503 | unsigned long address, pte_t *ptep, |
504 | pte_t entry, int dirty) |
505 | { |
	int changed = !pte_same(*ptep, entry);
507 | |
508 | if (changed && dirty) |
		set_pte(ptep, entry);
510 | |
511 | return changed; |
512 | } |
513 | |
514 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
515 | int pmdp_set_access_flags(struct vm_area_struct *vma, |
516 | unsigned long address, pmd_t *pmdp, |
517 | pmd_t entry, int dirty) |
518 | { |
	int changed = !pmd_same(*pmdp, entry);
520 | |
521 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); |
522 | |
523 | if (changed && dirty) { |
		set_pmd(pmdp, entry);
525 | /* |
526 | * We had a write-protection fault here and changed the pmd |
		 * to be more permissive. No need to flush the TLB for that,
528 | * #PF is architecturally guaranteed to do that and in the |
529 | * worst-case we'll generate a spurious fault. |
530 | */ |
531 | } |
532 | |
533 | return changed; |
534 | } |
535 | |
536 | int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address, |
537 | pud_t *pudp, pud_t entry, int dirty) |
538 | { |
	int changed = !pud_same(*pudp, entry);
540 | |
541 | VM_BUG_ON(address & ~HPAGE_PUD_MASK); |
542 | |
543 | if (changed && dirty) { |
		set_pud(pudp, entry);
545 | /* |
546 | * We had a write-protection fault here and changed the pud |
		 * to be more permissive. No need to flush the TLB for that,
548 | * #PF is architecturally guaranteed to do that and in the |
549 | * worst-case we'll generate a spurious fault. |
550 | */ |
551 | } |
552 | |
553 | return changed; |
554 | } |
555 | #endif |
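
/*
 * The *_test_and_clear_young() helpers below clear only the accessed bit,
 * using an atomic bit operation, so a concurrent hardware update of e.g.
 * the dirty bit in the same entry cannot be lost.
 */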
556 | |
557 | int ptep_test_and_clear_young(struct vm_area_struct *vma, |
558 | unsigned long addr, pte_t *ptep) |
559 | { |
560 | int ret = 0; |
561 | |
	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);
565 | |
566 | return ret; |
567 | } |
568 | |
569 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) |
570 | int pmdp_test_and_clear_young(struct vm_area_struct *vma, |
571 | unsigned long addr, pmd_t *pmdp) |
572 | { |
573 | int ret = 0; |
574 | |
	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);
578 | |
579 | return ret; |
580 | } |
581 | #endif |
582 | |
583 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
584 | int pudp_test_and_clear_young(struct vm_area_struct *vma, |
585 | unsigned long addr, pud_t *pudp) |
586 | { |
587 | int ret = 0; |
588 | |
	if (pud_young(*pudp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pudp);
592 | |
593 | return ret; |
594 | } |
595 | #endif |
596 | |
597 | int ptep_clear_flush_young(struct vm_area_struct *vma, |
598 | unsigned long address, pte_t *ptep) |
599 | { |
600 | /* |
601 | * On x86 CPUs, clearing the accessed bit without a TLB flush |
602 | * doesn't cause data corruption. [ It could cause incorrect |
603 | * page aging and the (mistaken) reclaim of hot pages, but the |
604 | * chance of that should be relatively low. ] |
605 | * |
606 | * So as a performance optimization don't flush the TLB when |
607 | * clearing the accessed bit, it will eventually be flushed by |
608 | * a context switch or a VM operation anyway. [ In the rare |
609 | * event of it not getting flushed for a long time the delay |
610 | * shouldn't really matter because there's no real memory |
611 | * pressure for swapout to react to. ] |
612 | */ |
	return ptep_test_and_clear_young(vma, address, ptep);
614 | } |
615 | |
616 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
617 | int pmdp_clear_flush_young(struct vm_area_struct *vma, |
618 | unsigned long address, pmd_t *pmdp) |
619 | { |
620 | int young; |
621 | |
622 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); |
623 | |
	young = pmdp_test_and_clear_young(vma, address, pmdp);
625 | if (young) |
626 | flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); |
627 | |
628 | return young; |
629 | } |
630 | |
631 | pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address, |
632 | pmd_t *pmdp) |
633 | { |
634 | /* |
635 | * No flush is necessary. Once an invalid PTE is established, the PTE's |
636 | * access and dirty bits cannot be updated. |
637 | */ |
	return pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
639 | } |
640 | #endif |
641 | |
642 | /** |
643 | * reserve_top_address - reserves a hole in the top of kernel address space |
 * @reserve: size of hole to reserve
645 | * |
646 | * Can be used to relocate the fixmap area and poke a hole in the top |
647 | * of kernel address space to make room for a hypervisor. |
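 * 32-bit paravirt guests (Xen PV, for example) have historically used this.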
648 | */ |
649 | void __init reserve_top_address(unsigned long reserve) |
650 | { |
651 | #ifdef CONFIG_X86_32 |
652 | BUG_ON(fixmaps_set > 0); |
653 | __FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE; |
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
655 | -reserve, __FIXADDR_TOP + PAGE_SIZE); |
656 | #endif |
657 | } |
658 | |
659 | int fixmaps_set; |
660 | |
661 | void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) |
662 | { |
663 | unsigned long address = __fix_to_virt(idx); |
664 | |
665 | #ifdef CONFIG_X86_64 |
666 | /* |
667 | * Ensure that the static initial page tables are covering the |
668 | * fixmap completely. |
669 | */ |
670 | BUILD_BUG_ON(__end_of_permanent_fixed_addresses > |
671 | (FIXMAP_PMD_NUM * PTRS_PER_PTE)); |
672 | #endif |
673 | |
674 | if (idx >= __end_of_fixed_addresses) { |
675 | BUG(); |
676 | return; |
677 | } |
	set_pte_vaddr(address, pte);
679 | fixmaps_set++; |
680 | } |
681 | |
682 | void native_set_fixmap(unsigned /* enum fixed_addresses */ idx, |
683 | phys_addr_t phys, pgprot_t flags) |
684 | { |
	/* Sanitize 'flags' against any unsupported bits: */
686 | pgprot_val(flags) &= __default_kernel_pte_mask; |
687 | |
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
689 | } |
690 | |
691 | #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP |
692 | #ifdef CONFIG_X86_5LEVEL |
693 | /** |
694 | * p4d_set_huge - setup kernel P4D mapping |
695 | * |
696 | * No 512GB pages yet -- always return 0 |
697 | */ |
698 | int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) |
699 | { |
700 | return 0; |
701 | } |
702 | |
703 | /** |
704 | * p4d_clear_huge - clear kernel P4D mapping when it is set |
705 | * |
706 | * No 512GB pages yet -- always return 0 |
707 | */ |
708 | void p4d_clear_huge(p4d_t *p4d) |
709 | { |
710 | } |
711 | #endif |
712 | |
713 | /** |
714 | * pud_set_huge - setup kernel PUD mapping |
715 | * |
716 | * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this |
717 | * function sets up a huge page only if the complete range has the same MTRR |
718 | * caching mode. |
719 | * |
720 | * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger |
721 | * page mapping attempt fails. |
722 | * |
723 | * Returns 1 on success and 0 on failure. |
724 | */ |
725 | int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) |
726 | { |
727 | u8 uniform; |
728 | |
	mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
730 | if (!uniform) |
731 | return 0; |
732 | |
	/* Bail out if we are on a populated non-leaf entry: */
	if (pud_present(*pud) && !pud_huge(*pud))
735 | return 0; |
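	/*
	 * A huge PUD leaf uses (almost) the same layout as a 4k PTE, so the
	 * entry is written below as a pfn_pte() with _PAGE_PSE set;
	 * protval_4k_2_large() moves the PAT bit to its large-page position.
	 */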
736 | |
	set_pte((pte_t *)pud, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));
740 | |
741 | return 1; |
742 | } |
743 | |
744 | /** |
745 | * pmd_set_huge - setup kernel PMD mapping |
746 | * |
747 | * See text over pud_set_huge() above. |
748 | * |
749 | * Returns 1 on success and 0 on failure. |
750 | */ |
751 | int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) |
752 | { |
753 | u8 uniform; |
754 | |
	mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
756 | if (!uniform) { |
		pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
758 | __func__, addr, addr + PMD_SIZE); |
759 | return 0; |
760 | } |
761 | |
	/* Bail out if we are on a populated non-leaf entry: */
	if (pmd_present(*pmd) && !pmd_huge(*pmd))
764 | return 0; |
765 | |
	set_pte((pte_t *)pmd, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));
769 | |
770 | return 1; |
771 | } |
772 | |
773 | /** |
774 | * pud_clear_huge - clear kernel PUD mapping when it is set |
775 | * |
776 | * Returns 1 on success and 0 on failure (no PUD map is found). |
777 | */ |
778 | int pud_clear_huge(pud_t *pud) |
779 | { |
	if (pud_large(*pud)) {
		pud_clear(pud);
782 | return 1; |
783 | } |
784 | |
785 | return 0; |
786 | } |
787 | |
788 | /** |
789 | * pmd_clear_huge - clear kernel PMD mapping when it is set |
790 | * |
791 | * Returns 1 on success and 0 on failure (no PMD map is found). |
792 | */ |
793 | int pmd_clear_huge(pmd_t *pmd) |
794 | { |
	if (pmd_large(*pmd)) {
		pmd_clear(pmd);
797 | return 1; |
798 | } |
799 | |
800 | return 0; |
801 | } |
802 | |
803 | #ifdef CONFIG_X86_64 |
804 | /** |
805 | * pud_free_pmd_page - Clear pud entry and free pmd page. |
806 | * @pud: Pointer to a PUD. |
807 | * @addr: Virtual address associated with pud. |
808 | * |
809 | * Context: The pud range has been unmapped and TLB purged. |
810 | * Return: 1 if clearing the entry succeeded. 0 otherwise. |
811 | * |
812 | * NOTE: Callers must allow a single page allocation. |
813 | */ |
814 | int pud_free_pmd_page(pud_t *pud, unsigned long addr) |
815 | { |
816 | pmd_t *pmd, *pmd_sv; |
817 | pte_t *pte; |
818 | int i; |
819 | |
	pmd = pud_pgtable(*pud);
821 | pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL); |
822 | if (!pmd_sv) |
823 | return 0; |
824 | |
825 | for (i = 0; i < PTRS_PER_PMD; i++) { |
826 | pmd_sv[i] = pmd[i]; |
		if (!pmd_none(pmd[i]))
			pmd_clear(&pmd[i]);
829 | } |
830 | |
	pud_clear(pud);
832 | |
833 | /* INVLPG to clear all paging-structure caches */ |
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
835 | |
836 | for (i = 0; i < PTRS_PER_PMD; i++) { |
		if (!pmd_none(pmd_sv[i])) {
			pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
839 | free_page((unsigned long)pte); |
840 | } |
841 | } |
842 | |
843 | free_page((unsigned long)pmd_sv); |
844 | |
	pagetable_pmd_dtor(virt_to_ptdesc(pmd));
846 | free_page((unsigned long)pmd); |
847 | |
848 | return 1; |
849 | } |
850 | |
851 | /** |
852 | * pmd_free_pte_page - Clear pmd entry and free pte page. |
853 | * @pmd: Pointer to a PMD. |
854 | * @addr: Virtual address associated with pmd. |
855 | * |
856 | * Context: The pmd range has been unmapped and TLB purged. |
857 | * Return: 1 if clearing the entry succeeded. 0 otherwise. |
858 | */ |
859 | int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) |
860 | { |
861 | pte_t *pte; |
862 | |
	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);
865 | |
866 | /* INVLPG to clear all paging-structure caches */ |
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
868 | |
869 | free_page((unsigned long)pte); |
870 | |
871 | return 1; |
872 | } |
873 | |
874 | #else /* !CONFIG_X86_64 */ |
875 | |
876 | /* |
877 | * Disable free page handling on x86-PAE. This assures that ioremap() |
878 | * does not update sync'd pmd entries. See vmalloc_sync_one(). |
879 | */ |
880 | int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) |
881 | { |
882 | return pmd_none(*pmd); |
883 | } |
884 | |
885 | #endif /* CONFIG_X86_64 */ |
886 | #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ |
887 | |
888 | pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma) |
889 | { |
890 | if (vma->vm_flags & VM_SHADOW_STACK) |
891 | return pte_mkwrite_shstk(pte); |
892 | |
893 | pte = pte_mkwrite_novma(pte); |
894 | |
895 | return pte_clear_saveddirty(pte); |
896 | } |
897 | |
898 | pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) |
899 | { |
900 | if (vma->vm_flags & VM_SHADOW_STACK) |
901 | return pmd_mkwrite_shstk(pmd); |
902 | |
903 | pmd = pmd_mkwrite_novma(pmd); |
904 | |
905 | return pmd_clear_saveddirty(pmd); |
906 | } |
907 | |
908 | void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte) |
909 | { |
910 | /* |
911 | * Hardware before shadow stack can (rarely) set Dirty=1 |
912 | * on a Write=0 PTE. So the below condition |
913 | * only indicates a software bug when shadow stack is |
914 | * supported by the HW. This checking is covered in |
915 | * pte_shstk(). |
916 | */ |
917 | VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) && |
918 | pte_shstk(pte)); |
919 | } |
920 | |
921 | void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd) |
922 | { |
923 | /* See note in arch_check_zapped_pte() */ |
924 | VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) && |
925 | pmd_shstk(pmd)); |
926 | } |
927 | |