// SPDX-License-Identifier: GPL-2.0

#include <asm/pgalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>

int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	if (!pte_same(ptep_get(ptep), entry))
		__set_pte_at(vma->vm_mm, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}
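
/*
 * Illustrative sketch (hypothetical helper, not a kernel API; modelled
 * loosely on the generic fault path in mm/memory.c): core mm only calls
 * update_mmu_cache() when ptep_set_access_flags() reports a change, which
 * is why this architecture reports a change unconditionally above - the
 * cache update then also runs for spurious faults.
 */
static void __maybe_unused example_mark_pte_accessed(struct vm_area_struct *vma,
						     unsigned long addr,
						     pte_t *ptep, int dirty)
{
	pte_t entry = pte_mkyoung(ptep_get(ptep));

	if (dirty)
		entry = pte_mkdirty(entry);

	if (ptep_set_access_flags(vma, addr, ptep, entry, dirty))
		update_mmu_cache(vma, addr, ptep);
}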

int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep)
{
	if (!pte_young(ptep_get(ptep)))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);
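
/*
 * Usage sketch (hypothetical helper, not a kernel API): page aging samples
 * and clears the Accessed bit, then treats a subsequently set bit as
 * evidence of a reference. Clearing just that one bit atomically with
 * test_and_clear_bit(), instead of rewriting the whole PTE, avoids racing
 * with concurrent hardware updates to other bits such as Dirty.
 */
static int __maybe_unused example_pte_referenced(struct vm_area_struct *vma,
						 unsigned long addr,
						 pte_t *ptep)
{
	/* Nonzero means the PTE was young; the bit is clear afterwards. */
	return ptep_test_and_clear_young(vma, addr, ptep);
}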

#ifdef CONFIG_64BIT
pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if (pgtable_l4_enabled)
		return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);

	return (pud_t *)p4d;
}

p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (pgtable_l5_enabled)
		return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);

	return (p4d_t *)pgd;
}
#endif
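
/*
 * Illustrative sketch (hypothetical helper, not a kernel API): a software
 * walk from the PGD down to the PUD level. When pgtable_l5_enabled or
 * pgtable_l4_enabled is false (Sv48 or Sv39 paging), the offset helpers
 * above fold the missing level away by returning their input pointer, so
 * the same walk works unchanged across Sv39, Sv48 and Sv57.
 */
static pud_t *__maybe_unused example_walk_to_pud(struct mm_struct *mm,
						 unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);

	return pud_offset(p4d, addr);
}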

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

void p4d_clear_huge(p4d_t *p4d)
{
}

int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);

	set_pud(pud, new_pud);
	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (!pud_leaf(pudp_get(pud)))
		return 0;
	pud_clear(pud);
	return 1;
}

int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd = pud_pgtable(pudp_get(pud));
	int i;

	/*
	 * Unhook the sub-tree and flush before freeing it, so that no CPU
	 * can start a walk into page tables that are going away.
	 */
	pud_clear(pud);

	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);

			pte_free_kernel(NULL, pte);
		}
	}

	pmd_free(NULL, pmd);

	return 1;
}
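
/*
 * Example (hypothetical helper, not a kernel API; modelled loosely on the
 * huge-vmap logic in mm/vmalloc.c): a PUD-sized leaf mapping is only
 * attempted when the range and the backing physical address are suitably
 * sized and aligned, and any existing table at that slot is torn down with
 * pud_free_pmd_page() first.
 */
static int __maybe_unused example_try_huge_pud(pud_t *pud, unsigned long addr,
					       unsigned long end,
					       phys_addr_t phys, pgprot_t prot)
{
	if (end - addr != PUD_SIZE)
		return 0;
	if (!IS_ALIGNED(addr, PUD_SIZE) || !IS_ALIGNED(phys, PUD_SIZE))
		return 0;
	if (pud_present(pudp_get(pud)) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys, prot);
}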

int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);

	set_pmd(pmd, new_pmd);
	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_leaf(pmdp_get(pmd)))
		return 0;
	pmd_clear(pmd);
	return 1;
}

int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));

	pmd_clear(pmd);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
	pte_free_kernel(NULL, pte);
	return 1;
}

#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
	/*
	 * When leaf PTE entries (regular pages) are collapsed into a leaf
	 * PMD entry (huge page), a valid non-leaf PTE is converted into a
	 * valid leaf PTE at the level 1 page table. Since the sfence.vma
	 * forms that specify an address only apply to leaf PTEs, we need a
	 * global flush here. collapse_huge_page() assumes these flushes are
	 * eager, so just do the fence here.
	 */
	flush_tlb_mm(vma->vm_mm);
	return pmd;
}
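
/*
 * Illustrative sketch (hypothetical helper, not a kernel API): the
 * address-qualified fence that would NOT be sufficient here. Per the
 * comment above, this form only orders updates to leaf PTEs for the given
 * address, which is why pmdp_collapse_flush() issues a global flush via
 * flush_tlb_mm() instead.
 */
static void __maybe_unused example_local_sfence_vma(unsigned long address)
{
	__asm__ __volatile__ ("sfence.vma %0" : : "r" (address) : "memory");
}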
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */