// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 ARM Ltd.
 */

#include <linux/mm.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <asm/tlbflush.h>

static inline bool mm_is_user(struct mm_struct *mm)
{
	/*
	 * Don't attempt to apply the contig bit to kernel mappings, because
	 * dynamically adding/removing the contig bit can cause page faults.
	 * These racing faults are ok for user space, since they get serialized
	 * on the PTL. But kernel mappings can't tolerate faults.
	 */
	if (unlikely(mm_is_efi(mm)))
		return false;
	return mm != &init_mm;
}

static inline pte_t *contpte_align_down(pte_t *ptep)
{
	return PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES);
}
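
/*
 * Worked example for the alignment above (illustrative only; assumes a 4K
 * page size, where CONT_PTES is 16 and sizeof(pte_t) is 8, so a contpte
 * block of table entries spans 128 bytes): a ptep pointing at the fifth
 * entry of a block sits 32 bytes past the 128-byte-aligned block start, and
 * PTR_ALIGN_DOWN() returns the pointer to the block's first entry.
 */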

static void contpte_try_unfold_partial(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, unsigned int nr)
{
	/*
	 * Unfold any partially covered contpte block at the beginning and end
	 * of the range.
	 */

	if (ptep != contpte_align_down(ptep) || nr < CONT_PTES)
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));

	if (ptep + nr != contpte_align_down(ptep + nr)) {
		unsigned long last_addr = addr + PAGE_SIZE * (nr - 1);
		pte_t *last_ptep = ptep + nr - 1;

		contpte_try_unfold(mm, last_addr, last_ptep,
				   __ptep_get(last_ptep));
	}
}
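
/*
 * Worked example for the partial unfold above (illustrative; assumes a 4K
 * page size, so CONT_PTES is 16): clearing nr == 20 ptes starting four
 * entries into a contpte block covers the last 12 entries of that block and
 * the first 8 of the next, so both blocks are only partially covered and
 * both get unfolded before the caller modifies the range.
 */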

/*
 * Repaint an entire contpte block with the pgprot of @pte, which the caller
 * has prepared with the contig bit either set (fold) or cleared (unfold):
 * clear all CONT_PTES entries, gathering any access/dirty bits they carry,
 * flush the TLB for the whole block, then write the new ptes with the
 * gathered bits merged in so no state is lost.
 */
static void contpte_convert(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long start_addr;
	pte_t *start_ptep;
	int i;

	start_ptep = ptep = contpte_align_down(ptep);
	start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
	pte = pfn_pte(ALIGN_DOWN(pte_pfn(pte), CONT_PTES), pte_pgprot(pte));

	for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) {
		pte_t ptent = __ptep_get_and_clear(mm, addr, ptep);

		if (pte_dirty(ptent))
			pte = pte_mkdirty(pte);

		if (pte_young(ptent))
			pte = pte_mkyoung(pte);
	}

	__flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3);

	__set_ptes(mm, start_addr, start_ptep, pte, CONT_PTES);
}

void __contpte_try_fold(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, pte_t pte)
{
	/*
	 * We have already checked that the virtual and physical addresses are
	 * correctly aligned for a contpte mapping in contpte_try_fold() so the
	 * remaining checks are to ensure that the contpte range is fully
	 * covered by a single folio, and ensure that all the ptes are valid
	 * with contiguous PFNs and matching prots. We ignore the state of the
	 * access and dirty bits for the purpose of deciding if it's a
	 * contiguous range; the folding process will generate a single contpte
	 * entry which has a single access and dirty bit. Those 2 bits are the
	 * logical OR of their respective bits in the constituent pte entries.
	 * In order to ensure the contpte range is covered by a single folio,
	 * we must recover the folio from the pfn, but special mappings don't
	 * have a folio backing them. Fortunately contpte_try_fold() already
	 * checked that the pte is not special - we never try to fold special
	 * mappings. Note we can't use vm_normal_page() for this since we don't
	 * have the vma.
	 */

	unsigned long folio_start, folio_end;
	unsigned long cont_start, cont_end;
	pte_t expected_pte, subpte;
	struct folio *folio;
	struct page *page;
	unsigned long pfn;
	pte_t *orig_ptep;
	pgprot_t prot;

	int i;

	if (!mm_is_user(mm))
		return;

	page = pte_page(pte);
	folio = page_folio(page);
	folio_start = addr - (page - &folio->page) * PAGE_SIZE;
	folio_end = folio_start + folio_nr_pages(folio) * PAGE_SIZE;
	cont_start = ALIGN_DOWN(addr, CONT_PTE_SIZE);
	cont_end = cont_start + CONT_PTE_SIZE;

	if (folio_start > cont_start || folio_end < cont_end)
		return;

	pfn = ALIGN_DOWN(pte_pfn(pte), CONT_PTES);
	prot = pte_pgprot(pte_mkold(pte_mkclean(pte)));
	expected_pte = pfn_pte(pfn, prot);
	orig_ptep = ptep;
	ptep = contpte_align_down(ptep);

	for (i = 0; i < CONT_PTES; i++) {
		subpte = pte_mkold(pte_mkclean(__ptep_get(ptep)));
		if (!pte_same(subpte, expected_pte))
			return;
		expected_pte = pte_advance_pfn(expected_pte, 1);
		ptep++;
	}

	pte = pte_mkcont(pte);
	contpte_convert(mm, addr, orig_ptep, pte);
}
EXPORT_SYMBOL_GPL(__contpte_try_fold);
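
/*
 * Illustrative example of a successful fold (assumes a 4K page size, so
 * CONT_PTE_SIZE is 64K and CONT_PTES is 16): a 64K folio mapped at a
 * 64K-aligned virtual address fully covers the contpte range, so if all 16
 * subptes are valid with contiguous pfns and identical pgprots (ignoring
 * access/dirty), the block is repainted with the contig bit set via
 * contpte_convert(). A folio covering only part of the 64K range fails the
 * folio_start/folio_end check above and the ptes are left as-is.
 */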

void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t pte)
{
	/*
	 * We have already checked that the ptes are contiguous in
	 * contpte_try_unfold(), so just check that the mm is user space.
	 */
	if (!mm_is_user(mm))
		return;

	pte = pte_mknoncont(pte);
	contpte_convert(mm, addr, ptep, pte);
}
EXPORT_SYMBOL_GPL(__contpte_try_unfold);

pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte)
{
	/*
	 * Gather access/dirty bits, which may be populated in any of the ptes
	 * of the contig range. We are guaranteed to be holding the PTL, so any
	 * contiguous range cannot be unfolded or otherwise modified under our
	 * feet.
	 */

	pte_t pte;
	int i;

	ptep = contpte_align_down(ptep);

	for (i = 0; i < CONT_PTES; i++, ptep++) {
		pte = __ptep_get(ptep);

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}

	return orig_pte;
}
EXPORT_SYMBOL_GPL(contpte_ptep_get);

pte_t contpte_ptep_get_lockless(pte_t *orig_ptep)
{
	/*
	 * The ptep_get_lockless() API requires us to read and return *orig_ptep
	 * so that it is self-consistent, without the PTL held, so we may be
	 * racing with other threads modifying the pte. Usually a READ_ONCE()
	 * would suffice, but for the contpte case, we also need to gather the
	 * access and dirty bits from across all ptes in the contiguous block,
	 * and we can't read all of those neighbouring ptes atomically, so any
	 * contiguous range may be unfolded/modified/refolded under our feet.
	 * Therefore we ensure we read a _consistent_ contpte range by checking
	 * that all ptes in the range are valid and have CONT_PTE set, that all
	 * pfns are contiguous and that all pgprots are the same (ignoring
	 * access/dirty). If we find a pte that is not consistent, then we must
	 * be racing with an update so start again. If the target pte does not
	 * have CONT_PTE set then that is considered consistent on its own
	 * because it is not part of a contpte range.
	 */

	pgprot_t orig_prot;
	unsigned long pfn;
	pte_t orig_pte;
	pgprot_t prot;
	pte_t *ptep;
	pte_t pte;
	int i;

retry:
	orig_pte = __ptep_get(orig_ptep);

	if (!pte_valid_cont(orig_pte))
		return orig_pte;

	orig_prot = pte_pgprot(pte_mkold(pte_mkclean(orig_pte)));
	ptep = contpte_align_down(orig_ptep);
	pfn = pte_pfn(orig_pte) - (orig_ptep - ptep);

	for (i = 0; i < CONT_PTES; i++, ptep++, pfn++) {
		pte = __ptep_get(ptep);
		prot = pte_pgprot(pte_mkold(pte_mkclean(pte)));

		if (!pte_valid_cont(pte) ||
		    pte_pfn(pte) != pfn ||
		    pgprot_val(prot) != pgprot_val(orig_prot))
			goto retry;

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}

	return orig_pte;
}
EXPORT_SYMBOL_GPL(contpte_ptep_get_lockless);
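
/*
 * Illustrative race for the retry loop above: another thread may unfold the
 * block between our read of orig_pte and the per-pte checks, leaving a subpte
 * without the contig bit; pte_valid_cont() then fails and we jump back to
 * retry. If, on the re-read, the target pte itself is no longer part of a
 * contpte range, it is returned as-is, which is already self-consistent for
 * the ptep_get_lockless() contract.
 */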

void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
		      pte_t *ptep, pte_t pte, unsigned int nr)
{
	unsigned long next;
	unsigned long end;
	unsigned long pfn;
	pgprot_t prot;

	/*
	 * The set_ptes() spec guarantees that when nr > 1, the initial state of
	 * all ptes is not-present. Therefore we never need to unfold or
	 * otherwise invalidate a range before we set the new ptes.
	 * contpte_set_ptes() should never be called for nr < 2.
	 */
	VM_WARN_ON(nr == 1);

	if (!mm_is_user(mm))
		return __set_ptes(mm, addr, ptep, pte, nr);

	end = addr + (nr << PAGE_SHIFT);
	pfn = pte_pfn(pte);
	prot = pte_pgprot(pte);

	do {
		next = pte_cont_addr_end(addr, end);
		nr = (next - addr) >> PAGE_SHIFT;
		pte = pfn_pte(pfn, prot);

		if (((addr | next | (pfn << PAGE_SHIFT)) & ~CONT_PTE_MASK) == 0)
			pte = pte_mkcont(pte);
		else
			pte = pte_mknoncont(pte);

		__set_ptes(mm, addr, ptep, pte, nr);

		addr = next;
		ptep += nr;
		pfn += nr;

	} while (addr != end);
}
EXPORT_SYMBOL_GPL(contpte_set_ptes);
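
/*
 * Worked example for the chunking loop above (illustrative; assumes a 4K
 * page size, so CONT_PTES is 16 and CONT_PTE_SIZE is 64K): setting nr == 40
 * ptes starting eight entries into a contpte block is split by
 * pte_cont_addr_end() into chunks of 8, 16 and 16 ptes. Only chunks whose
 * virtual address and pfn both span a whole, naturally aligned 64K block get
 * the contig bit; the leading 8-pte chunk is written as ordinary ptes.
 */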

void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, unsigned int nr, int full)
{
	contpte_try_unfold_partial(mm, addr, ptep, nr);
	__clear_full_ptes(mm, addr, ptep, nr, full);
}
EXPORT_SYMBOL_GPL(contpte_clear_full_ptes);

pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep,
				      unsigned int nr, int full)
{
	contpte_try_unfold_partial(mm, addr, ptep, nr);
	return __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
}
EXPORT_SYMBOL_GPL(contpte_get_and_clear_full_ptes);

int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
				      unsigned long addr, pte_t *ptep)
{
	/*
	 * ptep_clear_flush_young() technically requires us to clear the access
	 * flag for a _single_ pte. However, the core-mm code actually tracks
	 * access/dirty per folio, not per page. And since we only create a
	 * contig range when the range is covered by a single folio, we can get
	 * away with clearing young for the whole contig range here, so we avoid
	 * having to unfold.
	 */

	int young = 0;
	int i;

	ptep = contpte_align_down(ptep);
	addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);

	for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
		young |= __ptep_test_and_clear_young(vma, addr, ptep);

	return young;
}
EXPORT_SYMBOL_GPL(contpte_ptep_test_and_clear_young);

int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
				   unsigned long addr, pte_t *ptep)
{
	int young;

	young = contpte_ptep_test_and_clear_young(vma, addr, ptep);

	if (young) {
		/*
		 * See comment in __ptep_clear_flush_young(); same rationale for
		 * eliding the trailing DSB applies here.
		 */
		addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
		__flush_tlb_range_nosync(vma, addr, addr + CONT_PTE_SIZE,
					 PAGE_SIZE, true, 3);
	}

	return young;
}
EXPORT_SYMBOL_GPL(contpte_ptep_clear_flush_young);

void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned int nr)
{
	/*
	 * If wrprotecting an entire contig range, we can avoid unfolding. Just
	 * set wrprotect and wait for the later mmu_gather flush to invalidate
	 * the tlb. Until the flush, the page may or may not be wrprotected.
	 * After the flush, it is guaranteed wrprotected. If it's a partial
	 * range though, we must unfold, because we can't have a case where
	 * CONT_PTE is set but wrprotect applies to a subset of the PTEs; this
	 * would cause it to continue to be unpredictable after the flush.
	 */

	contpte_try_unfold_partial(mm, addr, ptep, nr);
	__wrprotect_ptes(mm, addr, ptep, nr);
}
EXPORT_SYMBOL_GPL(contpte_wrprotect_ptes);
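
/*
 * Illustrative example (assumes a 4K page size, so CONT_PTES is 16):
 * wrprotecting all 16 ptes of a contpte block leaves the contig bit in place
 * and relies on the later mmu_gather flush, whereas wrprotecting only, say,
 * 4 ptes in the middle of a block causes contpte_try_unfold_partial() to
 * unfold that block first, so the contig bit never disagrees with the
 * individual pte permissions.
 */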

int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
				  unsigned long addr, pte_t *ptep,
				  pte_t entry, int dirty)
{
	unsigned long start_addr;
	pte_t orig_pte;
	int i;

	/*
	 * Gather the access/dirty bits for the contiguous range. If nothing has
	 * changed, it's a no-op.
	 */
	orig_pte = pte_mknoncont(ptep_get(ptep));
	if (pte_val(orig_pte) == pte_val(entry))
		return 0;

	/*
	 * We can fix up access/dirty bits without having to unfold the contig
	 * range. But if the write bit is changing, we must unfold.
	 */
	if (pte_write(orig_pte) == pte_write(entry)) {
		/*
		 * For HW access management, we technically only need to update
		 * the flag on a single pte in the range. But for SW access
		 * management, we need to update all the ptes to prevent extra
		 * faults. Avoid per-page tlb flush in __ptep_set_access_flags()
		 * and instead flush the whole range at the end.
		 */
		ptep = contpte_align_down(ptep);
		start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);

		for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
			__ptep_set_access_flags(vma, addr, ptep, entry, 0);

		if (dirty)
			__flush_tlb_range(vma, start_addr, addr,
					  PAGE_SIZE, true, 3);
	} else {
		__contpte_try_unfold(vma->vm_mm, addr, ptep, orig_pte);
		__ptep_set_access_flags(vma, addr, ptep, entry, dirty);
	}

	return 1;
}
EXPORT_SYMBOL_GPL(contpte_ptep_set_access_flags);
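
/*
 * Illustrative example of the two paths above: a fault that only needs to
 * mark an already-writable contpte mapping dirty takes the first branch,
 * updating all CONT_PTES entries and issuing a single ranged TLB flush; a
 * change that flips the write bit takes the else branch and unfolds the
 * block before updating the single pte.
 */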