// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/pkeys.h>
#include <linux/debugfs.h>
#include <linux/proc_fs.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>
#include <asm/firmware.h>
#include <asm/ultravisor.h>
#include <asm/kexec.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

#include "internal.h"

struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif

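/*
 * PMD fragment bookkeeping: the number of PMD fragments carved out of one
 * page and the (log2) size of each fragment. These are set up during early
 * MMU initialisation for the active translation mode (hash or radix).
 */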
unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, i.e. a
 * minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will
 * have handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}

int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pud_t *pudp, pud_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pud_devmap(*pudp));
	assert_spin_locked(pud_lockptr(vma->vm_mm, pudp));
#endif
	changed = !pud_same(*(pudp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_1G here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pudp_ptep(pudp),
					pud_pte(entry), address, MMU_PAGE_1G);
	}
	return changed;
}

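/*
 * Test-and-clear the accessed bit on a huge PMD/PUD. These simply wrap the
 * __pmdp/__pudp variants on the current mm; clearing the reference bit is
 * not critical for correctness, so no synchronous TLB flush is done here.
 */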
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

int pudp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pud_t *pudp)
{
	return __pudp_test_and_clear_young(vma->vm_mm, address, pudp);
}

/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a tlb flush for this update.
	 */

	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_leaf(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

void set_pud_at(struct mm_struct *mm, unsigned long addr,
		pud_t *pudp, pud_t pud)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a tlb flush for this update.
	 */

	WARN_ON(pte_hw_valid(pud_pte(*pudp)));
	assert_spin_locked(pud_lockptr(mm, pudp));
	WARN_ON(!(pud_leaf(pud)));
#endif
	trace_hugepage_set_pud(addr, pud_val(pud));
	return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud));
}

static void do_serialize(void *arg)
{
	/* We've taken the IPI, so try to trim the mask while here */
	if (radix_enabled()) {
		struct mm_struct *mm = arg;
		exit_lazy_flush_tlb(mm, false);
	}
}

/*
 * Serialize against __find_linux_pte() which does a lock-less
 * lookup in page tables with local interrupts disabled. For huge pages
 * it casts pmd_t to pte_t. Since the format of pte_t is different from
 * pmd_t, we want to prevent a transition from a pmd pointing to a page table
 * to a pmd pointing to a huge page (and back) while interrupts are disabled.
 * We clear the pmd to possibly replace it with a page table pointer in
 * different code paths. So make sure we wait for the parallel
 * __find_linux_pte() to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_serialize, mm, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return __pmd(old_pmd);
}

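/*
 * "Full" variants of the huge get-and-clear helpers: the caller passes
 * full != 0 when the whole mm is being torn down, in which case the TLB
 * flush can be left to the final flush of the mmu_gather.
 */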
pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp, int full)
{
	pmd_t pmd;
	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
	VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
	/*
	 * If it is not a fullmm flush, then we can possibly end up converting
	 * this PMD pte entry to a regular level 0 PTE by a parallel page fault.
	 * Make sure we flush the tlb in this case.
	 */
	if (!full)
		flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
	return pmd;
}

pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr, pud_t *pudp, int full)
{
	pud_t pud;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
	VM_BUG_ON((pud_present(*pudp) && !pud_devmap(*pudp)) ||
		  !pud_present(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, addr, pudp);
	/*
	 * If it is not a fullmm flush, then we can possibly end up converting
	 * this PUD entry to a regular level 0 PTE by a parallel page fault.
	 * Make sure we flush the tlb in this case.
	 */
	if (!full)
		flush_pud_tlb_range(vma, addr, addr + HPAGE_PUD_SIZE);
	return pud;
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

static pud_t pud_set_protbits(pud_t pud, pgprot_t pgprot)
{
	return __pud(pud_val(pud) | pgprot_val(pgprot));
}

/*
 * At some point we should be able to get rid of
 * pmd_mkhuge() and mk_huge_pmd() when we update all the
 * other archs to mark the pmd huge in pfn_pmd()
 */
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

	return __pmd_mkhuge(pmd_set_protbits(__pmd(pmdv), pgprot));
}

pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pudv;

	pudv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

	return __pud_mkhuge(pud_set_protbits(__pud(pudv), pgprot));
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec, called with MMU off */
notrace void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();

	reset_sprs();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end,
				     int nid, pgprot_t prot)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid, prot);

	return hash__create_section_mapping(start, end, nid, prot);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

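/*
 * Allocate and publish the partition table used by the hash/radix MMU.
 * The PTCR is programmed with the table's physical address and its size
 * encoded as (log2(size) - 12), per the ISA.
 */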
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	/* Initialize the Partition Table with no entries */
	partition_tb = memblock_alloc(patb_size, patb_size);
	if (!partition_tb)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, patb_size, patb_size);

	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	set_ptcr_when_no_uv(ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}

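/*
 * Flush all translations cached for a partition (LPID). Radix uses the
 * dedicated LPID flush helpers; hash issues a tlbie that invalidates all
 * entries for that LPID.
 */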
static void flush_partition(unsigned int lpid, bool radix)
{
	if (radix) {
		radix__flush_all_lpid(lpid);
		radix__flush_all_lpid_guest(lpid);
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		/* do we need a fixup here? */
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
}

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1, bool flush)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	/*
	 * When the ultravisor is enabled, the partition table is stored in
	 * secure memory and can only be accessed via an ultravisor call.
	 * However, we maintain a copy of the partition table in normal memory
	 * to allow Nest MMU translations to occur (for normal VMs).
	 *
	 * Therefore, here we always update partition_tb, regardless of whether
	 * we are running under an ultravisor or not.
	 */
	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * If the ultravisor is enabled, we do an ultravisor call to register
	 * the partition table entry (PATE), which also does a global flush of
	 * TLBs and partition table caches for the lpid. Otherwise, just do the
	 * flush. The type of flush (hash or radix) depends on what the
	 * previous use of the partition ID was, not the new use.
	 */
	if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) {
		uv_register_pate(lpid, dw0, dw1);
		pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n",
			dw0, dw1);
	} else if (flush) {
		/*
		 * Boot does not need to flush, because the MMU is off and each
		 * CPU does a tlbiel_all() before switching it on, which
		 * flushes everything.
		 */
		flush_partition(lpid, (old & PATB_HR));
	}
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

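/*
 * PMD fragment allocator: a full page is split into PMD_FRAG_NR fragments,
 * and unused fragments are cached per-mm in mm->context.pmd_frag so that
 * later PMD table allocations can reuse them.
 */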
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	if (PMD_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the PMD page NULL.
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct ptdesc *ptdesc;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	ptdesc = pagetable_alloc(gfp, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pmd_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}

	atomic_set(&ptdesc->pt_frag_refcount, 1);

	ret = ptdesc_address(ptdesc);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If nobody else populated the fragment cache while we were
	 * allocating, publish the remaining fragments of this page for reuse.
	 * Otherwise return the page we allocated, holding a single fragment
	 * reference.
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&ptdesc->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}

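/*
 * Drop one fragment reference on the page backing this PMD table; the page
 * itself is only returned to the allocator once the last fragment is freed.
 */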
void pmd_fragment_free(unsigned long *pmd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);

	if (pagetable_is_reserved(ptdesc))
		return free_reserved_ptdesc(ptdesc);

	BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
		pagetable_pmd_dtor(ptdesc);
		pagetable_free(ptdesc);
	}
}

static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		__pud_free(table);
		break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
	/* 16M hugepd directory at pud level */
	case HTLB_16M_INDEX:
		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
		break;
	/* 16G hugepd directory at the pgd level */
	case HTLB_16G_INDEX:
		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
		break;
#endif
	/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}

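/*
 * Defer freeing of a page table until after the TLB/gather flush. The table
 * type is encoded in the low bits of the pointer (tables are aligned well
 * beyond MAX_PGTABLE_INDEX_SIZE), and __tlb_remove_table() decodes it again
 * to dispatch to the right free routine.
 */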
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
	/*
	 * Hash maps the linear range with a single page size,
	 * mmu_linear_psize, so don't bother printing these on hash.
	 */
	if (!radix_enabled())
		return;
	seq_printf(m, "DirectMap4k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	seq_printf(m, "DirectMap2M: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
	seq_printf(m, "DirectMap1G: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */

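/*
 * ptep_modify_prot_start()/ptep_modify_prot_commit(): transactional PTE
 * protection update. start() makes the entry invisible to hardware updates,
 * commit() installs the new PTE value.
 */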
pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	unsigned long pte_val;

	/*
	 * Clear _PAGE_PRESENT so that no parallel hardware update is
	 * possible. Also keep pte_present() true (via _PAGE_INVALID) so that
	 * we don't take a wrong fault.
	 */
	pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);

	return __pte(pte_val);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (radix_enabled())
		return radix__ptep_modify_prot_commit(vma, addr,
						      ptep, old_pte, pte);
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information, and it is stored at PTRS_PER_PMD offset from the related pmd
 * location. Hence a pmd move requires a deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in
 * the pmd page. Hence if we have a different pmd page we need to withdraw
 * during a pmd move.
 *
 * With hash we always use the deposited table, irrespective of whether the
 * mapping is anonymous. With radix we use the deposited table only for
 * anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
			   struct spinlock *old_pmd_ptl,
			   struct vm_area_struct *vma)
{
	if (radix_enabled())
		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

	return true;
}
#endif

/*
 * Does the CPU support tlbie?
 */
bool tlbie_capable __read_mostly = true;
EXPORT_SYMBOL(tlbie_capable);

/*
 * Should tlbie be used for management of CPU TLBs, for kernel and process
 * address spaces? tlbie may still be used for nMMU accelerators, and for KVM
 * guest address spaces.
 */
bool tlbie_enabled __read_mostly = true;

static int __init setup_disable_tlbie(char *str)
{
	if (!radix_enabled()) {
		pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n");
		return 1;
	}

	tlbie_capable = false;
	tlbie_enabled = false;

	return 1;
}
__setup("disable_tlbie", setup_disable_tlbie);

static int __init pgtable_debugfs_setup(void)
{
	if (!tlbie_capable)
		return 0;

	/*
	 * There is no locking vs tlb flushing when changing this value.
	 * The tlb flushers will see one value or another, and use either
	 * tlbie or tlbiel with IPIs. In both cases the TLBs will be
	 * invalidated as expected.
	 */
	debugfs_create_bool("tlbie_enabled", 0600,
			    arch_debugfs_dir,
			    &tlbie_enabled);

	return 0;
}
arch_initcall(pgtable_debugfs_setup);

#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN)
/*
 * Override the generic version in mm/memremap.c.
 *
 * With hash translation, the direct-map range is mapped with just one
 * page size selected by htab_init_page_sizes(). Consult
 * mmu_psize_defs[] to determine the minimum page size alignment.
 */
unsigned long memremap_compat_align(void)
{
	if (!radix_enabled()) {
		unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;

		return max(SUBSECTION_SIZE, 1UL << shift);
	}

	return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif

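/*
 * Build the page protection for a VMA from its vm_flags, adding the
 * powerpc-specific bits (SAO, pkeys) on top of the generic protection_map.
 */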
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	unsigned long prot;

	/* Radix supports execute-only, but protection_map maps X -> RX */
	if (!radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC))
		vm_flags |= VM_READ;

	prot = pgprot_val(protection_map[vm_flags & (VM_ACCESS_FLAGS | VM_SHARED)]);

	if (vm_flags & VM_SAO)
		prot |= _PAGE_SAO;

#ifdef CONFIG_PPC_MEM_KEYS
	prot |= vmflag_to_pte_pkey_bits(vm_flags);
#endif

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);