1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Fixmap manipulation code |
4 | */ |
5 | |
6 | #include <linux/bug.h> |
7 | #include <linux/init.h> |
8 | #include <linux/kernel.h> |
9 | #include <linux/libfdt.h> |
10 | #include <linux/memory.h> |
11 | #include <linux/mm.h> |
12 | #include <linux/sizes.h> |
13 | |
14 | #include <asm/fixmap.h> |
15 | #include <asm/kernel-pgtable.h> |
16 | #include <asm/pgalloc.h> |
17 | #include <asm/tlbflush.h> |
18 | |
/* ensure that the fixmap region does not grow down into the PCI I/O region */
static_assert(FIXADDR_TOT_START > PCI_IO_END);

/* Number of PTE-level tables needed to span the whole fixmap region. */
#define NR_BM_PTE_TABLES \
	SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT)
/* Number of PMD-level tables needed to span the whole fixmap region. */
#define NR_BM_PMD_TABLES \
	SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PUD_SHIFT)

/* The code below statically allocates a single bm_pmd, so the fixmap
 * must fit within one PUD entry. */
static_assert(NR_BM_PMD_TABLES == 1);

/* Index of the table covering @addr, counting from the start of the
 * fixmap region, at the level selected by @shift. */
#define __BM_TABLE_IDX(addr, shift) \
	(((addr) >> (shift)) - (FIXADDR_TOT_START >> (shift)))

#define BM_PTE_TABLE_IDX(addr) __BM_TABLE_IDX(addr, PMD_SHIFT)

/*
 * Statically allocated page tables backing the fixmap; allocated in .bss
 * so they exist before any allocator is up. The PMD/PUD tables are unused
 * (hence __maybe_unused) on configurations with fewer translation levels.
 */
static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
37 | |
38 | static inline pte_t *fixmap_pte(unsigned long addr) |
39 | { |
40 | return &bm_pte[BM_PTE_TABLE_IDX(addr)][pte_index(address: addr)]; |
41 | } |
42 | |
43 | static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr) |
44 | { |
45 | pmd_t pmd = READ_ONCE(*pmdp); |
46 | pte_t *ptep; |
47 | |
48 | if (pmd_none(pmd)) { |
49 | ptep = bm_pte[BM_PTE_TABLE_IDX(addr)]; |
50 | __pmd_populate(pmdp, __pa_symbol(ptep), PMD_TYPE_TABLE); |
51 | } |
52 | } |
53 | |
/*
 * Install the static bm_pmd table at @pudp if needed, then walk every PMD
 * entry in [@addr, @end) and hook up the corresponding bm_pte tables.
 */
static void __init early_fixmap_init_pmd(pud_t *pudp, unsigned long addr,
					 unsigned long end)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);
	pmd_t *pmdp;

	/* First visit: point the PUD entry at the static PMD table. */
	if (pud_none(pud))
		__pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);

	pmdp = pmd_offset_kimg(pudp, addr);
	/* Standard kernel page-table walk: one iteration per PMD entry. */
	do {
		next = pmd_addr_end(addr, end);
		early_fixmap_init_pte(pmdp, addr);
	} while (pmdp++, addr = next, addr != end);
}
70 | |
71 | |
/*
 * Install the static bm_pud table at @p4dp if needed, then descend into
 * the PMD level for the fixmap range [@addr, @end).
 */
static void __init early_fixmap_init_pud(p4d_t *p4dp, unsigned long addr,
					 unsigned long end)
{
	p4d_t p4d = READ_ONCE(*p4dp);
	pud_t *pudp;

	/*
	 * A non-none entry that does not already point at bm_pud means the
	 * top-level entry is shared with another mapping.
	 */
	if (CONFIG_PGTABLE_LEVELS > 3 && !p4d_none(p4d) &&
	    p4d_page_paddr(p4d) != __pa_symbol(bm_pud)) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
	}

	if (p4d_none(p4d))
		__p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);

	/* Note: when levels are folded, this resolves to the p4d itself. */
	pudp = pud_offset_kimg(p4dp, addr);
	early_fixmap_init_pmd(pudp, addr, end);
}
94 | |
95 | /* |
96 | * The p*d_populate functions call virt_to_phys implicitly so they can't be used |
97 | * directly on kernel symbols (bm_p*d). This function is called too early to use |
98 | * lm_alias so __p*d_populate functions must be used to populate with the |
99 | * physical address from __pa_symbol. |
100 | */ |
101 | void __init early_fixmap_init(void) |
102 | { |
103 | unsigned long addr = FIXADDR_TOT_START; |
104 | unsigned long end = FIXADDR_TOP; |
105 | |
106 | pgd_t *pgdp = pgd_offset_k(addr); |
107 | p4d_t *p4dp = p4d_offset_kimg(pgdp, addr); |
108 | |
109 | early_fixmap_init_pud(p4dp, addr, end); |
110 | } |
111 | |
112 | /* |
113 | * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we |
114 | * ever need to use IPIs for TLB broadcasting, then we're in trouble here. |
115 | */ |
116 | void __set_fixmap(enum fixed_addresses idx, |
117 | phys_addr_t phys, pgprot_t flags) |
118 | { |
119 | unsigned long addr = __fix_to_virt(idx); |
120 | pte_t *ptep; |
121 | |
122 | BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); |
123 | |
124 | ptep = fixmap_pte(addr); |
125 | |
126 | if (pgprot_val(flags)) { |
127 | __set_pte(ptep, pfn_pte(page_nr: phys >> PAGE_SHIFT, pgprot: flags)); |
128 | } else { |
129 | __pte_clear(&init_mm, addr, ptep); |
130 | flush_tlb_kernel_range(start: addr, end: addr+PAGE_SIZE); |
131 | } |
132 | } |
133 | |
/*
 * Map the device tree blob at physical address @dt_phys into the FIX_FDT
 * fixmap slot with protection @prot, and return its virtual address.
 *
 * Mapping is done in two steps: first a single page, enough to validate
 * the FDT magic and read the total size from the header, then (if needed)
 * the remainder. On success, *@size is set to the blob's total size.
 * Returns NULL if @dt_phys is unset/misaligned, the magic is wrong, or
 * the blob exceeds MAX_FDT_SIZE.
 */
void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	phys_addr_t dt_phys_base;
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double check
	 * here if that is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/* Map page-aligned; the blob itself may start mid-page. */
	dt_phys_base = round_down(dt_phys, PAGE_SIZE);
	offset = dt_phys % PAGE_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot);

	/* On these failure paths the partial mapping is left in place;
	 * presumably harmless since FIX_FDT is reserved for this use. */
	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	/* Extend the mapping if the blob spills past the first page. */
	if (offset + *size > PAGE_SIZE) {
		create_mapping_noalloc(dt_phys_base, dt_virt_base,
				       offset + *size, prot);
	}

	return dt_virt;
}
173 | |