// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/kfence.h>

struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

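/*
 * With rodata=full (the default when CONFIG_RODATA_FULL_DEFAULT_ENABLED=y)
 * the linear map is kept page-granular, so read-only permission changes
 * made to a vmalloc range below can also be applied to its linear alias.
 */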
bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

bool can_set_direct_map(void)
{
	/*
	 * rodata_full and DEBUG_PAGEALLOC require linear map to be
	 * mapped at page granularity, so that it is possible to
	 * protect/unprotect single pages.
	 *
	 * KFENCE pool requires page-granular mapping if initialized late.
	 */
	return rodata_full || debug_pagealloc_enabled() ||
		arm64_kfence_can_set_direct_map();
}

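/*
 * apply_to_page_range() callback: rewrite a single PTE, clearing the bits
 * in clear_mask before setting the bits in set_mask.
 */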
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = __ptep_get(ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	__set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

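/*
 * Change the permission bits of [addr, addr + numpages * PAGE_SIZE), which
 * must be entirely covered by a single vmalloc/vmap area so that no block
 * mapping ever needs to be split.
 */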
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)kasan_reset_tag(area->addr) + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}

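/*
 * set_memory_ro()/set_memory_rw() flip write permission by setting one of
 * PTE_RDONLY/PTE_WRITE and clearing the other.
 */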
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

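/*
 * Executability is controlled through PTE_PXN (Privileged eXecute Never).
 * PTE_MAYBE_GP is the BTI Guarded Page bit when the kernel runs with BTI
 * enabled, and zero otherwise.
 */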
int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(PTE_MAYBE_GP));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_MAYBE_GP),
					__pgprot(PTE_PXN));
}

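/*
 * Toggle PTE_VALID for a range the caller knows to be mapped with
 * PAGE_SIZE pages, as __change_memory_common() requires.
 */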
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

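/*
 * Remove a single page from the kernel linear map by clearing PTE_VALID.
 * As the _noflush suffix indicates, the TLB is not flushed here; that is
 * the caller's responsibility.
 */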
int set_direct_map_invalid_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(PTE_VALID),
	};

	if (!can_set_direct_map())
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

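/*
 * Restore a linear map entry to its default state: valid and writable,
 * again without flushing the TLB.
 */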
int set_direct_map_default_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
		.clear_mask = __pgprot(PTE_RDONLY),
	};

	if (!can_set_direct_map())
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
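/*
 * DEBUG_PAGEALLOC hook: the page allocator unmaps pages from the linear
 * map when they are freed (enable == 0) and maps them again on allocation,
 * so stray accesses to freed memory fault immediately.
 */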
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!can_set_direct_map())
		return;

	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * This function is used to determine if a linear map page has been marked as
 * not-valid. Walk the page table and check the PTE_VALID bit.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return false;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(__ptep_get(ptep));
}


Source: linux/arch/arm64/mm/pageattr.c
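As a usage illustration, here is a minimal, hypothetical sketch of a kernel-module function driving this code path (the function and buffer names are invented, not part of pageattr.c): it vmalloc()s a single page, so the range is exactly one VM_ALLOC area mapped at page granularity, then makes it read-only through set_memory_ro().

/* Hypothetical usage sketch -- not part of pageattr.c. */
#include <linux/mm.h>
#include <linux/set_memory.h>
#include <linux/vmalloc.h>

static int make_table_ro(void)
{
	u32 *tbl = vmalloc(PAGE_SIZE);	/* one VM_ALLOC area, one page */

	if (!tbl)
		return -ENOMEM;

	tbl[0] = 0x12345678;		/* populate while still writable */

	/*
	 * [tbl, tbl + PAGE_SIZE) is entirely covered by one vmalloc area,
	 * so the range check in change_memory_common() passes; with
	 * rodata=full the linear alias of the page is made read-only too.
	 */
	return set_memory_ro((unsigned long)tbl, 1);
}

In real code the permissions would have to be reset (for example with set_memory_rw(), or by marking the area with VM_FLUSH_RESET_PERMS) before the buffer is freed.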