/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long-term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
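 *
 * A minimal usage sketch (illustrative only; @data and @len stand in for
 * whatever the caller wants to copy)::
 *
 *   addr = kmap(page);
 *   memcpy(addr, data, len);
 *   kunmap(page);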
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr: The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings to remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context, including interrupts.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *   addr1 = kmap_local_page(page1);
 *   addr2 = kmap_local_page(page2);
 *   ...
 *   kunmap_local(addr2);
 *   kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case, it
 * comes with restrictions on the validity of the returned pointer.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);

/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *   addr1 = kmap_local_folio(folio1, offset1);
 *   addr2 = kmap_local_folio(folio2, offset2);
 *   ...
 *   kunmap_local(addr2);
 *   kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case, it
 * comes with restrictions on the validity of the returned pointer. Only
 * use it when really necessary.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
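 *
 * The returned address points at the byte at @offset within the mapped
 * page. For example (illustrative sketch, with hypothetical variables)::
 *
 *   addr = kmap_local_folio(folio, offset);
 *   first_byte = *(u8 *)addr;
 *   kunmap_local(addr);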
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);

/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * In fact a wrapper around kmap_local_page() which also disables pagefaults
 * and, depending on PREEMPT_RT configuration, also CPU migration and
 * preemption. Therefore users should not count on the latter two side effects.
 *
 * Mappings should always be released by kunmap_atomic().
 *
 * Do not use in new code. Use kmap_local_page() instead.
 *
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache. The API has two functions, and they
 * can be used in a manner similar to the following::
 *
 *   // Find the page of interest.
 *   struct page *page = find_get_page(mapping, offset);
 *
 *   // Gain access to the contents of that page.
 *   void *vaddr = kmap_atomic(page);
 *
 *   // Do something to the contents of that page.
 *   memset(vaddr, 0, PAGE_SIZE);
 *
 *   // Unmap that page.
 *   kunmap_atomic(vaddr);
 *
 * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
 * call, not the argument.
 *
 * If you need to map two pages because you want to copy from one page to
 * another, you need to keep the kmap_atomic() calls strictly nested, like::
 *
 *   vaddr1 = kmap_atomic(page1);
 *   vaddr2 = kmap_atomic(page2);
 *
 *   memcpy(vaddr1, vaddr2, PAGE_SIZE);
 *
 *   kunmap_atomic(vaddr2);
 *   kunmap_atomic(vaddr1);
 */
static inline void *kmap_atomic(struct page *page);

/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

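/*
 * Cache maintenance stubs. Architectures with aliasing caches define
 * ARCH_HAS_FLUSH_ANON_PAGE / ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE and
 * provide real implementations; everywhere else these are no-ops.
 */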
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *addr = kmap_local_page(page);
        clear_user_page(addr, vaddr, page);
        kunmap_local(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * Returns: The allocated and zeroed HIGHMEM page
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing its own
 * implementation.
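 *
 * A page fault handler might use it roughly as follows (illustrative
 * sketch, assuming a struct vm_fault *vmf)::
 *
 *   page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *   if (!page)
 *           return VM_FAULT_OOM;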
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
                                   unsigned long vaddr)
{
        struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);

        if (page)
                clear_user_highpage(page, vaddr);

        return page;
}
#endif

static inline void clear_highpage(struct page *page)
{
        void *kaddr = kmap_local_page(page);
        clear_page(kaddr);
        kunmap_local(kaddr);
}

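/*
 * Clear the page through the match-all KASAN tag: the page's tag is
 * temporarily reset so that the clearing accesses do not trip tag checks,
 * and restored afterwards.
 */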
static inline void clear_highpage_kasan_tagged(struct page *page)
{
        u8 tag;

        tag = page_kasan_tag(page);
        page_kasan_tag_reset(page);
        clear_highpage(page);
        page_kasan_tag_set(page, tag);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

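/*
 * Clear a page and set its memory tags. Only architectures with hardware
 * memory tagging (e.g. arm64 MTE) provide a real implementation; the
 * generic version is a no-op.
 */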
static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
                unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
                unsigned start1, unsigned end1,
                unsigned start2, unsigned end2)
{
        void *kaddr = kmap_local_page(page);
        unsigned int i;

        BUG_ON(end1 > page_size(page) || end2 > page_size(page));

        if (end1 > start1)
                memset(kaddr + start1, 0, end1 - start1);

        if (end2 > start2)
                memset(kaddr + start2, 0, end2 - start2);

        kunmap_local(kaddr);
        for (i = 0; i < compound_nr(page); i++)
                flush_dcache_page(page + i);
}
#endif

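/*
 * Zero the bytes in [@start, @end) of @page. A filesystem zeroing the tail
 * of a page beyond end-of-file might, for example, call
 * zero_user_segment(page, offset, PAGE_SIZE).
 */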
static inline void zero_user_segment(struct page *page,
        unsigned start, unsigned end)
{
        zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
        unsigned start, unsigned size)
{
        zero_user_segments(page, start, start + size, 0, 0);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

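/*
 * Copy the contents of @from into @to on behalf of a user mapping at
 * @vaddr. copy_user_page() lets architectures with aliasing D-caches keep
 * the kernel and user views of the destination coherent.
 */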
static inline void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        char *vfrom, *vto;

        vfrom = kmap_local_page(from);
        vto = kmap_local_page(to);
        copy_user_page(vto, vfrom, vaddr, to);
        kunmap_local(vto);
        kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

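/* Copy the full contents of @from into @to; either page may be in highmem. */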
static inline void copy_highpage(struct page *to, struct page *from)
{
        char *vfrom, *vto;

        vfrom = kmap_local_page(from);
        vto = kmap_local_page(to);
        copy_page(vto, vfrom);
        kunmap_local(vto);
        kunmap_local(vfrom);
}

#endif

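/*
 * Bounded sub-page helpers: map the page, operate on @len bytes at the
 * given offset, and unmap again. The VM_BUG_ON()s catch ranges that would
 * run past the end of the page.
 */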
static inline void memcpy_page(struct page *dst_page, size_t dst_off,
                               struct page *src_page, size_t src_off,
                               size_t len)
{
        char *dst = kmap_local_page(dst_page);
        char *src = kmap_local_page(src_page);

        VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
        memcpy(dst + dst_off, src + src_off, len);
        kunmap_local(src);
        kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
                               size_t len)
{
        char *addr = kmap_local_page(page);

        VM_BUG_ON(offset + len > PAGE_SIZE);
        memset(addr + offset, val, len);
        kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
                                    size_t offset, size_t len)
{
        char *from = kmap_local_page(page);

        VM_BUG_ON(offset + len > PAGE_SIZE);
        memcpy(to, from + offset, len);
        kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
                                  const char *from, size_t len)
{
        char *to = kmap_local_page(page);

        VM_BUG_ON(offset + len > PAGE_SIZE);
        memcpy(to + offset, from, len);
        flush_dcache_page(page);
        kunmap_local(to);
}

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_local_page(page);

        VM_BUG_ON(offset + len > PAGE_SIZE);
        memset(addr + offset, 0, len);
        flush_dcache_page(page);
        kunmap_local(addr);
}

/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
                size_t start1, size_t xend1, size_t start2, size_t xend2)
{
        zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
                size_t start, size_t xend)
{
        zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
                size_t start, size_t length)
{
        zero_user_segments(&folio->page, start, start + length, 0, 0);
}

#endif /* _LINUX_HIGHMEM_H */