/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
 */
static inline void *kmap(struct page *page);
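
/*
 * Illustrative sketch (not part of the original header): typical paired use
 * of kmap()/kunmap() from preemptible task context. The destination buffer
 * "buf" is a hypothetical kernel buffer.
 *
 *	void *vaddr = kmap(page);
 *
 *	memcpy(buf, vaddr, PAGE_SIZE);
 *	kunmap(page);
 *
 * Note that kunmap() takes the page, not the virtual address returned by
 * kmap().
 */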

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr: The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *                     remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context, including interrupts.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *	addr1 = kmap_local_page(page1);
 *	addr2 = kmap_local_page(page2);
 *	...
 *	kunmap_local(addr2);
 *	kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While kmap_local_page() is significantly faster than kmap() for the highmem
 * case it comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);
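
/*
 * Illustrative sketch (not part of the original header): short-lived access
 * to a possibly-highmem page, mapped and unmapped in the same context.
 * "offset" and "len" are hypothetical and assumed to stay within the page.
 *
 *	void *vaddr = kmap_local_page(page);
 *
 *	memset(vaddr + offset, 0, len);
 *	flush_dcache_page(page);
 *	kunmap_local(vaddr);
 */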

/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *	addr1 = kmap_local_folio(folio1, offset1);
 *	addr2 = kmap_local_folio(folio2, offset2);
 *	...
 *	kunmap_local(addr2);
 *	kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);
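
/*
 * Illustrative sketch (not part of the original header): map the page of a
 * (possibly large, possibly highmem) folio that contains byte "offset" and
 * copy out of it. "buf", "offset" and "len" are hypothetical, with the copy
 * assumed not to cross a page boundary.
 *
 *	char *from = kmap_local_folio(folio, offset);
 *
 *	memcpy(buf, from, len);
 *	kunmap_local(from);
 */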

/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * In fact a wrapper around kmap_local_page() which also disables pagefaults
 * and, depending on PREEMPT_RT configuration, also CPU migration and
 * preemption. Therefore users should not count on the latter two side effects.
 *
 * Mappings should always be released by kunmap_atomic().
 *
 * Do not use in new code. Use kmap_local_page() instead.
 *
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache. The API has two functions, and they
 * can be used in a manner similar to the following::
 *
 *	// Find the page of interest.
 *	struct page *page = find_get_page(mapping, offset);
 *
 *	// Gain access to the contents of that page.
 *	void *vaddr = kmap_atomic(page);
 *
 *	// Do something to the contents of that page.
 *	memset(vaddr, 0, PAGE_SIZE);
 *
 *	// Unmap that page.
 *	kunmap_atomic(vaddr);
 *
 * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
 * call, not the argument.
 *
 * If you need to map two pages because you want to copy from one page to
 * another you need to keep the kmap_atomic calls strictly nested, like::
 *
 *	vaddr1 = kmap_atomic(page1);
 *	vaddr2 = kmap_atomic(page2);
 *
 *	memcpy(vaddr1, vaddr2, PAGE_SIZE);
 *
 *	kunmap_atomic(vaddr2);
 *	kunmap_atomic(vaddr1);
 */
static inline void *kmap_atomic(struct page *page);

/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_local_page(page);
	clear_user_page(addr, vaddr, page);
	kunmap_local(addr);
}
#endif

#ifndef vma_alloc_zeroed_movable_folio
/**
 * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
 * @vma: The VMA the page is to be allocated for.
 * @vaddr: The virtual address the page will be inserted into.
 *
 * This function will allocate a page suitable for inserting into this
 * VMA at this virtual address. It may be allocated from highmem or
 * the movable zone. An architecture may provide its own implementation.
 *
 * Return: A folio containing one allocated and zeroed page or NULL if
 * we are out of memory.
 */
static inline
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
					     unsigned long vaddr)
{
	struct folio *folio;

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
	if (folio)
		clear_user_highpage(&folio->page, vaddr);

	return folio;
}
#endif
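
/*
 * Illustrative sketch (not part of the original header): allocating a zeroed
 * folio for an anonymous write fault; error handling beyond the NULL check is
 * elided and "vmf" is the usual struct vm_fault pointer.
 *
 *	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
 *	if (!folio)
 *		return VM_FAULT_OOM;
 */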

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);
	clear_page(kaddr);
	kunmap_local(kaddr);
}

static inline void clear_highpage_kasan_tagged(struct page *page)
{
	void *kaddr = kmap_local_page(page);

	clear_page(kasan_reset_tag(kaddr));
	kunmap_local(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
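
/*
 * Illustrative sketch (not part of the original header): zero the part of a
 * page beyond a hypothetical number of valid bytes, e.g. when the remainder
 * of the page was not read from backing store.
 *
 *	zero_user(page, valid, PAGE_SIZE - valid);
 */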

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifdef copy_mc_to_kernel
/*
 * If architecture supports machine check exception handling, define the
 * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
 * page while handling a #MC in the source page (@from), and return the
 * number of bytes not copied if there was a #MC, otherwise 0 for success.
 */
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);

	return ret;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);

	return ret;
}
#else
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	copy_user_highpage(to, from, vaddr, vma);
	return 0;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	copy_highpage(to, from);
	return 0;
}
#endif
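
/*
 * Illustrative sketch (not part of the original header): a machine-check
 * aware page copy; a non-zero return means some bytes were left uncopied
 * because of a #MC in the source, which the caller typically reports as
 * -EHWPOISON.
 *
 *	if (copy_mc_highpage(dst, src))
 *		return -EHWPOISON;
 */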

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}
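
/*
 * Illustrative sketch (not part of the original header): fill a page with
 * "len" bytes from a hypothetical kernel buffer "buf" and zero the rest of
 * the page, without open-coding the kmap_local_page()/kunmap_local() pair.
 *
 *	memcpy_to_page(page, 0, buf, len);
 *	memzero_page(page, len, PAGE_SIZE - len);
 */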

/**
 * memcpy_from_folio - Copy a range of bytes from a folio.
 * @to: The memory to copy to.
 * @folio: The folio to read from.
 * @offset: The first byte in the folio to read.
 * @len: The number of bytes to copy.
 */
static inline void memcpy_from_folio(char *to, struct folio *folio,
				     size_t offset, size_t len)
{
	VM_BUG_ON(offset + len > folio_size(folio));

	do {
		const char *from = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_highmem(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(from);

		to += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);
}
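
/*
 * Illustrative sketch (not part of the original header): copy a range that
 * may span several pages of a (possibly highmem) folio into a kernel buffer;
 * "buf", "pos" and "len" are hypothetical and assumed to stay within the
 * folio.
 *
 *	memcpy_from_folio(buf, folio, offset_in_folio(folio, pos), len);
 */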

/**
 * memcpy_to_folio - Copy a range of bytes to a folio.
 * @folio: The folio to write to.
 * @offset: The first byte in the folio to store to.
 * @from: The memory to copy from.
 * @len: The number of bytes to copy.
 */
static inline void memcpy_to_folio(struct folio *folio, size_t offset,
		const char *from, size_t len)
{
	VM_BUG_ON(offset + len > folio_size(folio));

	do {
		char *to = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_highmem(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(to);

		from += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);

	flush_dcache_folio(folio);
}
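
/*
 * Illustrative sketch (not part of the original header): the mirror of the
 * memcpy_from_folio() example above, writing "len" bytes of a hypothetical
 * buffer into the folio at file position "pos".
 *
 *	memcpy_to_folio(folio, offset_in_folio(folio, pos), buf, len);
 */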

/**
 * folio_zero_tail - Zero the tail of a folio.
 * @folio: The folio to zero.
 * @offset: The byte offset in the folio to start zeroing at.
 * @kaddr: The address the folio is currently mapped to.
 *
 * If you have already used kmap_local_folio() to map a folio, written
 * some data to it and now need to zero the end of the folio (and flush
 * the dcache), you can use this function. If you do not have the
 * folio kmapped (eg the folio has been partially populated by DMA),
 * use folio_zero_range() or folio_zero_segment() instead.
 *
 * Return: An address which can be passed to kunmap_local().
 */
static inline __must_check void *folio_zero_tail(struct folio *folio,
		size_t offset, void *kaddr)
{
	size_t len = folio_size(folio) - offset;

	if (folio_test_highmem(folio)) {
		size_t max = PAGE_SIZE - offset_in_page(offset);

		while (len > max) {
			memset(kaddr, 0, max);
			kunmap_local(kaddr);
			len -= max;
			offset += max;
			max = PAGE_SIZE;
			kaddr = kmap_local_folio(folio, offset);
		}
	}

	memset(kaddr, 0, len);
	flush_dcache_folio(folio);

	return kaddr;
}
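
/*
 * Illustrative sketch (not part of the original header): copy "isize" bytes
 * of hypothetical inline data into an already-mapped folio and zero the
 * remainder; this is essentially what folio_fill_tail() below does.
 *
 *	void *kaddr = kmap_local_folio(folio, 0);
 *
 *	memcpy(kaddr, idata, isize);
 *	kaddr = folio_zero_tail(folio, isize, kaddr + isize);
 *	kunmap_local(kaddr);
 */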

/**
 * folio_fill_tail - Copy some data to a folio and pad with zeroes.
 * @folio: The destination folio.
 * @offset: The offset into @folio at which to start copying.
 * @from: The data to copy.
 * @len: How many bytes of data to copy.
 *
 * This function is most useful for filesystems which support inline data.
 * When they want to copy data from the inode into the page cache, this
 * function does everything for them. It supports large folios even on
 * HIGHMEM configurations.
 */
static inline void folio_fill_tail(struct folio *folio, size_t offset,
		const char *from, size_t len)
{
	char *to = kmap_local_folio(folio, offset);

	VM_BUG_ON(offset + len > folio_size(folio));

	if (folio_test_highmem(folio)) {
		size_t max = PAGE_SIZE - offset_in_page(offset);

		while (len > max) {
			memcpy(to, from, max);
			kunmap_local(to);
			len -= max;
			from += max;
			offset += max;
			max = PAGE_SIZE;
			to = kmap_local_folio(folio, offset);
		}
	}

	memcpy(to, from, len);
	to = folio_zero_tail(folio, offset + len, to + len);
	kunmap_local(to);
}
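
/*
 * Illustrative sketch (not part of the original header): an inline-data
 * ->read_folio() path copying "isize" bytes from a hypothetical in-inode
 * buffer into the start of the folio and marking it uptodate.
 *
 *	folio_fill_tail(folio, 0, inline_data_buf, isize);
 *	folio_mark_uptodate(folio);
 */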

/**
 * memcpy_from_file_folio - Copy some bytes from a file folio.
 * @to: The destination buffer.
 * @folio: The folio to copy from.
 * @pos: The position in the file.
 * @len: The maximum number of bytes to copy.
 *
 * Copy up to @len bytes from this folio. This may be limited by PAGE_SIZE
 * if the folio comes from HIGHMEM, and by the size of the folio.
 *
 * Return: The number of bytes copied from the folio.
 */
static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
					    loff_t pos, size_t len)
{
	size_t offset = offset_in_folio(folio, pos);
	char *from = kmap_local_folio(folio, offset);

	if (folio_test_highmem(folio)) {
		offset = offset_in_page(offset);
		len = min_t(size_t, len, PAGE_SIZE - offset);
	} else
		len = min(len, folio_size(folio) - offset);

	memcpy(to, from, len);
	kunmap_local(from);

	return len;
}
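
/*
 * Illustrative sketch (not part of the original header): because the copy
 * may be short (on HIGHMEM, or near the end of the folio), callers loop
 * until the requested range has been copied. "buf", "pos" and "len" are
 * hypothetical and assumed to stay within this folio.
 *
 *	while (len) {
 *		size_t n = memcpy_from_file_folio(buf, folio, pos, len);
 *
 *		buf += n;
 *		pos += n;
 *		len -= n;
 *	}
 */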

/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
		size_t start1, size_t xend1, size_t start2, size_t xend2)
{
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
		size_t start, size_t xend)
{
	zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
		size_t start, size_t length)
{
	zero_user_segments(&folio->page, start, start + length, 0, 0);
}
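
/*
 * Illustrative sketch (not part of the original header): zero the part of a
 * folio that lies beyond end-of-file, e.g. after a truncate; "isize" is a
 * hypothetical file size that falls inside this folio.
 *
 *	folio_zero_segment(folio, offset_in_folio(folio, isize),
 *			   folio_size(folio));
 */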

/**
 * folio_release_kmap - Unmap a folio and drop a refcount.
 * @folio: The folio to release.
 * @addr: The address previously returned by a call to kmap_local_folio().
 *
 * It is common, e.g. in directory handling, to kmap a folio. This function
 * unmaps the folio and drops the refcount that was being held to keep the
 * folio alive while we accessed it.
 */
static inline void folio_release_kmap(struct folio *folio, void *addr)
{
	kunmap_local(addr);
	folio_put(folio);
}
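
/*
 * Illustrative sketch (not part of the original header): a directory-code
 * style pattern where a hypothetical helper returns a pointer into a mapped,
 * refcounted folio that is later released in one call.
 *
 *	de = dir_get_entry(dir, n, &folio);
 *	if (IS_ERR(de))
 *		return PTR_ERR(de);
 *	...
 *	folio_release_kmap(folio, de);
 */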

static inline void unmap_and_put_page(struct page *page, void *addr)
{
	folio_release_kmap(page_folio(page), addr);
}

#endif /* _LINUX_HIGHMEM_H */