/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_INTERNAL_H
#define _LINUX_HIGHMEM_INTERNAL_H

/*
 * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(const void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
static inline void kmap_assert_nomap(void)
{
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
#else
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
#endif
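
/*
 * Usage sketch (illustrative, not part of this header): the kmap_local
 * core keeps per-task mappings on a small stack (current->kmap_ctrl), so
 * nested mappings must be released in reverse (LIFO) order:
 *
 *	void *a = kmap_local_page(page1);
 *	void *b = kmap_local_page(page2);
 *	...
 *	kunmap_local(b);
 *	kunmap_local(a);
 */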

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);

static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}

static inline void kunmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
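
/*
 * Usage sketch (illustrative): kmap() may sleep and the mapping persists
 * across context switches, so it suits longer-lived mappings:
 *
 *	void *addr = kmap(page);
 *	memcpy(addr, data, len);
 *	kunmap(page);
 *
 * Note that kunmap() takes the page, not the address returned by kmap().
 */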

static inline struct page *kmap_to_page(void *addr)
{
	return __kmap_to_page(addr);
}

static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}

static inline void *kmap_local_page(struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	struct page *page = folio_page(folio, offset / PAGE_SIZE);

	return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
}
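
/*
 * Usage sketch (illustrative): map the byte at @offset within a folio;
 * the returned pointer already includes the in-page offset:
 *
 *	char *p = kmap_local_folio(folio, offset);
 *	*p = 0;
 *	kunmap_local(p);
 */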

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_local(const void *vaddr)
{
	kunmap_local_indexed(vaddr);
}

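/*
 * On PREEMPT_RT only migration is disabled, keeping the mapping CPU-local
 * while the section remains preemptible; otherwise preemption is disabled,
 * matching the historic kmap_atomic() semantics.
 */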
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_atomic(const void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

unsigned int __nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;

static inline unsigned int nr_free_highpages(void)
{
	return __nr_free_highpages();
}

static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}

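/*
 * True if @x lies inside the persistent kmap (PKMAP) virtual window.
 */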
static inline bool is_kmap_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP);
}
#else /* CONFIG_HIGHMEM */

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_local_page(struct page *page)
{
	return page_address(page);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	return page_address(&folio->page) + offset;
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return kmap_local_page(page);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return kmap_local_page(pfn_to_page(pfn));
}

static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
}

static inline void *kmap_atomic(struct page *page)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();
	pagefault_disable();
	return page_address(page);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}

static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

static inline unsigned int nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0UL; }

static inline bool is_kmap_addr(const void *x)
{
	return false;
}

#endif /* CONFIG_HIGHMEM */

/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
 * @__addr:	Virtual address to be unmapped
 *
 * Unmaps an address previously mapped by kmap_atomic() and re-enables
 * pagefaults. Depending on the PREEMPT_RT configuration, it also re-enables
 * migration and preemption. Users should not count on these side effects.
 *
 * Mappings should be unmapped in the reverse order that they were mapped.
 * See kmap_local_page() for details on nesting.
 *
 * @__addr can be any address within the mapped page, so there is no need
 * to subtract any offset that has been added. In contrast to kunmap(),
 * this function takes the address returned from kmap_atomic(), not the
 * page passed to it. The compiler will warn you if you pass the page.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)
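
/*
 * Usage sketch (illustrative) of the deprecated pair; new code should use
 * kmap_local_page()/kunmap_local() instead:
 *
 *	void *addr = kmap_atomic(page);
 *	memcpy(addr, buf, PAGE_SIZE);
 *	kunmap_atomic(addr);
 *
 * Passing the page instead of the address trips the BUILD_BUG_ON() above.
 */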

/**
 * kunmap_local - Unmap a page mapped via kmap_local_page().
 * @__addr:	An address within the page mapped
 *
 * @__addr can be any address within the mapped page. Commonly it is the
 * address returned from kmap_local_page(), but it can also include offsets.
 *
 * Unmapping should be done in the reverse order of the mapping. See
 * kmap_local_page() for details.
 */
#define kunmap_local(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local(__addr);					\
} while (0)
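
/*
 * Usage sketch (illustrative): the address handed to kunmap_local() may
 * carry an in-page offset:
 *
 *	char *p = kmap_local_page(page);
 *	p[offset] = 0;
 *	kunmap_local(p + offset);
 */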

#endif /* _LINUX_HIGHMEM_INTERNAL_H */