/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_INTERNAL_H
#define _LINUX_HIGHMEM_INTERNAL_H

/*
 * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(const void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
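/* Called by the scheduler to save/restore the task's local kmaps across a context switch. */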
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
static inline void kmap_assert_nomap(void)
{
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
#else
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
#endif

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);

static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}
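
/*
 * Illustrative usage sketch ("page", "buf" and "len" are hypothetical):
 *
 *	void *vaddr = kmap(page);
 *	memcpy(buf, vaddr, len);
 *	kunmap(page);
 *
 * kmap() can sleep, so this only works in preemptible context. The
 * mapping is long-lived and not tied to the mapping CPU.
 */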

static inline void kunmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

static inline struct page *kmap_to_page(void *addr)
{
	return __kmap_to_page(addr);
}

static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}

static inline void *kmap_local_page(struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	struct page *page = folio_page(folio, offset / PAGE_SIZE);

	return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
}
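
/*
 * Illustrative sketch ("folio" and byte offset "off" are hypothetical):
 *
 *	void *vaddr = kmap_local_folio(folio, off);
 *	...
 *	kunmap_local(vaddr);
 *
 * The returned address already includes the offset within the page, and
 * kunmap_local() accepts any address inside the mapped page.
 */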

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_local(const void *vaddr)
{
	kunmap_local_indexed(vaddr);
}

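/*
 * On PREEMPT_RT only migration is disabled, so the task stays
 * preemptible; otherwise preemption is disabled to preserve the
 * historic CPU-local kmap_atomic() contract.
 */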
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

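/* Teardown happens in the reverse order of kmap_atomic_prot(). */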
static inline void __kunmap_atomic(const void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

unsigned int __nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;

static inline unsigned int nr_free_highpages(void)
{
	return __nr_free_highpages();
}

static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}

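/*
 * True if @x lies in the persistent kmap (PKMAP) window or in the
 * fixmap window used by the local/atomic kmaps.
 */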
static inline bool is_kmap_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) ||
		(addr >= __fix_to_virt(FIX_KMAP_END) &&
		 addr < __fix_to_virt(FIX_KMAP_BEGIN));
}
#else /* CONFIG_HIGHMEM */

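/*
 * Without CONFIG_HIGHMEM every page is permanently mapped, so the map
 * operations reduce to page_address() and only the pagefault and
 * preemption side effects of the *_atomic() variants remain.
 */
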
static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_local_page(struct page *page)
{
	return page_address(page);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	return page_address(&folio->page) + offset;
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return kmap_local_page(page);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return kmap_local_page(pfn_to_page(pfn));
}

static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
}

static inline void *kmap_atomic(struct page *page)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();
	pagefault_disable();
	return page_address(page);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}

static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

static inline unsigned int nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0UL; }

static inline bool is_kmap_addr(const void *x)
{
	return false;
}

#endif /* CONFIG_HIGHMEM */

/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
 * @__addr: Virtual address to be unmapped
 *
 * Unmaps an address previously mapped by kmap_atomic() and re-enables
 * pagefaults. Depending on the PREEMPT_RT configuration, it also
 * re-enables migration and preemption. Users should not count on these
 * side effects.
 *
 * Mappings should be unmapped in the reverse order that they were mapped.
 * See kmap_local_page() for details on nesting.
 *
 * @__addr can be any address within the mapped page, so there is no need
 * to subtract any offset that has been added. In contrast to kunmap(),
 * this function takes the address returned from kmap_atomic(), not the
 * page passed to it. The compiler will warn you if you pass the page.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)
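
/*
 * Illustrative pairing ("page", "dst" and "len" are hypothetical):
 *
 *	char *vaddr = kmap_atomic(page);
 *	memcpy(dst, vaddr, len);
 *	kunmap_atomic(vaddr);
 *
 * New code should prefer kmap_local_page()/kunmap_local().
 */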

/**
 * kunmap_local - Unmap a page mapped via kmap_local_page().
 * @__addr: An address within the page mapped
 *
 * @__addr can be any address within the mapped page. Commonly it is the
 * address returned from kmap_local_page(), but it can also include offsets.
 *
 * Unmapping should be done in the reverse order of the mapping. See
 * kmap_local_page() for details.
 */
#define kunmap_local(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local(__addr);					\
} while (0)
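
/*
 * Nested local mappings must be released in reverse order, e.g.
 * ("src" and "dst" are hypothetical pages):
 *
 *	void *s = kmap_local_page(src);
 *	void *d = kmap_local_page(dst);
 *
 *	memcpy(d, s, PAGE_SIZE);
 *	kunmap_local(d);
 *	kunmap_local(s);
 */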

#endif /* _LINUX_HIGHMEM_INTERNAL_H */