1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _LINUX_MM_H |
3 | #define _LINUX_MM_H |
4 | |
5 | #include <linux/errno.h> |
6 | #include <linux/mmdebug.h> |
7 | #include <linux/gfp.h> |
8 | #include <linux/bug.h> |
9 | #include <linux/list.h> |
10 | #include <linux/mmzone.h> |
11 | #include <linux/rbtree.h> |
12 | #include <linux/atomic.h> |
13 | #include <linux/debug_locks.h> |
14 | #include <linux/mm_types.h> |
15 | #include <linux/mmap_lock.h> |
16 | #include <linux/range.h> |
17 | #include <linux/pfn.h> |
18 | #include <linux/percpu-refcount.h> |
19 | #include <linux/bit_spinlock.h> |
20 | #include <linux/shrinker.h> |
21 | #include <linux/resource.h> |
22 | #include <linux/page_ext.h> |
23 | #include <linux/err.h> |
24 | #include <linux/page-flags.h> |
25 | #include <linux/page_ref.h> |
26 | #include <linux/overflow.h> |
27 | #include <linux/sizes.h> |
28 | #include <linux/sched.h> |
29 | #include <linux/pgtable.h> |
30 | #include <linux/kasan.h> |
31 | #include <linux/memremap.h> |
32 | #include <linux/slab.h> |
33 | |
34 | struct mempolicy; |
35 | struct anon_vma; |
36 | struct anon_vma_chain; |
37 | struct user_struct; |
38 | struct pt_regs; |
39 | struct folio_batch; |
40 | |
41 | extern int sysctl_page_lock_unfairness; |
42 | |
43 | void mm_core_init(void); |
44 | void init_mm_internals(void); |
45 | |
46 | #ifndef CONFIG_NUMA /* Don't use mapnrs, do it properly */ |
47 | extern unsigned long max_mapnr; |
48 | |
49 | static inline void set_max_mapnr(unsigned long limit) |
50 | { |
51 | max_mapnr = limit; |
52 | } |
53 | #else |
54 | static inline void set_max_mapnr(unsigned long limit) { } |
55 | #endif |
56 | |
57 | extern atomic_long_t _totalram_pages; |
static inline unsigned long totalram_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalram_pages);
}

static inline void totalram_pages_inc(void)
{
	atomic_long_inc(&_totalram_pages);
}

static inline void totalram_pages_dec(void)
{
	atomic_long_dec(&_totalram_pages);
}

static inline void totalram_pages_add(long count)
{
	atomic_long_add(count, &_totalram_pages);
}
77 | |
78 | extern void * high_memory; |
79 | extern int page_cluster; |
80 | extern const int page_cluster_max; |
81 | |
82 | #ifdef CONFIG_SYSCTL |
83 | extern int sysctl_legacy_va_layout; |
84 | #else |
85 | #define sysctl_legacy_va_layout 0 |
86 | #endif |
87 | |
88 | #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS |
89 | extern const int mmap_rnd_bits_min; |
90 | extern int mmap_rnd_bits_max __ro_after_init; |
91 | extern int mmap_rnd_bits __read_mostly; |
92 | #endif |
93 | #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS |
94 | extern const int mmap_rnd_compat_bits_min; |
95 | extern const int mmap_rnd_compat_bits_max; |
96 | extern int mmap_rnd_compat_bits __read_mostly; |
97 | #endif |
98 | |
99 | #include <asm/page.h> |
100 | #include <asm/processor.h> |
101 | |
102 | #ifndef __pa_symbol |
103 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) |
104 | #endif |
105 | |
106 | #ifndef page_to_virt |
107 | #define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x))) |
108 | #endif |
109 | |
110 | #ifndef lm_alias |
111 | #define lm_alias(x) __va(__pa_symbol(x)) |
112 | #endif |
113 | |
/*
 * To prevent common memory management code from establishing
 * a zero page mapping on a read fault.
 * This macro should be defined within <asm/pgtable.h>.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
121 | #ifndef mm_forbids_zeropage |
122 | #define mm_forbids_zeropage(X) (0) |
123 | #endif |
124 | |
/*
 * On some architectures it is expensive to call memset() for small sizes.
 * If an architecture decides to implement its own version of
 * mm_zero_struct_page, it should wrap the defines below in an #ifndef and
 * define its own version of this macro in <asm/pgtable.h>.
 */
#if BITS_PER_LONG == 64
/* This function must be updated when the size of struct page grows above 96
 * bytes or shrinks below 56 bytes. The idea is that the compiler optimizes
 * out the switch() statement and leaves only move/store instructions. The
 * compiler can also combine write statements if they are both assignments
 * and can be reordered; this can result in several of the writes here being
 * dropped.
 */
138 | #define mm_zero_struct_page(pp) __mm_zero_struct_page(pp) |
139 | static inline void __mm_zero_struct_page(struct page *page) |
140 | { |
141 | unsigned long *_pp = (void *)page; |
142 | |
143 | /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */ |
144 | BUILD_BUG_ON(sizeof(struct page) & 7); |
145 | BUILD_BUG_ON(sizeof(struct page) < 56); |
146 | BUILD_BUG_ON(sizeof(struct page) > 96); |
147 | |
148 | switch (sizeof(struct page)) { |
149 | case 96: |
150 | _pp[11] = 0; |
151 | fallthrough; |
152 | case 88: |
153 | _pp[10] = 0; |
154 | fallthrough; |
155 | case 80: |
156 | _pp[9] = 0; |
157 | fallthrough; |
158 | case 72: |
159 | _pp[8] = 0; |
160 | fallthrough; |
161 | case 64: |
162 | _pp[7] = 0; |
163 | fallthrough; |
164 | case 56: |
165 | _pp[6] = 0; |
166 | _pp[5] = 0; |
167 | _pp[4] = 0; |
168 | _pp[3] = 0; |
169 | _pp[2] = 0; |
170 | _pp[1] = 0; |
171 | _pp[0] = 0; |
172 | } |
173 | } |
174 | #else |
175 | #define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page))) |
176 | #endif |
177 | |
/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can override this limit via sysctl, but there is a
 * caveat.
 *
 * When a program's coredump is generated in ELF format, a section is created
 * per vma. In ELF, the number of sections is represented as an unsigned
 * short. This means the number of sections must be smaller than 65535 at
 * coredump time. Because the kernel adds some informative sections to the
 * program image when generating a coredump, we need some margin. The number
 * of extra sections is currently 1-3 and depends on the arch; we use "5" as
 * a safe margin here.
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit
 * bound is no longer a hard limit, although some userspace tools can be
 * surprised by it.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
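
/*
 * Worked example: USHRT_MAX is 65535, so with the five-entry margin above
 * DEFAULT_MAX_MAP_COUNT evaluates to 65535 - 5 = 65530 VMAs per mm. The
 * runtime limit is whatever sysctl_max_map_count currently holds.
 */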
196 | |
197 | extern int sysctl_max_map_count; |
198 | |
199 | extern unsigned long sysctl_user_reserve_kbytes; |
200 | extern unsigned long sysctl_admin_reserve_kbytes; |
201 | |
202 | extern int sysctl_overcommit_memory; |
203 | extern int sysctl_overcommit_ratio; |
204 | extern unsigned long sysctl_overcommit_kbytes; |
205 | |
206 | int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *, |
207 | loff_t *); |
208 | int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *, |
209 | loff_t *); |
210 | int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, |
211 | loff_t *); |
212 | |
213 | #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) |
214 | #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) |
215 | #define folio_page_idx(folio, p) (page_to_pfn(p) - folio_pfn(folio)) |
216 | #else |
217 | #define nth_page(page,n) ((page) + (n)) |
218 | #define folio_page_idx(folio, p) ((p) - &(folio)->page) |
219 | #endif |
220 | |
221 | /* to align the pointer to the (next) page boundary */ |
222 | #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) |
223 | |
224 | /* to align the pointer to the (prev) page boundary */ |
225 | #define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE) |
226 | |
227 | /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */ |
228 | #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE) |
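
/*
 * Worked example (illustrative, assuming a 4 KiB PAGE_SIZE):
 *
 *	PAGE_ALIGN(0x1234)		-> 0x2000
 *	PAGE_ALIGN_DOWN(0x1234)		-> 0x1000
 *	PAGE_ALIGNED(0x2000)		-> true
 *	PAGE_ALIGNED(0x1234)		-> false
 */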
229 | |
230 | static inline struct folio *lru_to_folio(struct list_head *head) |
231 | { |
232 | return list_entry((head)->prev, struct folio, lru); |
233 | } |
234 | |
235 | void setup_initial_init_mm(void *start_code, void *end_code, |
236 | void *end_data, void *brk); |
237 | |
238 | /* |
239 | * Linux kernel virtual memory manager primitives. |
240 | * The idea being to have a "virtual" mm in the same way |
241 | * we have a virtual fs - giving a cleaner interface to the |
242 | * mm details, and allowing different kinds of memory mappings |
243 | * (from shared memory to executable loading to arbitrary |
244 | * mmap() functions). |
245 | */ |
246 | |
247 | struct vm_area_struct *vm_area_alloc(struct mm_struct *); |
248 | struct vm_area_struct *vm_area_dup(struct vm_area_struct *); |
249 | void vm_area_free(struct vm_area_struct *); |
250 | /* Use only if VMA has no other users */ |
251 | void __vm_area_free(struct vm_area_struct *vma); |
252 | |
253 | #ifndef CONFIG_MMU |
254 | extern struct rb_root nommu_region_tree; |
255 | extern struct rw_semaphore nommu_region_sem; |
256 | |
257 | extern unsigned int kobjsize(const void *objp); |
258 | #endif |
259 | |
260 | /* |
261 | * vm_flags in vm_area_struct, see mm_types.h. |
262 | * When changing, update also include/trace/events/mmflags.h |
263 | */ |
264 | #define VM_NONE 0x00000000 |
265 | |
266 | #define VM_READ 0x00000001 /* currently active flags */ |
267 | #define VM_WRITE 0x00000002 |
268 | #define VM_EXEC 0x00000004 |
269 | #define VM_SHARED 0x00000008 |
270 | |
271 | /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */ |
272 | #define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */ |
273 | #define VM_MAYWRITE 0x00000020 |
274 | #define VM_MAYEXEC 0x00000040 |
275 | #define VM_MAYSHARE 0x00000080 |
276 | |
277 | #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ |
278 | #ifdef CONFIG_MMU |
279 | #define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */ |
280 | #else /* CONFIG_MMU */ |
281 | #define VM_MAYOVERLAY 0x00000200 /* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */ |
282 | #define VM_UFFD_MISSING 0 |
283 | #endif /* CONFIG_MMU */ |
284 | #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ |
285 | #define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */ |
286 | |
287 | #define VM_LOCKED 0x00002000 |
288 | #define VM_IO 0x00004000 /* Memory mapped I/O or similar */ |
289 | |
290 | /* Used by sys_madvise() */ |
291 | #define VM_SEQ_READ 0x00008000 /* App will access data sequentially */ |
292 | #define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */ |
293 | |
294 | #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */ |
295 | #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ |
296 | #define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */ |
297 | #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ |
298 | #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ |
299 | #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ |
300 | #define VM_SYNC 0x00800000 /* Synchronous page faults */ |
301 | #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ |
302 | #define VM_WIPEONFORK 0x02000000 /* Wipe VMA contents in child. */ |
303 | #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ |
304 | |
305 | #ifdef CONFIG_MEM_SOFT_DIRTY |
306 | # define VM_SOFTDIRTY 0x08000000 /* Not soft dirty clean area */ |
307 | #else |
308 | # define VM_SOFTDIRTY 0 |
309 | #endif |
310 | |
311 | #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ |
312 | #define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */ |
313 | #define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */ |
314 | #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ |
315 | |
316 | #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS |
317 | #define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */ |
318 | #define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */ |
319 | #define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */ |
320 | #define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */ |
321 | #define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */ |
322 | #define VM_HIGH_ARCH_BIT_5 37 /* bit only usable on 64-bit architectures */ |
323 | #define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0) |
324 | #define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1) |
325 | #define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2) |
326 | #define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3) |
327 | #define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4) |
328 | #define VM_HIGH_ARCH_5 BIT(VM_HIGH_ARCH_BIT_5) |
329 | #endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */ |
330 | |
331 | #ifdef CONFIG_ARCH_HAS_PKEYS |
332 | # define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0 |
333 | # define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */ |
334 | # define VM_PKEY_BIT1 VM_HIGH_ARCH_1 /* on x86 and 5-bit value on ppc64 */ |
335 | # define VM_PKEY_BIT2 VM_HIGH_ARCH_2 |
336 | # define VM_PKEY_BIT3 VM_HIGH_ARCH_3 |
337 | #ifdef CONFIG_PPC |
338 | # define VM_PKEY_BIT4 VM_HIGH_ARCH_4 |
339 | #else |
340 | # define VM_PKEY_BIT4 0 |
341 | #endif |
342 | #endif /* CONFIG_ARCH_HAS_PKEYS */ |
343 | |
344 | #ifdef CONFIG_X86_USER_SHADOW_STACK |
345 | /* |
 * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
 * support in core mm.
348 | * |
349 | * These VMAs will get a single end guard page. This helps userspace protect |
350 | * itself from attacks. A single page is enough for current shadow stack archs |
351 | * (x86). See the comments near alloc_shstk() in arch/x86/kernel/shstk.c |
352 | * for more details on the guard size. |
353 | */ |
354 | # define VM_SHADOW_STACK VM_HIGH_ARCH_5 |
355 | #else |
356 | # define VM_SHADOW_STACK VM_NONE |
357 | #endif |
358 | |
359 | #if defined(CONFIG_X86) |
360 | # define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */ |
361 | #elif defined(CONFIG_PPC) |
362 | # define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ |
363 | #elif defined(CONFIG_PARISC) |
364 | # define VM_GROWSUP VM_ARCH_1 |
365 | #elif defined(CONFIG_SPARC64) |
366 | # define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */ |
367 | # define VM_ARCH_CLEAR VM_SPARC_ADI |
368 | #elif defined(CONFIG_ARM64) |
369 | # define VM_ARM64_BTI VM_ARCH_1 /* BTI guarded page, a.k.a. GP bit */ |
370 | # define VM_ARCH_CLEAR VM_ARM64_BTI |
371 | #elif !defined(CONFIG_MMU) |
372 | # define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ |
373 | #endif |
374 | |
375 | #if defined(CONFIG_ARM64_MTE) |
376 | # define VM_MTE VM_HIGH_ARCH_0 /* Use Tagged memory for access control */ |
377 | # define VM_MTE_ALLOWED VM_HIGH_ARCH_1 /* Tagged memory permitted */ |
378 | #else |
379 | # define VM_MTE VM_NONE |
380 | # define VM_MTE_ALLOWED VM_NONE |
381 | #endif |
382 | |
383 | #ifndef VM_GROWSUP |
384 | # define VM_GROWSUP VM_NONE |
385 | #endif |
386 | |
387 | #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR |
388 | # define VM_UFFD_MINOR_BIT 38 |
389 | # define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */ |
390 | #else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ |
391 | # define VM_UFFD_MINOR VM_NONE |
392 | #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ |
393 | |
394 | /* |
395 | * This flag is used to connect VFIO to arch specific KVM code. It |
 * indicates that the memory under this VMA is safe for use with any
 * non-cacheable memory type inside KVM. Some VFIO devices, on some
398 | * platforms, are thought to be unsafe and can cause machine crashes |
399 | * if KVM does not lock down the memory type. |
400 | */ |
401 | #ifdef CONFIG_64BIT |
402 | #define VM_ALLOW_ANY_UNCACHED_BIT 39 |
403 | #define VM_ALLOW_ANY_UNCACHED BIT(VM_ALLOW_ANY_UNCACHED_BIT) |
404 | #else |
405 | #define VM_ALLOW_ANY_UNCACHED VM_NONE |
406 | #endif |
407 | |
408 | /* Bits set in the VMA until the stack is in its final location */ |
409 | #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY) |
410 | |
411 | #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) |
412 | |
413 | /* Common data flag combinations */ |
414 | #define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \ |
415 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) |
416 | #define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \ |
417 | VM_MAYWRITE | VM_MAYEXEC) |
418 | #define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \ |
419 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) |
420 | |
421 | #ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */ |
422 | #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC |
423 | #endif |
424 | |
425 | #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ |
426 | #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS |
427 | #endif |
428 | |
429 | #define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK) |
430 | |
431 | #ifdef CONFIG_STACK_GROWSUP |
432 | #define VM_STACK VM_GROWSUP |
433 | #define VM_STACK_EARLY VM_GROWSDOWN |
434 | #else |
435 | #define VM_STACK VM_GROWSDOWN |
436 | #define VM_STACK_EARLY 0 |
437 | #endif |
438 | |
439 | #define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) |
440 | |
441 | /* VMA basic access permission flags */ |
442 | #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC) |
443 | |
444 | |
445 | /* |
446 | * Special vmas that are non-mergable, non-mlock()able. |
447 | */ |
448 | #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) |
449 | |
450 | /* This mask prevents VMA from being scanned with khugepaged */ |
451 | #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) |
452 | |
/* This mask defines which mm->def_flags a process can inherit from its parent */
454 | #define VM_INIT_DEF_MASK VM_NOHUGEPAGE |
455 | |
456 | /* This mask represents all the VMA flag bits used by mlock */ |
457 | #define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT) |
458 | |
459 | /* Arch-specific flags to clear when updating VM flags on protection change */ |
460 | #ifndef VM_ARCH_CLEAR |
461 | # define VM_ARCH_CLEAR VM_NONE |
462 | #endif |
463 | #define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR) |
464 | |
/*
 * Mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
 */
469 | |
470 | /* |
471 | * The default fault flags that should be used by most of the |
472 | * arch-specific page fault handlers. |
473 | */ |
474 | #define FAULT_FLAG_DEFAULT (FAULT_FLAG_ALLOW_RETRY | \ |
475 | FAULT_FLAG_KILLABLE | \ |
476 | FAULT_FLAG_INTERRUPTIBLE) |
477 | |
478 | /** |
479 | * fault_flag_allow_retry_first - check ALLOW_RETRY the first time |
480 | * @flags: Fault flags. |
481 | * |
482 | * This is mostly used for places where we want to try to avoid taking |
483 | * the mmap_lock for too long a time when waiting for another condition |
484 | * to change, in which case we can try to be polite to release the |
485 | * mmap_lock in the first round to avoid potential starvation of other |
486 | * processes that would also want the mmap_lock. |
487 | * |
488 | * Return: true if the page fault allows retry and this is the first |
489 | * attempt of the fault handling; false otherwise. |
490 | */ |
491 | static inline bool fault_flag_allow_retry_first(enum fault_flag flags) |
492 | { |
493 | return (flags & FAULT_FLAG_ALLOW_RETRY) && |
494 | (!(flags & FAULT_FLAG_TRIED)); |
495 | } |
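
/*
 * Illustrative sketch (modeled loosely on __folio_lock_or_retry() in
 * mm/filemap.c): a fault path that might sleep can drop the lock on the
 * first attempt and request a retry rather than blocking other mmap_lock
 * users:
 *
 *	if (fault_flag_allow_retry_first(vmf->flags)) {
 *		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
 *			return VM_FAULT_RETRY;
 *		release_fault_lock(vmf);
 *		... wait for the condition here ...
 *		return VM_FAULT_RETRY;
 *	}
 */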
496 | |
497 | #define FAULT_FLAG_TRACE \ |
498 | { FAULT_FLAG_WRITE, "WRITE" }, \ |
499 | { FAULT_FLAG_MKWRITE, "MKWRITE" }, \ |
500 | { FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY" }, \ |
501 | { FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT" }, \ |
502 | { FAULT_FLAG_KILLABLE, "KILLABLE" }, \ |
503 | { FAULT_FLAG_TRIED, "TRIED" }, \ |
504 | { FAULT_FLAG_USER, "USER" }, \ |
505 | { FAULT_FLAG_REMOTE, "REMOTE" }, \ |
506 | { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }, \ |
507 | { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" }, \ |
508 | { FAULT_FLAG_VMA_LOCK, "VMA_LOCK" } |
509 | |
510 | /* |
511 | * vm_fault is filled by the pagefault handler and passed to the vma's |
512 | * ->fault function. The vma's ->fault is responsible for returning a bitmask |
513 | * of VM_FAULT_xxx flags that give details about how the fault was handled. |
514 | * |
515 | * MM layer fills up gfp_mask for page allocations but fault handler might |
516 | * alter it if its implementation requires a different allocation context. |
517 | * |
518 | * pgoff should be used in favour of virtual_address, if possible. |
519 | */ |
520 | struct vm_fault { |
521 | const struct { |
522 | struct vm_area_struct *vma; /* Target VMA */ |
523 | gfp_t gfp_mask; /* gfp mask to be used for allocations */ |
524 | pgoff_t pgoff; /* Logical page offset based on vma */ |
525 | unsigned long address; /* Faulting virtual address - masked */ |
526 | unsigned long real_address; /* Faulting virtual address - unmasked */ |
527 | }; |
528 | enum fault_flag flags; /* FAULT_FLAG_xxx flags |
529 | * XXX: should really be 'const' */ |
530 | pmd_t *pmd; /* Pointer to pmd entry matching |
531 | * the 'address' */ |
532 | pud_t *pud; /* Pointer to pud entry matching |
533 | * the 'address' |
534 | */ |
535 | union { |
536 | pte_t orig_pte; /* Value of PTE at the time of fault */ |
537 | pmd_t orig_pmd; /* Value of PMD at the time of fault, |
538 | * used by PMD fault only. |
539 | */ |
540 | }; |
541 | |
542 | struct page *cow_page; /* Page handler may use for COW fault */ |
543 | struct page *page; /* ->fault handlers should return a |
544 | * page here, unless VM_FAULT_NOPAGE |
545 | * is set (which is also implied by |
546 | * VM_FAULT_ERROR). |
547 | */ |
548 | /* These three entries are valid only while holding ptl lock */ |
549 | pte_t *pte; /* Pointer to pte entry matching |
550 | * the 'address'. NULL if the page |
551 | * table hasn't been allocated. |
552 | */ |
553 | spinlock_t *ptl; /* Page table lock. |
554 | * Protects pte page table if 'pte' |
555 | * is not NULL, otherwise pmd. |
556 | */ |
557 | pgtable_t prealloc_pte; /* Pre-allocated pte page table. |
558 | * vm_ops->map_pages() sets up a page |
559 | * table from atomic context. |
560 | * do_fault_around() pre-allocates |
561 | * page table to avoid allocation from |
562 | * atomic context. |
563 | */ |
564 | }; |
565 | |
566 | /* |
567 | * These are the virtual MM functions - opening of an area, closing and |
568 | * unmapping it (needed to keep files on disk up-to-date etc), pointer |
569 | * to the functions called when a no-page or a wp-page exception occurs. |
570 | */ |
571 | struct vm_operations_struct { |
572 | void (*open)(struct vm_area_struct * area); |
573 | /** |
574 | * @close: Called when the VMA is being removed from the MM. |
575 | * Context: User context. May sleep. Caller holds mmap_lock. |
576 | */ |
577 | void (*close)(struct vm_area_struct * area); |
578 | /* Called any time before splitting to check if it's allowed */ |
579 | int (*may_split)(struct vm_area_struct *area, unsigned long addr); |
580 | int (*mremap)(struct vm_area_struct *area); |
581 | /* |
582 | * Called by mprotect() to make driver-specific permission |
583 | * checks before mprotect() is finalised. The VMA must not |
584 | * be modified. Returns 0 if mprotect() can proceed. |
585 | */ |
586 | int (*mprotect)(struct vm_area_struct *vma, unsigned long start, |
587 | unsigned long end, unsigned long newflags); |
588 | vm_fault_t (*fault)(struct vm_fault *vmf); |
589 | vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order); |
590 | vm_fault_t (*map_pages)(struct vm_fault *vmf, |
591 | pgoff_t start_pgoff, pgoff_t end_pgoff); |
592 | unsigned long (*pagesize)(struct vm_area_struct * area); |
593 | |
	/* notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS */
596 | vm_fault_t (*page_mkwrite)(struct vm_fault *vmf); |
597 | |
598 | /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */ |
599 | vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf); |
600 | |
601 | /* called by access_process_vm when get_user_pages() fails, typically |
602 | * for use by special VMAs. See also generic_access_phys() for a generic |
603 | * implementation useful for any iomem mapping. |
604 | */ |
605 | int (*access)(struct vm_area_struct *vma, unsigned long addr, |
606 | void *buf, int len, int write); |
607 | |
608 | /* Called by the /proc/PID/maps code to ask the vma whether it |
609 | * has a special name. Returning non-NULL will also cause this |
610 | * vma to be dumped unconditionally. */ |
611 | const char *(*name)(struct vm_area_struct *vma); |
612 | |
613 | #ifdef CONFIG_NUMA |
614 | /* |
615 | * set_policy() op must add a reference to any non-NULL @new mempolicy |
616 | * to hold the policy upon return. Caller should pass NULL @new to |
617 | * remove a policy and fall back to surrounding context--i.e. do not |
618 | * install a MPOL_DEFAULT policy, nor the task or system default |
619 | * mempolicy. |
620 | */ |
621 | int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new); |
622 | |
623 | /* |
624 | * get_policy() op must add reference [mpol_get()] to any policy at |
625 | * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure |
626 | * in mm/mempolicy.c will do this automatically. |
627 | * get_policy() must NOT add a ref if the policy at (vma,addr) is not |
628 | * marked as MPOL_SHARED. vma policies are protected by the mmap_lock. |
629 | * If no [shared/vma] mempolicy exists at the addr, get_policy() op |
630 | * must return NULL--i.e., do not "fallback" to task or system default |
631 | * policy. |
632 | */ |
633 | struct mempolicy *(*get_policy)(struct vm_area_struct *vma, |
634 | unsigned long addr, pgoff_t *ilx); |
635 | #endif |
636 | /* |
637 | * Called by vm_normal_page() for special PTEs to find the |
638 | * page for @addr. This is useful if the default behavior |
639 | * (using pte_page()) would not find the correct page. |
640 | */ |
641 | struct page *(*find_special_page)(struct vm_area_struct *vma, |
642 | unsigned long addr); |
643 | }; |
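
/*
 * Minimal illustrative sketch of a driver supplying its own vm_ops. The
 * my_drv_* names are hypothetical and not part of this header; a real
 * handler must return a referenced page or an appropriate VM_FAULT_xxx code:
 *
 *	static vm_fault_t my_drv_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = my_drv_lookup_page(vmf->vma, vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);
 *		vmf->page = page;
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_drv_vm_ops = {
 *		.fault = my_drv_fault,
 *	};
 */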
644 | |
645 | #ifdef CONFIG_NUMA_BALANCING |
646 | static inline void vma_numab_state_init(struct vm_area_struct *vma) |
647 | { |
648 | vma->numab_state = NULL; |
649 | } |
static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
	kfree(vma->numab_state);
}
654 | #else |
655 | static inline void vma_numab_state_init(struct vm_area_struct *vma) {} |
656 | static inline void vma_numab_state_free(struct vm_area_struct *vma) {} |
657 | #endif /* CONFIG_NUMA_BALANCING */ |
658 | |
659 | #ifdef CONFIG_PER_VMA_LOCK |
/*
 * Try to read-lock a vma. The function is allowed to occasionally yield a
 * false locked result to avoid performance overhead, in which case we fall
 * back to using mmap_lock. The function should never yield a false unlocked
 * result.
 */
static inline bool vma_start_read(struct vm_area_struct *vma)
{
	/*
	 * Check before locking. A race might cause a false locked result.
	 * We can use READ_ONCE() for the mm_lock_seq here, and don't need
	 * ACQUIRE semantics, because this is just a lockless check whose result
	 * we don't rely on for anything - the mm_lock_seq read against which we
	 * need ordering is below.
	 */
	if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq))
		return false;

	if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
		return false;

	/*
	 * Overflow might produce a false locked result.
	 * A false unlocked result is impossible because we modify and check
	 * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq
	 * modification invalidates all existing locks.
	 *
	 * We must use ACQUIRE semantics for the mm_lock_seq so that if we are
	 * racing with vma_end_write_all(), we only start reading from the VMA
	 * after it has been unlocked.
	 * This pairs with RELEASE semantics in vma_end_write_all().
	 */
	if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) {
		up_read(&vma->vm_lock->lock);
		return false;
	}
	return true;
}
697 | |
static inline void vma_end_read(struct vm_area_struct *vma)
{
	rcu_read_lock(); /* keeps vma alive till the end of up_read */
	up_read(&vma->vm_lock->lock);
	rcu_read_unlock();
}
704 | |
/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
{
	mmap_assert_write_locked(vma->vm_mm);

	/*
	 * The current task is holding mmap_write_lock, so neither
	 * vma->vm_lock_seq nor mm->mm_lock_seq can be concurrently modified.
	 */
	*mm_lock_seq = vma->vm_mm->mm_lock_seq;
	return (vma->vm_lock_seq == *mm_lock_seq);
}
717 | |
718 | /* |
719 | * Begin writing to a VMA. |
720 | * Exclude concurrent readers under the per-VMA lock until the currently |
721 | * write-locked mmap_lock is dropped or downgraded. |
722 | */ |
static inline void vma_start_write(struct vm_area_struct *vma)
{
	int mm_lock_seq;

	if (__is_vma_write_locked(vma, &mm_lock_seq))
		return;

	down_write(&vma->vm_lock->lock);
	/*
	 * We should use WRITE_ONCE() here because we can have concurrent reads
	 * from the early lockless pessimistic check in vma_start_read().
	 * We don't really care about the correctness of that early check, but
	 * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
	 */
	WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
	up_write(&vma->vm_lock->lock);
}
740 | |
741 | static inline void vma_assert_write_locked(struct vm_area_struct *vma) |
742 | { |
743 | int mm_lock_seq; |
744 | |
745 | VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma); |
746 | } |
747 | |
static inline void vma_assert_locked(struct vm_area_struct *vma)
{
	if (!rwsem_is_locked(&vma->vm_lock->lock))
		vma_assert_write_locked(vma);
}

static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
{
	/* When detaching, the vma should be write-locked */
	if (detached)
		vma_assert_write_locked(vma);
	vma->detached = detached;
}
761 | |
static inline void release_fault_lock(struct vm_fault *vmf)
{
	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
		vma_end_read(vmf->vma);
	else
		mmap_read_unlock(vmf->vma->vm_mm);
}

static inline void assert_fault_locked(struct vm_fault *vmf)
{
	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
		vma_assert_locked(vmf->vma);
	else
		mmap_assert_locked(vmf->vma->vm_mm);
}
777 | |
778 | struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, |
779 | unsigned long address); |
780 | |
781 | #else /* CONFIG_PER_VMA_LOCK */ |
782 | |
783 | static inline bool vma_start_read(struct vm_area_struct *vma) |
784 | { return false; } |
785 | static inline void vma_end_read(struct vm_area_struct *vma) {} |
786 | static inline void vma_start_write(struct vm_area_struct *vma) {} |
787 | static inline void vma_assert_write_locked(struct vm_area_struct *vma) |
788 | { mmap_assert_write_locked(vma->vm_mm); } |
789 | static inline void vma_mark_detached(struct vm_area_struct *vma, |
790 | bool detached) {} |
791 | |
792 | static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, |
793 | unsigned long address) |
794 | { |
795 | return NULL; |
796 | } |
797 | |
798 | static inline void vma_assert_locked(struct vm_area_struct *vma) |
799 | { |
800 | mmap_assert_locked(vma->vm_mm); |
801 | } |
802 | |
803 | static inline void release_fault_lock(struct vm_fault *vmf) |
804 | { |
805 | mmap_read_unlock(vmf->vma->vm_mm); |
806 | } |
807 | |
808 | static inline void assert_fault_locked(struct vm_fault *vmf) |
809 | { |
810 | mmap_assert_locked(vmf->vma->vm_mm); |
811 | } |
812 | |
813 | #endif /* CONFIG_PER_VMA_LOCK */ |
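
/*
 * Illustrative sketch of how a page fault handler uses the per-VMA lock
 * (modeled loosely on the arch fault handlers; error handling omitted). When
 * CONFIG_PER_VMA_LOCK is disabled, lock_vma_under_rcu() returns NULL and the
 * mmap_lock fallback path runs unconditionally:
 *
 *	vma = lock_vma_under_rcu(mm, address);
 *	if (vma) {
 *		fault = handle_mm_fault(vma, address,
 *					flags | FAULT_FLAG_VMA_LOCK, regs);
 *		if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
 *			vma_end_read(vma);
 *	} else {
 *		mmap_read_lock(mm);
 *		... fall back to the mmap_lock path ...
 *	}
 */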
814 | |
815 | extern const struct vm_operations_struct vma_dummy_vm_ops; |
816 | |
817 | /* |
818 | * WARNING: vma_init does not initialize vma->vm_lock. |
819 | * Use vm_area_alloc()/vm_area_free() if vma needs locking. |
820 | */ |
static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma_mark_detached(vma, false);
	vma_numab_state_init(vma);
}
830 | |
831 | /* Use when VMA is not part of the VMA tree and needs no locking */ |
832 | static inline void vm_flags_init(struct vm_area_struct *vma, |
833 | vm_flags_t flags) |
834 | { |
835 | ACCESS_PRIVATE(vma, __vm_flags) = flags; |
836 | } |
837 | |
/*
 * Use when VMA is part of the VMA tree and modifications need coordination.
 * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and
 * it should be locked explicitly beforehand.
 */
843 | static inline void vm_flags_reset(struct vm_area_struct *vma, |
844 | vm_flags_t flags) |
845 | { |
846 | vma_assert_write_locked(vma); |
847 | vm_flags_init(vma, flags); |
848 | } |
849 | |
850 | static inline void vm_flags_reset_once(struct vm_area_struct *vma, |
851 | vm_flags_t flags) |
852 | { |
853 | vma_assert_write_locked(vma); |
854 | WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags); |
855 | } |
856 | |
857 | static inline void vm_flags_set(struct vm_area_struct *vma, |
858 | vm_flags_t flags) |
859 | { |
860 | vma_start_write(vma); |
861 | ACCESS_PRIVATE(vma, __vm_flags) |= flags; |
862 | } |
863 | |
864 | static inline void vm_flags_clear(struct vm_area_struct *vma, |
865 | vm_flags_t flags) |
866 | { |
867 | vma_start_write(vma); |
868 | ACCESS_PRIVATE(vma, __vm_flags) &= ~flags; |
869 | } |
870 | |
871 | /* |
872 | * Use only if VMA is not part of the VMA tree or has no other users and |
873 | * therefore needs no locking. |
874 | */ |
static inline void __vm_flags_mod(struct vm_area_struct *vma,
				  vm_flags_t set, vm_flags_t clear)
{
	vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
}
880 | |
881 | /* |
882 | * Use only when the order of set/clear operations is unimportant, otherwise |
883 | * use vm_flags_{set|clear} explicitly. |
884 | */ |
885 | static inline void vm_flags_mod(struct vm_area_struct *vma, |
886 | vm_flags_t set, vm_flags_t clear) |
887 | { |
888 | vma_start_write(vma); |
889 | __vm_flags_mod(vma, set, clear); |
890 | } |
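
/*
 * Illustrative usage of the helpers above (new_vma and vma are placeholder
 * names): a freshly allocated VMA that is not yet visible in the VMA tree can
 * use vm_flags_init(), while a VMA other tasks may already see must go
 * through the locking variants, which write-lock the VMA first:
 *
 *	vm_flags_init(new_vma, VM_READ | VM_MAYREAD);
 *	vm_flags_set(vma, VM_LOCKED);
 *	vm_flags_clear(vma, VM_LOCKED_MASK);
 */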
891 | |
892 | static inline void vma_set_anonymous(struct vm_area_struct *vma) |
893 | { |
894 | vma->vm_ops = NULL; |
895 | } |
896 | |
897 | static inline bool vma_is_anonymous(struct vm_area_struct *vma) |
898 | { |
899 | return !vma->vm_ops; |
900 | } |
901 | |
902 | /* |
903 | * Indicate if the VMA is a heap for the given task; for |
904 | * /proc/PID/maps that is the heap of the main task. |
905 | */ |
906 | static inline bool vma_is_initial_heap(const struct vm_area_struct *vma) |
907 | { |
908 | return vma->vm_start < vma->vm_mm->brk && |
909 | vma->vm_end > vma->vm_mm->start_brk; |
910 | } |
911 | |
912 | /* |
913 | * Indicate if the VMA is a stack for the given task; for |
914 | * /proc/PID/maps that is the stack of the main task. |
915 | */ |
916 | static inline bool vma_is_initial_stack(const struct vm_area_struct *vma) |
917 | { |
	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack". It's not even well-defined for programs written
	 * in languages like Go.
	 */
923 | return vma->vm_start <= vma->vm_mm->start_stack && |
924 | vma->vm_end >= vma->vm_mm->start_stack; |
925 | } |
926 | |
927 | static inline bool vma_is_temporary_stack(struct vm_area_struct *vma) |
928 | { |
929 | int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); |
930 | |
931 | if (!maybe_stack) |
932 | return false; |
933 | |
934 | if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == |
935 | VM_STACK_INCOMPLETE_SETUP) |
936 | return true; |
937 | |
938 | return false; |
939 | } |
940 | |
941 | static inline bool vma_is_foreign(struct vm_area_struct *vma) |
942 | { |
943 | if (!current->mm) |
944 | return true; |
945 | |
946 | if (current->mm != vma->vm_mm) |
947 | return true; |
948 | |
949 | return false; |
950 | } |
951 | |
952 | static inline bool vma_is_accessible(struct vm_area_struct *vma) |
953 | { |
954 | return vma->vm_flags & VM_ACCESS_FLAGS; |
955 | } |
956 | |
957 | static inline bool is_shared_maywrite(vm_flags_t vm_flags) |
958 | { |
959 | return (vm_flags & (VM_SHARED | VM_MAYWRITE)) == |
960 | (VM_SHARED | VM_MAYWRITE); |
961 | } |
962 | |
static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
	return is_shared_maywrite(vma->vm_flags);
}
967 | |
static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}
1000 | |
1001 | static inline unsigned long vma_iter_addr(struct vma_iterator *vmi) |
1002 | { |
1003 | return vmi->mas.index; |
1004 | } |
1005 | |
1006 | static inline unsigned long vma_iter_end(struct vma_iterator *vmi) |
1007 | { |
1008 | return vmi->mas.last + 1; |
1009 | } |
static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
			unsigned long start, unsigned long end, gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

/* Free any unused preallocations */
static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
				      struct vm_area_struct *vma)
{
	vmi->mas.index = vma->vm_start;
	vmi->mas.last = vma->vm_end - 1;
	mas_store(&vmi->mas, vma);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}
1054 | |
1055 | #define for_each_vma(__vmi, __vma) \ |
1056 | while (((__vma) = vma_next(&(__vmi))) != NULL) |
1057 | |
1058 | /* The MM code likes to work with exclusive end addresses */ |
1059 | #define for_each_vma_range(__vmi, __vma, __end) \ |
1060 | while (((__vma) = vma_find(&(__vmi), (__end))) != NULL) |
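
/*
 * Illustrative sketch: walking all VMAs of an mm with the iterator. The
 * caller is assumed to hold mmap_lock at least for reading, and "mm" is a
 * placeholder struct mm_struct pointer:
 *
 *	struct vm_area_struct *vma;
 *	VMA_ITERATOR(vmi, mm, 0);
 *
 *	mmap_read_lock(mm);
 *	for_each_vma(vmi, vma)
 *		pr_info("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
 *	mmap_read_unlock(mm);
 */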
1061 | |
1062 | #ifdef CONFIG_SHMEM |
/*
 * vma_is_shmem() is not inlined because it is used only by slow
 * paths in userfault.
 */
1067 | bool vma_is_shmem(struct vm_area_struct *vma); |
1068 | bool vma_is_anon_shmem(struct vm_area_struct *vma); |
1069 | #else |
1070 | static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } |
1071 | static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; } |
1072 | #endif |
1073 | |
1074 | int vma_is_stack_for_current(struct vm_area_struct *vma); |
1075 | |
1076 | /* flush_tlb_range() takes a vma, not a mm, and can care about flags */ |
1077 | #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) } |
1078 | |
1079 | struct mmu_gather; |
1080 | struct inode; |
1081 | |
1082 | /* |
1083 | * compound_order() can be called without holding a reference, which means |
1084 | * that niceties like page_folio() don't work. These callers should be |
1085 | * prepared to handle wild return values. For example, PG_head may be |
1086 | * set before the order is initialised, or this may be a tail page. |
1087 | * See compaction.c for some good examples. |
1088 | */ |
1089 | static inline unsigned int compound_order(struct page *page) |
1090 | { |
1091 | struct folio *folio = (struct folio *)page; |
1092 | |
1093 | if (!test_bit(PG_head, &folio->flags)) |
1094 | return 0; |
1095 | return folio->_flags_1 & 0xff; |
1096 | } |
1097 | |
1098 | /** |
1099 | * folio_order - The allocation order of a folio. |
1100 | * @folio: The folio. |
1101 | * |
1102 | * A folio is composed of 2^order pages. See get_order() for the definition |
1103 | * of order. |
1104 | * |
1105 | * Return: The order of the folio. |
1106 | */ |
1107 | static inline unsigned int folio_order(struct folio *folio) |
1108 | { |
1109 | if (!folio_test_large(folio)) |
1110 | return 0; |
1111 | return folio->_flags_1 & 0xff; |
1112 | } |
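
/*
 * Worked example (illustrative): folio_order() == 2 means the folio is built
 * from 2^2 = 4 pages, so with a 4 KiB PAGE_SIZE the folio spans 16 KiB.
 */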
1113 | |
1114 | #include <linux/huge_mm.h> |
1115 | |
1116 | /* |
1117 | * Methods to modify the page usage count. |
1118 | * |
1119 | * What counts for a page usage: |
1120 | * - cache mapping (page->mapping) |
1121 | * - private data (page->private) |
1122 | * - page mapped in a task's page tables, each mapping |
1123 | * is counted separately |
1124 | * |
1125 | * Also, many kernel routines increase the page count before a critical |
1126 | * routine so they can be sure the page doesn't go away from under them. |
1127 | */ |
1128 | |
1129 | /* |
1130 | * Drop a ref, return true if the refcount fell to zero (the page has no users) |
1131 | */ |
1132 | static inline int put_page_testzero(struct page *page) |
1133 | { |
1134 | VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); |
1135 | return page_ref_dec_and_test(page); |
1136 | } |
1137 | |
static inline int folio_put_testzero(struct folio *folio)
{
	return put_page_testzero(&folio->page);
}
1142 | |
/*
 * Try to grab a ref unless the page has a refcount of zero; return false if
 * that is the case.
 * This can be called when the MMU is off, so it must not access
 * any of the virtual mappings.
 */
static inline bool get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}
1153 | |
1154 | static inline struct folio *folio_get_nontail_page(struct page *page) |
1155 | { |
1156 | if (unlikely(!get_page_unless_zero(page))) |
1157 | return NULL; |
1158 | return (struct folio *)page; |
1159 | } |
1160 | |
1161 | extern int page_is_ram(unsigned long pfn); |
1162 | |
1163 | enum { |
1164 | REGION_INTERSECTS, |
1165 | REGION_DISJOINT, |
1166 | REGION_MIXED, |
1167 | }; |
1168 | |
1169 | int region_intersects(resource_size_t offset, size_t size, unsigned long flags, |
1170 | unsigned long desc); |
1171 | |
1172 | /* Support for virtually mapped pages */ |
1173 | struct page *vmalloc_to_page(const void *addr); |
1174 | unsigned long vmalloc_to_pfn(const void *addr); |
1175 | |
1176 | /* |
1177 | * Determine if an address is within the vmalloc range |
1178 | * |
1179 | * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there |
1180 | * is no special casing required. |
1181 | */ |
1182 | #ifdef CONFIG_MMU |
1183 | extern bool is_vmalloc_addr(const void *x); |
1184 | extern int is_vmalloc_or_module_addr(const void *x); |
1185 | #else |
1186 | static inline bool is_vmalloc_addr(const void *x) |
1187 | { |
1188 | return false; |
1189 | } |
1190 | static inline int is_vmalloc_or_module_addr(const void *x) |
1191 | { |
1192 | return 0; |
1193 | } |
1194 | #endif |
1195 | |
1196 | /* |
1197 | * How many times the entire folio is mapped as a single unit (eg by a |
1198 | * PMD or PUD entry). This is probably not what you want, except for |
1199 | * debugging purposes - it does not include PTE-mapped sub-pages; look |
1200 | * at folio_mapcount() or page_mapcount() instead. |
1201 | */ |
static inline int folio_entire_mapcount(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
	return atomic_read(&folio->_entire_mapcount) + 1;
}
1207 | |
1208 | /* |
1209 | * The atomic page->_mapcount, starts from -1: so that transitions |
1210 | * both from it and to it can be tracked, using atomic_inc_and_test |
1211 | * and atomic_add_negative(-1). |
1212 | */ |
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}
1217 | |
1218 | /** |
1219 | * page_mapcount() - Number of times this precise page is mapped. |
1220 | * @page: The page. |
1221 | * |
1222 | * The number of times this page is mapped. If this page is part of |
1223 | * a large folio, it includes the number of times this page is mapped |
1224 | * as part of that folio. |
1225 | * |
1226 | * The result is undefined for pages which cannot be mapped into userspace. |
1227 | * For example SLAB or special types of pages. See function page_has_type(). |
1228 | * They use this field in struct page differently. |
1229 | */ |
static inline int page_mapcount(struct page *page)
{
	int mapcount = atomic_read(&page->_mapcount) + 1;

	if (unlikely(PageCompound(page)))
		mapcount += folio_entire_mapcount(page_folio(page));

	return mapcount;
}
1239 | |
1240 | int folio_total_mapcount(struct folio *folio); |
1241 | |
1242 | /** |
1243 | * folio_mapcount() - Calculate the number of mappings of this folio. |
1244 | * @folio: The folio. |
1245 | * |
1246 | * A large folio tracks both how many times the entire folio is mapped, |
1247 | * and how many times each individual page in the folio is mapped. |
1248 | * This function calculates the total number of times the folio is |
1249 | * mapped. |
1250 | * |
1251 | * Return: The number of times this folio is mapped. |
1252 | */ |
static inline int folio_mapcount(struct folio *folio)
{
	if (likely(!folio_test_large(folio)))
		return atomic_read(&folio->_mapcount) + 1;
	return folio_total_mapcount(folio);
}
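
/*
 * Worked example (illustrative): an order-0 folio whose page is mapped into
 * the page tables of two processes has folio_mapcount() == 2. For a large
 * folio, folio_total_mapcount() combines the entire-folio mappings counted
 * by folio_entire_mapcount() with the individual per-page mappings.
 */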
1259 | |
static inline bool folio_large_is_mapped(struct folio *folio)
{
	/*
	 * Reading _entire_mapcount below could be omitted if hugetlb
	 * participated in incrementing nr_pages_mapped when compound mapped.
	 */
	return atomic_read(&folio->_nr_pages_mapped) > 0 ||
		atomic_read(&folio->_entire_mapcount) >= 0;
}
1269 | |
1270 | /** |
1271 | * folio_mapped - Is this folio mapped into userspace? |
1272 | * @folio: The folio. |
1273 | * |
1274 | * Return: True if any page in this folio is referenced by user page tables. |
1275 | */ |
static inline bool folio_mapped(struct folio *folio)
{
	if (likely(!folio_test_large(folio)))
		return atomic_read(&folio->_mapcount) >= 0;
	return folio_large_is_mapped(folio);
}
1282 | |
/*
 * Return true if this page is mapped into pagetables.
 * For a compound page it returns true if any sub-page of the compound page
 * is mapped, even if this particular sub-page is not itself mapped by any
 * PTE or PMD.
 */
static inline bool page_mapped(struct page *page)
{
	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	return folio_large_is_mapped(page_folio(page));
}
1294 | |
1295 | static inline struct page *virt_to_head_page(const void *x) |
1296 | { |
1297 | struct page *page = virt_to_page(x); |
1298 | |
1299 | return compound_head(page); |
1300 | } |
1301 | |
1302 | static inline struct folio *virt_to_folio(const void *x) |
1303 | { |
1304 | struct page *page = virt_to_page(x); |
1305 | |
1306 | return page_folio(page); |
1307 | } |
1308 | |
1309 | void __folio_put(struct folio *folio); |
1310 | |
1311 | void put_pages_list(struct list_head *pages); |
1312 | |
1313 | void split_page(struct page *page, unsigned int order); |
1314 | void folio_copy(struct folio *dst, struct folio *src); |
1315 | |
1316 | unsigned long nr_free_buffer_pages(void); |
1317 | |
1318 | void destroy_large_folio(struct folio *folio); |
1319 | |
1320 | /* Returns the number of bytes in this potentially compound page. */ |
1321 | static inline unsigned long page_size(struct page *page) |
1322 | { |
1323 | return PAGE_SIZE << compound_order(page); |
1324 | } |
1325 | |
1326 | /* Returns the number of bits needed for the number of bytes in a page */ |
1327 | static inline unsigned int page_shift(struct page *page) |
1328 | { |
1329 | return PAGE_SHIFT + compound_order(page); |
1330 | } |
1331 | |
1332 | /** |
1333 | * thp_order - Order of a transparent huge page. |
1334 | * @page: Head page of a transparent huge page. |
1335 | */ |
1336 | static inline unsigned int thp_order(struct page *page) |
1337 | { |
1338 | VM_BUG_ON_PGFLAGS(PageTail(page), page); |
1339 | return compound_order(page); |
1340 | } |
1341 | |
1342 | /** |
1343 | * thp_size - Size of a transparent huge page. |
1344 | * @page: Head page of a transparent huge page. |
1345 | * |
1346 | * Return: Number of bytes in this page. |
1347 | */ |
1348 | static inline unsigned long thp_size(struct page *page) |
1349 | { |
1350 | return PAGE_SIZE << thp_order(page); |
1351 | } |
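
/*
 * Worked example (illustrative, x86-64 with 4 KiB pages): a PMD-sized THP
 * has compound_order() == 9, so page_shift() == 12 + 9 = 21 and
 * page_size() == thp_size() == 2 MiB.
 */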
1352 | |
1353 | #ifdef CONFIG_MMU |
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
 * servicing faults for write access. In the normal case we always want
 * pte_mkwrite. But get_user_pages() can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm().
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte, vma);
	return pte;
}
1366 | |
1367 | vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page); |
1368 | void set_pte_range(struct vm_fault *vmf, struct folio *folio, |
1369 | struct page *page, unsigned int nr, unsigned long addr); |
1370 | |
1371 | vm_fault_t finish_fault(struct vm_fault *vmf); |
1372 | #endif |
1373 | |
1374 | /* |
1375 | * Multiple processes may "see" the same page. E.g. for untouched |
1376 | * mappings of /dev/null, all processes see the same page full of |
1377 | * zeroes, and text pages of executables and shared libraries have |
1378 | * only one copy in memory, at most, normally. |
1379 | * |
1380 | * For the non-reserved pages, page_count(page) denotes a reference count. |
1381 | * page_count() == 0 means the page is free. page->lru is then used for |
1382 | * freelist management in the buddy allocator. |
1383 | * page_count() > 0 means the page has been allocated. |
1384 | * |
1385 | * Pages are allocated by the slab allocator in order to provide memory |
1386 | * to kmalloc and kmem_cache_alloc. In this case, the management of the |
1387 | * page, and the fields in 'struct page' are the responsibility of mm/slab.c |
1388 | * unless a particular usage is carefully commented. (the responsibility of |
1389 | * freeing the kmalloc memory is the caller's, of course). |
1390 | * |
1391 | * A page may be used by anyone else who does a __get_free_page(). |
1392 | * In this case, page_count still tracks the references, and should only |
1393 | * be used through the normal accessor functions. The top bits of page->flags |
1394 | * and page->virtual store page management information, but all other fields |
1395 | * are unused and could be used privately, carefully. The management of this |
1396 | * page is the responsibility of the one who allocated it, and those who have |
1397 | * subsequently been given references to it. |
1398 | * |
1399 | * The other pages (we may call them "pagecache pages") are completely |
1400 | * managed by the Linux memory manager: I/O, buffers, swapping etc. |
1401 | * The following discussion applies only to them. |
1402 | * |
1403 | * A pagecache page contains an opaque `private' member, which belongs to the |
1404 | * page's address_space. Usually, this is the address of a circular list of |
1405 | * the page's disk buffers. PG_private must be set to tell the VM to call |
1406 | * into the filesystem to release these pages. |
1407 | * |
1408 | * A page may belong to an inode's memory mapping. In this case, page->mapping |
1409 | * is the pointer to the inode, and page->index is the file offset of the page, |
1410 | * in units of PAGE_SIZE. |
1411 | * |
1412 | * If pagecache pages are not associated with an inode, they are said to be |
1413 | * anonymous pages. These may become associated with the swapcache, and in that |
1414 | * case PG_swapcache is set, and page->private is an offset into the swapcache. |
1415 | * |
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
1419 | * |
1420 | * The pagecache pages are stored in a per-mapping radix tree, which is |
1421 | * rooted at mapping->i_pages, and indexed by offset. |
1422 | * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space |
1423 | * lists, we instead now tag pages as dirty/writeback in the radix tree. |
1424 | * |
1425 | * All pagecache pages may be subject to I/O: |
1426 | * - inode pages may need to be read from disk, |
1427 | * - inode pages which have been modified and are MAP_SHARED may need |
1428 | * to be written back to the inode on disk, |
1429 | * - anonymous pages (including MAP_PRIVATE file mappings) which have been |
1430 | * modified may need to be swapped out to swap space and (later) to be read |
1431 | * back into memory. |
1432 | */ |
1433 | |
1434 | #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX) |
1435 | DECLARE_STATIC_KEY_FALSE(devmap_managed_key); |
1436 | |
1437 | bool __put_devmap_managed_page_refs(struct page *page, int refs); |
1438 | static inline bool put_devmap_managed_page_refs(struct page *page, int refs) |
1439 | { |
1440 | if (!static_branch_unlikely(&devmap_managed_key)) |
1441 | return false; |
1442 | if (!is_zone_device_page(page)) |
1443 | return false; |
1444 | return __put_devmap_managed_page_refs(page, refs); |
1445 | } |
1446 | #else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ |
1447 | static inline bool put_devmap_managed_page_refs(struct page *page, int refs) |
1448 | { |
1449 | return false; |
1450 | } |
1451 | #endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ |
1452 | |
static inline bool put_devmap_managed_page(struct page *page)
{
	return put_devmap_managed_page_refs(page, 1);
}
1457 | |
1458 | /* 127: arbitrary random number, small enough to assemble well */ |
1459 | #define folio_ref_zero_or_close_to_overflow(folio) \ |
1460 | ((unsigned int) folio_ref_count(folio) + 127u <= 127u) |
1461 | |
1462 | /** |
1463 | * folio_get - Increment the reference count on a folio. |
1464 | * @folio: The folio. |
1465 | * |
1466 | * Context: May be called in any context, as long as you know that |
1467 | * you have a refcount on the folio. If you do not already have one, |
1468 | * folio_try_get() may be the right interface for you to use. |
1469 | */ |
1470 | static inline void folio_get(struct folio *folio) |
1471 | { |
1472 | VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio); |
1473 | folio_ref_inc(folio); |
1474 | } |
1475 | |
1476 | static inline void get_page(struct page *page) |
1477 | { |
1478 | folio_get(page_folio(page)); |
1479 | } |
1480 | |
1481 | static inline __must_check bool try_get_page(struct page *page) |
1482 | { |
1483 | page = compound_head(page); |
1484 | if (WARN_ON_ONCE(page_ref_count(page) <= 0)) |
1485 | return false; |
1486 | page_ref_inc(page); |
1487 | return true; |
1488 | } |
1489 | |
1490 | /** |
1491 | * folio_put - Decrement the reference count on a folio. |
1492 | * @folio: The folio. |
1493 | * |
1494 | * If the folio's reference count reaches zero, the memory will be |
1495 | * released back to the page allocator and may be used by another |
1496 | * allocation immediately. Do not access the memory or the struct folio |
1497 | * after calling folio_put() unless you can be sure that it wasn't the |
1498 | * last reference. |
1499 | * |
1500 | * Context: May be called in process or interrupt context, but not in NMI |
1501 | * context. May be called while holding a spinlock. |
1502 | */ |
1503 | static inline void folio_put(struct folio *folio) |
1504 | { |
1505 | if (folio_put_testzero(folio)) |
1506 | __folio_put(folio); |
1507 | } |
1508 | |
1509 | /** |
1510 | * folio_put_refs - Reduce the reference count on a folio. |
1511 | * @folio: The folio. |
1512 | * @refs: The amount to subtract from the folio's reference count. |
1513 | * |
1514 | * If the folio's reference count reaches zero, the memory will be |
1515 | * released back to the page allocator and may be used by another |
1516 | * allocation immediately. Do not access the memory or the struct folio |
1517 | * after calling folio_put_refs() unless you can be sure that these weren't |
1518 | * the last references. |
1519 | * |
1520 | * Context: May be called in process or interrupt context, but not in NMI |
1521 | * context. May be called while holding a spinlock. |
1522 | */ |
1523 | static inline void folio_put_refs(struct folio *folio, int refs) |
1524 | { |
	if (folio_ref_sub_and_test(folio, refs))
1526 | __folio_put(folio); |
1527 | } |
1528 | |
1529 | void folios_put_refs(struct folio_batch *folios, unsigned int *refs); |
1530 | |
1531 | /* |
1532 | * union release_pages_arg - an array of pages or folios |
1533 | * |
1534 | * release_pages() releases a simple array of multiple pages, and |
1535 | * accepts various different forms of said page array: either |
1536 | * a regular old boring array of pages, an array of folios, or |
1537 | * an array of encoded page pointers. |
1538 | * |
1539 | * The transparent union syntax for this kind of "any of these |
1540 | * argument types" is all kinds of ugly, so look away. |
1541 | */ |
1542 | typedef union { |
1543 | struct page **pages; |
1544 | struct folio **folios; |
1545 | struct encoded_page **encoded_pages; |
1546 | } release_pages_arg __attribute__ ((__transparent_union__)); |
1547 | |
1548 | void release_pages(release_pages_arg, int nr); |
1549 | |
1550 | /** |
1551 | * folios_put - Decrement the reference count on an array of folios. |
1552 | * @folios: The folios. |
1553 | * |
1554 | * Like folio_put(), but for a batch of folios. This is more efficient |
1555 | * than writing the loop yourself as it will optimise the locks which need |
1556 | * to be taken if the folios are freed. The folios batch is returned |
1557 | * empty and ready to be reused for another batch; there is no need to |
1558 | * reinitialise it. |
1559 | * |
1560 | * Context: May be called in process or interrupt context, but not in NMI |
1561 | * context. May be called while holding a spinlock. |
1562 | */ |
1563 | static inline void folios_put(struct folio_batch *folios) |
1564 | { |
1565 | folios_put_refs(folios, NULL); |
1566 | } |
1567 | |
1568 | static inline void put_page(struct page *page) |
1569 | { |
1570 | struct folio *folio = page_folio(page); |
1571 | |
1572 | /* |
1573 | * For some devmap managed pages we need to catch refcount transition |
1574 | * from 2 to 1: |
1575 | */ |
	if (put_devmap_managed_page(&folio->page))
1577 | return; |
1578 | folio_put(folio); |
1579 | } |
1580 | |
1581 | /* |
1582 | * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload |
1583 | * the page's refcount so that two separate items are tracked: the original page |
1584 | * reference count, and also a new count of how many pin_user_pages() calls were |
1585 | * made against the page. ("gup-pinned" is another term for the latter). |
1586 | * |
1587 | * With this scheme, pin_user_pages() becomes special: such pages are marked as |
1588 | * distinct from normal pages. As such, the unpin_user_page() call (and its |
1589 | * variants) must be used in order to release gup-pinned pages. |
1590 | * |
1591 | * Choice of value: |
1592 | * |
1593 | * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference |
1594 | * counts with respect to pin_user_pages() and unpin_user_page() becomes |
1595 | * simpler, due to the fact that adding an even power of two to the page |
1596 | * refcount has the effect of using only the upper N bits, for the code that |
1597 | * counts up using the bias value. This means that the lower bits are left for |
1598 | * the exclusive use of the original code that increments and decrements by one |
1599 | * (or at least, by much smaller values than the bias value). |
1600 | * |
1601 | * Of course, once the lower bits overflow into the upper bits (and this is |
1602 | * OK, because subtraction recovers the original values), then visual inspection |
1603 | * no longer suffices to directly view the separate counts. However, for normal |
1604 | * applications that don't have huge page reference counts, this won't be an |
1605 | * issue. |
1606 | * |
1607 | * Locking: the lockless algorithm described in folio_try_get_rcu() |
1608 | * provides safe operation for get_user_pages(), page_mkclean() and |
1609 | * other calls that race to set up page table entries. |
1610 | */ |
1611 | #define GUP_PIN_COUNTING_BIAS (1U << 10) |
1612 | |
1613 | void unpin_user_page(struct page *page); |
1614 | void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, |
1615 | bool make_dirty); |
1616 | void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, |
1617 | bool make_dirty); |
1618 | void unpin_user_pages(struct page **pages, unsigned long npages); |
1619 | |
1620 | static inline bool is_cow_mapping(vm_flags_t flags) |
1621 | { |
1622 | return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; |
1623 | } |
1624 | |
1625 | #ifndef CONFIG_MMU |
1626 | static inline bool is_nommu_shared_mapping(vm_flags_t flags) |
1627 | { |
1628 | /* |
1629 | * NOMMU shared mappings are ordinary MAP_SHARED mappings and selected |
1630 | * R/O MAP_PRIVATE file mappings that are an effective R/O overlay of |
1631 | * a file mapping. R/O MAP_PRIVATE mappings might still modify |
1632 | * underlying memory if ptrace is active, so this is only possible if |
1633 | * ptrace does not apply. Note that there is no mprotect() to upgrade |
1634 | * write permissions later. |
1635 | */ |
1636 | return flags & (VM_MAYSHARE | VM_MAYOVERLAY); |
1637 | } |
1638 | #endif |
1639 | |
1640 | #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) |
1641 | #define SECTION_IN_PAGE_FLAGS |
1642 | #endif |
1643 | |
1644 | /* |
1645 | * The identification function is mainly used by the buddy allocator for |
1646 | * determining if two pages could be buddies. We are not really identifying |
1647 | * the zone since we could be using the section number id if we do not have |
1648 | * node id available in page flags. |
1649 | * We only guarantee that it will return the same value for two combinable |
1650 | * pages in a zone. |
1651 | */ |
1652 | static inline int page_zone_id(struct page *page) |
1653 | { |
1654 | return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK; |
1655 | } |
1656 | |
1657 | #ifdef NODE_NOT_IN_PAGE_FLAGS |
1658 | int page_to_nid(const struct page *page); |
1659 | #else |
1660 | static inline int page_to_nid(const struct page *page) |
1661 | { |
1662 | return (PF_POISONED_CHECK(page)->flags >> NODES_PGSHIFT) & NODES_MASK; |
1663 | } |
1664 | #endif |
1665 | |
1666 | static inline int folio_nid(const struct folio *folio) |
1667 | { |
	return page_to_nid(&folio->page);
1669 | } |
1670 | |
1671 | #ifdef CONFIG_NUMA_BALANCING |
/* page access time bits need to hold at least 4 seconds */
1673 | #define PAGE_ACCESS_TIME_MIN_BITS 12 |
1674 | #if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS |
1675 | #define PAGE_ACCESS_TIME_BUCKETS \ |
1676 | (PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT) |
1677 | #else |
1678 | #define PAGE_ACCESS_TIME_BUCKETS 0 |
1679 | #endif |
1680 | |
1681 | #define PAGE_ACCESS_TIME_MASK \ |
1682 | (LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS) |
1683 | |
1684 | static inline int cpu_pid_to_cpupid(int cpu, int pid) |
1685 | { |
1686 | return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK); |
1687 | } |
1688 | |
1689 | static inline int cpupid_to_pid(int cpupid) |
1690 | { |
1691 | return cpupid & LAST__PID_MASK; |
1692 | } |
1693 | |
1694 | static inline int cpupid_to_cpu(int cpupid) |
1695 | { |
1696 | return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK; |
1697 | } |
1698 | |
1699 | static inline int cpupid_to_nid(int cpupid) |
1700 | { |
	return cpu_to_node(cpupid_to_cpu(cpupid));
1702 | } |
1703 | |
1704 | static inline bool cpupid_pid_unset(int cpupid) |
1705 | { |
1706 | return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK); |
1707 | } |
1708 | |
1709 | static inline bool cpupid_cpu_unset(int cpupid) |
1710 | { |
1711 | return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK); |
1712 | } |
1713 | |
1714 | static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid) |
1715 | { |
1716 | return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid); |
1717 | } |
1718 | |
1719 | #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid) |
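
/*
 * Illustrative sketch (hypothetical helper): the cpupid encoding above is a
 * plain pack/unpack, so a round trip through cpu_pid_to_cpupid() recovers the
 * masked cpu and pid values.
 */
static inline bool example_cpupid_roundtrip(int cpu, int pid)
{
	int cpupid = cpu_pid_to_cpupid(cpu, pid);

	return cpupid_to_cpu(cpupid) == (cpu & LAST__CPU_MASK) &&
	       cpupid_to_pid(cpupid) == (pid & LAST__PID_MASK);
}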
1720 | #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS |
1721 | static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid) |
1722 | { |
1723 | return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK); |
1724 | } |
1725 | |
1726 | static inline int folio_last_cpupid(struct folio *folio) |
1727 | { |
1728 | return folio->_last_cpupid; |
1729 | } |
1730 | static inline void page_cpupid_reset_last(struct page *page) |
1731 | { |
1732 | page->_last_cpupid = -1 & LAST_CPUPID_MASK; |
1733 | } |
1734 | #else |
1735 | static inline int folio_last_cpupid(struct folio *folio) |
1736 | { |
1737 | return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; |
1738 | } |
1739 | |
1740 | int folio_xchg_last_cpupid(struct folio *folio, int cpupid); |
1741 | |
1742 | static inline void page_cpupid_reset_last(struct page *page) |
1743 | { |
1744 | page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT; |
1745 | } |
1746 | #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */ |
1747 | |
1748 | static inline int folio_xchg_access_time(struct folio *folio, int time) |
1749 | { |
1750 | int last_time; |
1751 | |
1752 | last_time = folio_xchg_last_cpupid(folio, |
1753 | time >> PAGE_ACCESS_TIME_BUCKETS); |
1754 | return last_time << PAGE_ACCESS_TIME_BUCKETS; |
1755 | } |
1756 | |
1757 | static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) |
1758 | { |
1759 | unsigned int pid_bit; |
1760 | |
1761 | pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG)); |
1762 | if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) { |
1763 | __set_bit(pid_bit, &vma->numab_state->pids_active[1]); |
1764 | } |
1765 | } |
1766 | #else /* !CONFIG_NUMA_BALANCING */ |
1767 | static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid) |
1768 | { |
1769 | return folio_nid(folio); /* XXX */ |
1770 | } |
1771 | |
1772 | static inline int folio_xchg_access_time(struct folio *folio, int time) |
1773 | { |
1774 | return 0; |
1775 | } |
1776 | |
1777 | static inline int folio_last_cpupid(struct folio *folio) |
1778 | { |
1779 | return folio_nid(folio); /* XXX */ |
1780 | } |
1781 | |
1782 | static inline int cpupid_to_nid(int cpupid) |
1783 | { |
1784 | return -1; |
1785 | } |
1786 | |
1787 | static inline int cpupid_to_pid(int cpupid) |
1788 | { |
1789 | return -1; |
1790 | } |
1791 | |
1792 | static inline int cpupid_to_cpu(int cpupid) |
1793 | { |
1794 | return -1; |
1795 | } |
1796 | |
1797 | static inline int cpu_pid_to_cpupid(int nid, int pid) |
1798 | { |
1799 | return -1; |
1800 | } |
1801 | |
1802 | static inline bool cpupid_pid_unset(int cpupid) |
1803 | { |
1804 | return true; |
1805 | } |
1806 | |
1807 | static inline void page_cpupid_reset_last(struct page *page) |
1808 | { |
1809 | } |
1810 | |
1811 | static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) |
1812 | { |
1813 | return false; |
1814 | } |
1815 | |
1816 | static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) |
1817 | { |
1818 | } |
1819 | #endif /* CONFIG_NUMA_BALANCING */ |
1820 | |
1821 | #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) |
1822 | |
1823 | /* |
 * KASAN per-page tags are stored xor'ed with 0xff. This avoids having to set
 * the tags of all pages to the native kernel tag value 0xff, as the default
 * value 0x00 maps to 0xff.
1827 | */ |
1828 | |
1829 | static inline u8 page_kasan_tag(const struct page *page) |
1830 | { |
1831 | u8 tag = KASAN_TAG_KERNEL; |
1832 | |
1833 | if (kasan_enabled()) { |
1834 | tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; |
1835 | tag ^= 0xff; |
1836 | } |
1837 | |
1838 | return tag; |
1839 | } |
1840 | |
1841 | static inline void page_kasan_tag_set(struct page *page, u8 tag) |
1842 | { |
1843 | unsigned long old_flags, flags; |
1844 | |
1845 | if (!kasan_enabled()) |
1846 | return; |
1847 | |
1848 | tag ^= 0xff; |
1849 | old_flags = READ_ONCE(page->flags); |
1850 | do { |
1851 | flags = old_flags; |
1852 | flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); |
1853 | flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; |
1854 | } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags))); |
1855 | } |
1856 | |
1857 | static inline void page_kasan_tag_reset(struct page *page) |
1858 | { |
1859 | if (kasan_enabled()) |
1860 | page_kasan_tag_set(page, KASAN_TAG_KERNEL); |
1861 | } |
1862 | |
1863 | #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ |
1864 | |
1865 | static inline u8 page_kasan_tag(const struct page *page) |
1866 | { |
1867 | return 0xff; |
1868 | } |
1869 | |
1870 | static inline void page_kasan_tag_set(struct page *page, u8 tag) { } |
1871 | static inline void page_kasan_tag_reset(struct page *page) { } |
1872 | |
1873 | #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ |
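
/*
 * Illustrative sketch (hypothetical helper): page_kasan_tag_set() stores the
 * tag xor'ed with 0xff in page->flags and page_kasan_tag() undoes the xor, so
 * a set/get round trip returns the tag when tag-based KASAN is enabled (and
 * 0xff when it is not).
 */
static inline u8 example_page_kasan_tag_roundtrip(struct page *page, u8 tag)
{
	page_kasan_tag_set(page, tag);
	return page_kasan_tag(page);
}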
1874 | |
1875 | static inline struct zone *page_zone(const struct page *page) |
1876 | { |
1877 | return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; |
1878 | } |
1879 | |
1880 | static inline pg_data_t *page_pgdat(const struct page *page) |
1881 | { |
1882 | return NODE_DATA(page_to_nid(page)); |
1883 | } |
1884 | |
1885 | static inline struct zone *folio_zone(const struct folio *folio) |
1886 | { |
	return page_zone(&folio->page);
1888 | } |
1889 | |
1890 | static inline pg_data_t *folio_pgdat(const struct folio *folio) |
1891 | { |
	return page_pgdat(&folio->page);
1893 | } |
1894 | |
1895 | #ifdef SECTION_IN_PAGE_FLAGS |
1896 | static inline void set_page_section(struct page *page, unsigned long section) |
1897 | { |
1898 | page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); |
1899 | page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; |
1900 | } |
1901 | |
1902 | static inline unsigned long page_to_section(const struct page *page) |
1903 | { |
1904 | return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; |
1905 | } |
1906 | #endif |
1907 | |
1908 | /** |
1909 | * folio_pfn - Return the Page Frame Number of a folio. |
1910 | * @folio: The folio. |
1911 | * |
1912 | * A folio may contain multiple pages. The pages have consecutive |
1913 | * Page Frame Numbers. |
1914 | * |
1915 | * Return: The Page Frame Number of the first page in the folio. |
1916 | */ |
1917 | static inline unsigned long folio_pfn(struct folio *folio) |
1918 | { |
1919 | return page_to_pfn(&folio->page); |
1920 | } |
1921 | |
1922 | static inline struct folio *pfn_folio(unsigned long pfn) |
1923 | { |
1924 | return page_folio(pfn_to_page(pfn)); |
1925 | } |
1926 | |
1927 | /** |
1928 | * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA. |
1929 | * @folio: The folio. |
1930 | * |
1931 | * This function checks if a folio has been pinned via a call to |
1932 | * a function in the pin_user_pages() family. |
1933 | * |
1934 | * For small folios, the return value is partially fuzzy: false is not fuzzy, |
1935 | * because it means "definitely not pinned for DMA", but true means "probably |
1936 | * pinned for DMA, but possibly a false positive due to having at least |
1937 | * GUP_PIN_COUNTING_BIAS worth of normal folio references". |
1938 | * |
1939 | * False positives are OK, because: a) it's unlikely for a folio to |
1940 | * get that many refcounts, and b) all the callers of this routine are |
1941 | * expected to be able to deal gracefully with a false positive. |
1942 | * |
1943 | * For large folios, the result will be exactly correct. That's because |
1944 | * we have more tracking data available: the _pincount field is used |
1945 | * instead of the GUP_PIN_COUNTING_BIAS scheme. |
1946 | * |
1947 | * For more information, please see Documentation/core-api/pin_user_pages.rst. |
1948 | * |
1949 | * Return: True, if it is likely that the page has been "dma-pinned". |
1950 | * False, if the page is definitely not dma-pinned. |
1951 | */ |
1952 | static inline bool folio_maybe_dma_pinned(struct folio *folio) |
1953 | { |
1954 | if (folio_test_large(folio)) |
		return atomic_read(&folio->_pincount) > 0;
1956 | |
1957 | /* |
1958 | * folio_ref_count() is signed. If that refcount overflows, then |
1959 | * folio_ref_count() returns a negative value, and callers will avoid |
1960 | * further incrementing the refcount. |
1961 | * |
1962 | * Here, for that overflow case, use the sign bit to count a little |
1963 | * bit higher via unsigned math, and thus still get an accurate result. |
1964 | */ |
1965 | return ((unsigned int)folio_ref_count(folio)) >= |
1966 | GUP_PIN_COUNTING_BIAS; |
1967 | } |
1968 | |
1969 | static inline bool page_maybe_dma_pinned(struct page *page) |
1970 | { |
1971 | return folio_maybe_dma_pinned(page_folio(page)); |
1972 | } |
1973 | |
1974 | /* |
1975 | * This should most likely only be called during fork() to see whether we |
1976 | * should break the cow immediately for an anon page on the src mm. |
1977 | * |
 * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
1979 | */ |
1980 | static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma, |
1981 | struct folio *folio) |
1982 | { |
1983 | VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1)); |
1984 | |
1985 | if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)) |
1986 | return false; |
1987 | |
1988 | return folio_maybe_dma_pinned(folio); |
1989 | } |
1990 | |
1991 | /** |
1992 | * is_zero_page - Query if a page is a zero page |
1993 | * @page: The page to query |
1994 | * |
1995 | * This returns true if @page is one of the permanent zero pages. |
1996 | */ |
1997 | static inline bool is_zero_page(const struct page *page) |
1998 | { |
1999 | return is_zero_pfn(page_to_pfn(page)); |
2000 | } |
2001 | |
2002 | /** |
2003 | * is_zero_folio - Query if a folio is a zero page |
2004 | * @folio: The folio to query |
2005 | * |
2006 | * This returns true if @folio is one of the permanent zero pages. |
2007 | */ |
2008 | static inline bool is_zero_folio(const struct folio *folio) |
2009 | { |
	return is_zero_page(&folio->page);
2011 | } |
2012 | |
/* MIGRATE_CMA and ZONE_MOVABLE do not allow pinning folios */
2014 | #ifdef CONFIG_MIGRATION |
2015 | static inline bool folio_is_longterm_pinnable(struct folio *folio) |
2016 | { |
2017 | #ifdef CONFIG_CMA |
2018 | int mt = folio_migratetype(folio); |
2019 | |
2020 | if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE) |
2021 | return false; |
2022 | #endif |
2023 | /* The zero page can be "pinned" but gets special handling. */ |
2024 | if (is_zero_folio(folio)) |
2025 | return true; |
2026 | |
2027 | /* Coherent device memory must always allow eviction. */ |
2028 | if (folio_is_device_coherent(folio)) |
2029 | return false; |
2030 | |
2031 | /* Otherwise, non-movable zone folios can be pinned. */ |
	return !folio_is_zone_movable(folio);
}
2035 | #else |
2036 | static inline bool folio_is_longterm_pinnable(struct folio *folio) |
2037 | { |
2038 | return true; |
2039 | } |
2040 | #endif |
2041 | |
2042 | static inline void set_page_zone(struct page *page, enum zone_type zone) |
2043 | { |
2044 | page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); |
2045 | page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT; |
2046 | } |
2047 | |
2048 | static inline void set_page_node(struct page *page, unsigned long node) |
2049 | { |
2050 | page->flags &= ~(NODES_MASK << NODES_PGSHIFT); |
2051 | page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; |
2052 | } |
2053 | |
2054 | static inline void set_page_links(struct page *page, enum zone_type zone, |
2055 | unsigned long node, unsigned long pfn) |
2056 | { |
2057 | set_page_zone(page, zone); |
2058 | set_page_node(page, node); |
2059 | #ifdef SECTION_IN_PAGE_FLAGS |
2060 | set_page_section(page, pfn_to_section_nr(pfn)); |
2061 | #endif |
2062 | } |
2063 | |
2064 | /** |
2065 | * folio_nr_pages - The number of pages in the folio. |
2066 | * @folio: The folio. |
2067 | * |
2068 | * Return: A positive power of two. |
2069 | */ |
2070 | static inline long folio_nr_pages(struct folio *folio) |
2071 | { |
2072 | if (!folio_test_large(folio)) |
2073 | return 1; |
2074 | #ifdef CONFIG_64BIT |
2075 | return folio->_folio_nr_pages; |
2076 | #else |
2077 | return 1L << (folio->_flags_1 & 0xff); |
2078 | #endif |
2079 | } |
2080 | |
2081 | /* Only hugetlbfs can allocate folios larger than MAX_ORDER */ |
2082 | #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE |
2083 | #define MAX_FOLIO_NR_PAGES (1UL << PUD_ORDER) |
2084 | #else |
2085 | #define MAX_FOLIO_NR_PAGES MAX_ORDER_NR_PAGES |
2086 | #endif |
2087 | |
2088 | /* |
2089 | * compound_nr() returns the number of pages in this potentially compound |
2090 | * page. compound_nr() can be called on a tail page, and is defined to |
2091 | * return 1 in that case. |
2092 | */ |
2093 | static inline unsigned long compound_nr(struct page *page) |
2094 | { |
2095 | struct folio *folio = (struct folio *)page; |
2096 | |
2097 | if (!test_bit(PG_head, &folio->flags)) |
2098 | return 1; |
2099 | #ifdef CONFIG_64BIT |
2100 | return folio->_folio_nr_pages; |
2101 | #else |
2102 | return 1L << (folio->_flags_1 & 0xff); |
2103 | #endif |
2104 | } |
2105 | |
2106 | /** |
2107 | * thp_nr_pages - The number of regular pages in this huge page. |
2108 | * @page: The head page of a huge page. |
2109 | */ |
2110 | static inline int thp_nr_pages(struct page *page) |
2111 | { |
	return folio_nr_pages((struct folio *)page);
2113 | } |
2114 | |
2115 | /** |
2116 | * folio_next - Move to the next physical folio. |
2117 | * @folio: The folio we're currently operating on. |
2118 | * |
2119 | * If you have physically contiguous memory which may span more than |
2120 | * one folio (eg a &struct bio_vec), use this function to move from one |
2121 | * folio to the next. Do not use it if the memory is only virtually |
2122 | * contiguous as the folios are almost certainly not adjacent to each |
2123 | * other. This is the folio equivalent to writing ``page++``. |
2124 | * |
2125 | * Context: We assume that the folios are refcounted and/or locked at a |
2126 | * higher level and do not adjust the reference counts. |
2127 | * Return: The next struct folio. |
2128 | */ |
2129 | static inline struct folio *folio_next(struct folio *folio) |
2130 | { |
2131 | return (struct folio *)folio_page(folio, folio_nr_pages(folio)); |
2132 | } |
2133 | |
2134 | /** |
2135 | * folio_shift - The size of the memory described by this folio. |
2136 | * @folio: The folio. |
2137 | * |
2138 | * A folio represents a number of bytes which is a power-of-two in size. |
2139 | * This function tells you which power-of-two the folio is. See also |
2140 | * folio_size() and folio_order(). |
2141 | * |
2142 | * Context: The caller should have a reference on the folio to prevent |
2143 | * it from being split. It is not necessary for the folio to be locked. |
2144 | * Return: The base-2 logarithm of the size of this folio. |
2145 | */ |
2146 | static inline unsigned int folio_shift(struct folio *folio) |
2147 | { |
2148 | return PAGE_SHIFT + folio_order(folio); |
2149 | } |
2150 | |
2151 | /** |
2152 | * folio_size - The number of bytes in a folio. |
2153 | * @folio: The folio. |
2154 | * |
2155 | * Context: The caller should have a reference on the folio to prevent |
2156 | * it from being split. It is not necessary for the folio to be locked. |
2157 | * Return: The number of bytes in this folio. |
2158 | */ |
2159 | static inline size_t folio_size(struct folio *folio) |
2160 | { |
2161 | return PAGE_SIZE << folio_order(folio); |
2162 | } |
2163 | |
2164 | /** |
2165 | * folio_estimated_sharers - Estimate the number of sharers of a folio. |
2166 | * @folio: The folio. |
2167 | * |
 * folio_estimated_sharers() provides an efficient estimate of the number of
 * processes sharing a folio. It does so by looking at the precise mapcount
 * of the first subpage in the folio and assuming the other subpages are the
 * same. This may not be true for large
2172 | * folios. If you want exact mapcounts for exact calculations, look at |
2173 | * page_mapcount() or folio_total_mapcount(). |
2174 | * |
2175 | * Return: The estimated number of processes sharing a folio. |
2176 | */ |
2177 | static inline int folio_estimated_sharers(struct folio *folio) |
2178 | { |
2179 | return page_mapcount(folio_page(folio, 0)); |
2180 | } |
2181 | |
2182 | #ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE |
2183 | static inline int arch_make_page_accessible(struct page *page) |
2184 | { |
2185 | return 0; |
2186 | } |
2187 | #endif |
2188 | |
2189 | #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE |
2190 | static inline int arch_make_folio_accessible(struct folio *folio) |
2191 | { |
2192 | int ret; |
2193 | long i, nr = folio_nr_pages(folio); |
2194 | |
2195 | for (i = 0; i < nr; i++) { |
2196 | ret = arch_make_page_accessible(folio_page(folio, i)); |
2197 | if (ret) |
2198 | break; |
2199 | } |
2200 | |
2201 | return ret; |
2202 | } |
2203 | #endif |
2204 | |
2205 | /* |
2206 | * Some inline functions in vmstat.h depend on page_zone() |
2207 | */ |
2208 | #include <linux/vmstat.h> |
2209 | |
2210 | #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) |
2211 | #define HASHED_PAGE_VIRTUAL |
2212 | #endif |
2213 | |
2214 | #if defined(WANT_PAGE_VIRTUAL) |
2215 | static inline void *page_address(const struct page *page) |
2216 | { |
2217 | return page->virtual; |
2218 | } |
2219 | static inline void set_page_address(struct page *page, void *address) |
2220 | { |
2221 | page->virtual = address; |
2222 | } |
2223 | #define page_address_init() do { } while(0) |
2224 | #endif |
2225 | |
2226 | #if defined(HASHED_PAGE_VIRTUAL) |
2227 | void *page_address(const struct page *page); |
2228 | void set_page_address(struct page *page, void *virtual); |
2229 | void page_address_init(void); |
2230 | #endif |
2231 | |
2232 | static __always_inline void *lowmem_page_address(const struct page *page) |
2233 | { |
2234 | return page_to_virt(page); |
2235 | } |
2236 | |
2237 | #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL) |
2238 | #define page_address(page) lowmem_page_address(page) |
2239 | #define set_page_address(page, address) do { } while(0) |
2240 | #define page_address_init() do { } while(0) |
2241 | #endif |
2242 | |
2243 | static inline void *folio_address(const struct folio *folio) |
2244 | { |
2245 | return page_address(&folio->page); |
2246 | } |
2247 | |
2248 | extern pgoff_t __page_file_index(struct page *page); |
2249 | |
2250 | /* |
2251 | * Return the pagecache index of the passed page. Regular pagecache pages |
2252 | * use ->index whereas swapcache pages use swp_offset(->private) |
2253 | */ |
2254 | static inline pgoff_t page_index(struct page *page) |
2255 | { |
2256 | if (unlikely(PageSwapCache(page))) |
2257 | return __page_file_index(page); |
2258 | return page->index; |
2259 | } |
2260 | |
2261 | /* |
2262 | * Return true only if the page has been allocated with |
2263 | * ALLOC_NO_WATERMARKS and the low watermark was not |
2264 | * met implying that the system is under some pressure. |
2265 | */ |
2266 | static inline bool page_is_pfmemalloc(const struct page *page) |
2267 | { |
2268 | /* |
2269 | * lru.next has bit 1 set if the page is allocated from the |
2270 | * pfmemalloc reserves. Callers may simply overwrite it if |
2271 | * they do not need to preserve that information. |
2272 | */ |
2273 | return (uintptr_t)page->lru.next & BIT(1); |
2274 | } |
2275 | |
2276 | /* |
2277 | * Return true only if the folio has been allocated with |
2278 | * ALLOC_NO_WATERMARKS and the low watermark was not |
2279 | * met implying that the system is under some pressure. |
2280 | */ |
2281 | static inline bool folio_is_pfmemalloc(const struct folio *folio) |
2282 | { |
2283 | /* |
2284 | * lru.next has bit 1 set if the page is allocated from the |
2285 | * pfmemalloc reserves. Callers may simply overwrite it if |
2286 | * they do not need to preserve that information. |
2287 | */ |
2288 | return (uintptr_t)folio->lru.next & BIT(1); |
2289 | } |
2290 | |
2291 | /* |
2292 | * Only to be called by the page allocator on a freshly allocated |
2293 | * page. |
2294 | */ |
2295 | static inline void set_page_pfmemalloc(struct page *page) |
2296 | { |
2297 | page->lru.next = (void *)BIT(1); |
2298 | } |
2299 | |
2300 | static inline void clear_page_pfmemalloc(struct page *page) |
2301 | { |
2302 | page->lru.next = NULL; |
2303 | } |
2304 | |
2305 | /* |
2306 | * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. |
2307 | */ |
2308 | extern void pagefault_out_of_memory(void); |
2309 | |
2310 | #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) |
2311 | #define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1)) |
2312 | #define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1)) |
2313 | |
2314 | /* |
2315 | * Parameter block passed down to zap_pte_range in exceptional cases. |
2316 | */ |
2317 | struct zap_details { |
2318 | struct folio *single_folio; /* Locked folio to be unmapped */ |
2319 | bool even_cows; /* Zap COWed private pages too? */ |
2320 | zap_flags_t zap_flags; /* Extra flags for zapping */ |
2321 | }; |
2322 | |
2323 | /* |
2324 | * Whether to drop the pte markers, for example, the uffd-wp information for |
2325 | * file-backed memory. This should only be specified when we will completely |
2326 | * drop the page in the mm, either by truncation or unmapping of the vma. By |
2327 | * default, the flag is not set. |
2328 | */ |
2329 | #define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0)) |
2330 | /* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */ |
2331 | #define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1)) |
2332 | |
2333 | #ifdef CONFIG_SCHED_MM_CID |
2334 | void sched_mm_cid_before_execve(struct task_struct *t); |
2335 | void sched_mm_cid_after_execve(struct task_struct *t); |
2336 | void sched_mm_cid_fork(struct task_struct *t); |
2337 | void sched_mm_cid_exit_signals(struct task_struct *t); |
2338 | static inline int task_mm_cid(struct task_struct *t) |
2339 | { |
2340 | return t->mm_cid; |
2341 | } |
2342 | #else |
2343 | static inline void sched_mm_cid_before_execve(struct task_struct *t) { } |
2344 | static inline void sched_mm_cid_after_execve(struct task_struct *t) { } |
2345 | static inline void sched_mm_cid_fork(struct task_struct *t) { } |
2346 | static inline void sched_mm_cid_exit_signals(struct task_struct *t) { } |
2347 | static inline int task_mm_cid(struct task_struct *t) |
2348 | { |
2349 | /* |
2350 | * Use the processor id as a fall-back when the mm cid feature is |
2351 | * disabled. This provides functional per-cpu data structure accesses |
	 * in user-space, although it won't provide the memory usage benefits.
2353 | */ |
2354 | return raw_smp_processor_id(); |
2355 | } |
2356 | #endif |
2357 | |
2358 | #ifdef CONFIG_MMU |
2359 | extern bool can_do_mlock(void); |
2360 | #else |
2361 | static inline bool can_do_mlock(void) { return false; } |
2362 | #endif |
2363 | extern int user_shm_lock(size_t, struct ucounts *); |
2364 | extern void user_shm_unlock(size_t, struct ucounts *); |
2365 | |
2366 | struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, |
2367 | pte_t pte); |
2368 | struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, |
2369 | pte_t pte); |
2370 | struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, |
2371 | unsigned long addr, pmd_t pmd); |
2372 | struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, |
2373 | pmd_t pmd); |
2374 | |
2375 | void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, |
2376 | unsigned long size); |
2377 | void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, |
2378 | unsigned long size, struct zap_details *details); |
2379 | static inline void zap_vma_pages(struct vm_area_struct *vma) |
2380 | { |
	zap_page_range_single(vma, vma->vm_start,
			      vma->vm_end - vma->vm_start, NULL);
2383 | } |
2384 | void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas, |
2385 | struct vm_area_struct *start_vma, unsigned long start, |
2386 | unsigned long end, unsigned long tree_end, bool mm_wr_locked); |
2387 | |
2388 | struct mmu_notifier_range; |
2389 | |
2390 | void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, |
2391 | unsigned long end, unsigned long floor, unsigned long ceiling); |
2392 | int |
2393 | copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); |
2394 | int follow_pte(struct mm_struct *mm, unsigned long address, |
2395 | pte_t **ptepp, spinlock_t **ptlp); |
2396 | int follow_pfn(struct vm_area_struct *vma, unsigned long address, |
2397 | unsigned long *pfn); |
2398 | int follow_phys(struct vm_area_struct *vma, unsigned long address, |
2399 | unsigned int flags, unsigned long *prot, resource_size_t *phys); |
2400 | int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, |
2401 | void *buf, int len, int write); |
2402 | |
2403 | extern void truncate_pagecache(struct inode *inode, loff_t new); |
2404 | extern void truncate_setsize(struct inode *inode, loff_t newsize); |
2405 | void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); |
2406 | void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); |
2407 | int generic_error_remove_folio(struct address_space *mapping, |
2408 | struct folio *folio); |
2409 | |
2410 | struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, |
2411 | unsigned long address, struct pt_regs *regs); |
2412 | |
2413 | #ifdef CONFIG_MMU |
2414 | extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, |
2415 | unsigned long address, unsigned int flags, |
2416 | struct pt_regs *regs); |
2417 | extern int fixup_user_fault(struct mm_struct *mm, |
2418 | unsigned long address, unsigned int fault_flags, |
2419 | bool *unlocked); |
2420 | void unmap_mapping_pages(struct address_space *mapping, |
2421 | pgoff_t start, pgoff_t nr, bool even_cows); |
2422 | void unmap_mapping_range(struct address_space *mapping, |
2423 | loff_t const holebegin, loff_t const holelen, int even_cows); |
2424 | #else |
2425 | static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, |
2426 | unsigned long address, unsigned int flags, |
2427 | struct pt_regs *regs) |
2428 | { |
2429 | /* should never happen if there's no MMU */ |
2430 | BUG(); |
2431 | return VM_FAULT_SIGBUS; |
2432 | } |
2433 | static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address, |
2434 | unsigned int fault_flags, bool *unlocked) |
2435 | { |
2436 | /* should never happen if there's no MMU */ |
2437 | BUG(); |
2438 | return -EFAULT; |
2439 | } |
2440 | static inline void unmap_mapping_pages(struct address_space *mapping, |
2441 | pgoff_t start, pgoff_t nr, bool even_cows) { } |
2442 | static inline void unmap_mapping_range(struct address_space *mapping, |
2443 | loff_t const holebegin, loff_t const holelen, int even_cows) { } |
2444 | #endif |
2445 | |
2446 | static inline void unmap_shared_mapping_range(struct address_space *mapping, |
2447 | loff_t const holebegin, loff_t const holelen) |
2448 | { |
	unmap_mapping_range(mapping, holebegin, holelen, 0);
2450 | } |
2451 | |
2452 | static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm, |
2453 | unsigned long addr); |
2454 | |
2455 | extern int access_process_vm(struct task_struct *tsk, unsigned long addr, |
2456 | void *buf, int len, unsigned int gup_flags); |
2457 | extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, |
2458 | void *buf, int len, unsigned int gup_flags); |
2459 | |
2460 | long get_user_pages_remote(struct mm_struct *mm, |
2461 | unsigned long start, unsigned long nr_pages, |
2462 | unsigned int gup_flags, struct page **pages, |
2463 | int *locked); |
2464 | long pin_user_pages_remote(struct mm_struct *mm, |
2465 | unsigned long start, unsigned long nr_pages, |
2466 | unsigned int gup_flags, struct page **pages, |
2467 | int *locked); |
2468 | |
2469 | /* |
2470 | * Retrieves a single page alongside its VMA. Does not support FOLL_NOWAIT. |
2471 | */ |
2472 | static inline struct page *get_user_page_vma_remote(struct mm_struct *mm, |
2473 | unsigned long addr, |
2474 | int gup_flags, |
2475 | struct vm_area_struct **vmap) |
2476 | { |
2477 | struct page *page; |
2478 | struct vm_area_struct *vma; |
2479 | int got; |
2480 | |
2481 | if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT))) |
		return ERR_PTR(-EINVAL);
2483 | |
	got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
2485 | |
2486 | if (got < 0) |
		return ERR_PTR(got);
2488 | |
2489 | vma = vma_lookup(mm, addr); |
2490 | if (WARN_ON_ONCE(!vma)) { |
2491 | put_page(page); |
		return ERR_PTR(-EINVAL);
2493 | } |
2494 | |
2495 | *vmap = vma; |
2496 | return page; |
2497 | } |
2498 | |
2499 | long get_user_pages(unsigned long start, unsigned long nr_pages, |
2500 | unsigned int gup_flags, struct page **pages); |
2501 | long pin_user_pages(unsigned long start, unsigned long nr_pages, |
2502 | unsigned int gup_flags, struct page **pages); |
2503 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, |
2504 | struct page **pages, unsigned int gup_flags); |
2505 | long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, |
2506 | struct page **pages, unsigned int gup_flags); |
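
/*
 * Illustrative sketch (hypothetical helper): the pin_user_pages() /
 * unpin_user_pages_dirty_lock() pairing for pages a device will write to.
 * Error handling is reduced to the bare minimum, the gup flags shown are the
 * usual choice for long-lived DMA, and the caller is assumed to hold the
 * mmap_lock of current->mm as the gup family generally requires.
 */
static inline long example_pin_for_dma(unsigned long start, unsigned long nr,
				       struct page **pages)
{
	long pinned = pin_user_pages(start, nr, FOLL_WRITE | FOLL_LONGTERM, pages);

	if (pinned <= 0)
		return pinned;

	/* ... hand the pages to the device and wait for the DMA to finish ... */

	/* drop the pins and dirty the pages, since the device wrote to them */
	unpin_user_pages_dirty_lock(pages, pinned, true);
	return pinned;
}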
2507 | |
2508 | int get_user_pages_fast(unsigned long start, int nr_pages, |
2509 | unsigned int gup_flags, struct page **pages); |
2510 | int pin_user_pages_fast(unsigned long start, int nr_pages, |
2511 | unsigned int gup_flags, struct page **pages); |
2512 | void folio_add_pin(struct folio *folio); |
2513 | |
2514 | int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc); |
2515 | int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, |
2516 | struct task_struct *task, bool bypass_rlim); |
2517 | |
2518 | struct kvec; |
2519 | struct page *get_dump_page(unsigned long addr); |
2520 | |
2521 | bool folio_mark_dirty(struct folio *folio); |
2522 | bool set_page_dirty(struct page *page); |
2523 | int set_page_dirty_lock(struct page *page); |
2524 | |
2525 | int get_cmdline(struct task_struct *task, char *buffer, int buflen); |
2526 | |
2527 | extern unsigned long move_page_tables(struct vm_area_struct *vma, |
2528 | unsigned long old_addr, struct vm_area_struct *new_vma, |
2529 | unsigned long new_addr, unsigned long len, |
2530 | bool need_rmap_locks, bool for_stack); |
2531 | |
2532 | /* |
 * Flags used by change_protection(). For now we make it a bitmap so that
 * we can pass in multiple flags, just like function parameters. However,
 * for now all the callers only use one of the flags at a time.
2537 | */ |
2538 | /* |
2539 | * Whether we should manually check if we can map individual PTEs writable, |
2540 | * because something (e.g., COW, uffd-wp) blocks that from happening for all |
2541 | * PTEs automatically in a writable mapping. |
2542 | */ |
2543 | #define MM_CP_TRY_CHANGE_WRITABLE (1UL << 0) |
2544 | /* Whether this protection change is for NUMA hints */ |
2545 | #define MM_CP_PROT_NUMA (1UL << 1) |
2546 | /* Whether this change is for write protecting */ |
2547 | #define MM_CP_UFFD_WP (1UL << 2) /* do wp */ |
2548 | #define MM_CP_UFFD_WP_RESOLVE (1UL << 3) /* Resolve wp */ |
2549 | #define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \ |
2550 | MM_CP_UFFD_WP_RESOLVE) |
2551 | |
2552 | bool vma_needs_dirty_tracking(struct vm_area_struct *vma); |
2553 | int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot); |
2554 | static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma) |
2555 | { |
2556 | /* |
2557 | * We want to check manually if we can change individual PTEs writable |
2558 | * if we can't do that automatically for all PTEs in a mapping. For |
2559 | * private mappings, that's always the case when we have write |
2560 | * permissions as we properly have to handle COW. |
2561 | */ |
2562 | if (vma->vm_flags & VM_SHARED) |
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}
2567 | bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr, |
2568 | pte_t pte); |
2569 | extern long change_protection(struct mmu_gather *tlb, |
2570 | struct vm_area_struct *vma, unsigned long start, |
2571 | unsigned long end, unsigned long cp_flags); |
2572 | extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb, |
2573 | struct vm_area_struct *vma, struct vm_area_struct **pprev, |
2574 | unsigned long start, unsigned long end, unsigned long newflags); |
2575 | |
2576 | /* |
 * Doesn't attempt to fault in pages; may return fewer pages than requested.
2578 | */ |
2579 | int get_user_pages_fast_only(unsigned long start, int nr_pages, |
2580 | unsigned int gup_flags, struct page **pages); |
2581 | |
2582 | static inline bool get_user_page_fast_only(unsigned long addr, |
2583 | unsigned int gup_flags, struct page **pagep) |
2584 | { |
	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
2586 | } |
2587 | /* |
2588 | * per-process(per-mm_struct) statistics. |
2589 | */ |
2590 | static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) |
2591 | { |
	return percpu_counter_read_positive(&mm->rss_stat[member]);
2593 | } |
2594 | |
void mm_trace_rss_stat(struct mm_struct *mm, int member);
2596 | |
2597 | static inline void add_mm_counter(struct mm_struct *mm, int member, long value) |
2598 | { |
	percpu_counter_add(&mm->rss_stat[member], value);
2600 | |
2601 | mm_trace_rss_stat(mm, member); |
2602 | } |
2603 | |
2604 | static inline void inc_mm_counter(struct mm_struct *mm, int member) |
2605 | { |
	percpu_counter_inc(&mm->rss_stat[member]);
2607 | |
2608 | mm_trace_rss_stat(mm, member); |
2609 | } |
2610 | |
2611 | static inline void dec_mm_counter(struct mm_struct *mm, int member) |
2612 | { |
	percpu_counter_dec(&mm->rss_stat[member]);
2614 | |
2615 | mm_trace_rss_stat(mm, member); |
2616 | } |
2617 | |
2618 | /* Optimized variant when folio is already known not to be anon */ |
2619 | static inline int mm_counter_file(struct folio *folio) |
2620 | { |
2621 | if (folio_test_swapbacked(folio)) |
2622 | return MM_SHMEMPAGES; |
2623 | return MM_FILEPAGES; |
2624 | } |
2625 | |
2626 | static inline int mm_counter(struct folio *folio) |
2627 | { |
2628 | if (folio_test_anon(folio)) |
2629 | return MM_ANONPAGES; |
2630 | return mm_counter_file(folio); |
2631 | } |
2632 | |
static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
2638 | } |
2639 | |
static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
2641 | { |
2642 | return max(mm->hiwater_rss, get_mm_rss(mm)); |
2643 | } |
2644 | |
2645 | static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm) |
2646 | { |
2647 | return max(mm->hiwater_vm, mm->total_vm); |
2648 | } |
2649 | |
static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);
2653 | |
2654 | if ((mm)->hiwater_rss < _rss) |
2655 | (mm)->hiwater_rss = _rss; |
2656 | } |
2657 | |
2658 | static inline void update_hiwater_vm(struct mm_struct *mm) |
2659 | { |
2660 | if (mm->hiwater_vm < mm->total_vm) |
2661 | mm->hiwater_vm = mm->total_vm; |
2662 | } |
2663 | |
static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
2665 | { |
2666 | mm->hiwater_rss = get_mm_rss(mm); |
2667 | } |
2668 | |
static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
2673 | |
2674 | if (*maxrss < hiwater_rss) |
2675 | *maxrss = hiwater_rss; |
2676 | } |
2677 | |
2678 | #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL |
2679 | static inline int pte_special(pte_t pte) |
2680 | { |
2681 | return 0; |
2682 | } |
2683 | |
2684 | static inline pte_t pte_mkspecial(pte_t pte) |
2685 | { |
2686 | return pte; |
2687 | } |
2688 | #endif |
2689 | |
2690 | #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP |
2691 | static inline int pte_devmap(pte_t pte) |
2692 | { |
2693 | return 0; |
2694 | } |
2695 | #endif |
2696 | |
2697 | extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, |
2698 | spinlock_t **ptl); |
2699 | static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, |
2700 | spinlock_t **ptl) |
2701 | { |
2702 | pte_t *ptep; |
2703 | __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl)); |
2704 | return ptep; |
2705 | } |
2706 | |
2707 | #ifdef __PAGETABLE_P4D_FOLDED |
2708 | static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, |
2709 | unsigned long address) |
2710 | { |
2711 | return 0; |
2712 | } |
2713 | #else |
2714 | int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); |
2715 | #endif |
2716 | |
2717 | #if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU) |
2718 | static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, |
2719 | unsigned long address) |
2720 | { |
2721 | return 0; |
2722 | } |
2723 | static inline void mm_inc_nr_puds(struct mm_struct *mm) {} |
2724 | static inline void mm_dec_nr_puds(struct mm_struct *mm) {} |
2725 | |
2726 | #else |
2727 | int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address); |
2728 | |
2729 | static inline void mm_inc_nr_puds(struct mm_struct *mm) |
2730 | { |
2731 | if (mm_pud_folded(mm)) |
2732 | return; |
	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2734 | } |
2735 | |
2736 | static inline void mm_dec_nr_puds(struct mm_struct *mm) |
2737 | { |
2738 | if (mm_pud_folded(mm)) |
2739 | return; |
	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2741 | } |
2742 | #endif |
2743 | |
2744 | #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU) |
2745 | static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, |
2746 | unsigned long address) |
2747 | { |
2748 | return 0; |
2749 | } |
2750 | |
2751 | static inline void mm_inc_nr_pmds(struct mm_struct *mm) {} |
2752 | static inline void mm_dec_nr_pmds(struct mm_struct *mm) {} |
2753 | |
2754 | #else |
2755 | int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); |
2756 | |
2757 | static inline void mm_inc_nr_pmds(struct mm_struct *mm) |
2758 | { |
2759 | if (mm_pmd_folded(mm)) |
2760 | return; |
	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2762 | } |
2763 | |
2764 | static inline void mm_dec_nr_pmds(struct mm_struct *mm) |
2765 | { |
2766 | if (mm_pmd_folded(mm)) |
2767 | return; |
	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2769 | } |
2770 | #endif |
2771 | |
2772 | #ifdef CONFIG_MMU |
2773 | static inline void mm_pgtables_bytes_init(struct mm_struct *mm) |
2774 | { |
	atomic_long_set(&mm->pgtables_bytes, 0);
2776 | } |
2777 | |
2778 | static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm) |
2779 | { |
	return atomic_long_read(&mm->pgtables_bytes);
2781 | } |
2782 | |
2783 | static inline void mm_inc_nr_ptes(struct mm_struct *mm) |
2784 | { |
	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2786 | } |
2787 | |
2788 | static inline void mm_dec_nr_ptes(struct mm_struct *mm) |
2789 | { |
	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2791 | } |
2792 | #else |
2793 | |
2794 | static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {} |
2795 | static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm) |
2796 | { |
2797 | return 0; |
2798 | } |
2799 | |
2800 | static inline void mm_inc_nr_ptes(struct mm_struct *mm) {} |
2801 | static inline void mm_dec_nr_ptes(struct mm_struct *mm) {} |
2802 | #endif |
2803 | |
2804 | int __pte_alloc(struct mm_struct *mm, pmd_t *pmd); |
2805 | int __pte_alloc_kernel(pmd_t *pmd); |
2806 | |
2807 | #if defined(CONFIG_MMU) |
2808 | |
2809 | static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, |
2810 | unsigned long address) |
2811 | { |
2812 | return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ? |
2813 | NULL : p4d_offset(pgd, address); |
2814 | } |
2815 | |
2816 | static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, |
2817 | unsigned long address) |
2818 | { |
2819 | return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ? |
2820 | NULL : pud_offset(p4d, address); |
2821 | } |
2822 | |
2823 | static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) |
2824 | { |
2825 | return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))? |
2826 | NULL: pmd_offset(pud, address); |
2827 | } |
2828 | #endif /* CONFIG_MMU */ |
2829 | |
2830 | static inline struct ptdesc *virt_to_ptdesc(const void *x) |
2831 | { |
2832 | return page_ptdesc(virt_to_page(x)); |
2833 | } |
2834 | |
2835 | static inline void *ptdesc_to_virt(const struct ptdesc *pt) |
2836 | { |
2837 | return page_to_virt(ptdesc_page(pt)); |
2838 | } |
2839 | |
2840 | static inline void *ptdesc_address(const struct ptdesc *pt) |
2841 | { |
2842 | return folio_address(ptdesc_folio(pt)); |
2843 | } |
2844 | |
2845 | static inline bool pagetable_is_reserved(struct ptdesc *pt) |
2846 | { |
2847 | return folio_test_reserved(ptdesc_folio(pt)); |
2848 | } |
2849 | |
2850 | /** |
2851 | * pagetable_alloc - Allocate pagetables |
2852 | * @gfp: GFP flags |
2853 | * @order: desired pagetable order |
2854 | * |
2855 | * pagetable_alloc allocates memory for page tables as well as a page table |
2856 | * descriptor to describe that memory. |
2857 | * |
2858 | * Return: The ptdesc describing the allocated page tables. |
2859 | */ |
2860 | static inline struct ptdesc *pagetable_alloc(gfp_t gfp, unsigned int order) |
2861 | { |
	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2863 | |
2864 | return page_ptdesc(page); |
2865 | } |
2866 | |
2867 | /** |
2868 | * pagetable_free - Free pagetables |
2869 | * @pt: The page table descriptor |
2870 | * |
2871 | * pagetable_free frees the memory of all page tables described by a page |
2872 | * table descriptor and the memory for the descriptor itself. |
2873 | */ |
2874 | static inline void pagetable_free(struct ptdesc *pt) |
2875 | { |
2876 | struct page *page = ptdesc_page(pt); |
2877 | |
	__free_pages(page, compound_order(page));
2879 | } |
2880 | |
2881 | #if USE_SPLIT_PTE_PTLOCKS |
2882 | #if ALLOC_SPLIT_PTLOCKS |
2883 | void __init ptlock_cache_init(void); |
2884 | bool ptlock_alloc(struct ptdesc *ptdesc); |
2885 | void ptlock_free(struct ptdesc *ptdesc); |
2886 | |
2887 | static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc) |
2888 | { |
2889 | return ptdesc->ptl; |
2890 | } |
2891 | #else /* ALLOC_SPLIT_PTLOCKS */ |
2892 | static inline void ptlock_cache_init(void) |
2893 | { |
2894 | } |
2895 | |
2896 | static inline bool ptlock_alloc(struct ptdesc *ptdesc) |
2897 | { |
2898 | return true; |
2899 | } |
2900 | |
2901 | static inline void ptlock_free(struct ptdesc *ptdesc) |
2902 | { |
2903 | } |
2904 | |
2905 | static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc) |
2906 | { |
2907 | return &ptdesc->ptl; |
2908 | } |
2909 | #endif /* ALLOC_SPLIT_PTLOCKS */ |
2910 | |
2911 | static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) |
2912 | { |
2913 | return ptlock_ptr(page_ptdesc(pmd_page(*pmd))); |
2914 | } |
2915 | |
2916 | static inline bool ptlock_init(struct ptdesc *ptdesc) |
2917 | { |
2918 | /* |
	 * prep_new_page() initializes page->private (and therefore page->ptl)
	 * to 0. Make sure nobody has taken it into use in the meantime.
	 *
	 * This can happen if an arch tries to use slab for page table
	 * allocation: slab code uses page->slab_cache, which shares storage
	 * with page->ptl.
2924 | */ |
2925 | VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc)); |
2926 | if (!ptlock_alloc(ptdesc)) |
2927 | return false; |
2928 | spin_lock_init(ptlock_ptr(ptdesc)); |
2929 | return true; |
2930 | } |
2931 | |
2932 | #else /* !USE_SPLIT_PTE_PTLOCKS */ |
2933 | /* |
2934 | * We use mm->page_table_lock to guard all pagetable pages of the mm. |
2935 | */ |
2936 | static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) |
2937 | { |
2938 | return &mm->page_table_lock; |
2939 | } |
2940 | static inline void ptlock_cache_init(void) {} |
2941 | static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; } |
2942 | static inline void ptlock_free(struct ptdesc *ptdesc) {} |
2943 | #endif /* USE_SPLIT_PTE_PTLOCKS */ |
2944 | |
2945 | static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc) |
2946 | { |
2947 | struct folio *folio = ptdesc_folio(ptdesc); |
2948 | |
2949 | if (!ptlock_init(ptdesc)) |
2950 | return false; |
2951 | __folio_set_pgtable(folio); |
	lruvec_stat_add_folio(folio, NR_PAGETABLE);
2953 | return true; |
2954 | } |
2955 | |
2956 | static inline void pagetable_pte_dtor(struct ptdesc *ptdesc) |
2957 | { |
2958 | struct folio *folio = ptdesc_folio(ptdesc); |
2959 | |
2960 | ptlock_free(ptdesc); |
2961 | __folio_clear_pgtable(folio); |
	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
2963 | } |
2964 | |
2965 | pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp); |
2966 | static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr) |
2967 | { |
2968 | return __pte_offset_map(pmd, addr, NULL); |
2969 | } |
2970 | |
2971 | pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd, |
2972 | unsigned long addr, spinlock_t **ptlp); |
2973 | static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd, |
2974 | unsigned long addr, spinlock_t **ptlp) |
2975 | { |
2976 | pte_t *pte; |
2977 | |
2978 | __cond_lock(*ptlp, pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)); |
2979 | return pte; |
2980 | } |
2981 | |
2982 | pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd, |
2983 | unsigned long addr, spinlock_t **ptlp); |
2984 | |
2985 | #define pte_unmap_unlock(pte, ptl) do { \ |
2986 | spin_unlock(ptl); \ |
2987 | pte_unmap(pte); \ |
2988 | } while (0) |
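
/*
 * Illustrative sketch (not part of this header): the usual pattern for
 * examining one PTE under its (possibly split) lock. The surrounding
 * walker is hypothetical; only the map/lock/unlock calls are shown as-is.
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *	if (!pte)
 *		return -EAGAIN;	(the pmd changed underneath us; caller retries)
 *	... read or update the entry at pte ...
 *	pte_unmap_unlock(pte, ptl);
 */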
2989 | |
2990 | #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd)) |
2991 | |
2992 | #define pte_alloc_map(mm, pmd, address) \ |
2993 | (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address)) |
2994 | |
2995 | #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ |
2996 | (pte_alloc(mm, pmd) ? \ |
2997 | NULL : pte_offset_map_lock(mm, pmd, address, ptlp)) |
2998 | |
2999 | #define pte_alloc_kernel(pmd, address) \ |
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd)) ?	\
		NULL : pte_offset_kernel(pmd, address))
3002 | |
3003 | #if USE_SPLIT_PMD_PTLOCKS |
3004 | |
3005 | static inline struct page *pmd_pgtable_page(pmd_t *pmd) |
3006 | { |
3007 | unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1); |
3008 | return virt_to_page((void *)((unsigned long) pmd & mask)); |
3009 | } |
3010 | |
3011 | static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd) |
3012 | { |
3013 | return page_ptdesc(pmd_pgtable_page(pmd)); |
3014 | } |
3015 | |
3016 | static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) |
3017 | { |
	return ptlock_ptr(pmd_ptdesc(pmd));
3019 | } |
3020 | |
3021 | static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) |
3022 | { |
3023 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
3024 | ptdesc->pmd_huge_pte = NULL; |
3025 | #endif |
3026 | return ptlock_init(ptdesc); |
3027 | } |
3028 | |
3029 | static inline void pmd_ptlock_free(struct ptdesc *ptdesc) |
3030 | { |
3031 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
3032 | VM_BUG_ON_PAGE(ptdesc->pmd_huge_pte, ptdesc_page(ptdesc)); |
3033 | #endif |
3034 | ptlock_free(ptdesc); |
3035 | } |
3036 | |
3037 | #define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte) |
3038 | |
3039 | #else |
3040 | |
3041 | static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) |
3042 | { |
3043 | return &mm->page_table_lock; |
3044 | } |
3045 | |
3046 | static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; } |
3047 | static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {} |
3048 | |
3049 | #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte) |
3050 | |
3051 | #endif |
3052 | |
3053 | static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) |
3054 | { |
3055 | spinlock_t *ptl = pmd_lockptr(mm, pmd); |
	spin_lock(ptl);
3057 | return ptl; |
3058 | } |
3059 | |
3060 | static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc) |
3061 | { |
3062 | struct folio *folio = ptdesc_folio(ptdesc); |
3063 | |
3064 | if (!pmd_ptlock_init(ptdesc)) |
3065 | return false; |
3066 | __folio_set_pgtable(folio); |
	lruvec_stat_add_folio(folio, NR_PAGETABLE);
3068 | return true; |
3069 | } |
3070 | |
3071 | static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc) |
3072 | { |
3073 | struct folio *folio = ptdesc_folio(ptdesc); |
3074 | |
3075 | pmd_ptlock_free(ptdesc); |
3076 | __folio_clear_pgtable(folio); |
	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
3078 | } |
3079 | |
3080 | /* |
3081 | * No scalability reason to split PUD locks yet, but follow the same pattern |
3082 | * as the PMD locks to make it easier if we decide to. The VM should not be |
3083 | * considered ready to switch to split PUD locks yet; there may be places |
3084 | * which need to be converted from page_table_lock. |
3085 | */ |
3086 | static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud) |
3087 | { |
3088 | return &mm->page_table_lock; |
3089 | } |
3090 | |
3091 | static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) |
3092 | { |
3093 | spinlock_t *ptl = pud_lockptr(mm, pud); |
3094 | |
	spin_lock(ptl);
3096 | return ptl; |
3097 | } |
3098 | |
3099 | static inline void pagetable_pud_ctor(struct ptdesc *ptdesc) |
3100 | { |
3101 | struct folio *folio = ptdesc_folio(ptdesc); |
3102 | |
3103 | __folio_set_pgtable(folio); |
	lruvec_stat_add_folio(folio, NR_PAGETABLE);
3105 | } |
3106 | |
3107 | static inline void pagetable_pud_dtor(struct ptdesc *ptdesc) |
3108 | { |
3109 | struct folio *folio = ptdesc_folio(ptdesc); |
3110 | |
3111 | __folio_clear_pgtable(folio); |
	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
3113 | } |
3114 | |
3115 | extern void __init pagecache_init(void); |
3116 | extern void free_initmem(void); |
3117 | |
3118 | /* |
3119 | * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK) |
3120 | * into the buddy system. The freed pages will be poisoned with pattern |
3121 | * "poison" if it's within range [0, UCHAR_MAX]. |
3122 | * Return pages freed into the buddy system. |
3123 | */ |
3124 | extern unsigned long free_reserved_area(void *start, void *end, |
3125 | int poison, const char *s); |
3126 | |
3127 | extern void adjust_managed_page_count(struct page *page, long count); |
3128 | |
3129 | extern void reserve_bootmem_region(phys_addr_t start, |
3130 | phys_addr_t end, int nid); |
3131 | |
3132 | /* Free the reserved page into the buddy system, so it gets managed. */ |
3133 | static inline void free_reserved_page(struct page *page) |
3134 | { |
3135 | ClearPageReserved(page); |
3136 | init_page_count(page); |
3137 | __free_page(page); |
	adjust_managed_page_count(page, 1);
3139 | } |
3140 | #define free_highmem_page(page) free_reserved_page(page) |
3141 | |
3142 | static inline void mark_page_reserved(struct page *page) |
3143 | { |
3144 | SetPageReserved(page); |
	adjust_managed_page_count(page, -1);
3146 | } |
3147 | |
3148 | static inline void free_reserved_ptdesc(struct ptdesc *pt) |
3149 | { |
3150 | free_reserved_page(ptdesc_page(pt)); |
3151 | } |
3152 | |
3153 | /* |
3154 | * Default method to free all the __init memory into the buddy system. |
3155 | * The freed pages will be poisoned with pattern "poison" if it's within |
3156 | * range [0, UCHAR_MAX]. |
3157 | * Return pages freed into the buddy system. |
3158 | */ |
3159 | static inline unsigned long free_initmem_default(int poison) |
3160 | { |
3161 | extern char __init_begin[], __init_end[]; |
3162 | |
	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel image (initmem)");
3165 | } |
3166 | |
3167 | static inline unsigned long get_num_physpages(void) |
3168 | { |
3169 | int nid; |
3170 | unsigned long phys_pages = 0; |
3171 | |
3172 | for_each_online_node(nid) |
3173 | phys_pages += node_present_pages(nid); |
3174 | |
3175 | return phys_pages; |
3176 | } |
3177 | |
3178 | /* |
3179 | * Using memblock node mappings, an architecture may initialise its |
3180 | * zones, allocate the backing mem_map and account for memory holes in an |
3181 | * architecture independent manner. |
3182 | * |
 * An architecture is expected to register the ranges of page frames backed
 * by physical memory with memblock_add[_node]() before calling
 * free_area_init(), passing in the PFN each zone ends at. In its most basic
 * usage, an architecture is expected to do something like
3187 | * |
3188 | * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, |
3189 | * max_highmem_pfn}; |
3190 | * for_each_valid_physical_page_range() |
3191 | * memblock_add_node(base, size, nid, MEMBLOCK_NONE) |
3192 | * free_area_init(max_zone_pfns); |
3193 | */ |
3194 | void free_area_init(unsigned long *max_zone_pfn); |
3195 | unsigned long node_map_pfn_alignment(void); |
3196 | unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, |
3197 | unsigned long end_pfn); |
3198 | extern unsigned long absent_pages_in_range(unsigned long start_pfn, |
3199 | unsigned long end_pfn); |
3200 | extern void get_pfn_range_for_nid(unsigned int nid, |
3201 | unsigned long *start_pfn, unsigned long *end_pfn); |
3202 | |
3203 | #ifndef CONFIG_NUMA |
3204 | static inline int early_pfn_to_nid(unsigned long pfn) |
3205 | { |
3206 | return 0; |
3207 | } |
3208 | #else |
3209 | /* please see mm/page_alloc.c */ |
3210 | extern int __meminit early_pfn_to_nid(unsigned long pfn); |
3211 | #endif |
3212 | |
3213 | extern void set_dma_reserve(unsigned long new_dma_reserve); |
3214 | extern void mem_init(void); |
3215 | extern void __init mmap_init(void); |
3216 | |
3217 | extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx); |
3218 | static inline void show_mem(void) |
3219 | { |
	__show_mem(0, NULL, MAX_NR_ZONES - 1);
3221 | } |
3222 | extern long si_mem_available(void); |
3223 | extern void si_meminfo(struct sysinfo * val); |
3224 | extern void si_meminfo_node(struct sysinfo *val, int nid); |
3225 | #ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES |
3226 | extern unsigned long arch_reserved_kernel_pages(void); |
3227 | #endif |
3228 | |
3229 | extern __printf(3, 4) |
3230 | void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...); |
3231 | |
3232 | extern void setup_per_cpu_pageset(void); |
3233 | |
3234 | /* nommu.c */ |
3235 | extern atomic_long_t mmap_pages_allocated; |
3236 | extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); |
3237 | |
3238 | /* interval_tree.c */ |
3239 | void vma_interval_tree_insert(struct vm_area_struct *node, |
3240 | struct rb_root_cached *root); |
3241 | void vma_interval_tree_insert_after(struct vm_area_struct *node, |
3242 | struct vm_area_struct *prev, |
3243 | struct rb_root_cached *root); |
3244 | void vma_interval_tree_remove(struct vm_area_struct *node, |
3245 | struct rb_root_cached *root); |
3246 | struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root, |
3247 | unsigned long start, unsigned long last); |
3248 | struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node, |
3249 | unsigned long start, unsigned long last); |
3250 | |
3251 | #define vma_interval_tree_foreach(vma, root, start, last) \ |
3252 | for (vma = vma_interval_tree_iter_first(root, start, last); \ |
3253 | vma; vma = vma_interval_tree_iter_next(vma, start, last)) |
3254 | |
3255 | void anon_vma_interval_tree_insert(struct anon_vma_chain *node, |
3256 | struct rb_root_cached *root); |
3257 | void anon_vma_interval_tree_remove(struct anon_vma_chain *node, |
3258 | struct rb_root_cached *root); |
3259 | struct anon_vma_chain * |
3260 | anon_vma_interval_tree_iter_first(struct rb_root_cached *root, |
3261 | unsigned long start, unsigned long last); |
3262 | struct anon_vma_chain *anon_vma_interval_tree_iter_next( |
3263 | struct anon_vma_chain *node, unsigned long start, unsigned long last); |
3264 | #ifdef CONFIG_DEBUG_VM_RB |
3265 | void anon_vma_interval_tree_verify(struct anon_vma_chain *node); |
3266 | #endif |
3267 | |
3268 | #define anon_vma_interval_tree_foreach(avc, root, start, last) \ |
3269 | for (avc = anon_vma_interval_tree_iter_first(root, start, last); \ |
3270 | avc; avc = anon_vma_interval_tree_iter_next(avc, start, last)) |
3271 | |
3272 | /* mmap.c */ |
3273 | extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); |
3274 | extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, |
3275 | unsigned long start, unsigned long end, pgoff_t pgoff, |
3276 | struct vm_area_struct *next); |
3277 | extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, |
3278 | unsigned long start, unsigned long end, pgoff_t pgoff); |
3279 | extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); |
3280 | extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); |
3281 | extern void unlink_file_vma(struct vm_area_struct *); |
3282 | extern struct vm_area_struct *copy_vma(struct vm_area_struct **, |
3283 | unsigned long addr, unsigned long len, pgoff_t pgoff, |
3284 | bool *need_rmap_locks); |
3285 | extern void exit_mmap(struct mm_struct *); |
3286 | struct vm_area_struct *vma_modify(struct vma_iterator *vmi, |
3287 | struct vm_area_struct *prev, |
3288 | struct vm_area_struct *vma, |
3289 | unsigned long start, unsigned long end, |
3290 | unsigned long vm_flags, |
3291 | struct mempolicy *policy, |
3292 | struct vm_userfaultfd_ctx uffd_ctx, |
3293 | struct anon_vma_name *anon_name); |
3294 | |
3295 | /* We are about to modify the VMA's flags. */ |
3296 | static inline struct vm_area_struct |
3297 | *vma_modify_flags(struct vma_iterator *vmi, |
3298 | struct vm_area_struct *prev, |
3299 | struct vm_area_struct *vma, |
3300 | unsigned long start, unsigned long end, |
3301 | unsigned long new_flags) |
3302 | { |
	return vma_modify(vmi, prev, vma, start, end, new_flags,
			  vma_policy(vma), vma->vm_userfaultfd_ctx,
			  anon_vma_name(vma));
3306 | } |
3307 | |
3308 | /* We are about to modify the VMA's flags and/or anon_name. */ |
3309 | static inline struct vm_area_struct |
3310 | *vma_modify_flags_name(struct vma_iterator *vmi, |
3311 | struct vm_area_struct *prev, |
3312 | struct vm_area_struct *vma, |
3313 | unsigned long start, |
3314 | unsigned long end, |
3315 | unsigned long new_flags, |
3316 | struct anon_vma_name *new_name) |
3317 | { |
	return vma_modify(vmi, prev, vma, start, end, new_flags,
			  vma_policy(vma), vma->vm_userfaultfd_ctx, new_name);
3320 | } |
3321 | |
3322 | /* We are about to modify the VMA's memory policy. */ |
3323 | static inline struct vm_area_struct |
3324 | *vma_modify_policy(struct vma_iterator *vmi, |
3325 | struct vm_area_struct *prev, |
3326 | struct vm_area_struct *vma, |
3327 | unsigned long start, unsigned long end, |
3328 | struct mempolicy *new_pol) |
3329 | { |
	return vma_modify(vmi, prev, vma, start, end, vma->vm_flags,
			  new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma));
3332 | } |
3333 | |
3334 | /* We are about to modify the VMA's flags and/or uffd context. */ |
3335 | static inline struct vm_area_struct |
3336 | *vma_modify_flags_uffd(struct vma_iterator *vmi, |
3337 | struct vm_area_struct *prev, |
3338 | struct vm_area_struct *vma, |
3339 | unsigned long start, unsigned long end, |
3340 | unsigned long new_flags, |
3341 | struct vm_userfaultfd_ctx new_ctx) |
3342 | { |
	return vma_modify(vmi, prev, vma, start, end, new_flags,
			  vma_policy(vma), new_ctx, anon_vma_name(vma));
3345 | } |
3346 | |
3347 | static inline int check_data_rlimit(unsigned long rlim, |
3348 | unsigned long new, |
3349 | unsigned long start, |
3350 | unsigned long end_data, |
3351 | unsigned long start_data) |
3352 | { |
3353 | if (rlim < RLIM_INFINITY) { |
3354 | if (((new - start) + (end_data - start_data)) > rlim) |
3355 | return -ENOSPC; |
3356 | } |
3357 | |
3358 | return 0; |
3359 | } |
3360 | |
3361 | extern int mm_take_all_locks(struct mm_struct *mm); |
3362 | extern void mm_drop_all_locks(struct mm_struct *mm); |
3363 | |
3364 | extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); |
3365 | extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); |
3366 | extern struct file *get_mm_exe_file(struct mm_struct *mm); |
3367 | extern struct file *get_task_exe_file(struct task_struct *task); |
3368 | |
3369 | extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); |
3370 | extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); |
3371 | |
3372 | extern bool vma_is_special_mapping(const struct vm_area_struct *vma, |
3373 | const struct vm_special_mapping *sm); |
3374 | extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, |
3375 | unsigned long addr, unsigned long len, |
3376 | unsigned long flags, |
3377 | const struct vm_special_mapping *spec); |
3378 | /* This is an obsolete alternative to _install_special_mapping. */ |
3379 | extern int install_special_mapping(struct mm_struct *mm, |
3380 | unsigned long addr, unsigned long len, |
3381 | unsigned long flags, struct page **pages); |
3382 | |
3383 | unsigned long randomize_stack_top(unsigned long stack_top); |
3384 | unsigned long randomize_page(unsigned long start, unsigned long range); |
3385 | |
3386 | extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); |
3387 | |
3388 | extern unsigned long mmap_region(struct file *file, unsigned long addr, |
3389 | unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, |
3390 | struct list_head *uf); |
3391 | extern unsigned long do_mmap(struct file *file, unsigned long addr, |
3392 | unsigned long len, unsigned long prot, unsigned long flags, |
3393 | vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, |
3394 | struct list_head *uf); |
3395 | extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, |
3396 | unsigned long start, size_t len, struct list_head *uf, |
3397 | bool unlock); |
3398 | extern int do_munmap(struct mm_struct *, unsigned long, size_t, |
3399 | struct list_head *uf); |
3400 | extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior); |
3401 | |
3402 | #ifdef CONFIG_MMU |
3403 | extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, |
3404 | unsigned long start, unsigned long end, |
3405 | struct list_head *uf, bool unlock); |
3406 | extern int __mm_populate(unsigned long addr, unsigned long len, |
3407 | int ignore_errors); |
3408 | static inline void mm_populate(unsigned long addr, unsigned long len) |
3409 | { |
3410 | /* Ignore errors */ |
	(void) __mm_populate(addr, len, 1);
3412 | } |
3413 | #else |
3414 | static inline void mm_populate(unsigned long addr, unsigned long len) {} |
3415 | #endif |
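
/*
 * Illustrative sketch (not part of this header): do_mmap() reports via
 * *populate how many bytes the caller should pre-fault once the mmap
 * lock has been dropped; vm_mmap_pgoff()-style callers then do roughly
 *
 *	addr = do_mmap(file, addr, len, prot, flags, 0, pgoff,
 *		       &populate, &uf);
 *	mmap_write_unlock(mm);
 *	if (populate && !IS_ERR_VALUE(addr))
 *		mm_populate(addr, populate);
 */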
3416 | |
3417 | /* This takes the mm semaphore itself */ |
3418 | extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long); |
3419 | extern int vm_munmap(unsigned long, size_t); |
3420 | extern unsigned long __must_check vm_mmap(struct file *, unsigned long, |
3421 | unsigned long, unsigned long, |
3422 | unsigned long, unsigned long); |
3423 | |
3424 | struct vm_unmapped_area_info { |
3425 | #define VM_UNMAPPED_AREA_TOPDOWN 1 |
3426 | unsigned long flags; |
3427 | unsigned long length; |
3428 | unsigned long low_limit; |
3429 | unsigned long high_limit; |
3430 | unsigned long align_mask; |
3431 | unsigned long align_offset; |
3432 | }; |
3433 | |
3434 | extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info); |
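
/*
 * Illustrative sketch (not part of this header): an arch
 * get_unmapped_area() implementation fills in vm_unmapped_area_info and
 * lets vm_unmapped_area() do the search; the limits below are placeholders.
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 *	info.length = len;
 *	info.low_limit = PAGE_SIZE;
 *	info.high_limit = mm->mmap_base;
 *	addr = vm_unmapped_area(&info);
 *	if (offset_in_page(addr))
 *		... addr holds a negative errno: fall back or fail ...
 */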
3435 | |
3436 | /* truncate.c */ |
3437 | extern void truncate_inode_pages(struct address_space *, loff_t); |
3438 | extern void truncate_inode_pages_range(struct address_space *, |
3439 | loff_t lstart, loff_t lend); |
3440 | extern void truncate_inode_pages_final(struct address_space *); |
3441 | |
3442 | /* generic vm_area_ops exported for stackable file systems */ |
3443 | extern vm_fault_t filemap_fault(struct vm_fault *vmf); |
3444 | extern vm_fault_t filemap_map_pages(struct vm_fault *vmf, |
3445 | pgoff_t start_pgoff, pgoff_t end_pgoff); |
3446 | extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf); |
3447 | |
3448 | extern unsigned long stack_guard_gap; |
3449 | /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ |
3450 | int expand_stack_locked(struct vm_area_struct *vma, unsigned long address); |
3451 | struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr); |
3452 | |
/* CONFIG_STACK_GROWSUP still needs to grow downwards in some places */
3454 | int expand_downwards(struct vm_area_struct *vma, unsigned long address); |
3455 | |
3456 | /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ |
3457 | extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); |
3458 | extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, |
3459 | struct vm_area_struct **pprev); |
3460 | |
3461 | /* |
 * Look up the first VMA which intersects the interval [start_addr, end_addr);
 * NULL if none. Assume start_addr < end_addr.
3464 | */ |
3465 | struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, |
3466 | unsigned long start_addr, unsigned long end_addr); |
3467 | |
3468 | /** |
3469 | * vma_lookup() - Find a VMA at a specific address |
3470 | * @mm: The process address space. |
3471 | * @addr: The user address. |
3472 | * |
3473 | * Return: The vm_area_struct at the given address, %NULL otherwise. |
3474 | */ |
3475 | static inline |
3476 | struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr) |
3477 | { |
	return mtree_load(&mm->mm_mt, addr);
3479 | } |
3480 | |
3481 | static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma) |
3482 | { |
3483 | if (vma->vm_flags & VM_GROWSDOWN) |
3484 | return stack_guard_gap; |
3485 | |
3486 | /* See reasoning around the VM_SHADOW_STACK definition */ |
3487 | if (vma->vm_flags & VM_SHADOW_STACK) |
3488 | return PAGE_SIZE; |
3489 | |
3490 | return 0; |
3491 | } |
3492 | |
3493 | static inline unsigned long vm_start_gap(struct vm_area_struct *vma) |
3494 | { |
3495 | unsigned long gap = stack_guard_start_gap(vma); |
3496 | unsigned long vm_start = vma->vm_start; |
3497 | |
3498 | vm_start -= gap; |
3499 | if (vm_start > vma->vm_start) |
3500 | vm_start = 0; |
3501 | return vm_start; |
3502 | } |
3503 | |
3504 | static inline unsigned long vm_end_gap(struct vm_area_struct *vma) |
3505 | { |
3506 | unsigned long vm_end = vma->vm_end; |
3507 | |
3508 | if (vma->vm_flags & VM_GROWSUP) { |
3509 | vm_end += stack_guard_gap; |
3510 | if (vm_end < vma->vm_end) |
3511 | vm_end = -PAGE_SIZE; |
3512 | } |
3513 | return vm_end; |
3514 | } |
3515 | |
3516 | static inline unsigned long vma_pages(struct vm_area_struct *vma) |
3517 | { |
3518 | return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
3519 | } |
3520 | |
/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
3522 | static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, |
3523 | unsigned long vm_start, unsigned long vm_end) |
3524 | { |
	struct vm_area_struct *vma = vma_lookup(mm, vm_start);
3526 | |
3527 | if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) |
3528 | vma = NULL; |
3529 | |
3530 | return vma; |
3531 | } |
3532 | |
3533 | static inline bool range_in_vma(struct vm_area_struct *vma, |
3534 | unsigned long start, unsigned long end) |
3535 | { |
3536 | return (vma && vma->vm_start <= start && end <= vma->vm_end); |
3537 | } |
3538 | |
3539 | #ifdef CONFIG_MMU |
3540 | pgprot_t vm_get_page_prot(unsigned long vm_flags); |
3541 | void vma_set_page_prot(struct vm_area_struct *vma); |
3542 | #else |
3543 | static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) |
3544 | { |
3545 | return __pgprot(0); |
3546 | } |
3547 | static inline void vma_set_page_prot(struct vm_area_struct *vma) |
3548 | { |
3549 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); |
3550 | } |
3551 | #endif |
3552 | |
3553 | void vma_set_file(struct vm_area_struct *vma, struct file *file); |
3554 | |
3555 | #ifdef CONFIG_NUMA_BALANCING |
3556 | unsigned long change_prot_numa(struct vm_area_struct *vma, |
3557 | unsigned long start, unsigned long end); |
3558 | #endif |
3559 | |
3560 | struct vm_area_struct *find_extend_vma_locked(struct mm_struct *, |
3561 | unsigned long addr); |
3562 | int remap_pfn_range(struct vm_area_struct *, unsigned long addr, |
3563 | unsigned long pfn, unsigned long size, pgprot_t); |
3564 | int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, |
3565 | unsigned long pfn, unsigned long size, pgprot_t prot); |
3566 | int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); |
3567 | int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, |
3568 | struct page **pages, unsigned long *num); |
3569 | int vm_map_pages(struct vm_area_struct *vma, struct page **pages, |
3570 | unsigned long num); |
3571 | int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, |
3572 | unsigned long num); |
3573 | vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, |
3574 | unsigned long pfn); |
3575 | vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, |
3576 | unsigned long pfn, pgprot_t pgprot); |
3577 | vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, |
3578 | pfn_t pfn); |
3579 | vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, |
3580 | unsigned long addr, pfn_t pfn); |
3581 | int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); |
3582 | |
3583 | static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, |
3584 | unsigned long addr, struct page *page) |
3585 | { |
3586 | int err = vm_insert_page(vma, addr, page); |
3587 | |
3588 | if (err == -ENOMEM) |
3589 | return VM_FAULT_OOM; |
3590 | if (err < 0 && err != -EBUSY) |
3591 | return VM_FAULT_SIGBUS; |
3592 | |
3593 | return VM_FAULT_NOPAGE; |
3594 | } |
3595 | |
3596 | #ifndef io_remap_pfn_range |
3597 | static inline int io_remap_pfn_range(struct vm_area_struct *vma, |
3598 | unsigned long addr, unsigned long pfn, |
3599 | unsigned long size, pgprot_t prot) |
3600 | { |
3601 | return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot)); |
3602 | } |
3603 | #endif |
3604 | |
3605 | static inline vm_fault_t vmf_error(int err) |
3606 | { |
3607 | if (err == -ENOMEM) |
3608 | return VM_FAULT_OOM; |
3609 | else if (err == -EHWPOISON) |
3610 | return VM_FAULT_HWPOISON; |
3611 | return VM_FAULT_SIGBUS; |
3612 | } |
3613 | |
3614 | /* |
3615 | * Convert errno to return value for ->page_mkwrite() calls. |
3616 | * |
3617 | * This should eventually be merged with vmf_error() above, but will need a |
3618 | * careful audit of all vmf_error() callers. |
3619 | */ |
3620 | static inline vm_fault_t vmf_fs_error(int err) |
3621 | { |
3622 | if (err == 0) |
3623 | return VM_FAULT_LOCKED; |
3624 | if (err == -EFAULT || err == -EAGAIN) |
3625 | return VM_FAULT_NOPAGE; |
3626 | if (err == -ENOMEM) |
3627 | return VM_FAULT_OOM; |
3628 | /* -ENOSPC, -EDQUOT, -EIO ... */ |
3629 | return VM_FAULT_SIGBUS; |
3630 | } |
3631 | |
3632 | struct page *follow_page(struct vm_area_struct *vma, unsigned long address, |
3633 | unsigned int foll_flags); |
3634 | |
3635 | static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags) |
3636 | { |
3637 | if (vm_fault & VM_FAULT_OOM) |
3638 | return -ENOMEM; |
3639 | if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) |
3640 | return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT; |
3641 | if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) |
3642 | return -EFAULT; |
3643 | return 0; |
3644 | } |
3645 | |
3646 | /* |
3647 | * Indicates whether GUP can follow a PROT_NONE mapped page, or whether |
3648 | * a (NUMA hinting) fault is required. |
3649 | */ |
3650 | static inline bool gup_can_follow_protnone(struct vm_area_struct *vma, |
3651 | unsigned int flags) |
3652 | { |
3653 | /* |
3654 | * If callers don't want to honor NUMA hinting faults, no need to |
3655 | * determine if we would actually have to trigger a NUMA hinting fault. |
3656 | */ |
3657 | if (!(flags & FOLL_HONOR_NUMA_FAULT)) |
3658 | return true; |
3659 | |
3660 | /* |
3661 | * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs. |
3662 | * |
3663 | * Requiring a fault here even for inaccessible VMAs would mean that |
3664 | * FOLL_FORCE cannot make any progress, because handle_mm_fault() |
3665 | * refuses to process NUMA hinting faults in inaccessible VMAs. |
3666 | */ |
3667 | return !vma_is_accessible(vma); |
3668 | } |
3669 | |
3670 | typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data); |
3671 | extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, |
3672 | unsigned long size, pte_fn_t fn, void *data); |
3673 | extern int apply_to_existing_page_range(struct mm_struct *mm, |
3674 | unsigned long address, unsigned long size, |
3675 | pte_fn_t fn, void *data); |
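
/*
 * Illustrative sketch (not part of this header): apply_to_page_range()
 * calls the supplied pte_fn_t for every PTE slot in the range, creating
 * page tables as needed (apply_to_existing_page_range() skips holes
 * instead). The callback below is hypothetical.
 *
 *	static int count_present(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		if (pte_present(ptep_get(pte)))
 *			(*(unsigned long *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned long present = 0;
 *	err = apply_to_page_range(mm, start, size, count_present, &present);
 */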
3676 | |
3677 | #ifdef CONFIG_PAGE_POISONING |
3678 | extern void __kernel_poison_pages(struct page *page, int numpages); |
3679 | extern void __kernel_unpoison_pages(struct page *page, int numpages); |
3680 | extern bool _page_poisoning_enabled_early; |
3681 | DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled); |
3682 | static inline bool page_poisoning_enabled(void) |
3683 | { |
3684 | return _page_poisoning_enabled_early; |
3685 | } |
3686 | /* |
3687 | * For use in fast paths after init_mem_debugging() has run, or when a |
3688 | * false negative result is not harmful when called too early. |
3689 | */ |
3690 | static inline bool page_poisoning_enabled_static(void) |
3691 | { |
3692 | return static_branch_unlikely(&_page_poisoning_enabled); |
3693 | } |
3694 | static inline void kernel_poison_pages(struct page *page, int numpages) |
3695 | { |
3696 | if (page_poisoning_enabled_static()) |
3697 | __kernel_poison_pages(page, numpages); |
3698 | } |
3699 | static inline void kernel_unpoison_pages(struct page *page, int numpages) |
3700 | { |
3701 | if (page_poisoning_enabled_static()) |
3702 | __kernel_unpoison_pages(page, numpages); |
3703 | } |
3704 | #else |
3705 | static inline bool page_poisoning_enabled(void) { return false; } |
3706 | static inline bool page_poisoning_enabled_static(void) { return false; } |
static inline void __kernel_poison_pages(struct page *page, int numpages) { }
3708 | static inline void kernel_poison_pages(struct page *page, int numpages) { } |
3709 | static inline void kernel_unpoison_pages(struct page *page, int numpages) { } |
3710 | #endif |
3711 | |
3712 | DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); |
3713 | static inline bool want_init_on_alloc(gfp_t flags) |
3714 | { |
3715 | if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, |
3716 | &init_on_alloc)) |
3717 | return true; |
3718 | return flags & __GFP_ZERO; |
3719 | } |
3720 | |
3721 | DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); |
3722 | static inline bool want_init_on_free(void) |
3723 | { |
3724 | return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON, |
3725 | &init_on_free); |
3726 | } |
3727 | |
3728 | extern bool _debug_pagealloc_enabled_early; |
3729 | DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); |
3730 | |
3731 | static inline bool debug_pagealloc_enabled(void) |
3732 | { |
3733 | return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && |
3734 | _debug_pagealloc_enabled_early; |
3735 | } |
3736 | |
3737 | /* |
3738 | * For use in fast paths after mem_debugging_and_hardening_init() has run, |
3739 | * or when a false negative result is not harmful when called too early. |
3740 | */ |
3741 | static inline bool debug_pagealloc_enabled_static(void) |
3742 | { |
3743 | if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) |
3744 | return false; |
3745 | |
3746 | return static_branch_unlikely(&_debug_pagealloc_enabled); |
3747 | } |
3748 | |
3749 | /* |
 * To support DEBUG_PAGEALLOC, the architecture must ensure that
 * __kernel_map_pages() never fails.
3752 | */ |
3753 | extern void __kernel_map_pages(struct page *page, int numpages, int enable); |
3754 | #ifdef CONFIG_DEBUG_PAGEALLOC |
3755 | static inline void debug_pagealloc_map_pages(struct page *page, int numpages) |
3756 | { |
3757 | if (debug_pagealloc_enabled_static()) |
		__kernel_map_pages(page, numpages, 1);
3759 | } |
3760 | |
3761 | static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) |
3762 | { |
3763 | if (debug_pagealloc_enabled_static()) |
		__kernel_map_pages(page, numpages, 0);
3765 | } |
3766 | |
3767 | extern unsigned int _debug_guardpage_minorder; |
3768 | DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled); |
3769 | |
3770 | static inline unsigned int debug_guardpage_minorder(void) |
3771 | { |
3772 | return _debug_guardpage_minorder; |
3773 | } |
3774 | |
3775 | static inline bool debug_guardpage_enabled(void) |
3776 | { |
3777 | return static_branch_unlikely(&_debug_guardpage_enabled); |
3778 | } |
3779 | |
3780 | static inline bool page_is_guard(struct page *page) |
3781 | { |
3782 | if (!debug_guardpage_enabled()) |
3783 | return false; |
3784 | |
3785 | return PageGuard(page); |
3786 | } |
3787 | |
3788 | bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order, |
3789 | int migratetype); |
3790 | static inline bool set_page_guard(struct zone *zone, struct page *page, |
3791 | unsigned int order, int migratetype) |
3792 | { |
3793 | if (!debug_guardpage_enabled()) |
3794 | return false; |
3795 | return __set_page_guard(zone, page, order, migratetype); |
3796 | } |
3797 | |
3798 | void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order, |
3799 | int migratetype); |
3800 | static inline void clear_page_guard(struct zone *zone, struct page *page, |
3801 | unsigned int order, int migratetype) |
3802 | { |
3803 | if (!debug_guardpage_enabled()) |
3804 | return; |
3805 | __clear_page_guard(zone, page, order, migratetype); |
3806 | } |
3807 | |
3808 | #else /* CONFIG_DEBUG_PAGEALLOC */ |
3809 | static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {} |
3810 | static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {} |
3811 | static inline unsigned int debug_guardpage_minorder(void) { return 0; } |
3812 | static inline bool debug_guardpage_enabled(void) { return false; } |
3813 | static inline bool page_is_guard(struct page *page) { return false; } |
3814 | static inline bool set_page_guard(struct zone *zone, struct page *page, |
3815 | unsigned int order, int migratetype) { return false; } |
3816 | static inline void clear_page_guard(struct zone *zone, struct page *page, |
3817 | unsigned int order, int migratetype) {} |
3818 | #endif /* CONFIG_DEBUG_PAGEALLOC */ |
3819 | |
3820 | #ifdef __HAVE_ARCH_GATE_AREA |
3821 | extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); |
3822 | extern int in_gate_area_no_mm(unsigned long addr); |
3823 | extern int in_gate_area(struct mm_struct *mm, unsigned long addr); |
3824 | #else |
3825 | static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm) |
3826 | { |
3827 | return NULL; |
3828 | } |
3829 | static inline int in_gate_area_no_mm(unsigned long addr) { return 0; } |
3830 | static inline int in_gate_area(struct mm_struct *mm, unsigned long addr) |
3831 | { |
3832 | return 0; |
3833 | } |
3834 | #endif /* __HAVE_ARCH_GATE_AREA */ |
3835 | |
3836 | extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm); |
3837 | |
3838 | #ifdef CONFIG_SYSCTL |
3839 | extern int sysctl_drop_caches; |
3840 | int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *, |
3841 | loff_t *); |
3842 | #endif |
3843 | |
3844 | void drop_slab(void); |
3845 | |
3846 | #ifndef CONFIG_MMU |
3847 | #define randomize_va_space 0 |
3848 | #else |
3849 | extern int randomize_va_space; |
3850 | #endif |
3851 | |
3852 | const char * arch_vma_name(struct vm_area_struct *vma); |
3853 | #ifdef CONFIG_MMU |
3854 | void print_vma_addr(char *prefix, unsigned long rip); |
3855 | #else |
3856 | static inline void print_vma_addr(char *prefix, unsigned long rip) |
3857 | { |
3858 | } |
3859 | #endif |
3860 | |
3861 | void *sparse_buffer_alloc(unsigned long size); |
3862 | struct page * __populate_section_memmap(unsigned long pfn, |
3863 | unsigned long nr_pages, int nid, struct vmem_altmap *altmap, |
3864 | struct dev_pagemap *pgmap); |
3865 | void pmd_init(void *addr); |
3866 | void pud_init(void *addr); |
3867 | pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); |
3868 | p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); |
3869 | pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); |
3870 | pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); |
3871 | pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, |
3872 | struct vmem_altmap *altmap, struct page *reuse); |
3873 | void *vmemmap_alloc_block(unsigned long size, int node); |
3874 | struct vmem_altmap; |
3875 | void *vmemmap_alloc_block_buf(unsigned long size, int node, |
3876 | struct vmem_altmap *altmap); |
3877 | void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); |
3878 | void vmemmap_set_pmd(pmd_t *pmd, void *p, int node, |
3879 | unsigned long addr, unsigned long next); |
3880 | int vmemmap_check_pmd(pmd_t *pmd, int node, |
3881 | unsigned long addr, unsigned long next); |
3882 | int vmemmap_populate_basepages(unsigned long start, unsigned long end, |
3883 | int node, struct vmem_altmap *altmap); |
3884 | int vmemmap_populate_hugepages(unsigned long start, unsigned long end, |
3885 | int node, struct vmem_altmap *altmap); |
3886 | int vmemmap_populate(unsigned long start, unsigned long end, int node, |
3887 | struct vmem_altmap *altmap); |
3888 | void vmemmap_populate_print_last(void); |
3889 | #ifdef CONFIG_MEMORY_HOTPLUG |
3890 | void vmemmap_free(unsigned long start, unsigned long end, |
3891 | struct vmem_altmap *altmap); |
3892 | #endif |
3893 | |
3894 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
3895 | static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) |
3896 | { |
3897 | /* number of pfns from base where pfn_to_page() is valid */ |
3898 | if (altmap) |
3899 | return altmap->reserve + altmap->free; |
3900 | return 0; |
3901 | } |
3902 | |
3903 | static inline void vmem_altmap_free(struct vmem_altmap *altmap, |
3904 | unsigned long nr_pfns) |
3905 | { |
3906 | altmap->alloc -= nr_pfns; |
3907 | } |
3908 | #else |
3909 | static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) |
3910 | { |
3911 | return 0; |
3912 | } |
3913 | |
3914 | static inline void vmem_altmap_free(struct vmem_altmap *altmap, |
3915 | unsigned long nr_pfns) |
3916 | { |
3917 | } |
3918 | #endif |
3919 | |
3920 | #define VMEMMAP_RESERVE_NR 2 |
3921 | #ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP |
3922 | static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap, |
3923 | struct dev_pagemap *pgmap) |
3924 | { |
3925 | unsigned long nr_pages; |
3926 | unsigned long nr_vmemmap_pages; |
3927 | |
	if (!pgmap || !is_power_of_2(sizeof(struct page)))
3929 | return false; |
3930 | |
3931 | nr_pages = pgmap_vmemmap_nr(pgmap); |
3932 | nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT); |
3933 | /* |
	 * For vmemmap optimization with DAX we need a minimum of 2 vmemmap
	 * pages. See the layout diagram in Documentation/mm/vmemmap_dedup.rst.
3936 | */ |
3937 | return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR); |
3938 | } |
3939 | /* |
3940 | * If we don't have an architecture override, use the generic rule |
3941 | */ |
3942 | #ifndef vmemmap_can_optimize |
3943 | #define vmemmap_can_optimize __vmemmap_can_optimize |
3944 | #endif |
3945 | |
3946 | #else |
3947 | static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap, |
3948 | struct dev_pagemap *pgmap) |
3949 | { |
3950 | return false; |
3951 | } |
3952 | #endif |
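
/*
 * Worked example for __vmemmap_can_optimize(), assuming 4K base pages and
 * a 64-byte struct page: a 1G device pgmap covers 262144 pages, whose
 * memmap needs 262144 * 64 / 4096 = 4096 vmemmap pages. That is well above
 * VMEMMAP_RESERVE_NR (2), so the optimization can apply as long as no
 * altmap is being used for the region.
 */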
3953 | |
3954 | void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, |
3955 | unsigned long nr_pages); |
3956 | |
3957 | enum mf_flags { |
3958 | MF_COUNT_INCREASED = 1 << 0, |
3959 | MF_ACTION_REQUIRED = 1 << 1, |
3960 | MF_MUST_KILL = 1 << 2, |
3961 | MF_SOFT_OFFLINE = 1 << 3, |
3962 | MF_UNPOISON = 1 << 4, |
3963 | MF_SW_SIMULATED = 1 << 5, |
3964 | MF_NO_RETRY = 1 << 6, |
3965 | MF_MEM_PRE_REMOVE = 1 << 7, |
3966 | }; |
3967 | int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, |
3968 | unsigned long count, int mf_flags); |
3969 | extern int memory_failure(unsigned long pfn, int flags); |
3970 | extern void memory_failure_queue_kick(int cpu); |
3971 | extern int unpoison_memory(unsigned long pfn); |
3972 | extern void shake_page(struct page *p); |
3973 | extern atomic_long_t num_poisoned_pages __read_mostly; |
3974 | extern int soft_offline_page(unsigned long pfn, int flags); |
3975 | #ifdef CONFIG_MEMORY_FAILURE |
3976 | /* |
3977 | * Sysfs entries for memory failure handling statistics. |
3978 | */ |
3979 | extern const struct attribute_group memory_failure_attr_group; |
3980 | extern void memory_failure_queue(unsigned long pfn, int flags); |
3981 | extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags, |
3982 | bool *migratable_cleared); |
3983 | void num_poisoned_pages_inc(unsigned long pfn); |
3984 | void num_poisoned_pages_sub(unsigned long pfn, long i); |
3985 | struct task_struct *task_early_kill(struct task_struct *tsk, int force_early); |
3986 | #else |
3987 | static inline void memory_failure_queue(unsigned long pfn, int flags) |
3988 | { |
3989 | } |
3990 | |
3991 | static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags, |
3992 | bool *migratable_cleared) |
3993 | { |
3994 | return 0; |
3995 | } |
3996 | |
3997 | static inline void num_poisoned_pages_inc(unsigned long pfn) |
3998 | { |
3999 | } |
4000 | |
4001 | static inline void num_poisoned_pages_sub(unsigned long pfn, long i) |
4002 | { |
4003 | } |
4004 | #endif |
4005 | |
4006 | #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_KSM) |
4007 | void add_to_kill_ksm(struct task_struct *tsk, struct page *p, |
4008 | struct vm_area_struct *vma, struct list_head *to_kill, |
4009 | unsigned long ksm_addr); |
4010 | #endif |
4011 | |
4012 | #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG) |
4013 | extern void memblk_nr_poison_inc(unsigned long pfn); |
4014 | extern void memblk_nr_poison_sub(unsigned long pfn, long i); |
4015 | #else |
4016 | static inline void memblk_nr_poison_inc(unsigned long pfn) |
4017 | { |
4018 | } |
4019 | |
4020 | static inline void memblk_nr_poison_sub(unsigned long pfn, long i) |
4021 | { |
4022 | } |
4023 | #endif |
4024 | |
4025 | #ifndef arch_memory_failure |
4026 | static inline int arch_memory_failure(unsigned long pfn, int flags) |
4027 | { |
4028 | return -ENXIO; |
4029 | } |
4030 | #endif |
4031 | |
4032 | #ifndef arch_is_platform_page |
4033 | static inline bool arch_is_platform_page(u64 paddr) |
4034 | { |
4035 | return false; |
4036 | } |
4037 | #endif |
4038 | |
4039 | /* |
4040 | * Error handlers for various types of pages. |
4041 | */ |
4042 | enum mf_result { |
4043 | MF_IGNORED, /* Error: cannot be handled */ |
4044 | MF_FAILED, /* Error: handling failed */ |
4045 | MF_DELAYED, /* Will be handled later */ |
4046 | MF_RECOVERED, /* Successfully recovered */ |
4047 | }; |
4048 | |
4049 | enum mf_action_page_type { |
4050 | MF_MSG_KERNEL, |
4051 | MF_MSG_KERNEL_HIGH_ORDER, |
4052 | MF_MSG_SLAB, |
4053 | MF_MSG_DIFFERENT_COMPOUND, |
4054 | MF_MSG_HUGE, |
4055 | MF_MSG_FREE_HUGE, |
4056 | MF_MSG_UNMAP_FAILED, |
4057 | MF_MSG_DIRTY_SWAPCACHE, |
4058 | MF_MSG_CLEAN_SWAPCACHE, |
4059 | MF_MSG_DIRTY_MLOCKED_LRU, |
4060 | MF_MSG_CLEAN_MLOCKED_LRU, |
4061 | MF_MSG_DIRTY_UNEVICTABLE_LRU, |
4062 | MF_MSG_CLEAN_UNEVICTABLE_LRU, |
4063 | MF_MSG_DIRTY_LRU, |
4064 | MF_MSG_CLEAN_LRU, |
4065 | MF_MSG_TRUNCATED_LRU, |
4066 | MF_MSG_BUDDY, |
4067 | MF_MSG_DAX, |
4068 | MF_MSG_UNSPLIT_THP, |
4069 | MF_MSG_UNKNOWN, |
4070 | }; |
4071 | |
4072 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) |
4073 | extern void clear_huge_page(struct page *page, |
4074 | unsigned long addr_hint, |
4075 | unsigned int pages_per_huge_page); |
4076 | int copy_user_large_folio(struct folio *dst, struct folio *src, |
4077 | unsigned long addr_hint, |
4078 | struct vm_area_struct *vma); |
4079 | long copy_folio_from_user(struct folio *dst_folio, |
4080 | const void __user *usr_src, |
4081 | bool allow_pagefault); |
4082 | |
4083 | /** |
4084 | * vma_is_special_huge - Are transhuge page-table entries considered special? |
4085 | * @vma: Pointer to the struct vm_area_struct to consider |
4086 | * |
4087 | * Whether transhuge page-table entries are considered "special" following |
4088 | * the definition in vm_normal_page(). |
4089 | * |
4090 | * Return: true if transhuge page-table entries should be considered special, |
4091 | * false otherwise. |
4092 | */ |
4093 | static inline bool vma_is_special_huge(const struct vm_area_struct *vma) |
4094 | { |
4095 | return vma_is_dax(vma) || (vma->vm_file && |
4096 | (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))); |
4097 | } |
4098 | |
4099 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ |
4100 | |
4101 | #if MAX_NUMNODES > 1 |
4102 | void __init setup_nr_node_ids(void); |
4103 | #else |
4104 | static inline void setup_nr_node_ids(void) {} |
4105 | #endif |
4106 | |
4107 | extern int memcmp_pages(struct page *page1, struct page *page2); |
4108 | |
4109 | static inline int pages_identical(struct page *page1, struct page *page2) |
4110 | { |
4111 | return !memcmp_pages(page1, page2); |
4112 | } |
4113 | |
4114 | #ifdef CONFIG_MAPPING_DIRTY_HELPERS |
4115 | unsigned long clean_record_shared_mapping_range(struct address_space *mapping, |
4116 | pgoff_t first_index, pgoff_t nr, |
4117 | pgoff_t bitmap_pgoff, |
4118 | unsigned long *bitmap, |
4119 | pgoff_t *start, |
4120 | pgoff_t *end); |
4121 | |
4122 | unsigned long wp_shared_mapping_range(struct address_space *mapping, |
4123 | pgoff_t first_index, pgoff_t nr); |
4124 | #endif |
4125 | |
4126 | extern int sysctl_nr_trim_pages; |
4127 | |
4128 | #ifdef CONFIG_PRINTK |
4129 | void mem_dump_obj(void *object); |
4130 | #else |
4131 | static inline void mem_dump_obj(void *object) {} |
4132 | #endif |
4133 | |
4134 | /** |
4135 | * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and |
4136 | * handle them. |
4137 | * @seals: the seals to check |
4138 | * @vma: the vma to operate on |
4139 | * |
4140 | * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do proper |
 * check/handling on the vma flags. Return 0 if the checks pass, or <0 on error.
4142 | */ |
4143 | static inline int seal_check_write(int seals, struct vm_area_struct *vma) |
4144 | { |
4145 | if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { |
4146 | /* |
4147 | * New PROT_WRITE and MAP_SHARED mmaps are not allowed when |
4148 | * write seals are active. |
4149 | */ |
4150 | if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) |
4151 | return -EPERM; |
4152 | |
4153 | /* |
4154 | * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as |
4155 | * MAP_SHARED and read-only, take care to not allow mprotect to |
		 * revert protections on such mappings. Do this only for shared
		 * mappings. For private mappings, we don't need to mask
		 * VM_MAYWRITE as we still want them to be COW-writable.
4159 | */ |
4160 | if (vma->vm_flags & VM_SHARED) |
4161 | vm_flags_clear(vma, VM_MAYWRITE); |
4162 | } |
4163 | |
4164 | return 0; |
4165 | } |
4166 | |
4167 | #ifdef CONFIG_ANON_VMA_NAME |
4168 | int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, |
4169 | unsigned long len_in, |
4170 | struct anon_vma_name *anon_name); |
4171 | #else |
4172 | static inline int |
4173 | madvise_set_anon_name(struct mm_struct *mm, unsigned long start, |
4174 | unsigned long len_in, struct anon_vma_name *anon_name) { |
4175 | return 0; |
4176 | } |
4177 | #endif |
4178 | |
4179 | #ifdef CONFIG_UNACCEPTED_MEMORY |
4180 | |
4181 | bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end); |
4182 | void accept_memory(phys_addr_t start, phys_addr_t end); |
4183 | |
4184 | #else |
4185 | |
4186 | static inline bool range_contains_unaccepted_memory(phys_addr_t start, |
4187 | phys_addr_t end) |
4188 | { |
4189 | return false; |
4190 | } |
4191 | |
4192 | static inline void accept_memory(phys_addr_t start, phys_addr_t end) |
4193 | { |
4194 | } |
4195 | |
4196 | #endif |
4197 | |
4198 | static inline bool pfn_is_unaccepted_memory(unsigned long pfn) |
4199 | { |
4200 | phys_addr_t paddr = pfn << PAGE_SHIFT; |
4201 | |
	return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE);
4203 | } |
4204 | |
4205 | #endif /* _LINUX_MM_H */ |
4206 | |