1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* |
3 | * Macros for manipulating and testing page->flags |
4 | */ |
5 | |
6 | #ifndef PAGE_FLAGS_H |
7 | #define PAGE_FLAGS_H |
8 | |
9 | #include <linux/types.h> |
10 | #include <linux/bug.h> |
11 | #include <linux/mmdebug.h> |
12 | #ifndef __GENERATING_BOUNDS_H |
13 | #include <linux/mm_types.h> |
14 | #include <generated/bounds.h> |
15 | #endif /* !__GENERATING_BOUNDS_H */ |
16 | |
17 | /* |
18 | * Various page->flags bits: |
19 | * |
20 | * PG_reserved is set for special pages. The "struct page" of such a page |
21 | * should in general not be touched (e.g. set dirty) except by its owner. |
22 | * Pages marked as PG_reserved include: |
23 | * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS, |
24 | * initrd, HW tables) |
25 | * - Pages reserved or allocated early during boot (before the page allocator |
26 | * was initialized). This includes (depending on the architecture) the |
27 | * initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much |
28 | * much more. Once (if ever) freed, PG_reserved is cleared and they will |
29 | * be given to the page allocator. |
30 | * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying |
31 | * to read/write these pages might end badly. Don't touch! |
32 | * - The zero page(s) |
33 | * - Pages not added to the page allocator when onlining a section because |
34 | * they were excluded via the online_page_callback() or because they are |
35 | * PG_hwpoison. |
36 | * - Pages allocated in the context of kexec/kdump (loaded kernel image, |
37 | * control pages, vmcoreinfo) |
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that are
39 | * not marked PG_reserved (as they might be in use by somebody else who does |
40 | * not respect the caching strategy). |
41 | * - Pages part of an offline section (struct pages of offline sections should |
42 | * not be trusted as they will be initialized when first onlined). |
43 | * - MCA pages on ia64 |
44 | * - Pages holding CPU notes for POWER Firmware Assisted Dump |
45 | * - Device memory (e.g. PMEM, DAX, HMM) |
46 | * Some PG_reserved pages will be excluded from the hibernation image. |
 * In general, PG_reserved does not prevent a page from being dumped or swapped
 * and is no longer required for remap_pfn_range(); ioremap might still require it.
49 | * Consequently, PG_reserved for a page mapped into user space can indicate |
50 | * the zero page, the vDSO, MMIO pages or device memory. |
51 | * |
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
55 | * |
56 | * During initiation of disk I/O, PG_locked is set. This bit is set before I/O |
57 | * and cleared when writeback _starts_ or when read _completes_. PG_writeback |
58 | * is set before writeback starts and cleared when it finishes. |
59 | * |
60 | * PG_locked also pins a page in pagecache, and blocks truncation of the file |
61 | * while it is held. |
62 | * |
63 | * page_waitqueue(page) is a wait queue of all tasks waiting for the page |
64 | * to become unlocked. |
65 | * |
 * PG_swapbacked is set when a page uses swap as its backing storage. These are
 * usually PageAnon or shmem pages, but note that even anonymous pages may lose
 * their PG_swapbacked flag when they can simply be dropped (e.g. as a result
 * of MADV_FREE).
70 | * |
71 | * PG_referenced, PG_reclaim are used for page reclaim for anonymous and |
72 | * file-backed pagecache (see mm/vmscan.c). |
73 | * |
74 | * PG_error is set to indicate that an I/O error occurred on this page. |
75 | * |
 * PG_arch_1 is an architecture-specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it is first entered into
 * the page cache.
79 | * |
80 | * PG_hwpoison indicates that a page got corrupted in hardware and contains |
81 | * data with incorrect ECC bits that triggered a machine check. Accessing is |
82 | * not safe since it may cause another machine check. Don't touch! |
83 | */ |
84 | |
85 | /* |
86 | * Don't use the pageflags directly. Use the PageFoo macros. |
87 | * |
88 | * The page flags field is split into two parts, the main flags area |
89 | * which extends from the low bits upwards, and the fields area which |
90 | * extends from the high bits downwards. |
91 | * |
92 | * | FIELD | ... | FLAGS | |
93 | * N-1 ^ 0 |
94 | * (NR_PAGEFLAGS) |
95 | * |
96 | * The fields area is reserved for fields mapping zone, node (for NUMA) and |
97 | * SPARSEMEM section (for variants of SPARSEMEM that require section ids like |
98 | * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP). |
99 | */ |
100 | enum pageflags { |
101 | PG_locked, /* Page is locked. Don't touch. */ |
102 | PG_referenced, |
103 | PG_uptodate, |
104 | PG_dirty, |
105 | PG_lru, |
106 | PG_active, |
107 | PG_workingset, |
108 | PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */ |
109 | PG_error, |
110 | PG_slab, |
111 | PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/ |
112 | PG_arch_1, |
113 | PG_reserved, |
114 | PG_private, /* If pagecache, has fs-private data */ |
115 | PG_private_2, /* If pagecache, has fs aux data */ |
116 | PG_writeback, /* Page is under writeback */ |
117 | PG_head, /* A head page */ |
118 | PG_mappedtodisk, /* Has blocks allocated on-disk */ |
119 | PG_reclaim, /* To be reclaimed asap */ |
120 | PG_swapbacked, /* Page is backed by RAM/swap */ |
121 | PG_unevictable, /* Page is "unevictable" */ |
122 | #ifdef CONFIG_MMU |
123 | PG_mlocked, /* Page is vma mlocked */ |
124 | #endif |
125 | #ifdef CONFIG_ARCH_USES_PG_UNCACHED |
126 | PG_uncached, /* Page has been mapped as uncached */ |
127 | #endif |
128 | #ifdef CONFIG_MEMORY_FAILURE |
129 | PG_hwpoison, /* hardware poisoned page. Don't touch */ |
130 | #endif |
131 | #if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) |
132 | PG_young, |
133 | PG_idle, |
134 | #endif |
135 | #ifdef CONFIG_64BIT |
136 | PG_arch_2, |
137 | #endif |
138 | #ifdef CONFIG_KASAN_HW_TAGS |
139 | PG_skip_kasan_poison, |
140 | #endif |
141 | __NR_PAGEFLAGS, |
142 | |
143 | PG_readahead = PG_reclaim, |
144 | |
145 | /* |
146 | * Depending on the way an anonymous folio can be mapped into a page |
147 | * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped |
148 | * THP), PG_anon_exclusive may be set only for the head page or for |
149 | * tail pages of an anonymous folio. For now, we only expect it to be |
150 | * set on tail pages for PTE-mapped THP. |
151 | */ |
152 | PG_anon_exclusive = PG_mappedtodisk, |
153 | |
154 | /* Filesystems */ |
155 | PG_checked = PG_owner_priv_1, |
156 | |
157 | /* SwapBacked */ |
158 | PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */ |
159 | |
160 | /* Two page bits are conscripted by FS-Cache to maintain local caching |
161 | * state. These bits are set on pages belonging to the netfs's inodes |
162 | * when those inodes are being locally cached. |
163 | */ |
164 | PG_fscache = PG_private_2, /* page backed by cache */ |
165 | |
166 | /* XEN */ |
167 | /* Pinned in Xen as a read-only pagetable page. */ |
168 | PG_pinned = PG_owner_priv_1, |
169 | /* Pinned as part of domain save (see xen_mm_pin_all()). */ |
170 | PG_savepinned = PG_dirty, |
171 | /* Has a grant mapping of another (foreign) domain's page. */ |
172 | PG_foreign = PG_owner_priv_1, |
173 | /* Remapped by swiotlb-xen. */ |
174 | PG_xen_remapped = PG_owner_priv_1, |
175 | |
176 | /* SLOB */ |
177 | PG_slob_free = PG_private, |
178 | |
179 | /* Compound pages. Stored in first tail page's flags */ |
180 | PG_double_map = PG_workingset, |
181 | |
182 | #ifdef CONFIG_MEMORY_FAILURE |
183 | /* |
184 | * Compound pages. Stored in first tail page's flags. |
185 | * Indicates that at least one subpage is hwpoisoned in the |
186 | * THP. |
187 | */ |
188 | PG_has_hwpoisoned = PG_error, |
189 | #endif |
190 | |
191 | /* non-lru isolated movable page */ |
192 | PG_isolated = PG_reclaim, |
193 | |
194 | /* Only valid for buddy pages. Used to track pages that are reported */ |
195 | PG_reported = PG_uptodate, |
196 | |
197 | #ifdef CONFIG_MEMORY_HOTPLUG |
198 | /* For self-hosted memmap pages */ |
199 | PG_vmemmap_self_hosted = PG_owner_priv_1, |
200 | #endif |
201 | }; |
202 | |
203 | #define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1) |
204 | |
205 | #ifndef __GENERATING_BOUNDS_H |
206 | |
207 | #ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP |
208 | DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key); |
209 | |
210 | /* |
211 | * Return the real head page struct iff the @page is a fake head page, otherwise |
212 | * return the @page itself. See Documentation/mm/vmemmap_dedup.rst. |
213 | */ |
214 | static __always_inline const struct page *page_fixed_fake_head(const struct page *page) |
215 | { |
216 | if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key)) |
217 | return page; |
218 | |
219 | /* |
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake head
	 * struct page. The alignment check avoids accessing the fields of
	 * @page[1] (e.g. compound_head) unnecessarily, which can avoid touching
	 * a (possibly) cold cacheline in some cases.
224 | */ |
225 | if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) && |
226 | test_bit(PG_head, &page->flags)) { |
227 | /* |
		 * We can safely access the fields of @page[1] when PG_head is
		 * set, because @page is then a compound page composed of at
		 * least two contiguous pages.
231 | */ |
232 | unsigned long head = READ_ONCE(page[1].compound_head); |
233 | |
234 | if (likely(head & 1)) |
235 | return (const struct page *)(head - 1); |
236 | } |
237 | return page; |
238 | } |
239 | #else |
240 | static inline const struct page *page_fixed_fake_head(const struct page *page) |
241 | { |
242 | return page; |
243 | } |
244 | #endif |
245 | |
246 | static __always_inline int page_is_fake_head(struct page *page) |
247 | { |
248 | return page_fixed_fake_head(page) != page; |
249 | } |
250 | |
251 | static inline unsigned long _compound_head(const struct page *page) |
252 | { |
253 | unsigned long head = READ_ONCE(page->compound_head); |
254 | |
255 | if (unlikely(head & 1)) |
256 | return head - 1; |
257 | return (unsigned long)page_fixed_fake_head(page); |
258 | } |
259 | |
260 | #define compound_head(page) ((typeof(page))_compound_head(page)) |
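/*
 * Illustrative sketch (not part of the original header): a tail page stores a
 * pointer to its head page in page->compound_head with bit 0 set, which is why
 * _compound_head() above clears that bit. Typical use, assuming "page" may be
 * any page:
 *
 *	struct page *head = compound_head(page);
 *	// head == page for base pages and head pages; for a tail page, head
 *	// points at the compound page's head (modulo the HugeTLB "fake head"
 *	// handling in page_fixed_fake_head()).
 */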
261 | |
262 | /** |
263 | * page_folio - Converts from page to folio. |
264 | * @p: The page. |
265 | * |
266 | * Every page is part of a folio. This function cannot be called on a |
267 | * NULL pointer. |
268 | * |
 * Context: Neither a reference nor a lock is required on @p. If the
 * caller does not hold a reference, this call may race with a folio
 * split, so it should re-check that the folio still contains this page
 * after gaining a reference on the folio.
273 | * Return: The folio which contains this page. |
274 | */ |
275 | #define page_folio(p) (_Generic((p), \ |
276 | const struct page *: (const struct folio *)_compound_head(p), \ |
277 | struct page *: (struct folio *)_compound_head(p))) |
278 | |
279 | /** |
280 | * folio_page - Return a page from a folio. |
281 | * @folio: The folio. |
282 | * @n: The page number to return. |
283 | * |
284 | * @n is relative to the start of the folio. This function does not |
285 | * check that the page number lies within @folio; the caller is presumed |
286 | * to have a reference to the page. |
287 | */ |
288 | #define folio_page(folio, n) nth_page(&(folio)->page, n) |
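/*
 * Illustrative sketch (not part of the original header) of the lockless
 * pattern suggested in the page_folio() description above: convert, take a
 * reference, then re-check that the page still belongs to the folio.
 * folio_try_get()/folio_put() are assumed to be available from linux/mm.h.
 *
 *	struct folio *folio = page_folio(page);
 *
 *	if (!folio_try_get(folio))
 *		goto retry;
 *	if (unlikely(page_folio(page) != folio)) {
 *		folio_put(folio);	// raced with a split
 *		goto retry;
 *	}
 *	// folio is now stable; folio_page(folio, 0) is its first page.
 */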
289 | |
290 | static __always_inline int PageTail(struct page *page) |
291 | { |
292 | return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page); |
293 | } |
294 | |
295 | static __always_inline int PageCompound(struct page *page) |
296 | { |
297 | return test_bit(PG_head, &page->flags) || |
298 | READ_ONCE(page->compound_head) & 1; |
299 | } |
300 | |
301 | #define PAGE_POISON_PATTERN -1l |
302 | static inline int PagePoisoned(const struct page *page) |
303 | { |
304 | return READ_ONCE(page->flags) == PAGE_POISON_PATTERN; |
305 | } |
306 | |
307 | #ifdef CONFIG_DEBUG_VM |
308 | void page_init_poison(struct page *page, size_t size); |
309 | #else |
310 | static inline void page_init_poison(struct page *page, size_t size) |
311 | { |
312 | } |
313 | #endif |
314 | |
315 | static unsigned long *folio_flags(struct folio *folio, unsigned n) |
316 | { |
317 | struct page *page = &folio->page; |
318 | |
319 | VM_BUG_ON_PGFLAGS(PageTail(page), page); |
320 | VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page); |
321 | return &page[n].flags; |
322 | } |
323 | |
324 | /* |
325 | * Page flags policies wrt compound pages |
326 | * |
 * PF_POISONED_CHECK:
 *     check whether this struct page is poisoned/uninitialized.
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
336 | * |
337 | * PF_ONLY_HEAD: |
338 | * for compound page, callers only ever operate on the head page. |
339 | * |
340 | * PF_NO_TAIL: |
341 | * modifications of the page flag must be done on small or head pages, |
342 | * checks can be done on tail pages too. |
343 | * |
344 | * PF_NO_COMPOUND: |
345 | * the page flag is not relevant for compound pages. |
346 | * |
347 | * PF_SECOND: |
348 | * the page flag is stored in the first tail page. |
349 | */ |
350 | #define PF_POISONED_CHECK(page) ({ \ |
351 | VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \ |
352 | page; }) |
353 | #define PF_ANY(page, enforce) PF_POISONED_CHECK(page) |
354 | #define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page)) |
355 | #define PF_ONLY_HEAD(page, enforce) ({ \ |
356 | VM_BUG_ON_PGFLAGS(PageTail(page), page); \ |
357 | PF_POISONED_CHECK(page); }) |
358 | #define PF_NO_TAIL(page, enforce) ({ \ |
359 | VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \ |
360 | PF_POISONED_CHECK(compound_head(page)); }) |
361 | #define PF_NO_COMPOUND(page, enforce) ({ \ |
362 | VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \ |
363 | PF_POISONED_CHECK(page); }) |
364 | #define PF_SECOND(page, enforce) ({ \ |
365 | VM_BUG_ON_PGFLAGS(!PageHead(page), page); \ |
366 | PF_POISONED_CHECK(&page[1]); }) |
367 | |
368 | /* Which page is the flag stored in */ |
369 | #define FOLIO_PF_ANY 0 |
370 | #define FOLIO_PF_HEAD 0 |
371 | #define FOLIO_PF_ONLY_HEAD 0 |
372 | #define FOLIO_PF_NO_TAIL 0 |
373 | #define FOLIO_PF_NO_COMPOUND 0 |
374 | #define FOLIO_PF_SECOND 1 |
375 | |
376 | /* |
377 | * Macros to create function definitions for page flags |
378 | */ |
379 | #define TESTPAGEFLAG(uname, lname, policy) \ |
380 | static __always_inline bool folio_test_##lname(struct folio *folio) \ |
381 | { return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \ |
382 | static __always_inline int Page##uname(struct page *page) \ |
383 | { return test_bit(PG_##lname, &policy(page, 0)->flags); } |
384 | |
385 | #define SETPAGEFLAG(uname, lname, policy) \ |
386 | static __always_inline \ |
387 | void folio_set_##lname(struct folio *folio) \ |
388 | { set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \ |
389 | static __always_inline void SetPage##uname(struct page *page) \ |
390 | { set_bit(PG_##lname, &policy(page, 1)->flags); } |
391 | |
392 | #define CLEARPAGEFLAG(uname, lname, policy) \ |
393 | static __always_inline \ |
394 | void folio_clear_##lname(struct folio *folio) \ |
395 | { clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \ |
396 | static __always_inline void ClearPage##uname(struct page *page) \ |
397 | { clear_bit(PG_##lname, &policy(page, 1)->flags); } |
398 | |
399 | #define __SETPAGEFLAG(uname, lname, policy) \ |
400 | static __always_inline \ |
401 | void __folio_set_##lname(struct folio *folio) \ |
402 | { __set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \ |
403 | static __always_inline void __SetPage##uname(struct page *page) \ |
404 | { __set_bit(PG_##lname, &policy(page, 1)->flags); } |
405 | |
406 | #define __CLEARPAGEFLAG(uname, lname, policy) \ |
407 | static __always_inline \ |
408 | void __folio_clear_##lname(struct folio *folio) \ |
409 | { __clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \ |
410 | static __always_inline void __ClearPage##uname(struct page *page) \ |
411 | { __clear_bit(PG_##lname, &policy(page, 1)->flags); } |
412 | |
413 | #define TESTSETFLAG(uname, lname, policy) \ |
414 | static __always_inline \ |
415 | bool folio_test_set_##lname(struct folio *folio) \ |
416 | { return test_and_set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \ |
417 | static __always_inline int TestSetPage##uname(struct page *page) \ |
418 | { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); } |
419 | |
420 | #define TESTCLEARFLAG(uname, lname, policy) \ |
421 | static __always_inline \ |
422 | bool folio_test_clear_##lname(struct folio *folio) \ |
423 | { return test_and_clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \ |
424 | static __always_inline int TestClearPage##uname(struct page *page) \ |
425 | { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); } |
426 | |
427 | #define PAGEFLAG(uname, lname, policy) \ |
428 | TESTPAGEFLAG(uname, lname, policy) \ |
429 | SETPAGEFLAG(uname, lname, policy) \ |
430 | CLEARPAGEFLAG(uname, lname, policy) |
431 | |
432 | #define __PAGEFLAG(uname, lname, policy) \ |
433 | TESTPAGEFLAG(uname, lname, policy) \ |
434 | __SETPAGEFLAG(uname, lname, policy) \ |
435 | __CLEARPAGEFLAG(uname, lname, policy) |
436 | |
437 | #define TESTSCFLAG(uname, lname, policy) \ |
438 | TESTSETFLAG(uname, lname, policy) \ |
439 | TESTCLEARFLAG(uname, lname, policy) |
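/*
 * Worked example (illustrative only): PAGEFLAG(Dirty, dirty, PF_HEAD)
 * expands, roughly, to
 *
 *	static __always_inline bool folio_test_dirty(struct folio *folio)
 *	{ return test_bit(PG_dirty, folio_flags(folio, 0)); }
 *	static __always_inline int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *
 * plus the matching folio_set_dirty()/SetPageDirty() and
 * folio_clear_dirty()/ClearPageDirty() definitions. The page variants are
 * redirected to the head page by the PF_HEAD policy, while the folio variants
 * read the flags of the page selected by FOLIO_PF_HEAD (page 0 of the folio).
 */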
440 | |
441 | #define TESTPAGEFLAG_FALSE(uname, lname) \ |
442 | static inline bool folio_test_##lname(const struct folio *folio) { return false; } \ |
443 | static inline int Page##uname(const struct page *page) { return 0; } |
444 | |
445 | #define SETPAGEFLAG_NOOP(uname, lname) \ |
446 | static inline void folio_set_##lname(struct folio *folio) { } \ |
447 | static inline void SetPage##uname(struct page *page) { } |
448 | |
449 | #define CLEARPAGEFLAG_NOOP(uname, lname) \ |
450 | static inline void folio_clear_##lname(struct folio *folio) { } \ |
451 | static inline void ClearPage##uname(struct page *page) { } |
452 | |
453 | #define __CLEARPAGEFLAG_NOOP(uname, lname) \ |
454 | static inline void __folio_clear_##lname(struct folio *folio) { } \ |
455 | static inline void __ClearPage##uname(struct page *page) { } |
456 | |
457 | #define TESTSETFLAG_FALSE(uname, lname) \ |
458 | static inline bool folio_test_set_##lname(struct folio *folio) \ |
459 | { return 0; } \ |
460 | static inline int TestSetPage##uname(struct page *page) { return 0; } |
461 | |
462 | #define TESTCLEARFLAG_FALSE(uname, lname) \ |
463 | static inline bool folio_test_clear_##lname(struct folio *folio) \ |
464 | { return 0; } \ |
465 | static inline int TestClearPage##uname(struct page *page) { return 0; } |
466 | |
467 | #define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname) \ |
468 | SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname) |
469 | |
470 | #define TESTSCFLAG_FALSE(uname, lname) \ |
471 | TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname) |
472 | |
473 | __PAGEFLAG(Locked, locked, PF_NO_TAIL) |
474 | PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) |
475 | PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL) |
476 | PAGEFLAG(Referenced, referenced, PF_HEAD) |
477 | TESTCLEARFLAG(Referenced, referenced, PF_HEAD) |
478 | __SETPAGEFLAG(Referenced, referenced, PF_HEAD) |
479 | PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD) |
480 | __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD) |
481 | PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD) |
482 | TESTCLEARFLAG(LRU, lru, PF_HEAD) |
483 | PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD) |
484 | TESTCLEARFLAG(Active, active, PF_HEAD) |
485 | PAGEFLAG(Workingset, workingset, PF_HEAD) |
486 | TESTCLEARFLAG(Workingset, workingset, PF_HEAD) |
487 | __PAGEFLAG(Slab, slab, PF_NO_TAIL) |
488 | __PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL) |
489 | PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */ |
490 | |
491 | /* Xen */ |
492 | PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND) |
493 | TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND) |
494 | PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND); |
495 | PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND); |
496 | PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND) |
497 | TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND) |
498 | |
499 | PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) |
500 | __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) |
501 | __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) |
502 | PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) |
503 | __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) |
504 | __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) |
505 | |
506 | /* |
507 | * Private page markings that may be used by the filesystem that owns the page |
508 | * for its own purposes. |
509 | * - PG_private and PG_private_2 cause release_folio() and co to be invoked |
510 | */ |
511 | PAGEFLAG(Private, private, PF_ANY) |
512 | PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY) |
513 | PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY) |
514 | TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY) |
515 | |
516 | /* |
 * Only test, test-and-set and test-and-clear operations exist for PG_writeback.
 * The unconditional set/clear operators are risky: they bypass page accounting.
519 | */ |
520 | TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL) |
521 | TESTSCFLAG(Writeback, writeback, PF_NO_TAIL) |
522 | PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL) |
523 | |
524 | /* PG_readahead is only used for reads; PG_reclaim is only for writes */ |
525 | PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL) |
526 | TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL) |
527 | PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND) |
528 | TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND) |
529 | |
530 | #ifdef CONFIG_HIGHMEM |
531 | /* |
532 | * Must use a macro here due to header dependency issues. page_zone() is not |
533 | * available at this point. |
534 | */ |
535 | #define PageHighMem(__p) is_highmem_idx(page_zonenum(__p)) |
536 | #else |
537 | PAGEFLAG_FALSE(HighMem, highmem) |
538 | #endif |
539 | |
540 | #ifdef CONFIG_SWAP |
541 | static __always_inline bool folio_test_swapcache(struct folio *folio) |
542 | { |
543 | return folio_test_swapbacked(folio) && |
544 | test_bit(PG_swapcache, folio_flags(folio, 0)); |
545 | } |
546 | |
547 | static __always_inline bool PageSwapCache(struct page *page) |
548 | { |
549 | return folio_test_swapcache(page_folio(page)); |
550 | } |
551 | |
552 | SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL) |
553 | CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL) |
554 | #else |
555 | PAGEFLAG_FALSE(SwapCache, swapcache) |
556 | #endif |
557 | |
558 | PAGEFLAG(Unevictable, unevictable, PF_HEAD) |
559 | __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD) |
560 | TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD) |
561 | |
562 | #ifdef CONFIG_MMU |
563 | PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL) |
564 | __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL) |
565 | TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL) |
566 | #else |
567 | PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked) |
568 | TESTSCFLAG_FALSE(Mlocked, mlocked) |
569 | #endif |
570 | |
571 | #ifdef CONFIG_ARCH_USES_PG_UNCACHED |
572 | PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND) |
573 | #else |
574 | PAGEFLAG_FALSE(Uncached, uncached) |
575 | #endif |
576 | |
577 | #ifdef CONFIG_MEMORY_FAILURE |
578 | PAGEFLAG(HWPoison, hwpoison, PF_ANY) |
579 | TESTSCFLAG(HWPoison, hwpoison, PF_ANY) |
580 | #define __PG_HWPOISON (1UL << PG_hwpoison) |
581 | #define MAGIC_HWPOISON 0x48575053U /* HWPS */ |
582 | extern void SetPageHWPoisonTakenOff(struct page *page); |
583 | extern void ClearPageHWPoisonTakenOff(struct page *page); |
584 | extern bool take_page_off_buddy(struct page *page); |
585 | extern bool put_page_back_buddy(struct page *page); |
586 | #else |
587 | PAGEFLAG_FALSE(HWPoison, hwpoison) |
588 | #define __PG_HWPOISON 0 |
589 | #endif |
590 | |
591 | #if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) |
592 | TESTPAGEFLAG(Young, young, PF_ANY) |
593 | SETPAGEFLAG(Young, young, PF_ANY) |
594 | TESTCLEARFLAG(Young, young, PF_ANY) |
595 | PAGEFLAG(Idle, idle, PF_ANY) |
596 | #endif |
597 | |
598 | #ifdef CONFIG_KASAN_HW_TAGS |
599 | PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD) |
600 | #else |
601 | PAGEFLAG_FALSE(SkipKASanPoison, skip_kasan_poison) |
602 | #endif |
603 | |
604 | /* |
605 | * PageReported() is used to track reported free pages within the Buddy |
606 | * allocator. We can use the non-atomic version of the test and set |
607 | * operations as both should be shielded with the zone lock to prevent |
608 | * any possible races on the setting or clearing of the bit. |
609 | */ |
610 | __PAGEFLAG(Reported, reported, PF_NO_COMPOUND) |
611 | |
612 | #ifdef CONFIG_MEMORY_HOTPLUG |
613 | PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY) |
614 | #else |
615 | PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) |
616 | #endif |
617 | |
618 | /* |
619 | * On an anonymous page mapped into a user virtual memory area, |
620 | * page->mapping points to its anon_vma, not to a struct address_space; |
621 | * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. |
622 | * |
623 | * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, |
624 | * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON |
625 | * bit; and then page->mapping points, not to an anon_vma, but to a private |
626 | * structure which KSM associates with that merged page. See ksm.h. |
627 | * |
 * PAGE_MAPPING_MOVABLE without PAGE_MAPPING_ANON is used for a non-lru movable
 * page, and then page->mapping points to a struct movable_operations.
630 | * |
631 | * Please note that, confusingly, "page_mapping" refers to the inode |
632 | * address_space which maps the page from disk; whereas "page_mapped" |
633 | * refers to user virtual address space into which the page is mapped. |
634 | */ |
635 | #define PAGE_MAPPING_ANON 0x1 |
636 | #define PAGE_MAPPING_MOVABLE 0x2 |
637 | #define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) |
638 | #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) |
639 | |
640 | /* |
 * Unlike the flags above, this flag is used only in fsdax mode. It
 * indicates that this page->mapping is now in a reflink (CoW) case.
643 | */ |
644 | #define PAGE_MAPPING_DAX_COW 0x1 |
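/*
 * Illustrative summary (not part of the original header) of how the helpers
 * below decode the low bits of page->mapping:
 *
 *	unsigned long m = (unsigned long)READ_ONCE(page->mapping);
 *
 *	(m & PAGE_MAPPING_FLAGS) == 0				pagecache or unmapped
 *	(m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON		anon_vma pointer
 *	(m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE	non-LRU movable page
 *	(m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_KSM		KSM stable-tree node
 */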
645 | |
646 | static __always_inline bool folio_mapping_flags(struct folio *folio) |
647 | { |
648 | return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0; |
649 | } |
650 | |
651 | static __always_inline int PageMappingFlags(struct page *page) |
652 | { |
653 | return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0; |
654 | } |
655 | |
656 | static __always_inline bool folio_test_anon(struct folio *folio) |
657 | { |
658 | return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0; |
659 | } |
660 | |
661 | static __always_inline bool PageAnon(struct page *page) |
662 | { |
663 | return folio_test_anon(page_folio(page)); |
664 | } |
665 | |
666 | static __always_inline bool __folio_test_movable(const struct folio *folio) |
667 | { |
668 | return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) == |
669 | PAGE_MAPPING_MOVABLE; |
670 | } |
671 | |
672 | static __always_inline int __PageMovable(struct page *page) |
673 | { |
674 | return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == |
675 | PAGE_MAPPING_MOVABLE; |
676 | } |
677 | |
678 | #ifdef CONFIG_KSM |
679 | /* |
680 | * A KSM page is one of those write-protected "shared pages" or "merged pages" |
681 | * which KSM maps into multiple mms, wherever identical anonymous page content |
682 | * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any |
683 | * anon_vma, but to that page's node of the stable tree. |
684 | */ |
685 | static __always_inline bool folio_test_ksm(struct folio *folio) |
686 | { |
687 | return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) == |
688 | PAGE_MAPPING_KSM; |
689 | } |
690 | |
691 | static __always_inline bool PageKsm(struct page *page) |
692 | { |
693 | return folio_test_ksm(page_folio(page)); |
694 | } |
695 | #else |
696 | TESTPAGEFLAG_FALSE(Ksm, ksm) |
697 | #endif |
698 | |
699 | u64 stable_page_flags(struct page *page); |
700 | |
701 | /** |
702 | * folio_test_uptodate - Is this folio up to date? |
703 | * @folio: The folio. |
704 | * |
705 | * The uptodate flag is set on a folio when every byte in the folio is |
706 | * at least as new as the corresponding bytes on storage. Anonymous |
707 | * and CoW folios are always uptodate. If the folio is not uptodate, |
708 | * some of the bytes in it may be; see the is_partially_uptodate() |
709 | * address_space operation. |
710 | */ |
711 | static inline bool folio_test_uptodate(struct folio *folio) |
712 | { |
713 | bool ret = test_bit(PG_uptodate, folio_flags(folio, 0)); |
714 | /* |
715 | * Must ensure that the data we read out of the folio is loaded |
716 | * _after_ we've loaded folio->flags to check the uptodate bit. |
717 | * We can skip the barrier if the folio is not uptodate, because |
718 | * we wouldn't be reading anything from it. |
719 | * |
720 | * See folio_mark_uptodate() for the other side of the story. |
721 | */ |
722 | if (ret) |
723 | smp_rmb(); |
724 | |
725 | return ret; |
726 | } |
727 | |
728 | static inline int PageUptodate(struct page *page) |
729 | { |
730 | return folio_test_uptodate(page_folio(page)); |
731 | } |
732 | |
733 | static __always_inline void __folio_mark_uptodate(struct folio *folio) |
734 | { |
735 | smp_wmb(); |
736 | __set_bit(PG_uptodate, folio_flags(folio, 0)); |
737 | } |
738 | |
739 | static __always_inline void folio_mark_uptodate(struct folio *folio) |
740 | { |
741 | /* |
742 | * Memory barrier must be issued before setting the PG_uptodate bit, |
743 | * so that all previous stores issued in order to bring the folio |
744 | * uptodate are actually visible before folio_test_uptodate becomes true. |
745 | */ |
746 | smp_wmb(); |
747 | set_bit(PG_uptodate, folio_flags(folio, 0)); |
748 | } |
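/*
 * Illustrative sketch (not part of the original header) of how the barriers
 * above pair up; fill_folio_data() and read_folio_data() are hypothetical
 * helpers:
 *
 *	// writer, e.g. when a read from storage completes
 *	fill_folio_data(folio);
 *	folio_mark_uptodate(folio);	// smp_wmb(), then set PG_uptodate
 *
 *	// reader
 *	if (folio_test_uptodate(folio))	// test PG_uptodate, then smp_rmb()
 *		read_folio_data(folio);	// sees the writer's stores
 */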
749 | |
750 | static __always_inline void __SetPageUptodate(struct page *page) |
751 | { |
752 | __folio_mark_uptodate((struct folio *)page); |
753 | } |
754 | |
755 | static __always_inline void SetPageUptodate(struct page *page) |
756 | { |
757 | folio_mark_uptodate((struct folio *)page); |
758 | } |
759 | |
760 | CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL) |
761 | |
762 | bool __folio_start_writeback(struct folio *folio, bool keep_write); |
763 | bool set_page_writeback(struct page *page); |
764 | |
765 | #define folio_start_writeback(folio) \ |
766 | __folio_start_writeback(folio, false) |
767 | #define folio_start_writeback_keepwrite(folio) \ |
768 | __folio_start_writeback(folio, true) |
769 | |
770 | static inline void set_page_writeback_keepwrite(struct page *page) |
771 | { |
772 | folio_start_writeback_keepwrite(page_folio(page)); |
773 | } |
774 | |
775 | static inline bool test_set_page_writeback(struct page *page) |
776 | { |
777 | return set_page_writeback(page); |
778 | } |
779 | |
780 | static __always_inline bool folio_test_head(struct folio *folio) |
781 | { |
782 | return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY)); |
783 | } |
784 | |
785 | static __always_inline int PageHead(struct page *page) |
786 | { |
787 | PF_POISONED_CHECK(page); |
788 | return test_bit(PG_head, &page->flags) && !page_is_fake_head(page); |
789 | } |
790 | |
791 | __SETPAGEFLAG(Head, head, PF_ANY) |
792 | __CLEARPAGEFLAG(Head, head, PF_ANY) |
793 | CLEARPAGEFLAG(Head, head, PF_ANY) |
794 | |
795 | /** |
796 | * folio_test_large() - Does this folio contain more than one page? |
797 | * @folio: The folio to test. |
798 | * |
799 | * Return: True if the folio is larger than one page. |
800 | */ |
801 | static inline bool folio_test_large(struct folio *folio) |
802 | { |
803 | return folio_test_head(folio); |
804 | } |
805 | |
806 | static __always_inline void set_compound_head(struct page *page, struct page *head) |
807 | { |
808 | WRITE_ONCE(page->compound_head, (unsigned long)head + 1); |
809 | } |
810 | |
811 | static __always_inline void clear_compound_head(struct page *page) |
812 | { |
813 | WRITE_ONCE(page->compound_head, 0); |
814 | } |
815 | |
816 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
817 | static inline void ClearPageCompound(struct page *page) |
818 | { |
819 | BUG_ON(!PageHead(page)); |
820 | ClearPageHead(page); |
821 | } |
822 | #endif |
823 | |
824 | #define PG_head_mask ((1UL << PG_head)) |
825 | |
826 | #ifdef CONFIG_HUGETLB_PAGE |
827 | int PageHuge(struct page *page); |
828 | int PageHeadHuge(struct page *page); |
829 | static inline bool folio_test_hugetlb(struct folio *folio) |
830 | { |
831 | return PageHeadHuge(&folio->page); |
832 | } |
833 | #else |
834 | TESTPAGEFLAG_FALSE(Huge, hugetlb) |
835 | TESTPAGEFLAG_FALSE(HeadHuge, headhuge) |
836 | #endif |
837 | |
838 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
839 | /* |
840 | * PageHuge() only returns true for hugetlbfs pages, but not for |
841 | * normal or transparent huge pages. |
842 | * |
843 | * PageTransHuge() returns true for both transparent huge and |
 * hugetlbfs pages, but not normal pages. PageTransHuge() should only be
 * called in core VM paths where hugetlbfs pages can't exist.
846 | */ |
847 | static inline int PageTransHuge(struct page *page) |
848 | { |
849 | VM_BUG_ON_PAGE(PageTail(page), page); |
850 | return PageHead(page); |
851 | } |
852 | |
853 | static inline bool folio_test_transhuge(struct folio *folio) |
854 | { |
855 | return folio_test_head(folio); |
856 | } |
857 | |
858 | /* |
859 | * PageTransCompound returns true for both transparent huge pages |
860 | * and hugetlbfs pages, so it should only be called when it's known |
861 | * that hugetlbfs pages aren't involved. |
862 | */ |
863 | static inline int PageTransCompound(struct page *page) |
864 | { |
865 | return PageCompound(page); |
866 | } |
867 | |
868 | /* |
869 | * PageTransTail returns true for both transparent huge pages |
870 | * and hugetlbfs pages, so it should only be called when it's known |
871 | * that hugetlbfs pages aren't involved. |
872 | */ |
873 | static inline int PageTransTail(struct page *page) |
874 | { |
875 | return PageTail(page); |
876 | } |
877 | |
878 | /* |
879 | * PageDoubleMap indicates that the compound page is mapped with PTEs as well |
880 | * as PMDs. |
881 | * |
 * This is required for the optimization of rmap operations for THP: we can
 * postpone per-small-page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * For the page, PageDoubleMap means ->_mapcount in all sub-pages is offset up
 * by one. This reference goes away with the last compound_mapcount.
888 | * |
889 | * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap(). |
890 | */ |
891 | PAGEFLAG(DoubleMap, double_map, PF_SECOND) |
892 | TESTSCFLAG(DoubleMap, double_map, PF_SECOND) |
893 | #else |
894 | TESTPAGEFLAG_FALSE(TransHuge, transhuge) |
895 | TESTPAGEFLAG_FALSE(TransCompound, transcompound) |
896 | TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap) |
897 | TESTPAGEFLAG_FALSE(TransTail, transtail) |
898 | PAGEFLAG_FALSE(DoubleMap, double_map) |
899 | TESTSCFLAG_FALSE(DoubleMap, double_map) |
900 | #endif |
901 | |
902 | #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE) |
903 | /* |
904 | * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the |
905 | * compound page. |
906 | * |
 * This flag is set by the hwpoison handler and cleared by THP split or page free.
908 | */ |
909 | PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND) |
910 | TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND) |
911 | #else |
912 | PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned) |
913 | TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned) |
914 | #endif |
915 | |
916 | /* |
917 | * Check if a page is currently marked HWPoisoned. Note that this check is |
918 | * best effort only and inherently racy: there is no way to synchronize with |
919 | * failing hardware. |
920 | */ |
921 | static inline bool is_page_hwpoison(struct page *page) |
922 | { |
923 | if (PageHWPoison(page)) |
924 | return true; |
925 | return PageHuge(page) && PageHWPoison(compound_head(page)); |
926 | } |
927 | |
928 | /* |
929 | * For pages that are never mapped to userspace (and aren't PageSlab), |
930 | * page_type may be used. Because it is initialised to -1, we invert the |
931 | * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and |
932 | * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and |
933 | * low bits so that an underflow or overflow of page_mapcount() won't be |
934 | * mistaken for a page type value. |
935 | */ |
936 | |
937 | #define PAGE_TYPE_BASE 0xf0000000 |
938 | /* Reserve 0x0000007f to catch underflows of page_mapcount */ |
939 | #define PAGE_MAPCOUNT_RESERVE -128 |
940 | #define PG_buddy 0x00000080 |
941 | #define PG_offline 0x00000100 |
942 | #define PG_table 0x00000200 |
943 | #define PG_guard 0x00000400 |
944 | |
945 | #define PageType(page, flag) \ |
946 | ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) |
947 | |
948 | static inline int page_has_type(struct page *page) |
949 | { |
950 | return (int)page->page_type < PAGE_MAPCOUNT_RESERVE; |
951 | } |
952 | |
953 | #define PAGE_TYPE_OPS(uname, lname) \ |
954 | static __always_inline int Page##uname(struct page *page) \ |
955 | { \ |
956 | return PageType(page, PG_##lname); \ |
957 | } \ |
958 | static __always_inline void __SetPage##uname(struct page *page) \ |
959 | { \ |
960 | VM_BUG_ON_PAGE(!PageType(page, 0), page); \ |
961 | page->page_type &= ~PG_##lname; \ |
962 | } \ |
963 | static __always_inline void __ClearPage##uname(struct page *page) \ |
964 | { \ |
965 | VM_BUG_ON_PAGE(!Page##uname(page), page); \ |
966 | page->page_type |= PG_##lname; \ |
967 | } |
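/*
 * Worked example (illustrative only): page_type starts life as -1
 * (0xffffffff). __SetPageBuddy() clears PG_buddy, leaving 0xffffff7f, and
 * PageBuddy() then evaluates
 *
 *	(page->page_type & (PAGE_TYPE_BASE | PG_buddy)) == PAGE_TYPE_BASE
 *
 * which is true only while the PAGE_TYPE_BASE bits are all still set (i.e.
 * the field is not being used as a mapcount) and PG_buddy has been cleared.
 * __ClearPageBuddy() sets the bit again, restoring 0xffffffff.
 */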
968 | |
969 | /* |
970 | * PageBuddy() indicates that the page is free and in the buddy system |
971 | * (see mm/page_alloc.c). |
972 | */ |
973 | PAGE_TYPE_OPS(Buddy, buddy) |
974 | |
975 | /* |
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
979 | * The content of these pages is effectively stale. Such pages should not |
980 | * be touched (read/write/dump/save) except by their owner. |
981 | * |
 * If a driver wants to allow offlining unmovable PageOffline() pages without
983 | * putting them back to the buddy, it can do so via the memory notifier by |
984 | * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the |
985 | * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline() |
986 | * pages (now with a reference count of zero) are treated like free pages, |
987 | * allowing the containing memory block to get offlined. A driver that |
988 | * relies on this feature is aware that re-onlining the memory block will |
 * require re-setting the pages PageOffline() and not giving them to the
990 | * buddy via online_page_callback_t. |
991 | * |
992 | * There are drivers that mark a page PageOffline() and expect there won't be |
993 | * any further access to page content. PFN walkers that read content of random |
994 | * pages should check PageOffline() and synchronize with such drivers using |
995 | * page_offline_freeze()/page_offline_thaw(). |
996 | */ |
997 | PAGE_TYPE_OPS(Offline, offline) |
998 | |
999 | extern void page_offline_freeze(void); |
1000 | extern void page_offline_thaw(void); |
1001 | extern void page_offline_begin(void); |
1002 | extern void page_offline_end(void); |
1003 | |
1004 | /* |
1005 | * Marks pages in use as page tables. |
1006 | */ |
1007 | PAGE_TYPE_OPS(Table, table) |
1008 | |
1009 | /* |
1010 | * Marks guardpages used with debug_pagealloc. |
1011 | */ |
1012 | PAGE_TYPE_OPS(Guard, guard) |
1013 | |
1014 | extern bool is_free_buddy_page(struct page *page); |
1015 | |
1016 | PAGEFLAG(Isolated, isolated, PF_ANY); |
1017 | |
1018 | static __always_inline int PageAnonExclusive(struct page *page) |
1019 | { |
1020 | VM_BUG_ON_PGFLAGS(!PageAnon(page), page); |
1021 | VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); |
1022 | return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); |
1023 | } |
1024 | |
1025 | static __always_inline void SetPageAnonExclusive(struct page *page) |
1026 | { |
1027 | VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page); |
1028 | VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); |
1029 | set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); |
1030 | } |
1031 | |
1032 | static __always_inline void ClearPageAnonExclusive(struct page *page) |
1033 | { |
1034 | VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page); |
1035 | VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); |
1036 | clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); |
1037 | } |
1038 | |
1039 | static __always_inline void __ClearPageAnonExclusive(struct page *page) |
1040 | { |
1041 | VM_BUG_ON_PGFLAGS(!PageAnon(page), page); |
1042 | VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); |
1043 | __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); |
1044 | } |
1045 | |
1046 | #ifdef CONFIG_MMU |
1047 | #define __PG_MLOCKED (1UL << PG_mlocked) |
1048 | #else |
1049 | #define __PG_MLOCKED 0 |
1050 | #endif |
1051 | |
1052 | /* |
1053 | * Flags checked when a page is freed. Pages being freed should not have |
1054 | * these flags set. If they are, there is a problem. |
1055 | */ |
1056 | #define PAGE_FLAGS_CHECK_AT_FREE \ |
1057 | (1UL << PG_lru | 1UL << PG_locked | \ |
1058 | 1UL << PG_private | 1UL << PG_private_2 | \ |
1059 | 1UL << PG_writeback | 1UL << PG_reserved | \ |
1060 | 1UL << PG_slab | 1UL << PG_active | \ |
1061 | 1UL << PG_unevictable | __PG_MLOCKED) |
1062 | |
1063 | /* |
1064 | * Flags checked when a page is prepped for return by the page allocator. |
1065 | * Pages being prepped should not have these flags set. If they are set, |
1066 | * there has been a kernel bug or struct page corruption. |
1067 | * |
1068 | * __PG_HWPOISON is exceptional because it needs to be kept beyond page's |
1069 | * alloc-free cycle to prevent from reusing the page. |
1070 | */ |
1071 | #define PAGE_FLAGS_CHECK_AT_PREP \ |
1072 | (PAGEFLAGS_MASK & ~__PG_HWPOISON) |
1073 | |
1074 | #define PAGE_FLAGS_PRIVATE \ |
1075 | (1UL << PG_private | 1UL << PG_private_2) |
1076 | /** |
1077 | * page_has_private - Determine if page has private stuff |
1078 | * @page: The page to be checked |
1079 | * |
1080 | * Determine if a page has private stuff, indicating that release routines |
1081 | * should be invoked upon it. |
1082 | */ |
1083 | static inline int page_has_private(struct page *page) |
1084 | { |
1085 | return !!(page->flags & PAGE_FLAGS_PRIVATE); |
1086 | } |
1087 | |
1088 | static inline bool folio_has_private(struct folio *folio) |
1089 | { |
1090 | return page_has_private(&folio->page); |
1091 | } |
1092 | |
1093 | #undef PF_ANY |
1094 | #undef PF_HEAD |
1095 | #undef PF_ONLY_HEAD |
1096 | #undef PF_NO_TAIL |
1097 | #undef PF_NO_COMPOUND |
1098 | #undef PF_SECOND |
1099 | #endif /* !__GENERATING_BOUNDS_H */ |
1100 | |
1101 | #endif /* PAGE_FLAGS_H */ |
1102 | |