/*
 * Compatibility functions which bloat the callers too much to make inline.
 * All of the callers of these functions should be converted to use folios
 * eventually.
 */

#include <linux/migrate.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include "internal.h"
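
/*
 * Each wrapper below converts its struct page argument to the enclosing
 * folio with page_folio() (a tail page resolves to its head) and forwards
 * to the folio-based implementation.
 */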
struct address_space *page_mapping(struct page *page)
{
	return folio_mapping(page_folio(page));
}
EXPORT_SYMBOL(page_mapping);

void unlock_page(struct page *page)
{
	return folio_unlock(page_folio(page));
}
EXPORT_SYMBOL(unlock_page);

void end_page_writeback(struct page *page)
{
	return folio_end_writeback(page_folio(page));
}
EXPORT_SYMBOL(end_page_writeback);

void wait_on_page_writeback(struct page *page)
{
	return folio_wait_writeback(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_on_page_writeback);

void wait_for_stable_page(struct page *page)
{
	return folio_wait_stable(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);

void mark_page_accessed(struct page *page)
{
	folio_mark_accessed(page_folio(page));
}
EXPORT_SYMBOL(mark_page_accessed);

bool set_page_writeback(struct page *page)
{
	return folio_start_writeback(page_folio(page));
}
EXPORT_SYMBOL(set_page_writeback);

bool set_page_dirty(struct page *page)
{
	return folio_mark_dirty(page_folio(page));
}
EXPORT_SYMBOL(set_page_dirty);
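
/*
 * filemap_dirty_folio() is the folio-based replacement for this legacy
 * entry point; the mapping is looked up here because the old interface
 * did not pass it in.
 */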
int __set_page_dirty_nobuffers(struct page *page)
{
	return filemap_dirty_folio(page_mapping(page), page_folio(page));
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

bool clear_page_dirty_for_io(struct page *page)
{
	return folio_clear_dirty_for_io(page_folio(page));
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

bool redirty_page_for_writepage(struct writeback_control *wbc,
		struct page *page)
{
	return folio_redirty_for_writepage(wbc, page_folio(page));
}
EXPORT_SYMBOL(redirty_page_for_writepage);
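
/*
 * folio_add_lru_vma() puts the folio on the LRU list appropriate for @vma,
 * diverting folios in mlocked VMAs to the unevictable handling.
 */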
void lru_cache_add_inactive_or_unevictable(struct page *page,
		struct vm_area_struct *vma)
{
	folio_add_lru_vma(page_folio(page), vma);
}
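
/*
 * filemap_add_folio() both inserts the folio into the page cache at @index
 * and adds it to the LRU; it returns 0 on success or a negative errno.
 */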
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp)
{
	return filemap_add_folio(mapping, page_folio(page), index, gfp);
}
EXPORT_SYMBOL(add_to_page_cache_lru);
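
/*
 * Legacy page-based lookup: __filemap_get_folio() reports failure with an
 * ERR_PTR(), which is translated back to the old NULL convention, and
 * folio_file_page() returns the exact subpage for @index within a large
 * folio.
 */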
noinline
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		fgf_t fgp_flags, gfp_t gfp)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
	if (IS_ERR(folio))
		return NULL;
	return folio_file_page(folio, index);
}
EXPORT_SYMBOL(pagecache_get_page);
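
/*
 * FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE) yields a
 * locked, writeback-stable page, allocating one if none is present, as
 * ->write_begin implementations expect.
 */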
struct page *grab_cache_page_write_begin(struct address_space *mapping,
		pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_WRITEBEGIN,
			mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
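
/*
 * Only a head page can be reinterpreted as a folio, so tail pages are
 * rejected before the cast below; callers should be converted to call
 * folio_isolate_lru() directly.
 */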
bool isolate_lru_page(struct page *page)
{
	if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
		return false;
	return folio_isolate_lru((struct folio *)page);
}

void putback_lru_page(struct page *page)
{
	folio_putback_lru(page_folio(page));
}

#ifdef CONFIG_MMU
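/*
 * The VM_BUG_ON_PAGE() below guarantees that @page is a head page, so the
 * direct cast to struct folio is safe without a compound_head() lookup.
 */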
void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
		unsigned long address)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	return folio_add_new_anon_rmap((struct folio *)page, vma, address);
}
#endif