/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
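
/*
 * Illustrative sketch only (not part of this header): a swapon(2) flags word
 * combines an optional priority with the discard bits, and decoding it looks
 * roughly like the logic in mm/swapfile.c:
 *
 *	int prio = -1;
 *	if (swap_flags & ~SWAP_FLAGS_VALID)
 *		return -EINVAL;
 *	if (swap_flags & SWAP_FLAG_PREFER)
 *		prio = (swap_flags & SWAP_FLAG_PRIO_MASK) >>
 *			SWAP_FLAG_PRIO_SHIFT;
 */
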
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
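
/*
 * Worked example (illustrative only): with a 32-bit pgoff_t, 5 type bits
 * leave 32 - 5 = 27 offset bits, i.e. at most 2^27 swap pages per type
 * (512 GiB of swap with 4 KiB pages).  The swp_entry()/swp_type()/
 * swp_offset() helpers in <linux/swapops.h> hide the architecture-specific
 * packing, so callers should never open-code the split:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *	unsigned int t = swp_type(entry);	// == type
 *	pgoff_t off = swp_offset(entry);	// == offset
 */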

/*
 * Use some of the swap files numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

#define SWP_SWAPIN_ERROR_NUM 1
#define SWP_SWAPIN_ERROR	(MAX_SWAPFILES + SWP_HWPOISON_NUM + \
				 SWP_MIGRATION_NUM + SWP_DEVICE_NUM + \
				 SWP_PTE_MARKER_NUM)
/*
 * PTE markers are used to persist information onto PTEs that are mapped with
 * file-backed memories.  As its name "PTE" hints, it should only be applied to
 * the leaves of pgtables.
 */
#ifdef CONFIG_PTE_MARKER
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER		(MAX_SWAPFILES + SWP_HWPOISON_NUM + \
				 SWP_MIGRATION_NUM + SWP_DEVICE_NUM)
#else
#define SWP_PTE_MARKER_NUM 0
#endif

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/mm/hmm.rst. Short description is we need struct pages for
 * device memory that is unaddressable (inaccessible) by CPU, so that we can
 * migrate part of a process memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU page
 * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive to
 * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
 * (part of) an anonymous page that is mapped writable is exclusive to a
 * single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
	SWP_PTE_MARKER_NUM - SWP_SWAPIN_ERROR_NUM)
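
/*
 * Worked example (illustrative only): with all of the options above enabled,
 * MAX_SWAPFILES = 32 - 4 - 3 - 1 - 1 - 1 = 22 real swap types, and the
 * special types stack on top of them:
 *
 *	22	SWP_HWPOISON
 *	23..25	SWP_MIGRATION_READ / _READ_EXCLUSIVE / _WRITE
 *	26..29	SWP_DEVICE_WRITE / _READ / _EXCLUSIVE_WRITE / _EXCLUSIVE_READ
 *	30	SWP_PTE_MARKER
 *	31	SWP_SWAPIN_ERROR
 *
 * With an option disabled, its slots are reclaimed for ordinary swapfiles.
 */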

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};
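
/*
 * Illustrative sketch (not a helper provided by this header): given an
 * extent 'se', the swap page at 'pgoff' inside
 * [se->start_page, se->start_page + se->nr_pages) lands on disk at
 *
 *	sector_t block = se->start_block + (pgoff - se->start_page);
 *
 * where blocks are PAGE_SIZE units, as noted above.
 */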

/*
 * Max bad pages in the new format..
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))
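
/*
 * Worked example (illustrative only, assuming PAGE_SIZE == 4096):
 * offsetof(magic.magic) = 4096 - 10 = 4086 and offsetof(info.badpages) =
 * 1024 + 3 * 4 + 2 * 16 + 117 * 4 = 1536, so MAX_SWAP_BADPAGES =
 * (4086 - 1536) / 4 = 637 bad-page slots fit before the magic string.
 */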

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap? */
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* no overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
					/* add others here before... */
	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */
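
/*
 * Illustrative sketch (not a helper in this header) of how a first-level
 * swap_map byte is typically interpreted: the low bits hold the map count,
 * bit 6 is SWAP_HAS_CACHE and bit 7 is COUNT_CONTINUED:
 *
 *	unsigned char ent = si->swap_map[offset];
 *	bool cached = ent & SWAP_HAS_CACHE;
 *	unsigned char count = ent & ~SWAP_HAS_CACHE; // may include COUNT_CONTINUED
 *
 * SWAP_MAP_BAD and SWAP_MAP_SHMEM are special count values rather than real
 * reference counts.
 */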

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space that is SWAPFILE_CLUSTER pages long and naturally aligned on disk.
 * All free clusters are organized into a list. We fetch an entry from the
 * list to get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and swap_info_struct->swap_map
				 * elements corresponding to the swap
				 * cluster.
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
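
/*
 * Illustrative sketch only: how the data/flags pair is read under
 * swap_cluster_info.lock (in the style of the cluster helpers in
 * mm/swapfile.c):
 *
 *	if (ci->flags & CLUSTER_FLAG_FREE)
 *		next_free = ci->data;	// index of the next free cluster
 *	else
 *		in_use = ci->data;	// number of allocated pages in it
 */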

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entry from
 * its own cluster and swapout sequentially. The purpose is to optimize swapout
 * throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid. */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
	struct completion comp;		/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. Other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. Changing flags needs
					 * to hold both this lock and
					 * swap_lock. If both locks must be
					 * held, take swap_lock first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[]; /*
					  * entries in swap_avail_heads, one
					  * entry per node.
					  * Must be last: the array length is
					  * nr_node_ids, which is not a fixed
					  * value, so it has to be allocated
					  * dynamically.
					  * It also has to be an array so that
					  * plist_for_each_* can work.
					  */
};

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING	5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING	3
#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
#ifdef CONFIG_64BIT
	pte_t *ptes;
#else
	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

static inline swp_entry_t folio_swap_entry(struct folio *folio)
{
	swp_entry_t entry = { .val = page_private(&folio->page) };
	return entry;
}

/* linux/mm/workingset.c */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do {				\
	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {		\
		xas_set_update(xas, workingset_update_node);		\
		xas_set_lru(xas, &shadow_nodes);			\
	}								\
} while (0)
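
/*
 * Illustrative usage sketch (the real callers live in mm/filemap.c): the
 * macro is applied to an xa_state before modifying a page cache mapping, so
 * that workingset shadow nodes are tracked:
 *
 *	XA_STATE(xas, &mapping->i_pages, index);
 *	mapping_set_update(&xas, mapping);
 *	// ... then xas_store() etc. under the usual xarray locking ...
 */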

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void lru_note_cost(struct lruvec *lruvec, bool file,
			  unsigned int nr_pages);
extern void lru_note_cost_folio(struct folio *);
extern void folio_add_lru(struct folio *);
extern void lru_cache_add(struct page *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void lru_cache_add_inactive_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);

#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  unsigned int reclaim_options);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
	/* Is any node_reclaim_mode bit set? */
	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

void check_move_unevictable_folios(struct folio_batch *fbatch);
void check_move_unevictable_pages(struct pagevec *pvec);

extern void kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

extern void free_swap_cache(struct page *page);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_page_sector(struct page *page);

static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}
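
/*
 * Illustrative usage sketch: get_swap_device() takes a percpu reference on
 * the swap device behind a swap entry, which must be dropped again with
 * put_swap_device() once the caller is done dereferencing it:
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *	if (si) {
 *		// ... use the swap device, e.g. si->swap_map ...
 *		put_swap_device(si);
 *	}
 */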

#else /* CONFIG_SWAP */
static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
#define free_swap_and_cache(e) is_pfn_swap_entry(e)

static inline void free_swap_cache(struct page *page)
{
}

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
				  unsigned long start_page,
				  unsigned long nr_pages, sector_t start_block)
{
	return -EINVAL;
}
#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root ? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return vm_swappiness;

	return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#ifdef CONFIG_ZSWAP
extern u64 zswap_pool_total_size;
extern atomic_t zswap_stored_pages;
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return;
	__cgroup_throttle_swaprate(page, gfp_mask);
}
#else
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	cgroup_throttle_swaprate(&folio->page, gfp);
}

#ifdef CONFIG_MEMCG_SWAP
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
		swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */