/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
#define SWAP_FLAG_PRIO_SHIFT 0
#define SWAP_FLAG_DISCARD 0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE 0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
                          SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
                          SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
        return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to. The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache. Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures. And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT 5
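
/*
 * Illustrative sketch only, not part of the kernel API: with the 5/27
 * type/offset split described above, one possible way a 32-bit value could
 * carry a swap type and offset looks like this.  The real, arch-aware
 * helpers are swp_entry(), swp_type() and swp_offset() in
 * <linux/swapops.h>; the names below are hypothetical.
 */
static inline unsigned int swp_type_sketch(unsigned int val)
{
        return val >> (32 - MAX_SWAPFILES_SHIFT);               /* top 5 bits */
}

static inline unsigned int swp_offset_sketch(unsigned int val)
{
        return val & ((1U << (32 - MAX_SWAPFILES_SHIFT)) - 1);  /* low 27 bits */
}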

/*
 * Use some of the swap files numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * PTE markers are used to persist information onto PTEs that otherwise
 * should be a none pte. As its name "PTE" hints, it should only be
 * applied to the leaves of pgtables.
 */
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
                        SWP_MIGRATION_NUM + SWP_DEVICE_NUM)

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/mm/hmm.rst. Short description is we need struct pages for
 * device memory that is unaddressable (inaccessible) by CPU, so that we can
 * migrate part of a process memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU page
 * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive to
 * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
 * (parts of) anonymous pages that are mapped writable are exclusive to a
 * single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
        ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
         SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
         SWP_PTE_MARKER_NUM)
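
/*
 * Worked example (the values depend on the kernel config): with
 * MAX_SWAPFILES_SHIFT == 5 there are 32 encodable types. When
 * CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE are all
 * enabled, 32 - 4 - 3 - 1 - 1 = 23 types remain for real swap devices; with
 * all of them disabled it is 32 - 1 = 31 (the PTE marker slot is always
 * reserved here).
 */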

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
        struct {
                char reserved[PAGE_SIZE - 10];
                char magic[10];                 /* SWAP-SPACE or SWAPSPACE2 */
        } magic;
        struct {
                char            bootbits[1024]; /* Space for disklabel etc. */
                __u32           version;
                __u32           last_page;
                __u32           nr_badpages;
                unsigned char   sws_uuid[16];
                unsigned char   sws_volume[16];
                __u32           padding[117];
                __u32           badpages[1];
        } info;
};
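
/*
 * Illustrative sketch only (names hypothetical, assumes memcmp() from
 * <linux/string.h>): given the layout above, a swap signature is recognised
 * by looking at the last 10 bytes of the first page.  The kernel's real
 * check lives in mm/swapfile.c and only accepts the "SWAPSPACE2" (v1)
 * signature; "SWAP-SPACE" is the legacy v0 format.
 */
static inline int swap_header_magic_ok_sketch(const union swap_header *hdr)
{
        return !memcmp(hdr->magic.magic, "SWAPSPACE2", 10) ||
               !memcmp(hdr->magic.magic, "SWAP-SPACE", 10);
}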

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
        /* pages reclaimed outside of LRU-based reclaim */
        unsigned long reclaimed;
#ifdef CONFIG_LRU_GEN
        /* per-thread mm walk data */
        struct lru_gen_mm_walk *mm_walk;
#endif
};

/*
 * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based
 * reclaim
 * @pages: number of pages reclaimed
 *
 * If the current process is undergoing a reclaim operation, increment the
 * number of reclaimed pages by @pages.
 */
static inline void mm_account_reclaimed_pages(unsigned long pages)
{
        if (current->reclaim_state)
                current->reclaim_state->reclaimed += pages;
}
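
/*
 * Illustrative usage sketch only (the function name is hypothetical): a
 * reclaim path installs a reclaim_state on the task so that pages freed
 * outside the LRU, e.g. by shrinkers, are credited to it.  The real callers
 * live in mm/vmscan.c.
 */
static inline void reclaim_state_usage_sketch(unsigned long nr_freed)
{
        struct reclaim_state rs = { .reclaimed = 0 };

        current->reclaim_state = &rs;
        /* ... free nr_freed pages outside of LRU-based reclaim ... */
        mm_account_reclaimed_pages(nr_freed);
        current->reclaim_state = NULL;
}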

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks. An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
        struct rb_node rb_node;
        pgoff_t start_page;
        pgoff_t nr_pages;
        sector_t start_block;
};

/*
 * Max bad pages in the new format.
 */
#define MAX_SWAP_BADPAGES \
        ((offsetof(union swap_header, magic.magic) - \
          offsetof(union swap_header, info.badpages)) / sizeof(int))
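
/*
 * Worked example, assuming 4 KiB pages: offsetof(union swap_header,
 * magic.magic) = PAGE_SIZE - 10 = 4086 and offsetof(union swap_header,
 * info.badpages) = 1024 + 3*4 + 16 + 16 + 117*4 = 1536, so
 * MAX_SWAP_BADPAGES = (4086 - 1536) / sizeof(int) = 637.
 */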

enum {
        SWP_USED        = (1 << 0),     /* is slot in swap_info[] used? */
        SWP_WRITEOK     = (1 << 1),     /* ok to write to this swap?    */
        SWP_DISCARDABLE = (1 << 2),     /* blkdev supports discard */
        SWP_DISCARDING  = (1 << 3),     /* now discarding a free cluster */
        SWP_SOLIDSTATE  = (1 << 4),     /* blkdev seeks are cheap */
        SWP_CONTINUED   = (1 << 5),     /* swap_map has count continuation */
        SWP_BLKDEV      = (1 << 6),     /* it's a block device */
        SWP_ACTIVATED   = (1 << 7),     /* set after swap_activate success */
        SWP_FS_OPS      = (1 << 8),     /* swapfile operations go through fs */
        SWP_AREA_DISCARD = (1 << 9),    /* single-time swap area discards */
        SWP_PAGE_DISCARD = (1 << 10),   /* freed swap page-cluster discards */
        SWP_STABLE_WRITES = (1 << 11),  /* no overwrite PG_writeback pages */
        SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
        /* add others here before... */
        SWP_SCANNING    = (1 << 14),    /* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED 0x80 /* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX 0x3e /* Max count */
#define SWAP_MAP_BAD 0x3f /* Note page is bad */
#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX 0x7f /* Max count */
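
/*
 * Illustrative sketch only (the name is hypothetical): mm/swapfile.c derives
 * the reference-count part of a swap_map byte by masking off SWAP_HAS_CACHE,
 * along these lines.
 */
static inline unsigned char swap_map_count_sketch(unsigned char ent)
{
        return ent & ~SWAP_HAS_CACHE;
}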

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long that is naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
        spinlock_t lock;        /*
                                 * Protect swap_cluster_info fields
                                 * and the swap_info_struct->swap_map
                                 * elements corresponding to the swap
                                 * cluster.
                                 */
        unsigned int data:24;
        unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
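
/*
 * Illustrative sketch only (the real helpers are private to mm/swapfile.c
 * and named differently): the flags field tells whether a cluster is free,
 * and data then holds either the next free cluster index or the usage count.
 */
static inline bool cluster_info_is_free_sketch(struct swap_cluster_info *ci)
{
        return ci->flags & CLUSTER_FLAG_FREE;
}

static inline unsigned int cluster_info_data_sketch(struct swap_cluster_info *ci)
{
        return ci->data;
}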

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entry from
 * its own cluster and swapout sequentially. The purpose is to optimize swapout
 * throughput.
 */
struct percpu_cluster {
        struct swap_cluster_info index; /* Current cluster index */
        unsigned int next;              /* Likely next allocation offset */
};

struct swap_cluster_list {
        struct swap_cluster_info head;
        struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
        struct percpu_ref users;        /* indicate and keep swap device valid. */
        unsigned long flags;            /* SWP_USED etc: see above */
        signed short prio;              /* swap priority of this type */
        struct plist_node list;         /* entry in swap_active_head */
        signed char type;               /* strange name for an index */
        unsigned int max;               /* extent of the swap_map */
        unsigned char *swap_map;        /* vmalloc'ed array of usage counts */
        struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
        struct swap_cluster_list free_clusters; /* free clusters list */
        unsigned int lowest_bit;        /* index of first free in swap_map */
        unsigned int highest_bit;       /* index of last free in swap_map */
        unsigned int pages;             /* total of usable pages of swap */
        unsigned int inuse_pages;       /* number of those currently in use */
        unsigned int cluster_next;      /* likely index for next allocation */
        unsigned int cluster_nr;        /* countdown to next cluster search */
        unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
        struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
        struct rb_root swap_extent_root;/* root of the swap extent rbtree */
        struct file *bdev_file;         /* open handle of the bdev */
        struct block_device *bdev;      /* swap device or bdev of swap file */
        struct file *swap_file;         /* seldom referenced */
        unsigned int old_block_size;    /* seldom referenced */
        struct completion comp;         /* seldom referenced */
        spinlock_t lock;                /*
                                         * protect map scan related fields like
                                         * swap_map, lowest_bit, highest_bit,
                                         * inuse_pages, cluster_next,
                                         * cluster_nr, lowest_alloc,
                                         * highest_alloc, free/discard cluster
                                         * list. Other fields are only changed
                                         * at swapon/swapoff, so are protected
                                         * by swap_lock. Changing flags requires
                                         * holding this lock and swap_lock. If
                                         * both locks must be held, take
                                         * swap_lock first.
                                         */
        spinlock_t cont_lock;           /*
                                         * protect swap count continuation page
                                         * list.
                                         */
        struct work_struct discard_work; /* discard worker */
        struct swap_cluster_list discard_clusters; /* discard clusters list */
        struct plist_node avail_lists[]; /*
                                          * entries in swap_avail_heads, one
                                          * entry per node.
                                          * Must be last as the number of
                                          * entries is nr_node_ids, which is
                                          * not a fixed value, so it has to be
                                          * allocated dynamically.
                                          * And it has to be an array so that
                                          * plist_for_each_* can work.
                                          */
};

static inline swp_entry_t page_swap_entry(struct page *page)
{
        struct folio *folio = page_folio(page);
        swp_entry_t entry = folio->swap;

        entry.val += folio_page_idx(folio, page);
        return entry;
}

/* linux/mm/workingset.c */
bool workingset_test_recent(void *shadow, bool file, bool *workingset);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
void lru_note_cost(struct lruvec *lruvec, bool file,
                   unsigned int nr_io, unsigned int nr_rotated);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
        return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
        atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                       gfp_t gfp_mask, nodemask_t *mask);

#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                                  unsigned long nr_pages,
                                                  gfp_t gfp_mask,
                                                  unsigned int reclaim_options);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
                                            gfp_t gfp_mask, bool noswap,
                                            pg_data_t *pgdat,
                                            unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
        /* Is any node_reclaim_mode bit set? */
        return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

void check_move_unevictable_folios(struct folio_batch *fbatch);

extern void __meminit kswapd_run(int nid);
extern void __meminit kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
                    unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
                              sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
        return global_node_page_state(NR_SWAPCACHE);
}

void free_swap_cache(struct folio *folio);
void free_page_and_swap_cache(struct page *);
void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
        return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
        return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_folio_sector(struct folio *folio);

static inline void put_swap_device(struct swap_info_struct *si)
{
        percpu_ref_put(&si->users);
}

#else /* CONFIG_SWAP */
static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
        return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
        return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
#define total_swapcache_pages() 0UL
#define vm_swap_full() 0

#define si_swapinfo(val) \
        do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc can not include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
        put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
        release_pages((pages), (nr));

/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
#define free_swap_and_cache(e) is_pfn_swap_entry(e)

static inline void free_swap_cache(struct folio *folio)
{
}

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
        return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
        return 0;
}

static inline int swapcache_prepare(swp_entry_t swp)
{
        return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
        return 0;
}

static inline int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
{
        return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
        return 0;
}

static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
        swp_entry_t entry;
        entry.val = 0;
        return entry;
}

static inline bool folio_free_swap(struct folio *folio)
{
        return false;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
                                  unsigned long start_page,
                                  unsigned long nr_pages, sector_t start_block)
{
        return -EINVAL;
}
#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
        return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
        /* Cgroup2 doesn't have per-cgroup swappiness */
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return READ_ONCE(vm_swappiness);

        /* root ? */
        if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
                return READ_ONCE(vm_swappiness);

        return READ_ONCE(memcg->swappiness);
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
        return READ_ONCE(vm_swappiness);
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
        if (mem_cgroup_disabled())
                return;
        __folio_throttle_swaprate(folio, gfp);
}
#else
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
}
#endif

#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
                                             swp_entry_t entry)
{
        if (mem_cgroup_disabled())
                return 0;
        return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct folio *folio,
                                             swp_entry_t entry)
{
        return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
                                            unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
        return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct folio *folio)
{
        return vm_swap_full();
}
#endif

#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */