/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

#ifdef CONFIG_SWAP
#include <linux/swapfile.h>
#endif	/* CONFIG_SWAP */

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the six
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further one bit: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT		(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK		((1UL << SWP_TYPE_SHIFT) - 1)
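
/*
 * Worked example (illustrative only, assuming a 64-bit kernel where
 * BITS_PER_XA_VALUE is 63 and MAX_SWAPFILES_SHIFT is 5):
 *
 *	SWP_TYPE_SHIFT  == 63 - 5 == 58
 *	SWP_OFFSET_MASK == (1UL << 58) - 1
 *
 * so swp_entry(type, offset) packs `type' into bits 58..62 and `offset'
 * into bits 0..57, leaving the top bit of the unsigned long free for the
 * shifted XArray value encoding used by shmem/tmpfs.
 */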

/*
 * Definitions only for PFN swap entries (see is_pfn_swap_entry()).  To
 * store PFN, we only need SWP_PFN_BITS bits.  Each of the pfn swap entries
 * can use the extra bits to store other information besides PFN.
 */
#ifdef MAX_PHYSMEM_BITS
#define SWP_PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#else  /* MAX_PHYSMEM_BITS */
#define SWP_PFN_BITS		min_t(int, \
				      sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
				      SWP_TYPE_SHIFT)
#endif	/* MAX_PHYSMEM_BITS */
#define SWP_PFN_MASK		(BIT(SWP_PFN_BITS) - 1)
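
/*
 * Worked example (illustrative only; the exact numbers are arch and config
 * dependent): with MAX_PHYSMEM_BITS == 46 and PAGE_SHIFT == 12,
 *
 *	SWP_PFN_BITS == 46 - 12 == 34
 *	SWP_PFN_MASK == (1UL << 34) - 1
 *
 * i.e. the low 34 bits of the swp offset hold the PFN, and the bits above
 * them are available to the individual pfn swap entry types.
 */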

/**
 * Migration swap entry specific bitfield definitions.  Layout:
 *
 *   |----------+--------------------|
 *   | swp_type |     swp_offset     |
 *   |----------+--------+-+-+-------|
 *   |          |  resv  |D|A|  PFN  |
 *   |----------+--------+-+-+-------|
 *
 * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
 * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
 *
 * Note: A/D bits will be stored in migration entries iff there're enough
 * free bits in arch specific swp offset.  By default we'll ignore A/D bits
 * when migrating a page.  Please refer to migration_entry_supports_ad()
 * for more information.  If there're more bits besides PFN and A/D bits,
 * they should be reserved and always be zeros.
 */
#define SWP_MIG_YOUNG_BIT		(SWP_PFN_BITS)
#define SWP_MIG_DIRTY_BIT		(SWP_PFN_BITS + 1)
#define SWP_MIG_TOTAL_BITS		(SWP_PFN_BITS + 2)

#define SWP_MIG_YOUNG			BIT(SWP_MIG_YOUNG_BIT)
#define SWP_MIG_DIRTY			BIT(SWP_MIG_DIRTY_BIT)
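
/*
 * Example (illustrative only, continuing the SWP_PFN_BITS == 34 case
 * sketched above): the swp offset of a migration entry would then be
 *
 *	offset = PFN | (young ? BIT(34) : 0) | (dirty ? BIT(35) : 0)
 *
 * with SWP_MIG_TOTAL_BITS == 36; any arch swp offset bits above that must
 * stay zero for the A/D bits to be usable at all.
 */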

static inline bool is_pfn_swap_entry(swp_entry_t entry);

/* Clear all flags but only keep swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
	if (pte_swp_exclusive(pte))
		pte = pte_swp_clear_exclusive(pte);
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	if (pte_swp_uffd_wp(pte))
		pte = pte_swp_clear_uffd_wp(pte);
	return pte;
}

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}
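
/*
 * Usage sketch (illustrative only): packing and unpacking an entry with
 * the helpers above.
 *
 *	swp_entry_t entry = swp_entry(1, 0x1234);
 *
 *	swp_type(entry)   == 1
 *	swp_offset(entry) == 0x1234
 *
 * Both directions operate on the arch-independent layout; conversion to
 * and from the pte format is done by swp_entry_to_pte() and
 * pte_to_swp_entry() below.
 */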

/*
 * This should only be called upon a pfn swap entry to get the PFN stored
 * in the swap entry.  Please refer to is_pfn_swap_entry() for the definition
 * of a pfn swap entry.
 */
static inline unsigned long swp_offset_pfn(swp_entry_t entry)
{
	VM_BUG_ON(!is_pfn_swap_entry(entry));
	return swp_offset(entry) & SWP_PFN_MASK;
}

/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	pte = pte_swp_clear_flags(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
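
/*
 * Usage sketch (illustrative only, assuming the pte was read under the
 * page table lock): recovering the swap entry behind a non-present pte.
 *
 *	if (is_swap_pte(pte)) {
 *		swp_entry_t entry = pte_to_swp_entry(pte);
 *
 *		// entry is now arch-independent; inspect it with
 *		// swp_type()/swp_offset() or the is_*_entry() helpers.
 *	}
 *
 * Note that pte_to_swp_entry() strips the swap pte software bits
 * (exclusive, soft-dirty, uffd-wp) first, so a caller that rewrites the
 * pte via swp_entry_to_pte() must re-apply them itself.
 */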

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}
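
/*
 * Usage sketch (illustrative only): this is how shmem keeps a swap entry
 * in a file's page cache slot.  The XArray value encoding shifts the
 * stored value up by one bit, which is why the layout described at the top
 * of this file leaves the top bit of the swp_entry_t unused.
 *
 *	void *radix = swp_to_radix_entry(entry);	// xa_is_value(radix) is true
 *	swp_entry_t back = radix_to_swp_entry(radix);	// back.val == entry.val
 */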

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_READ, offset);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_WRITE, offset);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ ||
			swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE);
}
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}
#endif /* CONFIG_DEVICE_PRIVATE */
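
/*
 * Usage sketch (illustrative only, not taken from any particular driver):
 * when a driver migrates a page to unaddressable device memory, the CPU pte
 * is replaced by a device private entry that remembers the device page and
 * whether the mapping was writable.
 *
 *	swp_entry_t entry;
 *
 *	if (writable)
 *		entry = make_writable_device_private_entry(page_to_pfn(page));
 *	else
 *		entry = make_readable_device_private_entry(page_to_pfn(page));
 *	pte = swp_entry_to_pte(entry);
 *
 * A later CPU access faults, sees is_device_private_entry(), and calls back
 * into the driver to migrate the data back to system RAM.
 */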

#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
}

static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
}

static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ, offset);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_WRITE, offset);
}

/*
 * Returns whether the host has large enough swap offset field to support
 * carrying over pgtable A/D bits for page migrations.  The result is
 * pretty much arch specific.
 */
static inline bool migration_entry_supports_ad(void)
{
#ifdef CONFIG_SWAP
	return swap_migration_ad_supported;
#else  /* CONFIG_SWAP */
	return false;
#endif	/* CONFIG_SWAP */
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_YOUNG);
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_YOUNG;
	/* Keep the old behavior of aging page after migration */
	return false;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_DIRTY);
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_DIRTY;
	/* Keep the old behavior of clean page after migration */
	return false;
}
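
/*
 * Usage sketch (illustrative only; the real logic lives in the migration
 * and rmap code): when unmapping a page for migration, the pte's
 * young/dirty state can be folded into the migration entry and restored on
 * the destination page afterwards.
 *
 *	entry = make_readable_migration_entry(page_to_pfn(page));
 *	if (pte_young(pteval))
 *		entry = make_migration_entry_young(entry);
 *	if (pte_dirty(pteval))
 *		entry = make_migration_entry_dirty(entry);
 *	set_pte_at(mm, addr, ptep, swp_entry_to_pte(entry));
 *
 * On architectures without spare offset bits the two make_*() calls are
 * no-ops and the bits are simply dropped, matching the old behavior.
 */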

extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
#else /* CONFIG_MIGRATION */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
					pte_t *pte) { }
static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return 0;
}
static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	return false;
}
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_MEMORY_FAILURE

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}
#endif

typedef unsigned long pte_marker;

#define PTE_MARKER_UFFD_WP		BIT(0)
/*
 * "Poisoned" here is meant in the very general sense of "future accesses are
 * invalid", instead of referring very specifically to hardware memory errors.
 * This marker is meant to represent any of various different causes of this.
 */
#define PTE_MARKER_POISONED		BIT(1)
#define PTE_MARKER_MASK			(BIT(2) - 1)

static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	return swp_entry(SWP_PTE_MARKER, marker);
}

static inline bool is_pte_marker_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_PTE_MARKER;
}

static inline pte_marker pte_marker_get(swp_entry_t entry)
{
	return swp_offset(entry) & PTE_MARKER_MASK;
}

static inline bool is_pte_marker(pte_t pte)
{
	return is_swap_pte(pte) && is_pte_marker_entry(pte_to_swp_entry(pte));
}

static inline pte_t make_pte_marker(pte_marker marker)
{
	return swp_entry_to_pte(make_pte_marker_entry(marker));
}

static inline swp_entry_t make_poisoned_swp_entry(void)
{
	return make_pte_marker_entry(PTE_MARKER_POISONED);
}

static inline int is_poisoned_swp_entry(swp_entry_t entry)
{
	return is_pte_marker_entry(entry) &&
	    (pte_marker_get(entry) & PTE_MARKER_POISONED);
}
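
/*
 * Usage sketch (illustrative only): a pte marker lets state be kept in an
 * otherwise-none pte slot, e.g. userfaultfd's write-protect bit or the
 * "poisoned" state above.
 *
 *	pte_t pte = make_pte_marker(PTE_MARKER_UFFD_WP);
 *
 *	is_pte_marker(pte)			is true
 *	pte_marker_get(pte_to_swp_entry(pte))	== PTE_MARKER_UFFD_WP
 *
 * Such a pte is no longer pte_none(), which is what pte_none_mostly()
 * below papers over for callers that only care whether a page is mapped.
 */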

/*
 * This is a special version to check pte_none() just to cover the case when
 * the pte is a pte marker.  It exists because in many cases the pte marker
 * should be seen as a none pte; it's just that we have stored some information
 * onto the none pte so it becomes not-none any more.
 *
 * It should be used when the pte is file-backed, ram-based and backing
 * userspace pages, like shmem.  It is not needed upon pgtables that do not
 * support pte markers at all.  For example, it's not needed on anonymous
 * memory, kernel-only memory (including when the system is during-boot),
 * or non-ram based generic file-systems.  It's fine to be used even there,
 * but the extra pte marker check will be pure overhead.
 */
static inline int pte_none_mostly(pte_t pte)
{
	return pte_none(pte) || is_pte_marker(pte);
}

static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset_pfn(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(is_migration_entry(entry) && !PageLocked(p));

	return p;
}

static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry)
{
	struct folio *folio = pfn_folio(swp_offset_pfn(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding folio is locked
	 */
	BUG_ON(is_migration_entry(entry) && !folio_test_locked(folio));

	return folio;
}

/*
 * A pfn swap entry is a special type of swap entry that always has a pfn
 * stored in the swap offset.  Such entries can be used to represent
 * unaddressable device memory, to restrict access to a page undergoing
 * migration, or to represent a pfn which has been hwpoisoned and unmapped.
 */
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
	/* Make sure the swp offset can always store the needed fields */
	BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);

	return is_migration_entry(entry) || is_device_private_entry(entry) ||
	       is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
}
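
/*
 * Usage sketch (illustrative only): recovering the folio behind a pfn swap
 * entry found in a page table, e.g. while a page is under migration.
 *
 *	swp_entry_t entry = pte_to_swp_entry(pte);
 *
 *	if (is_pfn_swap_entry(entry)) {
 *		struct folio *folio = pfn_swap_entry_folio(entry);
 *
 *		// for a migration entry the folio is guaranteed locked, so
 *		// the fault path can wait for the lock to be released (see
 *		// migration_entry_wait() above) and then retry.
 *	}
 */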

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	if (pmd_swp_uffd_wp(pmd))
		pmd = pmd_swp_clear_uffd_wp(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
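
/*
 * Usage sketch (illustrative only): all the special entry types above are
 * carved out of the swp_type() space at and beyond MAX_SWAPFILES, so a
 * common pattern when decoding a non-present pte is:
 *
 *	swp_entry_t entry = pte_to_swp_entry(pte);
 *
 *	if (non_swap_entry(entry)) {
 *		// migration, device, hwpoison or pte marker entry
 *	} else {
 *		// a real swap entry: swp_type() indexes a swap device and
 *		// swp_offset() is a slot within it
 *	}
 */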

#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */