1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Memory merging support. |
4 | * |
5 | * This code enables dynamic sharing of identical pages found in different |
6 | * memory areas, even if they are not shared by fork() |
7 | * |
8 | * Copyright (C) 2008-2009 Red Hat, Inc. |
9 | * Authors: |
10 | * Izik Eidus |
11 | * Andrea Arcangeli |
12 | * Chris Wright |
13 | * Hugh Dickins |
14 | */ |
15 | |
16 | #include <linux/errno.h> |
17 | #include <linux/mm.h> |
18 | #include <linux/mm_inline.h> |
19 | #include <linux/fs.h> |
20 | #include <linux/mman.h> |
21 | #include <linux/sched.h> |
22 | #include <linux/sched/mm.h> |
23 | #include <linux/sched/coredump.h> |
24 | #include <linux/rwsem.h> |
25 | #include <linux/pagemap.h> |
26 | #include <linux/rmap.h> |
27 | #include <linux/spinlock.h> |
28 | #include <linux/xxhash.h> |
29 | #include <linux/delay.h> |
30 | #include <linux/kthread.h> |
31 | #include <linux/wait.h> |
32 | #include <linux/slab.h> |
33 | #include <linux/rbtree.h> |
34 | #include <linux/memory.h> |
35 | #include <linux/mmu_notifier.h> |
36 | #include <linux/swap.h> |
37 | #include <linux/ksm.h> |
38 | #include <linux/hashtable.h> |
39 | #include <linux/freezer.h> |
40 | #include <linux/oom.h> |
41 | #include <linux/numa.h> |
42 | #include <linux/pagewalk.h> |
43 | |
44 | #include <asm/tlbflush.h> |
45 | #include "internal.h" |
46 | #include "mm_slot.h" |
47 | |
48 | #define CREATE_TRACE_POINTS |
49 | #include <trace/events/ksm.h> |
50 | |
51 | #ifdef CONFIG_NUMA |
52 | #define NUMA(x) (x) |
53 | #define DO_NUMA(x) do { (x); } while (0) |
54 | #else |
55 | #define NUMA(x) (0) |
56 | #define DO_NUMA(x) do { } while (0) |
57 | #endif |
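
/*
 * Illustration (hypothetical usage, for readers of this file): with
 * CONFIG_NUMA=n the two helpers above make per-node code compile away, e.g.
 *
 *	root = root_stable_tree + NUMA(nid);	// always root_stable_tree[0]
 *	DO_NUMA(rmap_item->nid = nid);		// expands to a no-op
 *
 * so the tree-walking code below can be written once for both configurations.
 */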
58 | |
59 | typedef u8 rmap_age_t; |
60 | |
61 | /** |
62 | * DOC: Overview |
63 | * |
64 | * A few notes about the KSM scanning process, |
65 | * to make it easier to understand the data structures below: |
66 | * |
67 | * In order to reduce excessive scanning, KSM sorts the memory pages by their |
68 | * contents into a data structure that holds pointers to the pages' locations. |
69 | * |
70 | * Since the contents of the pages may change at any moment, KSM cannot just |
71 | * insert the pages into a normal sorted tree and expect it to find anything. |
72 | * Therefore KSM uses two data structures - the stable and the unstable tree. |
73 | * |
74 | * The stable tree holds pointers to all the merged pages (ksm pages), sorted |
75 | * by their contents. Because each such page is write-protected, searching on |
76 | * this tree is fully assured to be working (except when pages are unmapped), |
77 | * and therefore this tree is called the stable tree. |
78 | * |
79 | * The stable tree node includes information required for reverse |
80 | * mapping from a KSM page to virtual addresses that map this page. |
81 | * |
82 | * In order to avoid large latencies of the rmap walks on KSM pages, |
83 | * KSM maintains two types of nodes in the stable tree: |
84 | * |
85 | * * the regular nodes that keep the reverse mapping structures in a |
86 | * linked list |
87 | * * the "chains" that link nodes ("dups") that represent the same |
88 | * write protected memory content, but each "dup" corresponds to a |
89 | * different KSM page copy of that content |
90 | * |
91 | * Internally, the regular nodes, "dups" and "chains" are represented |
92 | * using the same struct ksm_stable_node structure. |
93 | * |
94 | * In addition to the stable tree, KSM uses a second data structure called the |
95 | * unstable tree: this tree holds pointers to pages which have been found to |
96 | * be "unchanged for a period of time". The unstable tree sorts these pages |
97 | * by their contents, but since they are not write-protected, KSM cannot rely |
98 | * upon the unstable tree to work correctly - the unstable tree is liable to |
99 | * be corrupted as its contents are modified, and so it is called unstable. |
100 | * |
101 | * KSM solves this problem by several techniques: |
102 | * |
103 | * 1) The unstable tree is flushed every time KSM completes scanning all |
104 | * memory areas, and then the tree is rebuilt again from the beginning. |
105 | * 2) KSM will only insert into the unstable tree, pages whose hash value |
106 | * has not changed since the previous scan of all memory areas. |
107 | * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the |
108 | * colors of the nodes and not on their contents, assuring that even when |
109 | * the tree gets "corrupted" it won't get out of balance, so scanning time |
110 | * remains the same (also, searching and inserting nodes in an rbtree uses |
111 | * the same algorithm, so we have no overhead when we flush and rebuild). |
112 | * 4) KSM never flushes the stable tree, which means that even if it were to |
113 | * take 10 attempts to find a page in the unstable tree, once it is found, |
114 | * it is secured in the stable tree. (When we scan a new page, we first |
115 | * compare it against the stable tree, and then against the unstable tree.) |
116 | * |
117 | * If the merge_across_nodes tunable is unset, then KSM maintains multiple |
118 | * stable trees and multiple unstable trees: one of each for each NUMA node. |
119 | */ |
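
/*
 * For illustration only (userspace, not part of this file): memory areas are
 * handed to KSM with madvise(), and ksmd must be running before anything is
 * merged (e.g. "echo 1 > /sys/kernel/mm/ksm/run"). A minimal sketch:
 *
 *	#include <sys/mman.h>
 *
 *	size_t len = 16 * 4096;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (madvise(buf, len, MADV_MERGEABLE))	// register area with KSM
 *		perror("madvise");
 *	...
 *	madvise(buf, len, MADV_UNMERGEABLE);	// undo: breaks COW on ksm pages
 *
 * The pages of such VM_MERGEABLE areas are what the scanning code below
 * checksums and feeds into the unstable and stable trees described above.
 */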
120 | |
121 | /** |
122 | * struct ksm_mm_slot - ksm information per mm that is being scanned |
123 | * @slot: hash lookup from mm to mm_slot |
124 | * @rmap_list: head for this mm_slot's singly-linked list of rmap_items |
125 | */ |
126 | struct ksm_mm_slot { |
127 | struct mm_slot slot; |
128 | struct ksm_rmap_item *rmap_list; |
129 | }; |
130 | |
131 | /** |
132 | * struct ksm_scan - cursor for scanning |
133 | * @mm_slot: the current mm_slot we are scanning |
134 | * @address: the next address inside that to be scanned |
135 | * @rmap_list: link to the next rmap to be scanned in the rmap_list |
136 | * @seqnr: count of completed full scans (needed when removing unstable node) |
137 | * |
138 | * There is only the one ksm_scan instance of this cursor structure. |
139 | */ |
140 | struct ksm_scan { |
141 | struct ksm_mm_slot *mm_slot; |
142 | unsigned long address; |
143 | struct ksm_rmap_item **rmap_list; |
144 | unsigned long seqnr; |
145 | }; |
146 | |
147 | /** |
148 | * struct ksm_stable_node - node of the stable rbtree |
149 | * @node: rb node of this ksm page in the stable tree |
150 | * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list |
151 | * @hlist_dup: linked into the stable_node->hlist with a stable_node chain |
152 | * @list: linked into migrate_nodes, pending placement in the proper node tree |
153 | * @hlist: hlist head of rmap_items using this ksm page |
154 | * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid) |
155 | * @chain_prune_time: time of the last full garbage collection |
156 | * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN |
157 | * @nid: NUMA node id of stable tree in which linked (may not match kpfn) |
158 | */ |
159 | struct ksm_stable_node { |
160 | union { |
161 | struct rb_node node; /* when node of stable tree */ |
162 | struct { /* when listed for migration */ |
163 | struct list_head *head; |
164 | struct { |
165 | struct hlist_node hlist_dup; |
166 | struct list_head list; |
167 | }; |
168 | }; |
169 | }; |
170 | struct hlist_head hlist; |
171 | union { |
172 | unsigned long kpfn; |
173 | unsigned long chain_prune_time; |
174 | }; |
175 | /* |
176 | * STABLE_NODE_CHAIN can be any negative number in |
177 | * rmap_hlist_len negative range, but better not -1 to be able |
178 | * to reliably detect underflows. |
179 | */ |
180 | #define STABLE_NODE_CHAIN -1024 |
181 | int rmap_hlist_len; |
182 | #ifdef CONFIG_NUMA |
183 | int nid; |
184 | #endif |
185 | }; |
186 | |
187 | /** |
188 | * struct ksm_rmap_item - reverse mapping item for virtual addresses |
189 | * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list |
190 | * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree |
191 | * @nid: NUMA node id of unstable tree in which linked (may not match page) |
192 | * @mm: the memory structure this rmap_item is pointing into |
193 | * @address: the virtual address this rmap_item tracks (+ flags in low bits) |
194 | * @oldchecksum: previous checksum of the page at that virtual address |
195 | * @node: rb node of this rmap_item in the unstable tree |
196 | * @head: pointer to stable_node heading this list in the stable tree |
197 | * @hlist: link into hlist of rmap_items hanging off that stable_node |
198 | * @age: number of scan iterations since creation |
199 | * @remaining_skips: how many scans to skip |
200 | */ |
201 | struct ksm_rmap_item { |
202 | struct ksm_rmap_item *rmap_list; |
203 | union { |
204 | struct anon_vma *anon_vma; /* when stable */ |
205 | #ifdef CONFIG_NUMA |
206 | int nid; /* when node of unstable tree */ |
207 | #endif |
208 | }; |
209 | struct mm_struct *mm; |
210 | unsigned long address; /* + low bits used for flags below */ |
211 | unsigned int oldchecksum; /* when unstable */ |
212 | rmap_age_t age; |
213 | rmap_age_t remaining_skips; |
214 | union { |
215 | struct rb_node node; /* when node of unstable tree */ |
216 | struct { /* when listed from stable tree */ |
217 | struct ksm_stable_node *head; |
218 | struct hlist_node hlist; |
219 | }; |
220 | }; |
221 | }; |
222 | |
223 | #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ |
224 | #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */ |
225 | #define STABLE_FLAG 0x200 /* is listed from the stable tree */ |
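
/*
 * For orientation: rmap_item->address keeps the page-aligned virtual address
 * in its high bits and packs state into the low bits, so throughout this file:
 *
 *	rmap_item->address & PAGE_MASK	   // the tracked virtual address
 *	rmap_item->address & UNSTABLE_FLAG // node of the unstable tree
 *	rmap_item->address & STABLE_FLAG   // hangs off a stable tree node
 *	rmap_item->address & SEQNR_MASK	   // scan seqnr at unstable-tree insert,
 *					   // compared with ksm_scan.seqnr to
 *					   // spot items left over from an
 *					   // earlier full scan
 */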
226 | |
227 | /* The stable and unstable tree heads */ |
228 | static struct rb_root one_stable_tree[1] = { RB_ROOT }; |
229 | static struct rb_root one_unstable_tree[1] = { RB_ROOT }; |
230 | static struct rb_root *root_stable_tree = one_stable_tree; |
231 | static struct rb_root *root_unstable_tree = one_unstable_tree; |
232 | |
233 | /* Recently migrated nodes of stable tree, pending proper placement */ |
234 | static LIST_HEAD(migrate_nodes); |
235 | #define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev) |
236 | |
237 | #define MM_SLOTS_HASH_BITS 10 |
238 | static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); |
239 | |
240 | static struct ksm_mm_slot ksm_mm_head = { |
241 | .slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node), |
242 | }; |
243 | static struct ksm_scan ksm_scan = { |
244 | .mm_slot = &ksm_mm_head, |
245 | }; |
246 | |
247 | static struct kmem_cache *rmap_item_cache; |
248 | static struct kmem_cache *stable_node_cache; |
249 | static struct kmem_cache *mm_slot_cache; |
250 | |
251 | /* The number of pages scanned */ |
252 | static unsigned long ksm_pages_scanned; |
253 | |
254 | /* The number of nodes in the stable tree */ |
255 | static unsigned long ksm_pages_shared; |
256 | |
257 | /* The number of page slots additionally sharing those nodes */ |
258 | static unsigned long ksm_pages_sharing; |
259 | |
260 | /* The number of nodes in the unstable tree */ |
261 | static unsigned long ksm_pages_unshared; |
262 | |
263 | /* The number of rmap_items in use: to calculate pages_volatile */ |
264 | static unsigned long ksm_rmap_items; |
265 | |
266 | /* The number of stable_node chains */ |
267 | static unsigned long ksm_stable_node_chains; |
268 | |
269 | /* The number of stable_node dups linked to the stable_node chains */ |
270 | static unsigned long ksm_stable_node_dups; |
271 | |
272 | /* Delay in pruning stale stable_node_dups in the stable_node_chains */ |
273 | static unsigned int ksm_stable_node_chains_prune_millisecs = 2000; |
274 | |
275 | /* Maximum number of page slots sharing a stable node */ |
276 | static int ksm_max_page_sharing = 256; |
277 | |
278 | /* Number of pages ksmd should scan in one batch */ |
279 | static unsigned int ksm_thread_pages_to_scan = 100; |
280 | |
281 | /* Milliseconds ksmd should sleep between batches */ |
282 | static unsigned int ksm_thread_sleep_millisecs = 20; |
283 | |
284 | /* Checksum of an empty (zeroed) page */ |
285 | static unsigned int zero_checksum __read_mostly; |
286 | |
287 | /* Whether to merge empty (zeroed) pages with actual zero pages */ |
288 | static bool ksm_use_zero_pages __read_mostly; |
289 | |
290 | /* Skip pages that couldn't be de-duplicated previously */ |
291 | /* Default to true at least temporarily, for testing */ |
292 | static bool ksm_smart_scan = true; |
293 | |
294 | /* The number of zero pages which is placed by KSM */ |
295 | unsigned long ksm_zero_pages; |
296 | |
297 | /* The number of pages that have been skipped due to "smart scanning" */ |
298 | static unsigned long ksm_pages_skipped; |
299 | |
300 | #ifdef CONFIG_NUMA |
301 | /* Zeroed when merging across nodes is not allowed */ |
302 | static unsigned int ksm_merge_across_nodes = 1; |
303 | static int ksm_nr_node_ids = 1; |
304 | #else |
305 | #define ksm_merge_across_nodes 1U |
306 | #define ksm_nr_node_ids 1 |
307 | #endif |
308 | |
309 | #define KSM_RUN_STOP 0 |
310 | #define KSM_RUN_MERGE 1 |
311 | #define KSM_RUN_UNMERGE 2 |
312 | #define KSM_RUN_OFFLINE 4 |
313 | static unsigned long ksm_run = KSM_RUN_STOP; |
314 | static void wait_while_offlining(void); |
315 | |
316 | static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait); |
317 | static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait); |
318 | static DEFINE_MUTEX(ksm_thread_mutex); |
319 | static DEFINE_SPINLOCK(ksm_mmlist_lock); |
320 | |
321 | #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ |
322 | sizeof(struct __struct), __alignof__(struct __struct),\ |
323 | (__flags), NULL) |
324 | |
325 | static int __init ksm_slab_init(void) |
326 | { |
327 | rmap_item_cache = KSM_KMEM_CACHE(ksm_rmap_item, 0); |
328 | if (!rmap_item_cache) |
329 | goto out; |
330 | |
331 | stable_node_cache = KSM_KMEM_CACHE(ksm_stable_node, 0); |
332 | if (!stable_node_cache) |
333 | goto out_free1; |
334 | |
335 | mm_slot_cache = KSM_KMEM_CACHE(ksm_mm_slot, 0); |
336 | if (!mm_slot_cache) |
337 | goto out_free2; |
338 | |
339 | return 0; |
340 | |
341 | out_free2: |
342 | 	kmem_cache_destroy(stable_node_cache); |
343 | out_free1: |
344 | 	kmem_cache_destroy(rmap_item_cache); |
345 | out: |
346 | return -ENOMEM; |
347 | } |
348 | |
349 | static void __init ksm_slab_free(void) |
350 | { |
351 | 	kmem_cache_destroy(mm_slot_cache); |
352 | 	kmem_cache_destroy(stable_node_cache); |
353 | 	kmem_cache_destroy(rmap_item_cache); |
354 | mm_slot_cache = NULL; |
355 | } |
356 | |
357 | static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain) |
358 | { |
359 | return chain->rmap_hlist_len == STABLE_NODE_CHAIN; |
360 | } |
361 | |
362 | static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup) |
363 | { |
364 | return dup->head == STABLE_NODE_DUP_HEAD; |
365 | } |
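
/*
 * For orientation: the two helpers above are how the three kinds of
 * ksm_stable_node described in the overview are told apart. A "chain" is
 * recognized purely by the STABLE_NODE_CHAIN marker in rmap_hlist_len, a
 * "dup" purely by its ->head pointing at the STABLE_NODE_DUP_HEAD sentinel,
 * and a node for which neither test is true is a regular stable-tree node
 * linked into the rbtree by its own rb_node.
 */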
366 | |
367 | static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup, |
368 | struct ksm_stable_node *chain) |
369 | { |
370 | VM_BUG_ON(is_stable_node_dup(dup)); |
371 | dup->head = STABLE_NODE_DUP_HEAD; |
372 | VM_BUG_ON(!is_stable_node_chain(chain)); |
373 | 	hlist_add_head(&dup->hlist_dup, &chain->hlist); |
374 | ksm_stable_node_dups++; |
375 | } |
376 | |
377 | static inline void __stable_node_dup_del(struct ksm_stable_node *dup) |
378 | { |
379 | VM_BUG_ON(!is_stable_node_dup(dup)); |
380 | 	hlist_del(&dup->hlist_dup); |
381 | ksm_stable_node_dups--; |
382 | } |
383 | |
384 | static inline void stable_node_dup_del(struct ksm_stable_node *dup) |
385 | { |
386 | VM_BUG_ON(is_stable_node_chain(dup)); |
387 | if (is_stable_node_dup(dup)) |
388 | __stable_node_dup_del(dup); |
389 | else |
390 | rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid)); |
391 | #ifdef CONFIG_DEBUG_VM |
392 | dup->head = NULL; |
393 | #endif |
394 | } |
395 | |
396 | static inline struct ksm_rmap_item *alloc_rmap_item(void) |
397 | { |
398 | struct ksm_rmap_item *rmap_item; |
399 | |
400 | 	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL | |
401 | __GFP_NORETRY | __GFP_NOWARN); |
402 | if (rmap_item) |
403 | ksm_rmap_items++; |
404 | return rmap_item; |
405 | } |
406 | |
407 | static inline void free_rmap_item(struct ksm_rmap_item *rmap_item) |
408 | { |
409 | ksm_rmap_items--; |
410 | rmap_item->mm->ksm_rmap_items--; |
411 | rmap_item->mm = NULL; /* debug safety */ |
412 | 	kmem_cache_free(rmap_item_cache, rmap_item); |
413 | } |
414 | |
415 | static inline struct ksm_stable_node *alloc_stable_node(void) |
416 | { |
417 | /* |
418 | * The allocation can take too long with GFP_KERNEL when memory is under |
419 | * pressure, which may lead to hung task warnings. Adding __GFP_HIGH |
420 | * grants access to memory reserves, helping to avoid this problem. |
421 | */ |
422 | 	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH); |
423 | } |
424 | |
425 | static inline void free_stable_node(struct ksm_stable_node *stable_node) |
426 | { |
427 | VM_BUG_ON(stable_node->rmap_hlist_len && |
428 | !is_stable_node_chain(stable_node)); |
429 | 	kmem_cache_free(stable_node_cache, stable_node); |
430 | } |
431 | |
432 | /* |
433 | * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's |
434 | * page tables after it has passed through ksm_exit() - which, if necessary, |
435 | * takes mmap_lock briefly to serialize against them. ksm_exit() does not set |
436 | * a special flag: they can just back out as soon as mm_users goes to zero. |
437 | * ksm_test_exit() is used throughout to make this test for exit: in some |
438 | * places for correctness, in some places just to avoid unnecessary work. |
439 | */ |
440 | static inline bool ksm_test_exit(struct mm_struct *mm) |
441 | { |
442 | 	return atomic_read(&mm->mm_users) == 0; |
443 | } |
444 | |
445 | static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next, |
446 | struct mm_walk *walk) |
447 | { |
448 | struct page *page = NULL; |
449 | spinlock_t *ptl; |
450 | pte_t *pte; |
451 | pte_t ptent; |
452 | int ret; |
453 | |
454 | 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); |
455 | 	if (!pte) |
456 | 		return 0; |
457 | 	ptent = ptep_get(pte); |
458 | 	if (pte_present(ptent)) { |
459 | 		page = vm_normal_page(walk->vma, addr, ptent); |
460 | 	} else if (!pte_none(ptent)) { |
461 | 		swp_entry_t entry = pte_to_swp_entry(ptent); |
462 | |
463 | /* |
464 | * As KSM pages remain KSM pages until freed, no need to wait |
465 | * here for migration to end. |
466 | */ |
467 | if (is_migration_entry(entry)) |
468 | page = pfn_swap_entry_to_page(entry); |
469 | } |
470 | 	/* return 1 if the page is a normal ksm page or KSM-placed zero page */ |
471 | ret = (page && PageKsm(page)) || is_ksm_zero_pte(*pte); |
472 | pte_unmap_unlock(pte, ptl); |
473 | return ret; |
474 | } |
475 | |
476 | static const struct mm_walk_ops break_ksm_ops = { |
477 | .pmd_entry = break_ksm_pmd_entry, |
478 | .walk_lock = PGWALK_RDLOCK, |
479 | }; |
480 | |
481 | static const struct mm_walk_ops break_ksm_lock_vma_ops = { |
482 | .pmd_entry = break_ksm_pmd_entry, |
483 | .walk_lock = PGWALK_WRLOCK, |
484 | }; |
485 | |
486 | /* |
487 | * We use break_ksm to break COW on a ksm page by triggering unsharing, |
488 | * such that the ksm page will get replaced by an exclusive anonymous page. |
489 | * |
490 | * We take great care only to touch a ksm page, in a VM_MERGEABLE vma, |
491 | * in case the application has unmapped and remapped mm,addr meanwhile. |
492 | * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP |
493 | * mmap of /dev/mem, where we would not want to touch it. |
494 | * |
495 | * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context |
496 | * of the process that owns 'vma'. We also do not want to enforce |
497 | * protection keys here anyway. |
498 | */ |
499 | static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma) |
500 | { |
501 | vm_fault_t ret = 0; |
502 | const struct mm_walk_ops *ops = lock_vma ? |
503 | &break_ksm_lock_vma_ops : &break_ksm_ops; |
504 | |
505 | do { |
506 | int ksm_page; |
507 | |
508 | cond_resched(); |
509 | 		ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL); |
510 | if (WARN_ON_ONCE(ksm_page < 0)) |
511 | return ksm_page; |
512 | if (!ksm_page) |
513 | return 0; |
514 | 		ret = handle_mm_fault(vma, addr, |
515 | 				      FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE, |
516 | NULL); |
517 | } while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM))); |
518 | /* |
519 | * We must loop until we no longer find a KSM page because |
520 | * handle_mm_fault() may back out if there's any difficulty e.g. if |
521 | * pte accessed bit gets updated concurrently. |
522 | * |
523 | * VM_FAULT_SIGBUS could occur if we race with truncation of the |
524 | * backing file, which also invalidates anonymous pages: that's |
525 | * okay, that truncation will have unmapped the PageKsm for us. |
526 | * |
527 | * VM_FAULT_OOM: at the time of writing (late July 2009), setting |
528 | * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the |
529 | * current task has TIF_MEMDIE set, and will be OOM killed on return |
530 | * to user; and ksmd, having no mm, would never be chosen for that. |
531 | * |
532 | * But if the mm is in a limited mem_cgroup, then the fault may fail |
533 | * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and |
534 | * even ksmd can fail in this way - though it's usually breaking ksm |
535 | * just to undo a merge it made a moment before, so unlikely to oom. |
536 | * |
537 | * That's a pity: we might therefore have more kernel pages allocated |
538 | * than we're counting as nodes in the stable tree; but ksm_do_scan |
539 | * will retry to break_cow on each pass, so should recover the page |
540 | * in due course. The important thing is to not let VM_MERGEABLE |
541 | * be cleared while any such pages might remain in the area. |
542 | */ |
543 | return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; |
544 | } |
545 | |
546 | static bool vma_ksm_compatible(struct vm_area_struct *vma) |
547 | { |
548 | if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE | VM_PFNMAP | |
549 | VM_IO | VM_DONTEXPAND | VM_HUGETLB | |
550 | VM_MIXEDMAP)) |
551 | return false; /* just ignore the advice */ |
552 | |
553 | if (vma_is_dax(vma)) |
554 | return false; |
555 | |
556 | #ifdef VM_SAO |
557 | if (vma->vm_flags & VM_SAO) |
558 | return false; |
559 | #endif |
560 | #ifdef VM_SPARC_ADI |
561 | if (vma->vm_flags & VM_SPARC_ADI) |
562 | return false; |
563 | #endif |
564 | |
565 | return true; |
566 | } |
567 | |
568 | static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm, |
569 | unsigned long addr) |
570 | { |
571 | struct vm_area_struct *vma; |
572 | if (ksm_test_exit(mm)) |
573 | return NULL; |
574 | vma = vma_lookup(mm, addr); |
575 | if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) |
576 | return NULL; |
577 | return vma; |
578 | } |
579 | |
580 | static void break_cow(struct ksm_rmap_item *rmap_item) |
581 | { |
582 | struct mm_struct *mm = rmap_item->mm; |
583 | unsigned long addr = rmap_item->address; |
584 | struct vm_area_struct *vma; |
585 | |
586 | /* |
587 | * It is not an accident that whenever we want to break COW |
588 | * to undo, we also need to drop a reference to the anon_vma. |
589 | */ |
590 | 	put_anon_vma(rmap_item->anon_vma); |
591 | |
592 | mmap_read_lock(mm); |
593 | vma = find_mergeable_vma(mm, addr); |
594 | if (vma) |
595 | 		break_ksm(vma, addr, false); |
596 | mmap_read_unlock(mm); |
597 | } |
598 | |
599 | static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item) |
600 | { |
601 | struct mm_struct *mm = rmap_item->mm; |
602 | unsigned long addr = rmap_item->address; |
603 | struct vm_area_struct *vma; |
604 | struct page *page; |
605 | |
606 | mmap_read_lock(mm); |
607 | vma = find_mergeable_vma(mm, addr); |
608 | if (!vma) |
609 | goto out; |
610 | |
611 | 	page = follow_page(vma, addr, FOLL_GET); |
612 | 	if (IS_ERR_OR_NULL(page)) |
613 | goto out; |
614 | if (is_zone_device_page(page)) |
615 | goto out_putpage; |
616 | if (PageAnon(page)) { |
617 | 		flush_anon_page(vma, page, addr); |
618 | flush_dcache_page(page); |
619 | } else { |
620 | out_putpage: |
621 | put_page(page); |
622 | out: |
623 | page = NULL; |
624 | } |
625 | mmap_read_unlock(mm); |
626 | return page; |
627 | } |
628 | |
629 | /* |
630 | * This helper is used for getting right index into array of tree roots. |
631 | * When merge_across_nodes knob is set to 1, there are only two rb-trees for |
632 | * stable and unstable pages from all nodes with roots in index 0. Otherwise, |
633 | * every node has its own stable and unstable tree. |
634 | */ |
635 | static inline int get_kpfn_nid(unsigned long kpfn) |
636 | { |
637 | return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn)); |
638 | } |
639 | |
640 | static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup, |
641 | struct rb_root *root) |
642 | { |
643 | struct ksm_stable_node *chain = alloc_stable_node(); |
644 | VM_BUG_ON(is_stable_node_chain(dup)); |
645 | if (likely(chain)) { |
646 | INIT_HLIST_HEAD(&chain->hlist); |
647 | chain->chain_prune_time = jiffies; |
648 | chain->rmap_hlist_len = STABLE_NODE_CHAIN; |
649 | #if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA) |
650 | chain->nid = NUMA_NO_NODE; /* debug */ |
651 | #endif |
652 | ksm_stable_node_chains++; |
653 | |
654 | /* |
655 | * Put the stable node chain in the first dimension of |
656 | * the stable tree and at the same time remove the old |
657 | * stable node. |
658 | */ |
659 | 		rb_replace_node(&dup->node, &chain->node, root); |
660 | |
661 | /* |
662 | * Move the old stable node to the second dimension |
663 | * queued in the hlist_dup. The invariant is that all |
664 | * dup stable_nodes in the chain->hlist point to pages |
665 | * that are write protected and have the exact same |
666 | * content. |
667 | */ |
668 | stable_node_chain_add_dup(dup, chain); |
669 | } |
670 | return chain; |
671 | } |
672 | |
673 | static inline void free_stable_node_chain(struct ksm_stable_node *chain, |
674 | struct rb_root *root) |
675 | { |
676 | rb_erase(&chain->node, root); |
677 | 	free_stable_node(chain); |
678 | ksm_stable_node_chains--; |
679 | } |
680 | |
681 | static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node) |
682 | { |
683 | struct ksm_rmap_item *rmap_item; |
684 | |
685 | /* check it's not STABLE_NODE_CHAIN or negative */ |
686 | BUG_ON(stable_node->rmap_hlist_len < 0); |
687 | |
688 | hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { |
689 | if (rmap_item->hlist.next) { |
690 | ksm_pages_sharing--; |
691 | 			trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm); |
692 | } else { |
693 | ksm_pages_shared--; |
694 | } |
695 | |
696 | rmap_item->mm->ksm_merging_pages--; |
697 | |
698 | VM_BUG_ON(stable_node->rmap_hlist_len <= 0); |
699 | stable_node->rmap_hlist_len--; |
700 | 		put_anon_vma(rmap_item->anon_vma); |
701 | rmap_item->address &= PAGE_MASK; |
702 | cond_resched(); |
703 | } |
704 | |
705 | /* |
706 | * We need the second aligned pointer of the migrate_nodes |
707 | * list_head to stay clear from the rb_parent_color union |
708 | * (aligned and different than any node) and also different |
709 | * from &migrate_nodes. This will verify that future list.h changes |
710 | * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it. |
711 | */ |
712 | BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes); |
713 | BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1); |
714 | |
715 | 	trace_ksm_remove_ksm_page(stable_node->kpfn); |
716 | 	if (stable_node->head == &migrate_nodes) |
717 | 		list_del(&stable_node->list); |
718 | 	else |
719 | 		stable_node_dup_del(stable_node); |
720 | free_stable_node(stable_node); |
721 | } |
722 | |
723 | enum get_ksm_page_flags { |
724 | GET_KSM_PAGE_NOLOCK, |
725 | GET_KSM_PAGE_LOCK, |
726 | GET_KSM_PAGE_TRYLOCK |
727 | }; |
728 | |
729 | /* |
730 | * get_ksm_page: checks if the page indicated by the stable node |
731 | * is still its ksm page, despite having held no reference to it. |
732 | * In which case we can trust the content of the page, and it |
733 | * returns the gotten page; but if the page has now been zapped, |
734 | * remove the stale node from the stable tree and return NULL. |
735 | * But beware, the stable node's page might be being migrated. |
736 | * |
737 | * You would expect the stable_node to hold a reference to the ksm page. |
738 | * But if it increments the page's count, swapping out has to wait for |
739 | * ksmd to come around again before it can free the page, which may take |
740 | * seconds or even minutes: much too unresponsive. So instead we use a |
741 | * "keyhole reference": access to the ksm page from the stable node peeps |
742 | * out through its keyhole to see if that page still holds the right key, |
743 | * pointing back to this stable node. This relies on freeing a PageAnon |
744 | * page to reset its page->mapping to NULL, and relies on no other use of |
745 | * a page to put something that might look like our key in page->mapping. |
746 |  * Note: it is possible that get_ksm_page() will return NULL one moment, then the page the next, if the page is in between page_ref_freeze() and page_ref_unfreeze(): this shouldn't be a problem anywhere, the page is on its way to being freed; but it is an anomaly to bear in mind. |
747 | */ |
748 | static struct page *get_ksm_page(struct ksm_stable_node *stable_node, |
749 | enum get_ksm_page_flags flags) |
750 | { |
751 | struct page *page; |
752 | void *expected_mapping; |
753 | unsigned long kpfn; |
754 | |
755 | expected_mapping = (void *)((unsigned long)stable_node | |
756 | PAGE_MAPPING_KSM); |
757 | again: |
758 | kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */ |
759 | page = pfn_to_page(kpfn); |
760 | if (READ_ONCE(page->mapping) != expected_mapping) |
761 | goto stale; |
762 | |
763 | /* |
764 | * We cannot do anything with the page while its refcount is 0. |
765 | * Usually 0 means free, or tail of a higher-order page: in which |
766 | * case this node is no longer referenced, and should be freed; |
767 | * however, it might mean that the page is under page_ref_freeze(). |
768 | * The __remove_mapping() case is easy, again the node is now stale; |
769 | * the same is in reuse_ksm_page() case; but if page is swapcache |
770 | * in folio_migrate_mapping(), it might still be our page, |
771 | * in which case it's essential to keep the node. |
772 | */ |
773 | while (!get_page_unless_zero(page)) { |
774 | /* |
775 | * Another check for page->mapping != expected_mapping would |
776 | * work here too. We have chosen the !PageSwapCache test to |
777 | * optimize the common case, when the page is or is about to |
778 | * be freed: PageSwapCache is cleared (under spin_lock_irq) |
779 | * in the ref_freeze section of __remove_mapping(); but Anon |
780 | * page->mapping reset to NULL later, in free_pages_prepare(). |
781 | */ |
782 | if (!PageSwapCache(page)) |
783 | goto stale; |
784 | cpu_relax(); |
785 | } |
786 | |
787 | if (READ_ONCE(page->mapping) != expected_mapping) { |
788 | put_page(page); |
789 | goto stale; |
790 | } |
791 | |
792 | if (flags == GET_KSM_PAGE_TRYLOCK) { |
793 | if (!trylock_page(page)) { |
794 | put_page(page); |
795 | 			return ERR_PTR(-EBUSY); |
796 | } |
797 | } else if (flags == GET_KSM_PAGE_LOCK) |
798 | lock_page(page); |
799 | |
800 | if (flags != GET_KSM_PAGE_NOLOCK) { |
801 | if (READ_ONCE(page->mapping) != expected_mapping) { |
802 | unlock_page(page); |
803 | put_page(page); |
804 | goto stale; |
805 | } |
806 | } |
807 | return page; |
808 | |
809 | stale: |
810 | /* |
811 | * We come here from above when page->mapping or !PageSwapCache |
812 | * suggests that the node is stale; but it might be under migration. |
813 | * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(), |
814 | * before checking whether node->kpfn has been changed. |
815 | */ |
816 | smp_rmb(); |
817 | if (READ_ONCE(stable_node->kpfn) != kpfn) |
818 | goto again; |
819 | remove_node_from_stable_tree(stable_node); |
820 | return NULL; |
821 | } |
822 | |
823 | /* |
824 | * Removing rmap_item from stable or unstable tree. |
825 | * This function will clean the information from the stable/unstable tree. |
826 | */ |
827 | static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item) |
828 | { |
829 | if (rmap_item->address & STABLE_FLAG) { |
830 | struct ksm_stable_node *stable_node; |
831 | struct page *page; |
832 | |
833 | stable_node = rmap_item->head; |
834 | 		page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK); |
835 | if (!page) |
836 | goto out; |
837 | |
838 | 		hlist_del(&rmap_item->hlist); |
839 | unlock_page(page); |
840 | put_page(page); |
841 | |
842 | 		if (!hlist_empty(&stable_node->hlist)) |
843 | ksm_pages_sharing--; |
844 | else |
845 | ksm_pages_shared--; |
846 | |
847 | rmap_item->mm->ksm_merging_pages--; |
848 | |
849 | VM_BUG_ON(stable_node->rmap_hlist_len <= 0); |
850 | stable_node->rmap_hlist_len--; |
851 | |
852 | 		put_anon_vma(rmap_item->anon_vma); |
853 | rmap_item->head = NULL; |
854 | rmap_item->address &= PAGE_MASK; |
855 | |
856 | } else if (rmap_item->address & UNSTABLE_FLAG) { |
857 | unsigned char age; |
858 | /* |
859 | * Usually ksmd can and must skip the rb_erase, because |
860 | * root_unstable_tree was already reset to RB_ROOT. |
861 | * But be careful when an mm is exiting: do the rb_erase |
862 | * if this rmap_item was inserted by this scan, rather |
863 | * than left over from before. |
864 | */ |
865 | age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); |
866 | BUG_ON(age > 1); |
867 | if (!age) |
868 | rb_erase(&rmap_item->node, |
869 | root_unstable_tree + NUMA(rmap_item->nid)); |
870 | ksm_pages_unshared--; |
871 | rmap_item->address &= PAGE_MASK; |
872 | } |
873 | out: |
874 | cond_resched(); /* we're called from many long loops */ |
875 | } |
876 | |
877 | static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list) |
878 | { |
879 | while (*rmap_list) { |
880 | struct ksm_rmap_item *rmap_item = *rmap_list; |
881 | *rmap_list = rmap_item->rmap_list; |
882 | remove_rmap_item_from_tree(rmap_item); |
883 | free_rmap_item(rmap_item); |
884 | } |
885 | } |
886 | |
887 | /* |
888 | * Though it's very tempting to unmerge rmap_items from stable tree rather |
889 | * than check every pte of a given vma, the locking doesn't quite work for |
890 | * that - an rmap_item is assigned to the stable tree after inserting ksm |
891 | * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing |
892 | * rmap_items from parent to child at fork time (so as not to waste time |
893 | * if exit comes before the next scan reaches it). |
894 | * |
895 | * Similarly, although we'd like to remove rmap_items (so updating counts |
896 | * and freeing memory) when unmerging an area, it's easier to leave that |
897 | * to the next pass of ksmd - consider, for example, how ksmd might be |
898 | * in cmp_and_merge_page on one of the rmap_items we would be removing. |
899 | */ |
900 | static int unmerge_ksm_pages(struct vm_area_struct *vma, |
901 | unsigned long start, unsigned long end, bool lock_vma) |
902 | { |
903 | unsigned long addr; |
904 | int err = 0; |
905 | |
906 | for (addr = start; addr < end && !err; addr += PAGE_SIZE) { |
907 | 		if (ksm_test_exit(vma->vm_mm)) |
908 | break; |
909 | if (signal_pending(current)) |
910 | err = -ERESTARTSYS; |
911 | else |
912 | err = break_ksm(vma, addr, lock_vma); |
913 | } |
914 | return err; |
915 | } |
916 | |
917 | static inline struct ksm_stable_node *folio_stable_node(struct folio *folio) |
918 | { |
919 | return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL; |
920 | } |
921 | |
922 | static inline struct ksm_stable_node *page_stable_node(struct page *page) |
923 | { |
924 | return folio_stable_node(page_folio(page)); |
925 | } |
926 | |
927 | static inline void set_page_stable_node(struct page *page, |
928 | struct ksm_stable_node *stable_node) |
929 | { |
930 | VM_BUG_ON_PAGE(PageAnon(page) && PageAnonExclusive(page), page); |
931 | page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); |
932 | } |
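
/*
 * For illustration: a KSM page reuses page->mapping to hold its
 * struct ksm_stable_node pointer tagged with PAGE_MAPPING_KSM, instead of the
 * anon_vma pointer an ordinary anonymous page carries. So after
 *
 *	set_page_stable_node(page, stable_node);
 *
 * folio_test_ksm() sees the tag and page_stable_node(page) returns
 * stable_node again; get_ksm_page() above builds the same tagged value as
 * "expected_mapping" for its keyhole check.
 */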
933 | |
934 | #ifdef CONFIG_SYSFS |
935 | /* |
936 | * Only called through the sysfs control interface: |
937 | */ |
938 | static int remove_stable_node(struct ksm_stable_node *stable_node) |
939 | { |
940 | struct page *page; |
941 | int err; |
942 | |
943 | 	page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK); |
944 | if (!page) { |
945 | /* |
946 | * get_ksm_page did remove_node_from_stable_tree itself. |
947 | */ |
948 | return 0; |
949 | } |
950 | |
951 | /* |
952 | * Page could be still mapped if this races with __mmput() running in |
953 | * between ksm_exit() and exit_mmap(). Just refuse to let |
954 | * merge_across_nodes/max_page_sharing be switched. |
955 | */ |
956 | err = -EBUSY; |
957 | if (!page_mapped(page)) { |
958 | /* |
959 | * The stable node did not yet appear stale to get_ksm_page(), |
960 | * since that allows for an unmapped ksm page to be recognized |
961 | * right up until it is freed; but the node is safe to remove. |
962 | * This page might be in an LRU cache waiting to be freed, |
963 | * or it might be PageSwapCache (perhaps under writeback), |
964 | * or it might have been removed from swapcache a moment ago. |
965 | */ |
966 | set_page_stable_node(page, NULL); |
967 | remove_node_from_stable_tree(stable_node); |
968 | err = 0; |
969 | } |
970 | |
971 | unlock_page(page); |
972 | put_page(page); |
973 | return err; |
974 | } |
975 | |
976 | static int remove_stable_node_chain(struct ksm_stable_node *stable_node, |
977 | struct rb_root *root) |
978 | { |
979 | struct ksm_stable_node *dup; |
980 | struct hlist_node *hlist_safe; |
981 | |
982 | 	if (!is_stable_node_chain(stable_node)) { |
983 | VM_BUG_ON(is_stable_node_dup(stable_node)); |
984 | if (remove_stable_node(stable_node)) |
985 | return true; |
986 | else |
987 | return false; |
988 | } |
989 | |
990 | hlist_for_each_entry_safe(dup, hlist_safe, |
991 | &stable_node->hlist, hlist_dup) { |
992 | VM_BUG_ON(!is_stable_node_dup(dup)); |
993 | 		if (remove_stable_node(dup)) |
994 | return true; |
995 | } |
996 | BUG_ON(!hlist_empty(&stable_node->hlist)); |
997 | 	free_stable_node_chain(stable_node, root); |
998 | return false; |
999 | } |
1000 | |
1001 | static int remove_all_stable_nodes(void) |
1002 | { |
1003 | struct ksm_stable_node *stable_node, *next; |
1004 | int nid; |
1005 | int err = 0; |
1006 | |
1007 | for (nid = 0; nid < ksm_nr_node_ids; nid++) { |
1008 | while (root_stable_tree[nid].rb_node) { |
1009 | stable_node = rb_entry(root_stable_tree[nid].rb_node, |
1010 | struct ksm_stable_node, node); |
1011 | if (remove_stable_node_chain(stable_node, |
1012 | 						     root_stable_tree + nid)) { |
1013 | err = -EBUSY; |
1014 | break; /* proceed to next nid */ |
1015 | } |
1016 | cond_resched(); |
1017 | } |
1018 | } |
1019 | list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { |
1020 | if (remove_stable_node(stable_node)) |
1021 | err = -EBUSY; |
1022 | cond_resched(); |
1023 | } |
1024 | return err; |
1025 | } |
1026 | |
1027 | static int unmerge_and_remove_all_rmap_items(void) |
1028 | { |
1029 | struct ksm_mm_slot *mm_slot; |
1030 | struct mm_slot *slot; |
1031 | struct mm_struct *mm; |
1032 | struct vm_area_struct *vma; |
1033 | int err = 0; |
1034 | |
1035 | 	spin_lock(&ksm_mmlist_lock); |
1036 | slot = list_entry(ksm_mm_head.slot.mm_node.next, |
1037 | struct mm_slot, mm_node); |
1038 | ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); |
1039 | 	spin_unlock(&ksm_mmlist_lock); |
1040 | |
1041 | for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head; |
1042 | mm_slot = ksm_scan.mm_slot) { |
1043 | VMA_ITERATOR(vmi, mm_slot->slot.mm, 0); |
1044 | |
1045 | mm = mm_slot->slot.mm; |
1046 | mmap_read_lock(mm); |
1047 | |
1048 | /* |
1049 | * Exit right away if mm is exiting to avoid lockdep issue in |
1050 | * the maple tree |
1051 | */ |
1052 | if (ksm_test_exit(mm)) |
1053 | goto mm_exiting; |
1054 | |
1055 | for_each_vma(vmi, vma) { |
1056 | if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) |
1057 | continue; |
1058 | err = unmerge_ksm_pages(vma, |
1059 | 						vma->vm_start, vma->vm_end, false); |
1060 | if (err) |
1061 | goto error; |
1062 | } |
1063 | |
1064 | mm_exiting: |
1065 | 		remove_trailing_rmap_items(&mm_slot->rmap_list); |
1066 | mmap_read_unlock(mm); |
1067 | |
1068 | 		spin_lock(&ksm_mmlist_lock); |
1069 | slot = list_entry(mm_slot->slot.mm_node.next, |
1070 | struct mm_slot, mm_node); |
1071 | ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); |
1072 | if (ksm_test_exit(mm)) { |
1073 | 			hash_del(&mm_slot->slot.hash); |
1074 | 			list_del(&mm_slot->slot.mm_node); |
1075 | 			spin_unlock(&ksm_mmlist_lock); |
1076 | |
1077 | 			mm_slot_free(mm_slot_cache, mm_slot); |
1078 | 			clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
1079 | 			clear_bit(MMF_VM_MERGE_ANY, &mm->flags); |
1080 | mmdrop(mm); |
1081 | } else |
1082 | 			spin_unlock(&ksm_mmlist_lock); |
1083 | } |
1084 | |
1085 | /* Clean up stable nodes, but don't worry if some are still busy */ |
1086 | remove_all_stable_nodes(); |
1087 | ksm_scan.seqnr = 0; |
1088 | return 0; |
1089 | |
1090 | error: |
1091 | mmap_read_unlock(mm); |
1092 | 	spin_lock(&ksm_mmlist_lock); |
1093 | 	ksm_scan.mm_slot = &ksm_mm_head; |
1094 | 	spin_unlock(&ksm_mmlist_lock); |
1095 | return err; |
1096 | } |
1097 | #endif /* CONFIG_SYSFS */ |
1098 | |
1099 | static u32 calc_checksum(struct page *page) |
1100 | { |
1101 | u32 checksum; |
1102 | void *addr = kmap_atomic(page); |
1103 | 	checksum = xxhash(addr, PAGE_SIZE, 0); |
1104 | kunmap_atomic(addr); |
1105 | return checksum; |
1106 | } |
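
/*
 * The checksum implements rule 2) of the overview: a page is only inserted
 * into the unstable tree once its content has stayed the same across two
 * full scans. A simplified sketch of how the caller (cmp_and_merge_page(),
 * later in this file) uses it - not the exact code:
 *
 *	checksum = calc_checksum(page);
 *	if (rmap_item->oldchecksum != checksum) {
 *		rmap_item->oldchecksum = checksum;	// still volatile:
 *		return;					// skip it this scan
 *	}
 *	// unchanged since the previous scan: try the unstable tree / merging
 */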
1107 | |
1108 | static int write_protect_page(struct vm_area_struct *vma, struct page *page, |
1109 | pte_t *orig_pte) |
1110 | { |
1111 | struct mm_struct *mm = vma->vm_mm; |
1112 | DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0); |
1113 | int swapped; |
1114 | int err = -EFAULT; |
1115 | struct mmu_notifier_range range; |
1116 | bool anon_exclusive; |
1117 | pte_t entry; |
1118 | |
1119 | pvmw.address = page_address_in_vma(page, vma); |
1120 | if (pvmw.address == -EFAULT) |
1121 | goto out; |
1122 | |
1123 | BUG_ON(PageTransCompound(page)); |
1124 | |
1125 | 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address, |
1126 | 				pvmw.address + PAGE_SIZE); |
1127 | 	mmu_notifier_invalidate_range_start(&range); |
1128 | |
1129 | 	if (!page_vma_mapped_walk(&pvmw)) |
1130 | 		goto out_mn; |
1131 | 	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) |
1132 | goto out_unlock; |
1133 | |
1134 | anon_exclusive = PageAnonExclusive(page); |
1135 | 	entry = ptep_get(pvmw.pte); |
1136 | 	if (pte_write(entry) || pte_dirty(entry) || |
1137 | anon_exclusive || mm_tlb_flush_pending(mm)) { |
1138 | swapped = PageSwapCache(page); |
1139 | 		flush_cache_page(vma, pvmw.address, page_to_pfn(page)); |
1140 | /* |
1141 | 		 * Ok, this is tricky: when get_user_pages_fast() runs it doesn't |
1142 | 		 * take any lock, therefore the check that we are going to make |
1143 | 		 * with the page count against the map count is racy and |
1144 | 		 * O_DIRECT can happen right after the check. |
1145 | 		 * So we clear the pte and flush the tlb before the check; |
1146 | 		 * this assures us that no O_DIRECT can happen after the check |
1147 | 		 * or in the middle of the check. |
1148 | * |
1149 | * No need to notify as we are downgrading page table to read |
1150 | * only not changing it to point to a new page. |
1151 | * |
1152 | * See Documentation/mm/mmu_notifier.rst |
1153 | */ |
1154 | 		entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte); |
1155 | /* |
1156 | * Check that no O_DIRECT or similar I/O is in progress on the |
1157 | * page |
1158 | */ |
1159 | if (page_mapcount(page) + 1 + swapped != page_count(page)) { |
1160 | set_pte_at(mm, pvmw.address, pvmw.pte, entry); |
1161 | goto out_unlock; |
1162 | } |
1163 | |
1164 | /* See page_try_share_anon_rmap(): clear PTE first. */ |
1165 | if (anon_exclusive && page_try_share_anon_rmap(page)) { |
1166 | set_pte_at(mm, pvmw.address, pvmw.pte, entry); |
1167 | goto out_unlock; |
1168 | } |
1169 | |
1170 | 		if (pte_dirty(entry)) |
1171 | 			set_page_dirty(page); |
1172 | 		entry = pte_mkclean(entry); |
1173 | |
1174 | 		if (pte_write(entry)) |
1175 | 			entry = pte_wrprotect(entry); |
1176 | |
1177 | set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry); |
1178 | } |
1179 | *orig_pte = entry; |
1180 | err = 0; |
1181 | |
1182 | out_unlock: |
1183 | 	page_vma_mapped_walk_done(&pvmw); |
1184 | out_mn: |
1185 | 	mmu_notifier_invalidate_range_end(&range); |
1186 | out: |
1187 | return err; |
1188 | } |
1189 | |
1190 | /** |
1191 | * replace_page - replace page in vma by new ksm page |
1192 | * @vma: vma that holds the pte pointing to page |
1193 | * @page: the page we are replacing by kpage |
1194 | * @kpage: the ksm page we replace page by |
1195 | * @orig_pte: the original value of the pte |
1196 | * |
1197 | * Returns 0 on success, -EFAULT on failure. |
1198 | */ |
1199 | static int replace_page(struct vm_area_struct *vma, struct page *page, |
1200 | struct page *kpage, pte_t orig_pte) |
1201 | { |
1202 | struct mm_struct *mm = vma->vm_mm; |
1203 | struct folio *folio; |
1204 | pmd_t *pmd; |
1205 | pmd_t pmde; |
1206 | pte_t *ptep; |
1207 | pte_t newpte; |
1208 | spinlock_t *ptl; |
1209 | unsigned long addr; |
1210 | int err = -EFAULT; |
1211 | struct mmu_notifier_range range; |
1212 | |
1213 | addr = page_address_in_vma(page, vma); |
1214 | if (addr == -EFAULT) |
1215 | goto out; |
1216 | |
1217 | 	pmd = mm_find_pmd(mm, addr); |
1218 | if (!pmd) |
1219 | goto out; |
1220 | /* |
1221 | * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at() |
1222 | * without holding anon_vma lock for write. So when looking for a |
1223 | * genuine pmde (in which to find pte), test present and !THP together. |
1224 | */ |
1225 | 	pmde = pmdp_get_lockless(pmd); |
1226 | 	if (!pmd_present(pmde) || pmd_trans_huge(pmde)) |
1227 | goto out; |
1228 | |
1229 | 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, |
1230 | 				addr + PAGE_SIZE); |
1231 | 	mmu_notifier_invalidate_range_start(&range); |
1232 | |
1233 | 	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); |
1234 | if (!ptep) |
1235 | goto out_mn; |
1236 | 	if (!pte_same(ptep_get(ptep), orig_pte)) { |
1237 | pte_unmap_unlock(ptep, ptl); |
1238 | goto out_mn; |
1239 | } |
1240 | VM_BUG_ON_PAGE(PageAnonExclusive(page), page); |
1241 | VM_BUG_ON_PAGE(PageAnon(kpage) && PageAnonExclusive(kpage), kpage); |
1242 | |
1243 | /* |
1244 | * No need to check ksm_use_zero_pages here: we can only have a |
1245 | * zero_page here if ksm_use_zero_pages was enabled already. |
1246 | */ |
1247 | if (!is_zero_pfn(page_to_pfn(kpage))) { |
1248 | 		get_page(kpage); |
1249 | 		page_add_anon_rmap(kpage, vma, addr, RMAP_NONE); |
1250 | newpte = mk_pte(kpage, vma->vm_page_prot); |
1251 | } else { |
1252 | /* |
1253 | * Use pte_mkdirty to mark the zero page mapped by KSM, and then |
1254 | * we can easily track all KSM-placed zero pages by checking if |
1255 | * the dirty bit in zero page's PTE is set. |
1256 | */ |
1257 | 		newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot))); |
1258 | ksm_zero_pages++; |
1259 | mm->ksm_zero_pages++; |
1260 | /* |
1261 | * We're replacing an anonymous page with a zero page, which is |
1262 | * not anonymous. We need to do proper accounting otherwise we |
1263 | * will get wrong values in /proc, and a BUG message in dmesg |
1264 | * when tearing down the mm. |
1265 | */ |
1266 | 		dec_mm_counter(mm, MM_ANONPAGES); |
1267 | } |
1268 | |
1269 | 	flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep))); |
1270 | /* |
1271 | * No need to notify as we are replacing a read only page with another |
1272 | * read only page with the same content. |
1273 | * |
1274 | * See Documentation/mm/mmu_notifier.rst |
1275 | */ |
1276 | 	ptep_clear_flush(vma, addr, ptep); |
1277 | set_pte_at_notify(mm, addr, ptep, newpte); |
1278 | |
1279 | folio = page_folio(page); |
1280 | 	page_remove_rmap(page, vma, false); |
1281 | if (!folio_mapped(folio)) |
1282 | folio_free_swap(folio); |
1283 | folio_put(folio); |
1284 | |
1285 | pte_unmap_unlock(ptep, ptl); |
1286 | err = 0; |
1287 | out_mn: |
1288 | 	mmu_notifier_invalidate_range_end(&range); |
1289 | out: |
1290 | return err; |
1291 | } |
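
/*
 * Note on the zero-page branch above: the dirty bit set on that special
 * zero-pfn pte is what is_ksm_zero_pte() (declared in include/linux/ksm.h,
 * roughly "is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte)") tests later, e.g.
 * in break_ksm_pmd_entry() above, to tell KSM-placed zero pages apart from
 * zero pages installed by other means.
 */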
1292 | |
1293 | /* |
1294 | * try_to_merge_one_page - take two pages and merge them into one |
1295 | * @vma: the vma that holds the pte pointing to page |
1296 | * @page: the PageAnon page that we want to replace with kpage |
1297 | * @kpage: the PageKsm page that we want to map instead of page, |
1298 | * or NULL the first time when we want to use page as kpage. |
1299 | * |
1300 | * This function returns 0 if the pages were merged, -EFAULT otherwise. |
1301 | */ |
1302 | static int try_to_merge_one_page(struct vm_area_struct *vma, |
1303 | struct page *page, struct page *kpage) |
1304 | { |
1305 | 	pte_t orig_pte = __pte(0); |
1306 | int err = -EFAULT; |
1307 | |
1308 | if (page == kpage) /* ksm page forked */ |
1309 | return 0; |
1310 | |
1311 | if (!PageAnon(page)) |
1312 | goto out; |
1313 | |
1314 | /* |
1315 | * We need the page lock to read a stable PageSwapCache in |
1316 | * write_protect_page(). We use trylock_page() instead of |
1317 | * lock_page() because we don't want to wait here - we |
1318 | * prefer to continue scanning and merging different pages, |
1319 | * then come back to this page when it is unlocked. |
1320 | */ |
1321 | if (!trylock_page(page)) |
1322 | goto out; |
1323 | |
1324 | if (PageTransCompound(page)) { |
1325 | if (split_huge_page(page)) |
1326 | goto out_unlock; |
1327 | } |
1328 | |
1329 | /* |
1330 | * If this anonymous page is mapped only here, its pte may need |
1331 | * to be write-protected. If it's mapped elsewhere, all of its |
1332 | * ptes are necessarily already write-protected. But in either |
1333 | * case, we need to lock and check page_count is not raised. |
1334 | */ |
1335 | 	if (write_protect_page(vma, page, &orig_pte) == 0) { |
1336 | if (!kpage) { |
1337 | /* |
1338 | * While we hold page lock, upgrade page from |
1339 | * PageAnon+anon_vma to PageKsm+NULL stable_node: |
1340 | * stable_tree_insert() will update stable_node. |
1341 | */ |
1342 | set_page_stable_node(page, NULL); |
1343 | mark_page_accessed(page); |
1344 | /* |
1345 | * Page reclaim just frees a clean page with no dirty |
1346 | * ptes: make sure that the ksm page would be swapped. |
1347 | */ |
1348 | if (!PageDirty(page)) |
1349 | SetPageDirty(page); |
1350 | err = 0; |
1351 | 	} else if (pages_identical(page, kpage)) |
1352 | err = replace_page(vma, page, kpage, orig_pte); |
1353 | } |
1354 | |
1355 | out_unlock: |
1356 | unlock_page(page); |
1357 | out: |
1358 | return err; |
1359 | } |
1360 | |
1361 | /* |
1362 | * try_to_merge_with_ksm_page - like try_to_merge_two_pages, |
1363 | * but no new kernel page is allocated: kpage must already be a ksm page. |
1364 | * |
1365 | * This function returns 0 if the pages were merged, -EFAULT otherwise. |
1366 | */ |
1367 | static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item, |
1368 | struct page *page, struct page *kpage) |
1369 | { |
1370 | struct mm_struct *mm = rmap_item->mm; |
1371 | struct vm_area_struct *vma; |
1372 | int err = -EFAULT; |
1373 | |
1374 | mmap_read_lock(mm); |
1375 | 	vma = find_mergeable_vma(mm, rmap_item->address); |
1376 | if (!vma) |
1377 | goto out; |
1378 | |
1379 | err = try_to_merge_one_page(vma, page, kpage); |
1380 | if (err) |
1381 | goto out; |
1382 | |
1383 | /* Unstable nid is in union with stable anon_vma: remove first */ |
1384 | remove_rmap_item_from_tree(rmap_item); |
1385 | |
1386 | /* Must get reference to anon_vma while still holding mmap_lock */ |
1387 | rmap_item->anon_vma = vma->anon_vma; |
1388 | 	get_anon_vma(vma->anon_vma); |
1389 | out: |
1390 | mmap_read_unlock(mm); |
1391 | 	trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page), |
1392 | rmap_item, mm, err); |
1393 | return err; |
1394 | } |
1395 | |
1396 | /* |
1397 | * try_to_merge_two_pages - take two identical pages and prepare them |
1398 | * to be merged into one page. |
1399 | * |
1400 | * This function returns the kpage if we successfully merged two identical |
1401 | * pages into one ksm page, NULL otherwise. |
1402 | * |
1403 | * Note that this function upgrades page to ksm page: if one of the pages |
1404 | * is already a ksm page, try_to_merge_with_ksm_page should be used. |
1405 | */ |
1406 | static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item, |
1407 | struct page *page, |
1408 | struct ksm_rmap_item *tree_rmap_item, |
1409 | struct page *tree_page) |
1410 | { |
1411 | int err; |
1412 | |
1413 | err = try_to_merge_with_ksm_page(rmap_item, page, NULL); |
1414 | if (!err) { |
1415 | 		err = try_to_merge_with_ksm_page(tree_rmap_item, |
1416 | 						 tree_page, page); |
1417 | /* |
1418 | * If that fails, we have a ksm page with only one pte |
1419 | * pointing to it: so break it. |
1420 | */ |
1421 | if (err) |
1422 | break_cow(rmap_item); |
1423 | } |
1424 | return err ? NULL : page; |
1425 | } |
1426 | |
1427 | static __always_inline |
1428 | bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset) |
1429 | { |
1430 | VM_BUG_ON(stable_node->rmap_hlist_len < 0); |
1431 | /* |
1432 | * Check that at least one mapping still exists, otherwise |
1433 | 	 * there's not much point in merging and sharing with this |
1434 | * stable_node, as the underlying tree_page of the other |
1435 | * sharer is going to be freed soon. |
1436 | */ |
1437 | return stable_node->rmap_hlist_len && |
1438 | stable_node->rmap_hlist_len + offset < ksm_max_page_sharing; |
1439 | } |
1440 | |
1441 | static __always_inline |
1442 | bool is_page_sharing_candidate(struct ksm_stable_node *stable_node) |
1443 | { |
1444 | 	return __is_page_sharing_candidate(stable_node, 0); |
1445 | } |
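
/*
 * Worked example with the default ksm_max_page_sharing of 256: a dup whose
 * rmap_hlist_len is 255 is still a sharing candidate
 * (__is_page_sharing_candidate(dup, 0) is true), but it cannot promise room
 * for an extra mapping beyond the one underway
 * (__is_page_sharing_candidate(dup, 1) is false), and a dup already holding
 * 256 mappings is skipped entirely.
 */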
1446 | |
1447 | static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup, |
1448 | struct ksm_stable_node **_stable_node, |
1449 | struct rb_root *root, |
1450 | bool prune_stale_stable_nodes) |
1451 | { |
1452 | struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node; |
1453 | struct hlist_node *hlist_safe; |
1454 | struct page *_tree_page, *tree_page = NULL; |
1455 | int nr = 0; |
1456 | int found_rmap_hlist_len; |
1457 | |
1458 | if (!prune_stale_stable_nodes || |
1459 | time_before(jiffies, stable_node->chain_prune_time + |
1460 | msecs_to_jiffies( |
1461 | ksm_stable_node_chains_prune_millisecs))) |
1462 | prune_stale_stable_nodes = false; |
1463 | else |
1464 | stable_node->chain_prune_time = jiffies; |
1465 | |
1466 | hlist_for_each_entry_safe(dup, hlist_safe, |
1467 | &stable_node->hlist, hlist_dup) { |
1468 | cond_resched(); |
1469 | /* |
1470 | * We must walk all stable_node_dup to prune the stale |
1471 | * stable nodes during lookup. |
1472 | * |
1473 | * get_ksm_page can drop the nodes from the |
1474 | * stable_node->hlist if they point to freed pages |
1475 | * (that's why we do a _safe walk). The "dup" |
1476 | * stable_node parameter itself will be freed from |
1477 | * under us if it returns NULL. |
1478 | */ |
1479 | 		_tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK); |
1480 | if (!_tree_page) |
1481 | continue; |
1482 | nr += 1; |
1483 | 		if (is_page_sharing_candidate(dup)) { |
1484 | if (!found || |
1485 | dup->rmap_hlist_len > found_rmap_hlist_len) { |
1486 | if (found) |
1487 | 					put_page(tree_page); |
1488 | found = dup; |
1489 | found_rmap_hlist_len = found->rmap_hlist_len; |
1490 | tree_page = _tree_page; |
1491 | |
1492 | /* skip put_page for found dup */ |
1493 | if (!prune_stale_stable_nodes) |
1494 | break; |
1495 | continue; |
1496 | } |
1497 | } |
1498 | 		put_page(_tree_page); |
1499 | } |
1500 | |
1501 | if (found) { |
1502 | /* |
1503 | * nr is counting all dups in the chain only if |
1504 | * prune_stale_stable_nodes is true, otherwise we may |
1505 | * break the loop at nr == 1 even if there are |
1506 | * multiple entries. |
1507 | */ |
1508 | if (prune_stale_stable_nodes && nr == 1) { |
1509 | /* |
1510 | * If there's more than one entry, collapsing the |
1511 | * chain here would corrupt memory, so better BUG_ON. |
1512 | * In KSM context, with no lock held, it's not even |
1513 | * fatal. |
1514 | */ |
1515 | BUG_ON(stable_node->hlist.first->next); |
1516 | |
1517 | /* |
1518 | * There's just one entry and it is below the |
1519 | * deduplication limit so drop the chain. |
1520 | */ |
1521 | rb_replace_node(&stable_node->node, &found->node, |
1522 | root); |
1523 | free_stable_node(stable_node); |
1524 | ksm_stable_node_chains--; |
1525 | ksm_stable_node_dups--; |
1526 | /* |
1527 | * NOTE: the caller depends on the stable_node |
1528 | * to be equal to stable_node_dup if the chain |
1529 | * was collapsed. |
1530 | */ |
1531 | *_stable_node = found; |
1532 | /* |
1533 | * Just for robustness, as stable_node is |
1534 | * otherwise left as a stale pointer, the |
1535 | * compiler will optimize it away at build |
1536 | * time. |
1537 | */ |
1538 | stable_node = NULL; |
1539 | } else if (stable_node->hlist.first != &found->hlist_dup && |
1540 | __is_page_sharing_candidate(stable_node: found, offset: 1)) { |
1541 | /* |
1542 | * If the found stable_node dup can accept one |
1543 | * more future merge (in addition to the one |
1544 | * that is underway) and is not at the head of |
1545 | * the chain, put it there so next search will |
1546 | * be quicker in the !prune_stale_stable_nodes |
1547 | * case. |
1548 | * |
1549 | * NOTE: it would be inaccurate to use nr > 1 |
1550 | * instead of checking the hlist.first pointer |
1551 | * directly, because in the |
1552 | * prune_stale_stable_nodes case "nr" isn't |
1553 | * the position of the found dup in the chain, |
1554 | * but the total number of dups in the chain. |
1555 | */ |
1556 | hlist_del(&found->hlist_dup); |
1557 | hlist_add_head(&found->hlist_dup, |
1558 | &stable_node->hlist); |
1559 | } |
1560 | } |
1561 | |
1562 | *_stable_node_dup = found; |
1563 | return tree_page; |
1564 | } |
1565 | |
1566 | static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node, |
1567 | struct rb_root *root) |
1568 | { |
1569 | if (!is_stable_node_chain(stable_node)) |
1570 | return stable_node; |
1571 | if (hlist_empty(&stable_node->hlist)) { |
1572 | free_stable_node_chain(stable_node, root); |
1573 | return NULL; |
1574 | } |
1575 | return hlist_entry(stable_node->hlist.first, |
1576 | typeof(*stable_node), hlist_dup); |
1577 | } |
1578 | |
1579 | /* |
1580 | * As with get_ksm_page, this function can free the *_stable_node and |
1581 | * *_stable_node_dup if the returned tree_page is NULL. |
1582 | * |
1583 | * It can also free and overwrite *_stable_node with the found |
1584 | * stable_node_dup if the chain is collapsed (in which case |
1585 | * *_stable_node will be equal to *_stable_node_dup, as if the chain |
1586 | * had never existed). It's up to the caller to verify that tree_page |
1587 | * is not NULL before dereferencing *_stable_node or *_stable_node_dup. |
1588 | * |
1589 | * *_stable_node_dup is really a second output parameter of this |
1590 | * function and will be overwritten in all cases; the caller doesn't |
1591 | * need to initialize it. |
1592 | */ |
1593 | static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup, |
1594 | struct ksm_stable_node **_stable_node, |
1595 | struct rb_root *root, |
1596 | bool prune_stale_stable_nodes) |
1597 | { |
1598 | struct ksm_stable_node *stable_node = *_stable_node; |
1599 | if (!is_stable_node_chain(stable_node)) { |
1600 | if (is_page_sharing_candidate(stable_node)) { |
1601 | *_stable_node_dup = stable_node; |
1602 | return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK); |
1603 | } |
1604 | /* |
1605 | * _stable_node_dup set to NULL means the stable_node |
1606 | * reached the ksm_max_page_sharing limit. |
1607 | */ |
1608 | *_stable_node_dup = NULL; |
1609 | return NULL; |
1610 | } |
1611 | return stable_node_dup(_stable_node_dup, _stable_node, root, |
1612 | prune_stale_stable_nodes); |
1613 | } |
1614 | |
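/*
 * chain_prune - wrapper around __stable_node_chain() that also allows
 * stale dups to be pruned and a single-dup chain to be collapsed while
 * searching. Note that *s_n may be updated to point to the collapsed
 * dup, so the caller must re-read it afterwards.
 */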
1615 | static __always_inline struct page *chain_prune(struct ksm_stable_node **s_n_d, |
1616 | struct ksm_stable_node **s_n, |
1617 | struct rb_root *root) |
1618 | { |
1619 | return __stable_node_chain(s_n_d, s_n, root, true); |
1620 | } |
1621 | |
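/*
 * chain - wrapper around __stable_node_chain() that never prunes, so the
 * chain layout in the rbtree is left untouched; only the best sharing
 * candidate dup is returned through *s_n_d (NULL if all dups are full).
 */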
1622 | static __always_inline struct page *chain(struct ksm_stable_node **s_n_d, |
1623 | struct ksm_stable_node *s_n, |
1624 | struct rb_root *root) |
1625 | { |
1626 | struct ksm_stable_node *old_stable_node = s_n; |
1627 | struct page *tree_page; |
1628 | |
1629 | tree_page = __stable_node_chain(s_n_d, &s_n, root, false); |
1630 | /* not pruning dups so s_n cannot have changed */ |
1631 | VM_BUG_ON(s_n != old_stable_node); |
1632 | return tree_page; |
1633 | } |
1634 | |
1635 | /* |
1636 | * stable_tree_search - search for page inside the stable tree |
1637 | * |
1638 | * This function checks if there is a page inside the stable tree |
1639 | * with identical content to the page that we are scanning right now. |
1640 | * |
1641 | * This function returns the ksm page of identical content if found, |
1642 | * NULL otherwise. |
1643 | */ |
1644 | static struct page *stable_tree_search(struct page *page) |
1645 | { |
1646 | int nid; |
1647 | struct rb_root *root; |
1648 | struct rb_node **new; |
1649 | struct rb_node *parent; |
1650 | struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; |
1651 | struct ksm_stable_node *page_node; |
1652 | |
1653 | page_node = page_stable_node(page); |
1654 | if (page_node && page_node->head != &migrate_nodes) { |
1655 | /* ksm page forked */ |
1656 | get_page(page); |
1657 | return page; |
1658 | } |
1659 | |
1660 | nid = get_kpfn_nid(page_to_pfn(page)); |
1661 | root = root_stable_tree + nid; |
1662 | again: |
1663 | new = &root->rb_node; |
1664 | parent = NULL; |
1665 | |
1666 | while (*new) { |
1667 | struct page *tree_page; |
1668 | int ret; |
1669 | |
1670 | cond_resched(); |
1671 | stable_node = rb_entry(*new, struct ksm_stable_node, node); |
1672 | stable_node_any = NULL; |
1673 | tree_page = chain_prune(&stable_node_dup, &stable_node, root); |
1674 | /* |
1675 | * NOTE: stable_node may have been freed by |
1676 | * chain_prune() if the returned stable_node_dup is |
1677 | * not NULL. stable_node_dup may have been inserted in |
1678 | * the rbtree instead as a regular stable_node (in |
1679 | * order to collapse the stable_node chain if a single |
1680 | * stable_node dup was found in it). In that case |
1681 | * stable_node is overwritten by the callee to point |
1682 | * to the stable_node_dup that was collapsed into the |
1683 | * stable rbtree, and stable_node will be equal to |
1684 | * stable_node_dup as if the chain had never existed. |
1685 | */ |
1686 | if (!stable_node_dup) { |
1687 | /* |
1688 | * Either all stable_node dups were full in |
1689 | * this stable_node chain, or this chain was |
1690 | * empty and should be rb_erased. |
1691 | */ |
1692 | stable_node_any = stable_node_dup_any(stable_node, |
1693 | root); |
1694 | if (!stable_node_any) { |
1695 | /* rb_erase just run */ |
1696 | goto again; |
1697 | } |
1698 | /* |
1699 | * Take the page of any stable_node dup in |
1700 | * this stable_node chain to let the tree walk |
1701 | * continue. All KSM pages belonging to the |
1702 | * stable_node dups in a stable_node chain |
1703 | * have the same content and they're |
1704 | * write protected at all times. Any of them |
1705 | * will do to continue the walk. |
1706 | */ |
1707 | tree_page = get_ksm_page(stable_node_any, |
1708 | GET_KSM_PAGE_NOLOCK); |
1709 | } |
1710 | VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); |
1711 | if (!tree_page) { |
1712 | /* |
1713 | * If we walked over a stale stable_node, |
1714 | * get_ksm_page() will call rb_erase() and it |
1715 | * may rebalance the tree from under us. So |
1716 | * restart the search from scratch. Returning |
1717 | * NULL would be safe too, but we'd generate |
1718 | * false negative insertions just because some |
1719 | * stable_node was stale. |
1720 | */ |
1721 | goto again; |
1722 | } |
1723 | |
1724 | ret = memcmp_pages(page, tree_page); |
1725 | put_page(tree_page); |
1726 | |
1727 | parent = *new; |
1728 | if (ret < 0) |
1729 | new = &parent->rb_left; |
1730 | else if (ret > 0) |
1731 | new = &parent->rb_right; |
1732 | else { |
1733 | if (page_node) { |
1734 | VM_BUG_ON(page_node->head != &migrate_nodes); |
1735 | /* |
1736 | * Test if the migrated page should be merged |
1737 | * into a stable node dup. If the mapcount is |
1738 | * 1 we can migrate it with another KSM page |
1739 | * without adding it to the chain. |
1740 | */ |
1741 | if (page_mapcount(page) > 1) |
1742 | goto chain_append; |
1743 | } |
1744 | |
1745 | if (!stable_node_dup) { |
1746 | /* |
1747 | * If the stable_node is a chain and |
1748 | * we got a payload match in memcmp |
1749 | * but we cannot merge the scanned |
1750 | * page in any of the existing |
1751 | * stable_node dups because they're |
1752 | * all full, we have to wait for the |
1753 | * scanned page to find itself a match |
1754 | * in the unstable tree, to create a |
1755 | * brand new KSM page to add later to |
1756 | * the dups of this stable_node. |
1757 | */ |
1758 | return NULL; |
1759 | } |
1760 | |
1761 | /* |
1762 | * Lock and unlock the stable_node's page (which |
1763 | * might already have been migrated) so that page |
1764 | * migration is sure to notice its raised count. |
1765 | * It would be more elegant to return stable_node |
1766 | * than kpage, but that involves more changes. |
1767 | */ |
1768 | tree_page = get_ksm_page(stable_node_dup, |
1769 | GET_KSM_PAGE_TRYLOCK); |
1770 | |
1771 | if (PTR_ERR(tree_page) == -EBUSY) |
1772 | return ERR_PTR(-EBUSY); |
1773 | |
1774 | if (unlikely(!tree_page)) |
1775 | /* |
1776 | * The tree may have been rebalanced, |
1777 | * so re-evaluate parent and new. |
1778 | */ |
1779 | goto again; |
1780 | unlock_page(tree_page); |
1781 | |
1782 | if (get_kpfn_nid(stable_node_dup->kpfn) != |
1783 | NUMA(stable_node_dup->nid)) { |
1784 | put_page(tree_page); |
1785 | goto replace; |
1786 | } |
1787 | return tree_page; |
1788 | } |
1789 | } |
1790 | |
1791 | if (!page_node) |
1792 | return NULL; |
1793 | |
1794 | list_del(&page_node->list); |
1795 | DO_NUMA(page_node->nid = nid); |
1796 | rb_link_node(&page_node->node, parent, new); |
1797 | rb_insert_color(&page_node->node, root); |
1798 | out: |
1799 | if (is_page_sharing_candidate(page_node)) { |
1800 | get_page(page); |
1801 | return page; |
1802 | } else |
1803 | return NULL; |
1804 | |
1805 | replace: |
1806 | /* |
1807 | * If stable_node was a chain and chain_prune collapsed it, |
1808 | * stable_node has been updated to be the new regular |
1809 | * stable_node. A collapse of the chain is indistinguishable |
1810 | * from the case where there was no chain in the stable |
1811 | * rbtree. Otherwise stable_node is the chain and |
1812 | * stable_node_dup is the dup to replace. |
1813 | */ |
1814 | if (stable_node_dup == stable_node) { |
1815 | VM_BUG_ON(is_stable_node_chain(stable_node_dup)); |
1816 | VM_BUG_ON(is_stable_node_dup(stable_node_dup)); |
1817 | /* there is no chain */ |
1818 | if (page_node) { |
1819 | VM_BUG_ON(page_node->head != &migrate_nodes); |
1820 | list_del(&page_node->list); |
1821 | DO_NUMA(page_node->nid = nid); |
1822 | rb_replace_node(&stable_node_dup->node, |
1823 | &page_node->node, |
1824 | root); |
1825 | if (is_page_sharing_candidate(page_node)) |
1826 | get_page(page); |
1827 | else |
1828 | page = NULL; |
1829 | } else { |
1830 | rb_erase(&stable_node_dup->node, root); |
1831 | page = NULL; |
1832 | } |
1833 | } else { |
1834 | VM_BUG_ON(!is_stable_node_chain(stable_node)); |
1835 | __stable_node_dup_del(stable_node_dup); |
1836 | if (page_node) { |
1837 | VM_BUG_ON(page_node->head != &migrate_nodes); |
1838 | list_del(&page_node->list); |
1839 | DO_NUMA(page_node->nid = nid); |
1840 | stable_node_chain_add_dup(page_node, stable_node); |
1841 | if (is_page_sharing_candidate(page_node)) |
1842 | get_page(page); |
1843 | else |
1844 | page = NULL; |
1845 | } else { |
1846 | page = NULL; |
1847 | } |
1848 | } |
1849 | stable_node_dup->head = &migrate_nodes; |
1850 | list_add(&stable_node_dup->list, stable_node_dup->head); |
1851 | return page; |
1852 | |
1853 | chain_append: |
1854 | /* stable_node_dup could be null if it reached the limit */ |
1855 | if (!stable_node_dup) |
1856 | stable_node_dup = stable_node_any; |
1857 | /* |
1858 | * If stable_node was a chain and chain_prune collapsed it, |
1859 | * stable_node has been updated to be the new regular |
1860 | * stable_node. A collapse of the chain is indistinguishable |
1861 | * from the case where there was no chain in the stable |
1862 | * rbtree. Otherwise stable_node is the chain and |
1863 | * stable_node_dup is the dup to replace. |
1864 | */ |
1865 | if (stable_node_dup == stable_node) { |
1866 | VM_BUG_ON(is_stable_node_dup(stable_node_dup)); |
1867 | /* chain is missing so create it */ |
1868 | stable_node = alloc_stable_node_chain(stable_node_dup, |
1869 | root); |
1870 | if (!stable_node) |
1871 | return NULL; |
1872 | } |
1873 | /* |
1874 | * Add this stable_node dup that was |
1875 | * migrated to the stable_node chain |
1876 | * of the current nid for this page |
1877 | * content. |
1878 | */ |
1879 | VM_BUG_ON(!is_stable_node_dup(stable_node_dup)); |
1880 | VM_BUG_ON(page_node->head != &migrate_nodes); |
1881 | list_del(&page_node->list); |
1882 | DO_NUMA(page_node->nid = nid); |
1883 | stable_node_chain_add_dup(page_node, stable_node); |
1884 | goto out; |
1885 | } |
1886 | |
1887 | /* |
1888 | * stable_tree_insert - insert stable tree node pointing to new ksm page |
1889 | * into the stable tree. |
1890 | * |
1891 | * This function returns the stable tree node just allocated on success, |
1892 | * NULL otherwise. |
1893 | */ |
1894 | static struct ksm_stable_node *stable_tree_insert(struct page *kpage) |
1895 | { |
1896 | int nid; |
1897 | unsigned long kpfn; |
1898 | struct rb_root *root; |
1899 | struct rb_node **new; |
1900 | struct rb_node *parent; |
1901 | struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; |
1902 | bool need_chain = false; |
1903 | |
1904 | kpfn = page_to_pfn(kpage); |
1905 | nid = get_kpfn_nid(kpfn); |
1906 | root = root_stable_tree + nid; |
1907 | again: |
1908 | parent = NULL; |
1909 | new = &root->rb_node; |
1910 | |
1911 | while (*new) { |
1912 | struct page *tree_page; |
1913 | int ret; |
1914 | |
1915 | cond_resched(); |
1916 | stable_node = rb_entry(*new, struct ksm_stable_node, node); |
1917 | stable_node_any = NULL; |
1918 | tree_page = chain(&stable_node_dup, stable_node, root); |
1919 | if (!stable_node_dup) { |
1920 | /* |
1921 | * Either all stable_node dups were full in |
1922 | * this stable_node chain, or this chain was |
1923 | * empty and should be rb_erased. |
1924 | */ |
1925 | stable_node_any = stable_node_dup_any(stable_node, |
1926 | root); |
1927 | if (!stable_node_any) { |
1928 | /* rb_erase just run */ |
1929 | goto again; |
1930 | } |
1931 | /* |
1932 | * Take the page of any stable_node dup in |
1933 | * this stable_node chain to let the tree walk |
1934 | * continue. All KSM pages belonging to the |
1935 | * stable_node dups in a stable_node chain |
1936 | * have the same content and they're |
1937 | * write protected at all times. Any of them |
1938 | * will do to continue the walk. |
1939 | */ |
1940 | tree_page = get_ksm_page(stable_node_any, |
1941 | GET_KSM_PAGE_NOLOCK); |
1942 | } |
1943 | VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); |
1944 | if (!tree_page) { |
1945 | /* |
1946 | * If we walked over a stale stable_node, |
1947 | * get_ksm_page() will call rb_erase() and it |
1948 | * may rebalance the tree from under us. So |
1949 | * restart the search from scratch. Returning |
1950 | * NULL would be safe too, but we'd generate |
1951 | * false negative insertions just because some |
1952 | * stable_node was stale. |
1953 | */ |
1954 | goto again; |
1955 | } |
1956 | |
1957 | ret = memcmp_pages(kpage, tree_page); |
1958 | put_page(tree_page); |
1959 | |
1960 | parent = *new; |
1961 | if (ret < 0) |
1962 | new = &parent->rb_left; |
1963 | else if (ret > 0) |
1964 | new = &parent->rb_right; |
1965 | else { |
1966 | need_chain = true; |
1967 | break; |
1968 | } |
1969 | } |
1970 | |
1971 | stable_node_dup = alloc_stable_node(); |
1972 | if (!stable_node_dup) |
1973 | return NULL; |
1974 | |
1975 | INIT_HLIST_HEAD(&stable_node_dup->hlist); |
1976 | stable_node_dup->kpfn = kpfn; |
1977 | set_page_stable_node(kpage, stable_node_dup); |
1978 | stable_node_dup->rmap_hlist_len = 0; |
1979 | DO_NUMA(stable_node_dup->nid = nid); |
1980 | if (!need_chain) { |
1981 | rb_link_node(&stable_node_dup->node, parent, new); |
1982 | rb_insert_color(&stable_node_dup->node, root); |
1983 | } else { |
1984 | if (!is_stable_node_chain(stable_node)) { |
1985 | struct ksm_stable_node *orig = stable_node; |
1986 | /* chain is missing so create it */ |
1987 | stable_node = alloc_stable_node_chain(orig, root); |
1988 | if (!stable_node) { |
1989 | free_stable_node(stable_node_dup); |
1990 | return NULL; |
1991 | } |
1992 | } |
1993 | stable_node_chain_add_dup(stable_node_dup, stable_node); |
1994 | } |
1995 | |
1996 | return stable_node_dup; |
1997 | } |
1998 | |
1999 | /* |
2000 | * unstable_tree_search_insert - search for identical page, |
2001 | * else insert rmap_item into the unstable tree. |
2002 | * |
2003 | * This function searches for a page in the unstable tree identical to the |
2004 | * page currently being scanned; and if no identical page is found in the |
2005 | * tree, we insert rmap_item as a new object into the unstable tree. |
2006 | * |
2007 | * This function returns pointer to rmap_item found to be identical |
2008 | * to the currently scanned page, NULL otherwise. |
2009 | * |
2010 | * This function does both searching and inserting, because they share |
2011 | * the same walking algorithm in an rbtree. |
2012 | */ |
2013 | static |
2014 | struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item, |
2015 | struct page *page, |
2016 | struct page **tree_pagep) |
2017 | { |
2018 | struct rb_node **new; |
2019 | struct rb_root *root; |
2020 | struct rb_node *parent = NULL; |
2021 | int nid; |
2022 | |
2023 | nid = get_kpfn_nid(page_to_pfn(page)); |
2024 | root = root_unstable_tree + nid; |
2025 | new = &root->rb_node; |
2026 | |
2027 | while (*new) { |
2028 | struct ksm_rmap_item *tree_rmap_item; |
2029 | struct page *tree_page; |
2030 | int ret; |
2031 | |
2032 | cond_resched(); |
2033 | tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node); |
2034 | tree_page = get_mergeable_page(tree_rmap_item); |
2035 | if (!tree_page) |
2036 | return NULL; |
2037 | |
2038 | /* |
2039 | * Don't substitute a ksm page for a forked page. |
2040 | */ |
2041 | if (page == tree_page) { |
2042 | put_page(tree_page); |
2043 | return NULL; |
2044 | } |
2045 | |
2046 | ret = memcmp_pages(page, tree_page); |
2047 | |
2048 | parent = *new; |
2049 | if (ret < 0) { |
2050 | put_page(tree_page); |
2051 | new = &parent->rb_left; |
2052 | } else if (ret > 0) { |
2053 | put_page(tree_page); |
2054 | new = &parent->rb_right; |
2055 | } else if (!ksm_merge_across_nodes && |
2056 | page_to_nid(tree_page) != nid) { |
2057 | /* |
2058 | * If tree_page has been migrated to another NUMA node, |
2059 | * it will be flushed out and put in the right unstable |
2060 | * tree next time: only merge with it when across_nodes. |
2061 | */ |
2062 | put_page(tree_page); |
2063 | return NULL; |
2064 | } else { |
2065 | *tree_pagep = tree_page; |
2066 | return tree_rmap_item; |
2067 | } |
2068 | } |
2069 | |
2070 | rmap_item->address |= UNSTABLE_FLAG; |
2071 | rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); |
2072 | DO_NUMA(rmap_item->nid = nid); |
2073 | rb_link_node(&rmap_item->node, parent, new); |
2074 | rb_insert_color(&rmap_item->node, root); |
2075 | |
2076 | ksm_pages_unshared++; |
2077 | return NULL; |
2078 | } |
2079 | |
2080 | /* |
2081 | * stable_tree_append - add another rmap_item to the linked list of |
2082 | * rmap_items hanging off a given node of the stable tree, all sharing |
2083 | * the same ksm page. |
2084 | */ |
2085 | static void stable_tree_append(struct ksm_rmap_item *rmap_item, |
2086 | struct ksm_stable_node *stable_node, |
2087 | bool max_page_sharing_bypass) |
2088 | { |
2089 | /* |
2090 | * rmap won't find this mapping if we don't insert the |
2091 | * rmap_item in the right stable_node |
2092 | * duplicate. page_migration could break later if rmap breaks, |
2093 | * so we might as well crash here. We really only need to check |
2094 | * for rmap_hlist_len == STABLE_NODE_CHAIN, but checking for any |
2095 | * negative value covers that too: an underflow detected here |
2096 | * for the first time (and not when decreasing rmap_hlist_len) |
2097 | * would be a sign of memory corruption in the stable_node. |
2098 | */ |
2099 | BUG_ON(stable_node->rmap_hlist_len < 0); |
2100 | |
2101 | stable_node->rmap_hlist_len++; |
2102 | if (!max_page_sharing_bypass) |
2103 | /* possibly non fatal but unexpected overflow, only warn */ |
2104 | WARN_ON_ONCE(stable_node->rmap_hlist_len > |
2105 | ksm_max_page_sharing); |
2106 | |
2107 | rmap_item->head = stable_node; |
2108 | rmap_item->address |= STABLE_FLAG; |
2109 | hlist_add_head(&rmap_item->hlist, &stable_node->hlist); |
2110 | |
2111 | if (rmap_item->hlist.next) |
2112 | ksm_pages_sharing++; |
2113 | else |
2114 | ksm_pages_shared++; |
2115 | |
2116 | rmap_item->mm->ksm_merging_pages++; |
2117 | } |
2118 | |
2119 | /* |
2120 | * cmp_and_merge_page - first see if page can be merged into the stable tree; |
2121 | * if not, compare checksum to previous and if it's the same, see if page can |
2122 | * be inserted into the unstable tree, or merged with a page already there and |
2123 | * both transferred to the stable tree. |
2124 | * |
2125 | * @page: the page that we are searching an identical page for. |
2126 | * @rmap_item: the reverse mapping into the virtual address of this page |
2127 | */ |
2128 | static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item) |
2129 | { |
2130 | struct mm_struct *mm = rmap_item->mm; |
2131 | struct ksm_rmap_item *tree_rmap_item; |
2132 | struct page *tree_page = NULL; |
2133 | struct ksm_stable_node *stable_node; |
2134 | struct page *kpage; |
2135 | unsigned int checksum; |
2136 | int err; |
2137 | bool max_page_sharing_bypass = false; |
2138 | |
2139 | stable_node = page_stable_node(page); |
2140 | if (stable_node) { |
2141 | if (stable_node->head != &migrate_nodes && |
2142 | get_kpfn_nid(READ_ONCE(stable_node->kpfn)) != |
2143 | NUMA(stable_node->nid)) { |
2144 | stable_node_dup_del(stable_node); |
2145 | stable_node->head = &migrate_nodes; |
2146 | list_add(&stable_node->list, stable_node->head); |
2147 | } |
2148 | if (stable_node->head != &migrate_nodes && |
2149 | rmap_item->head == stable_node) |
2150 | return; |
2151 | /* |
2152 | * If it's a KSM fork, allow it to go over the sharing limit |
2153 | * without warnings. |
2154 | */ |
2155 | if (!is_page_sharing_candidate(stable_node)) |
2156 | max_page_sharing_bypass = true; |
2157 | } |
2158 | |
2159 | /* We first start with searching the page inside the stable tree */ |
2160 | kpage = stable_tree_search(page); |
2161 | if (kpage == page && rmap_item->head == stable_node) { |
2162 | put_page(kpage); |
2163 | return; |
2164 | } |
2165 | |
2166 | remove_rmap_item_from_tree(rmap_item); |
2167 | |
2168 | if (kpage) { |
2169 | if (PTR_ERR(kpage) == -EBUSY) |
2170 | return; |
2171 | |
2172 | err = try_to_merge_with_ksm_page(rmap_item, page, kpage); |
2173 | if (!err) { |
2174 | /* |
2175 | * The page was successfully merged: |
2176 | * add its rmap_item to the stable tree. |
2177 | */ |
2178 | lock_page(kpage); |
2179 | stable_tree_append(rmap_item, page_stable_node(kpage), |
2180 | max_page_sharing_bypass); |
2181 | unlock_page(kpage); |
2182 | } |
2183 | put_page(kpage); |
2184 | return; |
2185 | } |
2186 | |
2187 | /* |
2188 | * If the hash value of the page has changed from the last time |
2189 | * we calculated it, this page is changing frequently: therefore we |
2190 | * don't want to insert it in the unstable tree, and we don't want |
2191 | * to waste our time searching for something identical to it there. |
2192 | */ |
2193 | checksum = calc_checksum(page); |
2194 | if (rmap_item->oldchecksum != checksum) { |
2195 | rmap_item->oldchecksum = checksum; |
2196 | return; |
2197 | } |
2198 | |
2199 | /* |
2200 | * Same checksum as an empty page. We attempt to merge it with the |
2201 | * appropriate zero page if the user enabled this via sysfs. |
2202 | */ |
2203 | if (ksm_use_zero_pages && (checksum == zero_checksum)) { |
2204 | struct vm_area_struct *vma; |
2205 | |
2206 | mmap_read_lock(mm); |
2207 | vma = find_mergeable_vma(mm, rmap_item->address); |
2208 | if (vma) { |
2209 | err = try_to_merge_one_page(vma, page, |
2210 | ZERO_PAGE(rmap_item->address)); |
2211 | trace_ksm_merge_one_page( |
2212 | page_to_pfn(ZERO_PAGE(rmap_item->address)), |
2213 | rmap_item, mm, err); |
2214 | } else { |
2215 | /* |
2216 | * If the vma is out of date, we do not need to |
2217 | * continue. |
2218 | */ |
2219 | err = 0; |
2220 | } |
2221 | mmap_read_unlock(mm); |
2222 | /* |
2223 | * In case of failure, the page was not really empty, so we |
2224 | * need to continue. Otherwise we're done. |
2225 | */ |
2226 | if (!err) |
2227 | return; |
2228 | } |
2229 | tree_rmap_item = |
2230 | unstable_tree_search_insert(rmap_item, page, &tree_page); |
2231 | if (tree_rmap_item) { |
2232 | bool split; |
2233 | |
2234 | kpage = try_to_merge_two_pages(rmap_item, page, |
2235 | tree_rmap_item, tree_page); |
2236 | /* |
2237 | * If both pages we tried to merge belong to the same compound |
2238 | * page, then we actually ended up increasing the reference |
2239 | * count of the same compound page twice, and split_huge_page |
2240 | * failed. |
2241 | * Here we set a flag if that happened, and we use it later to |
2242 | * try split_huge_page again. Since we call put_page right |
2243 | * afterwards, the reference count will be correct and |
2244 | * split_huge_page should succeed. |
2245 | */ |
2246 | split = PageTransCompound(page) |
2247 | && compound_head(page) == compound_head(tree_page); |
2248 | put_page(tree_page); |
2249 | if (kpage) { |
2250 | /* |
2251 | * The pages were successfully merged: insert new |
2252 | * node in the stable tree and add both rmap_items. |
2253 | */ |
2254 | lock_page(kpage); |
2255 | stable_node = stable_tree_insert(kpage); |
2256 | if (stable_node) { |
2257 | stable_tree_append(tree_rmap_item, stable_node, |
2258 | false); |
2259 | stable_tree_append(rmap_item, stable_node, |
2260 | false); |
2261 | } |
2262 | unlock_page(kpage); |
2263 | |
2264 | /* |
2265 | * If we fail to insert the page into the stable tree, |
2266 | * we will have 2 virtual addresses that are pointing |
2267 | * to a ksm page left outside the stable tree, |
2268 | * in which case we need to break_cow on both. |
2269 | */ |
2270 | if (!stable_node) { |
2271 | break_cow(tree_rmap_item); |
2272 | break_cow(rmap_item); |
2273 | } |
2274 | } else if (split) { |
2275 | /* |
2276 | * We are here if we tried to merge two pages and |
2277 | * failed because they both belonged to the same |
2278 | * compound page. We will split the page now, but no |
2279 | * merging will take place. |
2280 | * We do not want to add the cost of a full lock; if |
2281 | * the page is locked, it is better to skip it and |
2282 | * perhaps try again later. |
2283 | */ |
2284 | if (!trylock_page(page)) |
2285 | return; |
2286 | split_huge_page(page); |
2287 | unlock_page(page); |
2288 | } |
2289 | } |
2290 | } |
2291 | |
2292 | static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot, |
2293 | struct ksm_rmap_item **rmap_list, |
2294 | unsigned long addr) |
2295 | { |
2296 | struct ksm_rmap_item *rmap_item; |
2297 | |
2298 | while (*rmap_list) { |
2299 | rmap_item = *rmap_list; |
2300 | if ((rmap_item->address & PAGE_MASK) == addr) |
2301 | return rmap_item; |
2302 | if (rmap_item->address > addr) |
2303 | break; |
2304 | *rmap_list = rmap_item->rmap_list; |
2305 | remove_rmap_item_from_tree(rmap_item); |
2306 | free_rmap_item(rmap_item); |
2307 | } |
2308 | |
2309 | rmap_item = alloc_rmap_item(); |
2310 | if (rmap_item) { |
2311 | /* It has already been zeroed */ |
2312 | rmap_item->mm = mm_slot->slot.mm; |
2313 | rmap_item->mm->ksm_rmap_items++; |
2314 | rmap_item->address = addr; |
2315 | rmap_item->rmap_list = *rmap_list; |
2316 | *rmap_list = rmap_item; |
2317 | } |
2318 | return rmap_item; |
2319 | } |
2320 | |
2321 | /* |
2322 | * Calculate the skip "age" for a ksm page. The age reflects how often |
2323 | * de-duplication has already been tried on this page without success. |
2324 | * The smaller the age, the fewer scans the page is skipped for. |
2325 | * |
2326 | * @age: rmap_item age of page |
2327 | */ |
2328 | static unsigned int skip_age(rmap_age_t age) |
2329 | { |
2330 | if (age <= 3) |
2331 | return 1; |
2332 | if (age <= 5) |
2333 | return 2; |
2334 | if (age <= 8) |
2335 | return 4; |
2336 | |
2337 | return 8; |
2338 | } |
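/*
 * For illustration (assuming the thresholds above and the checks in
 * should_skip_rmap_item() below): pages of age 0-2 are never skipped;
 * a page of age 4 is scanned once and then skipped for the next 2
 * scans; a page of age 9 or more is skipped for 8 scans between
 * de-duplication attempts.
 */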
2339 | |
2340 | /* |
2341 | * Determines if a page should be skipped for the current scan. |
2342 | * |
2343 | * @page: page to check |
2344 | * @rmap_item: associated rmap_item of page |
2345 | */ |
2346 | static bool should_skip_rmap_item(struct page *page, |
2347 | struct ksm_rmap_item *rmap_item) |
2348 | { |
2349 | rmap_age_t age; |
2350 | |
2351 | if (!ksm_smart_scan) |
2352 | return false; |
2353 | |
2354 | /* |
2355 | * Never skip pages that are already KSM: cmp_and_merge_page() |
2356 | * will essentially ignore them, but we still have to process them |
2357 | * properly. |
2358 | */ |
2359 | if (PageKsm(page)) |
2360 | return false; |
2361 | |
2362 | age = rmap_item->age; |
2363 | if (age != U8_MAX) |
2364 | rmap_item->age++; |
2365 | |
2366 | /* |
2367 | * Smaller ages are not skipped, they need to get a chance to go |
2368 | * through the different phases of the KSM merging. |
2369 | */ |
2370 | if (age < 3) |
2371 | return false; |
2372 | |
2373 | /* |
2374 | * Are we still allowed to skip? If not, then don't skip it |
2375 | * and determine how much more often we are allowed to skip next. |
2376 | */ |
2377 | if (!rmap_item->remaining_skips) { |
2378 | rmap_item->remaining_skips = skip_age(age); |
2379 | return false; |
2380 | } |
2381 | |
2382 | /* Skip this page */ |
2383 | ksm_pages_skipped++; |
2384 | rmap_item->remaining_skips--; |
2385 | remove_rmap_item_from_tree(rmap_item); |
2386 | return true; |
2387 | } |
2388 | |
2389 | static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page) |
2390 | { |
2391 | struct mm_struct *mm; |
2392 | struct ksm_mm_slot *mm_slot; |
2393 | struct mm_slot *slot; |
2394 | struct vm_area_struct *vma; |
2395 | struct ksm_rmap_item *rmap_item; |
2396 | struct vma_iterator vmi; |
2397 | int nid; |
2398 | |
2399 | if (list_empty(&ksm_mm_head.slot.mm_node)) |
2400 | return NULL; |
2401 | |
2402 | mm_slot = ksm_scan.mm_slot; |
2403 | if (mm_slot == &ksm_mm_head) { |
2404 | trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items); |
2405 | |
2406 | /* |
2407 | * A number of pages can hang around indefinitely in per-cpu |
2408 | * LRU cache, raised page count preventing write_protect_page |
2409 | * from merging them. Though it doesn't really matter much, |
2410 | * it is puzzling to see some stuck in pages_volatile until |
2411 | * other activity jostles them out, and they also prevented |
2412 | * LTP's KSM test from succeeding deterministically; so drain |
2413 | * them here (here rather than on entry to ksm_do_scan(), |
2414 | * so we don't IPI too often when pages_to_scan is set low). |
2415 | */ |
2416 | lru_add_drain_all(); |
2417 | |
2418 | /* |
2419 | * Whereas stale stable_nodes on the stable_tree itself |
2420 | * get pruned in the regular course of stable_tree_search(), |
2421 | * those moved out to the migrate_nodes list can accumulate: |
2422 | * so prune them once before each full scan. |
2423 | */ |
2424 | if (!ksm_merge_across_nodes) { |
2425 | struct ksm_stable_node *stable_node, *next; |
2426 | struct page *page; |
2427 | |
2428 | list_for_each_entry_safe(stable_node, next, |
2429 | &migrate_nodes, list) { |
2430 | page = get_ksm_page(stable_node, |
2431 | GET_KSM_PAGE_NOLOCK); |
2432 | if (page) |
2433 | put_page(page); |
2434 | cond_resched(); |
2435 | } |
2436 | } |
2437 | |
2438 | for (nid = 0; nid < ksm_nr_node_ids; nid++) |
2439 | root_unstable_tree[nid] = RB_ROOT; |
2440 | |
2441 | spin_lock(&ksm_mmlist_lock); |
2442 | slot = list_entry(mm_slot->slot.mm_node.next, |
2443 | struct mm_slot, mm_node); |
2444 | mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); |
2445 | ksm_scan.mm_slot = mm_slot; |
2446 | spin_unlock(&ksm_mmlist_lock); |
2447 | /* |
2448 | * Although we tested list_empty() above, a racing __ksm_exit |
2449 | * of the last mm on the list may have removed it since then. |
2450 | */ |
2451 | if (mm_slot == &ksm_mm_head) |
2452 | return NULL; |
2453 | next_mm: |
2454 | ksm_scan.address = 0; |
2455 | ksm_scan.rmap_list = &mm_slot->rmap_list; |
2456 | } |
2457 | |
2458 | slot = &mm_slot->slot; |
2459 | mm = slot->mm; |
2460 | vma_iter_init(&vmi, mm, ksm_scan.address); |
2461 | |
2462 | mmap_read_lock(mm); |
2463 | if (ksm_test_exit(mm)) |
2464 | goto no_vmas; |
2465 | |
2466 | for_each_vma(vmi, vma) { |
2467 | if (!(vma->vm_flags & VM_MERGEABLE)) |
2468 | continue; |
2469 | if (ksm_scan.address < vma->vm_start) |
2470 | ksm_scan.address = vma->vm_start; |
2471 | if (!vma->anon_vma) |
2472 | ksm_scan.address = vma->vm_end; |
2473 | |
2474 | while (ksm_scan.address < vma->vm_end) { |
2475 | if (ksm_test_exit(mm)) |
2476 | break; |
2477 | *page = follow_page(vma, ksm_scan.address, FOLL_GET); |
2478 | if (IS_ERR_OR_NULL(*page)) { |
2479 | ksm_scan.address += PAGE_SIZE; |
2480 | cond_resched(); |
2481 | continue; |
2482 | } |
2483 | if (is_zone_device_page(*page)) |
2484 | goto next_page; |
2485 | if (PageAnon(*page)) { |
2486 | flush_anon_page(vma, *page, ksm_scan.address); |
2487 | flush_dcache_page(*page); |
2488 | rmap_item = get_next_rmap_item(mm_slot, |
2489 | ksm_scan.rmap_list, ksm_scan.address); |
2490 | if (rmap_item) { |
2491 | ksm_scan.rmap_list = |
2492 | &rmap_item->rmap_list; |
2493 | |
2494 | if (should_skip_rmap_item(*page, rmap_item)) |
2495 | goto next_page; |
2496 | |
2497 | ksm_scan.address += PAGE_SIZE; |
2498 | } else |
2499 | put_page(*page); |
2500 | mmap_read_unlock(mm); |
2501 | return rmap_item; |
2502 | } |
2503 | next_page: |
2504 | put_page(*page); |
2505 | ksm_scan.address += PAGE_SIZE; |
2506 | cond_resched(); |
2507 | } |
2508 | } |
2509 | |
2510 | if (ksm_test_exit(mm)) { |
2511 | no_vmas: |
2512 | ksm_scan.address = 0; |
2513 | ksm_scan.rmap_list = &mm_slot->rmap_list; |
2514 | } |
2515 | /* |
2516 | * Nuke all the rmap_items that are above this current rmap: |
2517 | * because there were no VM_MERGEABLE vmas with such addresses. |
2518 | */ |
2519 | remove_trailing_rmap_items(ksm_scan.rmap_list); |
2520 | |
2521 | spin_lock(&ksm_mmlist_lock); |
2522 | slot = list_entry(mm_slot->slot.mm_node.next, |
2523 | struct mm_slot, mm_node); |
2524 | ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); |
2525 | if (ksm_scan.address == 0) { |
2526 | /* |
2527 | * We've completed a full scan of all vmas, holding mmap_lock |
2528 | * throughout, and found no VM_MERGEABLE: so do the same as |
2529 | * __ksm_exit does to remove this mm from all our lists now. |
2530 | * This applies either when cleaning up after __ksm_exit |
2531 | * (but beware: we can reach here even before __ksm_exit), |
2532 | * or when all VM_MERGEABLE areas have been unmapped (and |
2533 | * mmap_lock then protects against race with MADV_MERGEABLE). |
2534 | */ |
2535 | hash_del(&mm_slot->slot.hash); |
2536 | list_del(&mm_slot->slot.mm_node); |
2537 | spin_unlock(&ksm_mmlist_lock); |
2538 | |
2539 | mm_slot_free(mm_slot_cache, mm_slot); |
2540 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
2541 | clear_bit(MMF_VM_MERGE_ANY, &mm->flags); |
2542 | mmap_read_unlock(mm); |
2543 | mmdrop(mm); |
2544 | } else { |
2545 | mmap_read_unlock(mm); |
2546 | /* |
2547 | * mmap_read_unlock(mm) first because after |
2548 | * spin_unlock(&ksm_mmlist_lock) runs, the "mm" may |
2549 | * already have been freed under us by __ksm_exit() |
2550 | * because the "mm_slot" is still hashed and |
2551 | * ksm_scan.mm_slot doesn't point to it anymore. |
2552 | */ |
2553 | spin_unlock(&ksm_mmlist_lock); |
2554 | } |
2555 | |
2556 | /* Repeat until we've completed scanning the whole list */ |
2557 | mm_slot = ksm_scan.mm_slot; |
2558 | if (mm_slot != &ksm_mm_head) |
2559 | goto next_mm; |
2560 | |
2561 | trace_ksm_stop_scan(seq: ksm_scan.seqnr, rmap_entries: ksm_rmap_items); |
2562 | ksm_scan.seqnr++; |
2563 | return NULL; |
2564 | } |
2565 | |
2566 | /** |
2567 | * ksm_do_scan - the ksm scanner main worker function. |
2568 | * @scan_npages: number of pages we want to scan before we return. |
2569 | */ |
2570 | static void ksm_do_scan(unsigned int scan_npages) |
2571 | { |
2572 | struct ksm_rmap_item *rmap_item; |
2573 | struct page *page; |
2574 | unsigned int npages = scan_npages; |
2575 | |
2576 | while (npages-- && likely(!freezing(current))) { |
2577 | cond_resched(); |
2578 | rmap_item = scan_get_next_rmap_item(&page); |
2579 | if (!rmap_item) |
2580 | return; |
2581 | cmp_and_merge_page(page, rmap_item); |
2582 | put_page(page); |
2583 | } |
2584 | |
2585 | ksm_pages_scanned += scan_npages - npages; |
2586 | } |
2587 | |
2588 | static int ksmd_should_run(void) |
2589 | { |
2590 | return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node); |
2591 | } |
2592 | |
2593 | static int ksm_scan_thread(void *nothing) |
2594 | { |
2595 | unsigned int sleep_ms; |
2596 | |
2597 | set_freezable(); |
2598 | set_user_nice(current, 5); |
2599 | |
2600 | while (!kthread_should_stop()) { |
2601 | mutex_lock(&ksm_thread_mutex); |
2602 | wait_while_offlining(); |
2603 | if (ksmd_should_run()) |
2604 | ksm_do_scan(ksm_thread_pages_to_scan); |
2605 | mutex_unlock(&ksm_thread_mutex); |
2606 | |
2607 | try_to_freeze(); |
2608 | |
2609 | if (ksmd_should_run()) { |
2610 | sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs); |
2611 | wait_event_interruptible_timeout(ksm_iter_wait, |
2612 | sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs), |
2613 | msecs_to_jiffies(sleep_ms)); |
2614 | } else { |
2615 | wait_event_freezable(ksm_thread_wait, |
2616 | ksmd_should_run() || kthread_should_stop()); |
2617 | } |
2618 | } |
2619 | return 0; |
2620 | } |
2621 | |
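/*
 * __ksm_add_vma() and __ksm_del_vma() operate on a single VMA: the former
 * sets VM_MERGEABLE if the VMA is KSM-compatible, the latter unmerges any
 * KSM pages in the range before clearing the flag. They back the
 * process-wide MMF_VM_MERGE_ANY mode used by ksm_enable_merge_any() and
 * ksm_disable_merge_any() below.
 */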
2622 | static void __ksm_add_vma(struct vm_area_struct *vma) |
2623 | { |
2624 | unsigned long vm_flags = vma->vm_flags; |
2625 | |
2626 | if (vm_flags & VM_MERGEABLE) |
2627 | return; |
2628 | |
2629 | if (vma_ksm_compatible(vma)) |
2630 | vm_flags_set(vma, VM_MERGEABLE); |
2631 | } |
2632 | |
2633 | static int __ksm_del_vma(struct vm_area_struct *vma) |
2634 | { |
2635 | int err; |
2636 | |
2637 | if (!(vma->vm_flags & VM_MERGEABLE)) |
2638 | return 0; |
2639 | |
2640 | if (vma->anon_vma) { |
2641 | err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true); |
2642 | if (err) |
2643 | return err; |
2644 | } |
2645 | |
2646 | vm_flags_clear(vma, VM_MERGEABLE); |
2647 | return 0; |
2648 | } |
2649 | /** |
2650 | * ksm_add_vma - Mark vma as mergeable if compatible |
2651 | * |
2652 | * @vma: Pointer to vma |
2653 | */ |
2654 | void ksm_add_vma(struct vm_area_struct *vma) |
2655 | { |
2656 | struct mm_struct *mm = vma->vm_mm; |
2657 | |
2658 | if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) |
2659 | __ksm_add_vma(vma); |
2660 | } |
2661 | |
2662 | static void ksm_add_vmas(struct mm_struct *mm) |
2663 | { |
2664 | struct vm_area_struct *vma; |
2665 | |
2666 | VMA_ITERATOR(vmi, mm, 0); |
2667 | for_each_vma(vmi, vma) |
2668 | __ksm_add_vma(vma); |
2669 | } |
2670 | |
2671 | static int ksm_del_vmas(struct mm_struct *mm) |
2672 | { |
2673 | struct vm_area_struct *vma; |
2674 | int err; |
2675 | |
2676 | VMA_ITERATOR(vmi, mm, 0); |
2677 | for_each_vma(vmi, vma) { |
2678 | err = __ksm_del_vma(vma); |
2679 | if (err) |
2680 | return err; |
2681 | } |
2682 | return 0; |
2683 | } |
2684 | |
2685 | /** |
2686 | * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all |
2687 | * compatible VMA's |
2688 | * |
2689 | * @mm: Pointer to mm |
2690 | * |
2691 | * Returns 0 on success, otherwise error code |
2692 | */ |
2693 | int ksm_enable_merge_any(struct mm_struct *mm) |
2694 | { |
2695 | int err; |
2696 | |
2697 | if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) |
2698 | return 0; |
2699 | |
2700 | if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { |
2701 | err = __ksm_enter(mm); |
2702 | if (err) |
2703 | return err; |
2704 | } |
2705 | |
2706 | set_bit(MMF_VM_MERGE_ANY, &mm->flags); |
2707 | ksm_add_vmas(mm); |
2708 | |
2709 | return 0; |
2710 | } |
2711 | |
2712 | /** |
2713 | * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm, |
2714 | * previously enabled via ksm_enable_merge_any(). |
2715 | * |
2716 | * Disabling merging implies unmerging any merged pages, like setting |
2717 | * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and |
2718 | * merging on all compatible VMA's remains enabled. |
2719 | * |
2720 | * @mm: Pointer to mm |
2721 | * |
2722 | * Returns 0 on success, otherwise error code |
2723 | */ |
2724 | int ksm_disable_merge_any(struct mm_struct *mm) |
2725 | { |
2726 | int err; |
2727 | |
2728 | if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags)) |
2729 | return 0; |
2730 | |
2731 | err = ksm_del_vmas(mm); |
2732 | if (err) { |
2733 | ksm_add_vmas(mm); |
2734 | return err; |
2735 | } |
2736 | |
2737 | clear_bit(MMF_VM_MERGE_ANY, &mm->flags); |
2738 | return 0; |
2739 | } |
2740 | |
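/*
 * ksm_disable - turn off KSM for this mm however it was enabled
 *
 * If the whole mm was opted in via ksm_enable_merge_any(), that mode is
 * disabled; otherwise the individual VM_MERGEABLE VMAs are unmerged and
 * cleared. The caller must hold the mmap write lock, as asserted below.
 */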
2741 | int ksm_disable(struct mm_struct *mm) |
2742 | { |
2743 | mmap_assert_write_locked(mm); |
2744 | |
2745 | if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) |
2746 | return 0; |
2747 | if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) |
2748 | return ksm_disable_merge_any(mm); |
2749 | return ksm_del_vmas(mm); |
2750 | } |
2751 | |
2752 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, |
2753 | unsigned long end, int advice, unsigned long *vm_flags) |
2754 | { |
2755 | struct mm_struct *mm = vma->vm_mm; |
2756 | int err; |
2757 | |
2758 | switch (advice) { |
2759 | case MADV_MERGEABLE: |
2760 | if (vma->vm_flags & VM_MERGEABLE) |
2761 | return 0; |
2762 | if (!vma_ksm_compatible(vma)) |
2763 | return 0; |
2764 | |
2765 | if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { |
2766 | err = __ksm_enter(mm); |
2767 | if (err) |
2768 | return err; |
2769 | } |
2770 | |
2771 | *vm_flags |= VM_MERGEABLE; |
2772 | break; |
2773 | |
2774 | case MADV_UNMERGEABLE: |
2775 | if (!(*vm_flags & VM_MERGEABLE)) |
2776 | return 0; /* just ignore the advice */ |
2777 | |
2778 | if (vma->anon_vma) { |
2779 | err = unmerge_ksm_pages(vma, start, end, true); |
2780 | if (err) |
2781 | return err; |
2782 | } |
2783 | |
2784 | *vm_flags &= ~VM_MERGEABLE; |
2785 | break; |
2786 | } |
2787 | |
2788 | return 0; |
2789 | } |
2790 | EXPORT_SYMBOL_GPL(ksm_madvise); |
2791 | |
2792 | int __ksm_enter(struct mm_struct *mm) |
2793 | { |
2794 | struct ksm_mm_slot *mm_slot; |
2795 | struct mm_slot *slot; |
2796 | int needs_wakeup; |
2797 | |
2798 | mm_slot = mm_slot_alloc(mm_slot_cache); |
2799 | if (!mm_slot) |
2800 | return -ENOMEM; |
2801 | |
2802 | slot = &mm_slot->slot; |
2803 | |
2804 | /* Check ksm_run too? Would need tighter locking */ |
2805 | needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node); |
2806 | |
2807 | spin_lock(&ksm_mmlist_lock); |
2808 | mm_slot_insert(mm_slots_hash, mm, slot); |
2809 | /* |
2810 | * When KSM_RUN_MERGE (or KSM_RUN_STOP), |
2811 | * insert just behind the scanning cursor, to let the area settle |
2812 | * down a little; when fork is followed by immediate exec, we don't |
2813 | * want ksmd to waste time setting up and tearing down an rmap_list. |
2814 | * |
2815 | * But when KSM_RUN_UNMERGE, it's important to insert ahead of its |
2816 | * scanning cursor, otherwise KSM pages in newly forked mms will be |
2817 | * missed: then we might as well insert at the end of the list. |
2818 | */ |
2819 | if (ksm_run & KSM_RUN_UNMERGE) |
2820 | list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node); |
2821 | else |
2822 | list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node); |
2823 | spin_unlock(&ksm_mmlist_lock); |
2824 | |
2825 | set_bit(MMF_VM_MERGEABLE, &mm->flags); |
2826 | mmgrab(mm); |
2827 | |
2828 | if (needs_wakeup) |
2829 | wake_up_interruptible(&ksm_thread_wait); |
2830 | |
2831 | trace_ksm_enter(mm); |
2832 | return 0; |
2833 | } |
2834 | |
2835 | void __ksm_exit(struct mm_struct *mm) |
2836 | { |
2837 | struct ksm_mm_slot *mm_slot; |
2838 | struct mm_slot *slot; |
2839 | int easy_to_free = 0; |
2840 | |
2841 | /* |
2842 | * This process is exiting: if it's straightforward (as is the |
2843 | * case when ksmd was never running), free mm_slot immediately. |
2844 | * But if it's at the cursor or has rmap_items linked to it, use |
2845 | * mmap_lock to synchronize with any break_cows before pagetables |
2846 | * are freed, and leave the mm_slot on the list for ksmd to free. |
2847 | * Beware: ksm may already have noticed it exiting and freed the slot. |
2848 | */ |
2849 | |
2850 | spin_lock(&ksm_mmlist_lock); |
2851 | slot = mm_slot_lookup(mm_slots_hash, mm); |
2852 | mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); |
2853 | if (mm_slot && ksm_scan.mm_slot != mm_slot) { |
2854 | if (!mm_slot->rmap_list) { |
2855 | hash_del(&slot->hash); |
2856 | list_del(&slot->mm_node); |
2857 | easy_to_free = 1; |
2858 | } else { |
2859 | list_move(&slot->mm_node, |
2860 | &ksm_scan.mm_slot->slot.mm_node); |
2861 | } |
2862 | } |
2863 | spin_unlock(&ksm_mmlist_lock); |
2864 | |
2865 | if (easy_to_free) { |
2866 | mm_slot_free(mm_slot_cache, mm_slot); |
2867 | clear_bit(MMF_VM_MERGE_ANY, &mm->flags); |
2868 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); |
2869 | mmdrop(mm); |
2870 | } else if (mm_slot) { |
2871 | mmap_write_lock(mm); |
2872 | mmap_write_unlock(mm); |
2873 | } |
2874 | |
2875 | trace_ksm_exit(mm); |
2876 | } |
2877 | |
2878 | struct page *ksm_might_need_to_copy(struct page *page, |
2879 | struct vm_area_struct *vma, unsigned long address) |
2880 | { |
2881 | struct folio *folio = page_folio(page); |
2882 | struct anon_vma *anon_vma = folio_anon_vma(folio); |
2883 | struct page *new_page; |
2884 | |
2885 | if (PageKsm(page)) { |
2886 | if (page_stable_node(page) && |
2887 | !(ksm_run & KSM_RUN_UNMERGE)) |
2888 | return page; /* no need to copy it */ |
2889 | } else if (!anon_vma) { |
2890 | return page; /* no need to copy it */ |
2891 | } else if (page->index == linear_page_index(vma, address) && |
2892 | anon_vma->root == vma->anon_vma->root) { |
2893 | return page; /* still no need to copy it */ |
2894 | } |
2895 | if (PageHWPoison(page)) |
2896 | return ERR_PTR(-EHWPOISON); |
2897 | if (!PageUptodate(page)) |
2898 | return page; /* let do_swap_page report the error */ |
2899 | |
2900 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); |
2901 | if (new_page && |
2902 | mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) { |
2903 | put_page(new_page); |
2904 | new_page = NULL; |
2905 | } |
2906 | if (new_page) { |
2907 | if (copy_mc_user_highpage(new_page, page, address, vma)) { |
2908 | put_page(new_page); |
2909 | memory_failure_queue(page_to_pfn(page), 0); |
2910 | return ERR_PTR(-EHWPOISON); |
2911 | } |
2912 | SetPageDirty(new_page); |
2913 | __SetPageUptodate(new_page); |
2914 | __SetPageLocked(new_page); |
2915 | #ifdef CONFIG_SWAP |
2916 | count_vm_event(KSM_SWPIN_COPY); |
2917 | #endif |
2918 | } |
2919 | |
2920 | return new_page; |
2921 | } |
2922 | |
2923 | void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) |
2924 | { |
2925 | struct ksm_stable_node *stable_node; |
2926 | struct ksm_rmap_item *rmap_item; |
2927 | int search_new_forks = 0; |
2928 | |
2929 | VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio); |
2930 | |
2931 | /* |
2932 | * Rely on the page lock to protect against concurrent modifications |
2933 | * to that page's node of the stable tree. |
2934 | */ |
2935 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
2936 | |
2937 | stable_node = folio_stable_node(folio); |
2938 | if (!stable_node) |
2939 | return; |
2940 | again: |
2941 | hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { |
2942 | struct anon_vma *anon_vma = rmap_item->anon_vma; |
2943 | struct anon_vma_chain *vmac; |
2944 | struct vm_area_struct *vma; |
2945 | |
2946 | cond_resched(); |
2947 | if (!anon_vma_trylock_read(anon_vma)) { |
2948 | if (rwc->try_lock) { |
2949 | rwc->contended = true; |
2950 | return; |
2951 | } |
2952 | anon_vma_lock_read(anon_vma); |
2953 | } |
2954 | anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, |
2955 | 0, ULONG_MAX) { |
2956 | unsigned long addr; |
2957 | |
2958 | cond_resched(); |
2959 | vma = vmac->vma; |
2960 | |
2961 | /* Ignore the stable/unstable/sqnr flags */ |
2962 | addr = rmap_item->address & PAGE_MASK; |
2963 | |
2964 | if (addr < vma->vm_start || addr >= vma->vm_end) |
2965 | continue; |
2966 | /* |
2967 | * Initially we examine only the vma which covers this |
2968 | * rmap_item; but later, if there is still work to do, |
2969 | * we examine covering vmas in other mms: in case they |
2970 | * were forked from the original since ksmd passed. |
2971 | */ |
2972 | if ((rmap_item->mm == vma->vm_mm) == search_new_forks) |
2973 | continue; |
2974 | |
2975 | if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) |
2976 | continue; |
2977 | |
2978 | if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) { |
2979 | anon_vma_unlock_read(anon_vma); |
2980 | return; |
2981 | } |
2982 | if (rwc->done && rwc->done(folio)) { |
2983 | anon_vma_unlock_read(anon_vma); |
2984 | return; |
2985 | } |
2986 | } |
2987 | anon_vma_unlock_read(anon_vma); |
2988 | } |
2989 | if (!search_new_forks++) |
2990 | goto again; |
2991 | } |
2992 | |
2993 | #ifdef CONFIG_MEMORY_FAILURE |
2994 | /* |
2995 | * Collect processes when the error hit a KSM page. |
2996 | */ |
2997 | void collect_procs_ksm(struct page *page, struct list_head *to_kill, |
2998 | int force_early) |
2999 | { |
3000 | struct ksm_stable_node *stable_node; |
3001 | struct ksm_rmap_item *rmap_item; |
3002 | struct folio *folio = page_folio(page); |
3003 | struct vm_area_struct *vma; |
3004 | struct task_struct *tsk; |
3005 | |
3006 | stable_node = folio_stable_node(folio); |
3007 | if (!stable_node) |
3008 | return; |
3009 | hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { |
3010 | struct anon_vma *av = rmap_item->anon_vma; |
3011 | |
3012 | anon_vma_lock_read(av); |
3013 | rcu_read_lock(); |
3014 | for_each_process(tsk) { |
3015 | struct anon_vma_chain *vmac; |
3016 | unsigned long addr; |
3017 | struct task_struct *t = |
3018 | task_early_kill(tsk, force_early); |
3019 | if (!t) |
3020 | continue; |
3021 | anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0, |
3022 | ULONG_MAX) |
3023 | { |
3024 | vma = vmac->vma; |
3025 | if (vma->vm_mm == t->mm) { |
3026 | addr = rmap_item->address & PAGE_MASK; |
3027 | add_to_kill_ksm(t, page, vma, to_kill, |
3028 | addr); |
3029 | } |
3030 | } |
3031 | } |
3032 | rcu_read_unlock(); |
3033 | anon_vma_unlock_read(av); |
3034 | } |
3035 | } |
3036 | #endif |
3037 | |
3038 | #ifdef CONFIG_MIGRATION |
3039 | void folio_migrate_ksm(struct folio *newfolio, struct folio *folio) |
3040 | { |
3041 | struct ksm_stable_node *stable_node; |
3042 | |
3043 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
3044 | VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio); |
3045 | VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio); |
3046 | |
3047 | stable_node = folio_stable_node(folio); |
3048 | if (stable_node) { |
3049 | VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio); |
3050 | stable_node->kpfn = folio_pfn(newfolio); |
3051 | /* |
3052 | * newfolio->mapping was set in advance; now we need smp_wmb() |
3053 | * to make sure that the new stable_node->kpfn is visible |
3054 | * to get_ksm_page() before it can see that folio->mapping |
3055 | * has gone stale (or that folio_test_swapcache has been cleared). |
3056 | */ |
3057 | smp_wmb(); |
3058 | set_page_stable_node(&folio->page, NULL); |
3059 | } |
3060 | } |
3061 | #endif /* CONFIG_MIGRATION */ |
3062 | |
3063 | #ifdef CONFIG_MEMORY_HOTREMOVE |
3064 | static void wait_while_offlining(void) |
3065 | { |
3066 | while (ksm_run & KSM_RUN_OFFLINE) { |
3067 | mutex_unlock(&ksm_thread_mutex); |
3068 | wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE), |
3069 | TASK_UNINTERRUPTIBLE); |
3070 | mutex_lock(&ksm_thread_mutex); |
3071 | } |
3072 | } |
3073 | |
3074 | static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node, |
3075 | unsigned long start_pfn, |
3076 | unsigned long end_pfn) |
3077 | { |
3078 | if (stable_node->kpfn >= start_pfn && |
3079 | stable_node->kpfn < end_pfn) { |
3080 | /* |
3081 | * Don't get_ksm_page, page has already gone: |
3082 | * which is why we keep kpfn instead of page* |
3083 | */ |
3084 | remove_node_from_stable_tree(stable_node); |
3085 | return true; |
3086 | } |
3087 | return false; |
3088 | } |
3089 | |
3090 | static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node, |
3091 | unsigned long start_pfn, |
3092 | unsigned long end_pfn, |
3093 | struct rb_root *root) |
3094 | { |
3095 | struct ksm_stable_node *dup; |
3096 | struct hlist_node *hlist_safe; |
3097 | |
3098 | if (!is_stable_node_chain(stable_node)) { |
3099 | VM_BUG_ON(is_stable_node_dup(stable_node)); |
3100 | return stable_node_dup_remove_range(stable_node, start_pfn, |
3101 | end_pfn); |
3102 | } |
3103 | |
3104 | hlist_for_each_entry_safe(dup, hlist_safe, |
3105 | &stable_node->hlist, hlist_dup) { |
3106 | VM_BUG_ON(!is_stable_node_dup(dup)); |
3107 | stable_node_dup_remove_range(dup, start_pfn, end_pfn); |
3108 | } |
3109 | if (hlist_empty(&stable_node->hlist)) { |
3110 | free_stable_node_chain(stable_node, root); |
3111 | return true; /* notify caller that tree was rebalanced */ |
3112 | } else |
3113 | return false; |
3114 | } |
3115 | |
3116 | static void ksm_check_stable_tree(unsigned long start_pfn, |
3117 | unsigned long end_pfn) |
3118 | { |
3119 | struct ksm_stable_node *stable_node, *next; |
3120 | struct rb_node *node; |
3121 | int nid; |
3122 | |
3123 | for (nid = 0; nid < ksm_nr_node_ids; nid++) { |
3124 | node = rb_first(root_stable_tree + nid); |
3125 | while (node) { |
3126 | stable_node = rb_entry(node, struct ksm_stable_node, node); |
3127 | if (stable_node_chain_remove_range(stable_node, |
3128 | start_pfn, end_pfn, |
3129 | root_stable_tree + |
3130 | nid)) |
3131 | node = rb_first(root_stable_tree + nid); |
3132 | else |
3133 | node = rb_next(node); |
3134 | cond_resched(); |
3135 | } |
3136 | } |
3137 | list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { |
3138 | if (stable_node->kpfn >= start_pfn && |
3139 | stable_node->kpfn < end_pfn) |
3140 | remove_node_from_stable_tree(stable_node); |
3141 | cond_resched(); |
3142 | } |
3143 | } |
3144 | |
3145 | static int ksm_memory_callback(struct notifier_block *self, |
3146 | unsigned long action, void *arg) |
3147 | { |
3148 | struct memory_notify *mn = arg; |
3149 | |
3150 | switch (action) { |
3151 | case MEM_GOING_OFFLINE: |
3152 | /* |
3153 | * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items() |
3154 | * and remove_all_stable_nodes() while memory is going offline: |
3155 | * it is unsafe for them to touch the stable tree at this time. |
3156 | * But unmerge_ksm_pages(), rmap lookups and other entry points |
3157 | * which do not need the ksm_thread_mutex are all safe. |
3158 | */ |
3159 | mutex_lock(&ksm_thread_mutex); |
3160 | ksm_run |= KSM_RUN_OFFLINE; |
3161 | mutex_unlock(&ksm_thread_mutex); |
3162 | break; |
3163 | |
3164 | case MEM_OFFLINE: |
3165 | /* |
3166 | * Most of the work is done by page migration; but there might |
3167 | * be a few stable_nodes left over, still pointing to struct |
3168 | * pages which have been offlined: prune those from the tree, |
3169 | * otherwise get_ksm_page() might later try to access a |
3170 | * non-existent struct page. |
3171 | */ |
3172 | ksm_check_stable_tree(mn->start_pfn, |
3173 | mn->start_pfn + mn->nr_pages); |
3174 | fallthrough; |
3175 | case MEM_CANCEL_OFFLINE: |
3176 | mutex_lock(&ksm_thread_mutex); |
3177 | ksm_run &= ~KSM_RUN_OFFLINE; |
3178 | mutex_unlock(&ksm_thread_mutex); |
3179 | |
3180 | smp_mb(); /* wake_up_bit advises this */ |
3181 | wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE)); |
3182 | break; |
3183 | } |
3184 | return NOTIFY_OK; |
3185 | } |
3186 | #else |
3187 | static void wait_while_offlining(void) |
3188 | { |
3189 | } |
3190 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |
3191 | |
3192 | #ifdef CONFIG_PROC_FS |
3193 | long ksm_process_profit(struct mm_struct *mm) |
3194 | { |
3195 | return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE - |
3196 | mm->ksm_rmap_items * sizeof(struct ksm_rmap_item); |
3197 | } |
3198 | #endif /* CONFIG_PROC_FS */ |
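/*
 * A rough worked example of the profit formula above (hypothetical
 * numbers, assuming 4KiB pages and an rmap_item of about 64 bytes on a
 * 64-bit build): a process with 1000 ksm_merging_pages and 500
 * ksm_zero_pages, tracked by 2000 rmap_items, would report roughly
 * (1000 + 500) * 4096 - 2000 * 64 = 6144000 - 128000 = 6016000 bytes,
 * i.e. a bit under 6 MiB of net saving.
 */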
3199 | |
3200 | #ifdef CONFIG_SYSFS |
3201 | /* |
3202 | * This all compiles without CONFIG_SYSFS, but is a waste of space. |
3203 | */ |
3204 | |
3205 | #define KSM_ATTR_RO(_name) \ |
3206 | static struct kobj_attribute _name##_attr = __ATTR_RO(_name) |
3207 | #define KSM_ATTR(_name) \ |
3208 | static struct kobj_attribute _name##_attr = __ATTR_RW(_name) |
3209 | |
3210 | static ssize_t sleep_millisecs_show(struct kobject *kobj, |
3211 | struct kobj_attribute *attr, char *buf) |
3212 | { |
3213 | return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs); |
3214 | } |
3215 | |
3216 | static ssize_t sleep_millisecs_store(struct kobject *kobj, |
3217 | struct kobj_attribute *attr, |
3218 | const char *buf, size_t count) |
3219 | { |
3220 | unsigned int msecs; |
3221 | int err; |
3222 | |
3223 | err = kstrtouint(buf, 10, &msecs); |
3224 | if (err) |
3225 | return -EINVAL; |
3226 | |
3227 | ksm_thread_sleep_millisecs = msecs; |
3228 | wake_up_interruptible(&ksm_iter_wait); |
3229 | |
3230 | return count; |
3231 | } |
3232 | KSM_ATTR(sleep_millisecs); |
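/*
 * From userspace this knob is the sysfs file
 * /sys/kernel/mm/ksm/sleep_millisecs; for example,
 * "echo 200 > /sys/kernel/mm/ksm/sleep_millisecs" asks ksmd to pause
 * roughly 200ms between scan batches (an illustrative value, not a
 * recommendation).
 */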
3233 | |
3234 | static ssize_t pages_to_scan_show(struct kobject *kobj, |
3235 | struct kobj_attribute *attr, char *buf) |
3236 | { |
3237 | return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan); |
3238 | } |
3239 | |
3240 | static ssize_t pages_to_scan_store(struct kobject *kobj, |
3241 | struct kobj_attribute *attr, |
3242 | const char *buf, size_t count) |
3243 | { |
3244 | unsigned int nr_pages; |
3245 | int err; |
3246 | |
3247 | err = kstrtouint(buf, 10, &nr_pages); |
3248 | if (err) |
3249 | return -EINVAL; |
3250 | |
3251 | ksm_thread_pages_to_scan = nr_pages; |
3252 | |
3253 | return count; |
3254 | } |
3255 | KSM_ATTR(pages_to_scan); |
3256 | |
3257 | static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, |
3258 | char *buf) |
3259 | { |
3260 | return sysfs_emit(buf, "%lu\n", ksm_run); |
3261 | } |
3262 | |
3263 | static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, |
3264 | const char *buf, size_t count) |
3265 | { |
3266 | unsigned int flags; |
3267 | int err; |
3268 | |
3269 | err = kstrtouint(buf, 10, &flags); |
3270 | if (err) |
3271 | return -EINVAL; |
3272 | if (flags > KSM_RUN_UNMERGE) |
3273 | return -EINVAL; |
3274 | |
3275 | /* |
3276 | * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. |
3277 | * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, |
3278 | * breaking COW to free the pages_shared (but leaves mm_slots |
3279 | * on the list for when ksmd may be set running again). |
3280 | */ |
3281 | |
3282 | mutex_lock(&ksm_thread_mutex); |
3283 | wait_while_offlining(); |
3284 | if (ksm_run != flags) { |
3285 | ksm_run = flags; |
3286 | if (flags & KSM_RUN_UNMERGE) { |
3287 | set_current_oom_origin(); |
3288 | err = unmerge_and_remove_all_rmap_items(); |
3289 | clear_current_oom_origin(); |
3290 | if (err) { |
3291 | ksm_run = KSM_RUN_STOP; |
3292 | count = err; |
3293 | } |
3294 | } |
3295 | } |
3296 | mutex_unlock(&ksm_thread_mutex); |
3297 | |
3298 | if (flags & KSM_RUN_MERGE) |
3299 | wake_up_interruptible(&ksm_thread_wait); |
3300 | |
3301 | return count; |
3302 | } |
3303 | KSM_ATTR(run); |
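/*
 * Typical use of this knob from userspace (values as described above):
 *   echo 1 > /sys/kernel/mm/ksm/run    # start ksmd merging
 *   echo 0 > /sys/kernel/mm/ksm/run    # stop ksmd, keep merged pages
 *   echo 2 > /sys/kernel/mm/ksm/run    # stop ksmd and unmerge everything
 */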
3304 | |
3305 | #ifdef CONFIG_NUMA |
3306 | static ssize_t merge_across_nodes_show(struct kobject *kobj, |
3307 | struct kobj_attribute *attr, char *buf) |
3308 | { |
3309 | return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes); |
3310 | } |
3311 | |
3312 | static ssize_t merge_across_nodes_store(struct kobject *kobj, |
3313 | struct kobj_attribute *attr, |
3314 | const char *buf, size_t count) |
3315 | { |
3316 | int err; |
3317 | unsigned long knob; |
3318 | |
3319 | err = kstrtoul(buf, 10, &knob); |
3320 | if (err) |
3321 | return err; |
3322 | if (knob > 1) |
3323 | return -EINVAL; |
3324 | |
3325 | mutex_lock(&ksm_thread_mutex); |
3326 | wait_while_offlining(); |
3327 | if (ksm_merge_across_nodes != knob) { |
3328 | if (ksm_pages_shared || remove_all_stable_nodes()) |
3329 | err = -EBUSY; |
3330 | else if (root_stable_tree == one_stable_tree) { |
3331 | struct rb_root *buf; |
3332 | /* |
3333 | * This is the first time that we switch away from the |
3334 | * default of merging across nodes: must now allocate |
3335 | * a buffer to hold as many roots as may be needed. |
3336 | * Allocate stable and unstable together: |
3337 | * MAXSMP NODES_SHIFT 10 will use 16kB. |
3338 | */ |
3339 | buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), |
3340 | GFP_KERNEL); |
3341 | /* Let us assume that RB_ROOT, i.e. a NULL rb_node pointer, is all zeroes */ |
3342 | if (!buf) |
3343 | err = -ENOMEM; |
3344 | else { |
3345 | root_stable_tree = buf; |
3346 | root_unstable_tree = buf + nr_node_ids; |
3347 | /* Stable tree is empty but not the unstable */ |
3348 | root_unstable_tree[0] = one_unstable_tree[0]; |
3349 | } |
3350 | } |
3351 | if (!err) { |
3352 | ksm_merge_across_nodes = knob; |
3353 | ksm_nr_node_ids = knob ? 1 : nr_node_ids; |
3354 | } |
3355 | } |
3356 | mutex_unlock(&ksm_thread_mutex); |
3357 | |
3358 | return err ? err : count; |
3359 | } |
3360 | KSM_ATTR(merge_across_nodes); |
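/*
 * Note that the store above refuses with -EBUSY while any KSM pages
 * exist, so in practice merge_across_nodes can only be flipped after
 * stopping ksmd and unmerging (e.g. "echo 2 > /sys/kernel/mm/ksm/run").
 */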
3361 | #endif |
3362 | |
3363 | static ssize_t use_zero_pages_show(struct kobject *kobj, |
3364 | struct kobj_attribute *attr, char *buf) |
3365 | { |
3366 | return sysfs_emit(buf, "%u\n", ksm_use_zero_pages); |
3367 | } |
3368 | static ssize_t use_zero_pages_store(struct kobject *kobj, |
3369 | struct kobj_attribute *attr, |
3370 | const char *buf, size_t count) |
3371 | { |
3372 | int err; |
3373 | bool value; |
3374 | |
3375 | err = kstrtobool(buf, &value); |
3376 | if (err) |
3377 | return -EINVAL; |
3378 | |
3379 | ksm_use_zero_pages = value; |
3380 | |
3381 | return count; |
3382 | } |
3383 | KSM_ATTR(use_zero_pages); |
3384 | |
3385 | static ssize_t max_page_sharing_show(struct kobject *kobj, |
3386 | struct kobj_attribute *attr, char *buf) |
3387 | { |
3388 | return sysfs_emit(buf, "%u\n", ksm_max_page_sharing); |
3389 | } |
3390 | |
3391 | static ssize_t max_page_sharing_store(struct kobject *kobj, |
3392 | struct kobj_attribute *attr, |
3393 | const char *buf, size_t count) |
3394 | { |
3395 | int err; |
3396 | int knob; |
3397 | |
3398 | err = kstrtoint(buf, 10, &knob); |
3399 | if (err) |
3400 | return err; |
3401 | /* |
3402 | * When a KSM page is created it is shared by 2 mappings. This |
3403 | * being a signed comparison, it implicitly verifies it's not |
3404 | * negative. |
3405 | */ |
3406 | if (knob < 2) |
3407 | return -EINVAL; |
3408 | |
3409 | if (READ_ONCE(ksm_max_page_sharing) == knob) |
3410 | return count; |
3411 | |
3412 | mutex_lock(&ksm_thread_mutex); |
3413 | wait_while_offlining(); |
3414 | if (ksm_max_page_sharing != knob) { |
3415 | if (ksm_pages_shared || remove_all_stable_nodes()) |
3416 | err = -EBUSY; |
3417 | else |
3418 | ksm_max_page_sharing = knob; |
3419 | } |
3420 | mutex_unlock(&ksm_thread_mutex); |
3421 | |
3422 | return err ? err : count; |
3423 | } |
3424 | KSM_ATTR(max_page_sharing); |
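/*
 * Higher max_page_sharing values allow more deduplication per stable
 * node, at the cost of longer worst-case rmap walks over that node's
 * sharers (compaction, memory-failure, swap-out and similar paths);
 * the default is a compromise between the two.
 */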
3425 | |
3426 | static ssize_t pages_scanned_show(struct kobject *kobj, |
3427 | struct kobj_attribute *attr, char *buf) |
3428 | { |
3429 | return sysfs_emit(buf, "%lu\n", ksm_pages_scanned); |
3430 | } |
3431 | KSM_ATTR_RO(pages_scanned); |
3432 | |
3433 | static ssize_t pages_shared_show(struct kobject *kobj, |
3434 | struct kobj_attribute *attr, char *buf) |
3435 | { |
3436 | return sysfs_emit(buf, "%lu\n", ksm_pages_shared); |
3437 | } |
3438 | KSM_ATTR_RO(pages_shared); |
3439 | |
3440 | static ssize_t pages_sharing_show(struct kobject *kobj, |
3441 | struct kobj_attribute *attr, char *buf) |
3442 | { |
3443 | return sysfs_emit(buf, "%lu\n", ksm_pages_sharing); |
3444 | } |
3445 | KSM_ATTR_RO(pages_sharing); |
3446 | |
3447 | static ssize_t pages_unshared_show(struct kobject *kobj, |
3448 | struct kobj_attribute *attr, char *buf) |
3449 | { |
3450 | return sysfs_emit(buf, "%lu\n", ksm_pages_unshared); |
3451 | } |
3452 | KSM_ATTR_RO(pages_unshared); |
3453 | |
3454 | static ssize_t pages_volatile_show(struct kobject *kobj, |
3455 | struct kobj_attribute *attr, char *buf) |
3456 | { |
3457 | long ksm_pages_volatile; |
3458 | |
3459 | ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared |
3460 | - ksm_pages_sharing - ksm_pages_unshared; |
3461 | /* |
3462 | * It was not worth any locking to calculate that statistic, |
3463 | * but it might therefore sometimes be negative: conceal that. |
3464 | */ |
3465 | if (ksm_pages_volatile < 0) |
3466 | ksm_pages_volatile = 0; |
3467 | return sysfs_emit(buf, "%ld\n", ksm_pages_volatile); |
3468 | } |
3469 | KSM_ATTR_RO(pages_volatile); |
3470 | |
3471 | static ssize_t pages_skipped_show(struct kobject *kobj, |
3472 | struct kobj_attribute *attr, char *buf) |
3473 | { |
3474 | return sysfs_emit(buf, "%lu\n", ksm_pages_skipped); |
3475 | } |
3476 | KSM_ATTR_RO(pages_skipped); |
3477 | |
3478 | static ssize_t ksm_zero_pages_show(struct kobject *kobj, |
3479 | struct kobj_attribute *attr, char *buf) |
3480 | { |
3481 | return sysfs_emit(buf, "%ld\n", ksm_zero_pages); |
3482 | } |
3483 | KSM_ATTR_RO(ksm_zero_pages); |
3484 | |
3485 | static ssize_t general_profit_show(struct kobject *kobj, |
3486 | struct kobj_attribute *attr, char *buf) |
3487 | { |
3488 | long general_profit; |
3489 | |
3490 | general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE - |
3491 | ksm_rmap_items * sizeof(struct ksm_rmap_item); |
3492 | |
3493 | return sysfs_emit(buf, "%ld\n", general_profit); |
3494 | } |
3495 | KSM_ATTR_RO(general_profit); |
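/*
 * The system-wide figure above mirrors the per-mm calculation in
 * ksm_process_profit(): pages_sharing counts the mappings deduplicated
 * into existing KSM pages, so each one stands for roughly a page of
 * memory no longer needed, less the metadata cost of the rmap_items.
 */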
3496 | |
3497 | static ssize_t stable_node_dups_show(struct kobject *kobj, |
3498 | struct kobj_attribute *attr, char *buf) |
3499 | { |
3500 | return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups); |
3501 | } |
3502 | KSM_ATTR_RO(stable_node_dups); |
3503 | |
3504 | static ssize_t stable_node_chains_show(struct kobject *kobj, |
3505 | struct kobj_attribute *attr, char *buf) |
3506 | { |
3507 | return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains); |
3508 | } |
3509 | KSM_ATTR_RO(stable_node_chains); |
3510 | |
3511 | static ssize_t |
3512 | stable_node_chains_prune_millisecs_show(struct kobject *kobj, |
3513 | struct kobj_attribute *attr, |
3514 | char *buf) |
3515 | { |
3516 | return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs); |
3517 | } |
3518 | |
3519 | static ssize_t |
3520 | stable_node_chains_prune_millisecs_store(struct kobject *kobj, |
3521 | struct kobj_attribute *attr, |
3522 | const char *buf, size_t count) |
3523 | { |
3524 | unsigned int msecs; |
3525 | int err; |
3526 | |
3527 | err = kstrtouint(buf, 10, &msecs); |
3528 | if (err) |
3529 | return -EINVAL; |
3530 | |
3531 | ksm_stable_node_chains_prune_millisecs = msecs; |
3532 | |
3533 | return count; |
3534 | } |
3535 | KSM_ATTR(stable_node_chains_prune_millisecs); |
3536 | |
3537 | static ssize_t full_scans_show(struct kobject *kobj, |
3538 | struct kobj_attribute *attr, char *buf) |
3539 | { |
3540 | return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr); |
3541 | } |
3542 | KSM_ATTR_RO(full_scans); |
3543 | |
3544 | static ssize_t smart_scan_show(struct kobject *kobj, |
3545 | struct kobj_attribute *attr, char *buf) |
3546 | { |
3547 | return sysfs_emit(buf, "%u\n", ksm_smart_scan); |
3548 | } |
3549 | |
3550 | static ssize_t smart_scan_store(struct kobject *kobj, |
3551 | struct kobj_attribute *attr, |
3552 | const char *buf, size_t count) |
3553 | { |
3554 | int err; |
3555 | bool value; |
3556 | |
3557 | err = kstrtobool(buf, &value); |
3558 | if (err) |
3559 | return -EINVAL; |
3560 | |
3561 | ksm_smart_scan = value; |
3562 | return count; |
3563 | } |
3564 | KSM_ATTR(smart_scan); |
3565 | |
3566 | static struct attribute *ksm_attrs[] = { |
3567 | &sleep_millisecs_attr.attr, |
3568 | &pages_to_scan_attr.attr, |
3569 | &run_attr.attr, |
3570 | &pages_scanned_attr.attr, |
3571 | &pages_shared_attr.attr, |
3572 | &pages_sharing_attr.attr, |
3573 | &pages_unshared_attr.attr, |
3574 | &pages_volatile_attr.attr, |
3575 | &pages_skipped_attr.attr, |
3576 | &ksm_zero_pages_attr.attr, |
3577 | &full_scans_attr.attr, |
3578 | #ifdef CONFIG_NUMA |
3579 | &merge_across_nodes_attr.attr, |
3580 | #endif |
3581 | &max_page_sharing_attr.attr, |
3582 | &stable_node_chains_attr.attr, |
3583 | &stable_node_dups_attr.attr, |
3584 | &stable_node_chains_prune_millisecs_attr.attr, |
3585 | &use_zero_pages_attr.attr, |
3586 | &general_profit_attr.attr, |
3587 | &smart_scan_attr.attr, |
3588 | NULL, |
3589 | }; |
3590 | |
3591 | static const struct attribute_group ksm_attr_group = { |
3592 | .attrs = ksm_attrs, |
3593 | .name = "ksm", |
3594 | }; |
3595 | #endif /* CONFIG_SYSFS */ |
3596 | |
3597 | static int __init ksm_init(void) |
3598 | { |
3599 | struct task_struct *ksm_thread; |
3600 | int err; |
3601 | |
3602 | /* The correct value depends on page size and endianness */ |
3603 | zero_checksum = calc_checksum(ZERO_PAGE(0)); |
3604 | /* Default to false for backwards compatibility */ |
3605 | ksm_use_zero_pages = false; |
3606 | |
3607 | err = ksm_slab_init(); |
3608 | if (err) |
3609 | goto out; |
3610 | |
3611 | ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); |
3612 | if (IS_ERR(ksm_thread)) { |
3613 | pr_err("ksm: creating kthread failed\n"); |
3614 | err = PTR_ERR(ksm_thread); |
3615 | goto out_free; |
3616 | } |
3617 | |
3618 | #ifdef CONFIG_SYSFS |
3619 | err = sysfs_create_group(mm_kobj, &ksm_attr_group); |
3620 | if (err) { |
3621 | pr_err("ksm: register sysfs failed\n"); |
3622 | kthread_stop(ksm_thread); |
3623 | goto out_free; |
3624 | } |
3625 | #else |
3626 | ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ |
3627 | |
3628 | #endif /* CONFIG_SYSFS */ |
3629 | |
3630 | #ifdef CONFIG_MEMORY_HOTREMOVE |
3631 | /* There is no significance to this priority 100 */ |
3632 | hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI); |
3633 | #endif |
3634 | return 0; |
3635 | |
3636 | out_free: |
3637 | ksm_slab_free(); |
3638 | out: |
3639 | return err; |
3640 | } |
3641 | subsys_initcall(ksm_init); |
3642 | |