/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
 * ram or swap backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
        return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
        return folio_is_file_lru(page_folio(page));
}
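
/*
 * Illustrative only, not a quote of any particular caller: because the
 * return value is an int rather than a bool, callers can use it directly
 * as an offset into paired anon/file counters (assuming the usual layout
 * where the _FILE counter immediately follows the _ANON one), e.g.:
 *
 *	mod_node_page_state(folio_pgdat(folio),
 *			    NR_ISOLATED_ANON + folio_is_file_lru(folio),
 *			    folio_nr_pages(folio));
 */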

static __always_inline void update_lru_size(struct lruvec *lruvec,
                                enum lru_list lru, enum zone_type zid,
                                long nr_pages)
{
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);

        __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
        __mod_zone_page_state(&pgdat->node_zones[zid],
                                NR_ZONE_LRU_BASE + lru, nr_pages);
#ifdef CONFIG_MEMCG
        mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear page LRU flags before releasing a folio.
 * @folio: The folio that was on the LRU and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
        VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

        __folio_clear_lru(folio);

        /* this shouldn't happen, so leave the flags to bad_page() */
        if (folio_test_active(folio) && folio_test_unevictable(folio))
                return;

        __folio_clear_active(folio);
        __folio_clear_unevictable(folio);
}

static __always_inline void __clear_page_lru_flags(struct page *page)
{
        __folio_clear_lru_flags(page_folio(page));
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
        enum lru_list lru;

        VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

        if (folio_test_unevictable(folio))
                return LRU_UNEVICTABLE;

        lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
        if (folio_test_active(folio))
                lru += LRU_ACTIVE;

        return lru;
}
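
/*
 * The arithmetic in folio_lru_list() relies on the enum lru_list layout,
 * where an active list index is its inactive counterpart plus LRU_ACTIVE.
 * As an illustration, an active, swap-backed folio resolves as:
 *
 *	lru = LRU_INACTIVE_ANON;	(folio_is_file_lru() == 0)
 *	lru += LRU_ACTIVE;		(yielding LRU_ACTIVE_ANON)
 */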

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        folio_nr_pages(folio));
        if (lru != LRU_UNEVICTABLE)
                list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list(struct page *page,
                                struct lruvec *lruvec)
{
        lruvec_add_folio(lruvec, page_folio(page));
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        folio_nr_pages(folio));
        /* This is not expected to be used on LRU_UNEVICTABLE */
        list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list_tail(struct page *page,
                                struct lruvec *lruvec)
{
        lruvec_add_folio_tail(lruvec, page_folio(page));
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        if (lru != LRU_UNEVICTABLE)
                list_del(&folio->lru);
        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        -folio_nr_pages(folio));
}

static __always_inline void del_page_from_lru_list(struct page *page,
                                struct lruvec *lruvec)
{
        lruvec_del_folio(lruvec, page_folio(page));
}
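
/*
 * Illustrative only, a sketch rather than a quote of any particular caller:
 * moving a folio between LRU lists deletes it, updates its flags and then
 * re-adds it, all under the lruvec lock so the list and the accounting done
 * by update_lru_size() stay consistent:
 *
 *	spin_lock_irq(&lruvec->lru_lock);
 *	lruvec_del_folio(lruvec, folio);
 *	folio_set_active(folio);
 *	lruvec_add_folio(lruvec, folio);
 *	spin_unlock_irq(&lruvec->lru_lock);
 */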

#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling anon_vma_name(). The caller
 * should either keep holding the lock while using the returned pointer, or
 * raise the anon_vma_name refcount before releasing the lock.
 */
extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
extern void anon_vma_name_free(struct kref *kref);

/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
        if (anon_name)
                kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
        if (anon_name)
                kref_put(&anon_name->kref, anon_vma_name_free);
}
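
/*
 * Example of the locking rule documented above (a sketch, not a quote of
 * any particular caller): either use the returned name while still holding
 * mmap_lock, or take a reference before dropping the lock:
 *
 *	mmap_read_lock(mm);
 *	anon_name = anon_vma_name(vma);
 *	anon_vma_name_get(anon_name);
 *	mmap_read_unlock(mm);
 *	...use anon_name->name...
 *	anon_vma_name_put(anon_name);
 */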

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
        /* Prevent anon_name refcount saturation early on */
        if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
                anon_vma_name_get(anon_name);
                return anon_name;
        }
        return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
                                     struct vm_area_struct *new_vma)
{
        struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

        if (anon_name)
                new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
        /*
         * Not using anon_vma_name because it generates a warning if mmap_lock
         * is not held, which might be the case here.
         */
        if (!vma->vm_file)
                anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
                                    struct anon_vma_name *anon_name2)
{
        if (anon_name1 == anon_name2)
                return true;

        return anon_name1 && anon_name2 &&
                !strcmp(anon_name1->name, anon_name2->name);
}

#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
        return NULL;
}

static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
                                     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
                                    struct anon_vma_name *anon_name2)
{
        return true;
}

#endif /* CONFIG_ANON_VMA_NAME */

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_inc(&mm->tlb_flush_pending);
        /*
         * The only time this value is relevant is when there are indeed pages
         * to flush. And we'll only flush pages after changing them, which
         * requires the PTL.
         *
         * So the ordering here is:
         *
         *	atomic_inc(&mm->tlb_flush_pending);
         *	spin_lock(&ptl);
         *	...
         *	set_pte_at();
         *	spin_unlock(&ptl);
         *
         *				spin_lock(&ptl)
         *				mm_tlb_flush_pending();
         *				....
         *				spin_unlock(&ptl);
         *
         *	flush_tlb_range();
         *	atomic_dec(&mm->tlb_flush_pending);
         *
         * Because the increment is constrained by the PTL unlock, it is
         * guaranteed to be visible if the PTE modification is visible. After
         * all, if there is no PTE modification, nobody cares about TLB
         * flushes either.
         *
         * This very much relies on users (mm_tlb_flush_pending() and
         * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
         * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
         * locks (PPC) the unlock of one doesn't order against the lock of
         * another PTL.
         *
         * The decrement is ordered by the flush_tlb_range(), such that
         * mm_tlb_flush_pending() will not return false unless all flushes have
         * completed.
         */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * See inc_tlb_flush_pending().
         *
         * This cannot be smp_mb__before_atomic() because smp_mb() simply does
         * not order against TLB invalidate completion, which is what we need.
         *
         * Therefore we must rely on tlb_flush_*() to guarantee order.
         */
        atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * Must be called after having acquired the PTL; orders against that
         * PTL's release and therefore ensures that if we observe the modified
         * PTE we must also observe the increment from inc_tlb_flush_pending().
         *
         * That is, it only guarantees to return true if there is a flush
         * pending for _this_ PTL.
         */
        return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
        /*
         * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
         * for which there is a TLB flush pending in order to guarantee
         * we've seen both that PTE modification and the increment.
         *
         * (no requirement on actually still holding the PTL, that is irrelevant)
         */
        return atomic_read(&mm->tlb_flush_pending) > 1;
}
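
/*
 * Illustrative only (a sketch, not the mmu_gather implementation): a
 * flusher that detects a concurrent flusher via mm_tlb_flush_nested() can
 * fall back to a conservative flush of the whole mm before dropping its
 * own pending count:
 *
 *	if (mm_tlb_flush_nested(mm))
 *		flush_tlb_mm(mm);
 *	else
 *		flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 */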

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. Meanwhile
 * a none pte also means we are not demoting the pte, so no TLB flush is
 * needed here: when the pte was cleared, the caller should already have taken
 * care of the TLB flush.
 *
 * Must be called with the pgtable lock held so that no thread will see the
 * none pte; any thread that does see it will fault and serialize at the
 * pgtable lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
                              pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
        bool arm_uffd_pte = false;

        /* The current status of the pte should be "cleared" before calling */
        WARN_ON_ONCE(!pte_none(*pte));

        if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
                return;

        /* A uffd-wp wr-protected normal pte */
        if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
                arm_uffd_pte = true;

        /*
         * A uffd-wp wr-protected swap pte. Note: this should even cover an
         * existing pte marker with uffd-wp bit set.
         */
        if (unlikely(pte_swp_uffd_wp_any(pteval)))
                arm_uffd_pte = true;

        if (unlikely(arm_uffd_pte))
                set_pte_at(vma->vm_mm, addr, pte,
                           make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}
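
/*
 * Illustrative caller pattern (a sketch, not a quote of the zap code):
 * clear the pte under the PTL, let the caller's existing machinery handle
 * the TLB flush, then re-arm the uffd-wp marker so the write protection
 * survives the zap:
 *
 *	ptent = ptep_get_and_clear(mm, addr, pte);
 *	tlb_remove_tlb_entry(tlb, pte, addr);
 *	pte_install_uffd_wp_if_needed(vma, addr, pte, ptent);
 */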

#endif