1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * linux/mm/filemap.c |
4 | * |
5 | * Copyright (C) 1994-1999 Linus Torvalds |
6 | */ |
7 | |
8 | /* |
9 | * This file handles the generic file mmap semantics used by |
10 | * most "normal" filesystems (but you don't /have/ to use this: |
11 | * the NFS filesystem used to do this differently, for example) |
12 | */ |
13 | #include <linux/export.h> |
14 | #include <linux/compiler.h> |
15 | #include <linux/dax.h> |
16 | #include <linux/fs.h> |
17 | #include <linux/sched/signal.h> |
18 | #include <linux/uaccess.h> |
19 | #include <linux/capability.h> |
20 | #include <linux/kernel_stat.h> |
21 | #include <linux/gfp.h> |
22 | #include <linux/mm.h> |
23 | #include <linux/swap.h> |
24 | #include <linux/swapops.h> |
25 | #include <linux/syscalls.h> |
26 | #include <linux/mman.h> |
27 | #include <linux/pagemap.h> |
28 | #include <linux/file.h> |
29 | #include <linux/uio.h> |
30 | #include <linux/error-injection.h> |
31 | #include <linux/hash.h> |
32 | #include <linux/writeback.h> |
33 | #include <linux/backing-dev.h> |
34 | #include <linux/pagevec.h> |
35 | #include <linux/security.h> |
36 | #include <linux/cpuset.h> |
37 | #include <linux/hugetlb.h> |
38 | #include <linux/memcontrol.h> |
39 | #include <linux/shmem_fs.h> |
40 | #include <linux/rmap.h> |
41 | #include <linux/delayacct.h> |
42 | #include <linux/psi.h> |
43 | #include <linux/ramfs.h> |
44 | #include <linux/page_idle.h> |
45 | #include <linux/migrate.h> |
46 | #include <linux/pipe_fs_i.h> |
47 | #include <linux/splice.h> |
48 | #include <linux/rcupdate_wait.h> |
49 | #include <asm/pgalloc.h> |
50 | #include <asm/tlbflush.h> |
51 | #include "internal.h" |
52 | |
53 | #define CREATE_TRACE_POINTS |
54 | #include <trace/events/filemap.h> |
55 | |
56 | /* |
57 | * FIXME: remove all knowledge of the buffer layer from the core VM |
58 | */ |
59 | #include <linux/buffer_head.h> /* for try_to_free_buffers */ |
60 | |
61 | #include <asm/mman.h> |
62 | |
63 | #include "swap.h" |
64 | |
65 | /* |
66 | * Shared mappings implemented 30.11.1994. It's not fully working yet, |
67 | * though. |
68 | * |
69 | * Shared mappings now work. 15.8.1995 Bruno. |
70 | * |
71 | * finished 'unifying' the page and buffer cache and SMP-threaded the |
72 | * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com> |
73 | * |
74 | * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de> |
75 | */ |
76 | |
77 | /* |
78 | * Lock ordering: |
79 | * |
80 | * ->i_mmap_rwsem (truncate_pagecache) |
81 | * ->private_lock (__free_pte->block_dirty_folio) |
82 | * ->swap_lock (exclusive_swap_page, others) |
83 | * ->i_pages lock |
84 | * |
85 | * ->i_rwsem |
86 | * ->invalidate_lock (acquired by fs in truncate path) |
87 | * ->i_mmap_rwsem (truncate->unmap_mapping_range) |
88 | * |
89 | * ->mmap_lock |
90 | * ->i_mmap_rwsem |
91 | * ->page_table_lock or pte_lock (various, mainly in memory.c) |
92 | * ->i_pages lock (arch-dependent flush_dcache_mmap_lock) |
93 | * |
94 | * ->mmap_lock |
95 | * ->invalidate_lock (filemap_fault) |
96 | * ->lock_page (filemap_fault, access_process_vm) |
97 | * |
98 | * ->i_rwsem (generic_perform_write) |
99 | * ->mmap_lock (fault_in_readable->do_page_fault) |
100 | * |
101 | * bdi->wb.list_lock |
102 | * sb_lock (fs/fs-writeback.c) |
103 | * ->i_pages lock (__sync_single_inode) |
104 | * |
105 | * ->i_mmap_rwsem |
106 | * ->anon_vma.lock (vma_merge) |
107 | * |
108 | * ->anon_vma.lock |
109 | * ->page_table_lock or pte_lock (anon_vma_prepare and various) |
110 | * |
111 | * ->page_table_lock or pte_lock |
112 | * ->swap_lock (try_to_unmap_one) |
113 | * ->private_lock (try_to_unmap_one) |
114 | * ->i_pages lock (try_to_unmap_one) |
115 | * ->lruvec->lru_lock (follow_page->mark_page_accessed) |
116 | * ->lruvec->lru_lock (check_pte_range->isolate_lru_page) |
117 | * ->private_lock (folio_remove_rmap_pte->set_page_dirty) |
118 | * ->i_pages lock (folio_remove_rmap_pte->set_page_dirty) |
119 | * bdi.wb->list_lock (folio_remove_rmap_pte->set_page_dirty) |
120 | * ->inode->i_lock (folio_remove_rmap_pte->set_page_dirty) |
121 | * ->memcg->move_lock (folio_remove_rmap_pte->folio_memcg_lock) |
122 | * bdi.wb->list_lock (zap_pte_range->set_page_dirty) |
123 | * ->inode->i_lock (zap_pte_range->set_page_dirty) |
124 | * ->private_lock (zap_pte_range->block_dirty_folio) |
125 | */ |
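/*
 * Illustrative sketch (hypothetical helper, not part of this file): one way
 * a filesystem's truncate-style path might take the locks above in the
 * documented order. Real filesystems have their own variants of this
 * sequence; the point is only the nesting i_rwsem -> invalidate_lock ->
 * (i_mmap_rwsem / i_pages, taken inside the truncate helpers).
 */
static void example_truncate_prepare(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;

	inode_lock(inode);			/* ->i_rwsem */
	filemap_invalidate_lock(mapping);	/* ->invalidate_lock */
	truncate_setsize(inode, newsize);	/* ->i_mmap_rwsem, ->i_pages taken internally */
	filemap_invalidate_unlock(mapping);
	inode_unlock(inode);
}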
126 | |
127 | static void mapping_set_update(struct xa_state *xas, |
128 | struct address_space *mapping) |
129 | { |
130 | if (dax_mapping(mapping) || shmem_mapping(mapping)) |
131 | return; |
132 | xas_set_update(xas, workingset_update_node); |
133 | xas_set_lru(xas, &shadow_nodes); |
134 | } |
135 | |
136 | static void page_cache_delete(struct address_space *mapping, |
137 | struct folio *folio, void *shadow) |
138 | { |
139 | XA_STATE(xas, &mapping->i_pages, folio->index); |
140 | long nr = 1; |
141 | |
142 | mapping_set_update(&xas, mapping); |
143 | |
144 | xas_set_order(&xas, folio->index, folio_order(folio)); |
145 | nr = folio_nr_pages(folio); |
146 | |
147 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
148 | |
149 | xas_store(&xas, shadow); |
150 | xas_init_marks(&xas); |
151 | |
152 | folio->mapping = NULL; |
153 | /* Leave page->index set: truncation lookup relies upon it */ |
154 | mapping->nrpages -= nr; |
155 | } |
156 | |
157 | static void filemap_unaccount_folio(struct address_space *mapping, |
158 | struct folio *folio) |
159 | { |
160 | long nr; |
161 | |
162 | VM_BUG_ON_FOLIO(folio_mapped(folio), folio); |
163 | if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) { |
164 | pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n", |
165 | current->comm, folio_pfn(folio)); |
166 | dump_page(&folio->page, "still mapped when deleted"); |
167 | dump_stack(); |
168 | add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); |
169 | |
170 | if (mapping_exiting(mapping) && !folio_test_large(folio)) { |
171 | int mapcount = page_mapcount(&folio->page); |
172 | |
173 | if (folio_ref_count(folio) >= mapcount + 2) { |
174 | /* |
175 | * All vmas have already been torn down, so it's |
176 | * a good bet that actually the page is unmapped |
177 | * and we'd rather not leak it: if we're wrong, |
178 | * another bad page check should catch it later. |
179 | */ |
180 | page_mapcount_reset(&folio->page); |
181 | folio_ref_sub(folio, mapcount); |
182 | } |
183 | } |
184 | } |
185 | |
186 | /* hugetlb folios do not participate in page cache accounting. */ |
187 | if (folio_test_hugetlb(folio)) |
188 | return; |
189 | |
190 | nr = folio_nr_pages(folio); |
191 | |
192 | __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr); |
193 | if (folio_test_swapbacked(folio)) { |
194 | __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr); |
195 | if (folio_test_pmd_mappable(folio)) |
196 | __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr); |
197 | } else if (folio_test_pmd_mappable(folio)) { |
198 | __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr); |
199 | filemap_nr_thps_dec(mapping); |
200 | } |
201 | |
202 | /* |
203 | * At this point folio must be either written or cleaned by |
204 | * truncate. Dirty folio here signals a bug and loss of |
205 | * unwritten data - on ordinary filesystems. |
206 | * |
207 | * But it's harmless on in-memory filesystems like tmpfs; and can |
208 | * occur when a driver which did get_user_pages() sets page dirty |
209 | * before putting it, while the inode is being finally evicted. |
210 | * |
211 | * Below fixes dirty accounting after removing the folio entirely |
212 | * but leaves the dirty flag set: it has no effect for truncated |
213 | * folio and anyway will be cleared before returning folio to |
214 | * buddy allocator. |
215 | */ |
216 | if (WARN_ON_ONCE(folio_test_dirty(folio) && |
217 | mapping_can_writeback(mapping))) |
218 | folio_account_cleaned(folio, inode_to_wb(mapping->host)); |
219 | } |
220 | |
221 | /* |
222 | * Delete a page from the page cache and free it. Caller has to make |
223 | * sure the page is locked and that nobody else uses it - or that usage |
224 | * is safe. The caller must hold the i_pages lock. |
225 | */ |
226 | void __filemap_remove_folio(struct folio *folio, void *shadow) |
227 | { |
228 | struct address_space *mapping = folio->mapping; |
229 | |
230 | trace_mm_filemap_delete_from_page_cache(folio); |
231 | filemap_unaccount_folio(mapping, folio); |
232 | page_cache_delete(mapping, folio, shadow); |
233 | } |
234 | |
235 | void filemap_free_folio(struct address_space *mapping, struct folio *folio) |
236 | { |
237 | void (*free_folio)(struct folio *); |
238 | int refs = 1; |
239 | |
240 | free_folio = mapping->a_ops->free_folio; |
241 | if (free_folio) |
242 | free_folio(folio); |
243 | |
244 | if (folio_test_large(folio)) |
245 | refs = folio_nr_pages(folio); |
246 | folio_put_refs(folio, refs); |
247 | } |
248 | |
249 | /** |
250 | * filemap_remove_folio - Remove folio from page cache. |
251 | * @folio: The folio. |
252 | * |
253 | * This must be called only on folios that are locked and have been |
254 | * verified to be in the page cache. It will never put the folio into |
255 | * the free list because the caller has a reference on the page. |
256 | */ |
257 | void filemap_remove_folio(struct folio *folio) |
258 | { |
259 | struct address_space *mapping = folio->mapping; |
260 | |
261 | BUG_ON(!folio_test_locked(folio)); |
262 | spin_lock(&mapping->host->i_lock); |
263 | xa_lock_irq(&mapping->i_pages); |
264 | __filemap_remove_folio(folio, NULL); |
265 | xa_unlock_irq(&mapping->i_pages); |
266 | if (mapping_shrinkable(mapping)) |
267 | inode_add_lru(mapping->host); |
268 | spin_unlock(&mapping->host->i_lock); |
269 | |
270 | filemap_free_folio(mapping, folio); |
271 | } |
272 | |
273 | /* |
274 | * page_cache_delete_batch - delete several folios from page cache |
275 | * @mapping: the mapping to which folios belong |
276 | * @fbatch: batch of folios to delete |
277 | * |
278 | * The function walks over mapping->i_pages and removes folios passed in |
279 | * @fbatch from the mapping. The function expects @fbatch to be sorted |
280 | * by page index and is optimised for it to be dense. |
281 | * It tolerates holes in @fbatch (mapping entries at those indices are not |
282 | * modified). |
283 | * |
284 | * The function expects the i_pages lock to be held. |
285 | */ |
286 | static void page_cache_delete_batch(struct address_space *mapping, |
287 | struct folio_batch *fbatch) |
288 | { |
289 | XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); |
290 | long total_pages = 0; |
291 | int i = 0; |
292 | struct folio *folio; |
293 | |
294 | mapping_set_update(&xas, mapping); |
295 | xas_for_each(&xas, folio, ULONG_MAX) { |
296 | if (i >= folio_batch_count(fbatch)) |
297 | break; |
298 | |
299 | /* A swap/dax/shadow entry got inserted? Skip it. */ |
300 | if (xa_is_value(folio)) |
301 | continue; |
302 | /* |
303 | * A page got inserted in our range? Skip it. We have our |
304 | * pages locked so they are protected from being removed. |
305 | * If we see a page whose index is higher than ours, it |
306 | * means our page has been removed, which shouldn't be |
307 | * possible because we're holding the page lock. |
308 | */ |
309 | if (folio != fbatch->folios[i]) { |
310 | VM_BUG_ON_FOLIO(folio->index > |
311 | fbatch->folios[i]->index, folio); |
312 | continue; |
313 | } |
314 | |
315 | WARN_ON_ONCE(!folio_test_locked(folio)); |
316 | |
317 | folio->mapping = NULL; |
318 | /* Leave folio->index set: truncation lookup relies on it */ |
319 | |
320 | i++; |
321 | xas_store(&xas, NULL); |
322 | total_pages += folio_nr_pages(folio); |
323 | } |
324 | mapping->nrpages -= total_pages; |
325 | } |
326 | |
327 | void delete_from_page_cache_batch(struct address_space *mapping, |
328 | struct folio_batch *fbatch) |
329 | { |
330 | int i; |
331 | |
332 | if (!folio_batch_count(fbatch)) |
333 | return; |
334 | |
335 | spin_lock(&mapping->host->i_lock); |
336 | xa_lock_irq(&mapping->i_pages); |
337 | for (i = 0; i < folio_batch_count(fbatch); i++) { |
338 | struct folio *folio = fbatch->folios[i]; |
339 | |
340 | trace_mm_filemap_delete_from_page_cache(folio); |
341 | filemap_unaccount_folio(mapping, folio); |
342 | } |
343 | page_cache_delete_batch(mapping, fbatch); |
344 | xa_unlock_irq(&mapping->i_pages); |
345 | if (mapping_shrinkable(mapping)) |
346 | inode_add_lru(mapping->host); |
347 | spin_unlock(&mapping->host->i_lock); |
348 | |
349 | for (i = 0; i < folio_batch_count(fbatch); i++) |
350 | filemap_free_folio(mapping, fbatch->folios[i]); |
351 | } |
352 | |
353 | int filemap_check_errors(struct address_space *mapping) |
354 | { |
355 | int ret = 0; |
356 | /* Check for outstanding write errors */ |
357 | if (test_bit(AS_ENOSPC, &mapping->flags) && |
358 | test_and_clear_bit(AS_ENOSPC, &mapping->flags)) |
359 | ret = -ENOSPC; |
360 | if (test_bit(AS_EIO, &mapping->flags) && |
361 | test_and_clear_bit(AS_EIO, &mapping->flags)) |
362 | ret = -EIO; |
363 | return ret; |
364 | } |
365 | EXPORT_SYMBOL(filemap_check_errors); |
366 | |
367 | static int filemap_check_and_keep_errors(struct address_space *mapping) |
368 | { |
369 | /* Check for outstanding write errors */ |
370 | if (test_bit(AS_EIO, &mapping->flags)) |
371 | return -EIO; |
372 | if (test_bit(AS_ENOSPC, &mapping->flags)) |
373 | return -ENOSPC; |
374 | return 0; |
375 | } |
376 | |
377 | /** |
378 | * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range |
379 | * @mapping: address space structure to write |
380 | * @wbc: the writeback_control controlling the writeout |
381 | * |
382 | * Call writepages on the mapping using the provided wbc to control the |
383 | * writeout. |
384 | * |
385 | * Return: %0 on success, negative error code otherwise. |
386 | */ |
387 | int filemap_fdatawrite_wbc(struct address_space *mapping, |
388 | struct writeback_control *wbc) |
389 | { |
390 | int ret; |
391 | |
392 | if (!mapping_can_writeback(mapping) || |
393 | !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) |
394 | return 0; |
395 | |
396 | wbc_attach_fdatawrite_inode(wbc, mapping->host); |
397 | ret = do_writepages(mapping, wbc); |
398 | wbc_detach_inode(wbc); |
399 | return ret; |
400 | } |
401 | EXPORT_SYMBOL(filemap_fdatawrite_wbc); |
402 | |
403 | /** |
404 | * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range |
405 | * @mapping: address space structure to write |
406 | * @start: offset in bytes where the range starts |
407 | * @end: offset in bytes where the range ends (inclusive) |
408 | * @sync_mode: enable synchronous operation |
409 | * |
410 | * Start writeback against all of a mapping's dirty pages that lie |
411 | * within the byte offsets <start, end> inclusive. |
412 | * |
413 | * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as |
414 | * opposed to a regular memory cleansing writeback. The difference between |
415 | * these two operations is that if a dirty page/buffer is encountered, it must |
416 | * be waited upon, and not just skipped over. |
417 | * |
418 | * Return: %0 on success, negative error code otherwise. |
419 | */ |
420 | int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, |
421 | loff_t end, int sync_mode) |
422 | { |
423 | struct writeback_control wbc = { |
424 | .sync_mode = sync_mode, |
425 | .nr_to_write = LONG_MAX, |
426 | .range_start = start, |
427 | .range_end = end, |
428 | }; |
429 | |
430 | return filemap_fdatawrite_wbc(mapping, &wbc); |
431 | } |
432 | |
433 | static inline int __filemap_fdatawrite(struct address_space *mapping, |
434 | int sync_mode) |
435 | { |
436 | return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); |
437 | } |
438 | |
439 | int filemap_fdatawrite(struct address_space *mapping) |
440 | { |
441 | return __filemap_fdatawrite(mapping, WB_SYNC_ALL); |
442 | } |
443 | EXPORT_SYMBOL(filemap_fdatawrite); |
444 | |
445 | int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, |
446 | loff_t end) |
447 | { |
448 | return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); |
449 | } |
450 | EXPORT_SYMBOL(filemap_fdatawrite_range); |
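/*
 * Illustrative sketch (hypothetical helper): starting WB_SYNC_ALL writeback
 * on a byte range and then waiting for it. Real callers normally go through
 * filemap_write_and_wait_range() below, which also folds in error checking.
 */
static int example_sync_byte_range(struct address_space *mapping,
				   loff_t pos, loff_t count)
{
	int err;

	err = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (err)
		return err;
	return filemap_fdatawait_range(mapping, pos, pos + count - 1);
}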
451 | |
452 | /** |
453 | * filemap_flush - mostly a non-blocking flush |
454 | * @mapping: target address_space |
455 | * |
456 | * This is a mostly non-blocking flush. Not suitable for data-integrity |
457 | * purposes - I/O may not be started against all dirty pages. |
458 | * |
459 | * Return: %0 on success, negative error code otherwise. |
460 | */ |
461 | int filemap_flush(struct address_space *mapping) |
462 | { |
463 | return __filemap_fdatawrite(mapping, WB_SYNC_NONE); |
464 | } |
465 | EXPORT_SYMBOL(filemap_flush); |
466 | |
467 | /** |
468 | * filemap_range_has_page - check if a page exists in range. |
469 | * @mapping: address space within which to check |
470 | * @start_byte: offset in bytes where the range starts |
471 | * @end_byte: offset in bytes where the range ends (inclusive) |
472 | * |
473 | * Find at least one page in the range supplied, usually used to check if |
474 | * direct writing in this range will trigger a writeback. |
475 | * |
476 | * Return: %true if at least one page exists in the specified range, |
477 | * %false otherwise. |
478 | */ |
479 | bool filemap_range_has_page(struct address_space *mapping, |
480 | loff_t start_byte, loff_t end_byte) |
481 | { |
482 | struct folio *folio; |
483 | XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); |
484 | pgoff_t max = end_byte >> PAGE_SHIFT; |
485 | |
486 | if (end_byte < start_byte) |
487 | return false; |
488 | |
489 | rcu_read_lock(); |
490 | for (;;) { |
491 | folio = xas_find(&xas, max); |
492 | if (xas_retry(&xas, folio)) |
493 | continue; |
494 | /* Shadow entries don't count */ |
495 | if (xa_is_value(folio)) |
496 | continue; |
497 | /* |
498 | * We don't need to try to pin this page; we're about to |
499 | * release the RCU lock anyway. It is enough to know that |
500 | * there was a page here recently. |
501 | */ |
502 | break; |
503 | } |
504 | rcu_read_unlock(); |
505 | |
506 | return folio != NULL; |
507 | } |
508 | EXPORT_SYMBOL(filemap_range_has_page); |
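/*
 * Illustrative sketch (hypothetical helper): a direct-write path might use
 * filemap_range_has_page() to decide whether cached pages exist that would
 * have to be written back and invalidated before issuing the I/O.
 */
static bool example_dio_needs_flush(struct kiocb *iocb, size_t count)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;

	return filemap_range_has_page(mapping, iocb->ki_pos,
				      iocb->ki_pos + count - 1);
}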
509 | |
510 | static void __filemap_fdatawait_range(struct address_space *mapping, |
511 | loff_t start_byte, loff_t end_byte) |
512 | { |
513 | pgoff_t index = start_byte >> PAGE_SHIFT; |
514 | pgoff_t end = end_byte >> PAGE_SHIFT; |
515 | struct folio_batch fbatch; |
516 | unsigned nr_folios; |
517 | |
518 | folio_batch_init(&fbatch); |
519 | |
520 | while (index <= end) { |
521 | unsigned i; |
522 | |
523 | nr_folios = filemap_get_folios_tag(mapping, &index, end, |
524 | PAGECACHE_TAG_WRITEBACK, &fbatch); |
525 | |
526 | if (!nr_folios) |
527 | break; |
528 | |
529 | for (i = 0; i < nr_folios; i++) { |
530 | struct folio *folio = fbatch.folios[i]; |
531 | |
532 | folio_wait_writeback(folio); |
533 | folio_clear_error(folio); |
534 | } |
535 | folio_batch_release(&fbatch); |
536 | cond_resched(); |
537 | } |
538 | } |
539 | |
540 | /** |
541 | * filemap_fdatawait_range - wait for writeback to complete |
542 | * @mapping: address space structure to wait for |
543 | * @start_byte: offset in bytes where the range starts |
544 | * @end_byte: offset in bytes where the range ends (inclusive) |
545 | * |
546 | * Walk the list of under-writeback pages of the given address space |
547 | * in the given range and wait for all of them. Check error status of |
548 | * the address space and return it. |
549 | * |
550 | * Since the error status of the address space is cleared by this function, |
551 | * callers are responsible for checking the return value and handling and/or |
552 | * reporting the error. |
553 | * |
554 | * Return: error status of the address space. |
555 | */ |
556 | int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, |
557 | loff_t end_byte) |
558 | { |
559 | __filemap_fdatawait_range(mapping, start_byte, end_byte); |
560 | return filemap_check_errors(mapping); |
561 | } |
562 | EXPORT_SYMBOL(filemap_fdatawait_range); |
563 | |
564 | /** |
565 | * filemap_fdatawait_range_keep_errors - wait for writeback to complete |
566 | * @mapping: address space structure to wait for |
567 | * @start_byte: offset in bytes where the range starts |
568 | * @end_byte: offset in bytes where the range ends (inclusive) |
569 | * |
570 | * Walk the list of under-writeback pages of the given address space in the |
571 | * given range and wait for all of them. Unlike filemap_fdatawait_range(), |
572 | * this function does not clear error status of the address space. |
573 | * |
574 | * Use this function if callers don't handle errors themselves. Expected |
575 | * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2), |
576 | * fsfreeze(8) |
577 | */ |
578 | int filemap_fdatawait_range_keep_errors(struct address_space *mapping, |
579 | loff_t start_byte, loff_t end_byte) |
580 | { |
581 | __filemap_fdatawait_range(mapping, start_byte, end_byte); |
582 | return filemap_check_and_keep_errors(mapping); |
583 | } |
584 | EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors); |
585 | |
586 | /** |
587 | * file_fdatawait_range - wait for writeback to complete |
588 | * @file: file pointing to address space structure to wait for |
589 | * @start_byte: offset in bytes where the range starts |
590 | * @end_byte: offset in bytes where the range ends (inclusive) |
591 | * |
592 | * Walk the list of under-writeback pages of the address space that file |
593 | * refers to, in the given range and wait for all of them. Check error |
594 | * status of the address space vs. the file->f_wb_err cursor and return it. |
595 | * |
596 | * Since the error status of the file is advanced by this function, |
597 | * callers are responsible for checking the return value and handling and/or |
598 | * reporting the error. |
599 | * |
600 | * Return: error status of the address space vs. the file->f_wb_err cursor. |
601 | */ |
602 | int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte) |
603 | { |
604 | struct address_space *mapping = file->f_mapping; |
605 | |
606 | __filemap_fdatawait_range(mapping, start_byte, end_byte); |
607 | return file_check_and_advance_wb_err(file); |
608 | } |
609 | EXPORT_SYMBOL(file_fdatawait_range); |
610 | |
611 | /** |
612 | * filemap_fdatawait_keep_errors - wait for writeback without clearing errors |
613 | * @mapping: address space structure to wait for |
614 | * |
615 | * Walk the list of under-writeback pages of the given address space |
616 | * and wait for all of them. Unlike filemap_fdatawait(), this function |
617 | * does not clear error status of the address space. |
618 | * |
619 | * Use this function if callers don't handle errors themselves. Expected |
620 | * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2), |
621 | * fsfreeze(8) |
622 | * |
623 | * Return: error status of the address space. |
624 | */ |
625 | int filemap_fdatawait_keep_errors(struct address_space *mapping) |
626 | { |
627 | __filemap_fdatawait_range(mapping, 0, LLONG_MAX); |
628 | return filemap_check_and_keep_errors(mapping); |
629 | } |
630 | EXPORT_SYMBOL(filemap_fdatawait_keep_errors); |
631 | |
632 | /* Returns true if writeback might be needed or already in progress. */ |
633 | static bool mapping_needs_writeback(struct address_space *mapping) |
634 | { |
635 | return mapping->nrpages; |
636 | } |
637 | |
638 | bool filemap_range_has_writeback(struct address_space *mapping, |
639 | loff_t start_byte, loff_t end_byte) |
640 | { |
641 | XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); |
642 | pgoff_t max = end_byte >> PAGE_SHIFT; |
643 | struct folio *folio; |
644 | |
645 | if (end_byte < start_byte) |
646 | return false; |
647 | |
648 | rcu_read_lock(); |
649 | xas_for_each(&xas, folio, max) { |
650 | if (xas_retry(&xas, folio)) |
651 | continue; |
652 | if (xa_is_value(folio)) |
653 | continue; |
654 | if (folio_test_dirty(folio) || folio_test_locked(folio) || |
655 | folio_test_writeback(folio)) |
656 | break; |
657 | } |
658 | rcu_read_unlock(); |
659 | return folio != NULL; |
660 | } |
661 | EXPORT_SYMBOL_GPL(filemap_range_has_writeback); |
662 | |
663 | /** |
664 | * filemap_write_and_wait_range - write out & wait on a file range |
665 | * @mapping: the address_space for the pages |
666 | * @lstart: offset in bytes where the range starts |
667 | * @lend: offset in bytes where the range ends (inclusive) |
668 | * |
669 | * Write out and wait upon file offsets lstart->lend, inclusive. |
670 | * |
671 | * Note that @lend is inclusive (describes the last byte to be written) so |
672 | * that this function can be used to write to the very end-of-file (end = -1). |
673 | * |
674 | * Return: error status of the address space. |
675 | */ |
676 | int filemap_write_and_wait_range(struct address_space *mapping, |
677 | loff_t lstart, loff_t lend) |
678 | { |
679 | int err = 0, err2; |
680 | |
681 | if (lend < lstart) |
682 | return 0; |
683 | |
684 | if (mapping_needs_writeback(mapping)) { |
685 | err = __filemap_fdatawrite_range(mapping, lstart, lend, |
686 | WB_SYNC_ALL); |
687 | /* |
688 | * Even if the above returned error, the pages may be |
689 | * written partially (e.g. -ENOSPC), so we wait for it. |
690 | * But the -EIO is special case, it may indicate the worst |
691 | * thing (e.g. bug) happened, so we avoid waiting for it. |
692 | */ |
693 | if (err != -EIO) |
694 | __filemap_fdatawait_range(mapping, lstart, lend); |
695 | } |
696 | err2 = filemap_check_errors(mapping); |
697 | if (!err) |
698 | err = err2; |
699 | return err; |
700 | } |
701 | EXPORT_SYMBOL(filemap_write_and_wait_range); |
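/*
 * Illustrative sketch (hypothetical helper): a hole-punch style operation
 * typically flushes the affected byte range with this helper before
 * removing the page cache for it; real filesystems wrap it in their own
 * locking.
 */
static int example_punch_hole_flush(struct inode *inode, loff_t offset,
				    loff_t len)
{
	return filemap_write_and_wait_range(inode->i_mapping, offset,
					    offset + len - 1);
}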
702 | |
703 | void __filemap_set_wb_err(struct address_space *mapping, int err) |
704 | { |
705 | errseq_t eseq = errseq_set(&mapping->wb_err, err); |
706 | |
707 | trace_filemap_set_wb_err(mapping, eseq); |
708 | } |
709 | EXPORT_SYMBOL(__filemap_set_wb_err); |
710 | |
711 | /** |
712 | * file_check_and_advance_wb_err - report wb error (if any) that was previously reported |
713 | * and advance wb_err to current one |
714 | * @file: struct file on which the error is being reported |
715 | * |
716 | * When userland calls fsync (or something like nfsd does the equivalent), we |
717 | * want to report any writeback errors that occurred since the last fsync (or |
718 | * since the file was opened if there haven't been any). |
719 | * |
720 | * Grab the wb_err from the mapping. If it matches what we have in the file, |
721 | * then just quickly return 0. The file is all caught up. |
722 | * |
723 | * If it doesn't match, then take the mapping value, set the "seen" flag in |
724 | * it and try to swap it into place. If it works, or another task beat us |
725 | * to it with the new value, then update the f_wb_err and return the error |
726 | * portion. The error at this point must be reported via proper channels |
727 | * (a'la fsync, or NFS COMMIT operation, etc.). |
728 | * |
729 | * While we handle mapping->wb_err with atomic operations, the f_wb_err |
730 | * value is protected by the f_lock since we must ensure that it reflects |
731 | * the latest value swapped in for this file descriptor. |
732 | * |
733 | * Return: %0 on success, negative error code otherwise. |
734 | */ |
735 | int file_check_and_advance_wb_err(struct file *file) |
736 | { |
737 | int err = 0; |
738 | errseq_t old = READ_ONCE(file->f_wb_err); |
739 | struct address_space *mapping = file->f_mapping; |
740 | |
741 | /* Locklessly handle the common case where nothing has changed */ |
742 | if (errseq_check(&mapping->wb_err, old)) { |
743 | /* Something changed, must use slow path */ |
744 | spin_lock(&file->f_lock); |
745 | old = file->f_wb_err; |
746 | err = errseq_check_and_advance(&mapping->wb_err, |
747 | &file->f_wb_err); |
748 | trace_file_check_and_advance_wb_err(file, old); |
749 | spin_unlock(&file->f_lock); |
750 | } |
751 | |
752 | /* |
753 | * We're mostly using this function as a drop in replacement for |
754 | * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect |
755 | * that the legacy code would have had on these flags. |
756 | */ |
757 | clear_bit(AS_EIO, &mapping->flags); |
758 | clear_bit(AS_ENOSPC, &mapping->flags); |
759 | return err; |
760 | } |
761 | EXPORT_SYMBOL(file_check_and_advance_wb_err); |
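/*
 * Illustrative sketch (hypothetical helpers): a caller that only wants to
 * observe writeback errors raised since some earlier point, without
 * consuming them the way file_check_and_advance_wb_err() does, can sample
 * and later re-check the mapping's errseq cursor.
 */
static errseq_t example_wb_err_sample(struct address_space *mapping)
{
	return filemap_sample_wb_err(mapping);
}

static int example_wb_err_since(struct address_space *mapping, errseq_t since)
{
	return filemap_check_wb_err(mapping, since);
}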
762 | |
763 | /** |
764 | * file_write_and_wait_range - write out & wait on a file range |
765 | * @file: file pointing to address_space with pages |
766 | * @lstart: offset in bytes where the range starts |
767 | * @lend: offset in bytes where the range ends (inclusive) |
768 | * |
769 | * Write out and wait upon file offsets lstart->lend, inclusive. |
770 | * |
771 | * Note that @lend is inclusive (describes the last byte to be written) so |
772 | * that this function can be used to write to the very end-of-file (end = -1). |
773 | * |
774 | * After writing out and waiting on the data, we check and advance the |
775 | * f_wb_err cursor to the latest value, and return any errors detected there. |
776 | * |
777 | * Return: %0 on success, negative error code otherwise. |
778 | */ |
779 | int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend) |
780 | { |
781 | int err = 0, err2; |
782 | struct address_space *mapping = file->f_mapping; |
783 | |
784 | if (lend < lstart) |
785 | return 0; |
786 | |
787 | if (mapping_needs_writeback(mapping)) { |
788 | err = __filemap_fdatawrite_range(mapping, lstart, lend, |
789 | WB_SYNC_ALL); |
790 | /* See comment of filemap_write_and_wait() */ |
791 | if (err != -EIO) |
792 | __filemap_fdatawait_range(mapping, lstart, lend); |
793 | } |
794 | err2 = file_check_and_advance_wb_err(file); |
795 | if (!err) |
796 | err = err2; |
797 | return err; |
798 | } |
799 | EXPORT_SYMBOL(file_write_and_wait_range); |
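/*
 * Illustrative sketch (hypothetical function name): the common shape of a
 * simple ->fsync() that has no metadata of its own to commit; compare
 * generic_file_fsync() in fs/libfs.c.
 */
static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	return file_write_and_wait_range(file, start, end);
}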
800 | |
801 | /** |
802 | * replace_page_cache_folio - replace a pagecache folio with a new one |
803 | * @old: folio to be replaced |
804 | * @new: folio to replace with |
805 | * |
806 | * This function replaces a folio in the pagecache with a new one. On |
807 | * success it acquires the pagecache reference for the new folio and |
808 | * drops it for the old folio. Both the old and new folios must be |
809 | * locked. This function does not add the new folio to the LRU, the |
810 | * caller must do that. |
811 | * |
812 | * The remove + add is atomic. This function cannot fail. |
813 | */ |
814 | void replace_page_cache_folio(struct folio *old, struct folio *new) |
815 | { |
816 | struct address_space *mapping = old->mapping; |
817 | void (*free_folio)(struct folio *) = mapping->a_ops->free_folio; |
818 | pgoff_t offset = old->index; |
819 | XA_STATE(xas, &mapping->i_pages, offset); |
820 | |
821 | VM_BUG_ON_FOLIO(!folio_test_locked(old), old); |
822 | VM_BUG_ON_FOLIO(!folio_test_locked(new), new); |
823 | VM_BUG_ON_FOLIO(new->mapping, new); |
824 | |
825 | folio_get(new); |
826 | new->mapping = mapping; |
827 | new->index = offset; |
828 | |
829 | mem_cgroup_replace_folio(old, new); |
830 | |
831 | xas_lock_irq(&xas); |
832 | xas_store(&xas, new); |
833 | |
834 | old->mapping = NULL; |
835 | /* hugetlb pages do not participate in page cache accounting. */ |
836 | if (!folio_test_hugetlb(old)) |
837 | __lruvec_stat_sub_folio(old, NR_FILE_PAGES); |
838 | if (!folio_test_hugetlb(new)) |
839 | __lruvec_stat_add_folio(new, NR_FILE_PAGES); |
840 | if (folio_test_swapbacked(old)) |
841 | __lruvec_stat_sub_folio(old, NR_SHMEM); |
842 | if (folio_test_swapbacked(new)) |
843 | __lruvec_stat_add_folio(new, NR_SHMEM); |
844 | xas_unlock_irq(&xas); |
845 | if (free_folio) |
846 | free_folio(old); |
847 | folio_put(old); |
848 | } |
849 | EXPORT_SYMBOL_GPL(replace_page_cache_folio); |
850 | |
851 | noinline int __filemap_add_folio(struct address_space *mapping, |
852 | struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp) |
853 | { |
854 | XA_STATE(xas, &mapping->i_pages, index); |
855 | bool huge = folio_test_hugetlb(folio); |
856 | bool charged = false; |
857 | long nr = 1; |
858 | |
859 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
860 | VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio); |
861 | mapping_set_update(&xas, mapping); |
862 | |
863 | if (!huge) { |
864 | int error = mem_cgroup_charge(folio, NULL, gfp); |
865 | if (error) |
866 | return error; |
867 | charged = true; |
868 | } |
869 | |
870 | VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); |
871 | xas_set_order(&xas, index, folio_order(folio)); |
872 | nr = folio_nr_pages(folio); |
873 | |
874 | gfp &= GFP_RECLAIM_MASK; |
875 | folio_ref_add(folio, nr); |
876 | folio->mapping = mapping; |
877 | folio->index = xas.xa_index; |
878 | |
879 | do { |
880 | unsigned int order = xa_get_order(xas.xa, xas.xa_index); |
881 | void *entry, *old = NULL; |
882 | |
883 | if (order > folio_order(folio)) |
884 | xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index), |
885 | order, gfp); |
886 | xas_lock_irq(&xas); |
887 | xas_for_each_conflict(&xas, entry) { |
888 | old = entry; |
889 | if (!xa_is_value(entry)) { |
890 | xas_set_err(&xas, -EEXIST); |
891 | goto unlock; |
892 | } |
893 | } |
894 | |
895 | if (old) { |
896 | if (shadowp) |
897 | *shadowp = old; |
898 | /* entry may have been split before we acquired lock */ |
899 | order = xa_get_order(xas.xa, xas.xa_index); |
900 | if (order > folio_order(folio)) { |
901 | /* How to handle large swap entries? */ |
902 | BUG_ON(shmem_mapping(mapping)); |
903 | xas_split(&xas, old, order); |
904 | xas_reset(&xas); |
905 | } |
906 | } |
907 | |
908 | xas_store(&xas, folio); |
909 | if (xas_error(&xas)) |
910 | goto unlock; |
911 | |
912 | mapping->nrpages += nr; |
913 | |
914 | /* hugetlb pages do not participate in page cache accounting */ |
915 | if (!huge) { |
916 | __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr); |
917 | if (folio_test_pmd_mappable(folio)) |
918 | __lruvec_stat_mod_folio(folio, |
919 | NR_FILE_THPS, nr); |
920 | } |
921 | unlock: |
922 | xas_unlock_irq(&xas); |
923 | } while (xas_nomem(&xas, gfp)); |
924 | |
925 | if (xas_error(&xas)) |
926 | goto error; |
927 | |
928 | trace_mm_filemap_add_to_page_cache(folio); |
929 | return 0; |
930 | error: |
931 | if (charged) |
932 | mem_cgroup_uncharge(folio); |
933 | folio->mapping = NULL; |
934 | /* Leave page->index set: truncation relies upon it */ |
935 | folio_put_refs(folio, nr); |
936 | return xas_error(&xas); |
937 | } |
938 | ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO); |
939 | |
940 | int filemap_add_folio(struct address_space *mapping, struct folio *folio, |
941 | pgoff_t index, gfp_t gfp) |
942 | { |
943 | void *shadow = NULL; |
944 | int ret; |
945 | |
946 | __folio_set_locked(folio); |
947 | ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow); |
948 | if (unlikely(ret)) |
949 | __folio_clear_locked(folio); |
950 | else { |
951 | /* |
952 | * The folio might have been evicted from cache only |
953 | * recently, in which case it should be activated like |
954 | * any other repeatedly accessed folio. |
955 | * The exception is folios getting rewritten; evicting other |
956 | * data from the working set, only to cache data that will |
957 | * get overwritten with something else, is a waste of memory. |
958 | */ |
959 | WARN_ON_ONCE(folio_test_active(folio)); |
960 | if (!(gfp & __GFP_WRITE) && shadow) |
961 | workingset_refault(folio, shadow); |
962 | folio_add_lru(folio); |
963 | } |
964 | return ret; |
965 | } |
966 | EXPORT_SYMBOL_GPL(filemap_add_folio); |
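/*
 * Illustrative sketch (hypothetical helper): allocating a folio and
 * inserting it at @index, the way readahead-style callers use this API;
 * see __filemap_get_folio() and page_cache_ra_unbounded() for real users.
 */
static struct folio *example_add_new_folio(struct address_space *mapping,
					   pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping);
	struct folio *folio = filemap_alloc_folio(gfp, 0);

	if (!folio)
		return NULL;
	if (filemap_add_folio(mapping, folio, index, gfp)) {
		folio_put(folio);
		return NULL;
	}
	/* The folio is now locked and in the page cache; caller must unlock. */
	return folio;
}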
967 | |
968 | #ifdef CONFIG_NUMA |
969 | struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order) |
970 | { |
971 | int n; |
972 | struct folio *folio; |
973 | |
974 | if (cpuset_do_page_mem_spread()) { |
975 | unsigned int cpuset_mems_cookie; |
976 | do { |
977 | cpuset_mems_cookie = read_mems_allowed_begin(); |
978 | n = cpuset_mem_spread_node(); |
979 | folio = __folio_alloc_node(gfp, order, n); |
980 | } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie)); |
981 | |
982 | return folio; |
983 | } |
984 | return folio_alloc(gfp, order); |
985 | } |
986 | EXPORT_SYMBOL(filemap_alloc_folio); |
987 | #endif |
988 | |
989 | /* |
990 | * filemap_invalidate_lock_two - lock invalidate_lock for two mappings |
991 | * |
992 | * Lock exclusively invalidate_lock of any passed mapping that is not NULL. |
993 | * |
994 | * @mapping1: the first mapping to lock |
995 | * @mapping2: the second mapping to lock |
996 | */ |
997 | void filemap_invalidate_lock_two(struct address_space *mapping1, |
998 | struct address_space *mapping2) |
999 | { |
1000 | if (mapping1 > mapping2) |
1001 | swap(mapping1, mapping2); |
1002 | if (mapping1) |
1003 | down_write(&mapping1->invalidate_lock); |
1004 | if (mapping2 && mapping1 != mapping2) |
1005 | down_write_nested(&mapping2->invalidate_lock, 1); |
1006 | } |
1007 | EXPORT_SYMBOL(filemap_invalidate_lock_two); |
1008 | |
1009 | /* |
1010 | * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings |
1011 | * |
1012 | * Unlock exclusive invalidate_lock of any passed mapping that is not NULL. |
1013 | * |
1014 | * @mapping1: the first mapping to unlock |
1015 | * @mapping2: the second mapping to unlock |
1016 | */ |
1017 | void filemap_invalidate_unlock_two(struct address_space *mapping1, |
1018 | struct address_space *mapping2) |
1019 | { |
1020 | if (mapping1) |
1021 | up_write(&mapping1->invalidate_lock); |
1022 | if (mapping2 && mapping1 != mapping2) |
1023 | up_write(&mapping2->invalidate_lock); |
1024 | } |
1025 | EXPORT_SYMBOL(filemap_invalidate_unlock_two); |
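/*
 * Illustrative sketch (hypothetical wrappers): an operation spanning two
 * files, such as a remap_file_range-style implementation, takes both
 * invalidate_locks through the helpers above so the lock ordering stays
 * consistent regardless of argument order.
 */
static void example_two_file_begin(struct file *src, struct file *dst)
{
	filemap_invalidate_lock_two(src->f_mapping, dst->f_mapping);
}

static void example_two_file_end(struct file *src, struct file *dst)
{
	filemap_invalidate_unlock_two(src->f_mapping, dst->f_mapping);
}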
1026 | |
1027 | /* |
1028 | * In order to wait for pages to become available there must be |
1029 | * waitqueues associated with pages. By using a hash table of |
1030 | * waitqueues where the bucket discipline is to maintain all |
1031 | * waiters on the same queue and wake all when any of the pages |
1032 | * become available, and for the woken contexts to check to be |
1033 | * sure the appropriate page became available, this saves space |
1034 | * at a cost of "thundering herd" phenomena during rare hash |
1035 | * collisions. |
1036 | */ |
1037 | #define PAGE_WAIT_TABLE_BITS 8 |
1038 | #define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS) |
1039 | static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned; |
1040 | |
1041 | static wait_queue_head_t *folio_waitqueue(struct folio *folio) |
1042 | { |
1043 | return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)]; |
1044 | } |
1045 | |
1046 | void __init pagecache_init(void) |
1047 | { |
1048 | int i; |
1049 | |
1050 | for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++) |
1051 | init_waitqueue_head(&folio_wait_table[i]); |
1052 | |
1053 | page_writeback_init(); |
1054 | } |
1055 | |
1056 | /* |
1057 | * The page wait code treats the "wait->flags" somewhat unusually, because |
1058 | * we have multiple different kinds of waits, not just the usual "exclusive" |
1059 | * one. |
1060 | * |
1061 | * We have: |
1062 | * |
1063 | * (a) no special bits set: |
1064 | * |
1065 | * We're just waiting for the bit to be released, and when a waker |
1066 | * calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up, |
1067 | * and remove it from the wait queue. |
1068 | * |
1069 | * Simple and straightforward. |
1070 | * |
1071 | * (b) WQ_FLAG_EXCLUSIVE: |
1072 | * |
1073 | * The waiter is waiting to get the lock, and only one waiter should |
1074 | * be woken up to avoid any thundering herd behavior. We'll set the |
1075 | * WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue. |
1076 | * |
1077 | * This is the traditional exclusive wait. |
1078 | * |
1079 | * (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM: |
1080 | * |
1081 | * The waiter is waiting to get the bit, and additionally wants the |
1082 | * lock to be transferred to it for fair lock behavior. If the lock |
1083 | * cannot be taken, we stop walking the wait queue without waking |
1084 | * the waiter. |
1085 | * |
1086 | * This is the "fair lock handoff" case, and in addition to setting |
1087 | * WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see |
1088 | * that it now has the lock. |
1089 | */ |
1090 | static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) |
1091 | { |
1092 | unsigned int flags; |
1093 | struct wait_page_key *key = arg; |
1094 | struct wait_page_queue *wait_page |
1095 | = container_of(wait, struct wait_page_queue, wait); |
1096 | |
1097 | if (!wake_page_match(wait_page, key)) |
1098 | return 0; |
1099 | |
1100 | /* |
1101 | * If it's a lock handoff wait, we get the bit for it, and |
1102 | * stop walking (and do not wake it up) if we can't. |
1103 | */ |
1104 | flags = wait->flags; |
1105 | if (flags & WQ_FLAG_EXCLUSIVE) { |
1106 | if (test_bit(key->bit_nr, &key->folio->flags)) |
1107 | return -1; |
1108 | if (flags & WQ_FLAG_CUSTOM) { |
1109 | if (test_and_set_bit(key->bit_nr, &key->folio->flags)) |
1110 | return -1; |
1111 | flags |= WQ_FLAG_DONE; |
1112 | } |
1113 | } |
1114 | |
1115 | /* |
1116 | * We are holding the wait-queue lock, but the waiter that |
1117 | * is waiting for this will be checking the flags without |
1118 | * any locking. |
1119 | * |
1120 | * So update the flags atomically, and wake up the waiter |
1121 | * afterwards to avoid any races. This store-release pairs |
1122 | * with the load-acquire in folio_wait_bit_common(). |
1123 | */ |
1124 | smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN); |
1125 | wake_up_state(wait->private, mode); |
1126 | |
1127 | /* |
1128 | * Ok, we have successfully done what we're waiting for, |
1129 | * and we can unconditionally remove the wait entry. |
1130 | * |
1131 | * Note that this pairs with the "finish_wait()" in the |
1132 | * waiter, and has to be the absolute last thing we do. |
1133 | * After this list_del_init(&wait->entry) the wait entry |
1134 | * might be de-allocated and the process might even have |
1135 | * exited. |
1136 | */ |
1137 | list_del_init_careful(&wait->entry); |
1138 | return (flags & WQ_FLAG_EXCLUSIVE) != 0; |
1139 | } |
1140 | |
1141 | static void folio_wake_bit(struct folio *folio, int bit_nr) |
1142 | { |
1143 | wait_queue_head_t *q = folio_waitqueue(folio); |
1144 | struct wait_page_key key; |
1145 | unsigned long flags; |
1146 | |
1147 | key.folio = folio; |
1148 | key.bit_nr = bit_nr; |
1149 | key.page_match = 0; |
1150 | |
1151 | spin_lock_irqsave(&q->lock, flags); |
1152 | __wake_up_locked_key(q, TASK_NORMAL, &key); |
1153 | |
1154 | /* |
1155 | * It's possible to miss clearing waiters here, when we woke our page |
1156 | * waiters, but the hashed waitqueue has waiters for other pages on it. |
1157 | * That's okay, it's a rare case. The next waker will clear it. |
1158 | * |
1159 | * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE, |
1160 | * other), the flag may be cleared in the course of freeing the page; |
1161 | * but that is not required for correctness. |
1162 | */ |
1163 | if (!waitqueue_active(q) || !key.page_match) |
1164 | folio_clear_waiters(folio); |
1165 | |
1166 | spin_unlock_irqrestore(&q->lock, flags); |
1167 | } |
1168 | |
1169 | /* |
1170 | * A choice of three behaviors for folio_wait_bit_common(): |
1171 | */ |
1172 | enum behavior { |
1173 | EXCLUSIVE, /* Hold ref to page and take the bit when woken, like |
1174 | * __folio_lock() waiting on then setting PG_locked. |
1175 | */ |
1176 | SHARED, /* Hold ref to page and check the bit when woken, like |
1177 | * folio_wait_writeback() waiting on PG_writeback. |
1178 | */ |
1179 | DROP, /* Drop ref to page before wait, no check when woken, |
1180 | * like folio_put_wait_locked() on PG_locked. |
1181 | */ |
1182 | }; |
1183 | |
1184 | /* |
1185 | * Attempt to check (or get) the folio flag, and mark us done |
1186 | * if successful. |
1187 | */ |
1188 | static inline bool folio_trylock_flag(struct folio *folio, int bit_nr, |
1189 | struct wait_queue_entry *wait) |
1190 | { |
1191 | if (wait->flags & WQ_FLAG_EXCLUSIVE) { |
1192 | if (test_and_set_bit(bit_nr, &folio->flags)) |
1193 | return false; |
1194 | } else if (test_bit(bit_nr, &folio->flags)) |
1195 | return false; |
1196 | |
1197 | wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE; |
1198 | return true; |
1199 | } |
1200 | |
1201 | /* How many times do we accept lock stealing from under a waiter? */ |
1202 | int sysctl_page_lock_unfairness = 5; |
1203 | |
1204 | static inline int folio_wait_bit_common(struct folio *folio, int bit_nr, |
1205 | int state, enum behavior behavior) |
1206 | { |
1207 | wait_queue_head_t *q = folio_waitqueue(folio); |
1208 | int unfairness = sysctl_page_lock_unfairness; |
1209 | struct wait_page_queue wait_page; |
1210 | wait_queue_entry_t *wait = &wait_page.wait; |
1211 | bool thrashing = false; |
1212 | unsigned long pflags; |
1213 | bool in_thrashing; |
1214 | |
1215 | if (bit_nr == PG_locked && |
1216 | !folio_test_uptodate(folio) && folio_test_workingset(folio)) { |
1217 | delayacct_thrashing_start(&in_thrashing); |
1218 | psi_memstall_enter(&pflags); |
1219 | thrashing = true; |
1220 | } |
1221 | |
1222 | init_wait(wait); |
1223 | wait->func = wake_page_function; |
1224 | wait_page.folio = folio; |
1225 | wait_page.bit_nr = bit_nr; |
1226 | |
1227 | repeat: |
1228 | wait->flags = 0; |
1229 | if (behavior == EXCLUSIVE) { |
1230 | wait->flags = WQ_FLAG_EXCLUSIVE; |
1231 | if (--unfairness < 0) |
1232 | wait->flags |= WQ_FLAG_CUSTOM; |
1233 | } |
1234 | |
1235 | /* |
1236 | * Do one last check whether we can get the |
1237 | * page bit synchronously. |
1238 | * |
1239 | * Do the folio_set_waiters() marking before that |
1240 | * to let any waker we _just_ missed know they |
1241 | * need to wake us up (otherwise they'll never |
1242 | * even go to the slow case that looks at the |
1243 | * page queue), and add ourselves to the wait |
1244 | * queue if we need to sleep. |
1245 | * |
1246 | * This part needs to be done under the queue |
1247 | * lock to avoid races. |
1248 | */ |
1249 | spin_lock_irq(&q->lock); |
1250 | folio_set_waiters(folio); |
1251 | if (!folio_trylock_flag(folio, bit_nr, wait)) |
1252 | __add_wait_queue_entry_tail(q, wait); |
1253 | spin_unlock_irq(&q->lock); |
1254 | |
1255 | /* |
1256 | * From now on, all the logic will be based on |
1257 | * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to |
1258 | * see whether the page bit testing has already |
1259 | * been done by the wake function. |
1260 | * |
1261 | * We can drop our reference to the folio. |
1262 | */ |
1263 | if (behavior == DROP) |
1264 | folio_put(folio); |
1265 | |
1266 | /* |
1267 | * Note that until the "finish_wait()", or until |
1268 | * we see the WQ_FLAG_WOKEN flag, we need to |
1269 | * be very careful with the 'wait->flags', because |
1270 | * we may race with a waker that sets them. |
1271 | */ |
1272 | for (;;) { |
1273 | unsigned int flags; |
1274 | |
1275 | set_current_state(state); |
1276 | |
1277 | /* Loop until we've been woken or interrupted */ |
1278 | flags = smp_load_acquire(&wait->flags); |
1279 | if (!(flags & WQ_FLAG_WOKEN)) { |
1280 | if (signal_pending_state(state, current)) |
1281 | break; |
1282 | |
1283 | io_schedule(); |
1284 | continue; |
1285 | } |
1286 | |
1287 | /* If we were non-exclusive, we're done */ |
1288 | if (behavior != EXCLUSIVE) |
1289 | break; |
1290 | |
1291 | /* If the waker got the lock for us, we're done */ |
1292 | if (flags & WQ_FLAG_DONE) |
1293 | break; |
1294 | |
1295 | /* |
1296 | * Otherwise, if we're getting the lock, we need to |
1297 | * try to get it ourselves. |
1298 | * |
1299 | * And if that fails, we'll have to retry this all. |
1300 | */ |
1301 | if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0)))) |
1302 | goto repeat; |
1303 | |
1304 | wait->flags |= WQ_FLAG_DONE; |
1305 | break; |
1306 | } |
1307 | |
1308 | /* |
1309 | * If a signal happened, this 'finish_wait()' may remove the last |
1310 | * waiter from the wait-queues, but the folio waiters bit will remain |
1311 | * set. That's ok. The next wakeup will take care of it, and trying |
1312 | * to do it here would be difficult and prone to races. |
1313 | */ |
1314 | finish_wait(q, wait); |
1315 | |
1316 | if (thrashing) { |
1317 | delayacct_thrashing_end(&in_thrashing); |
1318 | psi_memstall_leave(&pflags); |
1319 | } |
1320 | |
1321 | /* |
1322 | * NOTE! The wait->flags weren't stable until we've done the |
1323 | * 'finish_wait()', and we could have exited the loop above due |
1324 | * to a signal, and had a wakeup event happen after the signal |
1325 | * test but before the 'finish_wait()'. |
1326 | * |
1327 | * So only after the finish_wait() can we reliably determine |
1328 | * if we got woken up or not, so we can now figure out the final |
1329 | * return value based on that state without races. |
1330 | * |
1331 | * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive |
1332 | * waiter, but an exclusive one requires WQ_FLAG_DONE. |
1333 | */ |
1334 | if (behavior == EXCLUSIVE) |
1335 | return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR; |
1336 | |
1337 | return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR; |
1338 | } |
1339 | |
1340 | #ifdef CONFIG_MIGRATION |
1341 | /** |
1342 | * migration_entry_wait_on_locked - Wait for a migration entry to be removed |
1343 | * @entry: migration swap entry. |
1344 | * @ptl: already locked ptl. This function will drop the lock. |
1345 | * |
1346 | * Wait for a migration entry referencing the given page to be removed. This is |
1347 | * equivalent to put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE) except |
1348 | * this can be called without taking a reference on the page. Instead this |
1349 | * should be called while holding the ptl for the migration entry referencing |
1350 | * the page. |
1351 | * |
1352 | * Returns after unlocking the ptl. |
1353 | * |
1354 | * This follows the same logic as folio_wait_bit_common() so see the comments |
1355 | * there. |
1356 | */ |
1357 | void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl) |
1358 | __releases(ptl) |
1359 | { |
1360 | struct wait_page_queue wait_page; |
1361 | wait_queue_entry_t *wait = &wait_page.wait; |
1362 | bool thrashing = false; |
1363 | unsigned long pflags; |
1364 | bool in_thrashing; |
1365 | wait_queue_head_t *q; |
1366 | struct folio *folio = pfn_swap_entry_folio(entry); |
1367 | |
1368 | q = folio_waitqueue(folio); |
1369 | if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) { |
1370 | delayacct_thrashing_start(&in_thrashing); |
1371 | psi_memstall_enter(&pflags); |
1372 | thrashing = true; |
1373 | } |
1374 | |
1375 | init_wait(wait); |
1376 | wait->func = wake_page_function; |
1377 | wait_page.folio = folio; |
1378 | wait_page.bit_nr = PG_locked; |
1379 | wait->flags = 0; |
1380 | |
1381 | spin_lock_irq(&q->lock); |
1382 | folio_set_waiters(folio); |
1383 | if (!folio_trylock_flag(folio, PG_locked, wait)) |
1384 | __add_wait_queue_entry_tail(q, wait); |
1385 | spin_unlock_irq(&q->lock); |
1386 | |
1387 | /* |
1388 | * If a migration entry exists for the page the migration path must hold |
1389 | * a valid reference to the page, and it must take the ptl to remove the |
1390 | * migration entry. So the page is valid until the ptl is dropped. |
1391 | */ |
1392 | spin_unlock(ptl); |
1393 | |
1394 | for (;;) { |
1395 | unsigned int flags; |
1396 | |
1397 | set_current_state(TASK_UNINTERRUPTIBLE); |
1398 | |
1399 | /* Loop until we've been woken or interrupted */ |
1400 | flags = smp_load_acquire(&wait->flags); |
1401 | if (!(flags & WQ_FLAG_WOKEN)) { |
1402 | if (signal_pending_state(TASK_UNINTERRUPTIBLE, current)) |
1403 | break; |
1404 | |
1405 | io_schedule(); |
1406 | continue; |
1407 | } |
1408 | break; |
1409 | } |
1410 | |
1411 | finish_wait(q, wait); |
1412 | |
1413 | if (thrashing) { |
1414 | delayacct_thrashing_end(&in_thrashing); |
1415 | psi_memstall_leave(&pflags); |
1416 | } |
1417 | } |
1418 | #endif |
1419 | |
1420 | void folio_wait_bit(struct folio *folio, int bit_nr) |
1421 | { |
1422 | folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); |
1423 | } |
1424 | EXPORT_SYMBOL(folio_wait_bit); |
1425 | |
1426 | int folio_wait_bit_killable(struct folio *folio, int bit_nr) |
1427 | { |
1428 | return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED); |
1429 | } |
1430 | EXPORT_SYMBOL(folio_wait_bit_killable); |
1431 | |
1432 | /** |
1433 | * folio_put_wait_locked - Drop a reference and wait for it to be unlocked |
1434 | * @folio: The folio to wait for. |
1435 | * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc). |
1436 | * |
1437 | * The caller should hold a reference on @folio. They expect the page to |
1438 | * become unlocked relatively soon, but do not wish to hold up migration |
1439 | * (for example) by holding the reference while waiting for the folio to |
1440 | * come unlocked. After this function returns, the caller should not |
1441 | * dereference @folio. |
1442 | * |
1443 | * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal. |
1444 | */ |
1445 | static int folio_put_wait_locked(struct folio *folio, int state) |
1446 | { |
1447 | return folio_wait_bit_common(folio, PG_locked, state, DROP); |
1448 | } |
1449 | |
1450 | /** |
1451 | * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue |
1452 | * @folio: Folio defining the wait queue of interest |
1453 | * @waiter: Waiter to add to the queue |
1454 | * |
1455 | * Add an arbitrary @waiter to the wait queue for the nominated @folio. |
1456 | */ |
1457 | void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter) |
1458 | { |
1459 | wait_queue_head_t *q = folio_waitqueue(folio); |
1460 | unsigned long flags; |
1461 | |
1462 | spin_lock_irqsave(&q->lock, flags); |
1463 | __add_wait_queue_entry_tail(q, waiter); |
1464 | folio_set_waiters(folio); |
1465 | spin_unlock_irqrestore(&q->lock, flags); |
1466 | } |
1467 | EXPORT_SYMBOL_GPL(folio_add_wait_queue); |
1468 | |
1469 | /** |
1470 | * folio_unlock - Unlock a locked folio. |
1471 | * @folio: The folio. |
1472 | * |
1473 | * Unlocks the folio and wakes up any thread sleeping on the page lock. |
1474 | * |
1475 | * Context: May be called from interrupt or process context. May not be |
1476 | * called from NMI context. |
1477 | */ |
1478 | void folio_unlock(struct folio *folio) |
1479 | { |
1480 | /* Bit 7 allows x86 to check the byte's sign bit */ |
1481 | BUILD_BUG_ON(PG_waiters != 7); |
1482 | BUILD_BUG_ON(PG_locked > 7); |
1483 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
1484 | if (folio_xor_flags_has_waiters(folio, 1 << PG_locked)) |
1485 | folio_wake_bit(folio, PG_locked); |
1486 | } |
1487 | EXPORT_SYMBOL(folio_unlock); |
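/*
 * Illustrative sketch (hypothetical helper): the usual pairing with
 * folio_lock(). A caller that needs the folio stable against truncation
 * locks it, re-checks folio->mapping, and unlocks when done.
 */
static bool example_with_locked_folio(struct folio *folio,
				      struct address_space *mapping)
{
	folio_lock(folio);
	if (folio->mapping != mapping) {	/* truncated while we slept */
		folio_unlock(folio);
		return false;
	}
	/* ... operate on the locked folio ... */
	folio_unlock(folio);
	return true;
}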
1488 | |
1489 | /** |
1490 | * folio_end_read - End read on a folio. |
1491 | * @folio: The folio. |
1492 | * @success: True if all reads completed successfully. |
1493 | * |
1494 | * When all reads against a folio have completed, filesystems should |
1495 | * call this function to let the pagecache know that no more reads |
1496 | * are outstanding. This will unlock the folio and wake up any thread |
1497 | * sleeping on the lock. The folio will also be marked uptodate if all |
1498 | * reads succeeded. |
1499 | * |
1500 | * Context: May be called from interrupt or process context. May not be |
1501 | * called from NMI context. |
1502 | */ |
1503 | void folio_end_read(struct folio *folio, bool success) |
1504 | { |
1505 | unsigned long mask = 1 << PG_locked; |
1506 | |
1507 | /* Must be in bottom byte for x86 to work */ |
1508 | BUILD_BUG_ON(PG_uptodate > 7); |
1509 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
1510 | VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio); |
1511 | |
1512 | if (likely(success)) |
1513 | mask |= 1 << PG_uptodate; |
1514 | if (folio_xor_flags_has_waiters(folio, mask)) |
1515 | folio_wake_bit(folio, PG_locked); |
1516 | } |
1517 | EXPORT_SYMBOL(folio_end_read); |
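/*
 * Illustrative sketch (hypothetical function name): a ->read_folio()
 * completion path. On success the single call below marks the folio
 * uptodate, unlocks it and wakes waiters, so no separate
 * folio_mark_uptodate()/folio_unlock() pair is needed.
 */
static void example_read_folio_done(struct folio *folio, int err)
{
	folio_end_read(folio, err == 0);
}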
1518 | |
1519 | /** |
1520 | * folio_end_private_2 - Clear PG_private_2 and wake any waiters. |
1521 | * @folio: The folio. |
1522 | * |
1523 | * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for |
1524 | * it. The folio reference held for PG_private_2 being set is released. |
1525 | * |
1526 | * This is, for example, used when a netfs folio is being written to a local |
1527 | * disk cache, thereby allowing writes to the cache for the same folio to be |
1528 | * serialised. |
1529 | */ |
1530 | void folio_end_private_2(struct folio *folio) |
1531 | { |
1532 | VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio); |
1533 | clear_bit_unlock(PG_private_2, folio_flags(folio, 0)); |
1534 | folio_wake_bit(folio, PG_private_2); |
1535 | folio_put(folio); |
1536 | } |
1537 | EXPORT_SYMBOL(folio_end_private_2); |
1538 | |
1539 | /** |
1540 | * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio. |
1541 | * @folio: The folio to wait on. |
1542 | * |
1543 | * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio. |
1544 | */ |
1545 | void folio_wait_private_2(struct folio *folio) |
1546 | { |
1547 | while (folio_test_private_2(folio)) |
1548 | folio_wait_bit(folio, PG_private_2); |
1549 | } |
1550 | EXPORT_SYMBOL(folio_wait_private_2); |
1551 | |
1552 | /** |
1553 | * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio. |
1554 | * @folio: The folio to wait on. |
1555 | * |
1556 | * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio or until a |
1557 | * fatal signal is received by the calling task. |
1558 | * |
1559 | * Return: |
1560 | * - 0 if successful. |
1561 | * - -EINTR if a fatal signal was encountered. |
1562 | */ |
1563 | int folio_wait_private_2_killable(struct folio *folio) |
1564 | { |
1565 | int ret = 0; |
1566 | |
1567 | while (folio_test_private_2(folio)) { |
1568 | ret = folio_wait_bit_killable(folio, PG_private_2); |
1569 | if (ret < 0) |
1570 | break; |
1571 | } |
1572 | |
1573 | return ret; |
1574 | } |
1575 | EXPORT_SYMBOL(folio_wait_private_2_killable); |
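
/*
 * Usage sketch, assuming a netfs-like writer: before modifying a folio
 * that may still be in flight to the local cache, wait for PG_private_2
 * to clear, preferring the killable variant so a fatal signal can break
 * the wait. The helper name is hypothetical.
 */
static int __maybe_unused sketch_wait_for_cache_write(struct folio *folio)
{
	/* 0 once PG_private_2 is clear, -EINTR on a fatal signal. */
	return folio_wait_private_2_killable(folio);
}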
1576 | |
1577 | /** |
1578 | * folio_end_writeback - End writeback against a folio. |
1579 | * @folio: The folio. |
1580 | * |
1581 | * The folio must actually be under writeback. |
1582 | * |
1583 | * Context: May be called from process or interrupt context. |
1584 | */ |
1585 | void folio_end_writeback(struct folio *folio) |
1586 | { |
1587 | VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio); |
1588 | |
1589 | /* |
1590 | * folio_test_clear_reclaim() could be used here but it is an |
1591 | * atomic operation and overkill in this particular case. Failing |
1592 | * to shuffle a folio marked for immediate reclaim is too mild |
1593 | * a gain to justify taking an atomic operation penalty at the |
1594 | * end of every folio writeback. |
1595 | */ |
1596 | if (folio_test_reclaim(folio)) { |
1597 | folio_clear_reclaim(folio); |
1598 | folio_rotate_reclaimable(folio); |
1599 | } |
1600 | |
1601 | /* |
1602 | * Writeback does not hold a folio reference of its own, relying |
1603 | * on truncation to wait for the clearing of PG_writeback. |
1604 | * But here we must make sure that the folio is not freed and |
1605 | * reused before the folio_wake_bit(). |
1606 | */ |
1607 | folio_get(folio); |
1608 | if (__folio_end_writeback(folio)) |
1609 | folio_wake_bit(folio, PG_writeback); |
1610 | acct_reclaim_writeback(folio); |
1611 | folio_put(folio); |
1612 | } |
1613 | EXPORT_SYMBOL(folio_end_writeback); |
1614 | |
1615 | /** |
1616 | * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it. |
1617 | * @folio: The folio to lock |
1618 | */ |
1619 | void __folio_lock(struct folio *folio) |
1620 | { |
1621 | folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE, |
1622 | EXCLUSIVE); |
1623 | } |
1624 | EXPORT_SYMBOL(__folio_lock); |
1625 | |
1626 | int __folio_lock_killable(struct folio *folio) |
1627 | { |
1628 | return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE, |
1629 | EXCLUSIVE); |
1630 | } |
1631 | EXPORT_SYMBOL_GPL(__folio_lock_killable); |
1632 | |
1633 | static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait) |
1634 | { |
1635 | struct wait_queue_head *q = folio_waitqueue(folio); |
1636 | int ret; |
1637 | |
1638 | wait->folio = folio; |
1639 | wait->bit_nr = PG_locked; |
1640 | |
1641 | spin_lock_irq(&q->lock); |
1642 | __add_wait_queue_entry_tail(q, &wait->wait); |
1643 | folio_set_waiters(folio); |
1644 | ret = !folio_trylock(folio); |
1645 | /* |
1646 | * If we were successful now, we know we're still on the |
1647 | * waitqueue as we're still under the lock. This means it's |
1648 | * safe to remove and return success, we know the callback |
1649 | * isn't going to trigger. |
1650 | */ |
1651 | if (!ret) |
1652 | __remove_wait_queue(q, &wait->wait); |
1653 | else |
1654 | ret = -EIOCBQUEUED; |
1655 | spin_unlock_irq(&q->lock); |
1656 | return ret; |
1657 | } |
1658 | |
1659 | /* |
1660 | * Return values: |
1661 | * 0 - folio is locked. |
1662 | * non-zero - folio is not locked. |
1663 | * mmap_lock or per-VMA lock has been released (mmap_read_unlock() or |
1664 | * vma_end_read()), unless flags had both FAULT_FLAG_ALLOW_RETRY and |
1665 | * FAULT_FLAG_RETRY_NOWAIT set, in which case the lock is still held. |
1666 | * |
1667 | * If neither ALLOW_RETRY nor KILLABLE are set, will always return 0 |
1668 | * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed. |
1669 | */ |
1670 | vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf) |
1671 | { |
1672 | unsigned int flags = vmf->flags; |
1673 | |
1674 | if (fault_flag_allow_retry_first(flags)) { |
1675 | /* |
1676 | * CAUTION! In this case, mmap_lock/per-VMA lock is not |
1677 | * released even though returning VM_FAULT_RETRY. |
1678 | */ |
1679 | if (flags & FAULT_FLAG_RETRY_NOWAIT) |
1680 | return VM_FAULT_RETRY; |
1681 | |
1682 | release_fault_lock(vmf); |
1683 | if (flags & FAULT_FLAG_KILLABLE) |
1684 | folio_wait_locked_killable(folio); |
1685 | else |
1686 | folio_wait_locked(folio); |
1687 | return VM_FAULT_RETRY; |
1688 | } |
1689 | if (flags & FAULT_FLAG_KILLABLE) { |
1690 | bool ret; |
1691 | |
1692 | ret = __folio_lock_killable(folio); |
1693 | if (ret) { |
1694 | release_fault_lock(vmf); |
1695 | return VM_FAULT_RETRY; |
1696 | } |
1697 | } else { |
1698 | __folio_lock(folio); |
1699 | } |
1700 | |
1701 | return 0; |
1702 | } |
1703 | |
1704 | /** |
1705 | * page_cache_next_miss() - Find the next gap in the page cache. |
1706 | * @mapping: Mapping. |
1707 | * @index: Index. |
1708 | * @max_scan: Maximum range to search. |
1709 | * |
1710 | * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the |
1711 | * gap with the lowest index. |
1712 | * |
1713 | * This function may be called under the rcu_read_lock. However, this will |
1714 | * not atomically search a snapshot of the cache at a single point in time. |
1715 | * For example, if a gap is created at index 5, then subsequently a gap is |
1716 | * created at index 10, page_cache_next_miss covering both indices may |
1717 | * return 10 if called under the rcu_read_lock. |
1718 | * |
1719 | * Return: The index of the gap if found, otherwise an index outside the |
1720 | * range specified (in which case 'return - index >= max_scan' will be true). |
1721 | * In the rare case of index wrap-around, 0 will be returned. |
1722 | */ |
1723 | pgoff_t page_cache_next_miss(struct address_space *mapping, |
1724 | pgoff_t index, unsigned long max_scan) |
1725 | { |
1726 | XA_STATE(xas, &mapping->i_pages, index); |
1727 | |
1728 | while (max_scan--) { |
1729 | void *entry = xas_next(&xas); |
1730 | if (!entry || xa_is_value(entry)) |
1731 | break; |
1732 | if (xas.xa_index == 0) |
1733 | break; |
1734 | } |
1735 | |
1736 | return xas.xa_index; |
1737 | } |
1738 | EXPORT_SYMBOL(page_cache_next_miss); |
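
/*
 * Sketch: one way a readahead-style heuristic could use the gap search
 * above to ask "how many consecutive indices from @index are already
 * cached?". The helper is illustrative and ignores the rare wrap-around
 * case documented in the kernel-doc.
 */
static unsigned long __maybe_unused sketch_cached_run_length(struct address_space *mapping,
		pgoff_t index, unsigned long max_scan)
{
	return page_cache_next_miss(mapping, index, max_scan) - index;
}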
1739 | |
1740 | /** |
1741 | * page_cache_prev_miss() - Find the previous gap in the page cache. |
1742 | * @mapping: Mapping. |
1743 | * @index: Index. |
1744 | * @max_scan: Maximum range to search. |
1745 | * |
1746 | * Search the range [max(index - max_scan + 1, 0), index] for the |
1747 | * gap with the highest index. |
1748 | * |
1749 | * This function may be called under the rcu_read_lock. However, this will |
1750 | * not atomically search a snapshot of the cache at a single point in time. |
1751 | * For example, if a gap is created at index 10, then subsequently a gap is |
1752 | * created at index 5, page_cache_prev_miss() covering both indices may |
1753 | * return 5 if called under the rcu_read_lock. |
1754 | * |
1755 | * Return: The index of the gap if found, otherwise an index outside the |
1756 | * range specified (in which case 'index - return >= max_scan' will be true). |
1757 | * In the rare case of wrap-around, ULONG_MAX will be returned. |
1758 | */ |
1759 | pgoff_t page_cache_prev_miss(struct address_space *mapping, |
1760 | pgoff_t index, unsigned long max_scan) |
1761 | { |
1762 | XA_STATE(xas, &mapping->i_pages, index); |
1763 | |
1764 | while (max_scan--) { |
1765 | void *entry = xas_prev(&xas); |
1766 | if (!entry || xa_is_value(entry)) |
1767 | break; |
1768 | if (xas.xa_index == ULONG_MAX) |
1769 | break; |
1770 | } |
1771 | |
1772 | return xas.xa_index; |
1773 | } |
1774 | EXPORT_SYMBOL(page_cache_prev_miss); |
1775 | |
1776 | /* |
1777 | * Lockless page cache protocol: |
1778 | * On the lookup side: |
1779 | * 1. Load the folio from i_pages |
1780 | * 2. Increment the refcount if it's not zero |
1781 | * 3. If the folio is not found by xas_reload(), put the refcount and retry |
1782 | * |
1783 | * On the removal side: |
1784 | * A. Freeze the page (by zeroing the refcount if nobody else has a reference) |
1785 | * B. Remove the page from i_pages |
1786 | * C. Return the page to the page allocator |
1787 | * |
1788 | * This means that any page may have its reference count temporarily |
1789 | * increased by a speculative page cache (or fast GUP) lookup as it can |
1790 | * be allocated by another user before the RCU grace period expires. |
1791 | * Because the refcount temporarily acquired here may end up being the |
1792 | * last refcount on the page, any page allocation must be freeable by |
1793 | * folio_put(). |
1794 | */ |
1795 | |
1796 | /* |
1797 | * filemap_get_entry - Get a page cache entry. |
1798 | * @mapping: the address_space to search |
1799 | * @index: The page cache index. |
1800 | * |
1801 | * Looks up the page cache entry at @mapping & @index. If it is a folio, |
1802 | * it is returned with an increased refcount. If it is a shadow entry |
1803 | * of a previously evicted folio, or a swap entry from shmem/tmpfs, |
1804 | * it is returned without further action. |
1805 | * |
1806 | * Return: The folio, swap or shadow entry, %NULL if nothing is found. |
1807 | */ |
1808 | void *filemap_get_entry(struct address_space *mapping, pgoff_t index) |
1809 | { |
1810 | XA_STATE(xas, &mapping->i_pages, index); |
1811 | struct folio *folio; |
1812 | |
1813 | rcu_read_lock(); |
1814 | repeat: |
1815 | xas_reset(&xas); |
1816 | folio = xas_load(&xas); |
1817 | if (xas_retry(&xas, folio)) |
1818 | goto repeat; |
1819 | /* |
1820 | * A shadow entry of a recently evicted page, or a swap entry from |
1821 | * shmem/tmpfs. Return it without attempting to raise page count. |
1822 | */ |
1823 | if (!folio || xa_is_value(folio)) |
1824 | goto out; |
1825 | |
1826 | if (!folio_try_get_rcu(folio)) |
1827 | goto repeat; |
1828 | |
1829 | if (unlikely(folio != xas_reload(&xas))) { |
1830 | folio_put(folio); |
1831 | goto repeat; |
1832 | } |
1833 | out: |
1834 | rcu_read_unlock(); |
1835 | |
1836 | return folio; |
1837 | } |
1838 | |
1839 | /** |
1840 | * __filemap_get_folio - Find and get a reference to a folio. |
1841 | * @mapping: The address_space to search. |
1842 | * @index: The page index. |
1843 | * @fgp_flags: %FGP flags modify how the folio is returned. |
1844 | * @gfp: Memory allocation flags to use if %FGP_CREAT is specified. |
1845 | * |
1846 | * Looks up the page cache entry at @mapping & @index. |
1847 | * |
1848 | * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even |
1849 | * if the %GFP flags specified for %FGP_CREAT are atomic. |
1850 | * |
1851 | * If this function returns a folio, it is returned with an increased refcount. |
1852 | * |
1853 | * Return: The found folio or an ERR_PTR() otherwise. |
1854 | */ |
1855 | struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, |
1856 | fgf_t fgp_flags, gfp_t gfp) |
1857 | { |
1858 | struct folio *folio; |
1859 | |
1860 | repeat: |
1861 | folio = filemap_get_entry(mapping, index); |
1862 | if (xa_is_value(folio)) |
1863 | folio = NULL; |
1864 | if (!folio) |
1865 | goto no_page; |
1866 | |
1867 | if (fgp_flags & FGP_LOCK) { |
1868 | if (fgp_flags & FGP_NOWAIT) { |
1869 | if (!folio_trylock(folio)) { |
1870 | folio_put(folio); |
1871 | return ERR_PTR(-EAGAIN); |
1872 | } |
1873 | } else { |
1874 | folio_lock(folio); |
1875 | } |
1876 | |
1877 | /* Has the page been truncated? */ |
1878 | if (unlikely(folio->mapping != mapping)) { |
1879 | folio_unlock(folio); |
1880 | folio_put(folio); |
1881 | goto repeat; |
1882 | } |
1883 | VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); |
1884 | } |
1885 | |
1886 | if (fgp_flags & FGP_ACCESSED) |
1887 | folio_mark_accessed(folio); |
1888 | else if (fgp_flags & FGP_WRITE) { |
1889 | /* Clear idle flag for buffer write */ |
1890 | if (folio_test_idle(folio)) |
1891 | folio_clear_idle(folio); |
1892 | } |
1893 | |
1894 | if (fgp_flags & FGP_STABLE) |
1895 | folio_wait_stable(folio); |
1896 | no_page: |
1897 | if (!folio && (fgp_flags & FGP_CREAT)) { |
1898 | unsigned order = FGF_GET_ORDER(fgp_flags); |
1899 | int err; |
1900 | |
1901 | if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping)) |
1902 | gfp |= __GFP_WRITE; |
1903 | if (fgp_flags & FGP_NOFS) |
1904 | gfp &= ~__GFP_FS; |
1905 | if (fgp_flags & FGP_NOWAIT) { |
1906 | gfp &= ~GFP_KERNEL; |
1907 | gfp |= GFP_NOWAIT | __GFP_NOWARN; |
1908 | } |
1909 | if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP)))) |
1910 | fgp_flags |= FGP_LOCK; |
1911 | |
1912 | if (!mapping_large_folio_support(mapping)) |
1913 | order = 0; |
1914 | if (order > MAX_PAGECACHE_ORDER) |
1915 | order = MAX_PAGECACHE_ORDER; |
1916 | /* If we're not aligned, allocate a smaller folio */ |
1917 | if (index & ((1UL << order) - 1)) |
1918 | order = __ffs(index); |
1919 | |
1920 | do { |
1921 | gfp_t alloc_gfp = gfp; |
1922 | |
1923 | err = -ENOMEM; |
1924 | if (order > 0) |
1925 | alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN; |
1926 | folio = filemap_alloc_folio(alloc_gfp, order); |
1927 | if (!folio) |
1928 | continue; |
1929 | |
1930 | /* Init accessed so we avoid an atomic mark_page_accessed() later */ |
1931 | if (fgp_flags & FGP_ACCESSED) |
1932 | __folio_set_referenced(folio); |
1933 | |
1934 | err = filemap_add_folio(mapping, folio, index, gfp); |
1935 | if (!err) |
1936 | break; |
1937 | folio_put(folio); |
1938 | folio = NULL; |
1939 | } while (order-- > 0); |
1940 | |
1941 | if (err == -EEXIST) |
1942 | goto repeat; |
1943 | if (err) |
1944 | return ERR_PTR(err); |
1945 | /* |
1946 | * filemap_add_folio locks the page, and for mmap |
1947 | * we expect an unlocked page. |
1948 | */ |
1949 | if (folio && (fgp_flags & FGP_FOR_MMAP)) |
1950 | folio_unlock(folio); |
1951 | } |
1952 | |
1953 | if (!folio) |
1954 | return ERR_PTR(-ENOENT); |
1955 | return folio; |
1956 | } |
1957 | EXPORT_SYMBOL(__filemap_get_folio); |
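
/*
 * Usage sketch for the common FGP_LOCK | FGP_CREAT case: look up or
 * create the folio covering @index and return with it locked. The
 * wrapper itself is hypothetical; only the ERR_PTR() convention and the
 * unlock/put pairing are taken from the documentation above.
 */
static int __maybe_unused sketch_touch_index(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* The folio is locked and referenced here; do the real work. */

	folio_unlock(folio);
	folio_put(folio);
	return 0;
}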
1958 | |
1959 | static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max, |
1960 | xa_mark_t mark) |
1961 | { |
1962 | struct folio *folio; |
1963 | |
1964 | retry: |
1965 | if (mark == XA_PRESENT) |
1966 | folio = xas_find(xas, max); |
1967 | else |
1968 | folio = xas_find_marked(xas, max, mark); |
1969 | |
1970 | if (xas_retry(xas, folio)) |
1971 | goto retry; |
1972 | /* |
1973 | * A shadow entry of a recently evicted page, a swap |
1974 | * entry from shmem/tmpfs or a DAX entry. Return it |
1975 | * without attempting to raise page count. |
1976 | */ |
1977 | if (!folio || xa_is_value(folio)) |
1978 | return folio; |
1979 | |
1980 | if (!folio_try_get_rcu(folio)) |
1981 | goto reset; |
1982 | |
1983 | if (unlikely(folio != xas_reload(xas))) { |
1984 | folio_put(folio); |
1985 | goto reset; |
1986 | } |
1987 | |
1988 | return folio; |
1989 | reset: |
1990 | xas_reset(xas); |
1991 | goto retry; |
1992 | } |
1993 | |
1994 | /** |
1995 | * find_get_entries - gang pagecache lookup |
1996 | * @mapping: The address_space to search |
1997 | * @start: The starting page cache index |
1998 | * @end: The final page index (inclusive). |
1999 | * @fbatch: Where the resulting entries are placed. |
2000 | * @indices: The cache indices corresponding to the entries in @fbatch |
2001 | * |
2002 | * find_get_entries() will search for and return a batch of entries in |
2003 | * the mapping. The entries are placed in @fbatch. find_get_entries() |
2004 | * takes a reference on any actual folios it returns. |
2005 | * |
2006 | * The entries have ascending indexes. The indices may not be consecutive |
2007 | * due to not-present entries or large folios. |
2008 | * |
2009 | * Any shadow entries of evicted folios, or swap entries from |
2010 | * shmem/tmpfs, are included in the returned array. |
2011 | * |
2012 | * Return: The number of entries which were found. |
2013 | */ |
2014 | unsigned find_get_entries(struct address_space *mapping, pgoff_t *start, |
2015 | pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices) |
2016 | { |
2017 | XA_STATE(xas, &mapping->i_pages, *start); |
2018 | struct folio *folio; |
2019 | |
2020 | rcu_read_lock(); |
2021 | while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { |
2022 | indices[fbatch->nr] = xas.xa_index; |
2023 | if (!folio_batch_add(fbatch, folio)) |
2024 | break; |
2025 | } |
2026 | rcu_read_unlock(); |
2027 | |
2028 | if (folio_batch_count(fbatch)) { |
2029 | unsigned long nr = 1; |
2030 | int idx = folio_batch_count(fbatch) - 1; |
2031 | |
2032 | folio = fbatch->folios[idx]; |
2033 | if (!xa_is_value(folio)) |
2034 | nr = folio_nr_pages(folio); |
2035 | *start = indices[idx] + nr; |
2036 | } |
2037 | return folio_batch_count(fbatch); |
2038 | } |
2039 | |
2040 | /** |
2041 | * find_lock_entries - Find a batch of pagecache entries. |
2042 | * @mapping: The address_space to search. |
2043 | * @start: The starting page cache index. |
2044 | * @end: The final page index (inclusive). |
2045 | * @fbatch: Where the resulting entries are placed. |
2046 | * @indices: The cache indices of the entries in @fbatch. |
2047 | * |
2048 | * find_lock_entries() will return a batch of entries from @mapping. |
2049 | * Swap, shadow and DAX entries are included. Folios are returned |
2050 | * locked and with an incremented refcount. Folios which are locked |
2051 | * by somebody else or under writeback are skipped. Folios which are |
2052 | * partially outside the range are not returned. |
2053 | * |
2054 | * The entries have ascending indexes. The indices may not be consecutive |
2055 | * due to not-present entries, large folios, folios which could not be |
2056 | * locked or folios under writeback. |
2057 | * |
2058 | * Return: The number of entries which were found. |
2059 | */ |
2060 | unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start, |
2061 | pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices) |
2062 | { |
2063 | XA_STATE(xas, &mapping->i_pages, *start); |
2064 | struct folio *folio; |
2065 | |
2066 | rcu_read_lock(); |
2067 | while ((folio = find_get_entry(&xas, end, XA_PRESENT))) { |
2068 | if (!xa_is_value(folio)) { |
2069 | if (folio->index < *start) |
2070 | goto put; |
2071 | if (folio_next_index(folio) - 1 > end) |
2072 | goto put; |
2073 | if (!folio_trylock(folio)) |
2074 | goto put; |
2075 | if (folio->mapping != mapping || |
2076 | folio_test_writeback(folio)) |
2077 | goto unlock; |
2078 | VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index), |
2079 | folio); |
2080 | } |
2081 | indices[fbatch->nr] = xas.xa_index; |
2082 | if (!folio_batch_add(fbatch, folio)) |
2083 | break; |
2084 | continue; |
2085 | unlock: |
2086 | folio_unlock(folio); |
2087 | put: |
2088 | folio_put(folio); |
2089 | } |
2090 | rcu_read_unlock(); |
2091 | |
2092 | if (folio_batch_count(fbatch)) { |
2093 | unsigned long nr = 1; |
2094 | int idx = folio_batch_count(fbatch) - 1; |
2095 | |
2096 | folio = fbatch->folios[idx]; |
2097 | if (!xa_is_value(folio)) |
2098 | nr = folio_nr_pages(folio); |
2099 | *start = indices[idx] + nr; |
2100 | } |
2101 | return folio_batch_count(fbatch); |
2102 | } |
2103 | |
2104 | /** |
2105 | * filemap_get_folios - Get a batch of folios |
2106 | * @mapping: The address_space to search |
2107 | * @start: The starting page index |
2108 | * @end: The final page index (inclusive) |
2109 | * @fbatch: The batch to fill. |
2110 | * |
2111 | * Search for and return a batch of folios in the mapping starting at |
2112 | * index @start and up to index @end (inclusive). The folios are returned |
2113 | * in @fbatch with an elevated reference count. |
2114 | * |
2115 | * Return: The number of folios which were found. |
2116 | * We also update @start to index the next folio for the traversal. |
2117 | */ |
2118 | unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start, |
2119 | pgoff_t end, struct folio_batch *fbatch) |
2120 | { |
2121 | return filemap_get_folios_tag(mapping, start, end, XA_PRESENT, fbatch); |
2122 | } |
2123 | EXPORT_SYMBOL(filemap_get_folios); |
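
/*
 * Sketch of the usual batched-iteration pattern: count how many base
 * pages are cached in [start, end]. The counting itself is illustrative;
 * the folio_batch_init()/filemap_get_folios()/folio_batch_release() loop
 * is the intended calling convention.
 */
static unsigned long __maybe_unused sketch_count_cached_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned long pages = 0;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			pages += folio_nr_pages(fbatch.folios[i]);
		folio_batch_release(&fbatch);
		cond_resched();
	}
	return pages;
}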
2124 | |
2125 | /** |
2126 | * filemap_get_folios_contig - Get a batch of contiguous folios |
2127 | * @mapping: The address_space to search |
2128 | * @start: The starting page index |
2129 | * @end: The final page index (inclusive) |
2130 | * @fbatch: The batch to fill |
2131 | * |
2132 | * filemap_get_folios_contig() works exactly like filemap_get_folios(), |
2133 | * except the returned folios are guaranteed to be contiguous. This may |
2134 | * not return all contiguous folios if the batch gets filled up. |
2135 | * |
2136 | * Return: The number of folios found. |
2137 | * Also update @start to be positioned for traversal of the next folio. |
2138 | */ |
2139 | |
2140 | unsigned filemap_get_folios_contig(struct address_space *mapping, |
2141 | pgoff_t *start, pgoff_t end, struct folio_batch *fbatch) |
2142 | { |
2143 | XA_STATE(xas, &mapping->i_pages, *start); |
2144 | unsigned long nr; |
2145 | struct folio *folio; |
2146 | |
2147 | rcu_read_lock(); |
2148 | |
2149 | for (folio = xas_load(&xas); folio && xas.xa_index <= end; |
2150 | folio = xas_next(&xas)) { |
2151 | if (xas_retry(&xas, folio)) |
2152 | continue; |
2153 | /* |
2154 | * If the entry has been swapped out, we can stop looking. |
2155 | * No current caller is looking for DAX entries. |
2156 | */ |
2157 | if (xa_is_value(folio)) |
2158 | goto update_start; |
2159 | |
2160 | if (!folio_try_get_rcu(folio)) |
2161 | goto retry; |
2162 | |
2163 | if (unlikely(folio != xas_reload(&xas))) |
2164 | goto put_folio; |
2165 | |
2166 | if (!folio_batch_add(fbatch, folio)) { |
2167 | nr = folio_nr_pages(folio); |
2168 | *start = folio->index + nr; |
2169 | goto out; |
2170 | } |
2171 | continue; |
2172 | put_folio: |
2173 | folio_put(folio); |
2174 | |
2175 | retry: |
2176 | xas_reset(&xas); |
2177 | } |
2178 | |
2179 | update_start: |
2180 | nr = folio_batch_count(fbatch); |
2181 | |
2182 | if (nr) { |
2183 | folio = fbatch->folios[nr - 1]; |
2184 | *start = folio_next_index(folio); |
2185 | } |
2186 | out: |
2187 | rcu_read_unlock(); |
2188 | return folio_batch_count(fbatch); |
2189 | } |
2190 | EXPORT_SYMBOL(filemap_get_folios_contig); |
2191 | |
2192 | /** |
2193 | * filemap_get_folios_tag - Get a batch of folios matching @tag |
2194 | * @mapping: The address_space to search |
2195 | * @start: The starting page index |
2196 | * @end: The final page index (inclusive) |
2197 | * @tag: The tag index |
2198 | * @fbatch: The batch to fill |
2199 | * |
2200 | * The first folio may start before @start; if it does, it will contain |
2201 | * @start. The final folio may extend beyond @end; if it does, it will |
2202 | * contain @end. The folios have ascending indices. There may be gaps |
2203 | * between the folios if there are indices which have no folio in the |
2204 | * page cache. If folios are added to or removed from the page cache |
2205 | * while this is running, they may or may not be found by this call. |
2206 | * Only returns folios that are tagged with @tag. |
2207 | * |
2208 | * Return: The number of folios found. |
2209 | * Also update @start to index the next folio for traversal. |
2210 | */ |
2211 | unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start, |
2212 | pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch) |
2213 | { |
2214 | XA_STATE(xas, &mapping->i_pages, *start); |
2215 | struct folio *folio; |
2216 | |
2217 | rcu_read_lock(); |
2218 | while ((folio = find_get_entry(&xas, end, tag)) != NULL) { |
2219 | /* |
2220 | * Shadow entries should never be tagged, but this iteration |
2221 | * is lockless so there is a window for page reclaim to evict |
2222 | * a page we saw tagged. Skip over it. |
2223 | */ |
2224 | if (xa_is_value(folio)) |
2225 | continue; |
2226 | if (!folio_batch_add(fbatch, folio)) { |
2227 | unsigned long nr = folio_nr_pages(folio); |
2228 | *start = folio->index + nr; |
2229 | goto out; |
2230 | } |
2231 | } |
2232 | /* |
2233 | * We come here when there is no page beyond @end. We take care to not |
2234 | * overflow the index @start as it confuses some of the callers. This |
2235 | * breaks the iteration when there is a page at index -1 but that is |
2236 | * already broken anyway. |
2237 | */ |
2238 | if (end == (pgoff_t)-1) |
2239 | *start = (pgoff_t)-1; |
2240 | else |
2241 | *start = end + 1; |
2242 | out: |
2243 | rcu_read_unlock(); |
2244 | |
2245 | return folio_batch_count(fbatch); |
2246 | } |
2247 | EXPORT_SYMBOL(filemap_get_folios_tag); |
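
/*
 * Sketch of the classic writeback-style walk over folios tagged dirty.
 * The per-folio action is a placeholder; real writeback also rechecks
 * the dirty state and the mapping under the folio lock.
 */
static void __maybe_unused sketch_walk_dirty_folios(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios_tag(mapping, &start, end,
				      PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			/* ... kick off writeback for this folio ... */
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}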
2248 | |
2249 | /* |
2250 | * CD/DVDs are error prone. When a medium error occurs, the driver may fail |
2251 | * a _large_ part of the i/o request. Imagine the worst scenario: |
2252 | * |
2253 | * ---R__________________________________________B__________ |
2254 | * ^ reading here ^ bad block(assume 4k) |
2255 | * |
2256 | * read(R) => miss => readahead(R...B) => media error => frustrating retries |
2257 | * => failing the whole request => read(R) => read(R+1) => |
2258 | * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => |
2259 | * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => |
2260 | * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... |
2261 | * |
2262 | * It is going insane. Fix it by quickly scaling down the readahead size. |
2263 | */ |
2264 | static void shrink_readahead_size_eio(struct file_ra_state *ra) |
2265 | { |
2266 | ra->ra_pages /= 4; |
2267 | } |
2268 | |
2269 | /* |
2270 | * filemap_get_read_batch - Get a batch of folios for read |
2271 | * |
2272 | * Get a batch of folios which represent a contiguous range of bytes in |
2273 | * the file. No exceptional entries will be returned. If @index is in |
2274 | * the middle of a folio, the entire folio will be returned. The last |
2275 | * folio in the batch may have the readahead flag set or the uptodate flag |
2276 | * clear so that the caller can take the appropriate action. |
2277 | */ |
2278 | static void filemap_get_read_batch(struct address_space *mapping, |
2279 | pgoff_t index, pgoff_t max, struct folio_batch *fbatch) |
2280 | { |
2281 | XA_STATE(xas, &mapping->i_pages, index); |
2282 | struct folio *folio; |
2283 | |
2284 | rcu_read_lock(); |
2285 | for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) { |
2286 | if (xas_retry(&xas, folio)) |
2287 | continue; |
2288 | if (xas.xa_index > max || xa_is_value(folio)) |
2289 | break; |
2290 | if (xa_is_sibling(folio)) |
2291 | break; |
2292 | if (!folio_try_get_rcu(folio)) |
2293 | goto retry; |
2294 | |
2295 | if (unlikely(folio != xas_reload(&xas))) |
2296 | goto put_folio; |
2297 | |
2298 | if (!folio_batch_add(fbatch, folio)) |
2299 | break; |
2300 | if (!folio_test_uptodate(folio)) |
2301 | break; |
2302 | if (folio_test_readahead(folio)) |
2303 | break; |
2304 | xas_advance(&xas, folio_next_index(folio) - 1); |
2305 | continue; |
2306 | put_folio: |
2307 | folio_put(folio); |
2308 | retry: |
2309 | xas_reset(&xas); |
2310 | } |
2311 | rcu_read_unlock(); |
2312 | } |
2313 | |
2314 | static int filemap_read_folio(struct file *file, filler_t filler, |
2315 | struct folio *folio) |
2316 | { |
2317 | bool workingset = folio_test_workingset(folio); |
2318 | unsigned long pflags; |
2319 | int error; |
2320 | |
2321 | /* |
2322 | * A previous I/O error may have been due to temporary failures, |
2323 | * eg. multipath errors. PG_error will be set again if read_folio |
2324 | * fails. |
2325 | */ |
2326 | folio_clear_error(folio); |
2327 | |
2328 | /* Start the actual read. The read will unlock the page. */ |
2329 | if (unlikely(workingset)) |
2330 | psi_memstall_enter(&pflags); |
2331 | error = filler(file, folio); |
2332 | if (unlikely(workingset)) |
2333 | psi_memstall_leave(&pflags); |
2334 | if (error) |
2335 | return error; |
2336 | |
2337 | error = folio_wait_locked_killable(folio); |
2338 | if (error) |
2339 | return error; |
2340 | if (folio_test_uptodate(folio)) |
2341 | return 0; |
2342 | if (file) |
2343 | shrink_readahead_size_eio(&file->f_ra); |
2344 | return -EIO; |
2345 | } |
2346 | |
2347 | static bool filemap_range_uptodate(struct address_space *mapping, |
2348 | loff_t pos, size_t count, struct folio *folio, |
2349 | bool need_uptodate) |
2350 | { |
2351 | if (folio_test_uptodate(folio)) |
2352 | return true; |
2353 | /* pipes can't handle partially uptodate pages */ |
2354 | if (need_uptodate) |
2355 | return false; |
2356 | if (!mapping->a_ops->is_partially_uptodate) |
2357 | return false; |
2358 | if (mapping->host->i_blkbits >= folio_shift(folio)) |
2359 | return false; |
2360 | |
2361 | if (folio_pos(folio) > pos) { |
2362 | count -= folio_pos(folio) - pos; |
2363 | pos = 0; |
2364 | } else { |
2365 | pos -= folio_pos(folio); |
2366 | } |
2367 | |
2368 | return mapping->a_ops->is_partially_uptodate(folio, pos, count); |
2369 | } |
2370 | |
2371 | static int filemap_update_page(struct kiocb *iocb, |
2372 | struct address_space *mapping, size_t count, |
2373 | struct folio *folio, bool need_uptodate) |
2374 | { |
2375 | int error; |
2376 | |
2377 | if (iocb->ki_flags & IOCB_NOWAIT) { |
2378 | if (!filemap_invalidate_trylock_shared(mapping)) |
2379 | return -EAGAIN; |
2380 | } else { |
2381 | filemap_invalidate_lock_shared(mapping); |
2382 | } |
2383 | |
2384 | if (!folio_trylock(folio)) { |
2385 | error = -EAGAIN; |
2386 | if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) |
2387 | goto unlock_mapping; |
2388 | if (!(iocb->ki_flags & IOCB_WAITQ)) { |
2389 | filemap_invalidate_unlock_shared(mapping); |
2390 | /* |
2391 | * This is where we usually end up waiting for a |
2392 | * previously submitted readahead to finish. |
2393 | */ |
2394 | folio_put_wait_locked(folio, TASK_KILLABLE); |
2395 | return AOP_TRUNCATED_PAGE; |
2396 | } |
2397 | error = __folio_lock_async(folio, iocb->ki_waitq); |
2398 | if (error) |
2399 | goto unlock_mapping; |
2400 | } |
2401 | |
2402 | error = AOP_TRUNCATED_PAGE; |
2403 | if (!folio->mapping) |
2404 | goto unlock; |
2405 | |
2406 | error = 0; |
2407 | if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio, |
2408 | need_uptodate)) |
2409 | goto unlock; |
2410 | |
2411 | error = -EAGAIN; |
2412 | if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ)) |
2413 | goto unlock; |
2414 | |
2415 | error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio, |
2416 | folio); |
2417 | goto unlock_mapping; |
2418 | unlock: |
2419 | folio_unlock(folio); |
2420 | unlock_mapping: |
2421 | filemap_invalidate_unlock_shared(mapping); |
2422 | if (error == AOP_TRUNCATED_PAGE) |
2423 | folio_put(folio); |
2424 | return error; |
2425 | } |
2426 | |
2427 | static int filemap_create_folio(struct file *file, |
2428 | struct address_space *mapping, pgoff_t index, |
2429 | struct folio_batch *fbatch) |
2430 | { |
2431 | struct folio *folio; |
2432 | int error; |
2433 | |
2434 | folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0); |
2435 | if (!folio) |
2436 | return -ENOMEM; |
2437 | |
2438 | /* |
2439 | * Protect against truncate / hole punch. Grabbing invalidate_lock |
2440 | * here assures we cannot instantiate and bring uptodate new |
2441 | * pagecache folios after evicting page cache during truncate |
2442 | * and before actually freeing blocks. Note that we could |
2443 | * release invalidate_lock after inserting the folio into |
2444 | * the page cache as the locked folio would then be enough to |
2445 | * synchronize with hole punching. But there are code paths |
2446 | * such as filemap_update_page() filling in partially uptodate |
2447 | * pages or ->readahead() that need to hold invalidate_lock |
2448 | * while mapping blocks for IO so let's hold the lock here as |
2449 | * well to keep locking rules simple. |
2450 | */ |
2451 | filemap_invalidate_lock_shared(mapping); |
2452 | error = filemap_add_folio(mapping, folio, index, |
2453 | mapping_gfp_constraint(mapping, GFP_KERNEL)); |
2454 | if (error == -EEXIST) |
2455 | error = AOP_TRUNCATED_PAGE; |
2456 | if (error) |
2457 | goto error; |
2458 | |
2459 | error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); |
2460 | if (error) |
2461 | goto error; |
2462 | |
2463 | filemap_invalidate_unlock_shared(mapping); |
2464 | folio_batch_add(fbatch, folio); |
2465 | return 0; |
2466 | error: |
2467 | filemap_invalidate_unlock_shared(mapping); |
2468 | folio_put(folio); |
2469 | return error; |
2470 | } |
2471 | |
2472 | static int filemap_readahead(struct kiocb *iocb, struct file *file, |
2473 | struct address_space *mapping, struct folio *folio, |
2474 | pgoff_t last_index) |
2475 | { |
2476 | DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index); |
2477 | |
2478 | if (iocb->ki_flags & IOCB_NOIO) |
2479 | return -EAGAIN; |
2480 | page_cache_async_ra(&ractl, folio, last_index - folio->index); |
2481 | return 0; |
2482 | } |
2483 | |
2484 | static int filemap_get_pages(struct kiocb *iocb, size_t count, |
2485 | struct folio_batch *fbatch, bool need_uptodate) |
2486 | { |
2487 | struct file *filp = iocb->ki_filp; |
2488 | struct address_space *mapping = filp->f_mapping; |
2489 | struct file_ra_state *ra = &filp->f_ra; |
2490 | pgoff_t index = iocb->ki_pos >> PAGE_SHIFT; |
2491 | pgoff_t last_index; |
2492 | struct folio *folio; |
2493 | int err = 0; |
2494 | |
2495 | /* "last_index" is the index of the page beyond the end of the read */ |
2496 | last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE); |
2497 | retry: |
2498 | if (fatal_signal_pending(current)) |
2499 | return -EINTR; |
2500 | |
2501 | filemap_get_read_batch(mapping, index, last_index - 1, fbatch); |
2502 | if (!folio_batch_count(fbatch)) { |
2503 | if (iocb->ki_flags & IOCB_NOIO) |
2504 | return -EAGAIN; |
2505 | page_cache_sync_readahead(mapping, ra, filp, index, |
2506 | last_index - index); |
2507 | filemap_get_read_batch(mapping, index, last_index - 1, fbatch); |
2508 | } |
2509 | if (!folio_batch_count(fbatch)) { |
2510 | if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ)) |
2511 | return -EAGAIN; |
2512 | err = filemap_create_folio(filp, mapping, |
2513 | iocb->ki_pos >> PAGE_SHIFT, fbatch); |
2514 | if (err == AOP_TRUNCATED_PAGE) |
2515 | goto retry; |
2516 | return err; |
2517 | } |
2518 | |
2519 | folio = fbatch->folios[folio_batch_count(fbatch) - 1]; |
2520 | if (folio_test_readahead(folio)) { |
2521 | err = filemap_readahead(iocb, filp, mapping, folio, last_index); |
2522 | if (err) |
2523 | goto err; |
2524 | } |
2525 | if (!folio_test_uptodate(folio)) { |
2526 | if ((iocb->ki_flags & IOCB_WAITQ) && |
2527 | folio_batch_count(fbatch) > 1) |
2528 | iocb->ki_flags |= IOCB_NOWAIT; |
2529 | err = filemap_update_page(iocb, mapping, count, folio, |
2530 | need_uptodate); |
2531 | if (err) |
2532 | goto err; |
2533 | } |
2534 | |
2535 | return 0; |
2536 | err: |
2537 | if (err < 0) |
2538 | folio_put(folio); |
2539 | if (likely(--fbatch->nr)) |
2540 | return 0; |
2541 | if (err == AOP_TRUNCATED_PAGE) |
2542 | goto retry; |
2543 | return err; |
2544 | } |
2545 | |
2546 | static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio) |
2547 | { |
2548 | unsigned int shift = folio_shift(folio); |
2549 | |
2550 | return (pos1 >> shift == pos2 >> shift); |
2551 | } |
2552 | |
2553 | /** |
2554 | * filemap_read - Read data from the page cache. |
2555 | * @iocb: The iocb to read. |
2556 | * @iter: Destination for the data. |
2557 | * @already_read: Number of bytes already read by the caller. |
2558 | * |
2559 | * Copies data from the page cache. If the data is not currently present, |
2560 | * uses the readahead and read_folio address_space operations to fetch it. |
2561 | * |
2562 | * Return: Total number of bytes copied, including those already read by |
2563 | * the caller. If an error happens before any bytes are copied, returns |
2564 | * a negative error number. |
2565 | */ |
2566 | ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter, |
2567 | ssize_t already_read) |
2568 | { |
2569 | struct file *filp = iocb->ki_filp; |
2570 | struct file_ra_state *ra = &filp->f_ra; |
2571 | struct address_space *mapping = filp->f_mapping; |
2572 | struct inode *inode = mapping->host; |
2573 | struct folio_batch fbatch; |
2574 | int i, error = 0; |
2575 | bool writably_mapped; |
2576 | loff_t isize, end_offset; |
2577 | loff_t last_pos = ra->prev_pos; |
2578 | |
2579 | if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes)) |
2580 | return 0; |
2581 | if (unlikely(!iov_iter_count(iter))) |
2582 | return 0; |
2583 | |
2584 | iov_iter_truncate(iter, inode->i_sb->s_maxbytes); |
2585 | folio_batch_init(&fbatch); |
2586 | |
2587 | do { |
2588 | cond_resched(); |
2589 | |
2590 | /* |
2591 | * If we've already successfully copied some data, then we |
2592 | * can no longer safely return -EIOCBQUEUED. Hence mark |
2593 | * an async read NOWAIT at that point. |
2594 | */ |
2595 | if ((iocb->ki_flags & IOCB_WAITQ) && already_read) |
2596 | iocb->ki_flags |= IOCB_NOWAIT; |
2597 | |
2598 | if (unlikely(iocb->ki_pos >= i_size_read(inode))) |
2599 | break; |
2600 | |
2601 | error = filemap_get_pages(iocb, iter->count, &fbatch, false); |
2602 | if (error < 0) |
2603 | break; |
2604 | |
2605 | /* |
2606 | * i_size must be checked after we know the pages are Uptodate. |
2607 | * |
2608 | * Checking i_size after the check allows us to calculate |
2609 | * the correct value for "nr", which means the zero-filled |
2610 | * part of the page is not copied back to userspace (unless |
2611 | * another truncate extends the file - this is desired though). |
2612 | */ |
2613 | isize = i_size_read(inode); |
2614 | if (unlikely(iocb->ki_pos >= isize)) |
2615 | goto put_folios; |
2616 | end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count); |
2617 | |
2618 | /* |
2619 | * Once we start copying data, we don't want to be touching any |
2620 | * cachelines that might be contended: |
2621 | */ |
2622 | writably_mapped = mapping_writably_mapped(mapping); |
2623 | |
2624 | /* |
2625 | * When a read accesses the same folio several times, only |
2626 | * mark it as accessed the first time. |
2627 | */ |
2628 | if (!pos_same_folio(iocb->ki_pos, last_pos - 1, |
2629 | fbatch.folios[0])) |
2630 | folio_mark_accessed(fbatch.folios[0]); |
2631 | |
2632 | for (i = 0; i < folio_batch_count(&fbatch); i++) { |
2633 | struct folio *folio = fbatch.folios[i]; |
2634 | size_t fsize = folio_size(folio); |
2635 | size_t offset = iocb->ki_pos & (fsize - 1); |
2636 | size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos, |
2637 | fsize - offset); |
2638 | size_t copied; |
2639 | |
2640 | if (end_offset < folio_pos(folio)) |
2641 | break; |
2642 | if (i > 0) |
2643 | folio_mark_accessed(folio); |
2644 | /* |
2645 | * If users can be writing to this folio using arbitrary |
2646 | * virtual addresses, take care of potential aliasing |
2647 | * before reading the folio on the kernel side. |
2648 | */ |
2649 | if (writably_mapped) |
2650 | flush_dcache_folio(folio); |
2651 | |
2652 | copied = copy_folio_to_iter(folio, offset, bytes, iter); |
2653 | |
2654 | already_read += copied; |
2655 | iocb->ki_pos += copied; |
2656 | last_pos = iocb->ki_pos; |
2657 | |
2658 | if (copied < bytes) { |
2659 | error = -EFAULT; |
2660 | break; |
2661 | } |
2662 | } |
2663 | put_folios: |
2664 | for (i = 0; i < folio_batch_count(&fbatch); i++) |
2665 | folio_put(fbatch.folios[i]); |
2666 | folio_batch_init(&fbatch); |
2667 | } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error); |
2668 | |
2669 | file_accessed(filp); |
2670 | ra->prev_pos = last_pos; |
2671 | return already_read ? already_read : error; |
2672 | } |
2673 | EXPORT_SYMBOL_GPL(filemap_read); |
2674 | |
2675 | int kiocb_write_and_wait(struct kiocb *iocb, size_t count) |
2676 | { |
2677 | struct address_space *mapping = iocb->ki_filp->f_mapping; |
2678 | loff_t pos = iocb->ki_pos; |
2679 | loff_t end = pos + count - 1; |
2680 | |
2681 | if (iocb->ki_flags & IOCB_NOWAIT) { |
2682 | if (filemap_range_needs_writeback(mapping, pos, end)) |
2683 | return -EAGAIN; |
2684 | return 0; |
2685 | } |
2686 | |
2687 | return filemap_write_and_wait_range(mapping, pos, end); |
2688 | } |
2689 | EXPORT_SYMBOL_GPL(kiocb_write_and_wait); |
2690 | |
2691 | int kiocb_invalidate_pages(struct kiocb *iocb, size_t count) |
2692 | { |
2693 | struct address_space *mapping = iocb->ki_filp->f_mapping; |
2694 | loff_t pos = iocb->ki_pos; |
2695 | loff_t end = pos + count - 1; |
2696 | int ret; |
2697 | |
2698 | if (iocb->ki_flags & IOCB_NOWAIT) { |
2699 | /* we could block if there are any pages in the range */ |
2700 | if (filemap_range_has_page(mapping, pos, end)) |
2701 | return -EAGAIN; |
2702 | } else { |
2703 | ret = filemap_write_and_wait_range(mapping, pos, end); |
2704 | if (ret) |
2705 | return ret; |
2706 | } |
2707 | |
2708 | /* |
2709 | * After a write we want buffered reads to be sure to go to disk to get |
2710 | * the new data. We invalidate clean cached pages from the region we're |
2711 | * about to write. We do this *before* the write so that we can return |
2712 | * without clobbering -EIOCBQUEUED from ->direct_IO(). |
2713 | */ |
2714 | return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, |
2715 | end >> PAGE_SHIFT); |
2716 | } |
2717 | EXPORT_SYMBOL_GPL(kiocb_invalidate_pages); |
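
/*
 * Sketch of a direct-write prologue, in the spirit of the generic
 * direct-I/O path: flush and drop overlapping pagecache before handing
 * the request to ->direct_IO(). The surrounding function is hypothetical.
 */
static ssize_t __maybe_unused sketch_dio_write_prologue(struct kiocb *iocb, size_t count)
{
	int ret;

	ret = kiocb_invalidate_pages(iocb, count);
	if (ret)
		return ret;	/* -EAGAIN for IOCB_NOWAIT, or a writeback error */

	/* ... issue the direct I/O here ... */
	return 0;
}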
2718 | |
2719 | /** |
2720 | * generic_file_read_iter - generic filesystem read routine |
2721 | * @iocb: kernel I/O control block |
2722 | * @iter: destination for the data read |
2723 | * |
2724 | * This is the "read_iter()" routine for all filesystems |
2725 | * that can use the page cache directly. |
2726 | * |
2727 | * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall |
2728 | * be returned when no data can be read without waiting for I/O requests |
2729 | * to complete; it doesn't prevent readahead. |
2730 | * |
2731 | * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O |
2732 | * requests shall be made for the read or for readahead. When no data |
2733 | * can be read, -EAGAIN shall be returned. When readahead would be |
2734 | * triggered, a partial, possibly empty read shall be returned. |
2735 | * |
2736 | * Return: |
2737 | * * number of bytes copied, even for partial reads |
2738 | * * negative error code (or 0 if IOCB_NOIO) if nothing was read |
2739 | */ |
2740 | ssize_t |
2741 | generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) |
2742 | { |
2743 | size_t count = iov_iter_count(iter); |
2744 | ssize_t retval = 0; |
2745 | |
2746 | if (!count) |
2747 | return 0; /* skip atime */ |
2748 | |
2749 | if (iocb->ki_flags & IOCB_DIRECT) { |
2750 | struct file *file = iocb->ki_filp; |
2751 | struct address_space *mapping = file->f_mapping; |
2752 | struct inode *inode = mapping->host; |
2753 | |
2754 | retval = kiocb_write_and_wait(iocb, count); |
2755 | if (retval < 0) |
2756 | return retval; |
2757 | file_accessed(file); |
2758 | |
2759 | retval = mapping->a_ops->direct_IO(iocb, iter); |
2760 | if (retval >= 0) { |
2761 | iocb->ki_pos += retval; |
2762 | count -= retval; |
2763 | } |
2764 | if (retval != -EIOCBQUEUED) |
2765 | iov_iter_revert(iter, count - iov_iter_count(iter)); |
2766 | |
2767 | /* |
2768 | * Btrfs can have a short DIO read if we encounter |
2769 | * compressed extents, so if there was an error, or if |
2770 | * we've already read everything we wanted to, or if |
2771 | * there was a short read because we hit EOF, go ahead |
2772 | * and return. Otherwise fallthrough to buffered io for |
2773 | * the rest of the read. Buffered reads will not work for |
2774 | * DAX files, so don't bother trying. |
2775 | */ |
2776 | if (retval < 0 || !count || IS_DAX(inode)) |
2777 | return retval; |
2778 | if (iocb->ki_pos >= i_size_read(inode)) |
2779 | return retval; |
2780 | } |
2781 | |
2782 | return filemap_read(iocb, iter, retval); |
2783 | } |
2784 | EXPORT_SYMBOL(generic_file_read_iter); |
2785 | |
2786 | /* |
2787 | * Splice subpages from a folio into a pipe. |
2788 | */ |
2789 | size_t splice_folio_into_pipe(struct pipe_inode_info *pipe, |
2790 | struct folio *folio, loff_t fpos, size_t size) |
2791 | { |
2792 | struct page *page; |
2793 | size_t spliced = 0, offset = offset_in_folio(folio, fpos); |
2794 | |
2795 | page = folio_page(folio, offset / PAGE_SIZE); |
2796 | size = min(size, folio_size(folio) - offset); |
2797 | offset %= PAGE_SIZE; |
2798 | |
2799 | while (spliced < size && |
2800 | !pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { |
2801 | struct pipe_buffer *buf = pipe_head_buf(pipe); |
2802 | size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced); |
2803 | |
2804 | *buf = (struct pipe_buffer) { |
2805 | .ops = &page_cache_pipe_buf_ops, |
2806 | .page = page, |
2807 | .offset = offset, |
2808 | .len = part, |
2809 | }; |
2810 | folio_get(folio); |
2811 | pipe->head++; |
2812 | page++; |
2813 | spliced += part; |
2814 | offset = 0; |
2815 | } |
2816 | |
2817 | return spliced; |
2818 | } |
2819 | |
2820 | /** |
2821 | * filemap_splice_read - Splice data from a file's pagecache into a pipe |
2822 | * @in: The file to read from |
2823 | * @ppos: Pointer to the file position to read from |
2824 | * @pipe: The pipe to splice into |
2825 | * @len: The amount to splice |
2826 | * @flags: The SPLICE_F_* flags |
2827 | * |
2828 | * This function gets folios from a file's pagecache and splices them into the |
2829 | * pipe. Readahead will be called as necessary to fill more folios. This may |
2830 | * be used for blockdevs also. |
2831 | * |
2832 | * Return: On success, the number of bytes read will be returned and *@ppos |
2833 | * will be updated if appropriate; 0 will be returned if there is no more data |
2834 | * to be read; -EAGAIN will be returned if the pipe had no space, and some |
2835 | * other negative error code will be returned on error. A short read may occur |
2836 | * if the pipe has insufficient space, we reach the end of the data or we hit a |
2837 | * hole. |
2838 | */ |
2839 | ssize_t filemap_splice_read(struct file *in, loff_t *ppos, |
2840 | struct pipe_inode_info *pipe, |
2841 | size_t len, unsigned int flags) |
2842 | { |
2843 | struct folio_batch fbatch; |
2844 | struct kiocb iocb; |
2845 | size_t total_spliced = 0, used, npages; |
2846 | loff_t isize, end_offset; |
2847 | bool writably_mapped; |
2848 | int i, error = 0; |
2849 | |
2850 | if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes)) |
2851 | return 0; |
2852 | |
2853 | init_sync_kiocb(&iocb, in); |
2854 | iocb.ki_pos = *ppos; |
2855 | |
2856 | /* Work out how much data we can actually add into the pipe */ |
2857 | used = pipe_occupancy(pipe->head, pipe->tail); |
2858 | npages = max_t(ssize_t, pipe->max_usage - used, 0); |
2859 | len = min_t(size_t, len, npages * PAGE_SIZE); |
2860 | |
2861 | folio_batch_init(&fbatch); |
2862 | |
2863 | do { |
2864 | cond_resched(); |
2865 | |
2866 | if (*ppos >= i_size_read(in->f_mapping->host)) |
2867 | break; |
2868 | |
2869 | iocb.ki_pos = *ppos; |
2870 | error = filemap_get_pages(&iocb, len, &fbatch, true); |
2871 | if (error < 0) |
2872 | break; |
2873 | |
2874 | /* |
2875 | * i_size must be checked after we know the pages are Uptodate. |
2876 | * |
2877 | * Checking i_size after the check allows us to calculate |
2878 | * the correct value for "nr", which means the zero-filled |
2879 | * part of the page is not copied back to userspace (unless |
2880 | * another truncate extends the file - this is desired though). |
2881 | */ |
2882 | isize = i_size_read(in->f_mapping->host); |
2883 | if (unlikely(*ppos >= isize)) |
2884 | break; |
2885 | end_offset = min_t(loff_t, isize, *ppos + len); |
2886 | |
2887 | /* |
2888 | * Once we start copying data, we don't want to be touching any |
2889 | * cachelines that might be contended: |
2890 | */ |
2891 | writably_mapped = mapping_writably_mapped(in->f_mapping); |
2892 | |
2893 | for (i = 0; i < folio_batch_count(&fbatch); i++) { |
2894 | struct folio *folio = fbatch.folios[i]; |
2895 | size_t n; |
2896 | |
2897 | if (folio_pos(folio) >= end_offset) |
2898 | goto out; |
2899 | folio_mark_accessed(folio); |
2900 | |
2901 | /* |
2902 | * If users can be writing to this folio using arbitrary |
2903 | * virtual addresses, take care of potential aliasing |
2904 | * before reading the folio on the kernel side. |
2905 | */ |
2906 | if (writably_mapped) |
2907 | flush_dcache_folio(folio); |
2908 | |
2909 | n = min_t(loff_t, len, isize - *ppos); |
2910 | n = splice_folio_into_pipe(pipe, folio, *ppos, n); |
2911 | if (!n) |
2912 | goto out; |
2913 | len -= n; |
2914 | total_spliced += n; |
2915 | *ppos += n; |
2916 | in->f_ra.prev_pos = *ppos; |
2917 | if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) |
2918 | goto out; |
2919 | } |
2920 | |
2921 | folio_batch_release(&fbatch); |
2922 | } while (len); |
2923 | |
2924 | out: |
2925 | folio_batch_release(&fbatch); |
2926 | file_accessed(in); |
2927 | |
2928 | return total_spliced ? total_spliced : error; |
2929 | } |
2930 | EXPORT_SYMBOL(filemap_splice_read); |
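
/*
 * Sketch: how a pagecache-backed filesystem commonly wires these helpers
 * into its file_operations. The structure below is illustrative; consult
 * the simple in-tree filesystems for real instances.
 */
static const struct file_operations sketch_file_operations __maybe_unused = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.splice_read	= filemap_splice_read,
	.mmap		= generic_file_mmap,
};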
2931 | |
2932 | static inline loff_t folio_seek_hole_data(struct xa_state *xas, |
2933 | struct address_space *mapping, struct folio *folio, |
2934 | loff_t start, loff_t end, bool seek_data) |
2935 | { |
2936 | const struct address_space_operations *ops = mapping->a_ops; |
2937 | size_t offset, bsz = i_blocksize(mapping->host); |
2938 | |
2939 | if (xa_is_value(folio) || folio_test_uptodate(folio)) |
2940 | return seek_data ? start : end; |
2941 | if (!ops->is_partially_uptodate) |
2942 | return seek_data ? end : start; |
2943 | |
2944 | xas_pause(xas); |
2945 | rcu_read_unlock(); |
2946 | folio_lock(folio); |
2947 | if (unlikely(folio->mapping != mapping)) |
2948 | goto unlock; |
2949 | |
2950 | offset = offset_in_folio(folio, start) & ~(bsz - 1); |
2951 | |
2952 | do { |
2953 | if (ops->is_partially_uptodate(folio, offset, bsz) == |
2954 | seek_data) |
2955 | break; |
2956 | start = (start + bsz) & ~(bsz - 1); |
2957 | offset += bsz; |
2958 | } while (offset < folio_size(folio)); |
2959 | unlock: |
2960 | folio_unlock(folio); |
2961 | rcu_read_lock(); |
2962 | return start; |
2963 | } |
2964 | |
2965 | static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio) |
2966 | { |
2967 | if (xa_is_value(folio)) |
2968 | return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index); |
2969 | return folio_size(folio); |
2970 | } |
2971 | |
2972 | /** |
2973 | * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache. |
2974 | * @mapping: Address space to search. |
2975 | * @start: First byte to consider. |
2976 | * @end: Limit of search (exclusive). |
2977 | * @whence: Either SEEK_HOLE or SEEK_DATA. |
2978 | * |
2979 | * If the page cache knows which blocks contain holes and which blocks |
2980 | * contain data, your filesystem can use this function to implement |
2981 | * SEEK_HOLE and SEEK_DATA. This is useful for filesystems which are |
2982 | * entirely memory-based such as tmpfs, and filesystems which support |
2983 | * unwritten extents. |
2984 | * |
2985 | * Return: The requested offset on success, or -ENXIO if @whence specifies |
2986 | * SEEK_DATA and there is no data after @start. There is an implicit hole |
2987 | * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start |
2988 | * and @end contain data. |
2989 | */ |
2990 | loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start, |
2991 | loff_t end, int whence) |
2992 | { |
2993 | XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); |
2994 | pgoff_t max = (end - 1) >> PAGE_SHIFT; |
2995 | bool seek_data = (whence == SEEK_DATA); |
2996 | struct folio *folio; |
2997 | |
2998 | if (end <= start) |
2999 | return -ENXIO; |
3000 | |
3001 | rcu_read_lock(); |
3002 | while ((folio = find_get_entry(&xas, max, XA_PRESENT))) { |
3003 | loff_t pos = (u64)xas.xa_index << PAGE_SHIFT; |
3004 | size_t seek_size; |
3005 | |
3006 | if (start < pos) { |
3007 | if (!seek_data) |
3008 | goto unlock; |
3009 | start = pos; |
3010 | } |
3011 | |
3012 | seek_size = seek_folio_size(&xas, folio); |
3013 | pos = round_up((u64)pos + 1, seek_size); |
3014 | start = folio_seek_hole_data(&xas, mapping, folio, start, pos, |
3015 | seek_data); |
3016 | if (start < pos) |
3017 | goto unlock; |
3018 | if (start >= end) |
3019 | break; |
3020 | if (seek_size > PAGE_SIZE) |
3021 | xas_set(&xas, pos >> PAGE_SHIFT); |
3022 | if (!xa_is_value(folio)) |
3023 | folio_put(folio); |
3024 | } |
3025 | if (seek_data) |
3026 | start = -ENXIO; |
3027 | unlock: |
3028 | rcu_read_unlock(); |
3029 | if (folio && !xa_is_value(folio)) |
3030 | folio_put(folio); |
3031 | if (start > end) |
3032 | return end; |
3033 | return start; |
3034 | } |
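
/*
 * Sketch of an llseek helper for a memory-backed filesystem that lets the
 * page cache answer SEEK_DATA/SEEK_HOLE, roughly in the spirit of shmem.
 * It is illustrative and skips the offset validation and locking a real
 * implementation would need.
 */
static loff_t __maybe_unused sketch_seek_hole_data(struct file *file, loff_t offset, int whence)
{
	struct address_space *mapping = file->f_mapping;
	loff_t end = i_size_read(mapping->host);

	if (whence != SEEK_DATA && whence != SEEK_HOLE)
		return -EINVAL;

	return mapping_seek_hole_data(mapping, offset, end, whence);
}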
3035 | |
3036 | #ifdef CONFIG_MMU |
3037 | #define MMAP_LOTSAMISS (100) |
3038 | /* |
3039 | * lock_folio_maybe_drop_mmap - lock the folio, possibly dropping the mmap_lock |
3040 | * @vmf - the vm_fault for this fault. |
3041 | * @folio - the folio to lock. |
3042 | * @fpin - the pointer to the file we may pin (or is already pinned). |
3043 | * |
3044 | * This works similarly to __folio_lock_or_retry() in that it can drop the |
3045 | * mmap_lock. It differs in that it actually returns the folio locked |
3046 | * if it returns 1 and 0 if it couldn't lock the folio. If we did have |
3047 | * to drop the mmap_lock then fpin will point to the pinned file and |
3048 | * needs to be fput()'ed at a later point. |
3049 | */ |
3050 | static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio, |
3051 | struct file **fpin) |
3052 | { |
3053 | if (folio_trylock(folio)) |
3054 | return 1; |
3055 | |
3056 | /* |
3057 | * NOTE! This will make us return with VM_FAULT_RETRY, but with |
3058 | * the fault lock still held. That's how FAULT_FLAG_RETRY_NOWAIT |
3059 | * is supposed to work. We have way too many special cases.. |
3060 | */ |
3061 | if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) |
3062 | return 0; |
3063 | |
3064 | *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); |
3065 | if (vmf->flags & FAULT_FLAG_KILLABLE) { |
3066 | if (__folio_lock_killable(folio)) { |
3067 | /* |
3068 | * We didn't have the right flags to drop the |
3069 | * fault lock, but all fault_handlers only check |
3070 | * for fatal signals if we return VM_FAULT_RETRY, |
3071 | * so we need to drop the fault lock here and |
3072 | * return 0 if we don't have a fpin. |
3073 | */ |
3074 | if (*fpin == NULL) |
3075 | release_fault_lock(vmf); |
3076 | return 0; |
3077 | } |
3078 | } else |
3079 | __folio_lock(folio); |
3080 | |
3081 | return 1; |
3082 | } |
3083 | |
3084 | /* |
3085 | * Synchronous readahead happens when we don't even find a page in the page |
3086 | * cache at all. We don't want to perform IO under the mmap sem, so if we have |
3087 | * to drop the mmap sem we return the file that was pinned in order for us to do |
3088 | * that. If we didn't pin a file then we return NULL. The file that is |
3089 | * returned needs to be fput()'ed when we're done with it. |
3090 | */ |
3091 | static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) |
3092 | { |
3093 | struct file *file = vmf->vma->vm_file; |
3094 | struct file_ra_state *ra = &file->f_ra; |
3095 | struct address_space *mapping = file->f_mapping; |
3096 | DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); |
3097 | struct file *fpin = NULL; |
3098 | unsigned long vm_flags = vmf->vma->vm_flags; |
3099 | unsigned int mmap_miss; |
3100 | |
3101 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
3102 | /* Use the readahead code, even if readahead is disabled */ |
3103 | if (vm_flags & VM_HUGEPAGE) { |
3104 | fpin = maybe_unlock_mmap_for_io(vmf, fpin); |
3105 | ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1); |
3106 | ra->size = HPAGE_PMD_NR; |
3107 | /* |
3108 | * Fetch two PMD folios, so we get the chance to actually |
3109 | * readahead, unless we've been told not to. |
3110 | */ |
3111 | if (!(vm_flags & VM_RAND_READ)) |
3112 | ra->size *= 2; |
3113 | ra->async_size = HPAGE_PMD_NR; |
3114 | page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER); |
3115 | return fpin; |
3116 | } |
3117 | #endif |
3118 | |
3119 | /* If we don't want any read-ahead, don't bother */ |
3120 | if (vm_flags & VM_RAND_READ) |
3121 | return fpin; |
3122 | if (!ra->ra_pages) |
3123 | return fpin; |
3124 | |
3125 | if (vm_flags & VM_SEQ_READ) { |
3126 | fpin = maybe_unlock_mmap_for_io(vmf, fpin); |
3127 | page_cache_sync_ra(&ractl, ra->ra_pages); |
3128 | return fpin; |
3129 | } |
3130 | |
3131 | /* Avoid banging the cache line if not needed */ |
3132 | mmap_miss = READ_ONCE(ra->mmap_miss); |
3133 | if (mmap_miss < MMAP_LOTSAMISS * 10) |
3134 | WRITE_ONCE(ra->mmap_miss, ++mmap_miss); |
3135 | |
3136 | /* |
3137 | * Do we miss much more than hit in this file? If so, |
3138 | * stop bothering with read-ahead. It will only hurt. |
3139 | */ |
3140 | if (mmap_miss > MMAP_LOTSAMISS) |
3141 | return fpin; |
3142 | |
3143 | /* |
3144 | * mmap read-around |
3145 | */ |
3146 | fpin = maybe_unlock_mmap_for_io(vmf, fpin); |
3147 | ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2); |
3148 | ra->size = ra->ra_pages; |
3149 | ra->async_size = ra->ra_pages / 4; |
3150 | ractl._index = ra->start; |
3151 | page_cache_ra_order(&ractl, ra, 0); |
3152 | return fpin; |
3153 | } |
3154 | |
3155 | /* |
3156 | * Asynchronous readahead happens when we find the page and PG_readahead, |
3157 | * so we want to possibly extend the readahead further. We return the file that |
3158 | * was pinned if we have to drop the mmap_lock in order to do IO. |
3159 | */ |
3160 | static struct file *do_async_mmap_readahead(struct vm_fault *vmf, |
3161 | struct folio *folio) |
3162 | { |
3163 | struct file *file = vmf->vma->vm_file; |
3164 | struct file_ra_state *ra = &file->f_ra; |
3165 | DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff); |
3166 | struct file *fpin = NULL; |
3167 | unsigned int mmap_miss; |
3168 | |
3169 | /* If we don't want any read-ahead, don't bother */ |
3170 | if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) |
3171 | return fpin; |
3172 | |
3173 | mmap_miss = READ_ONCE(ra->mmap_miss); |
3174 | if (mmap_miss) |
3175 | WRITE_ONCE(ra->mmap_miss, --mmap_miss); |
3176 | |
3177 | if (folio_test_readahead(folio)) { |
3178 | fpin = maybe_unlock_mmap_for_io(vmf, fpin); |
3179 | page_cache_async_ra(&ractl, folio, ra->ra_pages); |
3180 | } |
3181 | return fpin; |
3182 | } |
3183 | |
3184 | static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf) |
3185 | { |
3186 | struct vm_area_struct *vma = vmf->vma; |
3187 | vm_fault_t ret = 0; |
3188 | pte_t *ptep; |
3189 | |
3190 | /* |
3191 | * We might have COW'ed a pagecache folio and might now have an mlocked |
3192 | * anon folio mapped. The original pagecache folio is not mlocked and |
3193 | * might have been evicted. During a read+clear/modify/write update of |
3194 | * the PTE, such as done in do_numa_page()/change_pte_range(), we |
3195 | * temporarily clear the PTE under PT lock and might detect it here as |
3196 | * "none" when not holding the PT lock. |
3197 | * |
3198 | * Not rechecking the PTE under PT lock could result in an unexpected |
3199 | * major fault in an mlock'ed region. Recheck only for this special |
3200 | * scenario while holding the PT lock, to not degrade non-mlocked |
3201 | * scenarios. Recheck the PTE without PT lock firstly, thereby reducing |
3202 | * the number of times we hold PT lock. |
3203 | */ |
3204 | if (!(vma->vm_flags & VM_LOCKED)) |
3205 | return 0; |
3206 | |
3207 | if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)) |
3208 | return 0; |
3209 | |
3210 | ptep = pte_offset_map(vmf->pmd, vmf->address); |
3211 | if (unlikely(!ptep)) |
3212 | return VM_FAULT_NOPAGE; |
3213 | |
3214 | if (unlikely(!pte_none(ptep_get_lockless(ptep)))) { |
3215 | ret = VM_FAULT_NOPAGE; |
3216 | } else { |
3217 | spin_lock(vmf->ptl); |
3218 | if (unlikely(!pte_none(ptep_get(ptep)))) |
3219 | ret = VM_FAULT_NOPAGE; |
3220 | spin_unlock(vmf->ptl); |
3221 | } |
3222 | pte_unmap(ptep); |
3223 | return ret; |
3224 | } |
3225 | |
3226 | /** |
3227 | * filemap_fault - read in file data for page fault handling |
3228 | * @vmf: struct vm_fault containing details of the fault |
3229 | * |
3230 | * filemap_fault() is invoked via the vma operations vector for a |
3231 | * mapped memory region to read in file data during a page fault. |
3232 | * |
3233 | * The goto's are kind of ugly, but this streamlines the normal case of having |
3234 | * it in the page cache, and handles the special cases reasonably without |
3235 | * having a lot of duplicated code. |
3236 | * |
3237 | * vma->vm_mm->mmap_lock must be held on entry. |
3238 | * |
3239 | * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock |
3240 | * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap(). |
3241 | * |
3242 | * If our return value does not have VM_FAULT_RETRY set, the mmap_lock |
3243 | * has not been released. |
3244 | * |
3245 | * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. |
3246 | * |
3247 | * Return: bitwise-OR of %VM_FAULT_ codes. |
3248 | */ |
3249 | vm_fault_t filemap_fault(struct vm_fault *vmf) |
3250 | { |
3251 | int error; |
3252 | struct file *file = vmf->vma->vm_file; |
3253 | struct file *fpin = NULL; |
3254 | struct address_space *mapping = file->f_mapping; |
3255 | struct inode *inode = mapping->host; |
3256 | pgoff_t max_idx, index = vmf->pgoff; |
3257 | struct folio *folio; |
3258 | vm_fault_t ret = 0; |
3259 | bool mapping_locked = false; |
3260 | |
3261 | max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); |
3262 | if (unlikely(index >= max_idx)) |
3263 | return VM_FAULT_SIGBUS; |
3264 | |
3265 | /* |
3266 | * Do we have something in the page cache already? |
3267 | */ |
3268 | folio = filemap_get_folio(mapping, index); |
3269 | if (likely(!IS_ERR(folio))) { |
3270 | /* |
3271 | * We found the page, so try async readahead before waiting for |
3272 | * the lock. |
3273 | */ |
3274 | if (!(vmf->flags & FAULT_FLAG_TRIED)) |
3275 | fpin = do_async_mmap_readahead(vmf, folio); |
3276 | if (unlikely(!folio_test_uptodate(folio))) { |
3277 | filemap_invalidate_lock_shared(mapping); |
3278 | mapping_locked = true; |
3279 | } |
3280 | } else { |
3281 | ret = filemap_fault_recheck_pte_none(vmf); |
3282 | if (unlikely(ret)) |
3283 | return ret; |
3284 | |
3285 | /* No page in the page cache at all */ |
3286 | count_vm_event(PGMAJFAULT); |
3287 | count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); |
3288 | ret = VM_FAULT_MAJOR; |
3289 | fpin = do_sync_mmap_readahead(vmf); |
3290 | retry_find: |
3291 | /* |
3292 | * See comment in filemap_create_folio() why we need |
3293 | * invalidate_lock |
3294 | */ |
3295 | if (!mapping_locked) { |
3296 | filemap_invalidate_lock_shared(mapping); |
3297 | mapping_locked = true; |
3298 | } |
3299 | folio = __filemap_get_folio(mapping, index, |
3300 | FGP_CREAT|FGP_FOR_MMAP, |
3301 | vmf->gfp_mask); |
3302 | if (IS_ERR(folio)) { |
3303 | if (fpin) |
3304 | goto out_retry; |
3305 | filemap_invalidate_unlock_shared(mapping); |
3306 | return VM_FAULT_OOM; |
3307 | } |
3308 | } |
3309 | |
3310 | if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin)) |
3311 | goto out_retry; |
3312 | |
3313 | /* Did it get truncated? */ |
3314 | if (unlikely(folio->mapping != mapping)) { |
3315 | folio_unlock(folio); |
3316 | folio_put(folio); |
3317 | goto retry_find; |
3318 | } |
3319 | VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); |
3320 | |
3321 | /* |
3322 | * We have a locked folio in the page cache, now we need to check |
3323 | * that it's up-to-date. If not, it is going to be due to an error, |
3324 | * or because readahead was otherwise unable to retrieve it. |
3325 | */ |
3326 | if (unlikely(!folio_test_uptodate(folio))) { |
3327 | /* |
3328 | * If the invalidate lock is not held, the folio was in cache |
3329 | * and uptodate and now it is not. Strange but possible since we |
3330 | * didn't hold the page lock all the time. Let's drop |
3331 | * everything, get the invalidate lock and try again. |
3332 | */ |
3333 | if (!mapping_locked) { |
3334 | folio_unlock(folio); |
3335 | folio_put(folio); |
3336 | goto retry_find; |
3337 | } |
3338 | |
3339 | /* |
3340 | * OK, the folio is really not uptodate. This can be because the |
3341 | * VMA has the VM_RAND_READ flag set, or because an error |
3342 | * arose. Let's read it in directly. |
3343 | */ |
3344 | goto page_not_uptodate; |
3345 | } |
3346 | |
3347 | /* |
3348 | * We've made it this far and we had to drop our mmap_lock, now is the |
3349 | * time to return to the upper layer and have it re-find the vma and |
3350 | * redo the fault. |
3351 | */ |
3352 | if (fpin) { |
3353 | folio_unlock(folio); |
3354 | goto out_retry; |
3355 | } |
3356 | if (mapping_locked) |
3357 | filemap_invalidate_unlock_shared(mapping); |
3358 | |
3359 | /* |
3360 | * Found the page and have a reference on it. |
3361 | * We must recheck i_size under page lock. |
3362 | */ |
3363 | max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); |
3364 | if (unlikely(index >= max_idx)) { |
3365 | folio_unlock(folio); |
3366 | folio_put(folio); |
3367 | return VM_FAULT_SIGBUS; |
3368 | } |
3369 | |
3370 | vmf->page = folio_file_page(folio, index); |
3371 | return ret | VM_FAULT_LOCKED; |
3372 | |
3373 | page_not_uptodate: |
3374 | /* |
3375 | * Umm, take care of errors if the page isn't up-to-date. |
3376 | * Try to re-read it _once_. We do this synchronously, |
3377 | * because there really aren't any performance issues here |
3378 | * and we need to check for errors. |
3379 | */ |
3380 | fpin = maybe_unlock_mmap_for_io(vmf, fpin); |
3381 | error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); |
3382 | if (fpin) |
3383 | goto out_retry; |
3384 | folio_put(folio); |
3385 | |
3386 | if (!error || error == AOP_TRUNCATED_PAGE) |
3387 | goto retry_find; |
3388 | filemap_invalidate_unlock_shared(mapping); |
3389 | |
3390 | return VM_FAULT_SIGBUS; |
3391 | |
3392 | out_retry: |
3393 | /* |
3394 | * We dropped the mmap_lock, we need to return to the fault handler to |
3395 | * re-find the vma and come back and find our hopefully still populated |
3396 | * page. |
3397 | */ |
3398 | if (!IS_ERR(folio)) |
3399 | folio_put(folio); |
3400 | if (mapping_locked) |
3401 | filemap_invalidate_unlock_shared(mapping); |
3402 | if (fpin) |
3403 | fput(fpin); |
3404 | return ret | VM_FAULT_RETRY; |
3405 | } |
3406 | EXPORT_SYMBOL(filemap_fault); |
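/*
 * Illustrative sketch, not part of this file: filemap_fault() and
 * filemap_map_pages() are meant to be reused directly from a filesystem's
 * own vm_operations_struct when only ->page_mkwrite needs fs-specific work
 * (block allocation, journalling, etc.). The names example_file_vm_ops and
 * example_page_mkwrite are hypothetical; filesystems with no such needs can
 * simply use generic_file_vm_ops below.
 *
 *	static const struct vm_operations_struct example_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= example_page_mkwrite,
 *	};
 */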
3407 | |
3408 | static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio, |
3409 | pgoff_t start) |
3410 | { |
3411 | struct mm_struct *mm = vmf->vma->vm_mm; |
3412 | |
3413 | /* Huge page is mapped? No need to proceed. */ |
3414 | if (pmd_trans_huge(*vmf->pmd)) { |
3415 | folio_unlock(folio); |
3416 | folio_put(folio); |
3417 | return true; |
3418 | } |
3419 | |
3420 | if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) { |
3421 | struct page *page = folio_file_page(folio, start); |
3422 | vm_fault_t ret = do_set_pmd(vmf, page); |
3423 | if (!ret) { |
3424 | /* The page is mapped successfully, reference consumed. */ |
3425 | folio_unlock(folio); |
3426 | return true; |
3427 | } |
3428 | } |
3429 | |
3430 | if (pmd_none(*vmf->pmd) && vmf->prealloc_pte) |
3431 | pmd_install(mm, vmf->pmd, &vmf->prealloc_pte); |
3432 | |
3433 | return false; |
3434 | } |
3435 | |
3436 | static struct folio *next_uptodate_folio(struct xa_state *xas, |
3437 | struct address_space *mapping, pgoff_t end_pgoff) |
3438 | { |
3439 | struct folio *folio = xas_next_entry(xas, end_pgoff); |
3440 | unsigned long max_idx; |
3441 | |
3442 | do { |
3443 | if (!folio) |
3444 | return NULL; |
3445 | if (xas_retry(xas, folio)) |
3446 | continue; |
3447 | if (xa_is_value(folio)) |
3448 | continue; |
3449 | if (folio_test_locked(folio)) |
3450 | continue; |
3451 | if (!folio_try_get_rcu(folio)) |
3452 | continue; |
3453 | /* Has the page moved or been split? */ |
3454 | if (unlikely(folio != xas_reload(xas))) |
3455 | goto skip; |
3456 | if (!folio_test_uptodate(folio) || folio_test_readahead(folio)) |
3457 | goto skip; |
3458 | if (!folio_trylock(folio)) |
3459 | goto skip; |
3460 | if (folio->mapping != mapping) |
3461 | goto unlock; |
3462 | if (!folio_test_uptodate(folio)) |
3463 | goto unlock; |
3464 | max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); |
3465 | if (xas->xa_index >= max_idx) |
3466 | goto unlock; |
3467 | return folio; |
3468 | unlock: |
3469 | folio_unlock(folio); |
3470 | skip: |
3471 | folio_put(folio); |
3472 | } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL); |
3473 | |
3474 | return NULL; |
3475 | } |
3476 | |
3477 | /* |
3478 | * Map page range [start_page, start_page + nr_pages) of folio. |
3479 | * start_page is obtained from start via folio_page(folio, start). |
3480 | */ |
3481 | static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, |
3482 | struct folio *folio, unsigned long start, |
3483 | unsigned long addr, unsigned int nr_pages, |
3484 | unsigned int *mmap_miss) |
3485 | { |
3486 | vm_fault_t ret = 0; |
3487 | struct page *page = folio_page(folio, start); |
3488 | unsigned int count = 0; |
3489 | pte_t *old_ptep = vmf->pte; |
3490 | |
3491 | do { |
3492 | if (PageHWPoison(page + count)) |
3493 | goto skip; |
3494 | |
3495 | (*mmap_miss)++; |
3496 | |
3497 | /* |
3498 | * NOTE: If there're PTE markers, we'll leave them to be |
3499 | * handled in the specific fault path, and it'll prohibit the |
3500 | * fault-around logic. |
3501 | */ |
3502 | if (!pte_none(ptep_get(&vmf->pte[count]))) |
3503 | goto skip; |
3504 | |
3505 | count++; |
3506 | continue; |
3507 | skip: |
3508 | if (count) { |
3509 | set_pte_range(vmf, folio, page, count, addr); |
3510 | folio_ref_add(folio, count); |
3511 | if (in_range(vmf->address, addr, count * PAGE_SIZE)) |
3512 | ret = VM_FAULT_NOPAGE; |
3513 | } |
3514 | |
3515 | count++; |
3516 | page += count; |
3517 | vmf->pte += count; |
3518 | addr += count * PAGE_SIZE; |
3519 | count = 0; |
3520 | } while (--nr_pages > 0); |
3521 | |
3522 | if (count) { |
3523 | set_pte_range(vmf, folio, page, count, addr); |
3524 | folio_ref_add(folio, count); |
3525 | if (in_range(vmf->address, addr, count * PAGE_SIZE)) |
3526 | ret = VM_FAULT_NOPAGE; |
3527 | } |
3528 | |
3529 | vmf->pte = old_ptep; |
3530 | |
3531 | return ret; |
3532 | } |
3533 | |
3534 | static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf, |
3535 | struct folio *folio, unsigned long addr, |
3536 | unsigned int *mmap_miss) |
3537 | { |
3538 | vm_fault_t ret = 0; |
3539 | struct page *page = &folio->page; |
3540 | |
3541 | if (PageHWPoison(page)) |
3542 | return ret; |
3543 | |
3544 | (*mmap_miss)++; |
3545 | |
3546 | /* |
3547 | * NOTE: If there're PTE markers, we'll leave them to be |
3548 | * handled in the specific fault path, and it'll prohibit |
3549 | * the fault-around logic. |
3550 | */ |
3551 | if (!pte_none(ptep_get(vmf->pte))) |
3552 | return ret; |
3553 | |
3554 | if (vmf->address == addr) |
3555 | ret = VM_FAULT_NOPAGE; |
3556 | |
3557 | set_pte_range(vmf, folio, page, 1, addr); |
3558 | folio_ref_inc(folio); |
3559 | |
3560 | return ret; |
3561 | } |
3562 | |
3563 | vm_fault_t filemap_map_pages(struct vm_fault *vmf, |
3564 | pgoff_t start_pgoff, pgoff_t end_pgoff) |
3565 | { |
3566 | struct vm_area_struct *vma = vmf->vma; |
3567 | struct file *file = vma->vm_file; |
3568 | struct address_space *mapping = file->f_mapping; |
3569 | pgoff_t last_pgoff = start_pgoff; |
3570 | unsigned long addr; |
3571 | XA_STATE(xas, &mapping->i_pages, start_pgoff); |
3572 | struct folio *folio; |
3573 | vm_fault_t ret = 0; |
3574 | unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved; |
3575 | |
3576 | rcu_read_lock(); |
3577 | folio = next_uptodate_folio(&xas, mapping, end_pgoff); |
3578 | if (!folio) |
3579 | goto out; |
3580 | |
3581 | if (filemap_map_pmd(vmf, folio, start_pgoff)) { |
3582 | ret = VM_FAULT_NOPAGE; |
3583 | goto out; |
3584 | } |
3585 | |
3586 | addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT); |
3587 | vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); |
3588 | if (!vmf->pte) { |
3589 | folio_unlock(folio); |
3590 | folio_put(folio); |
3591 | goto out; |
3592 | } |
3593 | do { |
3594 | unsigned long end; |
3595 | |
3596 | addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT; |
3597 | vmf->pte += xas.xa_index - last_pgoff; |
3598 | last_pgoff = xas.xa_index; |
3599 | end = folio_next_index(folio) - 1; |
3600 | nr_pages = min(end, end_pgoff) - xas.xa_index + 1; |
3601 | |
3602 | if (!folio_test_large(folio)) |
3603 | ret |= filemap_map_order0_folio(vmf, |
3604 | folio, addr, &mmap_miss); |
3605 | else |
3606 | ret |= filemap_map_folio_range(vmf, folio, |
3607 | xas.xa_index - folio->index, addr, |
3608 | nr_pages, &mmap_miss); |
3609 | |
3610 | folio_unlock(folio); |
3611 | folio_put(folio); |
3612 | } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL); |
3613 | pte_unmap_unlock(vmf->pte, vmf->ptl); |
3614 | out: |
3615 | rcu_read_unlock(); |
3616 | |
3617 | mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss); |
3618 | if (mmap_miss >= mmap_miss_saved) |
3619 | WRITE_ONCE(file->f_ra.mmap_miss, 0); |
3620 | else |
3621 | WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss); |
3622 | |
3623 | return ret; |
3624 | } |
3625 | EXPORT_SYMBOL(filemap_map_pages); |
3626 | |
3627 | vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) |
3628 | { |
3629 | struct address_space *mapping = vmf->vma->vm_file->f_mapping; |
3630 | struct folio *folio = page_folio(vmf->page); |
3631 | vm_fault_t ret = VM_FAULT_LOCKED; |
3632 | |
3633 | sb_start_pagefault(mapping->host->i_sb); |
3634 | file_update_time(vmf->vma->vm_file); |
3635 | folio_lock(folio); |
3636 | if (folio->mapping != mapping) { |
3637 | folio_unlock(folio); |
3638 | ret = VM_FAULT_NOPAGE; |
3639 | goto out; |
3640 | } |
3641 | /* |
3642 | * We mark the folio dirty already here so that when freeze is in |
3643 | * progress, we are guaranteed that writeback during freezing will |
3644 | * see the dirty folio and writeprotect it again. |
3645 | */ |
3646 | folio_mark_dirty(folio); |
3647 | folio_wait_stable(folio); |
3648 | out: |
3649 | sb_end_pagefault(mapping->host->i_sb); |
3650 | return ret; |
3651 | } |
3652 | |
3653 | const struct vm_operations_struct generic_file_vm_ops = { |
3654 | .fault = filemap_fault, |
3655 | .map_pages = filemap_map_pages, |
3656 | .page_mkwrite = filemap_page_mkwrite, |
3657 | }; |
3658 | |
3659 | /* This is used for a general mmap of a disk file */ |
3660 | |
3661 | int generic_file_mmap(struct file *file, struct vm_area_struct *vma) |
3662 | { |
3663 | struct address_space *mapping = file->f_mapping; |
3664 | |
3665 | if (!mapping->a_ops->read_folio) |
3666 | return -ENOEXEC; |
3667 | file_accessed(file); |
3668 | vma->vm_ops = &generic_file_vm_ops; |
3669 | return 0; |
3670 | } |
3671 | |
3672 | /* |
3673 | * This is for filesystems which do not implement ->writepage. |
3674 | */ |
3675 | int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) |
3676 | { |
3677 | if (vma_is_shared_maywrite(vma)) |
3678 | return -EINVAL; |
3679 | return generic_file_mmap(file, vma); |
3680 | } |
3681 | #else |
3682 | vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) |
3683 | { |
3684 | return VM_FAULT_SIGBUS; |
3685 | } |
3686 | int generic_file_mmap(struct file *file, struct vm_area_struct *vma) |
3687 | { |
3688 | return -ENOSYS; |
3689 | } |
3690 | int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) |
3691 | { |
3692 | return -ENOSYS; |
3693 | } |
3694 | #endif /* CONFIG_MMU */ |
3695 | |
3696 | EXPORT_SYMBOL(filemap_page_mkwrite); |
3697 | EXPORT_SYMBOL(generic_file_mmap); |
3698 | EXPORT_SYMBOL(generic_file_readonly_mmap); |
3699 | |
3700 | static struct folio *do_read_cache_folio(struct address_space *mapping, |
3701 | pgoff_t index, filler_t filler, struct file *file, gfp_t gfp) |
3702 | { |
3703 | struct folio *folio; |
3704 | int err; |
3705 | |
3706 | if (!filler) |
3707 | filler = mapping->a_ops->read_folio; |
3708 | repeat: |
3709 | folio = filemap_get_folio(mapping, index); |
3710 | if (IS_ERR(folio)) { |
3711 | folio = filemap_alloc_folio(gfp, 0); |
3712 | if (!folio) |
3713 | return ERR_PTR(-ENOMEM); |
3714 | err = filemap_add_folio(mapping, folio, index, gfp); |
3715 | if (unlikely(err)) { |
3716 | folio_put(folio); |
3717 | if (err == -EEXIST) |
3718 | goto repeat; |
3719 | /* Presumably ENOMEM for xarray node */ |
3720 | return ERR_PTR(err); |
3721 | } |
3722 | |
3723 | goto filler; |
3724 | } |
3725 | if (folio_test_uptodate(folio)) |
3726 | goto out; |
3727 | |
3728 | if (!folio_trylock(folio)) { |
3729 | folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE); |
3730 | goto repeat; |
3731 | } |
3732 | |
3733 | /* Folio was truncated from mapping */ |
3734 | if (!folio->mapping) { |
3735 | folio_unlock(folio); |
3736 | folio_put(folio); |
3737 | goto repeat; |
3738 | } |
3739 | |
3740 | /* Someone else locked and filled the page in a very small window */ |
3741 | if (folio_test_uptodate(folio)) { |
3742 | folio_unlock(folio); |
3743 | goto out; |
3744 | } |
3745 | |
3746 | filler: |
3747 | err = filemap_read_folio(file, filler, folio); |
3748 | if (err) { |
3749 | folio_put(folio); |
3750 | if (err == AOP_TRUNCATED_PAGE) |
3751 | goto repeat; |
3752 | return ERR_PTR(err); |
3753 | } |
3754 | |
3755 | out: |
3756 | folio_mark_accessed(folio); |
3757 | return folio; |
3758 | } |
3759 | |
3760 | /** |
3761 | * read_cache_folio - Read into page cache, fill it if needed. |
3762 | * @mapping: The address_space to read from. |
3763 | * @index: The index to read. |
3764 | * @filler: Function to perform the read, or NULL to use aops->read_folio(). |
3765 | * @file: Passed to filler function, may be NULL if not required. |
3766 | * |
3767 | * Read one page into the page cache. If it succeeds, the folio returned |
3768 | * will contain @index, but it may not be the first page of the folio. |
3769 | * |
3770 | * If the filler function returns an error, it will be returned to the |
3771 | * caller. |
3772 | * |
3773 | * Context: May sleep. Expects mapping->invalidate_lock to be held. |
3774 | * Return: An uptodate folio on success, ERR_PTR() on failure. |
3775 | */ |
3776 | struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index, |
3777 | filler_t filler, struct file *file) |
3778 | { |
3779 | return do_read_cache_folio(mapping, index, filler, file, |
3780 | mapping_gfp_mask(mapping)); |
3781 | } |
3782 | EXPORT_SYMBOL(read_cache_folio); |
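/*
 * Illustrative sketch, not part of this file: a typical read_cache_folio()
 * caller copying a few bytes of file data out of the page cache. As the
 * kernel-doc above notes, mapping->invalidate_lock is expected to be held.
 * example_read_bytes() is hypothetical and assumes the [pos, pos + len)
 * range does not cross a page boundary, since kmap_local_folio() maps a
 * single page of the folio.
 *
 *	static int example_read_bytes(struct address_space *mapping, loff_t pos,
 *				      void *buf, size_t len)
 *	{
 *		struct folio *folio;
 *		void *kaddr;
 *
 *		folio = read_cache_folio(mapping, pos >> PAGE_SHIFT, NULL, NULL);
 *		if (IS_ERR(folio))
 *			return PTR_ERR(folio);
 *
 *		kaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
 *		memcpy(buf, kaddr, len);
 *		kunmap_local(kaddr);
 *		folio_put(folio);
 *		return 0;
 *	}
 */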
3783 | |
3784 | /** |
3785 | * mapping_read_folio_gfp - Read into page cache, using specified allocation flags. |
3786 | * @mapping: The address_space for the folio. |
3787 | * @index: The index that the allocated folio will contain. |
3788 | * @gfp: The page allocator flags to use if allocating. |
3789 | * |
3790 | * This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with |
3791 | * any new memory allocations done using the specified allocation flags. |
3792 | * |
3793 | * The most likely error from this function is EIO, but ENOMEM is |
3794 | * possible and so is EINTR. If ->read_folio returns another error, |
3795 | * that will be returned to the caller. |
3796 | * |
3797 | * The function expects mapping->invalidate_lock to be already held. |
3798 | * |
3799 | * Return: Uptodate folio on success, ERR_PTR() on failure. |
3800 | */ |
3801 | struct folio *mapping_read_folio_gfp(struct address_space *mapping, |
3802 | pgoff_t index, gfp_t gfp) |
3803 | { |
3804 | return do_read_cache_folio(mapping, index, NULL, NULL, gfp); |
3805 | } |
3806 | EXPORT_SYMBOL(mapping_read_folio_gfp); |
3807 | |
3808 | static struct page *do_read_cache_page(struct address_space *mapping, |
3809 | pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp) |
3810 | { |
3811 | struct folio *folio; |
3812 | |
3813 | folio = do_read_cache_folio(mapping, index, filler, file, gfp); |
3814 | if (IS_ERR(folio)) |
3815 | return &folio->page; |
3816 | return folio_file_page(folio, index); |
3817 | } |
3818 | |
3819 | struct page *read_cache_page(struct address_space *mapping, |
3820 | pgoff_t index, filler_t *filler, struct file *file) |
3821 | { |
3822 | return do_read_cache_page(mapping, index, filler, file, |
3823 | mapping_gfp_mask(mapping)); |
3824 | } |
3825 | EXPORT_SYMBOL(read_cache_page); |
3826 | |
3827 | /** |
3828 | * read_cache_page_gfp - read into page cache, using specified page allocation flags. |
3829 | * @mapping: the page's address_space |
3830 | * @index: the page index |
3831 | * @gfp: the page allocator flags to use if allocating |
3832 | * |
3833 | * This is the same as "read_mapping_page(mapping, index, NULL)", but with |
3834 | * any new page allocations done using the specified allocation flags. |
3835 | * |
3836 | * If the page does not get brought uptodate, return -EIO. |
3837 | * |
3838 | * The function expects mapping->invalidate_lock to be already held. |
3839 | * |
3840 | * Return: up to date page on success, ERR_PTR() on failure. |
3841 | */ |
3842 | struct page *read_cache_page_gfp(struct address_space *mapping, |
3843 | pgoff_t index, |
3844 | gfp_t gfp) |
3845 | { |
3846 | return do_read_cache_page(mapping, index, NULL, NULL, gfp); |
3847 | } |
3848 | EXPORT_SYMBOL(read_cache_page_gfp); |
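/*
 * Illustrative sketch, not part of this file: one common pattern is to
 * combine read_cache_page_gfp() with mapping_gfp_constraint() so that the
 * page allocation cannot recurse into the filesystem, e.g.:
 *
 *	page = read_cache_page_gfp(mapping, index,
 *				   mapping_gfp_constraint(mapping, ~__GFP_FS));
 */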
3849 | |
3850 | /* |
3851 | * Warn about a page cache invalidation failure during a direct I/O write. |
3852 | */ |
3853 | static void dio_warn_stale_pagecache(struct file *filp) |
3854 | { |
3855 | static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST); |
3856 | char pathname[128]; |
3857 | char *path; |
3858 | |
3859 | errseq_set(&filp->f_mapping->wb_err, -EIO); |
3860 | if (__ratelimit(&_rs)) { |
3861 | path = file_path(filp, pathname, sizeof(pathname)); |
3862 | if (IS_ERR(path)) |
3863 | path = "(unknown)"; |
3864 | pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n"); |
3865 | pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid, |
3866 | current->comm); |
3867 | } |
3868 | } |
3869 | |
3870 | void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count) |
3871 | { |
3872 | struct address_space *mapping = iocb->ki_filp->f_mapping; |
3873 | |
3874 | if (mapping->nrpages && |
3875 | invalidate_inode_pages2_range(mapping, |
3876 | iocb->ki_pos >> PAGE_SHIFT, |
3877 | (iocb->ki_pos + count - 1) >> PAGE_SHIFT)) |
3878 | dio_warn_stale_pagecache(iocb->ki_filp); |
3879 | } |
3880 | |
3881 | ssize_t |
3882 | generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) |
3883 | { |
3884 | struct address_space *mapping = iocb->ki_filp->f_mapping; |
3885 | size_t write_len = iov_iter_count(from); |
3886 | ssize_t written; |
3887 | |
3888 | /* |
3889 | * If a page can not be invalidated, return 0 to fall back |
3890 | * to buffered write. |
3891 | */ |
3892 | written = kiocb_invalidate_pages(iocb, write_len); |
3893 | if (written) { |
3894 | if (written == -EBUSY) |
3895 | return 0; |
3896 | return written; |
3897 | } |
3898 | |
3899 | written = mapping->a_ops->direct_IO(iocb, from); |
3900 | |
3901 | /* |
3902 | * Finally, try again to invalidate clean pages which might have been |
3903 | * cached by non-direct readahead, or faulted in by get_user_pages() |
3904 | * if the source of the write was an mmap'ed region of the file |
3905 | * we're writing. Either one is a pretty crazy thing to do, |
3906 | * so we don't support it 100%. If this invalidation |
3907 | * fails, tough, the write still worked... |
3908 | * |
3909 | * Most of the time we do not need this since dio_complete() will do |
3910 | * the invalidation for us. However there are some file systems that |
3911 | * do not end up with dio_complete() being called, so let's not break |
3912 | * them by removing it completely. |
3913 | * |
3914 | * Noticeable example is a blkdev_direct_IO(). |
3915 | * |
3916 | * Skip invalidation for async writes or if mapping has no pages. |
3917 | */ |
3918 | if (written > 0) { |
3919 | struct inode *inode = mapping->host; |
3920 | loff_t pos = iocb->ki_pos; |
3921 | |
3922 | kiocb_invalidate_post_direct_write(iocb, written); |
3923 | pos += written; |
3924 | write_len -= written; |
3925 | if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { |
3926 | i_size_write(inode, pos); |
3927 | mark_inode_dirty(inode); |
3928 | } |
3929 | iocb->ki_pos = pos; |
3930 | } |
3931 | if (written != -EIOCBQUEUED) |
3932 | iov_iter_revert(from, write_len - iov_iter_count(from)); |
3933 | return written; |
3934 | } |
3935 | EXPORT_SYMBOL(generic_file_direct_write); |
3936 | |
3937 | ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i) |
3938 | { |
3939 | struct file *file = iocb->ki_filp; |
3940 | loff_t pos = iocb->ki_pos; |
3941 | struct address_space *mapping = file->f_mapping; |
3942 | const struct address_space_operations *a_ops = mapping->a_ops; |
3943 | long status = 0; |
3944 | ssize_t written = 0; |
3945 | |
3946 | do { |
3947 | struct page *page; |
3948 | unsigned long offset; /* Offset into pagecache page */ |
3949 | unsigned long bytes; /* Bytes to write to page */ |
3950 | size_t copied; /* Bytes copied from user */ |
3951 | void *fsdata = NULL; |
3952 | |
3953 | offset = (pos & (PAGE_SIZE - 1)); |
3954 | bytes = min_t(unsigned long, PAGE_SIZE - offset, |
3955 | iov_iter_count(i)); |
3956 | |
3957 | again: |
3958 | /* |
3959 | * Bring in the user page that we will copy from _first_. |
3960 | * Otherwise there's a nasty deadlock on copying from the |
3961 | * same page as we're writing to, without it being marked |
3962 | * up-to-date. |
3963 | */ |
3964 | if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) { |
3965 | status = -EFAULT; |
3966 | break; |
3967 | } |
3968 | |
3969 | if (fatal_signal_pending(current)) { |
3970 | status = -EINTR; |
3971 | break; |
3972 | } |
3973 | |
3974 | status = a_ops->write_begin(file, mapping, pos, bytes, |
3975 | &page, &fsdata); |
3976 | if (unlikely(status < 0)) |
3977 | break; |
3978 | |
3979 | if (mapping_writably_mapped(mapping)) |
3980 | flush_dcache_page(page); |
3981 | |
3982 | copied = copy_page_from_iter_atomic(page, offset, bytes, i); |
3983 | flush_dcache_page(page); |
3984 | |
3985 | status = a_ops->write_end(file, mapping, pos, bytes, copied, |
3986 | page, fsdata); |
3987 | if (unlikely(status != copied)) { |
3988 | iov_iter_revert(i, copied - max(status, 0L)); |
3989 | if (unlikely(status < 0)) |
3990 | break; |
3991 | } |
3992 | cond_resched(); |
3993 | |
3994 | if (unlikely(status == 0)) { |
3995 | /* |
3996 | * A short copy made ->write_end() reject the |
3997 | * thing entirely. Might be memory poisoning |
3998 | * halfway through, might be a race with munmap, |
3999 | * might be severe memory pressure. |
4000 | */ |
4001 | if (copied) |
4002 | bytes = copied; |
4003 | goto again; |
4004 | } |
4005 | pos += status; |
4006 | written += status; |
4007 | |
4008 | balance_dirty_pages_ratelimited(mapping); |
4009 | } while (iov_iter_count(i)); |
4010 | |
4011 | if (!written) |
4012 | return status; |
4013 | iocb->ki_pos += written; |
4014 | return written; |
4015 | } |
4016 | EXPORT_SYMBOL(generic_perform_write); |
4017 | |
4018 | /** |
4019 | * __generic_file_write_iter - write data to a file |
4020 | * @iocb: IO state structure (file, offset, etc.) |
4021 | * @from: iov_iter with data to write |
4022 | * |
4023 | * This function does all the work needed for actually writing data to a |
4024 | * file. It does all basic checks, removes SUID from the file, updates |
4025 | * modification times and calls proper subroutines depending on whether we |
4026 | * do direct IO or a standard buffered write. |
4027 | * |
4028 | * It expects i_rwsem to be grabbed unless we work on a block device or similar |
4029 | * object which does not need locking at all. |
4030 | * |
4031 | * This function does *not* take care of syncing data in case of O_SYNC write. |
4032 | * A caller has to handle it. This is mainly due to the fact that we want to |
4033 | * avoid syncing under i_rwsem. |
4034 | * |
4035 | * Return: |
4036 | * * number of bytes written, even for truncated writes |
4037 | * * negative error code if no data has been written at all |
4038 | */ |
4039 | ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) |
4040 | { |
4041 | struct file *file = iocb->ki_filp; |
4042 | struct address_space *mapping = file->f_mapping; |
4043 | struct inode *inode = mapping->host; |
4044 | ssize_t ret; |
4045 | |
4046 | ret = file_remove_privs(file); |
4047 | if (ret) |
4048 | return ret; |
4049 | |
4050 | ret = file_update_time(file); |
4051 | if (ret) |
4052 | return ret; |
4053 | |
4054 | if (iocb->ki_flags & IOCB_DIRECT) { |
4055 | ret = generic_file_direct_write(iocb, from); |
4056 | /* |
4057 | * If the write stopped short of completing, fall back to |
4058 | * buffered writes. Some filesystems do this for writes to |
4059 | * holes, for example. For DAX files, a buffered write will |
4060 | * not succeed (even if it did, DAX does not handle dirty |
4061 | * page-cache pages correctly). |
4062 | */ |
4063 | if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode)) |
4064 | return ret; |
4065 | return direct_write_fallback(iocb, from, ret, |
4066 | generic_perform_write(iocb, from)); |
4067 | } |
4068 | |
4069 | return generic_perform_write(iocb, from); |
4070 | } |
4071 | EXPORT_SYMBOL(__generic_file_write_iter); |
4072 | |
4073 | /** |
4074 | * generic_file_write_iter - write data to a file |
4075 | * @iocb: IO state structure |
4076 | * @from: iov_iter with data to write |
4077 | * |
4078 | * This is a wrapper around __generic_file_write_iter() to be used by most |
4079 | * filesystems. It takes care of syncing the file in case of O_SYNC file |
4080 | * and acquires i_rwsem as needed. |
4081 | * Return: |
4082 | * * negative error code if no data has been written at all of |
4083 | * vfs_fsync_range() failed for a synchronous write |
4084 | * * number of bytes written, even for truncated writes |
4085 | */ |
4086 | ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) |
4087 | { |
4088 | struct file *file = iocb->ki_filp; |
4089 | struct inode *inode = file->f_mapping->host; |
4090 | ssize_t ret; |
4091 | |
4092 | inode_lock(inode); |
4093 | ret = generic_write_checks(iocb, from); |
4094 | if (ret > 0) |
4095 | ret = __generic_file_write_iter(iocb, from); |
4096 | inode_unlock(inode); |
4097 | |
4098 | if (ret > 0) |
4099 | ret = generic_write_sync(iocb, ret); |
4100 | return ret; |
4101 | } |
4102 | EXPORT_SYMBOL(generic_file_write_iter); |
4103 | |
4104 | /** |
4105 | * filemap_release_folio() - Release fs-specific metadata on a folio. |
4106 | * @folio: The folio which the kernel is trying to free. |
4107 | * @gfp: Memory allocation flags (and I/O mode). |
4108 | * |
4109 | * The address_space is trying to release any data attached to a folio |
4110 | * (presumably at folio->private). |
4111 | * |
4112 | * This will also be called if the private_2 flag is set on a page, |
4113 | * indicating that the folio has other metadata associated with it. |
4114 | * |
4115 | * The @gfp argument specifies whether I/O may be performed to release |
4116 | * this page (__GFP_IO), and whether the call may block |
4117 | * (__GFP_RECLAIM & __GFP_FS). |
4118 | * |
4119 | * Return: %true if the release was successful, otherwise %false. |
4120 | */ |
4121 | bool filemap_release_folio(struct folio *folio, gfp_t gfp) |
4122 | { |
4123 | struct address_space * const mapping = folio->mapping; |
4124 | |
4125 | BUG_ON(!folio_test_locked(folio)); |
4126 | if (!folio_needs_release(folio)) |
4127 | return true; |
4128 | if (folio_test_writeback(folio)) |
4129 | return false; |
4130 | |
4131 | if (mapping && mapping->a_ops->release_folio) |
4132 | return mapping->a_ops->release_folio(folio, gfp); |
4133 | return try_to_free_buffers(folio); |
4134 | } |
4135 | EXPORT_SYMBOL(filemap_release_folio); |
4136 | |
4137 | #ifdef CONFIG_CACHESTAT_SYSCALL |
4138 | /** |
4139 | * filemap_cachestat() - compute the page cache statistics of a mapping |
4140 | * @mapping: The mapping to compute the statistics for. |
4141 | * @first_index: The starting page cache index. |
4142 | * @last_index: The final page index (inclusive). |
4143 | * @cs: the cachestat struct to write the result to. |
4144 | * |
4145 | * This will query the page cache statistics of a mapping in the |
4146 | * page range of [first_index, last_index] (inclusive). The statistics |
4147 | * queried include: number of dirty pages, number of pages marked for |
4148 | * writeback, and the number of (recently) evicted pages. |
4149 | */ |
4150 | static void filemap_cachestat(struct address_space *mapping, |
4151 | pgoff_t first_index, pgoff_t last_index, struct cachestat *cs) |
4152 | { |
4153 | XA_STATE(xas, &mapping->i_pages, first_index); |
4154 | struct folio *folio; |
4155 | |
4156 | rcu_read_lock(); |
4157 | xas_for_each(&xas, folio, last_index) { |
4158 | int order; |
4159 | unsigned long nr_pages; |
4160 | pgoff_t folio_first_index, folio_last_index; |
4161 | |
4162 | /* |
4163 | * Don't deref the folio. It is not pinned, and might |
4164 | * get freed (and reused) underneath us. |
4165 | * |
4166 | * We *could* pin it, but that would be expensive for |
4167 | * what should be a fast and lightweight syscall. |
4168 | * |
4169 | * Instead, derive all information of interest from |
4170 | * the rcu-protected xarray. |
4171 | */ |
4172 | |
4173 | if (xas_retry(&xas, folio)) |
4174 | continue; |
4175 | |
4176 | order = xa_get_order(xas.xa, xas.xa_index); |
4177 | nr_pages = 1 << order; |
4178 | folio_first_index = round_down(xas.xa_index, 1 << order); |
4179 | folio_last_index = folio_first_index + nr_pages - 1; |
4180 | |
4181 | /* Folios might straddle the range boundaries, only count covered pages */ |
4182 | if (folio_first_index < first_index) |
4183 | nr_pages -= first_index - folio_first_index; |
4184 | |
4185 | if (folio_last_index > last_index) |
4186 | nr_pages -= folio_last_index - last_index; |
4187 | |
4188 | if (xa_is_value(folio)) { |
4189 | /* page is evicted */ |
4190 | void *shadow = (void *)folio; |
4191 | bool workingset; /* not used */ |
4192 | |
4193 | cs->nr_evicted += nr_pages; |
4194 | |
4195 | #ifdef CONFIG_SWAP /* implies CONFIG_MMU */ |
4196 | if (shmem_mapping(mapping)) { |
4197 | /* shmem file - in swap cache */ |
4198 | swp_entry_t swp = radix_to_swp_entry(folio); |
4199 | |
4200 | /* swapin error results in poisoned entry */ |
4201 | if (non_swap_entry(swp)) |
4202 | goto resched; |
4203 | |
4204 | /* |
4205 | * Getting a swap entry from the shmem |
4206 | * inode means we beat |
4207 | * shmem_unuse(). rcu_read_lock() |
4208 | * ensures swapoff waits for us before |
4209 | * freeing the swapper space. However, |
4210 | * we can race with swapping and |
4211 | * invalidation, so there might not be |
4212 | * a shadow in the swapcache (yet). |
4213 | */ |
4214 | shadow = get_shadow_from_swap_cache(swp); |
4215 | if (!shadow) |
4216 | goto resched; |
4217 | } |
4218 | #endif |
4219 | if (workingset_test_recent(shadow, true, &workingset)) |
4220 | cs->nr_recently_evicted += nr_pages; |
4221 | |
4222 | goto resched; |
4223 | } |
4224 | |
4225 | /* page is in cache */ |
4226 | cs->nr_cache += nr_pages; |
4227 | |
4228 | if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY)) |
4229 | cs->nr_dirty += nr_pages; |
4230 | |
4231 | if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK)) |
4232 | cs->nr_writeback += nr_pages; |
4233 | |
4234 | resched: |
4235 | if (need_resched()) { |
4236 | xas_pause(&xas); |
4237 | cond_resched_rcu(); |
4238 | } |
4239 | } |
4240 | rcu_read_unlock(); |
4241 | } |
4242 | |
4243 | /* |
4244 | * The cachestat(2) system call. |
4245 | * |
4246 | * cachestat() returns the page cache statistics of a file in the |
4247 | * bytes range specified by `off` and `len`: number of cached pages, |
4248 | * number of dirty pages, number of pages marked for writeback, |
4249 | * number of evicted pages, and number of recently evicted pages. |
4250 | * |
4251 | * An evicted page is a page that is previously in the page cache |
4252 | * but has been evicted since. A page is recently evicted if its last |
4253 | * eviction was recent enough that its reentry to the cache would |
4254 | * indicate that it is actively being used by the system, and that |
4255 | * there is memory pressure on the system. |
4256 | * |
4257 | * `off` and `len` must be non-negative integers. If `len` > 0, |
4258 | * the queried range is [`off`, `off` + `len`]. If `len` == 0, |
4259 | * we will query in the range from `off` to the end of the file. |
4260 | * |
4261 | * The `flags` argument is unused for now, but is included for future |
4262 | * extensibility. User should pass 0 (i.e no flag specified). |
4263 | * |
4264 | * Currently, hugetlbfs is not supported. |
4265 | * |
4266 | * Because the status of a page can change after cachestat() checks it |
4267 | * but before it returns to the application, the returned values may |
4268 | * contain stale information. |
4269 | * |
4270 | * return values: |
4271 | * zero - success |
4272 | * -EFAULT - cstat or cstat_range points to an illegal address |
4273 | * -EINVAL - invalid flags |
4274 | * -EBADF - invalid file descriptor |
4275 | * -EOPNOTSUPP - file descriptor is of a hugetlbfs file |
4276 | */ |
4277 | SYSCALL_DEFINE4(cachestat, unsigned int, fd, |
4278 | struct cachestat_range __user *, cstat_range, |
4279 | struct cachestat __user *, cstat, unsigned int, flags) |
4280 | { |
4281 | struct fd f = fdget(fd); |
4282 | struct address_space *mapping; |
4283 | struct cachestat_range csr; |
4284 | struct cachestat cs; |
4285 | pgoff_t first_index, last_index; |
4286 | |
4287 | if (!f.file) |
4288 | return -EBADF; |
4289 | |
4290 | if (copy_from_user(&csr, cstat_range, |
4291 | sizeof(struct cachestat_range))) { |
4292 | fdput(f); |
4293 | return -EFAULT; |
4294 | } |
4295 | |
4296 | /* hugetlbfs is not supported */ |
4297 | if (is_file_hugepages(f.file)) { |
4298 | fdput(f); |
4299 | return -EOPNOTSUPP; |
4300 | } |
4301 | |
4302 | if (flags != 0) { |
4303 | fdput(f); |
4304 | return -EINVAL; |
4305 | } |
4306 | |
4307 | first_index = csr.off >> PAGE_SHIFT; |
4308 | last_index = |
4309 | csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT; |
4310 | memset(&cs, 0, sizeof(struct cachestat)); |
4311 | mapping = f.file->f_mapping; |
4312 | filemap_cachestat(mapping, first_index, last_index, &cs); |
4313 | fdput(f); |
4314 | |
4315 | if (copy_to_user(cstat, &cs, sizeof(struct cachestat))) |
4316 | return -EFAULT; |
4317 | |
4318 | return 0; |
4319 | } |
4320 | #endif /* CONFIG_CACHESTAT_SYSCALL */ |
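/*
 * Illustrative sketch, not part of this file: querying cachestat(2) from
 * userspace on an already-open file descriptor. struct cachestat_range and
 * struct cachestat come from the uapi <linux/mman.h>; the snippet also needs
 * <unistd.h>, <sys/syscall.h> and <stdio.h>. A len of 0 queries from off to
 * the end of the file.
 *
 *	struct cachestat_range range = { .off = 0, .len = 0 };
 *	struct cachestat cs;
 *
 *	if (syscall(__NR_cachestat, fd, &range, &cs, 0) == 0)
 *		printf("cached %llu dirty %llu writeback %llu evicted %llu\n",
 *		       (unsigned long long)cs.nr_cache,
 *		       (unsigned long long)cs.nr_dirty,
 *		       (unsigned long long)cs.nr_writeback,
 *		       (unsigned long long)cs.nr_evicted);
 */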
4321 |