// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"


void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
			     size_t from, size_t len)
{
	struct buffer_head *head = folio_buffers(folio);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	size_t to = from + len;
	size_t start, end;

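	/*
	 * Walk the buffers attached to the folio, adding each one that
	 * overlaps the byte range [from, to) to the current transaction.
	 */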
	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_folio, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_folio(struct folio *folio,
				  struct writeback_control *wbc)
{
	struct inode * const inode = folio->mapping->host;
	loff_t i_size = i_size_read(inode);

	/*
	 * The folio straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (folio_pos(folio) < i_size &&
	    i_size < folio_pos(folio) + folio_size(folio))
		folio_zero_segment(folio, offset_in_folio(folio, i_size),
				   folio_size(folio));

	return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
					wbc);
}

/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
				    struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

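	/*
	 * The checked flag is set when the folio is dirtied under a running
	 * transaction (see jdata_dirty_folio); such folios still need their
	 * buffers added to the journal before they are written back.
	 */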
	if (folio_test_checked(folio)) {
		folio_clear_checked(folio);
		if (!folio_buffers(folio)) {
			create_empty_buffers(folio,
					     inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
	}
	return gfs2_write_jdata_folio(folio, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
		goto out;
	if (folio_test_checked(folio) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_write_folio(folio, wbc);

out_ignore:
	folio_redirty_for_writepage(wbc, folio);
out:
	folio_unlock(folio);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write enough pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0 && wbc->nr_to_write > 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
				  struct writeback_control *wbc,
				  struct folio_batch *fbatch,
				  pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks;
	int i;
	int ret;
	size_t size = 0;
	int nr_folios = folio_batch_count(fbatch);

	for (i = 0; i < nr_folios; i++)
		size += folio_size(fbatch->folios[i]);
	nrblocks = size >> inode->i_blkbits;

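	/*
	 * Reserve journal space up front: one block plus one revoke for
	 * every block covered by the batch.
	 */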
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		*done_index = folio->index;

		folio_lock(folio);

		if (unlikely(folio->mapping != mapping)) {
continue_unlock:
			folio_unlock(folio);
			continue;
		}

		if (!folio_test_dirty(folio)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			else
				goto continue_unlock;
		}

		BUG_ON(folio_test_writeback(folio));
		if (!folio_clear_dirty_for_io(folio))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_write_folio(folio, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				folio_unlock(folio);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = folio_next_index(folio);
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct folio_batch fbatch;
	int nr_folios;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
						   tag, &fbatch);
		if (nr_folios == 0)
			break;

		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
					     &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_read_folio - Fill in a Linux folio with stuffed file data
 * @ip: the inode
 * @folio: the folio
 *
 * Returns: errno
 */
static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio)
{
	struct buffer_head *dibh = NULL;
	size_t dsize = i_size_read(&ip->i_inode);
	void *from = NULL;
	int error = 0;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero folio in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(folio->index)) {
		dsize = 0;
	} else {
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error)
			goto out;
		from = dibh->b_data + sizeof(struct gfs2_dinode);
	}

	folio_fill_tail(folio, 0, from, dsize);
	brelse(dibh);
out:
	folio_end_read(folio, error == 0);

	return error;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

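	/*
	 * Non-journaled data is read via iomap, as are jdata folios without
	 * buffers when the block size matches the page size. Stuffed jdata
	 * is filled straight from the dinode; everything else goes through
	 * mpage.
	 */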
	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_read_folio(ip, folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (gfs2_withdrawing_or_withdrawn(sdp))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or a negative errno on failure
 */

ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
			   size_t size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	size_t copied = 0;

	do {
		size_t offset, chunk;
		struct folio *folio;

		folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(folio)) {
			if (PTR_ERR(folio) == -EINTR)
				continue;
			return PTR_ERR(folio);
		}
		offset = *pos + copied - folio_pos(folio);
		chunk = min(size - copied, folio_size(folio) - offset);
		memcpy_from_folio(buf + copied, folio, offset, chunk);
		index = folio_next_index(folio);
		folio_put(folio);
		copied += chunk;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

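	/* Stuffed files are handled entirely in ->read_folio; skip readahead. */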
	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

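/*
 * Folios dirtied while a transaction is running (current->journal_info is
 * set) are marked "checked" so that writeback later knows to add their
 * buffers to the journal (see __gfs2_jdata_write_folio).
 */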
static bool jdata_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

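	/* Stuffed files keep their data in the dinode; there is no block to map. */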
	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

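/*
 * gfs2_discard detaches a buffer from the journal before its folio range is
 * invalidated: the buffer is cleaned and its bufdata is unlinked from any
 * log list it may still be on.
 */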
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared. Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below. Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

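	/* No buffer is busy, dirty, or journaled; detach each bufdata. */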
	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = iomap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}