// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__u16 dummy_csum = 0;
	int offset = offsetof(struct ext4_inode, i_checksum_lo);
	unsigned int csum_size = sizeof(dummy_csum);

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
	offset += csum_size;
	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
			   EXT4_GOOD_OLD_INODE_SIZE - offset);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		offset = offsetof(struct ext4_inode, i_checksum_hi);
		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
				   EXT4_GOOD_OLD_INODE_SIZE,
				   offset - EXT4_GOOD_OLD_INODE_SIZE);
		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
					   csum_size);
			offset += csum_size;
		}
		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
				   EXT4_INODE_SIZE(inode->i_sb) - offset);
	}

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
			 struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
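
/*
 * Illustrative note (editor's addition, not from the original source): the
 * 32-bit checksum is split across two 16-bit on-disk fields. For example,
 * csum 0xAABBCCDD is stored as i_checksum_lo = 0xCCDD and, when the large
 * inode has room for it, i_checksum_hi = 0xAABB. Verification above
 * reassembles the halves the same way:
 *
 *	provided = le16_to_cpu(raw->i_checksum_lo);
 *	provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
 */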

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		int ea_blocks = EXT4_I(inode)->i_file_acl ?
				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

		if (ext4_has_inline_data(inode))
			return 0;

		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
	}
	return S_ISLNK(inode->i_mode) && inode->i_size &&
	       (inode->i_size < EXT4_N_BLOCKS * 4);
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;
	/*
	 * Credits for final inode cleanup and freeing:
	 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
	 * (xattr block freeing), bitmap, group descriptor (inode freeing)
	 */
	int extra_credits = 6;
	struct ext4_xattr_inode_array *ea_inode_array = NULL;
	bool freeze_protected = false;

	trace_ext4_evict_inode(inode);

	if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
		ext4_evict_ea_inode(inode);
	if (inode->i_nlink) {
		truncate_inode_pages_final(&inode->i_data);

		goto no_delete;
	}

	if (is_bad_inode(inode))
		goto no_delete;
	dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages_final(&inode->i_data);

	/*
	 * For inodes with journalled data, transaction commit could have
	 * dirtied the inode. And for inodes with dioread_nolock, unwritten
	 * extents converting worker could merge extents and also have dirtied
	 * the inode. Flush worker is ignoring it because of I_FREEING flag but
	 * we still need to remove the inode from the writeback lists.
	 */
	if (!list_empty_careful(&inode->i_io_list))
		inode_io_list_del(inode);

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it. When we are in a running transaction though,
	 * we are already protected against freezing and we cannot grab further
	 * protection due to lock ordering constraints.
	 */
	if (!ext4_journal_current_handle()) {
		sb_start_intwrite(inode->i_sb);
		freeze_protected = true;
	}

	if (!IS_NOQUOTA(inode))
		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

	/*
	 * Block bitmap, group descriptor, and inode are accounted in both
	 * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
			 ext4_blocks_for_truncate(inode) + extra_credits - 3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	/*
	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
	 * special handling of symlinks here because i_size is used to
	 * determine whether ext4_inode_info->i_data contains symlink data or
	 * block mappings. Setting i_size to 0 will remove its fast symlink
	 * status. Erase i_data so that it becomes a valid empty block map.
	 */
	if (ext4_inode_is_fast_symlink(inode))
		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks) {
		err = ext4_truncate(inode);
		if (err) {
			ext4_error_err(inode->i_sb, -err,
				       "couldn't truncate inode %lu (err %d)",
				       inode->i_ino, err);
			goto stop_handle;
		}
	}

	/* Remove xattr references. */
	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
				      extra_credits);
	if (err) {
		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
		ext4_journal_stop(handle);
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		ext4_xattr_inode_array_free(ea_inode_array);
		goto no_delete;
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	if (freeze_protected)
		sb_end_intwrite(inode->i_sb);
	ext4_xattr_inode_array_free(ea_inode_array);
	return;
no_delete:
	/*
	 * Catch the case where something else accidentally dirties the
	 * evicting inode, which could cause inode use-after-free issues
	 * later.
	 */
	WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list));

	if (!list_empty(&EXT4_I(inode)->i_fc_list))
		ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
				  int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			     "with only %d reserved data blocks",
			     __func__, inode->i_ino, used,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&ei->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    !inode_is_open_for_write(inode))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (ext4_has_feature_journal(inode->i_sb) &&
	    (inode->i_ino ==
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
		return 0;
	if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock %llu "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_pblk, map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the result may differ,
	 * e.g. xfstests #223 when dioread_nolock is enabled. The reason
	 * is that we look up a block mapping in the extent status tree
	 * without taking i_data_sem, so the unwritten extent could be
	 * converted in the meantime.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because the extent will be collapsed in the
	 * status tree, so the lengths might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If flags doesn't contain EXT4_GET_BLOCKS_CREATE and the blocks are
 * pre-allocated and unwritten, the resulting @map is marked as unwritten.
 * If the flags contain EXT4_GET_BLOCKS_CREATE, it will mark @map as mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated); in
 * that case, @map is returned as unmapped but we still do fill map->m_len to
 * indicate the length of a hole starting at map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
		  flags, map->m_len, (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* Lookup extent status tree firstly */
	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
	    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			map->m_flags |= ext4_es_is_delayed(&es) ?
					EXT4_MAP_DELAYED : 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG();
		}

		if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
			return retval;
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}
	/*
	 * In the query-cache no-wait mode, there is nothing more we can do
	 * if we cannot find the extent in the cache.
	 */
	if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
		return 0;

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
				      map->m_pblk, status);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_map_blocks() returns with the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert the extent to unwritten,
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks().
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * New block allocation and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check for EXT4_INODE_EXTENTS here because migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing. Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zeroout blocks before inserting them into extent
		 * status tree. Otherwise someone could look them up there and
		 * use them before they are really zeroed. We also have to
		 * unmap metadata before zeroing as otherwise writeback can
		 * overwrite zeros with stale data from block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
				      map->m_pblk, status);
	}

out_sem:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on transaction's
		 * ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !ext4_is_quota_file(inode) &&
		    ext4_should_order_data(inode)) {
			loff_t start_byte =
				(loff_t)map->m_lblk << inode->i_blkbits;
			loff_t length = (loff_t)map->m_len << inode->i_blkbits;

			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode,
						start_byte, length);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode,
						start_byte, length);
			if (ret)
				return ret;
		}
	}
	if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
				map->m_flags & EXT4_MAP_MAPPED))
		ext4_fc_track_range(handle, inode, map->m_lblk,
				    map->m_lblk + map->m_len - 1);
	if (retval < 0)
		ext_debug(inode, "failed with err %d\n", retval);
	return retval;
}
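
/*
 * Illustrative usage sketch (editor's addition, not from the original file):
 * a plain lookup of one logical block, mirroring what ext4_getblk() below
 * does. A return value > 0 is the number of blocks mapped, 0 means a hole
 * whose length is left in map.m_len, and < 0 is an error.
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret = ext4_map_blocks(handle, inode, &map, 0);
 *	if (ret > 0)
 *		pblk = map.m_pblk;	// mapped physical block
 */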

/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	old_state = READ_ONCE(bh->b_state);
	do {
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(!try_cmpxchg(&bh->b_state, &old_state, new_state)));
}

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}
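
/*
 * Illustrative note (editor's addition): ext4_get_block() follows the
 * get_block_t convention, so the generic buffer-head helpers can drive it
 * directly, as ext4_write_begin() does further below:
 *
 *	ret = __block_write_begin(&folio->page, pos, len, ext4_get_block);
 */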

/*
 * Get block function used when preparing for buffered write if we require
 * creating an unwritten extent if blocks haven't been allocated. The extent
 * will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	int ret = 0;

	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	ret = _ext4_get_block(inode, iblock, bh_result,
			      EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);

	/*
	 * If the buffer is marked unwritten, mark it as new to make sure it is
	 * zeroed out correctly in case of partial writes. Otherwise, there is
	 * a chance of stale data getting exposed.
	 */
	if (ret == 0 && buffer_unwritten(bh_result))
		set_buffer_new(bh_result);

	return ret;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	bool nowait = map_flags & EXT4_GET_BLOCKS_CACHED_NOWAIT;
	int err;

	ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		    || handle != NULL || create == 0);
	ASSERT(create == 0 || !nowait);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	if (nowait)
		return sb_find_get_block(inode->i_sb, map.m_pblk);

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		ASSERT(create != 0);
		ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
			    || (handle != NULL));

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata. For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
						     EXT4_JTR_NONE);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;
	int ret;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}
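
/*
 * Illustrative usage sketch (editor's addition): read one metadata block and
 * release it when done. Note that ext4_bread() returns NULL for a hole when
 * EXT4_GET_BLOCKS_CREATE is not passed in map_flags.
 *
 *	struct buffer_head *bh = ext4_bread(handle, inode, block, 0);
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);
 *	if (bh) {
 *		// ... use bh->b_data ...
 *		brelse(bh);
 *	}
 */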

/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
		     bool wait, struct buffer_head **bhs)
{
	int i, err;

	for (i = 0; i < bh_count; i++) {
		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
		if (IS_ERR(bhs[i])) {
			err = PTR_ERR(bhs[i]);
			bh_count = i;
			goto out_brelse;
		}
	}

	for (i = 0; i < bh_count; i++)
		/* Note that NULL bhs[i] is valid because of holes. */
		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
			ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);

	if (!wait)
		return 0;

	for (i = 0; i < bh_count; i++)
		if (bhs[i])
			wait_on_buffer(bhs[i]);

	for (i = 0; i < bh_count; i++) {
		if (bhs[i] && !buffer_uptodate(bhs[i])) {
			err = -EIO;
			goto out_brelse;
		}
	}
	return 0;

out_brelse:
	for (i = 0; i < bh_count; i++) {
		brelse(bhs[i]);
		bhs[i] = NULL;
	}
	return err;
}
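
/*
 * Illustrative usage sketch (editor's addition; first_block is a
 * hypothetical variable): read up to four contiguous blocks and wait for
 * completion. On success each bhs[i] is either an uptodate buffer or NULL
 * for a hole.
 *
 *	struct buffer_head *bhs[4] = {};
 *	int err = ext4_bread_batch(inode, first_block, 4, true, bhs);
 */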

int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle, struct inode *inode,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, inode, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * Helper for handling dirtying of journalled data. We also mark the folio as
 * dirty so that the writeback code knows this folio (and inode) contains
 * dirty data. ext4_writepages() then commits the appropriate transaction to
 * make the data stable.
 */
static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
{
	folio_mark_dirty(bh->b_folio);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

int do_journal_get_write_access(handle_t *handle, struct inode *inode,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	BUFFER_TRACE(bh, "get write access");
	ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
					    EXT4_JTR_NONE);
	if (!ret && dirty)
		ret = ext4_dirty_journalled_data(handle, bh);
	return ret;
}
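
/*
 * Illustrative sketch (editor's addition): this callback is designed to be
 * driven by ext4_walk_page_buffers(), as ext4_write_begin() does further
 * below for data=journal mode:
 *
 *	ret = ext4_walk_page_buffers(handle, inode, folio_buffers(folio),
 *				     from, to, NULL,
 *				     do_journal_get_write_access);
 */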

#ifdef CONFIG_FS_ENCRYPTION
static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
				  get_block_t *get_block)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = folio->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned bbits;
	struct buffer_head *bh, *head, *wait[2];
	int nr_wait = 0;
	int i;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	head = folio_buffers(folio);
	if (!head)
		head = create_empty_buffers(folio, blocksize, 0);
	bbits = ilog2(blocksize);
	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	     block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (folio_test_uptodate(folio)) {
				set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				if (folio_test_uptodate(folio)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					folio_zero_segments(folio, to,
							    block_end,
							    block_start, from);
				continue;
			}
		}
		if (folio_test_uptodate(folio)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ext4_read_bh_lock(bh, 0, false);
			wait[nr_wait++] = bh;
		}
	}
	/*
	 * If we issued read requests, let them complete.
	 */
	for (i = 0; i < nr_wait; i++) {
		wait_on_buffer(wait[i]);
		if (!buffer_uptodate(wait[i]))
			err = -EIO;
	}
	if (unlikely(err)) {
		folio_zero_new_buffers(folio, from, to);
	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		for (i = 0; i < nr_wait; i++) {
			int err2;

			err2 = fscrypt_decrypt_pagecache_blocks(folio,
						blocksize, bh_offset(wait[i]));
			if (err2) {
				clear_buffer_uptodate(wait[i]);
				err = err2;
			}
		}
	}

	return err;
}
#endif

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction. We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the ext4_write_end(). So doing the jbd2_journal_start at the start of
 * ext4_write_begin() is the right place.
 */
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct folio *folio;
	pgoff_t index;
	unsigned from, to;

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return -EIO;

	trace_ext4_write_begin(inode, pos, len);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_SHIFT;
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * __filemap_get_folio() can take a long time if the
	 * system is thrashing due to memory pressure, or if the folio
	 * is being written back. So grab it first before we start
	 * the transaction handle. This also allows us to allocate
	 * the folio (if needed) without using GFP_NOFS.
	 */
retry_grab:
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	/*
	 * The same as page allocation, we prealloc buffer heads before
	 * starting the handle.
	 */
	if (!folio_buffers(folio))
		create_empty_buffers(folio, inode->i_sb->s_blocksize, 0);

	folio_unlock(folio);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		folio_put(folio);
		return PTR_ERR(handle);
	}

	folio_lock(folio);
	if (folio->mapping != mapping) {
		/* The folio got truncated from under us */
		folio_unlock(folio);
		folio_put(folio);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the folio was unlocked */
	folio_wait_stable(folio);

#ifdef CONFIG_FS_ENCRYPTION
	if (ext4_should_dioread_nolock(inode))
		ret = ext4_block_write_begin(folio, pos, len,
					     ext4_get_block_unwritten);
	else
		ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
#else
	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(&folio->page, pos, len,
					  ext4_get_block_unwritten);
	else
		ret = __block_write_begin(&folio->page, pos, len, ext4_get_block);
#endif
	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, inode,
					     folio_buffers(folio), from, to,
					     NULL, do_journal_get_write_access);
	}

	if (ret) {
		bool extended = (pos + len > inode->i_size) &&
				!ext4_verity_in_progress(inode);

		folio_unlock(folio);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size. Trim these off again. Don't need
		 * i_size_read because we hold i_rwsem.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (extended && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (extended) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		folio_put(folio);
		return ret;
	}
	*pagep = &folio->page;
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct inode *inode,
			struct buffer_head *bh)
{
	int ret;
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_dirty_journalled_data(handle, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->i_private_list. Metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct folio *folio = page_folio(page);
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int i_size_changed = 0;
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_write_end(inode, pos, len, copied);

	if (ext4_has_inline_data(inode) &&
	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
		return ext4_write_inline_data_end(inode, pos, len, copied,
						  folio);

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
	/*
	 * It's important to update i_size while still holding folio lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 *
	 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
	 * blocks are being written past EOF, so skip the i_size update.
	 */
	if (!verity)
		i_size_changed = ext4_update_inode_size(inode, pos + copied);
	folio_unlock(folio);
	folio_put(folio);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under folio lock. First, it unnecessarily
	 * makes the holding time of folio lock longer. Second, it forces lock
	 * ordering of folio lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ret = ext4_mark_inode_dirty(handle, inode);

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks and copied less,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * This is a private version of folio_zero_new_buffers() which doesn't
 * set the buffer to be dirty, since in data=journalled mode we need
 * to call ext4_dirty_journalled_data() instead.
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
					     struct inode *inode,
					     struct folio *folio,
					     unsigned from, unsigned to)
{
	unsigned int block_start = 0, block_end;
	struct buffer_head *head, *bh;

	bh = head = folio_buffers(folio);
	do {
		block_end = block_start + bh->b_size;
		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!folio_test_uptodate(folio)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					folio_zero_range(folio, start, size);
					write_end_fn(handle, inode, bh);
				}
				clear_buffer_new(bh);
			}
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	struct folio *folio = page_folio(page);
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	int size_changed = 0;
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (ext4_has_inline_data(inode))
		return ext4_write_inline_data_end(inode, pos, len, copied,
						  folio);

	if (unlikely(copied < len) && !folio_test_uptodate(folio)) {
		copied = 0;
		ext4_journalled_zero_new_buffers(handle, inode, folio,
						 from, to);
	} else {
		if (unlikely(copied < len))
			ext4_journalled_zero_new_buffers(handle, inode, folio,
							 from + copied, to);
		ret = ext4_walk_page_buffers(handle, inode,
					     folio_buffers(folio),
					     from, from + copied, &partial,
					     write_end_fn);
		if (!partial)
			folio_mark_uptodate(folio);
	}
	if (!verity)
		size_changed = ext4_update_inode_size(inode, pos + copied);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	folio_unlock(folio);
	folio_put(folio);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);

	if (size_changed) {
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks and copied less,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve space for a single cluster
 */
static int ext4_da_reserve_space(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int ret;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end. Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	spin_lock(&ei->i_block_reservation_lock);
	if (ext4_claim_free_clusters(sbi, 1, 0)) {
		spin_unlock(&ei->i_block_reservation_lock);
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	trace_ext4_da_reserve_space(inode);
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;       /* success */
}
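
/*
 * Illustrative note (editor's addition): a reservation made here is undone
 * by ext4_da_release_space() below, which drops both the in-core counter
 * and the quota reservation, e.g.
 *
 *	if (ext4_da_reserve_space(inode) == 0)
 *		ext4_da_release_space(inode, 1);
 */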

void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * if there aren't enough reserved blocks, then the
		 * counter is messed up somewhere. Since this
		 * function is called from invalidate page, it's
		 * harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			     "ino %lu, to_free %d with only %d reserved "
			     "data blocks", inode->i_ino, to_free,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}
1517 | |
1518 | /* |
1519 | * Delayed allocation stuff |
1520 | */ |
1521 | |
1522 | struct mpage_da_data { |
1523 | /* These are input fields for ext4_do_writepages() */ |
1524 | struct inode *inode; |
1525 | struct writeback_control *wbc; |
1526 | unsigned int can_map:1; /* Can writepages call map blocks? */ |
1527 | |
1528 | /* These are internal state of ext4_do_writepages() */ |
1529 | pgoff_t first_page; /* The first page to write */ |
1530 | pgoff_t next_page; /* Current page to examine */ |
1531 | pgoff_t last_page; /* Last page to examine */ |
1532 | /* |
1533 | * Extent to map - this can be after first_page because that can be |
1534 | * fully mapped. We somewhat abuse m_flags to store whether the extent |
1535 | * is delalloc or unwritten. |
1536 | */ |
1537 | struct ext4_map_blocks map; |
1538 | struct ext4_io_submit io_submit; /* IO submission data */ |
1539 | unsigned int do_map:1; |
1540 | unsigned int scanned_until_end:1; |
1541 | unsigned int journalled_more_data:1; |
1542 | }; |
1543 | |
1544 | static void mpage_release_unused_pages(struct mpage_da_data *mpd, |
1545 | bool invalidate) |
1546 | { |
1547 | unsigned nr, i; |
1548 | pgoff_t index, end; |
1549 | struct folio_batch fbatch; |
1550 | struct inode *inode = mpd->inode; |
1551 | struct address_space *mapping = inode->i_mapping; |
1552 | |
1553 | /* This is necessary when next_page == 0. */ |
1554 | if (mpd->first_page >= mpd->next_page) |
1555 | return; |
1556 | |
1557 | mpd->scanned_until_end = 0; |
1558 | index = mpd->first_page; |
1559 | end = mpd->next_page - 1; |
1560 | if (invalidate) { |
1561 | ext4_lblk_t start, last; |
1562 | start = index << (PAGE_SHIFT - inode->i_blkbits); |
1563 | last = end << (PAGE_SHIFT - inode->i_blkbits); |
1564 | |
1565 | /* |
1566 | * avoid racing with extent status tree scans made by |
1567 | * ext4_insert_delayed_block() |
1568 | */ |
1569 | down_write(sem: &EXT4_I(inode)->i_data_sem); |
1570 | ext4_es_remove_extent(inode, lblk: start, len: last - start + 1); |
1571 | up_write(sem: &EXT4_I(inode)->i_data_sem); |
1572 | } |
1573 | |
1574 | folio_batch_init(fbatch: &fbatch); |
1575 | while (index <= end) { |
1576 | nr = filemap_get_folios(mapping, start: &index, end, fbatch: &fbatch); |
1577 | if (nr == 0) |
1578 | break; |
1579 | for (i = 0; i < nr; i++) { |
1580 | struct folio *folio = fbatch.folios[i]; |
1581 | |
1582 | if (folio->index < mpd->first_page) |
1583 | continue; |
1584 | if (folio_next_index(folio) - 1 > end) |
1585 | continue; |
1586 | BUG_ON(!folio_test_locked(folio)); |
1587 | BUG_ON(folio_test_writeback(folio)); |
1588 | if (invalidate) { |
1589 | if (folio_mapped(folio)) |
1590 | folio_clear_dirty_for_io(folio); |
1591 | block_invalidate_folio(folio, offset: 0, |
1592 | length: folio_size(folio)); |
1593 | folio_clear_uptodate(folio); |
1594 | } |
1595 | folio_unlock(folio); |
1596 | } |
1597 | folio_batch_release(fbatch: &fbatch); |
1598 | } |
1599 | } |
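/*
 * Worked example of the page-to-block shifts used above (illustrative
 * only; the helper name is hypothetical). With 4KiB pages and 1KiB
 * blocks, PAGE_SHIFT - i_blkbits == 2, so page index 3 maps to logical
 * block 12; this is the same arithmetic that computes the range handed
 * to ext4_es_remove_extent().
 */
#if 0
static ext4_lblk_t example_first_block_of_page(struct inode *inode,
					       pgoff_t index)
{
	return (ext4_lblk_t)index << (PAGE_SHIFT - inode->i_blkbits);
}
#endif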
1600 | |
1601 | static void ext4_print_free_blocks(struct inode *inode) |
1602 | { |
1603 | struct ext4_sb_info *sbi = EXT4_SB(sb: inode->i_sb); |
1604 | struct super_block *sb = inode->i_sb; |
1605 | struct ext4_inode_info *ei = EXT4_I(inode); |
1606 | |
1607 | ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld" , |
1608 | EXT4_C2B(EXT4_SB(inode->i_sb), |
1609 | ext4_count_free_clusters(sb))); |
1610 | ext4_msg(sb, KERN_CRIT, "Free/Dirty block details" ); |
1611 | ext4_msg(sb, KERN_CRIT, "free_blocks=%lld" , |
1612 | (long long) EXT4_C2B(EXT4_SB(sb), |
1613 | percpu_counter_sum(&sbi->s_freeclusters_counter))); |
1614 | ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld" , |
1615 | (long long) EXT4_C2B(EXT4_SB(sb), |
1616 | percpu_counter_sum(&sbi->s_dirtyclusters_counter))); |
1617 | ext4_msg(sb, KERN_CRIT, "Block reservation details" ); |
1618 | ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u" , |
1619 | ei->i_reserved_data_blocks); |
1620 | return; |
1621 | } |
1622 | |
1623 | /* |
1624 | * ext4_insert_delayed_block - adds a delayed block to the extents status |
1625 | * tree, incrementing the reserved cluster/block |
1626 | * count or making a pending reservation |
1627 | * where needed |
1628 | * |
1629 | * @inode - file containing the newly added block |
1630 | * @lblk - logical block to be added |
1631 | * |
1632 | * Returns 0 on success, negative error code on failure. |
1633 | */ |
1634 | static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk) |
1635 | { |
1636 | struct ext4_sb_info *sbi = EXT4_SB(sb: inode->i_sb); |
1637 | int ret; |
1638 | bool allocated = false; |
1639 | |
1640 | /* |
1641 | * If the cluster containing lblk is shared with a delayed, |
1642 | * written, or unwritten extent in a bigalloc file system, it's |
1643 | * already been accounted for and does not need to be reserved. |
1644 | * A pending reservation must be made for the cluster if it's |
1645 | * shared with a written or unwritten extent and doesn't already |
1646 | * have one. Written and unwritten extents can be purged from the |
1647 | * extents status tree if the system is under memory pressure, so |
1648 | * it's necessary to examine the extent tree if a search of the |
1649 | * extents status tree doesn't get a match. |
1650 | */ |
1651 | if (sbi->s_cluster_ratio == 1) { |
1652 | ret = ext4_da_reserve_space(inode); |
1653 | if (ret != 0) /* ENOSPC */ |
1654 | return ret; |
1655 | } else { /* bigalloc */ |
1656 | if (!ext4_es_scan_clu(inode, matching_fn: &ext4_es_is_delonly, lblk)) { |
1657 | if (!ext4_es_scan_clu(inode, |
1658 | matching_fn: &ext4_es_is_mapped, lblk)) { |
1659 | ret = ext4_clu_mapped(inode, |
1660 | EXT4_B2C(sbi, lblk)); |
1661 | if (ret < 0) |
1662 | return ret; |
1663 | if (ret == 0) { |
1664 | ret = ext4_da_reserve_space(inode); |
1665 | if (ret != 0) /* ENOSPC */ |
1666 | return ret; |
1667 | } else { |
1668 | allocated = true; |
1669 | } |
1670 | } else { |
1671 | allocated = true; |
1672 | } |
1673 | } |
1674 | } |
1675 | |
1676 | ext4_es_insert_delayed_block(inode, lblk, allocated); |
1677 | return 0; |
1678 | } |
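/*
 * Worked example of the bigalloc accounting above (illustrative only;
 * the helper name is hypothetical). With 4KiB blocks and
 * s_cluster_ratio == 16 (s_cluster_bits == 4), logical block 40 lives
 * in cluster EXT4_B2C(sbi, 40) == 40 >> 4 == 2, so blocks 32..47 share
 * one reservation and a second delayed write into that range must not
 * reserve a second cluster.
 */
#if 0
static void example_cluster_math(struct ext4_sb_info *sbi)
{
	ext4_lblk_t lblk = 40;

	pr_info("block %u lives in cluster %u\n",
		lblk, (unsigned int)EXT4_B2C(sbi, lblk));
}
#endif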
1679 | |
1680 | /* |
1681 | * This function grabs code from the very beginning of |
1682 | * ext4_map_blocks(), but assumes that the caller is in the delayed |
1683 | * write path. It looks up the requested blocks and sets the |
1684 | * buffer delay bit under the protection of i_data_sem. |
1685 | */ |
1686 | static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, |
1687 | struct ext4_map_blocks *map, |
1688 | struct buffer_head *bh) |
1689 | { |
1690 | struct extent_status es; |
1691 | int retval; |
1692 | sector_t invalid_block = ~((sector_t) 0xffff); |
1693 | #ifdef ES_AGGRESSIVE_TEST |
1694 | struct ext4_map_blocks orig_map; |
1695 | |
1696 | memcpy(&orig_map, map, sizeof(*map)); |
1697 | #endif |
1698 | |
1699 | if (invalid_block < ext4_blocks_count(es: EXT4_SB(sb: inode->i_sb)->s_es)) |
1700 | invalid_block = ~0; |
1701 | |
1702 | map->m_flags = 0; |
1703 | ext_debug(inode, "max_blocks %u, logical block %lu\n" , map->m_len, |
1704 | (unsigned long) map->m_lblk); |
1705 | |
1706 | /* Look up the extent status tree first */ |
1707 | if (ext4_es_lookup_extent(inode, lblk: iblock, NULL, es: &es)) { |
1708 | if (ext4_es_is_hole(es: &es)) |
1709 | goto add_delayed; |
1710 | |
1711 | /* |
1712 | * A delayed extent could have been allocated by fallocate in |
1713 | * the meantime, so we need to check for that. |
1714 | */ |
1715 | if (ext4_es_is_delayed(es: &es) && !ext4_es_is_unwritten(es: &es)) { |
1716 | map_bh(bh, sb: inode->i_sb, block: invalid_block); |
1717 | set_buffer_new(bh); |
1718 | set_buffer_delay(bh); |
1719 | return 0; |
1720 | } |
1721 | |
1722 | map->m_pblk = ext4_es_pblock(es: &es) + iblock - es.es_lblk; |
1723 | retval = es.es_len - (iblock - es.es_lblk); |
1724 | if (retval > map->m_len) |
1725 | retval = map->m_len; |
1726 | map->m_len = retval; |
1727 | if (ext4_es_is_written(es: &es)) |
1728 | map->m_flags |= EXT4_MAP_MAPPED; |
1729 | else if (ext4_es_is_unwritten(es: &es)) |
1730 | map->m_flags |= EXT4_MAP_UNWRITTEN; |
1731 | else |
1732 | BUG(); |
1733 | |
1734 | #ifdef ES_AGGRESSIVE_TEST |
1735 | ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0); |
1736 | #endif |
1737 | return retval; |
1738 | } |
1739 | |
1740 | /* |
1741 | * Try to see if we can get the block without requesting a new |
1742 | * file system block. |
1743 | */ |
1744 | down_read(sem: &EXT4_I(inode)->i_data_sem); |
1745 | if (ext4_has_inline_data(inode)) |
1746 | retval = 0; |
1747 | else if (ext4_test_inode_flag(inode, bit: EXT4_INODE_EXTENTS)) |
1748 | retval = ext4_ext_map_blocks(NULL, inode, map, flags: 0); |
1749 | else |
1750 | retval = ext4_ind_map_blocks(NULL, inode, map, flags: 0); |
1751 | if (retval < 0) { |
1752 | up_read(sem: &EXT4_I(inode)->i_data_sem); |
1753 | return retval; |
1754 | } |
1755 | if (retval > 0) { |
1756 | unsigned int status; |
1757 | |
1758 | if (unlikely(retval != map->m_len)) { |
1759 | ext4_warning(inode->i_sb, |
1760 | "ES len assertion failed for inode " |
1761 | "%lu: retval %d != map->m_len %d" , |
1762 | inode->i_ino, retval, map->m_len); |
1763 | WARN_ON(1); |
1764 | } |
1765 | |
1766 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? |
1767 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; |
1768 | ext4_es_insert_extent(inode, lblk: map->m_lblk, len: map->m_len, |
1769 | pblk: map->m_pblk, status); |
1770 | up_read(sem: &EXT4_I(inode)->i_data_sem); |
1771 | return retval; |
1772 | } |
1773 | up_read(sem: &EXT4_I(inode)->i_data_sem); |
1774 | |
1775 | add_delayed: |
1776 | down_write(sem: &EXT4_I(inode)->i_data_sem); |
1777 | retval = ext4_insert_delayed_block(inode, lblk: map->m_lblk); |
1778 | up_write(sem: &EXT4_I(inode)->i_data_sem); |
1779 | if (retval) |
1780 | return retval; |
1781 | |
1782 | map_bh(bh, sb: inode->i_sb, block: invalid_block); |
1783 | set_buffer_new(bh); |
1784 | set_buffer_delay(bh); |
1785 | return retval; |
1786 | } |
1787 | |
1788 | /* |
1789 | * This is a special get_block_t callback which is used by |
1790 | * ext4_da_write_begin(). It will either return a mapped block or |
1791 | * reserve space for a single block. |
1792 | * |
1793 | * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. |
1794 | * We also have b_blocknr == -1 and b_bdev initialized properly. |
1795 | * |
1796 | * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. |
1797 | * We also have b_blocknr set to the physical block backing the unwritten |
1798 | * extent and b_bdev initialized properly. |
1799 | */ |
1800 | int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, |
1801 | struct buffer_head *bh, int create) |
1802 | { |
1803 | struct ext4_map_blocks map; |
1804 | int ret = 0; |
1805 | |
1806 | BUG_ON(create == 0); |
1807 | BUG_ON(bh->b_size != inode->i_sb->s_blocksize); |
1808 | |
1809 | map.m_lblk = iblock; |
1810 | map.m_len = 1; |
1811 | |
1812 | /* |
1813 | * First, we need to know whether the block was already allocated. |
1814 | * Preallocated blocks are unmapped but should be treated |
1815 | * the same as allocated blocks. |
1816 | */ |
1817 | ret = ext4_da_map_blocks(inode, iblock, map: &map, bh); |
1818 | if (ret <= 0) |
1819 | return ret; |
1820 | |
1821 | map_bh(bh, sb: inode->i_sb, block: map.m_pblk); |
1822 | ext4_update_bh_state(bh, flags: map.m_flags); |
1823 | |
1824 | if (buffer_unwritten(bh)) { |
1825 | /* A delayed write to an unwritten bh should be marked |
1826 | * new and mapped. Mapped ensures that we don't do |
1827 | * get_block multiple times when we write to the same |
1828 | * offset, and new ensures that we do a proper zero-out |
1829 | * for a partial write. |
1830 | */ |
1831 | set_buffer_new(bh); |
1832 | set_buffer_mapped(bh); |
1833 | } |
1834 | return 0; |
1835 | } |
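/*
 * Minimal sketch of how this callback is consumed (illustrative only;
 * the helper name is hypothetical). __block_write_begin() invokes the
 * get_block_t for each unmapped buffer of the folio, so after a
 * delalloc write_begin the buffer is typically new + mapped + delay
 * with b_blocknr set to the invalid block.
 */
#if 0
static int example_prepare_buffer(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh)
{
	int err = ext4_da_get_block_prep(inode, iblock, bh, 1);

	if (!err && buffer_delay(bh))
		pr_debug("block %llu reserved, allocation deferred\n",
			 (unsigned long long)iblock);
	return err;
}
#endif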
1836 | |
1837 | static void mpage_folio_done(struct mpage_da_data *mpd, struct folio *folio) |
1838 | { |
1839 | mpd->first_page += folio_nr_pages(folio); |
1840 | folio_unlock(folio); |
1841 | } |
1842 | |
1843 | static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio) |
1844 | { |
1845 | size_t len; |
1846 | loff_t size; |
1847 | int err; |
1848 | |
1849 | BUG_ON(folio->index != mpd->first_page); |
1850 | folio_clear_dirty_for_io(folio); |
1851 | /* |
1852 | * We have to be very careful here! Nothing protects writeback path |
1853 | * against i_size changes and the page can be writeably mapped into |
1854 | * page tables. So an application can be growing i_size and writing |
1855 | * data through mmap while writeback runs. folio_clear_dirty_for_io() |
1856 | * write-protects our page in page tables and the page cannot get |
1857 | * written to again until we release folio lock. So only after |
1858 | * folio_clear_dirty_for_io() we are safe to sample i_size for |
1859 | * ext4_bio_write_folio() to zero-out tail of the written page. We rely |
1860 | * on the barrier provided by folio_test_clear_dirty() in |
1861 | * folio_clear_dirty_for_io() to make sure i_size is really sampled only |
1862 | * after page tables are updated. |
1863 | */ |
1864 | size = i_size_read(inode: mpd->inode); |
1865 | len = folio_size(folio); |
1866 | if (folio_pos(folio) + len > size && |
1867 | !ext4_verity_in_progress(inode: mpd->inode)) |
1868 | len = size & ~PAGE_MASK; |
1869 | err = ext4_bio_write_folio(io: &mpd->io_submit, page: folio, len); |
1870 | if (!err) |
1871 | mpd->wbc->nr_to_write--; |
1872 | |
1873 | return err; |
1874 | } |
1875 | |
1876 | #define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay)) |
1877 | |
1878 | /* |
1879 | * mballoc gives us at most this number of blocks... |
1880 | * XXX: That seems to be only a limitation of ext4_mb_normalize_request(). |
1881 | * The rest of mballoc seems to handle chunks up to full group size. |
1882 | */ |
1883 | #define MAX_WRITEPAGES_EXTENT_LEN 2048 |
1884 | |
1885 | /* |
1886 | * mpage_add_bh_to_extent - try to add bh to extent of blocks to map |
1887 | * |
1888 | * @mpd - extent of blocks |
1889 | * @lblk - logical number of the block in the file |
1890 | * @bh - buffer head we want to add to the extent |
1891 | * |
1892 | * The function is used to collect contiguous blocks in the same state. If the |
1893 | * buffer doesn't require mapping for writeback and we haven't started the |
1894 | * extent of buffers to map yet, the function returns 'true' immediately - the |
1895 | * caller can write the buffer right away. Otherwise the function returns true |
1896 | * if the block has been added to the extent, false if the block couldn't be |
1897 | * added. |
1898 | */ |
1899 | static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk, |
1900 | struct buffer_head *bh) |
1901 | { |
1902 | struct ext4_map_blocks *map = &mpd->map; |
1903 | |
1904 | /* Buffer that doesn't need mapping for writeback? */ |
1905 | if (!buffer_dirty(bh) || !buffer_mapped(bh) || |
1906 | (!buffer_delay(bh) && !buffer_unwritten(bh))) { |
1907 | /* So far no extent to map => we write the buffer right away */ |
1908 | if (map->m_len == 0) |
1909 | return true; |
1910 | return false; |
1911 | } |
1912 | |
1913 | /* First block in the extent? */ |
1914 | if (map->m_len == 0) { |
1915 | /* We cannot map unless handle is started... */ |
1916 | if (!mpd->do_map) |
1917 | return false; |
1918 | map->m_lblk = lblk; |
1919 | map->m_len = 1; |
1920 | map->m_flags = bh->b_state & BH_FLAGS; |
1921 | return true; |
1922 | } |
1923 | |
1924 | /* Don't go larger than mballoc is willing to allocate */ |
1925 | if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN) |
1926 | return false; |
1927 | |
1928 | /* Can we merge the block to our big extent? */ |
1929 | if (lblk == map->m_lblk + map->m_len && |
1930 | (bh->b_state & BH_FLAGS) == map->m_flags) { |
1931 | map->m_len++; |
1932 | return true; |
1933 | } |
1934 | return false; |
1935 | } |
1936 | |
1937 | /* |
1938 | * mpage_process_page_bufs - submit page buffers for IO or add them to extent |
1939 | * |
1940 | * @mpd - extent of blocks for mapping |
1941 | * @head - the first buffer in the page |
1942 | * @bh - buffer we should start processing from |
1943 | * @lblk - logical number of the block in the file corresponding to @bh |
1944 | * |
1945 | * Walk through page buffers from @bh up to @head (exclusive) and either submit |
1946 | * the page for IO if all buffers in this page were mapped and there's no |
1947 | * accumulated extent of buffers to map or add buffers in the page to the |
1948 | * extent of buffers to map. The function returns 1 if the caller can continue |
1949 | * by processing the next page, 0 if it should stop adding buffers to the |
1950 | * extent to map because we cannot extend it anymore. It can also return value |
1951 | * < 0 in case of error during IO submission. |
1952 | */ |
1953 | static int mpage_process_page_bufs(struct mpage_da_data *mpd, |
1954 | struct buffer_head *head, |
1955 | struct buffer_head *bh, |
1956 | ext4_lblk_t lblk) |
1957 | { |
1958 | struct inode *inode = mpd->inode; |
1959 | int err; |
1960 | ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(node: inode) - 1) |
1961 | >> inode->i_blkbits; |
1962 | |
1963 | if (ext4_verity_in_progress(inode)) |
1964 | blocks = EXT_MAX_BLOCKS; |
1965 | |
1966 | do { |
1967 | BUG_ON(buffer_locked(bh)); |
1968 | |
1969 | if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) { |
1970 | /* Found extent to map? */ |
1971 | if (mpd->map.m_len) |
1972 | return 0; |
1973 | /* Buffer needs mapping and handle is not started? */ |
1974 | if (!mpd->do_map) |
1975 | return 0; |
1976 | /* Everything mapped so far and we hit EOF */ |
1977 | break; |
1978 | } |
1979 | } while (lblk++, (bh = bh->b_this_page) != head); |
1980 | /* So far everything mapped? Submit the page for IO. */ |
1981 | if (mpd->map.m_len == 0) { |
1982 | err = mpage_submit_folio(mpd, folio: head->b_folio); |
1983 | if (err < 0) |
1984 | return err; |
1985 | mpage_folio_done(mpd, folio: head->b_folio); |
1986 | } |
1987 | if (lblk >= blocks) { |
1988 | mpd->scanned_until_end = 1; |
1989 | return 0; |
1990 | } |
1991 | return 1; |
1992 | } |
1993 | |
1994 | /* |
1995 | * mpage_process_folio - update folio buffers corresponding to changed extent |
1996 | * and may submit a fully mapped page for IO |
1997 | * @mpd: description of extent to map, on return next extent to map |
1998 | * @folio: Contains these buffers. |
1999 | * @m_lblk: logical block mapping. |
2000 | * @m_pblk: corresponding physical mapping. |
2001 | * @map_bh: determines on return whether this page requires any further |
2002 | * mapping or not. |
2003 | * |
2004 | * Scan given folio buffers corresponding to changed extent and update buffer |
2005 | * state according to new extent state. |
2006 | * We map delalloc buffers to their physical location, clear unwritten bits. |
2007 | * If the given folio is not fully mapped, we update @mpd to the next extent in |
2008 | * the given folio that needs mapping and return @map_bh as true. |
2009 | */ |
2010 | static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio, |
2011 | ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk, |
2012 | bool *map_bh) |
2013 | { |
2014 | struct buffer_head *head, *bh; |
2015 | ext4_io_end_t *io_end = mpd->io_submit.io_end; |
2016 | ext4_lblk_t lblk = *m_lblk; |
2017 | ext4_fsblk_t pblock = *m_pblk; |
2018 | int err = 0; |
2019 | int blkbits = mpd->inode->i_blkbits; |
2020 | ssize_t io_end_size = 0; |
2021 | struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end); |
2022 | |
2023 | bh = head = folio_buffers(folio); |
2024 | do { |
2025 | if (lblk < mpd->map.m_lblk) |
2026 | continue; |
2027 | if (lblk >= mpd->map.m_lblk + mpd->map.m_len) { |
2028 | /* |
2029 | * Buffer after end of mapped extent. |
2030 | * Find next buffer in the folio to map. |
2031 | */ |
2032 | mpd->map.m_len = 0; |
2033 | mpd->map.m_flags = 0; |
2034 | io_end_vec->size += io_end_size; |
2035 | |
2036 | err = mpage_process_page_bufs(mpd, head, bh, lblk); |
2037 | if (err > 0) |
2038 | err = 0; |
2039 | if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) { |
2040 | io_end_vec = ext4_alloc_io_end_vec(io_end); |
2041 | if (IS_ERR(ptr: io_end_vec)) { |
2042 | err = PTR_ERR(ptr: io_end_vec); |
2043 | goto out; |
2044 | } |
2045 | io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits; |
2046 | } |
2047 | *map_bh = true; |
2048 | goto out; |
2049 | } |
2050 | if (buffer_delay(bh)) { |
2051 | clear_buffer_delay(bh); |
2052 | bh->b_blocknr = pblock++; |
2053 | } |
2054 | clear_buffer_unwritten(bh); |
2055 | io_end_size += (1 << blkbits); |
2056 | } while (lblk++, (bh = bh->b_this_page) != head); |
2057 | |
2058 | io_end_vec->size += io_end_size; |
2059 | *map_bh = false; |
2060 | out: |
2061 | *m_lblk = lblk; |
2062 | *m_pblk = pblock; |
2063 | return err; |
2064 | } |
2065 | |
2066 | /* |
2067 | * mpage_map_buffers - update buffers corresponding to changed extent and |
2068 | * submit fully mapped pages for IO |
2069 | * |
2070 | * @mpd - description of extent to map, on return next extent to map |
2071 | * |
2072 | * Scan buffers corresponding to changed extent (we expect corresponding pages |
2073 | * to be already locked) and update buffer state according to new extent state. |
2074 | * We map delalloc buffers to their physical location, clear unwritten bits, |
2075 | * and mark buffers as uninit when we perform writes to unwritten extents |
2076 | * and do extent conversion after IO is finished. If the last page is not fully |
2077 | * mapped, we update @mpd to the next extent in the last page that needs |
2078 | * mapping. Otherwise we submit the page for IO. |
2079 | */ |
2080 | static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) |
2081 | { |
2082 | struct folio_batch fbatch; |
2083 | unsigned nr, i; |
2084 | struct inode *inode = mpd->inode; |
2085 | int bpp_bits = PAGE_SHIFT - inode->i_blkbits; |
2086 | pgoff_t start, end; |
2087 | ext4_lblk_t lblk; |
2088 | ext4_fsblk_t pblock; |
2089 | int err; |
2090 | bool map_bh = false; |
2091 | |
2092 | start = mpd->map.m_lblk >> bpp_bits; |
2093 | end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits; |
2094 | lblk = start << bpp_bits; |
2095 | pblock = mpd->map.m_pblk; |
2096 | |
2097 | folio_batch_init(fbatch: &fbatch); |
2098 | while (start <= end) { |
2099 | nr = filemap_get_folios(mapping: inode->i_mapping, start: &start, end, fbatch: &fbatch); |
2100 | if (nr == 0) |
2101 | break; |
2102 | for (i = 0; i < nr; i++) { |
2103 | struct folio *folio = fbatch.folios[i]; |
2104 | |
2105 | err = mpage_process_folio(mpd, folio, m_lblk: &lblk, m_pblk: &pblock, |
2106 | map_bh: &map_bh); |
2107 | /* |
2108 | * If map_bh is true, the page may require further bh |
2109 | * mapping, or it may have been submitted for IO. |
2110 | * Either way, return so the caller can map the next extent. |
2111 | */ |
2112 | if (err < 0 || map_bh) |
2113 | goto out; |
2114 | /* Page fully mapped - let IO run! */ |
2115 | err = mpage_submit_folio(mpd, folio); |
2116 | if (err < 0) |
2117 | goto out; |
2118 | mpage_folio_done(mpd, folio); |
2119 | } |
2120 | folio_batch_release(fbatch: &fbatch); |
2121 | } |
2122 | /* Extent fully mapped and matches the page boundary. We are done. */ |
2123 | mpd->map.m_len = 0; |
2124 | mpd->map.m_flags = 0; |
2125 | return 0; |
2126 | out: |
2127 | folio_batch_release(fbatch: &fbatch); |
2128 | return err; |
2129 | } |
2130 | |
2131 | static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd) |
2132 | { |
2133 | struct inode *inode = mpd->inode; |
2134 | struct ext4_map_blocks *map = &mpd->map; |
2135 | int get_blocks_flags; |
2136 | int err, dioread_nolock; |
2137 | |
2138 | trace_ext4_da_write_pages_extent(inode, map); |
2139 | /* |
2140 | * Call ext4_map_blocks() to allocate any delayed allocation blocks, or |
2141 | * to convert an unwritten extent to be initialized (in the case |
2142 | * where we have written into one or more preallocated blocks). It is |
2143 | * possible that we're going to need more metadata blocks than |
2144 | * previously reserved. However, we must not fail: we're in |
2145 | * writeback, there is nothing we can do about an error, and failing |
2146 | * would result in data loss. So use reserved blocks to allocate |
2147 | * metadata if possible. |
2148 | * |
2149 | * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if |
2150 | * the blocks in question are delalloc blocks. This indicates |
2151 | * that the blocks and quotas have already been checked when |
2152 | * the data was copied into the page cache. |
2153 | */ |
2154 | get_blocks_flags = EXT4_GET_BLOCKS_CREATE | |
2155 | EXT4_GET_BLOCKS_METADATA_NOFAIL | |
2156 | EXT4_GET_BLOCKS_IO_SUBMIT; |
2157 | dioread_nolock = ext4_should_dioread_nolock(inode); |
2158 | if (dioread_nolock) |
2159 | get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; |
2160 | if (map->m_flags & BIT(BH_Delay)) |
2161 | get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; |
2162 | |
2163 | err = ext4_map_blocks(handle, inode, map, flags: get_blocks_flags); |
2164 | if (err < 0) |
2165 | return err; |
2166 | if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) { |
2167 | if (!mpd->io_submit.io_end->handle && |
2168 | ext4_handle_valid(handle)) { |
2169 | mpd->io_submit.io_end->handle = handle->h_rsv_handle; |
2170 | handle->h_rsv_handle = NULL; |
2171 | } |
2172 | ext4_set_io_unwritten_flag(inode, io_end: mpd->io_submit.io_end); |
2173 | } |
2174 | |
2175 | BUG_ON(map->m_len == 0); |
2176 | return 0; |
2177 | } |
2178 | |
2179 | /* |
2180 | * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length |
2181 | * mpd->len and submit pages underlying it for IO |
2182 | * |
2183 | * @handle - handle for journal operations |
2184 | * @mpd - extent to map |
2185 | * @give_up_on_write - we set this to true iff there is a fatal error and there |
2186 | * is no hope of writing the data. The caller should discard |
2187 | * dirty pages to avoid infinite loops. |
2188 | * |
2189 | * The function maps extent starting at mpd->lblk of length mpd->len. If it is |
2190 | * delayed, blocks are allocated, if it is unwritten, we may need to convert |
2191 | * them to initialized or split the described range from larger unwritten |
2192 | * extent. Note that we need not map all of the described range since allocation |
2193 | * can return fewer blocks or the range is covered by more unwritten extents. We |
2194 | * cannot map more because we are limited by reserved transaction credits. On |
2195 | * the other hand we always make sure that the last touched page is fully |
2196 | * mapped so that it can be written out (and thus forward progress is |
2197 | * guaranteed). After mapping we submit all mapped pages for IO. |
2198 | */ |
2199 | static int mpage_map_and_submit_extent(handle_t *handle, |
2200 | struct mpage_da_data *mpd, |
2201 | bool *give_up_on_write) |
2202 | { |
2203 | struct inode *inode = mpd->inode; |
2204 | struct ext4_map_blocks *map = &mpd->map; |
2205 | int err; |
2206 | loff_t disksize; |
2207 | int progress = 0; |
2208 | ext4_io_end_t *io_end = mpd->io_submit.io_end; |
2209 | struct ext4_io_end_vec *io_end_vec; |
2210 | |
2211 | io_end_vec = ext4_alloc_io_end_vec(io_end); |
2212 | if (IS_ERR(ptr: io_end_vec)) |
2213 | return PTR_ERR(ptr: io_end_vec); |
2214 | io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits; |
2215 | do { |
2216 | err = mpage_map_one_extent(handle, mpd); |
2217 | if (err < 0) { |
2218 | struct super_block *sb = inode->i_sb; |
2219 | |
2220 | if (ext4_forced_shutdown(sb)) |
2221 | goto invalidate_dirty_pages; |
2222 | /* |
2223 | * Let the upper layers retry transient errors. |
2224 | * In the case of ENOSPC, if ext4_count_free_clusters() |
2225 | * is non-zero, a commit should free up blocks. |
2226 | */ |
2227 | if ((err == -ENOMEM) || |
2228 | (err == -ENOSPC && ext4_count_free_clusters(sb))) { |
2229 | if (progress) |
2230 | goto update_disksize; |
2231 | return err; |
2232 | } |
2233 | ext4_msg(sb, KERN_CRIT, |
2234 | "Delayed block allocation failed for " |
2235 | "inode %lu at logical offset %llu with" |
2236 | " max blocks %u with error %d" , |
2237 | inode->i_ino, |
2238 | (unsigned long long)map->m_lblk, |
2239 | (unsigned)map->m_len, -err); |
2240 | ext4_msg(sb, KERN_CRIT, |
2241 | "This should not happen!! Data will " |
2242 | "be lost\n" ); |
2243 | if (err == -ENOSPC) |
2244 | ext4_print_free_blocks(inode); |
2245 | invalidate_dirty_pages: |
2246 | *give_up_on_write = true; |
2247 | return err; |
2248 | } |
2249 | progress = 1; |
2250 | /* |
2251 | * Update buffer state, submit mapped pages, and get us new |
2252 | * extent to map |
2253 | */ |
2254 | err = mpage_map_and_submit_buffers(mpd); |
2255 | if (err < 0) |
2256 | goto update_disksize; |
2257 | } while (map->m_len); |
2258 | |
2259 | update_disksize: |
2260 | /* |
2261 | * Update on-disk size after IO is submitted. Races with |
2262 | * truncate are avoided by checking i_size under i_data_sem. |
2263 | */ |
2264 | disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT; |
2265 | if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) { |
2266 | int err2; |
2267 | loff_t i_size; |
2268 | |
2269 | down_write(sem: &EXT4_I(inode)->i_data_sem); |
2270 | i_size = i_size_read(inode); |
2271 | if (disksize > i_size) |
2272 | disksize = i_size; |
2273 | if (disksize > EXT4_I(inode)->i_disksize) |
2274 | EXT4_I(inode)->i_disksize = disksize; |
2275 | up_write(sem: &EXT4_I(inode)->i_data_sem); |
2276 | err2 = ext4_mark_inode_dirty(handle, inode); |
2277 | if (err2) { |
2278 | ext4_error_err(inode->i_sb, -err2, |
2279 | "Failed to mark inode %lu dirty" , |
2280 | inode->i_ino); |
2281 | } |
2282 | if (!err) |
2283 | err = err2; |
2284 | } |
2285 | return err; |
2286 | } |
2287 | |
2288 | /* |
2289 | * Calculate the total number of credits to reserve for one writepages |
2290 | * iteration. This is called from ext4_writepages(). We map an extent of |
2291 | * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping |
2292 | * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN + |
2293 | * bpp - 1 blocks in bpp different extents. |
2294 | */ |
2295 | static int ext4_da_writepages_trans_blocks(struct inode *inode) |
2296 | { |
2297 | int bpp = ext4_journal_blocks_per_page(inode); |
2298 | |
2299 | return ext4_meta_trans_blocks(inode, |
2300 | MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, pextents: bpp); |
2301 | } |
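/*
 * Worked example of the estimate above (illustrative only; the helper
 * name is hypothetical). With a 4KiB page and 1KiB blocks, bpp == 4, so
 * one iteration may map MAX_WRITEPAGES_EXTENT_LEN + 4 - 1 == 2051
 * blocks, and the handle is sized for the worst case of bpp == 4
 * discontiguous extents.
 */
#if 0
static void example_credit_estimate(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);	/* e.g. 4 */

	pr_debug("credits for up to %d blocks in %d extents: %d\n",
		 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp,
		 ext4_da_writepages_trans_blocks(inode));
}
#endif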
2302 | |
2303 | static int ext4_journal_folio_buffers(handle_t *handle, struct folio *folio, |
2304 | size_t len) |
2305 | { |
2306 | struct buffer_head *page_bufs = folio_buffers(folio); |
2307 | struct inode *inode = folio->mapping->host; |
2308 | int ret, err; |
2309 | |
2310 | ret = ext4_walk_page_buffers(handle, inode, head: page_bufs, from: 0, to: len, |
2311 | NULL, fn: do_journal_get_write_access); |
2312 | err = ext4_walk_page_buffers(handle, inode, head: page_bufs, from: 0, to: len, |
2313 | NULL, fn: write_end_fn); |
2314 | if (ret == 0) |
2315 | ret = err; |
2316 | err = ext4_jbd2_inode_add_write(handle, inode, start_byte: folio_pos(folio), length: len); |
2317 | if (ret == 0) |
2318 | ret = err; |
2319 | EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; |
2320 | |
2321 | return ret; |
2322 | } |
2323 | |
2324 | static int mpage_journal_page_buffers(handle_t *handle, |
2325 | struct mpage_da_data *mpd, |
2326 | struct folio *folio) |
2327 | { |
2328 | struct inode *inode = mpd->inode; |
2329 | loff_t size = i_size_read(inode); |
2330 | size_t len = folio_size(folio); |
2331 | |
2332 | folio_clear_checked(folio); |
2333 | mpd->wbc->nr_to_write--; |
2334 | |
2335 | if (folio_pos(folio) + len > size && |
2336 | !ext4_verity_in_progress(inode)) |
2337 | len = size - folio_pos(folio); |
2338 | |
2339 | return ext4_journal_folio_buffers(handle, folio, len); |
2340 | } |
2341 | |
2342 | /* |
2343 | * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages |
2344 | * needing mapping, submit mapped pages |
2345 | * |
2346 | * @mpd - where to look for pages |
2347 | * |
2348 | * Walk dirty pages in the mapping. If they are fully mapped, submit them for |
2349 | * IO immediately. If we cannot map blocks, we submit just the already mapped |
2350 | * buffers in the page for IO and keep the page dirty. When we can map blocks |
2351 | * and we find a page which isn't mapped, we start accumulating an extent of |
2352 | * buffers underlying these pages that needs mapping (formed by either delayed |
2353 | * or unwritten buffers). We also lock the pages containing these buffers. The |
2354 | * extent found is returned in @mpd structure (starting at mpd->lblk with |
2355 | * length mpd->len blocks). |
2356 | * |
2357 | * Note that this function can attach bios to one io_end structure which are |
2358 | * neither logically nor physically contiguous. Although it may seem an |
2359 | * unnecessary complication, it is actually inevitable in blocksize < pagesize |
2360 | * case as we need to track IO to all buffers underlying a page in one io_end. |
2361 | */ |
2362 | static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) |
2363 | { |
2364 | struct address_space *mapping = mpd->inode->i_mapping; |
2365 | struct folio_batch fbatch; |
2366 | unsigned int nr_folios; |
2367 | pgoff_t index = mpd->first_page; |
2368 | pgoff_t end = mpd->last_page; |
2369 | xa_mark_t tag; |
2370 | int i, err = 0; |
2371 | int blkbits = mpd->inode->i_blkbits; |
2372 | ext4_lblk_t lblk; |
2373 | struct buffer_head *head; |
2374 | handle_t *handle = NULL; |
2375 | int bpp = ext4_journal_blocks_per_page(inode: mpd->inode); |
2376 | |
2377 | if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages) |
2378 | tag = PAGECACHE_TAG_TOWRITE; |
2379 | else |
2380 | tag = PAGECACHE_TAG_DIRTY; |
2381 | |
2382 | mpd->map.m_len = 0; |
2383 | mpd->next_page = index; |
2384 | if (ext4_should_journal_data(inode: mpd->inode)) { |
2385 | handle = ext4_journal_start(mpd->inode, EXT4_HT_WRITE_PAGE, |
2386 | bpp); |
2387 | if (IS_ERR(ptr: handle)) |
2388 | return PTR_ERR(ptr: handle); |
2389 | } |
2390 | folio_batch_init(fbatch: &fbatch); |
2391 | while (index <= end) { |
2392 | nr_folios = filemap_get_folios_tag(mapping, start: &index, end, |
2393 | tag, fbatch: &fbatch); |
2394 | if (nr_folios == 0) |
2395 | break; |
2396 | |
2397 | for (i = 0; i < nr_folios; i++) { |
2398 | struct folio *folio = fbatch.folios[i]; |
2399 | |
2400 | /* |
2401 | * Accumulated enough dirty pages? This doesn't apply |
2402 | * to WB_SYNC_ALL mode. For integrity sync we have to |
2403 | * keep going because someone may be concurrently |
2404 | * dirtying pages, and we might have synced a lot of |
2405 | * newly appeared dirty pages, but have not synced all |
2406 | * of the old dirty pages. |
2407 | */ |
2408 | if (mpd->wbc->sync_mode == WB_SYNC_NONE && |
2409 | mpd->wbc->nr_to_write <= |
2410 | mpd->map.m_len >> (PAGE_SHIFT - blkbits)) |
2411 | goto out; |
2412 | |
2413 | /* If we can't merge this page, we are done. */ |
2414 | if (mpd->map.m_len > 0 && mpd->next_page != folio->index) |
2415 | goto out; |
2416 | |
2417 | if (handle) { |
2418 | err = ext4_journal_ensure_credits(handle, credits: bpp, |
2419 | revoke_creds: 0); |
2420 | if (err < 0) |
2421 | goto out; |
2422 | } |
2423 | |
2424 | folio_lock(folio); |
2425 | /* |
2426 | * If the page is no longer dirty, or its mapping no |
2427 | * longer corresponds to the inode we are writing (which |
2428 | * means it has been truncated or invalidated), or the |
2429 | * page is already under writeback and we are not doing |
2430 | * a data integrity writeback, skip the page. |
2431 | */ |
2432 | if (!folio_test_dirty(folio) || |
2433 | (folio_test_writeback(folio) && |
2434 | (mpd->wbc->sync_mode == WB_SYNC_NONE)) || |
2435 | unlikely(folio->mapping != mapping)) { |
2436 | folio_unlock(folio); |
2437 | continue; |
2438 | } |
2439 | |
2440 | folio_wait_writeback(folio); |
2441 | BUG_ON(folio_test_writeback(folio)); |
2442 | |
2443 | /* |
2444 | * Should never happen but for buggy code in |
2445 | * other subsystems that call |
2446 | * set_page_dirty() without properly warning |
2447 | * the file system first. See [1] for more |
2448 | * information. |
2449 | * |
2450 | * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz |
2451 | */ |
2452 | if (!folio_buffers(folio)) { |
2453 | ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index); |
2454 | folio_clear_dirty(folio); |
2455 | folio_unlock(folio); |
2456 | continue; |
2457 | } |
2458 | |
2459 | if (mpd->map.m_len == 0) |
2460 | mpd->first_page = folio->index; |
2461 | mpd->next_page = folio_next_index(folio); |
2462 | /* |
2463 | * Writeout when we cannot modify metadata is simple. |
2464 | * Just submit the page. For data=journal mode we |
2465 | * first handle writeout of the page for checkpoint and |
2466 | * only after that handle delayed page dirtying. This |
2467 | * makes sure current data is checkpointed to the final |
2468 | * location before possibly journalling it again, which |
2469 | * is desirable when the page is frequently dirtied |
2470 | * through a pin. |
2471 | */ |
2472 | if (!mpd->can_map) { |
2473 | err = mpage_submit_folio(mpd, folio); |
2474 | if (err < 0) |
2475 | goto out; |
2476 | /* Pending dirtying of journalled data? */ |
2477 | if (folio_test_checked(folio)) { |
2478 | err = mpage_journal_page_buffers(handle, |
2479 | mpd, folio); |
2480 | if (err < 0) |
2481 | goto out; |
2482 | mpd->journalled_more_data = 1; |
2483 | } |
2484 | mpage_folio_done(mpd, folio); |
2485 | } else { |
2486 | /* Add all dirty buffers to mpd */ |
2487 | lblk = ((ext4_lblk_t)folio->index) << |
2488 | (PAGE_SHIFT - blkbits); |
2489 | head = folio_buffers(folio); |
2490 | err = mpage_process_page_bufs(mpd, head, bh: head, |
2491 | lblk); |
2492 | if (err <= 0) |
2493 | goto out; |
2494 | err = 0; |
2495 | } |
2496 | } |
2497 | folio_batch_release(fbatch: &fbatch); |
2498 | cond_resched(); |
2499 | } |
2500 | mpd->scanned_until_end = 1; |
2501 | if (handle) |
2502 | ext4_journal_stop(handle); |
2503 | return 0; |
2504 | out: |
2505 | folio_batch_release(fbatch: &fbatch); |
2506 | if (handle) |
2507 | ext4_journal_stop(handle); |
2508 | return err; |
2509 | } |
2510 | |
2511 | static int ext4_do_writepages(struct mpage_da_data *mpd) |
2512 | { |
2513 | struct writeback_control *wbc = mpd->wbc; |
2514 | pgoff_t writeback_index = 0; |
2515 | long nr_to_write = wbc->nr_to_write; |
2516 | int range_whole = 0; |
2517 | int cycled = 1; |
2518 | handle_t *handle = NULL; |
2519 | struct inode *inode = mpd->inode; |
2520 | struct address_space *mapping = inode->i_mapping; |
2521 | int needed_blocks, rsv_blocks = 0, ret = 0; |
2522 | struct ext4_sb_info *sbi = EXT4_SB(sb: mapping->host->i_sb); |
2523 | struct blk_plug plug; |
2524 | bool give_up_on_write = false; |
2525 | |
2526 | trace_ext4_writepages(inode, wbc); |
2527 | |
2528 | /* |
2529 | * No pages to write? This is mainly a kludge to avoid starting |
2530 | * a transaction for special inodes like the journal inode on last iput() |
2531 | * because that could violate lock ordering on umount. |
2532 | */ |
2533 | if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) |
2534 | goto out_writepages; |
2535 | |
2536 | /* |
2537 | * If the filesystem has aborted, it is read-only, so return |
2538 | * right away instead of dumping stack traces later on that |
2539 | * will obscure the real source of the problem. We test |
2540 | * fs shutdown state instead of sb->s_flag's SB_RDONLY because |
2541 | * the latter could be true if the filesystem is mounted |
2542 | * read-only, and in that case, ext4_writepages should |
2543 | * *never* be called, so if that ever happens, we would want |
2544 | * the stack trace. |
2545 | */ |
2546 | if (unlikely(ext4_forced_shutdown(mapping->host->i_sb))) { |
2547 | ret = -EROFS; |
2548 | goto out_writepages; |
2549 | } |
2550 | |
2551 | /* |
2552 | * If we have inline data and arrive here, it means that |
2553 | * we will soon create the block for the 1st page, so |
2554 | * we'd better clear the inline data here. |
2555 | */ |
2556 | if (ext4_has_inline_data(inode)) { |
2557 | /* Just inode will be modified... */ |
2558 | handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); |
2559 | if (IS_ERR(ptr: handle)) { |
2560 | ret = PTR_ERR(ptr: handle); |
2561 | goto out_writepages; |
2562 | } |
2563 | BUG_ON(ext4_test_inode_state(inode, |
2564 | EXT4_STATE_MAY_INLINE_DATA)); |
2565 | ext4_destroy_inline_data(handle, inode); |
2566 | ext4_journal_stop(handle); |
2567 | } |
2568 | |
2569 | /* |
2570 | * data=journal mode does not do delalloc so we just need to writeout / |
2571 | * journal already mapped buffers. On the other hand we need to commit |
2572 | * transaction to make data stable. We expect all the data to be |
2573 | * already in the journal (the only exception being DMA-pinned pages |
2574 | * dirtied behind our back), so we commit the transaction here and run the |
2575 | * writeback loop to checkpoint them. The checkpointing is not actually |
2576 | * necessary to make data persistent *but* quite a few places (extent |
2577 | * shifting operations, fsverity, ...) depend on being able to drop |
2578 | * pagecache pages after calling filemap_write_and_wait() and for that |
2579 | * checkpointing needs to happen. |
2580 | */ |
2581 | if (ext4_should_journal_data(inode)) { |
2582 | mpd->can_map = 0; |
2583 | if (wbc->sync_mode == WB_SYNC_ALL) |
2584 | ext4_fc_commit(journal: sbi->s_journal, |
2585 | EXT4_I(inode)->i_datasync_tid); |
2586 | } |
2587 | mpd->journalled_more_data = 0; |
2588 | |
2589 | if (ext4_should_dioread_nolock(inode)) { |
2590 | /* |
2591 | * We may need to convert up to one extent per block in |
2592 | * the page and we may dirty the inode. |
2593 | */ |
2594 | rsv_blocks = 1 + ext4_chunk_trans_blocks(inode, |
2595 | PAGE_SIZE >> inode->i_blkbits); |
2596 | } |
2597 | |
2598 | if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) |
2599 | range_whole = 1; |
2600 | |
2601 | if (wbc->range_cyclic) { |
2602 | writeback_index = mapping->writeback_index; |
2603 | if (writeback_index) |
2604 | cycled = 0; |
2605 | mpd->first_page = writeback_index; |
2606 | mpd->last_page = -1; |
2607 | } else { |
2608 | mpd->first_page = wbc->range_start >> PAGE_SHIFT; |
2609 | mpd->last_page = wbc->range_end >> PAGE_SHIFT; |
2610 | } |
2611 | |
2612 | ext4_io_submit_init(io: &mpd->io_submit, wbc); |
2613 | retry: |
2614 | if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) |
2615 | tag_pages_for_writeback(mapping, start: mpd->first_page, |
2616 | end: mpd->last_page); |
2617 | blk_start_plug(&plug); |
2618 | |
2619 | /* |
2620 | * First write back pages that don't need mapping - we can avoid |
2621 | * starting a transaction unnecessarily and also avoid being blocked |
2622 | * in the block layer on device congestion while having a transaction |
2623 | * started. |
2624 | */ |
2625 | mpd->do_map = 0; |
2626 | mpd->scanned_until_end = 0; |
2627 | mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); |
2628 | if (!mpd->io_submit.io_end) { |
2629 | ret = -ENOMEM; |
2630 | goto unplug; |
2631 | } |
2632 | ret = mpage_prepare_extent_to_map(mpd); |
2633 | /* Unlock pages we didn't use */ |
2634 | mpage_release_unused_pages(mpd, invalidate: false); |
2635 | /* Submit prepared bio */ |
2636 | ext4_io_submit(io: &mpd->io_submit); |
2637 | ext4_put_io_end_defer(io_end: mpd->io_submit.io_end); |
2638 | mpd->io_submit.io_end = NULL; |
2639 | if (ret < 0) |
2640 | goto unplug; |
2641 | |
2642 | while (!mpd->scanned_until_end && wbc->nr_to_write > 0) { |
2643 | /* For each extent of pages we use new io_end */ |
2644 | mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); |
2645 | if (!mpd->io_submit.io_end) { |
2646 | ret = -ENOMEM; |
2647 | break; |
2648 | } |
2649 | |
2650 | WARN_ON_ONCE(!mpd->can_map); |
2651 | /* |
2652 | * We have two constraints: we find one extent to map and we |
2653 | * must always write out the whole page (which makes a difference |
2654 | * when blocksize < pagesize) so that we don't block on IO when we |
2655 | * try to write out the rest of the page. Journalled mode is |
2656 | * not supported by delalloc. |
2657 | */ |
2658 | BUG_ON(ext4_should_journal_data(inode)); |
2659 | needed_blocks = ext4_da_writepages_trans_blocks(inode); |
2660 | |
2661 | /* start a new transaction */ |
2662 | handle = ext4_journal_start_with_reserve(inode, |
2663 | EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks); |
2664 | if (IS_ERR(ptr: handle)) { |
2665 | ret = PTR_ERR(ptr: handle); |
2666 | ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " |
2667 | "%ld pages, ino %lu; err %d" , __func__, |
2668 | wbc->nr_to_write, inode->i_ino, ret); |
2669 | /* Release allocated io_end */ |
2670 | ext4_put_io_end(io_end: mpd->io_submit.io_end); |
2671 | mpd->io_submit.io_end = NULL; |
2672 | break; |
2673 | } |
2674 | mpd->do_map = 1; |
2675 | |
2676 | trace_ext4_da_write_pages(inode, first_page: mpd->first_page, wbc); |
2677 | ret = mpage_prepare_extent_to_map(mpd); |
2678 | if (!ret && mpd->map.m_len) |
2679 | ret = mpage_map_and_submit_extent(handle, mpd, |
2680 | give_up_on_write: &give_up_on_write); |
2681 | /* |
2682 | * Caution: If the handle is synchronous, |
2683 | * ext4_journal_stop() can wait for transaction commit |
2684 | * to finish which may depend on writeback of pages to |
2685 | * complete or on page lock to be released. In that |
2686 | * case, we have to wait until after we have |
2687 | * submitted all the IO, released page locks we hold, |
2688 | * and dropped io_end reference (for extent conversion |
2689 | * to be able to complete) before stopping the handle. |
2690 | */ |
2691 | if (!ext4_handle_valid(handle) || handle->h_sync == 0) { |
2692 | ext4_journal_stop(handle); |
2693 | handle = NULL; |
2694 | mpd->do_map = 0; |
2695 | } |
2696 | /* Unlock pages we didn't use */ |
2697 | mpage_release_unused_pages(mpd, invalidate: give_up_on_write); |
2698 | /* Submit prepared bio */ |
2699 | ext4_io_submit(io: &mpd->io_submit); |
2700 | |
2701 | /* |
2702 | * Drop our io_end reference we got from init. We have |
2703 | * to be careful and use deferred io_end finishing if |
2704 | * we are still holding the transaction as we can |
2705 | * release the last reference to io_end which may end |
2706 | * up doing unwritten extent conversion. |
2707 | */ |
2708 | if (handle) { |
2709 | ext4_put_io_end_defer(io_end: mpd->io_submit.io_end); |
2710 | ext4_journal_stop(handle); |
2711 | } else |
2712 | ext4_put_io_end(io_end: mpd->io_submit.io_end); |
2713 | mpd->io_submit.io_end = NULL; |
2714 | |
2715 | if (ret == -ENOSPC && sbi->s_journal) { |
2716 | /* |
2717 | * Commit the transaction which would |
2718 | * free blocks released in the transaction |
2719 | * and try again |
2720 | */ |
2721 | jbd2_journal_force_commit_nested(sbi->s_journal); |
2722 | ret = 0; |
2723 | continue; |
2724 | } |
2725 | /* Fatal error - ENOMEM, EIO... */ |
2726 | if (ret) |
2727 | break; |
2728 | } |
2729 | unplug: |
2730 | blk_finish_plug(&plug); |
2731 | if (!ret && !cycled && wbc->nr_to_write > 0) { |
2732 | cycled = 1; |
2733 | mpd->last_page = writeback_index - 1; |
2734 | mpd->first_page = 0; |
2735 | goto retry; |
2736 | } |
2737 | |
2738 | /* Update index */ |
2739 | if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) |
2740 | /* |
2741 | * Set the writeback_index so that range_cyclic |
2742 | * mode will write it back later |
2743 | */ |
2744 | mapping->writeback_index = mpd->first_page; |
2745 | |
2746 | out_writepages: |
2747 | trace_ext4_writepages_result(inode, wbc, ret, |
2748 | pages_written: nr_to_write - wbc->nr_to_write); |
2749 | return ret; |
2750 | } |
2751 | |
2752 | static int ext4_writepages(struct address_space *mapping, |
2753 | struct writeback_control *wbc) |
2754 | { |
2755 | struct super_block *sb = mapping->host->i_sb; |
2756 | struct mpage_da_data mpd = { |
2757 | .inode = mapping->host, |
2758 | .wbc = wbc, |
2759 | .can_map = 1, |
2760 | }; |
2761 | int ret; |
2762 | int alloc_ctx; |
2763 | |
2764 | if (unlikely(ext4_forced_shutdown(sb))) |
2765 | return -EIO; |
2766 | |
2767 | alloc_ctx = ext4_writepages_down_read(sb); |
2768 | ret = ext4_do_writepages(mpd: &mpd); |
2769 | /* |
2770 | * For data=journal writeback we could have come across pages marked |
2771 | * for delayed dirtying (PageChecked) which were just added to the |
2772 | * running transaction. Try once more to get them to stable storage. |
2773 | */ |
2774 | if (!ret && mpd.journalled_more_data) |
2775 | ret = ext4_do_writepages(mpd: &mpd); |
2776 | ext4_writepages_up_read(sb, ctx: alloc_ctx); |
2777 | |
2778 | return ret; |
2779 | } |
2780 | |
2781 | int ext4_normal_submit_inode_data_buffers(struct jbd2_inode *jinode) |
2782 | { |
2783 | struct writeback_control wbc = { |
2784 | .sync_mode = WB_SYNC_ALL, |
2785 | .nr_to_write = LONG_MAX, |
2786 | .range_start = jinode->i_dirty_start, |
2787 | .range_end = jinode->i_dirty_end, |
2788 | }; |
2789 | struct mpage_da_data mpd = { |
2790 | .inode = jinode->i_vfs_inode, |
2791 | .wbc = &wbc, |
2792 | .can_map = 0, |
2793 | }; |
2794 | return ext4_do_writepages(mpd: &mpd); |
2795 | } |
2796 | |
2797 | static int ext4_dax_writepages(struct address_space *mapping, |
2798 | struct writeback_control *wbc) |
2799 | { |
2800 | int ret; |
2801 | long nr_to_write = wbc->nr_to_write; |
2802 | struct inode *inode = mapping->host; |
2803 | int alloc_ctx; |
2804 | |
2805 | if (unlikely(ext4_forced_shutdown(inode->i_sb))) |
2806 | return -EIO; |
2807 | |
2808 | alloc_ctx = ext4_writepages_down_read(sb: inode->i_sb); |
2809 | trace_ext4_writepages(inode, wbc); |
2810 | |
2811 | ret = dax_writeback_mapping_range(mapping, |
2812 | dax_dev: EXT4_SB(sb: inode->i_sb)->s_daxdev, wbc); |
2813 | trace_ext4_writepages_result(inode, wbc, ret, |
2814 | pages_written: nr_to_write - wbc->nr_to_write); |
2815 | ext4_writepages_up_read(sb: inode->i_sb, ctx: alloc_ctx); |
2816 | return ret; |
2817 | } |
2818 | |
2819 | static int ext4_nonda_switch(struct super_block *sb) |
2820 | { |
2821 | s64 free_clusters, dirty_clusters; |
2822 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
2823 | |
2824 | /* |
2825 | * Switch to non-delalloc mode if we are running low |
2826 | * on free blocks. The free block accounting via percpu |
2827 | * counters can get slightly wrong, with up to percpu_counter_batch |
2828 | * accumulated on each CPU without updating the global counters. |
2829 | * Delalloc needs accurate free block accounting, so switch |
2830 | * to non-delalloc when we are near the error range. |
2831 | */ |
2832 | free_clusters = |
2833 | percpu_counter_read_positive(fbc: &sbi->s_freeclusters_counter); |
2834 | dirty_clusters = |
2835 | percpu_counter_read_positive(fbc: &sbi->s_dirtyclusters_counter); |
2836 | /* |
2837 | * Start pushing delalloc when 1/2 of free blocks are dirty. |
2838 | */ |
2839 | if (dirty_clusters && (free_clusters < 2 * dirty_clusters)) |
2840 | try_to_writeback_inodes_sb(sb, reason: WB_REASON_FS_FREE_SPACE); |
2841 | |
2842 | if (2 * free_clusters < 3 * dirty_clusters || |
2843 | free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) { |
2844 | /* |
2845 | * free block count is less than 150% of the dirty block count, |
2846 | * or the free block count is below the watermark |
2847 | */ |
2848 | return 1; |
2849 | } |
2850 | return 0; |
2851 | } |
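/*
 * Worked example of the cutoffs above (illustrative only; the helper
 * name is hypothetical, and the EXT4_FREECLUSTERS_WATERMARK term is
 * ignored for brevity). With free_clusters == 1000, background
 * writeback is kicked once dirty_clusters reaches 501 (1000 < 1002),
 * and delalloc is switched off once it reaches 667 (2000 < 2001).
 */
#if 0
static bool example_low_space_check(s64 free_clusters, s64 dirty_clusters)
{
	if (dirty_clusters && free_clusters < 2 * dirty_clusters)
		pr_debug("would kick background writeback\n");
	return 2 * free_clusters < 3 * dirty_clusters;
}
#endif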
2852 | |
2853 | static int ext4_da_write_begin(struct file *file, struct address_space *mapping, |
2854 | loff_t pos, unsigned len, |
2855 | struct page **pagep, void **fsdata) |
2856 | { |
2857 | int ret, retries = 0; |
2858 | struct folio *folio; |
2859 | pgoff_t index; |
2860 | struct inode *inode = mapping->host; |
2861 | |
2862 | if (unlikely(ext4_forced_shutdown(inode->i_sb))) |
2863 | return -EIO; |
2864 | |
2865 | index = pos >> PAGE_SHIFT; |
2866 | |
2867 | if (ext4_nonda_switch(sb: inode->i_sb) || ext4_verity_in_progress(inode)) { |
2868 | *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; |
2869 | return ext4_write_begin(file, mapping, pos, |
2870 | len, pagep, fsdata); |
2871 | } |
2872 | *fsdata = (void *)0; |
2873 | trace_ext4_da_write_begin(inode, pos, len); |
2874 | |
2875 | if (ext4_test_inode_state(inode, bit: EXT4_STATE_MAY_INLINE_DATA)) { |
2876 | ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len, |
2877 | pagep, fsdata); |
2878 | if (ret < 0) |
2879 | return ret; |
2880 | if (ret == 1) |
2881 | return 0; |
2882 | } |
2883 | |
2884 | retry: |
2885 | folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, |
2886 | gfp: mapping_gfp_mask(mapping)); |
2887 | if (IS_ERR(ptr: folio)) |
2888 | return PTR_ERR(ptr: folio); |
2889 | |
2890 | /* In case writeback began while the folio was unlocked */ |
2891 | folio_wait_stable(folio); |
2892 | |
2893 | #ifdef CONFIG_FS_ENCRYPTION |
2894 | ret = ext4_block_write_begin(folio, pos, len, get_block: ext4_da_get_block_prep); |
2895 | #else |
2896 | ret = __block_write_begin(&folio->page, pos, len, ext4_da_get_block_prep); |
2897 | #endif |
2898 | if (ret < 0) { |
2899 | folio_unlock(folio); |
2900 | folio_put(folio); |
2901 | /* |
2902 | * block_write_begin may have instantiated a few blocks |
2903 | * outside i_size. Trim these off again. We don't need |
2904 | * i_size_read() because we hold the inode lock. |
2905 | */ |
2906 | if (pos + len > inode->i_size) |
2907 | ext4_truncate_failed_write(inode); |
2908 | |
2909 | if (ret == -ENOSPC && |
2910 | ext4_should_retry_alloc(sb: inode->i_sb, retries: &retries)) |
2911 | goto retry; |
2912 | return ret; |
2913 | } |
2914 | |
2915 | *pagep = &folio->page; |
2916 | return ret; |
2917 | } |
2918 | |
2919 | /* |
2920 | * Check whether we should update i_disksize when a write extends |
2921 | * the end of file but does not require block allocation |
2922 | */ |
2923 | static int ext4_da_should_update_i_disksize(struct folio *folio, |
2924 | unsigned long offset) |
2925 | { |
2926 | struct buffer_head *bh; |
2927 | struct inode *inode = folio->mapping->host; |
2928 | unsigned int idx; |
2929 | int i; |
2930 | |
2931 | bh = folio_buffers(folio); |
2932 | idx = offset >> inode->i_blkbits; |
2933 | |
2934 | for (i = 0; i < idx; i++) |
2935 | bh = bh->b_this_page; |
2936 | |
2937 | if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) |
2938 | return 0; |
2939 | return 1; |
2940 | } |
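/*
 * Worked example of the buffer walk above (illustrative only; the
 * helper name is hypothetical). With 1KiB blocks (i_blkbits == 10), an
 * offset of 3000 bytes into the folio gives idx == 2, i.e. the third
 * buffer_head in the folio's buffer list is the one examined.
 */
#if 0
static unsigned int example_buffer_index(struct inode *inode,
					 unsigned long offset)
{
	return offset >> inode->i_blkbits;	/* 3000 >> 10 == 2 */
}
#endif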
2941 | |
2942 | static int ext4_da_do_write_end(struct address_space *mapping, |
2943 | loff_t pos, unsigned len, unsigned copied, |
2944 | struct folio *folio) |
2945 | { |
2946 | struct inode *inode = mapping->host; |
2947 | loff_t old_size = inode->i_size; |
2948 | bool disksize_changed = false; |
2949 | loff_t new_i_size; |
2950 | |
2951 | /* |
2952 | * block_write_end() will mark the inode as dirty with the I_DIRTY_PAGES |
2953 | * flag, which is all that's needed to trigger page writeback. |
2954 | */ |
2955 | copied = block_write_end(NULL, mapping, pos, len, copied, |
2956 | &folio->page, NULL); |
2957 | new_i_size = pos + copied; |
2958 | |
2959 | /* |
2960 | * It's important to update i_size while still holding folio lock, |
2961 | * because folio writeout could otherwise come in and zero beyond |
2962 | * i_size. |
2963 | * |
2964 | * Since we are holding inode lock, we are sure i_disksize <= |
2965 | * i_size. We also know that if i_disksize < i_size, there are |
2966 | * delalloc writes pending in the range up to i_size. If the end of |
2967 | * the current write is <= i_size, there's no need to touch |
2968 | * i_disksize since writeback will push i_disksize up to i_size |
2969 | * eventually. If the end of the current write is > i_size and |
2970 | * inside an allocated block, which ext4_da_should_update_i_disksize() |
2971 | * checks, we need to update i_disksize here, because ext4_writepages() |
2972 | * paths that do not allocate blocks will not update i_disksize. |
2973 | */ |
2974 | if (new_i_size > inode->i_size) { |
2975 | unsigned long end; |
2976 | |
2977 | i_size_write(inode, i_size: new_i_size); |
2978 | end = (new_i_size - 1) & (PAGE_SIZE - 1); |
2979 | if (copied && ext4_da_should_update_i_disksize(folio, offset: end)) { |
2980 | ext4_update_i_disksize(inode, newsize: new_i_size); |
2981 | disksize_changed = true; |
2982 | } |
2983 | } |
2984 | |
2985 | folio_unlock(folio); |
2986 | folio_put(folio); |
2987 | |
2988 | if (old_size < pos) |
2989 | pagecache_isize_extended(inode, from: old_size, to: pos); |
2990 | |
2991 | if (disksize_changed) { |
2992 | handle_t *handle; |
2993 | |
2994 | handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); |
2995 | if (IS_ERR(ptr: handle)) |
2996 | return PTR_ERR(ptr: handle); |
2997 | ext4_mark_inode_dirty(handle, inode); |
2998 | ext4_journal_stop(handle); |
2999 | } |
3000 | |
3001 | return copied; |
3002 | } |
3003 | |
3004 | static int ext4_da_write_end(struct file *file, |
3005 | struct address_space *mapping, |
3006 | loff_t pos, unsigned len, unsigned copied, |
3007 | struct page *page, void *fsdata) |
3008 | { |
3009 | struct inode *inode = mapping->host; |
3010 | int write_mode = (int)(unsigned long)fsdata; |
3011 | struct folio *folio = page_folio(page); |
3012 | |
3013 | if (write_mode == FALL_BACK_TO_NONDELALLOC) |
3014 | return ext4_write_end(file, mapping, pos, |
3015 | len, copied, page: &folio->page, fsdata); |
3016 | |
3017 | trace_ext4_da_write_end(inode, pos, len, copied); |
3018 | |
3019 | if (write_mode != CONVERT_INLINE_DATA && |
3020 | ext4_test_inode_state(inode, bit: EXT4_STATE_MAY_INLINE_DATA) && |
3021 | ext4_has_inline_data(inode)) |
3022 | return ext4_write_inline_data_end(inode, pos, len, copied, |
3023 | folio); |
3024 | |
3025 | if (unlikely(copied < len) && !folio_test_uptodate(folio)) |
3026 | copied = 0; |
3027 | |
3028 | return ext4_da_do_write_end(mapping, pos, len, copied, folio); |
3029 | } |
3030 | |
3031 | /* |
3032 | * Force all delayed allocation blocks to be allocated for a given inode. |
3033 | */ |
3034 | int ext4_alloc_da_blocks(struct inode *inode) |
3035 | { |
3036 | trace_ext4_alloc_da_blocks(inode); |
3037 | |
3038 | if (!EXT4_I(inode)->i_reserved_data_blocks) |
3039 | return 0; |
3040 | |
3041 | /* |
3042 | * We do something simple for now. The filemap_flush() will |
3043 | * also start triggering a write of the data blocks, which is |
3044 | * not strictly speaking necessary (and for users of |
3045 | * laptop_mode, not even desirable). However, to do otherwise |
3046 | * would require replicating code paths in: |
3047 | * |
3048 | * ext4_writepages() -> |
3049 | * write_cache_pages() ---> (via passed in callback function) |
3050 | * __mpage_da_writepage() --> |
3051 | * mpage_add_bh_to_extent() |
3052 | * mpage_da_map_blocks() |
3053 | * |
3054 | * The problem is that write_cache_pages(), located in |
3055 | * mm/page-writeback.c, marks pages clean in preparation for |
3056 | * doing I/O, which is not desirable if we're not planning on |
3057 | * doing I/O at all. |
3058 | * |
3059 | * We could call write_cache_pages(), and then redirty all of |
3060 | * the pages by calling redirty_page_for_writepage() but that |
3061 | * would be ugly in the extreme. So instead we would need to |
3062 | * replicate parts of the code in the above functions, |
3063 | * simplifying them because we wouldn't actually intend to |
3064 | * write out the pages, but rather only collect contiguous |
3065 | * logical block extents, call the multi-block allocator, and |
3066 | * then update the buffer heads with the block allocations. |
3067 | * |
3068 | * For now, though, we'll cheat by calling filemap_flush(), |
3069 | * which will map the blocks, and start the I/O, but not |
3070 | * actually wait for the I/O to complete. |
3071 | */ |
3072 | return filemap_flush(inode->i_mapping); |
3073 | } |
3074 | |
3075 | /* |
3076 | * bmap() is special. It gets used by applications such as lilo and by |
3077 | * the swapper to find the on-disk block of a specific piece of data. |
3078 | * |
3079 | * Naturally, this is dangerous if the block concerned is still in the |
3080 | * journal. If somebody makes a swapfile on an ext4 data-journaling |
3081 | * filesystem and enables swap, then they may get a nasty shock when the |
3082 | * data getting swapped to that swapfile suddenly gets overwritten by |
3083 | * the original zeros written out previously to the journal and
3084 | * awaiting writeback in the kernel's buffer cache. |
3085 | * |
3086 | * So, if we see any bmap calls here on a modified, data-journaled file, |
3087 | * take extra steps to flush any blocks which might be in the cache. |
3088 | */ |
3089 | static sector_t ext4_bmap(struct address_space *mapping, sector_t block) |
3090 | { |
3091 | struct inode *inode = mapping->host; |
3092 | sector_t ret = 0; |
3093 | |
3094 | inode_lock_shared(inode); |
3095 | /* |
3096 | * We can get here for an inline file via the FIBMAP ioctl |
3097 | */ |
3098 | if (ext4_has_inline_data(inode)) |
3099 | goto out; |
3100 | |
3101 | if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && |
3102 | (test_opt(inode->i_sb, DELALLOC) || |
3103 | ext4_should_journal_data(inode))) { |
3104 | /* |
3105 | * With delalloc or journalled data we want to sync the file so |
3106 | * that we can make sure blocks are allocated for the file and
3107 | * the data is in place for the user to see it.
3108 | */ |
3109 | filemap_write_and_wait(mapping); |
3110 | } |
3111 | |
3112 | ret = iomap_bmap(mapping, bno: block, ops: &ext4_iomap_ops); |
3113 | |
3114 | out: |
3115 | inode_unlock_shared(inode); |
3116 | return ret; |
3117 | } |
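/*
 * Illustration, not part of the kernel build: a minimal userspace sketch
 * of the FIBMAP ioctl that reaches ext4_bmap() above. FIBMAP takes a
 * pointer to an int holding a logical block number and rewrites it in
 * place with the physical block number (0 for a hole). It requires
 * CAP_SYS_RAWIO; the file name is hypothetical.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("somefile", O_RDONLY);
 *		int block = 0;		// query logical block 0
 *
 *		if (fd < 0 || ioctl(fd, FIBMAP, &block) < 0) {
 *			perror("fibmap");
 *			return 1;
 *		}
 *		printf("logical block 0 -> physical block %d\n", block);
 *		return 0;
 *	}
 */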
3118 | |
3119 | static int ext4_read_folio(struct file *file, struct folio *folio) |
3120 | { |
3121 | int ret = -EAGAIN; |
3122 | struct inode *inode = folio->mapping->host; |
3123 | |
3124 | trace_ext4_read_folio(inode, folio); |
3125 | |
3126 | if (ext4_has_inline_data(inode)) |
3127 | ret = ext4_readpage_inline(inode, folio); |
3128 | |
3129 | if (ret == -EAGAIN) |
3130 | return ext4_mpage_readpages(inode, NULL, folio); |
3131 | |
3132 | return ret; |
3133 | } |
3134 | |
3135 | static void ext4_readahead(struct readahead_control *rac) |
3136 | { |
3137 | struct inode *inode = rac->mapping->host; |
3138 | |
3139 | /* If the file has inline data, no need to do readahead. */ |
3140 | if (ext4_has_inline_data(inode)) |
3141 | return; |
3142 | |
3143 | ext4_mpage_readpages(inode, rac, NULL); |
3144 | } |
3145 | |
3146 | static void ext4_invalidate_folio(struct folio *folio, size_t offset, |
3147 | size_t length) |
3148 | { |
3149 | trace_ext4_invalidate_folio(folio, offset, length); |
3150 | |
3151 | /* No journalling happens on data buffers when this function is used */ |
3152 | WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio))); |
3153 | |
3154 | block_invalidate_folio(folio, offset, length); |
3155 | } |
3156 | |
3157 | static int __ext4_journalled_invalidate_folio(struct folio *folio, |
3158 | size_t offset, size_t length) |
3159 | { |
3160 | journal_t *journal = EXT4_JOURNAL(folio->mapping->host); |
3161 | |
3162 | trace_ext4_journalled_invalidate_folio(folio, offset, length); |
3163 | |
3164 | /* |
3165 | * If it's a full truncate we just forget about the pending dirtying |
3166 | */ |
3167 | if (offset == 0 && length == folio_size(folio)) |
3168 | folio_clear_checked(folio); |
3169 | |
3170 | return jbd2_journal_invalidate_folio(journal, folio, offset, length); |
3171 | } |
3172 | |
3173 | /* Wrapper for aops... */ |
3174 | static void ext4_journalled_invalidate_folio(struct folio *folio, |
3175 | size_t offset, |
3176 | size_t length) |
3177 | { |
3178 | WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0); |
3179 | } |
3180 | |
3181 | static bool ext4_release_folio(struct folio *folio, gfp_t wait) |
3182 | { |
3183 | struct inode *inode = folio->mapping->host; |
3184 | journal_t *journal = EXT4_JOURNAL(inode); |
3185 | |
3186 | trace_ext4_release_folio(inode, folio); |
3187 | |
3188 | /* Page has dirty journalled data -> cannot release */ |
3189 | if (folio_test_checked(folio)) |
3190 | return false; |
3191 | if (journal) |
3192 | return jbd2_journal_try_to_free_buffers(journal, folio); |
3193 | else |
3194 | return try_to_free_buffers(folio); |
3195 | } |
3196 | |
3197 | static bool ext4_inode_datasync_dirty(struct inode *inode) |
3198 | { |
3199 | journal_t *journal = EXT4_SB(sb: inode->i_sb)->s_journal; |
3200 | |
3201 | if (journal) { |
3202 | if (jbd2_transaction_committed(journal, |
3203 | EXT4_I(inode)->i_datasync_tid)) |
3204 | return false; |
3205 | if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT)) |
3206 | return !list_empty(head: &EXT4_I(inode)->i_fc_list); |
3207 | return true; |
3208 | } |
3209 | |
3210 | /* Any metadata buffers to write? */ |
3211 | if (!list_empty(head: &inode->i_mapping->i_private_list)) |
3212 | return true; |
3213 | return inode->i_state & I_DIRTY_DATASYNC; |
3214 | } |
3215 | |
3216 | static void ext4_set_iomap(struct inode *inode, struct iomap *iomap, |
3217 | struct ext4_map_blocks *map, loff_t offset, |
3218 | loff_t length, unsigned int flags) |
3219 | { |
3220 | u8 blkbits = inode->i_blkbits; |
3221 | |
3222 | /* |
3223 | * Writes that span EOF might trigger an I/O size update on completion, |
3224 | * so consider them to be dirty for the purpose of O_DSYNC, even if
3225 | * no other metadata changes are being made or pending.
3226 | */ |
3227 | iomap->flags = 0; |
3228 | if (ext4_inode_datasync_dirty(inode) || |
3229 | offset + length > i_size_read(inode)) |
3230 | iomap->flags |= IOMAP_F_DIRTY; |
3231 | |
3232 | if (map->m_flags & EXT4_MAP_NEW) |
3233 | iomap->flags |= IOMAP_F_NEW; |
3234 | |
3235 | if (flags & IOMAP_DAX) |
3236 | iomap->dax_dev = EXT4_SB(sb: inode->i_sb)->s_daxdev; |
3237 | else |
3238 | iomap->bdev = inode->i_sb->s_bdev; |
3239 | iomap->offset = (u64) map->m_lblk << blkbits; |
3240 | iomap->length = (u64) map->m_len << blkbits; |
3241 | |
3242 | if ((map->m_flags & EXT4_MAP_MAPPED) && |
3243 | !ext4_test_inode_flag(inode, bit: EXT4_INODE_EXTENTS)) |
3244 | iomap->flags |= IOMAP_F_MERGED; |
3245 | |
3246 | /* |
3247 | * Flags passed to ext4_map_blocks() for direct I/O writes can result |
3248 | * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits |
3249 | * set. In order for any allocated unwritten extents to be converted |
3250 | * into written extents correctly within the ->end_io() handler, we |
3251 | * need to ensure that the iomap->type is set appropriately. Hence,
3252 | * we must check whether the EXT4_MAP_UNWRITTEN bit has been set
3253 | * first.
3254 | */ |
3255 | if (map->m_flags & EXT4_MAP_UNWRITTEN) { |
3256 | iomap->type = IOMAP_UNWRITTEN; |
3257 | iomap->addr = (u64) map->m_pblk << blkbits; |
3258 | if (flags & IOMAP_DAX) |
3259 | iomap->addr += EXT4_SB(sb: inode->i_sb)->s_dax_part_off; |
3260 | } else if (map->m_flags & EXT4_MAP_MAPPED) { |
3261 | iomap->type = IOMAP_MAPPED; |
3262 | iomap->addr = (u64) map->m_pblk << blkbits; |
3263 | if (flags & IOMAP_DAX) |
3264 | iomap->addr += EXT4_SB(sb: inode->i_sb)->s_dax_part_off; |
3265 | } else if (map->m_flags & EXT4_MAP_DELAYED) { |
3266 | iomap->type = IOMAP_DELALLOC; |
3267 | iomap->addr = IOMAP_NULL_ADDR; |
3268 | } else { |
3269 | iomap->type = IOMAP_HOLE; |
3270 | iomap->addr = IOMAP_NULL_ADDR; |
3271 | } |
3272 | } |
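/*
 * Illustration of the flag priority discussed above, using hypothetical
 * local definitions rather than the real ext4 headers: when both the
 * mapped and unwritten bits are set, the unwritten branch must win.
 *
 *	#include <assert.h>
 *
 *	#define MAP_MAPPED	0x1
 *	#define MAP_UNWRITTEN	0x2
 *	enum { T_HOLE, T_MAPPED, T_UNWRITTEN };
 *
 *	static int iomap_type(unsigned int m_flags)
 *	{
 *		if (m_flags & MAP_UNWRITTEN)	// checked first, as above
 *			return T_UNWRITTEN;
 *		if (m_flags & MAP_MAPPED)
 *			return T_MAPPED;
 *		return T_HOLE;
 *	}
 *
 *	int main(void)
 *	{
 *		assert(iomap_type(MAP_MAPPED | MAP_UNWRITTEN) == T_UNWRITTEN);
 *		return 0;
 *	}
 */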
3273 | |
3274 | static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map, |
3275 | unsigned int flags) |
3276 | { |
3277 | handle_t *handle; |
3278 | u8 blkbits = inode->i_blkbits; |
3279 | int ret, dio_credits, m_flags = 0, retries = 0; |
3280 | |
3281 | /* |
3282 | * Trim the mapping request to the maximum value that we can map at |
3283 | * once for direct I/O. |
3284 | */ |
3285 | if (map->m_len > DIO_MAX_BLOCKS) |
3286 | map->m_len = DIO_MAX_BLOCKS; |
3287 | dio_credits = ext4_chunk_trans_blocks(inode, nrblocks: map->m_len); |
3288 | |
3289 | retry: |
3290 | /* |
3291 | * Either we allocate blocks and don't get an unwritten extent, in
3292 | * which case we have reserved enough credits; or the blocks are
3293 | * already allocated and unwritten, in which case the extent
3294 | * conversion fits into the credits as well.
3295 | */ |
3296 | handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits); |
3297 | if (IS_ERR(ptr: handle)) |
3298 | return PTR_ERR(ptr: handle); |
3299 | |
3300 | /* |
3301 | * DAX and direct I/O are the only two operations that are currently |
3302 | * supported with IOMAP_WRITE. |
3303 | */ |
3304 | WARN_ON(!(flags & (IOMAP_DAX | IOMAP_DIRECT))); |
3305 | if (flags & IOMAP_DAX) |
3306 | m_flags = EXT4_GET_BLOCKS_CREATE_ZERO; |
3307 | /* |
3308 | * We use i_size instead of i_disksize here because delalloc writeback |
3309 | * can complete at any point during the I/O and subsequently push the |
3310 | * i_disksize out to i_size. This could be beyond where direct I/O is |
3311 | * happening and thus expose allocated blocks to direct I/O reads. |
3312 | */ |
3313 | else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode)) |
3314 | m_flags = EXT4_GET_BLOCKS_CREATE; |
3315 | else if (ext4_test_inode_flag(inode, bit: EXT4_INODE_EXTENTS)) |
3316 | m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT; |
3317 | |
3318 | ret = ext4_map_blocks(handle, inode, map, flags: m_flags); |
3319 | |
3320 | /* |
3321 | * We cannot fill holes in indirect tree based inodes as that could |
3322 | * expose stale data in the case of a crash. Use the magic error code |
3323 | * to fall back to buffered I/O.
3324 | */ |
3325 | if (!m_flags && !ret) |
3326 | ret = -ENOTBLK; |
3327 | |
3328 | ext4_journal_stop(handle); |
3329 | if (ret == -ENOSPC && ext4_should_retry_alloc(sb: inode->i_sb, retries: &retries)) |
3330 | goto retry; |
3331 | |
3332 | return ret; |
3333 | } |
3334 | |
3335 | |
3336 | static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, |
3337 | unsigned flags, struct iomap *iomap, struct iomap *srcmap) |
3338 | { |
3339 | int ret; |
3340 | struct ext4_map_blocks map; |
3341 | u8 blkbits = inode->i_blkbits; |
3342 | |
3343 | if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK) |
3344 | return -EINVAL; |
3345 | |
3346 | if (WARN_ON_ONCE(ext4_has_inline_data(inode))) |
3347 | return -ERANGE; |
3348 | |
3349 | /* |
3350 | * Calculate the first and last logical blocks respectively. |
3351 | */ |
3352 | map.m_lblk = offset >> blkbits; |
3353 | map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits, |
3354 | EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1; |
3355 | |
3356 | if (flags & IOMAP_WRITE) { |
3357 | /* |
3358 | * If the blocks are already allocated, we don't need to start a
3359 | * journal transaction and can directly return the mapping
3360 | * information. This can boost performance, especially for
3361 | * multi-threaded overwrite requests.
3362 | */ |
3363 | if (offset + length <= i_size_read(inode)) { |
3364 | ret = ext4_map_blocks(NULL, inode, map: &map, flags: 0); |
3365 | if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED)) |
3366 | goto out; |
3367 | } |
3368 | ret = ext4_iomap_alloc(inode, map: &map, flags); |
3369 | } else { |
3370 | ret = ext4_map_blocks(NULL, inode, map: &map, flags: 0); |
3371 | } |
3372 | |
3373 | if (ret < 0) |
3374 | return ret; |
3375 | out: |
3376 | /* |
3377 | * When inline encryption is enabled, sometimes I/O to an encrypted file |
3378 | * has to be broken up to guarantee DUN contiguity. Handle this by |
3379 | * limiting the length of the mapping returned. |
3380 | */ |
3381 | map.m_len = fscrypt_limit_io_blocks(inode, lblk: map.m_lblk, nr_blocks: map.m_len); |
3382 | |
3383 | ext4_set_iomap(inode, iomap, map: &map, offset, length, flags); |
3384 | |
3385 | return 0; |
3386 | } |
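/*
 * Worked example, illustrative only: the m_lblk/m_len arithmetic above
 * for 4 KiB blocks (blkbits == 12), offset == 5000, length == 10000.
 * The EXT4_MAX_LOGICAL_BLOCK clamp is omitted for brevity.
 *
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		unsigned int blkbits = 12;
 *		long long offset = 5000, length = 10000;
 *		unsigned long long lblk = offset >> blkbits;
 *		unsigned long long last = (offset + length - 1) >> blkbits;
 *
 *		assert(lblk == 1 && last == 3);
 *		assert(last - lblk + 1 == 3);	// maps logical blocks 1..3
 *		return 0;
 *	}
 */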
3387 | |
3388 | static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset, |
3389 | loff_t length, unsigned flags, struct iomap *iomap, |
3390 | struct iomap *srcmap) |
3391 | { |
3392 | int ret; |
3393 | |
3394 | /* |
3395 | * Even for writes we don't need to allocate blocks, so just pretend |
3396 | * we are reading to save the overhead of starting a transaction.
3397 | */ |
3398 | flags &= ~IOMAP_WRITE; |
3399 | ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap); |
3400 | WARN_ON_ONCE(!ret && iomap->type != IOMAP_MAPPED); |
3401 | return ret; |
3402 | } |
3403 | |
3404 | static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length, |
3405 | ssize_t written, unsigned flags, struct iomap *iomap) |
3406 | { |
3407 | /* |
3408 | * Check to see whether an error occurred while writing out the data to |
3409 | * the allocated blocks. If so, return the magic error code so that we |
3410 | * fall back to buffered I/O and attempt to complete the remainder of
3411 | * the I/O. Any blocks that may have been allocated in preparation for |
3412 | * the direct I/O will be reused during buffered I/O. |
3413 | */ |
3414 | if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0) |
3415 | return -ENOTBLK; |
3416 | |
3417 | return 0; |
3418 | } |
3419 | |
3420 | const struct iomap_ops ext4_iomap_ops = { |
3421 | .iomap_begin = ext4_iomap_begin, |
3422 | .iomap_end = ext4_iomap_end, |
3423 | }; |
3424 | |
3425 | const struct iomap_ops ext4_iomap_overwrite_ops = { |
3426 | .iomap_begin = ext4_iomap_overwrite_begin, |
3427 | .iomap_end = ext4_iomap_end, |
3428 | }; |
3429 | |
3430 | static int ext4_iomap_begin_report(struct inode *inode, loff_t offset, |
3431 | loff_t length, unsigned int flags, |
3432 | struct iomap *iomap, struct iomap *srcmap) |
3433 | { |
3434 | int ret; |
3435 | struct ext4_map_blocks map; |
3436 | u8 blkbits = inode->i_blkbits; |
3437 | |
3438 | if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK) |
3439 | return -EINVAL; |
3440 | |
3441 | if (ext4_has_inline_data(inode)) { |
3442 | ret = ext4_inline_data_iomap(inode, iomap); |
3443 | if (ret != -EAGAIN) { |
3444 | if (ret == 0 && offset >= iomap->length) |
3445 | ret = -ENOENT; |
3446 | return ret; |
3447 | } |
3448 | } |
3449 | |
3450 | /* |
3451 | * Calculate the first and last logical block respectively. |
3452 | */ |
3453 | map.m_lblk = offset >> blkbits; |
3454 | map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits, |
3455 | EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1; |
3456 | |
3457 | /* |
3458 | * Fiemap callers may call for offset beyond s_bitmap_maxbytes. |
3459 | * Handle it here instead of querying ext4_map_blocks(), since
3460 | * ext4_map_blocks() would warn about it and return an -EIO
3461 | * error.
3462 | */ |
3463 | if (!(ext4_test_inode_flag(inode, bit: EXT4_INODE_EXTENTS))) { |
3464 | struct ext4_sb_info *sbi = EXT4_SB(sb: inode->i_sb); |
3465 | |
3466 | if (offset >= sbi->s_bitmap_maxbytes) { |
3467 | map.m_flags = 0; |
3468 | goto set_iomap; |
3469 | } |
3470 | } |
3471 | |
3472 | ret = ext4_map_blocks(NULL, inode, map: &map, flags: 0); |
3473 | if (ret < 0) |
3474 | return ret; |
3475 | set_iomap: |
3476 | ext4_set_iomap(inode, iomap, map: &map, offset, length, flags); |
3477 | |
3478 | return 0; |
3479 | } |
3480 | |
3481 | const struct iomap_ops ext4_iomap_report_ops = { |
3482 | .iomap_begin = ext4_iomap_begin_report, |
3483 | }; |
3484 | |
3485 | /* |
3486 | * For data=journal mode, folio should be marked dirty only when it was |
3487 | * writeably mapped. When that happens, it was already attached to the |
3488 | * transaction and marked as jbddirty (we take care of this in |
3489 | * ext4_page_mkwrite()). On transaction commit, we writeprotect page mappings |
3490 | * so we should have nothing to do here, except for the case when someone |
3491 | * had the page pinned and dirtied the page through this pin (e.g. by doing |
3492 | * direct IO to it). In that case we'd need to attach buffers here to the |
3493 | * transaction but we cannot due to lock ordering. We cannot just dirty the |
3494 | * folio and leave attached buffers clean, because the buffers' dirty state is |
3495 | * "definitive". We cannot just set the buffers dirty or jbddirty because all |
3496 | * the journalling code will explode. So what we do is to mark the folio |
3497 | * "pending dirty" and next time ext4_writepages() is called, attach buffers |
3498 | * to the transaction appropriately. |
3499 | */ |
3500 | static bool ext4_journalled_dirty_folio(struct address_space *mapping, |
3501 | struct folio *folio) |
3502 | { |
3503 | WARN_ON_ONCE(!folio_buffers(folio)); |
3504 | if (folio_maybe_dma_pinned(folio)) |
3505 | folio_set_checked(folio); |
3506 | return filemap_dirty_folio(mapping, folio); |
3507 | } |
3508 | |
3509 | static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio) |
3510 | { |
3511 | WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio)); |
3512 | WARN_ON_ONCE(!folio_buffers(folio)); |
3513 | return block_dirty_folio(mapping, folio); |
3514 | } |
3515 | |
3516 | static int ext4_iomap_swap_activate(struct swap_info_struct *sis, |
3517 | struct file *file, sector_t *span) |
3518 | { |
3519 | return iomap_swapfile_activate(sis, swap_file: file, pagespan: span, |
3520 | ops: &ext4_iomap_report_ops); |
3521 | } |
3522 | |
3523 | static const struct address_space_operations ext4_aops = { |
3524 | .read_folio = ext4_read_folio, |
3525 | .readahead = ext4_readahead, |
3526 | .writepages = ext4_writepages, |
3527 | .write_begin = ext4_write_begin, |
3528 | .write_end = ext4_write_end, |
3529 | .dirty_folio = ext4_dirty_folio, |
3530 | .bmap = ext4_bmap, |
3531 | .invalidate_folio = ext4_invalidate_folio, |
3532 | .release_folio = ext4_release_folio, |
3533 | .direct_IO = noop_direct_IO, |
3534 | .migrate_folio = buffer_migrate_folio, |
3535 | .is_partially_uptodate = block_is_partially_uptodate, |
3536 | .error_remove_folio = generic_error_remove_folio, |
3537 | .swap_activate = ext4_iomap_swap_activate, |
3538 | }; |
3539 | |
3540 | static const struct address_space_operations ext4_journalled_aops = { |
3541 | .read_folio = ext4_read_folio, |
3542 | .readahead = ext4_readahead, |
3543 | .writepages = ext4_writepages, |
3544 | .write_begin = ext4_write_begin, |
3545 | .write_end = ext4_journalled_write_end, |
3546 | .dirty_folio = ext4_journalled_dirty_folio, |
3547 | .bmap = ext4_bmap, |
3548 | .invalidate_folio = ext4_journalled_invalidate_folio, |
3549 | .release_folio = ext4_release_folio, |
3550 | .direct_IO = noop_direct_IO, |
3551 | .migrate_folio = buffer_migrate_folio_norefs, |
3552 | .is_partially_uptodate = block_is_partially_uptodate, |
3553 | .error_remove_folio = generic_error_remove_folio, |
3554 | .swap_activate = ext4_iomap_swap_activate, |
3555 | }; |
3556 | |
3557 | static const struct address_space_operations ext4_da_aops = { |
3558 | .read_folio = ext4_read_folio, |
3559 | .readahead = ext4_readahead, |
3560 | .writepages = ext4_writepages, |
3561 | .write_begin = ext4_da_write_begin, |
3562 | .write_end = ext4_da_write_end, |
3563 | .dirty_folio = ext4_dirty_folio, |
3564 | .bmap = ext4_bmap, |
3565 | .invalidate_folio = ext4_invalidate_folio, |
3566 | .release_folio = ext4_release_folio, |
3567 | .direct_IO = noop_direct_IO, |
3568 | .migrate_folio = buffer_migrate_folio, |
3569 | .is_partially_uptodate = block_is_partially_uptodate, |
3570 | .error_remove_folio = generic_error_remove_folio, |
3571 | .swap_activate = ext4_iomap_swap_activate, |
3572 | }; |
3573 | |
3574 | static const struct address_space_operations ext4_dax_aops = { |
3575 | .writepages = ext4_dax_writepages, |
3576 | .direct_IO = noop_direct_IO, |
3577 | .dirty_folio = noop_dirty_folio, |
3578 | .bmap = ext4_bmap, |
3579 | .swap_activate = ext4_iomap_swap_activate, |
3580 | }; |
3581 | |
3582 | void ext4_set_aops(struct inode *inode) |
3583 | { |
3584 | switch (ext4_inode_journal_mode(inode)) { |
3585 | case EXT4_INODE_ORDERED_DATA_MODE: |
3586 | case EXT4_INODE_WRITEBACK_DATA_MODE: |
3587 | break; |
3588 | case EXT4_INODE_JOURNAL_DATA_MODE: |
3589 | inode->i_mapping->a_ops = &ext4_journalled_aops; |
3590 | return; |
3591 | default: |
3592 | BUG(); |
3593 | } |
3594 | if (IS_DAX(inode)) |
3595 | inode->i_mapping->a_ops = &ext4_dax_aops; |
3596 | else if (test_opt(inode->i_sb, DELALLOC)) |
3597 | inode->i_mapping->a_ops = &ext4_da_aops; |
3598 | else |
3599 | inode->i_mapping->a_ops = &ext4_aops; |
3600 | } |
3601 | |
3602 | /* |
3603 | * Here we can't skip an unwritten buffer even though it usually reads zero |
3604 | * because it might have data in pagecache (e.g., if called from ext4_zero_range,
3605 | * ext4_punch_hole, etc) which needs to be properly zeroed out. Otherwise a |
3606 | * racing writeback can come later and flush the stale pagecache to disk. |
3607 | */ |
3608 | static int __ext4_block_zero_page_range(handle_t *handle, |
3609 | struct address_space *mapping, loff_t from, loff_t length) |
3610 | { |
3611 | ext4_fsblk_t index = from >> PAGE_SHIFT; |
3612 | unsigned offset = from & (PAGE_SIZE-1); |
3613 | unsigned blocksize, pos; |
3614 | ext4_lblk_t iblock; |
3615 | struct inode *inode = mapping->host; |
3616 | struct buffer_head *bh; |
3617 | struct folio *folio; |
3618 | int err = 0; |
3619 | |
3620 | folio = __filemap_get_folio(mapping, index: from >> PAGE_SHIFT, |
3621 | FGP_LOCK | FGP_ACCESSED | FGP_CREAT, |
3622 | gfp: mapping_gfp_constraint(mapping, gfp_mask: ~__GFP_FS)); |
3623 | if (IS_ERR(ptr: folio)) |
3624 | return PTR_ERR(ptr: folio); |
3625 | |
3626 | blocksize = inode->i_sb->s_blocksize; |
3627 | |
3628 | iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits); |
3629 | |
3630 | bh = folio_buffers(folio); |
3631 | if (!bh) |
3632 | bh = create_empty_buffers(folio, blocksize, b_state: 0); |
3633 | |
3634 | /* Find the buffer that contains "offset" */ |
3635 | pos = blocksize; |
3636 | while (offset >= pos) { |
3637 | bh = bh->b_this_page; |
3638 | iblock++; |
3639 | pos += blocksize; |
3640 | } |
3641 | if (buffer_freed(bh)) { |
3642 | BUFFER_TRACE(bh, "freed: skip" ); |
3643 | goto unlock; |
3644 | } |
3645 | if (!buffer_mapped(bh)) { |
3646 | BUFFER_TRACE(bh, "unmapped" ); |
3647 | ext4_get_block(inode, iblock, bh, create: 0); |
3648 | /* unmapped? It's a hole - nothing to do */ |
3649 | if (!buffer_mapped(bh)) { |
3650 | BUFFER_TRACE(bh, "still unmapped" ); |
3651 | goto unlock; |
3652 | } |
3653 | } |
3654 | |
3655 | /* Ok, it's mapped. Make sure it's up-to-date */ |
3656 | if (folio_test_uptodate(folio)) |
3657 | set_buffer_uptodate(bh); |
3658 | |
3659 | if (!buffer_uptodate(bh)) { |
3660 | err = ext4_read_bh_lock(bh, op_flags: 0, wait: true); |
3661 | if (err) |
3662 | goto unlock; |
3663 | if (fscrypt_inode_uses_fs_layer_crypto(inode)) { |
3664 | /* We expect the key to be set. */ |
3665 | BUG_ON(!fscrypt_has_encryption_key(inode)); |
3666 | err = fscrypt_decrypt_pagecache_blocks(folio, |
3667 | len: blocksize, |
3668 | offs: bh_offset(bh)); |
3669 | if (err) { |
3670 | clear_buffer_uptodate(bh); |
3671 | goto unlock; |
3672 | } |
3673 | } |
3674 | } |
3675 | if (ext4_should_journal_data(inode)) { |
3676 | BUFFER_TRACE(bh, "get write access" ); |
3677 | err = ext4_journal_get_write_access(handle, inode->i_sb, bh, |
3678 | EXT4_JTR_NONE); |
3679 | if (err) |
3680 | goto unlock; |
3681 | } |
3682 | folio_zero_range(folio, start: offset, length); |
3683 | BUFFER_TRACE(bh, "zeroed end of block" ); |
3684 | |
3685 | if (ext4_should_journal_data(inode)) { |
3686 | err = ext4_dirty_journalled_data(handle, bh); |
3687 | } else { |
3688 | err = 0; |
3689 | mark_buffer_dirty(bh); |
3690 | if (ext4_should_order_data(inode)) |
3691 | err = ext4_jbd2_inode_add_write(handle, inode, start_byte: from, |
3692 | length); |
3693 | } |
3694 | |
3695 | unlock: |
3696 | folio_unlock(folio); |
3697 | folio_put(folio); |
3698 | return err; |
3699 | } |
3700 | |
3701 | /* |
3702 | * ext4_block_zero_page_range() zeros out a mapping of length 'length' |
3703 | * starting from file offset 'from'. The range to be zeroed must
3704 | * be contained within one block. If the specified range exceeds
3705 | * the end of the block, it will be shortened to the end of the
3706 | * block that corresponds to 'from'.
3707 | */ |
3708 | static int ext4_block_zero_page_range(handle_t *handle, |
3709 | struct address_space *mapping, loff_t from, loff_t length) |
3710 | { |
3711 | struct inode *inode = mapping->host; |
3712 | unsigned offset = from & (PAGE_SIZE-1); |
3713 | unsigned blocksize = inode->i_sb->s_blocksize; |
3714 | unsigned max = blocksize - (offset & (blocksize - 1)); |
3715 | |
3716 | /* |
3717 | * Correct the length if it does not fall between
3718 | * 'from' and the end of the block.
3719 | */ |
3720 | if (length > max || length < 0) |
3721 | length = max; |
3722 | |
3723 | if (IS_DAX(inode)) { |
3724 | return dax_zero_range(inode, pos: from, len: length, NULL, |
3725 | ops: &ext4_iomap_ops); |
3726 | } |
3727 | return __ext4_block_zero_page_range(handle, mapping, from, length); |
3728 | } |
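/*
 * Worked example, illustrative only: the clamp above with a 1 KiB block
 * size on a 4 KiB page. For from == 5000 the offset within the page is
 * 5000 & 4095 == 904, only 120 bytes remain in the containing block, and
 * any longer length is shortened to 120.
 *
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		unsigned int blocksize = 1024;
 *		unsigned int offset = 5000 & (4096 - 1);	// 904
 *		unsigned int max = blocksize - (offset & (blocksize - 1));
 *
 *		assert(max == 120);
 *		return 0;
 *	}
 */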
3729 | |
3730 | /* |
3731 | * ext4_block_truncate_page() zeroes out a mapping from file offset `from' |
3732 | * up to the end of the block which corresponds to `from'. |
3733 | * This is required during truncate. We need to physically zero the tail end
3734 | * of that block so it doesn't yield old data if the file is later grown. |
3735 | */ |
3736 | static int ext4_block_truncate_page(handle_t *handle, |
3737 | struct address_space *mapping, loff_t from) |
3738 | { |
3739 | unsigned offset = from & (PAGE_SIZE-1); |
3740 | unsigned length; |
3741 | unsigned blocksize; |
3742 | struct inode *inode = mapping->host; |
3743 | |
3744 | /* If we are processing an encrypted inode during orphan list handling */ |
3745 | if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode)) |
3746 | return 0; |
3747 | |
3748 | blocksize = inode->i_sb->s_blocksize; |
3749 | length = blocksize - (offset & (blocksize - 1)); |
3750 | |
3751 | return ext4_block_zero_page_range(handle, mapping, from, length); |
3752 | } |
3753 | |
3754 | int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, |
3755 | loff_t lstart, loff_t length) |
3756 | { |
3757 | struct super_block *sb = inode->i_sb; |
3758 | struct address_space *mapping = inode->i_mapping; |
3759 | unsigned partial_start, partial_end; |
3760 | ext4_fsblk_t start, end; |
3761 | loff_t byte_end = (lstart + length - 1); |
3762 | int err = 0; |
3763 | |
3764 | partial_start = lstart & (sb->s_blocksize - 1); |
3765 | partial_end = byte_end & (sb->s_blocksize - 1); |
3766 | |
3767 | start = lstart >> sb->s_blocksize_bits; |
3768 | end = byte_end >> sb->s_blocksize_bits; |
3769 | |
3770 | /* Handle partial zero within the single block */ |
3771 | if (start == end && |
3772 | (partial_start || (partial_end != sb->s_blocksize - 1))) { |
3773 | err = ext4_block_zero_page_range(handle, mapping, |
3774 | from: lstart, length); |
3775 | return err; |
3776 | } |
3777 | /* Handle partial zero out on the start of the range */ |
3778 | if (partial_start) { |
3779 | err = ext4_block_zero_page_range(handle, mapping, |
3780 | from: lstart, length: sb->s_blocksize); |
3781 | if (err) |
3782 | return err; |
3783 | } |
3784 | /* Handle partial zero out on the end of the range */ |
3785 | if (partial_end != sb->s_blocksize - 1) |
3786 | err = ext4_block_zero_page_range(handle, mapping, |
3787 | from: byte_end - partial_end, |
3788 | length: partial_end + 1); |
3789 | return err; |
3790 | } |
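/*
 * Worked example, illustrative only: the partial_start/partial_end
 * arithmetic above for 4 KiB blocks, lstart == 5000, length == 10000.
 * start != end, so the head of the range (bytes 5000..8191) and the
 * tail (bytes 12288..14999) are zeroed by two separate calls.
 *
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		unsigned int bits = 12, bs = 1 << bits;
 *		long long lstart = 5000, length = 10000;
 *		long long byte_end = lstart + length - 1;	// 14999
 *		unsigned int partial_start = lstart & (bs - 1);
 *		unsigned int partial_end = byte_end & (bs - 1);
 *
 *		assert(partial_start == 904 && partial_end == 2711);
 *		assert((lstart >> bits) == 1 && (byte_end >> bits) == 3);
 *		assert(byte_end - partial_end == 12288);	// tail start
 *		assert(partial_end + 1 == 2712);		// tail length
 *		return 0;
 *	}
 */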
3791 | |
3792 | int ext4_can_truncate(struct inode *inode) |
3793 | { |
3794 | if (S_ISREG(inode->i_mode)) |
3795 | return 1; |
3796 | if (S_ISDIR(inode->i_mode)) |
3797 | return 1; |
3798 | if (S_ISLNK(inode->i_mode)) |
3799 | return !ext4_inode_is_fast_symlink(inode); |
3800 | return 0; |
3801 | } |
3802 | |
3803 | /* |
3804 | * We have to make sure i_disksize gets properly updated before we truncate |
3805 | * page cache due to hole punching or zero range. Otherwise i_disksize update |
3806 | * can get lost as it may have been postponed to submission of writeback but |
3807 | * that will never happen after we truncate page cache. |
3808 | */ |
3809 | int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, |
3810 | loff_t len) |
3811 | { |
3812 | handle_t *handle; |
3813 | int ret; |
3814 | |
3815 | loff_t size = i_size_read(inode); |
3816 | |
3817 | WARN_ON(!inode_is_locked(inode)); |
3818 | if (offset > size || offset + len < size) |
3819 | return 0; |
3820 | |
3821 | if (EXT4_I(inode)->i_disksize >= size) |
3822 | return 0; |
3823 | |
3824 | handle = ext4_journal_start(inode, EXT4_HT_MISC, 1); |
3825 | if (IS_ERR(ptr: handle)) |
3826 | return PTR_ERR(ptr: handle); |
3827 | ext4_update_i_disksize(inode, newsize: size); |
3828 | ret = ext4_mark_inode_dirty(handle, inode); |
3829 | ext4_journal_stop(handle); |
3830 | |
3831 | return ret; |
3832 | } |
3833 | |
3834 | static void ext4_wait_dax_page(struct inode *inode) |
3835 | { |
3836 | filemap_invalidate_unlock(mapping: inode->i_mapping); |
3837 | schedule(); |
3838 | filemap_invalidate_lock(mapping: inode->i_mapping); |
3839 | } |
3840 | |
3841 | int ext4_break_layouts(struct inode *inode) |
3842 | { |
3843 | struct page *page; |
3844 | int error; |
3845 | |
3846 | if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock))) |
3847 | return -EINVAL; |
3848 | |
3849 | do { |
3850 | page = dax_layout_busy_page(mapping: inode->i_mapping); |
3851 | if (!page) |
3852 | return 0; |
3853 | |
3854 | error = ___wait_var_event(&page->_refcount, |
3855 | atomic_read(&page->_refcount) == 1, |
3856 | TASK_INTERRUPTIBLE, 0, 0, |
3857 | ext4_wait_dax_page(inode)); |
3858 | } while (error == 0); |
3859 | |
3860 | return error; |
3861 | } |
3862 | |
3863 | /* |
3864 | * ext4_punch_hole: punches a hole in a file by releasing the blocks |
3865 | * associated with the given offset and length |
3866 | * |
3867 | * @inode: File inode |
3868 | * @offset: The offset where the hole will begin |
3869 | * @len: The length of the hole |
3870 | * |
3871 | * Returns: 0 on success or negative on failure |
3872 | */ |
3873 | |
3874 | int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) |
3875 | { |
3876 | struct inode *inode = file_inode(f: file); |
3877 | struct super_block *sb = inode->i_sb; |
3878 | ext4_lblk_t first_block, stop_block; |
3879 | struct address_space *mapping = inode->i_mapping; |
3880 | loff_t first_block_offset, last_block_offset, max_length; |
3881 | struct ext4_sb_info *sbi = EXT4_SB(sb: inode->i_sb); |
3882 | handle_t *handle; |
3883 | unsigned int credits; |
3884 | int ret = 0, ret2 = 0; |
3885 | |
3886 | trace_ext4_punch_hole(inode, offset, len: length, mode: 0); |
3887 | |
3888 | /* |
3889 | * Write out all dirty pages to avoid race conditions,
3890 | * then release them.
3891 | */ |
3892 | if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { |
3893 | ret = filemap_write_and_wait_range(mapping, lstart: offset, |
3894 | lend: offset + length - 1); |
3895 | if (ret) |
3896 | return ret; |
3897 | } |
3898 | |
3899 | inode_lock(inode); |
3900 | |
3901 | /* No need to punch hole beyond i_size */ |
3902 | if (offset >= inode->i_size) |
3903 | goto out_mutex; |
3904 | |
3905 | /* |
3906 | * If the hole extends beyond i_size, set the hole |
3907 | * to end after the page that contains i_size |
3908 | */ |
3909 | if (offset + length > inode->i_size) { |
3910 | length = inode->i_size + |
3911 | PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) - |
3912 | offset; |
3913 | } |
3914 | |
3915 | /* |
3916 | * For punch hole, offset + length needs to end at least one block
3917 | * short of s_bitmap_maxbytes. Adjust the length if it goes beyond that limit.
3918 | */ |
3919 | max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize; |
3920 | if (offset + length > max_length) |
3921 | length = max_length - offset; |
3922 | |
3923 | if (offset & (sb->s_blocksize - 1) || |
3924 | (offset + length) & (sb->s_blocksize - 1)) { |
3925 | /* |
3926 | * Attach jinode to inode for jbd2 if we do any zeroing of |
3927 | * a partial block
3928 | */ |
3929 | ret = ext4_inode_attach_jinode(inode); |
3930 | if (ret < 0) |
3931 | goto out_mutex; |
3932 | |
3933 | } |
3934 | |
3935 | /* Wait for all existing dio workers; newcomers will block on i_rwsem */
3936 | inode_dio_wait(inode); |
3937 | |
3938 | ret = file_modified(file); |
3939 | if (ret) |
3940 | goto out_mutex; |
3941 | |
3942 | /* |
3943 | * Prevent page faults from reinstantiating pages we have released from |
3944 | * page cache. |
3945 | */ |
3946 | filemap_invalidate_lock(mapping); |
3947 | |
3948 | ret = ext4_break_layouts(inode); |
3949 | if (ret) |
3950 | goto out_dio; |
3951 | |
3952 | first_block_offset = round_up(offset, sb->s_blocksize); |
3953 | last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; |
3954 | |
3955 | /* Now release the pages and zero the block-aligned part of pages */
3956 | if (last_block_offset > first_block_offset) { |
3957 | ret = ext4_update_disksize_before_punch(inode, offset, len: length); |
3958 | if (ret) |
3959 | goto out_dio; |
3960 | truncate_pagecache_range(inode, offset: first_block_offset, |
3961 | end: last_block_offset); |
3962 | } |
3963 | |
3964 | if (ext4_test_inode_flag(inode, bit: EXT4_INODE_EXTENTS)) |
3965 | credits = ext4_writepage_trans_blocks(inode); |
3966 | else |
3967 | credits = ext4_blocks_for_truncate(inode); |
3968 | handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); |
3969 | if (IS_ERR(ptr: handle)) { |
3970 | ret = PTR_ERR(ptr: handle); |
3971 | ext4_std_error(sb, ret); |
3972 | goto out_dio; |
3973 | } |
3974 | |
3975 | ret = ext4_zero_partial_blocks(handle, inode, lstart: offset, |
3976 | length); |
3977 | if (ret) |
3978 | goto out_stop; |
3979 | |
3980 | first_block = (offset + sb->s_blocksize - 1) >> |
3981 | EXT4_BLOCK_SIZE_BITS(sb); |
3982 | stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); |
3983 | |
3984 | /* If there are blocks to remove, do it */ |
3985 | if (stop_block > first_block) { |
3986 | ext4_lblk_t hole_len = stop_block - first_block; |
3987 | |
3988 | down_write(sem: &EXT4_I(inode)->i_data_sem); |
3989 | ext4_discard_preallocations(inode); |
3990 | |
3991 | ext4_es_remove_extent(inode, lblk: first_block, len: hole_len); |
3992 | |
3993 | if (ext4_test_inode_flag(inode, bit: EXT4_INODE_EXTENTS)) |
3994 | ret = ext4_ext_remove_space(inode, start: first_block, |
3995 | end: stop_block - 1); |
3996 | else |
3997 | ret = ext4_ind_remove_space(handle, inode, start: first_block, |
3998 | end: stop_block); |
3999 | |
4000 | ext4_es_insert_extent(inode, lblk: first_block, len: hole_len, pblk: ~0, |
4001 | EXTENT_STATUS_HOLE); |
4002 | up_write(sem: &EXT4_I(inode)->i_data_sem); |
4003 | } |
4004 | ext4_fc_track_range(handle, inode, start: first_block, end: stop_block); |
4005 | if (IS_SYNC(inode)) |
4006 | ext4_handle_sync(handle); |
4007 | |
4008 | inode_set_mtime_to_ts(inode, ts: inode_set_ctime_current(inode)); |
4009 | ret2 = ext4_mark_inode_dirty(handle, inode); |
4010 | if (unlikely(ret2)) |
4011 | ret = ret2; |
4012 | if (ret >= 0) |
4013 | ext4_update_inode_fsync_trans(handle, inode, datasync: 1); |
4014 | out_stop: |
4015 | ext4_journal_stop(handle); |
4016 | out_dio: |
4017 | filemap_invalidate_unlock(mapping); |
4018 | out_mutex: |
4019 | inode_unlock(inode); |
4020 | return ret; |
4021 | } |
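/*
 * Illustration, not part of the kernel build: hole punching is reached
 * from userspace via fallocate(2). FALLOC_FL_PUNCH_HOLE must be combined
 * with FALLOC_FL_KEEP_SIZE; this sketch punches 8 KiB starting at 4 KiB
 * in a hypothetical test file.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("testfile", O_RDWR);
 *
 *		if (fd < 0 || fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *					FALLOC_FL_KEEP_SIZE, 4096, 8192) < 0) {
 *			perror("punch");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */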
4022 | |
4023 | int ext4_inode_attach_jinode(struct inode *inode) |
4024 | { |
4025 | struct ext4_inode_info *ei = EXT4_I(inode); |
4026 | struct jbd2_inode *jinode; |
4027 | |
4028 | if (ei->jinode || !EXT4_SB(sb: inode->i_sb)->s_journal) |
4029 | return 0; |
4030 | |
4031 | jinode = jbd2_alloc_inode(GFP_KERNEL); |
4032 | spin_lock(lock: &inode->i_lock); |
4033 | if (!ei->jinode) { |
4034 | if (!jinode) { |
4035 | spin_unlock(lock: &inode->i_lock); |
4036 | return -ENOMEM; |
4037 | } |
4038 | ei->jinode = jinode; |
4039 | jbd2_journal_init_jbd_inode(jinode: ei->jinode, inode); |
4040 | jinode = NULL; |
4041 | } |
4042 | spin_unlock(lock: &inode->i_lock); |
4043 | if (unlikely(jinode != NULL)) |
4044 | jbd2_free_inode(jinode); |
4045 | return 0; |
4046 | } |
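/*
 * The function above follows a classic publish-under-lock shape:
 * allocate outside the spinlock (the allocation may sleep), re-check and
 * publish under the lock, and let the loser of any race free its copy.
 * A minimal userspace analog, for illustration only:
 *
 *	#include <pthread.h>
 *	#include <stdlib.h>
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *	static void *shared;
 *
 *	static int attach_once(void)
 *	{
 *		void *new = malloc(64);		// allocate before locking
 *
 *		pthread_mutex_lock(&lock);
 *		if (!shared) {
 *			if (!new) {
 *				pthread_mutex_unlock(&lock);
 *				return -1;	// -ENOMEM analog
 *			}
 *			shared = new;
 *			new = NULL;		// published: don't free
 *		}
 *		pthread_mutex_unlock(&lock);
 *		free(new);			// race loser frees its copy
 *		return 0;
 *	}
 *
 *	int main(void)
 *	{
 *		return attach_once();
 *	}
 */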
4047 | |
4048 | /* |
4049 | * ext4_truncate() |
4050 | * |
4051 | * We block out ext4_get_block() block instantiations across the entire |
4052 | * transaction, and VFS/VM ensures that ext4_truncate() cannot run |
4053 | * simultaneously on behalf of the same inode. |
4054 | * |
4055 | * As we work through the truncate and commit bits of it to the journal there |
4056 | * is one core guiding principle: the file's tree must always be consistent on
4057 | * disk. We must be able to restart the truncate after a crash. |
4058 | * |
4059 | * The file's tree may be transiently inconsistent in memory (although it |
4060 | * probably isn't), but whenever we close off and commit a journal transaction, |
4061 | * the contents of (the filesystem + the journal) must be consistent and |
4062 | * restartable. It's pretty simple, really: bottom up, right to left (although |
4063 | * left-to-right works OK too). |
4064 | * |
4065 | * Note that at recovery time, journal replay occurs *before* the restart of |
4066 | * truncate against the orphan inode list. |
4067 | * |
4068 | * The committed inode has the new, desired i_size (which is the same as |
4069 | * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see |
4070 | * that this inode's truncate did not complete and it will again call |
4071 | * ext4_truncate() to have another go. So there will be instantiated blocks |
4072 | * to the right of the truncation point in a crashed ext4 filesystem. But |
4073 | * that's fine - as long as they are linked from the inode, the post-crash |
4074 | * ext4_truncate() run will find them and release them. |
4075 | */ |
4076 | int ext4_truncate(struct inode *inode) |
4077 | { |
4078 | struct ext4_inode_info *ei = EXT4_I(inode); |
4079 | unsigned int credits; |
4080 | int err = 0, err2; |
4081 | handle_t *handle; |
4082 | struct address_space *mapping = inode->i_mapping; |
4083 | |
4084 | /* |
4085 | * There is a possibility that we're either freeing the inode |
4086 | * or it's a completely new inode. In those cases we might not |
4087 | * have i_rwsem locked because it's not necessary. |
4088 | */ |
4089 | if (!(inode->i_state & (I_NEW|I_FREEING))) |
4090 | WARN_ON(!inode_is_locked(inode)); |
4091 | trace_ext4_truncate_enter(inode); |
4092 | |
4093 | if (!ext4_can_truncate(inode)) |
4094 | goto out_trace; |
4095 | |
4096 | if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) |
4097 | ext4_set_inode_state(inode, bit: EXT4_STATE_DA_ALLOC_CLOSE); |
4098 | |
4099 | if (ext4_has_inline_data(inode)) { |
4100 | int has_inline = 1; |
4101 | |
4102 | err = ext4_inline_data_truncate(inode, has_inline: &has_inline); |
4103 | if (err || has_inline) |
4104 | goto out_trace; |
4105 | } |
4106 | |
4107 | /* If we zero out the tail of the page, we have to create jinode for jbd2 */
4108 | if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { |
4109 | err = ext4_inode_attach_jinode(inode); |
4110 | if (err) |
4111 | goto out_trace; |
4112 | } |
4113 | |
4114 | if (ext4_test_inode_flag(inode, bit: EXT4_INODE_EXTENTS)) |
4115 | credits = ext4_writepage_trans_blocks(inode); |
4116 | else |
4117 | credits = ext4_blocks_for_truncate(inode); |
4118 | |
4119 | handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); |
4120 | if (IS_ERR(ptr: handle)) { |
4121 | err = PTR_ERR(ptr: handle); |
4122 | goto out_trace; |
4123 | } |
4124 | |
4125 | if (inode->i_size & (inode->i_sb->s_blocksize - 1)) |
4126 | ext4_block_truncate_page(handle, mapping, from: inode->i_size); |
4127 | |
4128 | /* |
4129 | * We add the inode to the orphan list, so that if this |
4130 | * truncate spans multiple transactions, and we crash, we will |
4131 | * resume the truncate when the filesystem recovers. It also |
4132 | * marks the inode dirty, to catch the new size. |
4133 | * |
4134 | * Implication: the file must always be in a sane, consistent |
4135 | * truncatable state while each transaction commits. |
4136 | */ |
4137 | err = ext4_orphan_add(handle, inode); |
4138 | if (err) |
4139 | goto out_stop; |
4140 | |
4141 | down_write(sem: &EXT4_I(inode)->i_data_sem); |
4142 | |
4143 | ext4_discard_preallocations(inode); |
4144 | |
4145 | if (ext4_test_inode_flag(inode, bit: EXT4_INODE_EXTENTS)) |
4146 | err = ext4_ext_truncate(handle, inode); |
4147 | else |
4148 | ext4_ind_truncate(handle, inode); |
4149 | |
4150 | up_write(sem: &ei->i_data_sem); |
4151 | if (err) |
4152 | goto out_stop; |
4153 | |
4154 | if (IS_SYNC(inode)) |
4155 | ext4_handle_sync(handle); |
4156 | |
4157 | out_stop: |
4158 | /* |
4159 | * If this was a simple ftruncate() and the file will remain alive, |
4160 | * then we need to clear up the orphan record which we created above. |
4161 | * However, if this was a real unlink then we were called by |
4162 | * ext4_evict_inode(), and we allow that function to clean up the |
4163 | * orphan info for us. |
4164 | */ |
4165 | if (inode->i_nlink) |
4166 | ext4_orphan_del(handle, inode); |
4167 | |
4168 | inode_set_mtime_to_ts(inode, ts: inode_set_ctime_current(inode)); |
4169 | err2 = ext4_mark_inode_dirty(handle, inode); |
4170 | if (unlikely(err2 && !err)) |
4171 | err = err2; |
4172 | ext4_journal_stop(handle); |
4173 | |
4174 | out_trace: |
4175 | trace_ext4_truncate_exit(inode); |
4176 | return err; |
4177 | } |
4178 | |
4179 | static inline u64 ext4_inode_peek_iversion(const struct inode *inode) |
4180 | { |
4181 | if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) |
4182 | return inode_peek_iversion_raw(inode); |
4183 | else |
4184 | return inode_peek_iversion(inode); |
4185 | } |
4186 | |
4187 | static int ext4_inode_blocks_set(struct ext4_inode *raw_inode, |
4188 | struct ext4_inode_info *ei) |
4189 | { |
4190 | struct inode *inode = &(ei->vfs_inode); |
4191 | u64 i_blocks = READ_ONCE(inode->i_blocks); |
4192 | struct super_block *sb = inode->i_sb; |
4193 | |
4194 | if (i_blocks <= ~0U) { |
4195 | /* |
4196 | * i_blocks can be represented in a 32-bit variable
4197 | * as a multiple of 512 bytes
4198 | */ |
4199 | raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); |
4200 | raw_inode->i_blocks_high = 0; |
4201 | ext4_clear_inode_flag(inode, bit: EXT4_INODE_HUGE_FILE); |
4202 | return 0; |
4203 | } |
4204 | |
4205 | /* |
4206 | * This should never happen since sb->s_maxbytes should not have |
4207 | * allowed this; sb->s_maxbytes was set according to the huge_file
4208 | * feature in ext4_fill_super(). |
4209 | */ |
4210 | if (!ext4_has_feature_huge_file(sb)) |
4211 | return -EFSCORRUPTED; |
4212 | |
4213 | if (i_blocks <= 0xffffffffffffULL) { |
4214 | /* |
4215 | * i_blocks can be represented in a 48-bit variable
4216 | * as a multiple of 512 bytes
4217 | */ |
4218 | raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); |
4219 | raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); |
4220 | ext4_clear_inode_flag(inode, bit: EXT4_INODE_HUGE_FILE); |
4221 | } else { |
4222 | ext4_set_inode_flag(inode, bit: EXT4_INODE_HUGE_FILE); |
4223 | /* i_blocks is stored in units of the filesystem block size */
4224 | i_blocks = i_blocks >> (inode->i_blkbits - 9); |
4225 | raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); |
4226 | raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); |
4227 | } |
4228 | return 0; |
4229 | } |
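/*
 * Worked example, illustrative only: the 48-bit split above. A sector
 * count of 0x123456789a does not fit in 32 bits, so the low 32 bits go
 * into i_blocks_lo and the next 16 bits into i_blocks_high, and
 * reassembling them recovers the original value.
 *
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		unsigned long long i_blocks = 0x123456789aULL;
 *		unsigned int lo = (unsigned int)i_blocks;	// 0x3456789a
 *		unsigned short hi = i_blocks >> 32;		// 0x12
 *
 *		assert((((unsigned long long)hi << 32) | lo) == i_blocks);
 *		return 0;
 *	}
 */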
4230 | |
4231 | static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode) |
4232 | { |
4233 | struct ext4_inode_info *ei = EXT4_I(inode); |
4234 | uid_t i_uid; |
4235 | gid_t i_gid; |
4236 | projid_t i_projid; |
4237 | int block; |
4238 | int err; |
4239 | |
4240 | err = ext4_inode_blocks_set(raw_inode, ei); |
4241 | |
4242 | raw_inode->i_mode = cpu_to_le16(inode->i_mode); |
4243 | i_uid = i_uid_read(inode); |
4244 | i_gid = i_gid_read(inode); |
4245 | i_projid = from_kprojid(to: &init_user_ns, projid: ei->i_projid); |
4246 | if (!(test_opt(inode->i_sb, NO_UID32))) { |
4247 | raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); |
4248 | raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); |
4249 | /* |
4250 | * Fix up interoperability with old kernels. Otherwise, |
4251 | * old inodes get re-used with the upper 16 bits of the |
4252 | * uid/gid intact. |
4253 | */ |
4254 | if (ei->i_dtime && list_empty(head: &ei->i_orphan)) { |
4255 | raw_inode->i_uid_high = 0; |
4256 | raw_inode->i_gid_high = 0; |
4257 | } else { |
4258 | raw_inode->i_uid_high = |
4259 | cpu_to_le16(high_16_bits(i_uid)); |
4260 | raw_inode->i_gid_high = |
4261 | cpu_to_le16(high_16_bits(i_gid)); |
4262 | } |
4263 | } else { |
4264 | raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); |
4265 | raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); |
4266 | raw_inode->i_uid_high = 0; |
4267 | raw_inode->i_gid_high = 0; |
4268 | } |
4269 | raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); |
4270 | |
4271 | EXT4_INODE_SET_CTIME(inode, raw_inode); |
4272 | EXT4_INODE_SET_MTIME(inode, raw_inode); |
4273 | EXT4_INODE_SET_ATIME(inode, raw_inode); |
4274 | EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); |
4275 | |
4276 | raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); |
4277 | raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); |
4278 | if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) |
4279 | raw_inode->i_file_acl_high = |
4280 | cpu_to_le16(ei->i_file_acl >> 32); |
4281 | raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); |
4282 | ext4_isize_set(raw_inode, i_size: ei->i_disksize); |
4283 | |
4284 | raw_inode->i_generation = cpu_to_le32(inode->i_generation); |
4285 | if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { |
4286 | if (old_valid_dev(dev: inode->i_rdev)) { |
4287 | raw_inode->i_block[0] = |
4288 | cpu_to_le32(old_encode_dev(inode->i_rdev)); |
4289 | raw_inode->i_block[1] = 0; |
4290 | } else { |
4291 | raw_inode->i_block[0] = 0; |
4292 | raw_inode->i_block[1] = |
4293 | cpu_to_le32(new_encode_dev(inode->i_rdev)); |
4294 | raw_inode->i_block[2] = 0; |
4295 | } |
4296 | } else if (!ext4_has_inline_data(inode)) { |
4297 | for (block = 0; block < EXT4_N_BLOCKS; block++) |
4298 | raw_inode->i_block[block] = ei->i_data[block]; |
4299 | } |
4300 | |
4301 | if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { |
4302 | u64 ivers = ext4_inode_peek_iversion(inode); |
4303 | |
4304 | raw_inode->i_disk_version = cpu_to_le32(ivers); |
4305 | if (ei->i_extra_isize) { |
4306 | if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) |
4307 | raw_inode->i_version_hi = |
4308 | cpu_to_le32(ivers >> 32); |
4309 | raw_inode->i_extra_isize = |
4310 | cpu_to_le16(ei->i_extra_isize); |
4311 | } |
4312 | } |
4313 | |
4314 | if (i_projid != EXT4_DEF_PROJID && |
4315 | !ext4_has_feature_project(sb: inode->i_sb)) |
4316 | err = err ?: -EFSCORRUPTED; |
4317 | |
4318 | if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && |
4319 | EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) |
4320 | raw_inode->i_projid = cpu_to_le32(i_projid); |
4321 | |
4322 | ext4_inode_csum_set(inode, raw: raw_inode, ei); |
4323 | return err; |
4324 | } |
4325 | |
4326 | /* |
4327 | * ext4_get_inode_loc returns with an extra refcount against the inode's |
4328 | * underlying buffer_head on success. If we pass 'inode' and it does not |
4329 | * have in-inode xattr, we have all inode data in memory that is needed |
4330 | * to recreate the on-disk version of this inode. |
4331 | */ |
4332 | static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino, |
4333 | struct inode *inode, struct ext4_iloc *iloc, |
4334 | ext4_fsblk_t *ret_block) |
4335 | { |
4336 | struct ext4_group_desc *gdp; |
4337 | struct buffer_head *bh; |
4338 | ext4_fsblk_t block; |
4339 | struct blk_plug plug; |
4340 | int inodes_per_block, inode_offset; |
4341 | |
4342 | iloc->bh = NULL; |
4343 | if (ino < EXT4_ROOT_INO || |
4344 | ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) |
4345 | return -EFSCORRUPTED; |
4346 | |
4347 | iloc->block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); |
4348 | gdp = ext4_get_group_desc(sb, block_group: iloc->block_group, NULL); |
4349 | if (!gdp) |
4350 | return -EIO; |
4351 | |
4352 | /* |
4353 | * Figure out the offset within the block group inode table |
4354 | */ |
4355 | inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; |
4356 | inode_offset = ((ino - 1) % |
4357 | EXT4_INODES_PER_GROUP(sb)); |
4358 | iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); |
4359 | |
4360 | block = ext4_inode_table(sb, bg: gdp); |
4361 | if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) || |
4362 | (block >= ext4_blocks_count(es: EXT4_SB(sb)->s_es))) { |
4363 | ext4_error(sb, "Invalid inode table block %llu in " |
4364 | "block_group %u" , block, iloc->block_group); |
4365 | return -EFSCORRUPTED; |
4366 | } |
4367 | block += (inode_offset / inodes_per_block); |
4368 | |
4369 | bh = sb_getblk(sb, block); |
4370 | if (unlikely(!bh)) |
4371 | return -ENOMEM; |
4372 | if (ext4_buffer_uptodate(bh)) |
4373 | goto has_buffer; |
4374 | |
4375 | lock_buffer(bh); |
4376 | if (ext4_buffer_uptodate(bh)) { |
4377 | /* Someone brought it uptodate while we waited */ |
4378 | unlock_buffer(bh); |
4379 | goto has_buffer; |
4380 | } |
4381 | |
4382 | /* |
4383 | * If we have all information of the inode in memory and this |
4384 | * is the only valid inode in the block, we need not read the |
4385 | * block. |
4386 | */ |
4387 | if (inode && !ext4_test_inode_state(inode, bit: EXT4_STATE_XATTR)) { |
4388 | struct buffer_head *bitmap_bh; |
4389 | int i, start; |
4390 | |
4391 | start = inode_offset & ~(inodes_per_block - 1); |
4392 | |
4393 | /* Is the inode bitmap in cache? */ |
4394 | bitmap_bh = sb_getblk(sb, block: ext4_inode_bitmap(sb, bg: gdp)); |
4395 | if (unlikely(!bitmap_bh)) |
4396 | goto make_io; |
4397 | |
4398 | /* |
4399 | * If the inode bitmap isn't in cache then the |
4400 | * optimisation may end up performing two reads instead |
4401 | * of one, so skip it. |
4402 | */ |
4403 | if (!buffer_uptodate(bh: bitmap_bh)) { |
4404 | brelse(bh: bitmap_bh); |
4405 | goto make_io; |
4406 | } |
4407 | for (i = start; i < start + inodes_per_block; i++) { |
4408 | if (i == inode_offset) |
4409 | continue; |
4410 | if (ext4_test_bit(nr: i, addr: bitmap_bh->b_data)) |
4411 | break; |
4412 | } |
4413 | brelse(bh: bitmap_bh); |
4414 | if (i == start + inodes_per_block) { |
4415 | struct ext4_inode *raw_inode = |
4416 | (struct ext4_inode *) (bh->b_data + iloc->offset); |
4417 | |
4418 | /* all other inodes are free, so skip I/O */ |
4419 | memset(bh->b_data, 0, bh->b_size); |
4420 | if (!ext4_test_inode_state(inode, bit: EXT4_STATE_NEW)) |
4421 | ext4_fill_raw_inode(inode, raw_inode); |
4422 | set_buffer_uptodate(bh); |
4423 | unlock_buffer(bh); |
4424 | goto has_buffer; |
4425 | } |
4426 | } |
4427 | |
4428 | make_io: |
4429 | /* |
4430 | * If we need to do any I/O, try to pre-readahead extra |
4431 | * blocks from the inode table. |
4432 | */ |
4433 | blk_start_plug(&plug); |
4434 | if (EXT4_SB(sb)->s_inode_readahead_blks) { |
4435 | ext4_fsblk_t b, end, table; |
4436 | unsigned num; |
4437 | __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks; |
4438 | |
4439 | table = ext4_inode_table(sb, bg: gdp); |
4440 | /* s_inode_readahead_blks is always a power of 2 */ |
4441 | b = block & ~((ext4_fsblk_t) ra_blks - 1); |
4442 | if (table > b) |
4443 | b = table; |
4444 | end = b + ra_blks; |
4445 | num = EXT4_INODES_PER_GROUP(sb); |
4446 | if (ext4_has_group_desc_csum(sb)) |
4447 | num -= ext4_itable_unused_count(sb, bg: gdp); |
4448 | table += num / inodes_per_block; |
4449 | if (end > table) |
4450 | end = table; |
4451 | while (b <= end) |
4452 | ext4_sb_breadahead_unmovable(sb, block: b++); |
4453 | } |
4454 | |
4455 | /* |
4456 | * There are other valid inodes in the buffer, this inode |
4457 | * has in-inode xattrs, or we don't have this inode in memory. |
4458 | * Read the block from disk. |
4459 | */ |
4460 | trace_ext4_load_inode(sb, ino); |
4461 | ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL); |
4462 | blk_finish_plug(&plug); |
4463 | wait_on_buffer(bh); |
4464 | ext4_simulate_fail_bh(sb, bh, EXT4_SIM_INODE_EIO); |
4465 | if (!buffer_uptodate(bh)) { |
4466 | if (ret_block) |
4467 | *ret_block = block; |
4468 | brelse(bh); |
4469 | return -EIO; |
4470 | } |
4471 | has_buffer: |
4472 | iloc->bh = bh; |
4473 | return 0; |
4474 | } |
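/*
 * Worked example, illustrative only: the iloc arithmetic above with a
 * hypothetical geometry of 8192 inodes per group, 256-byte inodes and
 * 4 KiB blocks (16 inodes per block). Inode 10000 lands in group 1, in
 * the 113th block of that group's inode table, 3840 bytes in.
 *
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		unsigned long ino = 10000;
 *		unsigned int per_group = 8192, per_block = 16, isize = 256;
 *		unsigned long group = (ino - 1) / per_group;
 *		unsigned long idx = (ino - 1) % per_group;	// 1807
 *
 *		assert(group == 1);
 *		assert(idx / per_block == 112);		// block within table
 *		assert((idx % per_block) * isize == 3840);	// byte offset
 *		return 0;
 *	}
 */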
4475 | |
4476 | static int __ext4_get_inode_loc_noinmem(struct inode *inode, |
4477 | struct ext4_iloc *iloc) |
4478 | { |
4479 | ext4_fsblk_t err_blk = 0; |
4480 | int ret; |
4481 | |
4482 | ret = __ext4_get_inode_loc(sb: inode->i_sb, ino: inode->i_ino, NULL, iloc, |
4483 | ret_block: &err_blk); |
4484 | |
4485 | if (ret == -EIO) |
4486 | ext4_error_inode_block(inode, err_blk, EIO, |
4487 | "unable to read itable block" ); |
4488 | |
4489 | return ret; |
4490 | } |
4491 | |
4492 | int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) |
4493 | { |
4494 | ext4_fsblk_t err_blk = 0; |
4495 | int ret; |
4496 | |
4497 | ret = __ext4_get_inode_loc(sb: inode->i_sb, ino: inode->i_ino, inode, iloc, |
4498 | ret_block: &err_blk); |
4499 | |
4500 | if (ret == -EIO) |
4501 | ext4_error_inode_block(inode, err_blk, EIO, |
4502 | "unable to read itable block" ); |
4503 | |
4504 | return ret; |
4505 | } |
4506 | |
4507 | |
4508 | int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino, |
4509 | struct ext4_iloc *iloc) |
4510 | { |
4511 | return __ext4_get_inode_loc(sb, ino, NULL, iloc, NULL); |
4512 | } |
4513 | |
4514 | static bool ext4_should_enable_dax(struct inode *inode) |
4515 | { |
4516 | struct ext4_sb_info *sbi = EXT4_SB(sb: inode->i_sb); |
4517 | |
4518 | if (test_opt2(inode->i_sb, DAX_NEVER)) |
4519 | return false; |
4520 | if (!S_ISREG(inode->i_mode)) |
4521 | return false; |
4522 | if (ext4_should_journal_data(inode)) |
4523 | return false; |
4524 | if (ext4_has_inline_data(inode)) |
4525 | return false; |
4526 | if (ext4_test_inode_flag(inode, bit: EXT4_INODE_ENCRYPT)) |
4527 | return false; |
4528 | if (ext4_test_inode_flag(inode, bit: EXT4_INODE_VERITY)) |
4529 | return false; |
4530 | if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) |
4531 | return false; |
4532 | if (test_opt(inode->i_sb, DAX_ALWAYS)) |
4533 | return true; |
4534 | |
4535 | return ext4_test_inode_flag(inode, bit: EXT4_INODE_DAX); |
4536 | } |
4537 | |
4538 | void ext4_set_inode_flags(struct inode *inode, bool init) |
4539 | { |
4540 | unsigned int flags = EXT4_I(inode)->i_flags; |
4541 | unsigned int new_fl = 0; |
4542 | |
4543 | WARN_ON_ONCE(IS_DAX(inode) && init); |
4544 | |
4545 | if (flags & EXT4_SYNC_FL) |
4546 | new_fl |= S_SYNC; |
4547 | if (flags & EXT4_APPEND_FL) |
4548 | new_fl |= S_APPEND; |
4549 | if (flags & EXT4_IMMUTABLE_FL) |
4550 | new_fl |= S_IMMUTABLE; |
4551 | if (flags & EXT4_NOATIME_FL) |
4552 | new_fl |= S_NOATIME; |
4553 | if (flags & EXT4_DIRSYNC_FL) |
4554 | new_fl |= S_DIRSYNC; |
4555 | |
4556 | /* Because of the way inode_set_flags() works we must preserve S_DAX |
4557 | * here if already set. */ |
4558 | new_fl |= (inode->i_flags & S_DAX); |
4559 | if (init && ext4_should_enable_dax(inode)) |
4560 | new_fl |= S_DAX; |
4561 | |
4562 | if (flags & EXT4_ENCRYPT_FL) |
4563 | new_fl |= S_ENCRYPTED; |
4564 | if (flags & EXT4_CASEFOLD_FL) |
4565 | new_fl |= S_CASEFOLD; |
4566 | if (flags & EXT4_VERITY_FL) |
4567 | new_fl |= S_VERITY; |
4568 | inode_set_flags(inode, flags: new_fl, |
4569 | S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX| |
4570 | S_ENCRYPTED|S_CASEFOLD|S_VERITY); |
4571 | } |
4572 | |
4573 | static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, |
4574 | struct ext4_inode_info *ei) |
4575 | { |
4576 | blkcnt_t i_blocks;
4577 | struct inode *inode = &(ei->vfs_inode); |
4578 | struct super_block *sb = inode->i_sb; |
4579 | |
4580 | if (ext4_has_feature_huge_file(sb)) { |
4581 | /* we are using the combined 48-bit field */
4582 | i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | |
4583 | le32_to_cpu(raw_inode->i_blocks_lo); |
4584 | if (ext4_test_inode_flag(inode, bit: EXT4_INODE_HUGE_FILE)) { |
4585 | /* i_blocks is in units of the filesystem block size */
4586 | return i_blocks << (inode->i_blkbits - 9); |
4587 | } else { |
4588 | return i_blocks; |
4589 | } |
4590 | } else { |
4591 | return le32_to_cpu(raw_inode->i_blocks_lo); |
4592 | } |
4593 | } |
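/*
 * Worked example, illustrative only: the HUGE_FILE unit conversion
 * above. With 4 KiB blocks (blkbits == 12), an on-disk count of 10
 * filesystem blocks becomes 10 << 3 == 80 of the 512-byte units the
 * VFS expects in i_blocks.
 *
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		unsigned int blkbits = 12;
 *		unsigned long long fs_blocks = 10;
 *
 *		assert((fs_blocks << (blkbits - 9)) == 80);
 *		return 0;
 *	}
 */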
4594 | |
4595 | static inline int ext4_iget_extra_inode(struct inode *inode,
4596 | struct ext4_inode *raw_inode, |
4597 | struct ext4_inode_info *ei) |
4598 | { |
4599 | __le32 *magic = (void *)raw_inode + |
4600 | EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; |
4601 | |
4602 | if (EXT4_INODE_HAS_XATTR_SPACE(inode) && |
4603 | *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { |
4604 | int err; |
4605 | |
4606 | ext4_set_inode_state(inode, bit: EXT4_STATE_XATTR); |
4607 | err = ext4_find_inline_data_nolock(inode); |
4608 | if (!err && ext4_has_inline_data(inode)) |
4609 | ext4_set_inode_state(inode, bit: EXT4_STATE_MAY_INLINE_DATA); |
4610 | return err; |
4611 | } else |
4612 | EXT4_I(inode)->i_inline_off = 0; |
4613 | return 0; |
4614 | } |
4615 | |
4616 | int ext4_get_projid(struct inode *inode, kprojid_t *projid) |
4617 | { |
4618 | if (!ext4_has_feature_project(sb: inode->i_sb)) |
4619 | return -EOPNOTSUPP; |
4620 | *projid = EXT4_I(inode)->i_projid; |
4621 | return 0; |
4622 | } |
4623 | |
4624 | /* |
 * ext4 has self-managed i_version for ea inodes: it stores the lower
 * 32 bits of the refcount in i_version, so use raw values if the inode
 * has the EXT4_EA_INODE_FL flag set.
4628 | */ |
4629 | static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val) |
4630 | { |
4631 | if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) |
4632 | inode_set_iversion_raw(inode, val); |
4633 | else |
4634 | inode_set_iversion_queried(inode, val); |
4635 | } |
4636 | |
static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
{
4640 | if (flags & EXT4_IGET_EA_INODE) { |
4641 | if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) |
4642 | return "missing EA_INODE flag" ; |
4643 | if (ext4_test_inode_state(inode, bit: EXT4_STATE_XATTR) || |
4644 | EXT4_I(inode)->i_file_acl) |
4645 | return "ea_inode with extended attributes" ; |
4646 | } else { |
4647 | if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) |
4648 | return "unexpected EA_INODE flag" ; |
4649 | } |
4650 | if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) |
4651 | return "unexpected bad inode w/o EXT4_IGET_BAD" ; |
4652 | return NULL; |
4653 | } |
4654 | |
4655 | struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, |
4656 | ext4_iget_flags flags, const char *function, |
4657 | unsigned int line) |
4658 | { |
4659 | struct ext4_iloc iloc; |
4660 | struct ext4_inode *raw_inode; |
4661 | struct ext4_inode_info *ei; |
4662 | struct ext4_super_block *es = EXT4_SB(sb)->s_es; |
4663 | struct inode *inode; |
4664 | const char *err_str; |
4665 | journal_t *journal = EXT4_SB(sb)->s_journal; |
4666 | long ret; |
4667 | loff_t size; |
4668 | int block; |
4669 | uid_t i_uid; |
4670 | gid_t i_gid; |
4671 | projid_t i_projid; |
4672 | |
4673 | if ((!(flags & EXT4_IGET_SPECIAL) && |
4674 | ((ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) || |
4675 | ino == le32_to_cpu(es->s_usr_quota_inum) || |
4676 | ino == le32_to_cpu(es->s_grp_quota_inum) || |
4677 | ino == le32_to_cpu(es->s_prj_quota_inum) || |
4678 | ino == le32_to_cpu(es->s_orphan_file_inum))) || |
4679 | (ino < EXT4_ROOT_INO) || |
4680 | (ino > le32_to_cpu(es->s_inodes_count))) { |
4681 | if (flags & EXT4_IGET_HANDLE) |
4682 | return ERR_PTR(error: -ESTALE); |
4683 | __ext4_error(sb, function, line, false, EFSCORRUPTED, 0, |
4684 | "inode #%lu: comm %s: iget: illegal inode #" , |
4685 | ino, current->comm); |
4686 | return ERR_PTR(error: -EFSCORRUPTED); |
4687 | } |
4688 | |
4689 | inode = iget_locked(sb, ino); |
4690 | if (!inode) |
4691 | return ERR_PTR(error: -ENOMEM); |
4692 | if (!(inode->i_state & I_NEW)) { |
4693 | if ((err_str = check_igot_inode(inode, flags)) != NULL) { |
4694 | ext4_error_inode(inode, function, line, 0, err_str); |
4695 | iput(inode); |
4696 | return ERR_PTR(error: -EFSCORRUPTED); |
4697 | } |
4698 | return inode; |
4699 | } |
4700 | |
4701 | ei = EXT4_I(inode); |
4702 | iloc.bh = NULL; |
4703 | |
4704 | ret = __ext4_get_inode_loc_noinmem(inode, iloc: &iloc); |
4705 | if (ret < 0) |
4706 | goto bad_inode; |
4707 | raw_inode = ext4_raw_inode(iloc: &iloc); |
4708 | |
4709 | if ((flags & EXT4_IGET_HANDLE) && |
4710 | (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) { |
4711 | ret = -ESTALE; |
4712 | goto bad_inode; |
4713 | } |
4714 | |
4715 | if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { |
4716 | ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); |
4717 | if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > |
4718 | EXT4_INODE_SIZE(inode->i_sb) || |
4719 | (ei->i_extra_isize & 3)) { |
4720 | ext4_error_inode(inode, function, line, 0, |
4721 | "iget: bad extra_isize %u " |
4722 | "(inode size %u)" , |
4723 | ei->i_extra_isize, |
4724 | EXT4_INODE_SIZE(inode->i_sb)); |
4725 | ret = -EFSCORRUPTED; |
4726 | goto bad_inode; |
4727 | } |
4728 | } else |
4729 | ei->i_extra_isize = 0; |
4730 | |
4731 | /* Precompute checksum seed for inode metadata */ |
4732 | if (ext4_has_metadata_csum(sb)) { |
4733 | struct ext4_sb_info *sbi = EXT4_SB(sb: inode->i_sb); |
4734 | __u32 csum; |
4735 | __le32 inum = cpu_to_le32(inode->i_ino); |
4736 | __le32 gen = raw_inode->i_generation; |
4737 | csum = ext4_chksum(sbi, crc: sbi->s_csum_seed, address: (__u8 *)&inum, |
4738 | length: sizeof(inum)); |
4739 | ei->i_csum_seed = ext4_chksum(sbi, crc: csum, address: (__u8 *)&gen, |
4740 | length: sizeof(gen)); |
4741 | } |
4742 | |
4743 | if ((!ext4_inode_csum_verify(inode, raw: raw_inode, ei) || |
4744 | ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) && |
4745 | (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))) { |
4746 | ext4_error_inode_err(inode, function, line, 0, |
				      EFSBADCRC, "iget: checksum invalid");
4748 | ret = -EFSBADCRC; |
4749 | goto bad_inode; |
4750 | } |
4751 | |
4752 | inode->i_mode = le16_to_cpu(raw_inode->i_mode); |
4753 | i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); |
4754 | i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); |
4755 | if (ext4_has_feature_project(sb) && |
4756 | EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE && |
4757 | EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) |
4758 | i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid); |
4759 | else |
4760 | i_projid = EXT4_DEF_PROJID; |
4761 | |
4762 | if (!(test_opt(inode->i_sb, NO_UID32))) { |
4763 | i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; |
4764 | i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; |
4765 | } |
4766 | i_uid_write(inode, uid: i_uid); |
4767 | i_gid_write(inode, gid: i_gid); |
4768 | ei->i_projid = make_kprojid(from: &init_user_ns, projid: i_projid); |
4769 | set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); |
4770 | |
4771 | ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ |
4772 | ei->i_inline_off = 0; |
4773 | ei->i_dir_start_lookup = 0; |
4774 | ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); |
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes;
	 * the test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
4780 | if (inode->i_nlink == 0) { |
4781 | if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL || |
4782 | !(EXT4_SB(sb: inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) && |
4783 | ino != EXT4_BOOT_LOADER_INO) { |
4784 | /* this inode is deleted or unallocated */ |
4785 | if (flags & EXT4_IGET_SPECIAL) { |
4786 | ext4_error_inode(inode, function, line, 0, |
4787 | "iget: special inode unallocated" ); |
4788 | ret = -EFSCORRUPTED; |
4789 | } else |
4790 | ret = -ESTALE; |
4791 | goto bad_inode; |
4792 | } |
4793 | /* The only unlinked inodes we let through here have |
4794 | * valid i_mode and are being read by the orphan |
4795 | * recovery code: that's fine, we're about to complete |
4796 | * the process of deleting those. |
4797 | * OR it is the EXT4_BOOT_LOADER_INO which is |
4798 | * not initialized on a new filesystem. */ |
4799 | } |
4800 | ei->i_flags = le32_to_cpu(raw_inode->i_flags); |
4801 | ext4_set_inode_flags(inode, init: true); |
4802 | inode->i_blocks = ext4_inode_blocks(raw_inode, ei); |
4803 | ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); |
4804 | if (ext4_has_feature_64bit(sb)) |
4805 | ei->i_file_acl |= |
4806 | ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; |
4807 | inode->i_size = ext4_isize(sb, raw_inode); |
4808 | if ((size = i_size_read(inode)) < 0) { |
4809 | ext4_error_inode(inode, function, line, 0, |
4810 | "iget: bad i_size value: %lld" , size); |
4811 | ret = -EFSCORRUPTED; |
4812 | goto bad_inode; |
4813 | } |
4814 | /* |
4815 | * If dir_index is not enabled but there's dir with INDEX flag set, |
4816 | * we'd normally treat htree data as empty space. But with metadata |
4817 | * checksumming that corrupts checksums so forbid that. |
4818 | */ |
4819 | if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) && |
4820 | ext4_test_inode_flag(inode, bit: EXT4_INODE_INDEX)) { |
4821 | ext4_error_inode(inode, function, line, 0, |
4822 | "iget: Dir with htree data on filesystem without dir_index feature." ); |
4823 | ret = -EFSCORRUPTED; |
4824 | goto bad_inode; |
4825 | } |
4826 | ei->i_disksize = inode->i_size; |
4827 | #ifdef CONFIG_QUOTA |
4828 | ei->i_reserved_quota = 0; |
4829 | #endif |
4830 | inode->i_generation = le32_to_cpu(raw_inode->i_generation); |
4831 | ei->i_block_group = iloc.block_group; |
4832 | ei->i_last_alloc_group = ~0; |
4833 | /* |
4834 | * NOTE! The in-memory inode i_data array is in little-endian order |
4835 | * even on big-endian machines: we do NOT byteswap the block numbers! |
4836 | */ |
4837 | for (block = 0; block < EXT4_N_BLOCKS; block++) |
4838 | ei->i_data[block] = raw_inode->i_block[block]; |
4839 | INIT_LIST_HEAD(list: &ei->i_orphan); |
4840 | ext4_fc_init_inode(inode: &ei->vfs_inode); |
4841 | |
4842 | /* |
4843 | * Set transaction id's of transactions that have to be committed |
	 * to finish f[data]sync. We set them to the currently running transaction
4845 | * as we cannot be sure that the inode or some of its metadata isn't |
4846 | * part of the transaction - the inode could have been reclaimed and |
4847 | * now it is reread from disk. |
4848 | */ |
4849 | if (journal) { |
4850 | transaction_t *transaction; |
4851 | tid_t tid; |
4852 | |
4853 | read_lock(&journal->j_state_lock); |
4854 | if (journal->j_running_transaction) |
4855 | transaction = journal->j_running_transaction; |
4856 | else |
4857 | transaction = journal->j_committing_transaction; |
4858 | if (transaction) |
4859 | tid = transaction->t_tid; |
4860 | else |
4861 | tid = journal->j_commit_sequence; |
4862 | read_unlock(&journal->j_state_lock); |
4863 | ei->i_sync_tid = tid; |
4864 | ei->i_datasync_tid = tid; |
4865 | } |
4866 | |
4867 | if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { |
4868 | if (ei->i_extra_isize == 0) { |
4869 | /* The extra space is currently unused. Use it. */ |
4870 | BUILD_BUG_ON(sizeof(struct ext4_inode) & 3); |
4871 | ei->i_extra_isize = sizeof(struct ext4_inode) - |
4872 | EXT4_GOOD_OLD_INODE_SIZE; |
4873 | } else { |
4874 | ret = ext4_iget_extra_inode(inode, raw_inode, ei); |
4875 | if (ret) |
4876 | goto bad_inode; |
4877 | } |
4878 | } |
4879 | |
4880 | EXT4_INODE_GET_CTIME(inode, raw_inode); |
4881 | EXT4_INODE_GET_ATIME(inode, raw_inode); |
4882 | EXT4_INODE_GET_MTIME(inode, raw_inode); |
4883 | EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); |
4884 | |
4885 | if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { |
4886 | u64 ivers = le32_to_cpu(raw_inode->i_disk_version); |
4887 | |
4888 | if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { |
4889 | if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) |
4890 | ivers |= |
4891 | (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; |
4892 | } |
4893 | ext4_inode_set_iversion_queried(inode, val: ivers); |
4894 | } |
4895 | |
4896 | ret = 0; |
4897 | if (ei->i_file_acl && |
4898 | !ext4_inode_block_valid(inode, start_blk: ei->i_file_acl, count: 1)) { |
4899 | ext4_error_inode(inode, function, line, 0, |
4900 | "iget: bad extended attribute block %llu" , |
4901 | ei->i_file_acl); |
4902 | ret = -EFSCORRUPTED; |
4903 | goto bad_inode; |
4904 | } else if (!ext4_has_inline_data(inode)) { |
4905 | /* validate the block references in the inode */ |
4906 | if (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) && |
4907 | (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || |
4908 | (S_ISLNK(inode->i_mode) && |
4909 | !ext4_inode_is_fast_symlink(inode)))) { |
4910 | if (ext4_test_inode_flag(inode, bit: EXT4_INODE_EXTENTS)) |
4911 | ret = ext4_ext_check_inode(inode); |
4912 | else |
4913 | ret = ext4_ind_check_inode(inode); |
4914 | } |
4915 | } |
4916 | if (ret) |
4917 | goto bad_inode; |
4918 | |
4919 | if (S_ISREG(inode->i_mode)) { |
4920 | inode->i_op = &ext4_file_inode_operations; |
4921 | inode->i_fop = &ext4_file_operations; |
4922 | ext4_set_aops(inode); |
4923 | } else if (S_ISDIR(inode->i_mode)) { |
4924 | inode->i_op = &ext4_dir_inode_operations; |
4925 | inode->i_fop = &ext4_dir_operations; |
4926 | } else if (S_ISLNK(inode->i_mode)) { |
4927 | /* VFS does not allow setting these so must be corruption */ |
4928 | if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) { |
4929 | ext4_error_inode(inode, function, line, 0, |
4930 | "iget: immutable or append flags " |
4931 | "not allowed on symlinks" ); |
4932 | ret = -EFSCORRUPTED; |
4933 | goto bad_inode; |
4934 | } |
4935 | if (IS_ENCRYPTED(inode)) { |
4936 | inode->i_op = &ext4_encrypted_symlink_inode_operations; |
4937 | } else if (ext4_inode_is_fast_symlink(inode)) { |
4938 | inode->i_link = (char *)ei->i_data; |
4939 | inode->i_op = &ext4_fast_symlink_inode_operations; |
4940 | nd_terminate_link(name: ei->i_data, len: inode->i_size, |
4941 | maxlen: sizeof(ei->i_data) - 1); |
4942 | } else { |
4943 | inode->i_op = &ext4_symlink_inode_operations; |
4944 | } |
4945 | } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || |
4946 | S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { |
4947 | inode->i_op = &ext4_special_inode_operations; |
4948 | if (raw_inode->i_block[0]) |
4949 | init_special_inode(inode, inode->i_mode, |
4950 | old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); |
4951 | else |
4952 | init_special_inode(inode, inode->i_mode, |
4953 | new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); |
4954 | } else if (ino == EXT4_BOOT_LOADER_INO) { |
4955 | make_bad_inode(inode); |
4956 | } else { |
4957 | ret = -EFSCORRUPTED; |
4958 | ext4_error_inode(inode, function, line, 0, |
4959 | "iget: bogus i_mode (%o)" , inode->i_mode); |
4960 | goto bad_inode; |
4961 | } |
4962 | if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(sb: inode->i_sb)) { |
4963 | ext4_error_inode(inode, function, line, 0, |
4964 | "casefold flag without casefold feature" ); |
4965 | ret = -EFSCORRUPTED; |
4966 | goto bad_inode; |
4967 | } |
4968 | if ((err_str = check_igot_inode(inode, flags)) != NULL) { |
4969 | ext4_error_inode(inode, function, line, 0, err_str); |
4970 | ret = -EFSCORRUPTED; |
4971 | goto bad_inode; |
4972 | } |
4973 | |
4974 | brelse(bh: iloc.bh); |
4975 | unlock_new_inode(inode); |
4976 | return inode; |
4977 | |
4978 | bad_inode: |
4979 | brelse(bh: iloc.bh); |
4980 | iget_failed(inode); |
4981 | return ERR_PTR(error: ret); |
4982 | } |
4983 | |
4984 | static void __ext4_update_other_inode_time(struct super_block *sb, |
4985 | unsigned long orig_ino, |
4986 | unsigned long ino, |
4987 | struct ext4_inode *raw_inode) |
4988 | { |
4989 | struct inode *inode; |
4990 | |
4991 | inode = find_inode_by_ino_rcu(sb, ino); |
4992 | if (!inode) |
4993 | return; |
4994 | |
4995 | if (!inode_is_dirtytime_only(inode)) |
4996 | return; |
4997 | |
4998 | spin_lock(lock: &inode->i_lock); |
4999 | if (inode_is_dirtytime_only(inode)) { |
5000 | struct ext4_inode_info *ei = EXT4_I(inode); |
5001 | |
5002 | inode->i_state &= ~I_DIRTY_TIME; |
5003 | spin_unlock(lock: &inode->i_lock); |
5004 | |
5005 | spin_lock(lock: &ei->i_raw_lock); |
5006 | EXT4_INODE_SET_CTIME(inode, raw_inode); |
5007 | EXT4_INODE_SET_MTIME(inode, raw_inode); |
5008 | EXT4_INODE_SET_ATIME(inode, raw_inode); |
5009 | ext4_inode_csum_set(inode, raw: raw_inode, ei); |
5010 | spin_unlock(lock: &ei->i_raw_lock); |
5011 | trace_ext4_other_inode_update_time(inode, orig_ino); |
5012 | return; |
5013 | } |
5014 | spin_unlock(lock: &inode->i_lock); |
5015 | } |
5016 | |
5017 | /* |
5018 | * Opportunistically update the other time fields for other inodes in |
5019 | * the same inode table block. |
5020 | */ |
5021 | static void ext4_update_other_inodes_time(struct super_block *sb, |
5022 | unsigned long orig_ino, char *buf) |
5023 | { |
5024 | unsigned long ino; |
5025 | int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; |
5026 | int inode_size = EXT4_INODE_SIZE(sb); |
5027 | |
5028 | /* |
5029 | * Calculate the first inode in the inode table block. Inode |
5030 | * numbers are one-based. That is, the first inode in a block |
5031 | * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1). |
5032 | */ |
5033 | ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1; |
5034 | rcu_read_lock(); |
5035 | for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { |
5036 | if (ino == orig_ino) |
5037 | continue; |
5038 | __ext4_update_other_inode_time(sb, orig_ino, ino, |
5039 | raw_inode: (struct ext4_inode *)buf); |
5040 | } |
5041 | rcu_read_unlock(); |
5042 | } |
5043 | |
5044 | /* |
5045 | * Post the struct inode info into an on-disk inode location in the |
5046 | * buffer-cache. This gobbles the caller's reference to the |
5047 | * buffer_head in the inode location struct. |
5048 | * |
5049 | * The caller must have write access to iloc->bh. |
5050 | */ |
5051 | static int ext4_do_update_inode(handle_t *handle, |
5052 | struct inode *inode, |
5053 | struct ext4_iloc *iloc) |
5054 | { |
5055 | struct ext4_inode *raw_inode = ext4_raw_inode(iloc); |
5056 | struct ext4_inode_info *ei = EXT4_I(inode); |
5057 | struct buffer_head *bh = iloc->bh; |
5058 | struct super_block *sb = inode->i_sb; |
5059 | int err; |
5060 | int need_datasync = 0, set_large_file = 0; |
5061 | |
5062 | spin_lock(lock: &ei->i_raw_lock); |
5063 | |
5064 | /* |
5065 | * For fields not tracked in the in-memory inode, initialise them |
5066 | * to zero for new inodes. |
5067 | */ |
5068 | if (ext4_test_inode_state(inode, bit: EXT4_STATE_NEW)) |
5069 | memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); |
5070 | |
5071 | if (READ_ONCE(ei->i_disksize) != ext4_isize(sb: inode->i_sb, raw_inode)) |
5072 | need_datasync = 1; |
5073 | if (ei->i_disksize > 0x7fffffffULL) { |
5074 | if (!ext4_has_feature_large_file(sb) || |
5075 | EXT4_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT4_GOOD_OLD_REV)) |
5076 | set_large_file = 1; |
5077 | } |
5078 | |
5079 | err = ext4_fill_raw_inode(inode, raw_inode); |
5080 | spin_unlock(lock: &ei->i_raw_lock); |
5081 | if (err) { |
5082 | EXT4_ERROR_INODE(inode, "corrupted inode contents" ); |
5083 | goto out_brelse; |
5084 | } |
5085 | |
5086 | if (inode->i_sb->s_flags & SB_LAZYTIME) |
5087 | ext4_update_other_inodes_time(sb: inode->i_sb, orig_ino: inode->i_ino, |
5088 | buf: bh->b_data); |
5089 | |
	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5091 | err = ext4_handle_dirty_metadata(handle, NULL, bh); |
5092 | if (err) |
5093 | goto out_error; |
5094 | ext4_clear_inode_state(inode, bit: EXT4_STATE_NEW); |
5095 | if (set_large_file) { |
		BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
5097 | err = ext4_journal_get_write_access(handle, sb, |
5098 | EXT4_SB(sb)->s_sbh, |
5099 | EXT4_JTR_NONE); |
5100 | if (err) |
5101 | goto out_error; |
5102 | lock_buffer(bh: EXT4_SB(sb)->s_sbh); |
5103 | ext4_set_feature_large_file(sb); |
5104 | ext4_superblock_csum_set(sb); |
5105 | unlock_buffer(bh: EXT4_SB(sb)->s_sbh); |
5106 | ext4_handle_sync(handle); |
5107 | err = ext4_handle_dirty_metadata(handle, NULL, |
5108 | EXT4_SB(sb)->s_sbh); |
5109 | } |
5110 | ext4_update_inode_fsync_trans(handle, inode, datasync: need_datasync); |
5111 | out_error: |
5112 | ext4_std_error(inode->i_sb, err); |
5113 | out_brelse: |
5114 | brelse(bh); |
5115 | return err; |
5116 | } |
5117 | |
5118 | /* |
5119 | * ext4_write_inode() |
5120 | * |
5121 | * We are called from a few places: |
5122 | * |
5123 | * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files. |
5124 | * Here, there will be no transaction running. We wait for any running |
5125 | * transaction to commit. |
5126 | * |
5127 | * - Within flush work (sys_sync(), kupdate and such). |
5128 | * We wait on commit, if told to. |
5129 | * |
5130 | * - Within iput_final() -> write_inode_now() |
5131 | * We wait on commit, if told to. |
5132 | * |
5133 | * In all cases it is actually safe for us to return without doing anything, |
5134 | * because the inode has been copied into a raw inode buffer in |
5135 | * ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL |
5136 | * writeback. |
5137 | * |
5138 | * Note that we are absolutely dependent upon all inode dirtiers doing the |
5139 | * right thing: they *must* call mark_inode_dirty() after dirtying info in |
5140 | * which we are interested. |
5141 | * |
5142 | * It would be a bug for them to not do this. The code: |
5143 | * |
5144 | * mark_inode_dirty(inode) |
5145 | * stuff(); |
5146 | * inode->i_size = expr; |
5147 | * |
5148 | * is in error because write_inode() could occur while `stuff()' is running, |
5149 | * and the new i_size will be lost. Plus the inode will no longer be on the |
5150 | * superblock's dirty inode list. |
5151 | */ |
5152 | int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) |
5153 | { |
5154 | int err; |
5155 | |
5156 | if (WARN_ON_ONCE(current->flags & PF_MEMALLOC)) |
5157 | return 0; |
5158 | |
5159 | if (unlikely(ext4_forced_shutdown(inode->i_sb))) |
5160 | return -EIO; |
5161 | |
5162 | if (EXT4_SB(sb: inode->i_sb)->s_journal) { |
5163 | if (ext4_journal_current_handle()) { |
5164 | ext4_debug("called recursively, non-PF_MEMALLOC!\n" ); |
5165 | dump_stack(); |
5166 | return -EIO; |
5167 | } |
5168 | |
5169 | /* |
5170 | * No need to force transaction in WB_SYNC_NONE mode. Also |
5171 | * ext4_sync_fs() will force the commit after everything is |
5172 | * written. |
5173 | */ |
5174 | if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync) |
5175 | return 0; |
5176 | |
5177 | err = ext4_fc_commit(journal: EXT4_SB(sb: inode->i_sb)->s_journal, |
5178 | EXT4_I(inode)->i_sync_tid); |
5179 | } else { |
5180 | struct ext4_iloc iloc; |
5181 | |
5182 | err = __ext4_get_inode_loc_noinmem(inode, iloc: &iloc); |
5183 | if (err) |
5184 | return err; |
5185 | /* |
5186 | * sync(2) will flush the whole buffer cache. No need to do |
5187 | * it here separately for each inode. |
5188 | */ |
5189 | if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) |
5190 | sync_dirty_buffer(bh: iloc.bh); |
5191 | if (buffer_req(bh: iloc.bh) && !buffer_uptodate(bh: iloc.bh)) { |
5192 | ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO, |
5193 | "IO error syncing inode" ); |
5194 | err = -EIO; |
5195 | } |
5196 | brelse(bh: iloc.bh); |
5197 | } |
5198 | return err; |
5199 | } |
5200 | |
5201 | /* |
5202 | * In data=journal mode ext4_journalled_invalidate_folio() may fail to invalidate |
5203 | * buffers that are attached to a folio straddling i_size and are undergoing |
5204 | * commit. In that case we have to wait for commit to finish and try again. |
5205 | */ |
5206 | static void ext4_wait_for_tail_page_commit(struct inode *inode) |
5207 | { |
5208 | unsigned offset; |
5209 | journal_t *journal = EXT4_SB(sb: inode->i_sb)->s_journal; |
5210 | tid_t commit_tid = 0; |
5211 | int ret; |
5212 | |
5213 | offset = inode->i_size & (PAGE_SIZE - 1); |
5214 | /* |
5215 | * If the folio is fully truncated, we don't need to wait for any commit |
5216 | * (and we even should not as __ext4_journalled_invalidate_folio() may |
5217 | * strip all buffers from the folio but keep the folio dirty which can then |
5218 | * confuse e.g. concurrent ext4_writepages() seeing dirty folio without |
5219 | * buffers). Also we don't need to wait for any commit if all buffers in |
5220 | * the folio remain valid. This is most beneficial for the common case of |
5221 | * blocksize == PAGESIZE. |
5222 | */ |
5223 | if (!offset || offset > (PAGE_SIZE - i_blocksize(node: inode))) |
5224 | return; |
5225 | while (1) { |
5226 | struct folio *folio = filemap_lock_folio(mapping: inode->i_mapping, |
5227 | index: inode->i_size >> PAGE_SHIFT); |
5228 | if (IS_ERR(ptr: folio)) |
5229 | return; |
5230 | ret = __ext4_journalled_invalidate_folio(folio, offset, |
5231 | length: folio_size(folio) - offset); |
5232 | folio_unlock(folio); |
5233 | folio_put(folio); |
5234 | if (ret != -EBUSY) |
5235 | return; |
5236 | commit_tid = 0; |
5237 | read_lock(&journal->j_state_lock); |
5238 | if (journal->j_committing_transaction) |
5239 | commit_tid = journal->j_committing_transaction->t_tid; |
5240 | read_unlock(&journal->j_state_lock); |
5241 | if (commit_tid) |
5242 | jbd2_log_wait_commit(journal, tid: commit_tid); |
5243 | } |
5244 | } |
5245 | |
5246 | /* |
5247 | * ext4_setattr() |
5248 | * |
5249 | * Called from notify_change. |
5250 | * |
5251 | * We want to trap VFS attempts to truncate the file as soon as |
5252 | * possible. In particular, we want to make sure that when the VFS |
5253 | * shrinks i_size, we put the inode on the orphan list and modify |
5254 | * i_disksize immediately, so that during the subsequent flushing of |
5255 | * dirty pages and freeing of disk blocks, we can guarantee that any |
5256 | * commit will leave the blocks being flushed in an unused state on |
5257 | * disk. (On recovery, the inode will get truncated and the blocks will |
5258 | * be freed, so we have a strong guarantee that no future commit will |
5259 | * leave these blocks visible to the user.) |
5260 | * |
 * Another thing we have to ensure is that if we are in ordered mode
 * and the inode is still attached to the committing transaction, we
 * must start writeout of all the dirty pages which are being truncated.
5264 | * This way we are sure that all the data written in the previous |
5265 | * transaction are already on disk (truncate waits for pages under |
5266 | * writeback). |
5267 | * |
5268 | * Called with inode->i_rwsem down. |
5269 | */ |
5270 | int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry, |
5271 | struct iattr *attr) |
5272 | { |
5273 | struct inode *inode = d_inode(dentry); |
5274 | int error, rc = 0; |
5275 | int orphan = 0; |
5276 | const unsigned int ia_valid = attr->ia_valid; |
5277 | bool inc_ivers = true; |
5278 | |
5279 | if (unlikely(ext4_forced_shutdown(inode->i_sb))) |
5280 | return -EIO; |
5281 | |
5282 | if (unlikely(IS_IMMUTABLE(inode))) |
5283 | return -EPERM; |
5284 | |
5285 | if (unlikely(IS_APPEND(inode) && |
5286 | (ia_valid & (ATTR_MODE | ATTR_UID | |
5287 | ATTR_GID | ATTR_TIMES_SET)))) |
5288 | return -EPERM; |
5289 | |
5290 | error = setattr_prepare(idmap, dentry, attr); |
5291 | if (error) |
5292 | return error; |
5293 | |
5294 | error = fscrypt_prepare_setattr(dentry, attr); |
5295 | if (error) |
5296 | return error; |
5297 | |
5298 | error = fsverity_prepare_setattr(dentry, attr); |
5299 | if (error) |
5300 | return error; |
5301 | |
5302 | if (is_quota_modification(idmap, inode, ia: attr)) { |
5303 | error = dquot_initialize(inode); |
5304 | if (error) |
5305 | return error; |
5306 | } |
5307 | |
5308 | if (i_uid_needs_update(idmap, attr, inode) || |
5309 | i_gid_needs_update(idmap, attr, inode)) { |
5310 | handle_t *handle; |
5311 | |
5312 | /* (user+group)*(old+new) structure, inode write (sb, |
5313 | * inode block, ? - but truncate inode update has it) */ |
5314 | handle = ext4_journal_start(inode, EXT4_HT_QUOTA, |
5315 | (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) + |
5316 | EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3); |
5317 | if (IS_ERR(ptr: handle)) { |
5318 | error = PTR_ERR(ptr: handle); |
5319 | goto err_out; |
5320 | } |
5321 | |
5322 | /* dquot_transfer() calls back ext4_get_inode_usage() which |
5323 | * counts xattr inode references. |
5324 | */ |
5325 | down_read(sem: &EXT4_I(inode)->xattr_sem); |
5326 | error = dquot_transfer(idmap, inode, iattr: attr); |
5327 | up_read(sem: &EXT4_I(inode)->xattr_sem); |
5328 | |
5329 | if (error) { |
5330 | ext4_journal_stop(handle); |
5331 | return error; |
5332 | } |
5333 | /* Update corresponding info in inode so that everything is in |
5334 | * one transaction */ |
5335 | i_uid_update(idmap, attr, inode); |
5336 | i_gid_update(idmap, attr, inode); |
5337 | error = ext4_mark_inode_dirty(handle, inode); |
5338 | ext4_journal_stop(handle); |
5339 | if (unlikely(error)) { |
5340 | return error; |
5341 | } |
5342 | } |
5343 | |
5344 | if (attr->ia_valid & ATTR_SIZE) { |
5345 | handle_t *handle; |
5346 | loff_t oldsize = inode->i_size; |
5347 | loff_t old_disksize; |
5348 | int shrink = (attr->ia_size < inode->i_size); |
5349 | |
5350 | if (!(ext4_test_inode_flag(inode, bit: EXT4_INODE_EXTENTS))) { |
5351 | struct ext4_sb_info *sbi = EXT4_SB(sb: inode->i_sb); |
5352 | |
5353 | if (attr->ia_size > sbi->s_bitmap_maxbytes) { |
5354 | return -EFBIG; |
5355 | } |
5356 | } |
5357 | if (!S_ISREG(inode->i_mode)) { |
5358 | return -EINVAL; |
5359 | } |
5360 | |
5361 | if (attr->ia_size == inode->i_size) |
5362 | inc_ivers = false; |
5363 | |
5364 | if (shrink) { |
5365 | if (ext4_should_order_data(inode)) { |
5366 | error = ext4_begin_ordered_truncate(inode, |
5367 | new_size: attr->ia_size); |
5368 | if (error) |
5369 | goto err_out; |
5370 | } |
5371 | /* |
5372 | * Blocks are going to be removed from the inode. Wait |
5373 | * for dio in flight. |
5374 | */ |
5375 | inode_dio_wait(inode); |
5376 | } |
5377 | |
5378 | filemap_invalidate_lock(mapping: inode->i_mapping); |
5379 | |
5380 | rc = ext4_break_layouts(inode); |
5381 | if (rc) { |
5382 | filemap_invalidate_unlock(mapping: inode->i_mapping); |
5383 | goto err_out; |
5384 | } |
5385 | |
5386 | if (attr->ia_size != inode->i_size) { |
5387 | handle = ext4_journal_start(inode, EXT4_HT_INODE, 3); |
5388 | if (IS_ERR(ptr: handle)) { |
5389 | error = PTR_ERR(ptr: handle); |
5390 | goto out_mmap_sem; |
5391 | } |
5392 | if (ext4_handle_valid(handle) && shrink) { |
5393 | error = ext4_orphan_add(handle, inode); |
5394 | orphan = 1; |
5395 | } |
5396 | /* |
			 * Update c/mtime on truncate up; ext4_truncate() will
			 * update c/mtime in the shrink case below.
5399 | */ |
5400 | if (!shrink) |
5401 | inode_set_mtime_to_ts(inode, |
5402 | ts: inode_set_ctime_current(inode)); |
5403 | |
5404 | if (shrink) |
5405 | ext4_fc_track_range(handle, inode, |
5406 | start: (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >> |
5407 | inode->i_sb->s_blocksize_bits, |
5408 | EXT_MAX_BLOCKS - 1); |
5409 | else |
5410 | ext4_fc_track_range( |
5411 | handle, inode, |
5412 | start: (oldsize > 0 ? oldsize - 1 : oldsize) >> |
5413 | inode->i_sb->s_blocksize_bits, |
5414 | end: (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >> |
5415 | inode->i_sb->s_blocksize_bits); |
5416 | |
5417 | down_write(sem: &EXT4_I(inode)->i_data_sem); |
5418 | old_disksize = EXT4_I(inode)->i_disksize; |
5419 | EXT4_I(inode)->i_disksize = attr->ia_size; |
5420 | rc = ext4_mark_inode_dirty(handle, inode); |
5421 | if (!error) |
5422 | error = rc; |
5423 | /* |
5424 | * We have to update i_size under i_data_sem together |
5425 | * with i_disksize to avoid races with writeback code |
5426 | * running ext4_wb_update_i_disksize(). |
5427 | */ |
5428 | if (!error) |
5429 | i_size_write(inode, i_size: attr->ia_size); |
5430 | else |
5431 | EXT4_I(inode)->i_disksize = old_disksize; |
5432 | up_write(sem: &EXT4_I(inode)->i_data_sem); |
5433 | ext4_journal_stop(handle); |
5434 | if (error) |
5435 | goto out_mmap_sem; |
5436 | if (!shrink) { |
5437 | pagecache_isize_extended(inode, from: oldsize, |
5438 | to: inode->i_size); |
5439 | } else if (ext4_should_journal_data(inode)) { |
5440 | ext4_wait_for_tail_page_commit(inode); |
5441 | } |
5442 | } |
5443 | |
5444 | /* |
5445 | * Truncate pagecache after we've waited for commit |
5446 | * in data=journal mode to make pages freeable. |
5447 | */ |
5448 | truncate_pagecache(inode, new: inode->i_size); |
5449 | /* |
5450 | * Call ext4_truncate() even if i_size didn't change to |
5451 | * truncate possible preallocated blocks. |
5452 | */ |
5453 | if (attr->ia_size <= oldsize) { |
5454 | rc = ext4_truncate(inode); |
5455 | if (rc) |
5456 | error = rc; |
5457 | } |
5458 | out_mmap_sem: |
5459 | filemap_invalidate_unlock(mapping: inode->i_mapping); |
5460 | } |
5461 | |
5462 | if (!error) { |
5463 | if (inc_ivers) |
5464 | inode_inc_iversion(inode); |
5465 | setattr_copy(idmap, inode, attr); |
5466 | mark_inode_dirty(inode); |
5467 | } |
5468 | |
5469 | /* |
5470 | * If the call to ext4_truncate failed to get a transaction handle at |
5471 | * all, we need to clean up the in-core orphan list manually. |
5472 | */ |
5473 | if (orphan && inode->i_nlink) |
5474 | ext4_orphan_del(NULL, inode); |
5475 | |
5476 | if (!error && (ia_valid & ATTR_MODE)) |
5477 | rc = posix_acl_chmod(idmap, dentry, inode->i_mode); |
5478 | |
5479 | err_out: |
5480 | if (error) |
5481 | ext4_std_error(inode->i_sb, error); |
5482 | if (!error) |
5483 | error = rc; |
5484 | return error; |
5485 | } |
5486 | |
5487 | u32 ext4_dio_alignment(struct inode *inode) |
5488 | { |
5489 | if (fsverity_active(inode)) |
5490 | return 0; |
5491 | if (ext4_should_journal_data(inode)) |
5492 | return 0; |
5493 | if (ext4_has_inline_data(inode)) |
5494 | return 0; |
5495 | if (IS_ENCRYPTED(inode)) { |
5496 | if (!fscrypt_dio_supported(inode)) |
5497 | return 0; |
5498 | return i_blocksize(node: inode); |
5499 | } |
5500 | return 1; /* use the iomap defaults */ |
5501 | } |
5502 | |
5503 | int ext4_getattr(struct mnt_idmap *idmap, const struct path *path, |
5504 | struct kstat *stat, u32 request_mask, unsigned int query_flags) |
5505 | { |
5506 | struct inode *inode = d_inode(dentry: path->dentry); |
5507 | struct ext4_inode *raw_inode; |
5508 | struct ext4_inode_info *ei = EXT4_I(inode); |
5509 | unsigned int flags; |
5510 | |
5511 | if ((request_mask & STATX_BTIME) && |
5512 | EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) { |
5513 | stat->result_mask |= STATX_BTIME; |
5514 | stat->btime.tv_sec = ei->i_crtime.tv_sec; |
5515 | stat->btime.tv_nsec = ei->i_crtime.tv_nsec; |
5516 | } |
5517 | |
5518 | /* |
5519 | * Return the DIO alignment restrictions if requested. We only return |
5520 | * this information when requested, since on encrypted files it might |
5521 | * take a fair bit of work to get if the file wasn't opened recently. |
5522 | */ |
5523 | if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) { |
5524 | u32 dio_align = ext4_dio_alignment(inode); |
5525 | |
5526 | stat->result_mask |= STATX_DIOALIGN; |
5527 | if (dio_align == 1) { |
5528 | struct block_device *bdev = inode->i_sb->s_bdev; |
5529 | |
5530 | /* iomap defaults */ |
5531 | stat->dio_mem_align = bdev_dma_alignment(bdev) + 1; |
5532 | stat->dio_offset_align = bdev_logical_block_size(bdev); |
5533 | } else { |
5534 | stat->dio_mem_align = dio_align; |
5535 | stat->dio_offset_align = dio_align; |
5536 | } |
5537 | } |
5538 | |
5539 | flags = ei->i_flags & EXT4_FL_USER_VISIBLE; |
5540 | if (flags & EXT4_APPEND_FL) |
5541 | stat->attributes |= STATX_ATTR_APPEND; |
5542 | if (flags & EXT4_COMPR_FL) |
5543 | stat->attributes |= STATX_ATTR_COMPRESSED; |
5544 | if (flags & EXT4_ENCRYPT_FL) |
5545 | stat->attributes |= STATX_ATTR_ENCRYPTED; |
5546 | if (flags & EXT4_IMMUTABLE_FL) |
5547 | stat->attributes |= STATX_ATTR_IMMUTABLE; |
5548 | if (flags & EXT4_NODUMP_FL) |
5549 | stat->attributes |= STATX_ATTR_NODUMP; |
5550 | if (flags & EXT4_VERITY_FL) |
5551 | stat->attributes |= STATX_ATTR_VERITY; |
5552 | |
5553 | stat->attributes_mask |= (STATX_ATTR_APPEND | |
5554 | STATX_ATTR_COMPRESSED | |
5555 | STATX_ATTR_ENCRYPTED | |
5556 | STATX_ATTR_IMMUTABLE | |
5557 | STATX_ATTR_NODUMP | |
5558 | STATX_ATTR_VERITY); |
5559 | |
5560 | generic_fillattr(idmap, request_mask, inode, stat); |
5561 | return 0; |
5562 | } |
5563 | |
5564 | int ext4_file_getattr(struct mnt_idmap *idmap, |
5565 | const struct path *path, struct kstat *stat, |
5566 | u32 request_mask, unsigned int query_flags) |
5567 | { |
5568 | struct inode *inode = d_inode(dentry: path->dentry); |
5569 | u64 delalloc_blocks; |
5570 | |
5571 | ext4_getattr(idmap, path, stat, request_mask, query_flags); |
5572 | |
5573 | /* |
5574 | * If there is inline data in the inode, the inode will normally not |
5575 | * have data blocks allocated (it may have an external xattr block). |
5576 | * Report at least one sector for such files, so tools like tar, rsync, |
5577 | * others don't incorrectly think the file is completely sparse. |
5578 | */ |
5579 | if (unlikely(ext4_has_inline_data(inode))) |
5580 | stat->blocks += (stat->size + 511) >> 9; |
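	/*
	 * E.g. (illustrative): a 100-byte inline-data file reports
	 * (100 + 511) >> 9 = 1 sector (512 bytes) instead of 0.
	 */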
5581 | |
5582 | /* |
5583 | * We can't update i_blocks if the block allocation is delayed |
5584 | * otherwise in the case of system crash before the real block |
5585 | * allocation is done, we will have i_blocks inconsistent with |
5586 | * on-disk file blocks. |
	 * We always keep i_blocks updated together with real
	 * allocation. But so as not to confuse userspace, stat
	 * will return a block count that includes the delayed
	 * allocation blocks for this file.
5591 | */ |
5592 | delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb), |
5593 | EXT4_I(inode)->i_reserved_data_blocks); |
5594 | stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9); |
5595 | return 0; |
5596 | } |
5597 | |
5598 | static int ext4_index_trans_blocks(struct inode *inode, int lblocks, |
5599 | int pextents) |
5600 | { |
5601 | if (!(ext4_test_inode_flag(inode, bit: EXT4_INODE_EXTENTS))) |
5602 | return ext4_ind_trans_blocks(inode, nrblocks: lblocks); |
5603 | return ext4_ext_index_trans_blocks(inode, extents: pextents); |
5604 | } |
5605 | |
5606 | /* |
5607 | * Account for index blocks, block groups bitmaps and block group |
5608 | * descriptor blocks if modify datablocks and index blocks |
5609 | * worse case, the indexs blocks spread over different block groups |
5610 | * |
5611 | * If datablocks are discontiguous, they are possible to spread over |
5612 | * different block groups too. If they are contiguous, with flexbg, |
5613 | * they could still across block group boundary. |
5614 | * |
5615 | * Also account for superblock, inode, quota and xattr blocks |
5616 | */ |
5617 | static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, |
5618 | int pextents) |
5619 | { |
5620 | ext4_group_t groups, ngroups = ext4_get_groups_count(sb: inode->i_sb); |
5621 | int gdpblocks; |
5622 | int idxblocks; |
5623 | int ret; |
5624 | |
5625 | /* |
	 * How many index blocks do we need to touch to map @lblocks logical blocks
5627 | * to @pextents physical extents? |
5628 | */ |
5629 | idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents); |
5630 | |
5631 | ret = idxblocks; |
5632 | |
5633 | /* |
5634 | * Now let's see how many group bitmaps and group descriptors need |
5635 | * to account |
5636 | */ |
5637 | groups = idxblocks + pextents; |
5638 | gdpblocks = groups; |
5639 | if (groups > ngroups) |
5640 | groups = ngroups; |
5641 | if (groups > EXT4_SB(sb: inode->i_sb)->s_gdb_count) |
5642 | gdpblocks = EXT4_SB(sb: inode->i_sb)->s_gdb_count; |
5643 | |
5644 | /* bitmaps and block group descriptor blocks */ |
5645 | ret += groups + gdpblocks; |
5646 | |
5647 | /* Blocks for super block, inode, quota and xattr blocks */ |
5648 | ret += EXT4_META_TRANS_BLOCKS(inode->i_sb); |
5649 | |
5650 | return ret; |
5651 | } |
5652 | |
5653 | /* |
 * Calculate the total number of credits to reserve so that the
 * modification of a single page fits into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin().
 *
 * We need to consider the worst case, when we allocate
 * one new block per extent.
5662 | */ |
5663 | int ext4_writepage_trans_blocks(struct inode *inode) |
5664 | { |
5665 | int bpp = ext4_journal_blocks_per_page(inode); |
5666 | int ret; |
5667 | |
5668 | ret = ext4_meta_trans_blocks(inode, lblocks: bpp, pextents: bpp); |
5669 | |
5670 | /* Account for data blocks for journalled mode */ |
5671 | if (ext4_should_journal_data(inode)) |
5672 | ret += bpp; |
5673 | return ret; |
5674 | } |
5675 | |
5676 | /* |
5677 | * Calculate the journal credits for a chunk of data modification. |
5678 | * |
5679 | * This is called from DIO, fallocate or whoever calling |
5680 | * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks. |
5681 | * |
5682 | * journal buffers for data blocks are not included here, as DIO |
 * and fallocate do not need to journal data buffers.
5684 | */ |
5685 | int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks) |
5686 | { |
5687 | return ext4_meta_trans_blocks(inode, lblocks: nrblocks, pextents: 1); |
5688 | } |
5689 | |
5690 | /* |
5691 | * The caller must have previously called ext4_reserve_inode_write(). |
 * Given this, we know that the caller already has write access to iloc->bh.
5693 | */ |
5694 | int ext4_mark_iloc_dirty(handle_t *handle, |
5695 | struct inode *inode, struct ext4_iloc *iloc) |
5696 | { |
5697 | int err = 0; |
5698 | |
5699 | if (unlikely(ext4_forced_shutdown(inode->i_sb))) { |
5700 | put_bh(bh: iloc->bh); |
5701 | return -EIO; |
5702 | } |
5703 | ext4_fc_track_inode(handle, inode); |
5704 | |
5705 | /* the do_update_inode consumes one bh->b_count */ |
5706 | get_bh(bh: iloc->bh); |
5707 | |
5708 | /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */ |
5709 | err = ext4_do_update_inode(handle, inode, iloc); |
5710 | put_bh(bh: iloc->bh); |
5711 | return err; |
5712 | } |
5713 | |
5714 | /* |
 * On success, we end up with an outstanding reference count against
5716 | * iloc->bh. This _must_ be cleaned up later. |
5717 | */ |
5718 | |
5719 | int |
5720 | ext4_reserve_inode_write(handle_t *handle, struct inode *inode, |
5721 | struct ext4_iloc *iloc) |
5722 | { |
5723 | int err; |
5724 | |
5725 | if (unlikely(ext4_forced_shutdown(inode->i_sb))) |
5726 | return -EIO; |
5727 | |
5728 | err = ext4_get_inode_loc(inode, iloc); |
5729 | if (!err) { |
		BUFFER_TRACE(iloc->bh, "get_write_access");
5731 | err = ext4_journal_get_write_access(handle, inode->i_sb, |
5732 | iloc->bh, EXT4_JTR_NONE); |
5733 | if (err) { |
5734 | brelse(bh: iloc->bh); |
5735 | iloc->bh = NULL; |
5736 | } |
5737 | } |
5738 | ext4_std_error(inode->i_sb, err); |
5739 | return err; |
5740 | } |
5741 | |
5742 | static int __ext4_expand_extra_isize(struct inode *inode, |
					    unsigned int new_extra_isize,
5744 | struct ext4_iloc *iloc, |
5745 | handle_t *handle, int *no_expand) |
5746 | { |
5747 | struct ext4_inode *raw_inode; |
	struct ext4_xattr_ibody_header *header;
5749 | unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb); |
5750 | struct ext4_inode_info *ei = EXT4_I(inode); |
5751 | int error; |
5752 | |
5753 | /* this was checked at iget time, but double check for good measure */ |
5754 | if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) || |
5755 | (ei->i_extra_isize & 3)) { |
5756 | EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)" , |
5757 | ei->i_extra_isize, |
5758 | EXT4_INODE_SIZE(inode->i_sb)); |
5759 | return -EFSCORRUPTED; |
5760 | } |
5761 | if ((new_extra_isize < ei->i_extra_isize) || |
5762 | (new_extra_isize < 4) || |
5763 | (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE)) |
5764 | return -EINVAL; /* Should never happen */ |
5765 | |
5766 | raw_inode = ext4_raw_inode(iloc); |
5767 | |
5768 | header = IHDR(inode, raw_inode); |
5769 | |
5770 | /* No extended attributes present */ |
5771 | if (!ext4_test_inode_state(inode, bit: EXT4_STATE_XATTR) || |
5772 | header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { |
5773 | memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + |
5774 | EXT4_I(inode)->i_extra_isize, 0, |
5775 | new_extra_isize - EXT4_I(inode)->i_extra_isize); |
5776 | EXT4_I(inode)->i_extra_isize = new_extra_isize; |
5777 | return 0; |
5778 | } |
5779 | |
5780 | /* |
 * We may need to allocate an external xattr block so we need quotas
 * initialized. Here we can be called with various locks held so we
 * cannot afford to initialize quotas ourselves. So just bail.
5784 | */ |
5785 | if (dquot_initialize_needed(inode)) |
5786 | return -EAGAIN; |
5787 | |
5788 | /* try to expand with EAs present */ |
5789 | error = ext4_expand_extra_isize_ea(inode, new_extra_isize, |
5790 | raw_inode, handle); |
5791 | if (error) { |
5792 | /* |
5793 | * Inode size expansion failed; don't try again |
5794 | */ |
5795 | *no_expand = 1; |
5796 | } |
5797 | |
5798 | return error; |
5799 | } |
5800 | |
5801 | /* |
5802 | * Expand an inode by new_extra_isize bytes. |
5803 | * Returns 0 on success or negative error number on failure. |
5804 | */ |
5805 | static int ext4_try_to_expand_extra_isize(struct inode *inode, |
					  unsigned int new_extra_isize,
5807 | struct ext4_iloc iloc, |
5808 | handle_t *handle) |
5809 | { |
5810 | int no_expand; |
5811 | int error; |
5812 | |
5813 | if (ext4_test_inode_state(inode, bit: EXT4_STATE_NO_EXPAND)) |
5814 | return -EOVERFLOW; |
5815 | |
5816 | /* |
5817 | * In nojournal mode, we can immediately attempt to expand |
5818 | * the inode. When journaled, we first need to obtain extra |
5819 | * buffer credits since we may write into the EA block |
5820 | * with this same handle. If journal_extend fails, then it will |
5821 | * only result in a minor loss of functionality for that inode. |
5822 | * If this is felt to be critical, then e2fsck should be run to |
5823 | * force a large enough s_min_extra_isize. |
5824 | */ |
5825 | if (ext4_journal_extend(handle, |
5826 | EXT4_DATA_TRANS_BLOCKS(inode->i_sb), revoke: 0) != 0) |
5827 | return -ENOSPC; |
5828 | |
5829 | if (ext4_write_trylock_xattr(inode, save: &no_expand) == 0) |
5830 | return -EBUSY; |
5831 | |
5832 | error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc: &iloc, |
5833 | handle, no_expand: &no_expand); |
5834 | ext4_write_unlock_xattr(inode, save: &no_expand); |
5835 | |
5836 | return error; |
5837 | } |
5838 | |
5839 | int ext4_expand_extra_isize(struct inode *inode, |
			    unsigned int new_extra_isize,
5841 | struct ext4_iloc *iloc) |
5842 | { |
5843 | handle_t *handle; |
5844 | int no_expand; |
5845 | int error, rc; |
5846 | |
5847 | if (ext4_test_inode_state(inode, bit: EXT4_STATE_NO_EXPAND)) { |
5848 | brelse(bh: iloc->bh); |
5849 | return -EOVERFLOW; |
5850 | } |
5851 | |
5852 | handle = ext4_journal_start(inode, EXT4_HT_INODE, |
5853 | EXT4_DATA_TRANS_BLOCKS(inode->i_sb)); |
5854 | if (IS_ERR(ptr: handle)) { |
5855 | error = PTR_ERR(ptr: handle); |
5856 | brelse(bh: iloc->bh); |
5857 | return error; |
5858 | } |
5859 | |
5860 | ext4_write_lock_xattr(inode, save: &no_expand); |
5861 | |
	BUFFER_TRACE(iloc->bh, "get_write_access");
5863 | error = ext4_journal_get_write_access(handle, inode->i_sb, iloc->bh, |
5864 | EXT4_JTR_NONE); |
5865 | if (error) { |
5866 | brelse(bh: iloc->bh); |
5867 | goto out_unlock; |
5868 | } |
5869 | |
5870 | error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc, |
5871 | handle, no_expand: &no_expand); |
5872 | |
5873 | rc = ext4_mark_iloc_dirty(handle, inode, iloc); |
5874 | if (!error) |
5875 | error = rc; |
5876 | |
5877 | out_unlock: |
5878 | ext4_write_unlock_xattr(inode, save: &no_expand); |
5879 | ext4_journal_stop(handle); |
5880 | return error; |
5881 | } |
5882 | |
5883 | /* |
5884 | * What we do here is to mark the in-core inode as clean with respect to inode |
5885 | * dirtiness (it may still be data-dirty). |
5886 | * This means that the in-core inode may be reaped by prune_icache |
5887 | * without having to perform any I/O. This is a very good thing, |
5888 | * because *any* task may call prune_icache - even ones which |
5889 | * have a transaction open against a different journal. |
5890 | * |
5891 | * Is this cheating? Not really. Sure, we haven't written the |
5892 | * inode out, but prune_icache isn't a user-visible syncing function. |
5893 | * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) |
5894 | * we start and wait on commits. |
5895 | */ |
5896 | int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode, |
5897 | const char *func, unsigned int line) |
5898 | { |
5899 | struct ext4_iloc iloc; |
5900 | struct ext4_sb_info *sbi = EXT4_SB(sb: inode->i_sb); |
5901 | int err; |
5902 | |
5903 | might_sleep(); |
5904 | trace_ext4_mark_inode_dirty(inode, _RET_IP_); |
5905 | err = ext4_reserve_inode_write(handle, inode, iloc: &iloc); |
5906 | if (err) |
5907 | goto out; |
5908 | |
5909 | if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize) |
5910 | ext4_try_to_expand_extra_isize(inode, new_extra_isize: sbi->s_want_extra_isize, |
5911 | iloc, handle); |
5912 | |
5913 | err = ext4_mark_iloc_dirty(handle, inode, iloc: &iloc); |
5914 | out: |
5915 | if (unlikely(err)) |
5916 | ext4_error_inode_err(inode, func, line, 0, err, |
5917 | "mark_inode_dirty error" ); |
5918 | return err; |
5919 | } |
5920 | |
5921 | /* |
5922 | * ext4_dirty_inode() is called from __mark_inode_dirty() |
5923 | * |
5924 | * We're really interested in the case where a file is being extended. |
5925 | * i_size has been changed by generic_commit_write() and we thus need |
5926 | * to include the updated inode in the current transaction. |
5927 | * |
5928 | * Also, dquot_alloc_block() will always dirty the inode when blocks |
5929 | * are allocated to the file. |
5930 | * |
5931 | * If the inode is marked synchronous, we don't honour that here - doing |
5932 | * so would cause a commit on atime updates, which we don't bother doing. |
5933 | * We handle synchronous inodes at the highest possible level. |
5934 | */ |
5935 | void ext4_dirty_inode(struct inode *inode, int flags) |
5936 | { |
5937 | handle_t *handle; |
5938 | |
5939 | handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); |
5940 | if (IS_ERR(ptr: handle)) |
5941 | return; |
5942 | ext4_mark_inode_dirty(handle, inode); |
5943 | ext4_journal_stop(handle); |
5944 | } |
5945 | |
5946 | int ext4_change_inode_journal_flag(struct inode *inode, int val) |
5947 | { |
5948 | journal_t *journal; |
5949 | handle_t *handle; |
5950 | int err; |
5951 | int alloc_ctx; |
5952 | |
5953 | /* |
5954 | * We have to be very careful here: changing a data block's |
5955 | * journaling status dynamically is dangerous. If we write a |
5956 | * data block to the journal, change the status and then delete |
5957 | * that block, we risk forgetting to revoke the old log record |
5958 | * from the journal and so a subsequent replay can corrupt data. |
5959 | * So, first we make sure that the journal is empty and that |
5960 | * nobody is changing anything. |
5961 | */ |
5962 | |
5963 | journal = EXT4_JOURNAL(inode); |
5964 | if (!journal) |
5965 | return 0; |
5966 | if (is_journal_aborted(journal)) |
5967 | return -EROFS; |
5968 | |
5969 | /* Wait for all existing dio workers */ |
5970 | inode_dio_wait(inode); |
5971 | |
5972 | /* |
5973 | * Before flushing the journal and switching inode's aops, we have |
5974 | * to flush all dirty data the inode has. There can be outstanding |
5975 | * delayed allocations, there can be unwritten extents created by |
5976 | * fallocate or buffered writes in dioread_nolock mode covered by |
5977 | * dirty data which can be converted only after flushing the dirty |
5978 | * data (and journalled aops don't know how to handle these cases). |
5979 | */ |
5980 | if (val) { |
5981 | filemap_invalidate_lock(mapping: inode->i_mapping); |
5982 | err = filemap_write_and_wait(mapping: inode->i_mapping); |
5983 | if (err < 0) { |
5984 | filemap_invalidate_unlock(mapping: inode->i_mapping); |
5985 | return err; |
5986 | } |
5987 | } |
5988 | |
5989 | alloc_ctx = ext4_writepages_down_write(sb: inode->i_sb); |
5990 | jbd2_journal_lock_updates(journal); |
5991 | |
5992 | /* |
5993 | * OK, there are no updates running now, and all cached data is |
5994 | * synced to disk. We are now in a completely consistent state |
5995 | * which doesn't have anything in the journal, and we know that |
5996 | * no filesystem updates are running, so it is safe to modify |
5997 | * the inode's in-core data-journaling state flag now. |
5998 | */ |
5999 | |
6000 | if (val) |
6001 | ext4_set_inode_flag(inode, bit: EXT4_INODE_JOURNAL_DATA); |
6002 | else { |
6003 | err = jbd2_journal_flush(journal, flags: 0); |
6004 | if (err < 0) { |
6005 | jbd2_journal_unlock_updates(journal); |
6006 | ext4_writepages_up_write(sb: inode->i_sb, ctx: alloc_ctx); |
6007 | return err; |
6008 | } |
6009 | ext4_clear_inode_flag(inode, bit: EXT4_INODE_JOURNAL_DATA); |
6010 | } |
6011 | ext4_set_aops(inode); |
6012 | |
6013 | jbd2_journal_unlock_updates(journal); |
6014 | ext4_writepages_up_write(sb: inode->i_sb, ctx: alloc_ctx); |
6015 | |
6016 | if (val) |
6017 | filemap_invalidate_unlock(mapping: inode->i_mapping); |
6018 | |
6019 | /* Finally we can mark the inode as dirty. */ |
6020 | |
6021 | handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); |
6022 | if (IS_ERR(ptr: handle)) |
6023 | return PTR_ERR(ptr: handle); |
6024 | |
6025 | ext4_fc_mark_ineligible(sb: inode->i_sb, |
6026 | reason: EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, handle); |
6027 | err = ext4_mark_inode_dirty(handle, inode); |
6028 | ext4_handle_sync(handle); |
6029 | ext4_journal_stop(handle); |
6030 | ext4_std_error(inode->i_sb, err); |
6031 | |
6032 | return err; |
6033 | } |
6034 | |
6035 | static int ext4_bh_unmapped(handle_t *handle, struct inode *inode, |
6036 | struct buffer_head *bh) |
6037 | { |
6038 | return !buffer_mapped(bh); |
6039 | } |
6040 | |
6041 | vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf) |
6042 | { |
6043 | struct vm_area_struct *vma = vmf->vma; |
6044 | struct folio *folio = page_folio(vmf->page); |
6045 | loff_t size; |
6046 | unsigned long len; |
6047 | int err; |
6048 | vm_fault_t ret; |
6049 | struct file *file = vma->vm_file; |
6050 | struct inode *inode = file_inode(f: file); |
6051 | struct address_space *mapping = inode->i_mapping; |
6052 | handle_t *handle; |
6053 | get_block_t *get_block; |
6054 | int retries = 0; |
6055 | |
6056 | if (unlikely(IS_IMMUTABLE(inode))) |
6057 | return VM_FAULT_SIGBUS; |
6058 | |
6059 | sb_start_pagefault(sb: inode->i_sb); |
6060 | file_update_time(file: vma->vm_file); |
6061 | |
6062 | filemap_invalidate_lock_shared(mapping); |
6063 | |
6064 | err = ext4_convert_inline_data(inode); |
6065 | if (err) |
6066 | goto out_ret; |
6067 | |
6068 | /* |
6069 | * On data journalling we skip straight to the transaction handle: |
	 * there's no delalloc; page truncation will be checked later; the
6071 | * early return w/ all buffers mapped (calculates size/len) can't |
6072 | * be used; and there's no dioread_nolock, so only ext4_get_block. |
6073 | */ |
6074 | if (ext4_should_journal_data(inode)) |
6075 | goto retry_alloc; |
6076 | |
6077 | /* Delalloc case is easy... */ |
6078 | if (test_opt(inode->i_sb, DELALLOC) && |
6079 | !ext4_nonda_switch(sb: inode->i_sb)) { |
6080 | do { |
6081 | err = block_page_mkwrite(vma, vmf, |
6082 | get_block: ext4_da_get_block_prep); |
6083 | } while (err == -ENOSPC && |
6084 | ext4_should_retry_alloc(sb: inode->i_sb, retries: &retries)); |
6085 | goto out_ret; |
6086 | } |
6087 | |
6088 | folio_lock(folio); |
6089 | size = i_size_read(inode); |
6090 | /* Page got truncated from under us? */ |
6091 | if (folio->mapping != mapping || folio_pos(folio) > size) { |
6092 | folio_unlock(folio); |
6093 | ret = VM_FAULT_NOPAGE; |
6094 | goto out; |
6095 | } |
6096 | |
6097 | len = folio_size(folio); |
6098 | if (folio_pos(folio) + len > size) |
6099 | len = size - folio_pos(folio); |
6100 | /* |
6101 | * Return if we have all the buffers mapped. This avoids the need to do |
6102 | * journal_start/journal_stop which can block and take a long time |
6103 | * |
6104 | * This cannot be done for data journalling, as we have to add the |
6105 | * inode to the transaction's list to writeprotect pages on commit. |
6106 | */ |
6107 | if (folio_buffers(folio)) { |
6108 | if (!ext4_walk_page_buffers(NULL, inode, folio_buffers(folio), |
6109 | from: 0, to: len, NULL, |
6110 | fn: ext4_bh_unmapped)) { |
6111 | /* Wait so that we don't change page under IO */ |
6112 | folio_wait_stable(folio); |
6113 | ret = VM_FAULT_LOCKED; |
6114 | goto out; |
6115 | } |
6116 | } |
6117 | folio_unlock(folio); |
6118 | /* OK, we need to fill the hole... */ |
6119 | if (ext4_should_dioread_nolock(inode)) |
6120 | get_block = ext4_get_block_unwritten; |
6121 | else |
6122 | get_block = ext4_get_block; |
6123 | retry_alloc: |
6124 | handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, |
6125 | ext4_writepage_trans_blocks(inode)); |
6126 | if (IS_ERR(ptr: handle)) { |
6127 | ret = VM_FAULT_SIGBUS; |
6128 | goto out; |
6129 | } |
6130 | /* |
6131 | * Data journalling can't use block_page_mkwrite() because it |
6132 | * will set_buffer_dirty() before do_journal_get_write_access() |
6133 | * thus might hit warning messages for dirty metadata buffers. |
6134 | */ |
6135 | if (!ext4_should_journal_data(inode)) { |
6136 | err = block_page_mkwrite(vma, vmf, get_block); |
6137 | } else { |
6138 | folio_lock(folio); |
6139 | size = i_size_read(inode); |
6140 | /* Page got truncated from under us? */ |
6141 | if (folio->mapping != mapping || folio_pos(folio) > size) { |
6142 | ret = VM_FAULT_NOPAGE; |
6143 | goto out_error; |
6144 | } |
6145 | |
6146 | len = folio_size(folio); |
6147 | if (folio_pos(folio) + len > size) |
6148 | len = size - folio_pos(folio); |
6149 | |
6150 | err = __block_write_begin(page: &folio->page, pos: 0, len, get_block: ext4_get_block); |
6151 | if (!err) { |
6152 | ret = VM_FAULT_SIGBUS; |
6153 | if (ext4_journal_folio_buffers(handle, folio, len)) |
6154 | goto out_error; |
6155 | } else { |
6156 | folio_unlock(folio); |
6157 | } |
6158 | } |
6159 | ext4_journal_stop(handle); |
6160 | if (err == -ENOSPC && ext4_should_retry_alloc(sb: inode->i_sb, retries: &retries)) |
6161 | goto retry_alloc; |
6162 | out_ret: |
6163 | ret = vmf_fs_error(err); |
6164 | out: |
6165 | filemap_invalidate_unlock_shared(mapping); |
6166 | sb_end_pagefault(sb: inode->i_sb); |
6167 | return ret; |
6168 | out_error: |
6169 | folio_unlock(folio); |
6170 | ext4_journal_stop(handle); |
6171 | goto out; |
6172 | } |
6173 | |