// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/resize.c
 *
 * Support for resizing an ext4 filesystem while it is mounted.
 *
 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
 *
 * This could probably be made into a module, because it is not often in use.
 */


#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#include "ext4_jbd2.h"

struct ext4_rcu_ptr {
	struct rcu_head rcu;
	void *ptr;
};

static void ext4_rcu_ptr_callback(struct rcu_head *head)
{
	struct ext4_rcu_ptr *ptr;

	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
	kvfree(ptr->ptr);
	kfree(ptr);
}

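/*
 * Schedule @to_free to be kvfree()d after an RCU grace period.  If we
 * cannot allocate the small bookkeeping structure, fall back to waiting
 * out the grace period synchronously before freeing.
 */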
void ext4_kvfree_array_rcu(void *to_free)
{
	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);

	if (ptr) {
		ptr->ptr = to_free;
		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
		return;
	}
	synchronize_rcu();
	kvfree(to_free);
}

int ext4_resize_begin(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int ret = 0;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	/*
	 * If the number of reserved GDT blocks is non-zero, the resize_inode
	 * feature should always be set.
	 */
	if (sbi->s_es->s_reserved_gdt_blocks &&
	    !ext4_has_feature_resize_inode(sb)) {
		ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
		return -EFSCORRUPTED;
	}

	/*
	 * If we are not using the primary superblock/GDT copy don't resize,
	 * because the user tools have no way of handling this.  Probably a
	 * bad time to do it anyways.
	 */
	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
	    le32_to_cpu(sbi->s_es->s_first_data_block)) {
		ext4_warning(sb, "won't resize using backup superblock at %llu",
			     (unsigned long long)sbi->s_sbh->b_blocknr);
		return -EPERM;
	}

	/*
	 * We are not allowed to do online-resizing on a filesystem mounted
	 * with errors, because it can destroy the filesystem easily.
	 */
	if (sbi->s_mount_state & EXT4_ERROR_FS) {
		ext4_warning(sb, "There are errors in the filesystem, "
			     "so online resizing is not allowed");
		return -EPERM;
	}

	if (ext4_has_feature_sparse_super2(sb)) {
		ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
		return -EOPNOTSUPP;
	}

	if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
				  &sbi->s_ext4_flags))
		ret = -EBUSY;

	return ret;
}

int ext4_resize_end(struct super_block *sb, bool update_backups)
{
	clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
	smp_mb__after_atomic();
	if (update_backups)
		return ext4_update_overhead(sb, true);
	return 0;
}

static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
						ext4_group_t group)
{
	ext4_grpblk_t overhead;

	overhead = ext4_bg_num_gdb(sb, group);
	if (ext4_bg_has_super(sb, group))
		overhead += 1 +
			le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	return overhead;
}

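/* Both macros treat the range as half-open: [first, last). */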
#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))

static int verify_group_input(struct super_block *sb,
			      struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t start = ext4_blocks_count(es);
	ext4_fsblk_t end = start + input->blocks_count;
	ext4_group_t group = input->group;
	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
	unsigned overhead;
	ext4_fsblk_t metaend;
	struct buffer_head *bh = NULL;
	ext4_grpblk_t free_blocks_count, offset;
	int err = -EINVAL;

	if (group != sbi->s_groups_count) {
		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
			     input->group, sbi->s_groups_count);
		return -EINVAL;
	}

	overhead = ext4_group_overhead_blocks(sb, group);
	metaend = start + overhead;
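	/* 2 == one block bitmap block + one inode bitmap block */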
	free_blocks_count = input->blocks_count - 2 - overhead -
			    sbi->s_itb_per_group;
	input->free_clusters_count = EXT4_B2C(sbi, free_blocks_count);

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
		       "(%d free, %u reserved)\n",
		       ext4_bg_has_super(sb, input->group) ? "normal" :
		       "no-super", input->group, input->blocks_count,
		       free_blocks_count, input->reserved_blocks);

	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
	if (offset != 0)
		ext4_warning(sb, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
		ext4_warning(sb, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
		ext4_warning(sb, "Bad blocks count %u",
			     input->blocks_count);
	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
		err = PTR_ERR(bh);
		bh = NULL;
		ext4_warning(sb, "Cannot read last block (%llu)",
			     end - 1);
	} else if (outside(input->block_bitmap, start, end))
		ext4_warning(sb, "Block bitmap not in group (block %llu)",
			     (unsigned long long)input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
			     (unsigned long long)input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
		 outside(itend - 1, start, end))
		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
			     (unsigned long long)input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
			     (unsigned long long)input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Block bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->block_bitmap, start, metaend))
		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_table,
			     itend - 1, start, metaend - 1);
	else
		err = 0;
	brelse(bh);

	return err;
}

/*
 * ext4_new_flex_group_data is used by the 64bit-resize interface to add a
 * flex group each time.
 */
struct ext4_new_flex_group_data {
	struct ext4_new_group_data *groups;	/* new_group_data for groups
						   in the flex group */
	__u16 *bg_flags;			/* block group flags of groups
						   in @groups */
	ext4_group_t resize_bg;			/* number of allocated
						   new_group_data */
	ext4_group_t count;			/* number of groups in @groups */
};

/*
 * Avoid memory allocation failures due to too many groups being added at
 * one time.
 */
#define MAX_RESIZE_BG				16384

/*
 * alloc_flex_gd() allocates an ext4_new_flex_group_data of size
 * @flexbg_size.
 *
 * Returns NULL on failure, otherwise the address of the allocated structure.
 */
static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned int flexbg_size,
				ext4_group_t o_group, ext4_group_t n_group)
{
	ext4_group_t last_group;
	struct ext4_new_flex_group_data *flex_gd;

	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
	if (flex_gd == NULL)
		goto out3;

	if (unlikely(flexbg_size > MAX_RESIZE_BG))
		flex_gd->resize_bg = MAX_RESIZE_BG;
	else
		flex_gd->resize_bg = flexbg_size;

	/* Avoid allocating large 'groups' array if not needed */
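	/*
	 * Shrink resize_bg to a power of two that still covers the range
	 * of groups actually being added, so the arrays below stay small.
	 */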
	last_group = o_group | (flex_gd->resize_bg - 1);
	if (n_group <= last_group)
		flex_gd->resize_bg = 1 << fls(n_group - o_group + 1);
	else if (n_group - last_group < flex_gd->resize_bg)
		flex_gd->resize_bg = 1 << max(fls(last_group - o_group + 1),
					      fls(n_group - last_group));

	flex_gd->groups = kmalloc_array(flex_gd->resize_bg,
					sizeof(struct ext4_new_group_data),
					GFP_NOFS);
	if (flex_gd->groups == NULL)
		goto out2;

	flex_gd->bg_flags = kmalloc_array(flex_gd->resize_bg, sizeof(__u16),
					  GFP_NOFS);
	if (flex_gd->bg_flags == NULL)
		goto out1;

	return flex_gd;

out1:
	kfree(flex_gd->groups);
out2:
	kfree(flex_gd);
out3:
	return NULL;
}

static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
{
	kfree(flex_gd->bg_flags);
	kfree(flex_gd->groups);
	kfree(flex_gd);
}

/*
 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
 * and inode tables for a flex group.
 *
 * This function is used by 64bit-resize.  Note that this function allocates
 * group tables from the 1st group of groups contained by @flexgd, which may
 * be only part of a flex group.
 *
 * @sb: super block of the fs to which the groups belong
 *
 * Returns 0 on a successful allocation of the metadata blocks in the
 * block group.
 */
static int ext4_alloc_group_tables(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd,
				unsigned int flexbg_size)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t start_blk;
	ext4_fsblk_t last_blk;
	ext4_group_t src_group;
	ext4_group_t bb_index = 0;
	ext4_group_t ib_index = 0;
	ext4_group_t it_index = 0;
	ext4_group_t group;
	ext4_group_t last_group;
	unsigned overhead;
	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);

	src_group = group_data[0].group;
	last_group = src_group + flex_gd->count - 1;

	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
	       (last_group & ~(flexbg_size - 1))));
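
	/*
	 * Lay the bitmaps and inode tables out in the longest runs of
	 * contiguous non-overhead blocks available, starting from the
	 * first group; when a run is exhausted, move on to the next one.
	 */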
next_group:
	group = group_data[0].group;
	if (src_group >= group_data[0].group + flex_gd->count)
		return -ENOSPC;
	start_blk = ext4_group_first_block_no(sb, src_group);
	last_blk = start_blk + group_data[src_group - group].blocks_count;

	overhead = ext4_group_overhead_blocks(sb, src_group);

	start_blk += overhead;

	/* We collect contiguous blocks as much as possible. */
	src_group++;
	for (; src_group <= last_group; src_group++) {
		overhead = ext4_group_overhead_blocks(sb, src_group);
		if (overhead == 0)
			last_blk += group_data[src_group - group].blocks_count;
		else
			break;
	}

	/* Allocate block bitmaps */
	for (; bb_index < flex_gd->count; bb_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[bb_index].block_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode bitmaps */
	for (; ib_index < flex_gd->count; ib_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[ib_index].inode_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode tables */
	for (; it_index < flex_gd->count; it_index++) {
		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
		ext4_fsblk_t next_group_start;

		if (start_blk + itb > last_blk)
			goto next_group;
		group_data[it_index].inode_table = start_blk;
		group = ext4_get_group_number(sb, start_blk);
		next_group_start = ext4_group_first_block_no(sb, group + 1);
		group -= group_data[0].group;

		if (start_blk + itb > next_group_start) {
			flex_gd->bg_flags[group + 1] &= uninit_mask;
			overhead = start_blk + itb - next_group_start;
			group_data[group + 1].mdata_blocks += overhead;
			itb -= overhead;
		}

		group_data[group].mdata_blocks += itb;
		flex_gd->bg_flags[group] &= uninit_mask;
		start_blk += EXT4_SB(sb)->s_itb_per_group;
	}

	/* Update free clusters count to exclude metadata blocks */
	for (i = 0; i < flex_gd->count; i++) {
		group_data[i].free_clusters_count -=
				EXT4_NUM_B2C(EXT4_SB(sb),
					     group_data[i].mdata_blocks);
	}

	if (test_opt(sb, DEBUG)) {
		int i;
		group = group_data[0].group;

		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
		       "%u groups, flexbg size is %u:\n", flex_gd->count,
		       flexbg_size);

		for (i = 0; i < flex_gd->count; i++) {
			ext4_debug(
			       "adding %s group %u: %u blocks (%u free, %u mdata blocks)\n",
			       ext4_bg_has_super(sb, group + i) ? "normal" :
			       "no-super", group + i,
			       group_data[i].blocks_count,
			       group_data[i].free_clusters_count,
			       group_data[i].mdata_blocks);
		}
	}
	return 0;
}

static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
				  ext4_fsblk_t blk)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
	if (err) {
		brelse(bh);
		bh = ERR_PTR(err);
	} else {
		memset(bh->b_data, 0, sb->s_blocksize);
		set_buffer_uptodate(bh);
	}

	return bh;
}

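/*
 * Make sure @handle has at least @credits journal credits left, extending
 * or restarting the transaction in batches of EXT4_MAX_TRANS_DATA credits
 * when it does not.  Returns a negative errno on failure; callers treat
 * any non-negative return as success.
 */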
static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
{
	return ext4_journal_ensure_credits_fn(handle, credits,
					      EXT4_MAX_TRANS_DATA, 0, 0);
}

/*
 * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster]
 * as used.
 *
 * Helper function for setup_new_flex_group_blocks() which marks the blocks
 * occupied by the new group tables in the relevant block bitmaps.
 *
 * @sb: super block
 * @handle: journal handle
 * @flex_gd: flex group data
 */
static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
			struct ext4_new_flex_group_data *flex_gd,
			ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t count = last_cluster - first_cluster + 1;
	ext4_group_t count2;

	ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
		   last_cluster);
	for (; count > 0; count -= count2, first_cluster += count2) {
		ext4_fsblk_t start;
		struct buffer_head *bh;
		ext4_group_t group;
		int err;

		group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
		start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
		group -= flex_gd->groups[0].group;

		count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
		if (count2 > count)
			count2 = count;

		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
			BUG_ON(flex_gd->count > 1);
			continue;
		}

		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			return err;

		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
		if (unlikely(!bh))
			return -ENOMEM;

		BUFFER_TRACE(bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, sb, bh,
						    EXT4_JTR_NONE);
		if (err) {
			brelse(bh);
			return err;
		}
		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
			   first_cluster, first_cluster - start, count2);
		mb_set_bits(bh->b_data, first_cluster - start, count2);

		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (unlikely(err))
			return err;
	}

	return 0;
}

/*
 * Set up the block and inode bitmaps, and the inode table for the new groups.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem.  We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 *
 * setup_new_flex_group_blocks handles a flex group as follows:
 * 1. copy super block and GDT, and initialize group tables if necessary.
 *    In this step, we only set bits in block bitmaps for blocks taken by
 *    super block and GDT.
 * 2. allocate group tables in block bitmaps, that is, set bits in block
 *    bitmap for blocks taken by group tables.
 */
static int setup_new_flex_group_blocks(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
	ext4_fsblk_t start;
	ext4_fsblk_t block;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	__u16 *bg_flags = flex_gd->bg_flags;
	handle_t *handle;
	ext4_group_t group, count;
	struct buffer_head *bh = NULL;
	int reserved_gdb, i, j, err = 0, err2;
	int meta_bg;

	BUG_ON(!flex_gd->count || !group_data ||
	       group_data[0].group != sbi->s_groups_count);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	meta_bg = ext4_has_feature_meta_bg(sb);

	/* This transaction may be extended/restarted along the way */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	group = group_data[0].group;
	for (i = 0; i < flex_gd->count; i++, group++) {
		unsigned long gdblocks;
		ext4_grpblk_t overhead;

		gdblocks = ext4_bg_num_gdb(sb, group);
		start = ext4_group_first_block_no(sb, group);

		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
			goto handle_itb;

		if (meta_bg == 1)
			goto handle_itb;

		block = start + ext4_bg_has_super(sb, group);
		/* Copy all of the GDT blocks into the backup in this group */
		for (j = 0; j < gdblocks; j++, block++) {
			struct buffer_head *gdb;

			ext4_debug("update backup group %#04llx\n", block);
			err = ext4_resize_ensure_credits_batch(handle, 1);
			if (err < 0)
				goto out;

			gdb = sb_getblk(sb, block);
			if (unlikely(!gdb)) {
				err = -ENOMEM;
				goto out;
			}

			BUFFER_TRACE(gdb, "get_write_access");
			err = ext4_journal_get_write_access(handle, sb, gdb,
							    EXT4_JTR_NONE);
			if (err) {
				brelse(gdb);
				goto out;
			}
			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
				s_group_desc, j)->b_data, gdb->b_size);
			set_buffer_uptodate(gdb);

			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
			if (unlikely(err)) {
				brelse(gdb);
				goto out;
			}
			brelse(gdb);
		}

		/* Zero out all of the reserved backup group descriptor
		 * table blocks
		 */
		if (ext4_bg_has_super(sb, group)) {
			err = sb_issue_zeroout(sb, gdblocks + start + 1,
					       reserved_gdb, GFP_NOFS);
			if (err)
				goto out;
		}

handle_itb:
		/* Initialize group tables of the group @group */
		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
			goto handle_bb;

		/* Zero out all of the inode table blocks */
		block = group_data[i].inode_table;
		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
			   block, sbi->s_itb_per_group);
		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
				       GFP_NOFS);
		if (err)
			goto out;

handle_bb:
		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
			goto handle_ib;

		/* Initialize block bitmap of the @group */
		block = group_data[i].block_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;

		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}
		overhead = ext4_group_overhead_blocks(sb, group);
		if (overhead != 0) {
			ext4_debug("mark backup superblock %#04llx (+0)\n",
				   start);
			mb_set_bits(bh->b_data, 0,
				    EXT4_NUM_B2C(sbi, overhead));
		}
		ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;

handle_ib:
		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
			continue;

		/* Initialize inode bitmap of the @group */
		block = group_data[i].inode_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;
		/* Mark unused entries in inode bitmap used */
		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}

		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;
	}

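	/*
	 * The (&group_data[i].block_bitmap)[j] indexing below relies on
	 * block_bitmap, inode_bitmap and inode_table being consecutive
	 * fields in struct ext4_new_group_data, in that order.
	 */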
	/* Mark group tables in block bitmap */
	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
		count = group_table_count[j];
		start = (&group_data[0].block_bitmap)[j];
		block = start;
		for (i = 1; i < flex_gd->count; i++) {
			block += group_table_count[j];
			if (block == (&group_data[i].block_bitmap)[j]) {
				count += group_table_count[j];
				continue;
			}
			err = set_flexbg_block_bitmap(sb, handle,
						      flex_gd,
						      EXT4_B2C(sbi, start),
						      EXT4_B2C(sbi,
							       start + count
							       - 1));
			if (err)
				goto out;
			count = group_table_count[j];
			start = (&group_data[i].block_bitmap)[j];
			block = start;
		}

		err = set_flexbg_block_bitmap(sb, handle,
					      flex_gd,
					      EXT4_B2C(sbi, start),
					      EXT4_B2C(sbi,
						       start + count
						       - 1));
		if (err)
			goto out;
	}

out:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	return err;
}

/*
 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
 * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
 * calling this for the first time.  In a sparse filesystem it will be the
 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
 */
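/*
 * Typical iteration, as in verify_reserved_gdb() below:
 *
 *	unsigned three = 1, five = 5, seven = 7;
 *	unsigned grp;
 *
 *	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end)
 *		...use backup group grp...;
 */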
unsigned int ext4_list_backups(struct super_block *sb, unsigned int *three,
			       unsigned int *five, unsigned int *seven)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned int *min = three;
	int mult = 3;
	unsigned int ret;

	if (ext4_has_feature_sparse_super2(sb)) {
		do {
			if (*min > 2)
				return UINT_MAX;
			ret = le32_to_cpu(es->s_backup_bgs[*min - 1]);
			*min += 1;
		} while (!ret);
		return ret;
	}

	if (!ext4_has_feature_sparse_super(sb)) {
		ret = *min;
		*min += 1;
		return ret;
	}

	if (*five < *min) {
		min = five;
		mult = 5;
	}
	if (*seven < *min) {
		min = seven;
		mult = 7;
	}

	ret = *min;
	*min *= mult;

	return ret;
}

/*
 * Check that all of the backup GDT blocks are held in the primary GDT block.
 * It is assumed that they are stored in group order.  Returns the number of
 * groups in current filesystem that have BACKUPS, or -ve error code.
 */
static int verify_reserved_gdb(struct super_block *sb,
			       ext4_group_t end,
			       struct buffer_head *primary)
{
	const ext4_fsblk_t blk = primary->b_blocknr;
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned grp;
	__le32 *p = (__le32 *)primary->b_data;
	int gdbackups = 0;

	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
		if (le32_to_cpu(*p++) !=
		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk) {
			ext4_warning(sb, "reserved GDT %llu"
				     " missing grp %d (%llu)",
				     blk, grp,
				     grp *
				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
				     blk);
			return -EINVAL;
		}
		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
			return -EFBIG;
	}

	return gdbackups;
}

/*
 * Called when we need to bring a reserved group descriptor table block into
 * use from the resize inode.  The primary copy of the new GDT block currently
 * is an indirect block (under the double indirect block in the resize inode).
 * The new backup GDT blocks will be stored as leaf blocks in this indirect
 * block, in group order.  Even though we know all the block numbers we need,
 * we check to ensure that the resize inode has actually reserved these blocks.
 *
 * Don't need to update the block bitmaps because the blocks are still in use.
 *
 * We get all of the error cases out of the way, so that we are sure to not
 * fail once we start modifying the data on disk, because JBD has no rollback.
 */
static int add_new_gdb(handle_t *handle, struct inode *inode,
		       ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
	struct buffer_head **o_group_desc, **n_group_desc = NULL;
	struct buffer_head *dind = NULL;
	struct buffer_head *gdb_bh = NULL;
	int gdbackups;
	struct ext4_iloc iloc = { .bh = NULL };
	__le32 *data;
	int err;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG
		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
		       gdb_num);

	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);

	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
	if (gdbackups < 0) {
		err = gdbackups;
		goto errout;
	}

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto errout;
	}

	data = (__le32 *)dind->b_data;
	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
		ext4_warning(sb, "new group %u GDT block %llu not reserved",
			     group, gdblock);
		err = -EINVAL;
		goto errout;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
					    EXT4_JTR_NONE);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(dind, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, dind, EXT4_JTR_NONE);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}

	/* ext4_reserve_inode_write() gets a reference on the iloc */
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (unlikely(err))
		goto errout;

	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		goto errout;
	}

	/*
	 * Finally, we have all of the possible failures behind us...
	 *
	 * Remove new GDT block from inode double-indirect block and clear out
	 * the new GDT block for use (which also "frees" the backup GDT blocks
	 * from the reserved inode).  We don't need to change the bitmaps for
	 * these blocks, because they are marked as in-use from being in the
	 * reserved inode, and will become GDT blocks (primary and backup).
	 */
	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
	err = ext4_handle_dirty_metadata(handle, NULL, dind);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}
	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
			   (9 - EXT4_SB(sb)->s_cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);
	memset(gdb_bh->b_data, 0, sb->s_blocksize);
	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		iloc.bh = NULL;
		goto errout;
	}
	brelse(dind);

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;
	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);

	lock_buffer(EXT4_SB(sb)->s_sbh);
	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
	ext4_superblock_csum_set(sb);
	unlock_buffer(EXT4_SB(sb)->s_sbh);
	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
	if (err)
		ext4_std_error(sb, err);
	return err;
errout:
	kvfree(n_group_desc);
	brelse(iloc.bh);
	brelse(dind);
	brelse(gdb_bh);

	ext4_debug("leaving with error %d\n", err);
	return err;
}

/*
 * If there is no available space in the existing block group descriptors for
 * the new block group and there are no reserved block group descriptors, then
 * the meta_bg feature will get enabled, and es->s_first_meta_bg will get set
 * to the first block group that is managed using meta_bg and s_first_meta_bg
 * must be a multiple of EXT4_DESC_PER_BLOCK(sb).
 * This function is called when the first group of a meta_bg is added, to
 * bring the new group descriptor block of the newly added meta_bg into use.
 */
static int add_new_gdb_meta_bg(struct super_block *sb,
			       handle_t *handle, ext4_group_t group)
{
	ext4_fsblk_t gdblock;
	struct buffer_head *gdb_bh;
	struct buffer_head **o_group_desc, **n_group_desc;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	int err;

	gdblock = ext4_group_first_block_no(sb, group) +
		  ext4_bg_has_super(sb, group);
	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);
	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		brelse(gdb_bh);
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		return err;
	}

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
	if (err) {
		kvfree(n_group_desc);
		brelse(gdb_bh);
		return err;
	}

	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);
	return err;
}

/*
 * Called when we are adding a new group which has a backup copy of each of
 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
 * We need to add these reserved backup GDT blocks to the resize inode, so
 * that they are kept for future resizing and not allocated to files.
 *
 * Each reserved backup GDT block will go into a different indirect block.
 * The indirect blocks are actually the primary reserved GDT blocks,
 * so we know in advance what their block numbers are.  We only get the
 * double-indirect block to verify it is pointing to the primary reserved
 * GDT blocks so we don't overwrite a data block by accident.  The reserved
 * backup GDT blocks are stored in their reserved primary GDT block.
 */
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
			      ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
	struct buffer_head **primary;
	struct buffer_head *dind;
	struct ext4_iloc iloc;
	ext4_fsblk_t blk;
	__le32 *data, *end;
	int gdbackups = 0;
	int res, i;
	int err;

	primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
	if (!primary)
		return -ENOMEM;

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto exit_free;
	}

	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
					 EXT4_ADDR_PER_BLOCK(sb));
	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);

	/* Get each reserved primary GDT block and verify it holds backups */
	for (res = 0; res < reserved_gdb; res++, blk++) {
		if (le32_to_cpu(*data) != blk) {
			ext4_warning(sb, "reserved block %llu"
				     " not at offset %ld",
				     blk,
				     (long)(data - (__le32 *)dind->b_data));
			err = -EINVAL;
			goto exit_bh;
		}
		primary[res] = ext4_sb_bread(sb, blk, 0);
		if (IS_ERR(primary[res])) {
			err = PTR_ERR(primary[res]);
			primary[res] = NULL;
			goto exit_bh;
		}
		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
		if (gdbackups < 0) {
			brelse(primary[res]);
			err = gdbackups;
			goto exit_bh;
		}
		if (++data >= end)
			data = (__le32 *)dind->b_data;
	}

	for (i = 0; i < reserved_gdb; i++) {
		BUFFER_TRACE(primary[i], "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, sb, primary[i],
							 EXT4_JTR_NONE)))
			goto exit_bh;
	}

	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
		goto exit_bh;

	/*
	 * Finally we can add each of the reserved backup GDT blocks from
	 * the new group to its reserved primary GDT block.
	 */
	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
	for (i = 0; i < reserved_gdb; i++) {
		int err2;
		data = (__le32 *)primary[i]->b_data;
		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
		if (!err)
			err = err2;
	}

	inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);

exit_bh:
	while (--res >= 0)
		brelse(primary[res]);
	brelse(dind);

exit_free:
	kfree(primary);

	return err;
}

static inline void ext4_set_block_group_nr(struct super_block *sb, char *data,
					   ext4_group_t group)
{
	struct ext4_super_block *es = (struct ext4_super_block *) data;

	es->s_block_group_nr = cpu_to_le16(group);
	if (ext4_has_metadata_csum(sb))
		es->s_checksum = ext4_superblock_csum(sb, es);
}

/*
 * Update the backup copies of the ext4 metadata.  These don't need to be part
 * of the main resize transaction, because e2fsck will re-write them if there
 * is a problem (basically only OOM will cause a problem).  However, we
 * _should_ update the backups if possible, in case the primary gets trashed
 * for some reason and we need to run e2fsck from a backup superblock.  The
 * important part is that the new block and inode counts are in the backup
 * superblocks, and the location of the new group metadata in the GDT backups.
 *
 * We do not need to take the s_resize_lock for this, because these
 * blocks are not otherwise touched by the filesystem code when it is
 * mounted.  We don't need to worry about last changing from
 * sbi->s_groups_count, because the worst that can happen is that we
 * do not copy the full number of backups at this time.  The resize
 * which changed s_groups_count will backup again.
 */
static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
			   int size, int meta_bg)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t last;
	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	ext4_group_t group = 0;
	int rest = sb->s_blocksize - size;
	handle_t *handle;
	int err = 0, err2;

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle)) {
		group = 1;
		err = PTR_ERR(handle);
		goto exit_err;
	}

	if (meta_bg == 0) {
		group = ext4_list_backups(sb, &three, &five, &seven);
		last = sbi->s_groups_count;
	} else {
		group = ext4_get_group_number(sb, blk_off) + 1;
		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
	}

	while (group < sbi->s_groups_count) {
		struct buffer_head *bh;
		ext4_fsblk_t backup_block;
		int has_super = ext4_bg_has_super(sb, group);
		ext4_fsblk_t first_block = ext4_group_first_block_no(sb, group);

		/* Out of journal space, and can't get more - abort - so sad */
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			break;

		if (meta_bg == 0)
			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
		else
			backup_block = first_block + has_super;

		bh = sb_getblk(sb, backup_block);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			break;
		}
		ext4_debug("update metadata backup %llu(+%llu)\n",
			   backup_block, backup_block -
			   ext4_group_first_block_no(sb, group));
		BUFFER_TRACE(bh, "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, sb, bh,
							 EXT4_JTR_NONE))) {
			brelse(bh);
			break;
		}
		lock_buffer(bh);
		memcpy(bh->b_data, data, size);
		if (rest)
			memset(bh->b_data + size, 0, rest);
		if (has_super && (backup_block == first_block))
			ext4_set_block_group_nr(sb, bh->b_data, group);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (unlikely(err))
			ext4_std_error(sb, err);
		brelse(bh);

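		/*
		 * With meta_bg, the backup descriptor blocks live in the
		 * second and last groups of the meta block group, so step
		 * straight from one to the other.
		 */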
		if (meta_bg == 0)
			group = ext4_list_backups(sb, &three, &five, &seven);
		else if (group == last)
			break;
		else
			group = last;
	}
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;

	/*
	 * Ugh! Need to have e2fsck write the backup copies.  It is too
	 * late to revert the resize, we shouldn't fail just because of
	 * the backup copies (they are only needed in case of corruption).
	 *
	 * However, if we got here we have a journal problem too, so we
	 * can't really start a transaction to mark the superblock.
	 * Chicken out and just set the flag on the hope it will be written
	 * to disk, and if not - we will simply wait until next fsck.
	 */
exit_err:
	if (err) {
		ext4_warning(sb, "can't update backup for group %u (err %d), "
			     "forcing fsck on next reboot", group, err);
		sbi->s_mount_state &= ~EXT4_VALID_FS;
		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
		mark_buffer_dirty(sbi->s_sbh);
	}
}

/*
 * ext4_add_new_descs() adds @count group descriptors of groups
 * starting at @group
 *
 * @handle: journal handle
 * @sb: super block
 * @group: the group no. of the first group desc to be added
 * @resize_inode: the resize inode
 * @count: number of group descriptors to be added
 */
static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
			      ext4_group_t group, struct inode *resize_inode,
			      ext4_group_t count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *gdb_bh;
	int i, gdb_off, gdb_num, err = 0;
	int meta_bg;

	meta_bg = ext4_has_feature_meta_bg(sb);
	for (i = 0; i < count; i++, group++) {
		int reserved_gdb = ext4_bg_has_super(sb, group) ?
			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * We will only either add reserved group blocks to a backup group
		 * or remove reserved blocks for the first group in a new group block.
		 * Doing both would mean more complex code, and sane people don't
		 * use non-sparse filesystems anymore.  This is already checked above.
		 */
		if (gdb_off) {
			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			BUFFER_TRACE(gdb_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sb, gdb_bh,
							    EXT4_JTR_NONE);

			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
				err = reserve_backup_gdb(handle, resize_inode, group);
		} else if (meta_bg != 0) {
			err = add_new_gdb_meta_bg(sb, handle, group);
		} else {
			err = add_new_gdb(handle, resize_inode, group);
		}
		if (err)
			break;
	}
	return err;
}

static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
{
	struct buffer_head *bh = sb_getblk(sb, block);

	if (unlikely(!bh))
		return NULL;
	if (!bh_uptodate_or_lock(bh)) {
		if (ext4_read_bh(bh, 0, NULL) < 0) {
			brelse(bh);
			return NULL;
		}
	}

	return bh;
}

static int ext4_set_bitmap_checksums(struct super_block *sb,
				     struct ext4_group_desc *gdp,
				     struct ext4_new_group_data *group_data)
{
	struct buffer_head *bh;

	if (!ext4_has_metadata_csum(sb))
		return 0;

	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
	if (!bh)
		return -EIO;
	ext4_inode_bitmap_csum_set(sb, gdp, bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	brelse(bh);

	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
	if (!bh)
		return -EIO;
	ext4_block_bitmap_csum_set(sb, gdp, bh);
	brelse(bh);

	return 0;
}

/*
 * ext4_setup_new_descs() will set up the group descriptors of a flex bg
 */
static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_group_desc *gdp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *gdb_bh;
	ext4_group_t group;
	__u16 *bg_flags = flex_gd->bg_flags;
	int i, gdb_off, gdb_num, err = 0;

	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
		group = group_data->group;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * get_write_access() has been called on gdb_bh by
		 * ext4_add_new_descs().
		 */
		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
		/* Update group descriptor block for new group */
		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
						 gdb_off * EXT4_DESC_SIZE(sb));

		memset(gdp, 0, EXT4_DESC_SIZE(sb));
		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
		err = ext4_set_bitmap_checksums(sb, gdp, group_data);
		if (err) {
			ext4_std_error(sb, err);
			break;
		}

		ext4_inode_table_set(sb, gdp, group_data->inode_table);
		ext4_free_group_clusters_set(sb, gdp,
					     group_data->free_clusters_count);
		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
		if (ext4_has_group_desc_csum(sb))
			ext4_itable_unused_set(sb, gdp,
					       EXT4_INODES_PER_GROUP(sb));
		gdp->bg_flags = cpu_to_le16(*bg_flags);
		ext4_group_desc_csum_set(sb, group, gdp);

		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
		if (unlikely(err)) {
			ext4_std_error(sb, err);
			break;
		}

		/*
		 * We can allocate memory for mb_alloc based on the new group
		 * descriptor
		 */
		err = ext4_mb_add_groupinfo(sb, group, gdp);
		if (err)
			break;
	}
	return err;
}

static void ext4_add_overhead(struct super_block *sb,
			      const ext4_fsblk_t overhead)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;

	sbi->s_overhead += overhead;
	es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
	smp_wmb();
}

/*
 * ext4_update_super() updates the super block so that the newly added
 * groups can be seen by the filesystem.
 *
 * @sb: super block
 * @flex_gd: newly added groups
 */
static void ext4_update_super(struct super_block *sb,
			      struct ext4_new_flex_group_data *flex_gd)
{
	ext4_fsblk_t blocks_count = 0;
	ext4_fsblk_t free_blocks = 0;
	ext4_fsblk_t reserved_blocks = 0;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);
	/*
	 * Make the new blocks and inodes valid next.  We do this before
	 * increasing the group count so that once the group is enabled,
	 * all of its blocks and inodes are already valid.
	 *
	 * We always allocate group-by-group, then block-by-block or
	 * inode-by-inode within a group, so enabling these
	 * blocks/inodes before the group is live won't actually let us
	 * allocate the new space yet.
	 */
	for (i = 0; i < flex_gd->count; i++) {
		blocks_count += group_data[i].blocks_count;
		free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
	}

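	/*
	 * Carry the filesystem's current reserved-blocks percentage over
	 * to the newly added blocks: reserved = new_blocks *
	 * (r_blocks / total_blocks), computed in integer steps.
	 */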
	reserved_blocks = ext4_r_blocks_count(es) * 100;
	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
	reserved_blocks *= blocks_count;
	do_div(reserved_blocks, 100);

	lock_buffer(sbi->s_sbh);
	ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);
	le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);

	ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
	/*
	 * We need to protect s_groups_count against other CPUs seeing
	 * inconsistent state in the superblock.
	 *
	 * The precise rules we use are:
	 *
	 * * Writers must perform a smp_wmb() after updating all
	 *   dependent data and before modifying the groups count
	 *
	 * * Readers must perform an smp_rmb() after reading the groups
	 *   count and before reading any dependent data.
	 *
	 * NB. These rules can be relaxed when checking the group count
	 * while freeing data, as we can only allocate from a block
	 * group after serialising against the group count, and we can
	 * only then free after serialising in turn against that
	 * allocation.
	 */
	smp_wmb();

	/* Update the global fs size fields */
	sbi->s_groups_count += flex_gd->count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));

	/* Update the reserved block counts only once the new group is
	 * active. */
	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
				reserved_blocks);

	/* Update the free space counts */
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   EXT4_NUM_B2C(sbi, free_blocks));
	percpu_counter_add(&sbi->s_freeinodes_counter,
			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count);

	ext4_debug("free blocks count %llu",
		   percpu_counter_read(&sbi->s_freeclusters_counter));
	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group;
		struct flex_groups *fg;

		flex_group = ext4_flex_group(sbi, group_data[0].group);
		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
			     &fg->free_clusters);
		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
			   &fg->free_inodes);
	}

	/*
	 * Update the fs overhead information.
	 *
	 * For bigalloc, if the superblock already has a properly calculated
	 * overhead, update it with a value based on numbers already computed
	 * above for the newly allocated capacity.
	 */
	if (ext4_has_feature_bigalloc(sb) && (sbi->s_overhead != 0))
		ext4_add_overhead(sb,
			EXT4_NUM_B2C(sbi, blocks_count - free_blocks));
	else
		ext4_calculate_overhead(sb);
	es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);

	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);
	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: added group %u:"
		       "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
		       blocks_count, free_blocks, reserved_blocks);
}
1530 | |
1531 | /* Add a flex group to an fs. Ensure we handle all possible error conditions |
1532 | * _before_ we start modifying the filesystem, because we cannot abort the |
1533 | * transaction and not have it write the data to disk. |
1534 | */ |
1535 | static int ext4_flex_group_add(struct super_block *sb, |
1536 | struct inode *resize_inode, |
1537 | struct ext4_new_flex_group_data *flex_gd) |
1538 | { |
1539 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
1540 | struct ext4_super_block *es = sbi->s_es; |
1541 | ext4_fsblk_t o_blocks_count; |
1542 | ext4_grpblk_t last; |
1543 | ext4_group_t group; |
1544 | handle_t *handle; |
1545 | unsigned reserved_gdb; |
1546 | int err = 0, err2 = 0, credit; |
1547 | |
1548 | BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags); |
1549 | |
1550 | reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks); |
1551 | o_blocks_count = ext4_blocks_count(es); |
1552 | ext4_get_group_no_and_offset(sb, blocknr: o_blocks_count, blockgrpp: &group, offsetp: &last); |
1553 | BUG_ON(last); |
1554 | |
1555 | err = setup_new_flex_group_blocks(sb, flex_gd); |
1556 | if (err) |
1557 | goto exit; |
1558 | /* |
1559 | * We will always be modifying at least the superblock and GDT |
1560 | * blocks. If we are adding a group past the last current GDT block, |
1561 | * we will also modify the inode and the dindirect block. If we |
1562 | * are adding a group with superblock/GDT backups we will also |
1563 | * modify each of the reserved GDT dindirect blocks. |
1564 | */ |
1565 | credit = 3; /* sb, resize inode, resize inode dindirect */ |
1566 | /* GDT blocks */ |
1567 | credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb)); |
1568 | credit += reserved_gdb; /* Reserved GDT dindirect blocks */ |
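        /*
         * Worked example of the credit arithmetic (illustrative numbers
         * only): adding a 16-group flex group on a filesystem with 128
         * descriptors per GDT block and 256 reserved GDT blocks gives
         *
         *      credit = 3 + (1 + DIV_ROUND_UP(16, 128)) + 256 = 261
         *
         * buffers: the sb, the resize inode and its dindirect block, the
         * affected GDT block(s), and every reserved GDT dindirect block.
         */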
        handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
                goto exit;
        }

        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
        err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
                                            EXT4_JTR_NONE);
        if (err)
                goto exit_journal;

        group = flex_gd->groups[0].group;
        BUG_ON(group != sbi->s_groups_count);
        err = ext4_add_new_descs(handle, sb, group,
                                 resize_inode, flex_gd->count);
        if (err)
                goto exit_journal;

        err = ext4_setup_new_descs(handle, sb, flex_gd);
        if (err)
                goto exit_journal;

        ext4_update_super(sb, flex_gd);

        err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);

exit_journal:
        err2 = ext4_journal_stop(handle);
        if (!err)
                err = err2;

        if (!err) {
                int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
                int gdb_num_end = ((group + flex_gd->count - 1) /
                                   EXT4_DESC_PER_BLOCK(sb));
                int meta_bg = ext4_has_feature_meta_bg(sb) &&
                              gdb_num >= le32_to_cpu(es->s_first_meta_bg);
                sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
                                          ext4_group_first_block_no(sb, 0);
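                /*
                 * For the non-meta_bg layout the primary GDT blocks live
                 * directly after the primary superblock, so subtracting
                 * padding_blocks turns each primary GDT block number into
                 * the group-relative offset at which its backup copies
                 * sit (used by the update_backups() calls below).
                 */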
                update_backups(sb, ext4_group_first_block_no(sb, 0),
                               (char *)es, sizeof(struct ext4_super_block), 0);
                for (; gdb_num <= gdb_num_end; gdb_num++) {
                        struct buffer_head *gdb_bh;

                        gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
                                                     gdb_num);
                        update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
                                       gdb_bh->b_data, gdb_bh->b_size, meta_bg);
                }
        }
exit:
        return err;
}

static int ext4_setup_next_flex_gd(struct super_block *sb,
                                   struct ext4_new_flex_group_data *flex_gd,
                                   ext4_fsblk_t n_blocks_count)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        struct ext4_new_group_data *group_data = flex_gd->groups;
        ext4_fsblk_t o_blocks_count;
        ext4_group_t n_group;
        ext4_group_t group;
        ext4_group_t last_group;
        ext4_grpblk_t last;
        ext4_grpblk_t clusters_per_group;
        unsigned long i;

        clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);

        o_blocks_count = ext4_blocks_count(es);

        if (o_blocks_count == n_blocks_count)
                return 0;

        ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
        BUG_ON(last);
        ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);

        last_group = group | (flex_gd->resize_bg - 1);
        if (last_group > n_group)
                last_group = n_group;
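        /*
         * Note on the OR above: flex_gd->resize_bg is a power of two
         * here, so the OR rounds this batch's end up to the next
         * resize_bg boundary. Illustrative numbers: with resize_bg == 16
         * and group == 5, last_group becomes 15, then is clamped to
         * n_group if the filesystem ends sooner.
         */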

        flex_gd->count = last_group - group + 1;

        for (i = 0; i < flex_gd->count; i++) {
                int overhead;

                group_data[i].group = group + i;
                group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
                overhead = ext4_group_overhead_blocks(sb, group + i);
                group_data[i].mdata_blocks = overhead;
                group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
                if (ext4_has_group_desc_csum(sb)) {
                        flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
                                               EXT4_BG_INODE_UNINIT;
                        if (!test_opt(sb, INIT_INODE_TABLE))
                                flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
                } else
                        flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
        }

        if (last_group == n_group && ext4_has_group_desc_csum(sb))
                /* We need to initialize the block bitmap of the last group. */
                flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;

        if ((last_group == n_group) && (last != clusters_per_group - 1)) {
                group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
                group_data[i - 1].free_clusters_count -= clusters_per_group -
                                                         last - 1;
        }

        return 1;
}

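/*
 * Orientation note (based on the wider kernel tree, not on anything in
 * this file): ext4_group_add() below is normally reached from the
 * EXT4_IOC_GROUP_ADD ioctl, with @input copied in from userspace.
 */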
/* Add group descriptor data to an existing or new group descriptor block.
 * Ensure we handle all possible error conditions _before_ we start modifying
 * the filesystem, because we cannot abort the transaction and not have it
 * write the data to disk.
 *
 * If we are on a GDT block boundary, we need to get the reserved GDT block.
 * Otherwise, we may need to add backup GDT blocks for a sparse group.
 *
 * We only need to hold the superblock lock while we are actually adding
 * in the new group's counts to the superblock. Prior to that we have
 * not really "added" the group at all. We re-check that we are still
 * adding in the last group in case things have changed since verifying.
 */
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
        struct ext4_new_flex_group_data flex_gd;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
                le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
        struct inode *inode = NULL;
        int gdb_off;
        int err;
        __u16 bg_flags = 0;

        gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);

        if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
                ext4_warning(sb, "Can't resize non-sparse filesystem further");
                return -EPERM;
        }

        if (ext4_blocks_count(es) + input->blocks_count <
            ext4_blocks_count(es)) {
                ext4_warning(sb, "blocks_count overflow");
                return -EINVAL;
        }

        if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
            le32_to_cpu(es->s_inodes_count)) {
                ext4_warning(sb, "inodes_count overflow");
                return -EINVAL;
        }

        if (reserved_gdb || gdb_off == 0) {
                if (!ext4_has_feature_resize_inode(sb) ||
                    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
                        ext4_warning(sb,
                                     "No reserved GDT blocks, can't resize");
                        return -EPERM;
                }
                inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
                if (IS_ERR(inode)) {
                        ext4_warning(sb, "Error opening resize inode");
                        return PTR_ERR(inode);
                }
        }

        err = verify_group_input(sb, input);
        if (err)
                goto out;

        err = ext4_alloc_flex_bg_array(sb, input->group + 1);
        if (err)
                goto out;

        err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
        if (err)
                goto out;

        flex_gd.count = 1;
        flex_gd.groups = input;
        flex_gd.bg_flags = &bg_flags;
        err = ext4_flex_group_add(sb, inode, &flex_gd);
out:
        iput(inode);
        return err;
} /* ext4_group_add */

/*
 * Extend a group without checking, assuming that the necessary checks
 * have already been done.
 */
static int ext4_group_extend_no_check(struct super_block *sb,
                          ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        handle_t *handle;
        int err = 0, err2;

        /* We will update the superblock, one block bitmap, and
         * one group descriptor via ext4_group_add_blocks().
         */
        handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
                ext4_warning(sb, "error %d on journal start", err);
                return err;
        }

        BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
        err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
                                            EXT4_JTR_NONE);
        if (err) {
                ext4_warning(sb, "error %d on journal write access", err);
                goto errout;
        }

        lock_buffer(EXT4_SB(sb)->s_sbh);
        ext4_blocks_count_set(es, o_blocks_count + add);
        ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
        ext4_superblock_csum_set(sb);
        unlock_buffer(EXT4_SB(sb)->s_sbh);
        ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
                   o_blocks_count + add);
        /* We add the blocks to the bitmap and set the group need init bit */
        err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
        if (err)
                goto errout;
        ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
        ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
                   o_blocks_count + add);
errout:
        err2 = ext4_journal_stop(handle);
        if (err2 && !err)
                err = err2;

        if (!err) {
                if (test_opt(sb, DEBUG))
                        printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
                               "blocks\n", ext4_blocks_count(es));
                update_backups(sb, ext4_group_first_block_no(sb, 0),
                               (char *)es, sizeof(struct ext4_super_block), 0);
        }
        return err;
}

/*
 * Extend the filesystem to the new number of blocks specified. This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
 *
 * If we _really_ wanted, we could use default values to call ext4_group_add()
 * to allow the "remount" trick to work for arbitrary resizing, assuming enough
 * GDT blocks are reserved to grow to the desired size.
 */
int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
                      ext4_fsblk_t n_blocks_count)
{
        ext4_fsblk_t o_blocks_count;
        ext4_grpblk_t last;
        ext4_grpblk_t add;
        struct buffer_head *bh;
        ext4_group_t group;

        o_blocks_count = ext4_blocks_count(es);

        if (test_opt(sb, DEBUG))
                ext4_msg(sb, KERN_DEBUG,
                         "extending last group from %llu to %llu blocks",
                         o_blocks_count, n_blocks_count);

        if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
                return 0;

        if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
                ext4_msg(sb, KERN_ERR,
                         "filesystem too large to resize to %llu blocks safely",
                         n_blocks_count);
                return -EINVAL;
        }

        if (n_blocks_count < o_blocks_count) {
                ext4_warning(sb, "can't shrink FS - resize aborted");
                return -EINVAL;
        }

        /* Handle the remaining blocks in the last group only. */
        ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);

        if (last == 0) {
                ext4_warning(sb, "need to use ext2online to resize further");
                return -EPERM;
        }

        add = EXT4_BLOCKS_PER_GROUP(sb) - last;
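        /*
         * Illustrative arithmetic (made-up numbers): with 32768 blocks per
         * group and the old size ending at offset last == 20000 inside the
         * final group, add == 32768 - 20000 == 12768, i.e. exactly enough
         * blocks to finish out that group.
         */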

        if (o_blocks_count + add < o_blocks_count) {
                ext4_warning(sb, "blocks_count overflow");
                return -EINVAL;
        }

        if (o_blocks_count + add > n_blocks_count)
                add = n_blocks_count - o_blocks_count;

        if (o_blocks_count + add < n_blocks_count)
                ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
                             o_blocks_count + add, add);

        /* See if the device is actually as big as what was requested */
        bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
        if (IS_ERR(bh)) {
                ext4_warning(sb, "can't read last block, resize aborted");
                return -ENOSPC;
        }
        brelse(bh);

        return ext4_group_extend_no_check(sb, o_blocks_count, add);
} /* ext4_group_extend */

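/*
 * num_desc_blocks() is a round-up division: how many GDT blocks are
 * needed to hold @groups descriptors. Purely illustrative numbers: with
 * 128 descriptors per block, 128 groups need one descriptor block and
 * 129 groups need two.
 */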
static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
{
        return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
}

/*
 * Release the resize inode and drop the resize_inode feature if there
 * are no more reserved gdt blocks, and then convert the file system
 * to enable meta_bg.
 */
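/*
 * Background (general ext4 layout knowledge, not derived from this
 * file): with meta_bg, descriptors no longer all sit behind the primary
 * superblock. Each meta block group of EXT4_DESC_PER_BLOCK(sb) groups
 * stores its own descriptor block within itself, with backup copies in
 * the second and last groups of that meta group, so no reserved GDT
 * blocks are needed for future growth.
 */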
static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
{
        handle_t *handle;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        struct ext4_inode_info *ei = EXT4_I(inode);
        ext4_fsblk_t nr;
        int i, ret, err = 0;
        int credits = 1;

        ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
        if (inode) {
                if (es->s_reserved_gdt_blocks) {
                        ext4_error(sb, "Unexpected non-zero "
                                   "s_reserved_gdt_blocks");
                        return -EPERM;
                }

                /* Do a quick sanity check of the resize inode */
                if (inode->i_blocks != 1 << (inode->i_blkbits -
                                             (9 - sbi->s_cluster_bits)))
                        goto invalid_resize_inode;
                for (i = 0; i < EXT4_N_BLOCKS; i++) {
                        if (i == EXT4_DIND_BLOCK) {
                                if (ei->i_data[i])
                                        continue;
                                else
                                        goto invalid_resize_inode;
                        }
                        if (ei->i_data[i])
                                goto invalid_resize_inode;
                }
                credits += 3;   /* block bitmap, bg descriptor, resize inode */
        }

        handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
        err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
                                            EXT4_JTR_NONE);
        if (err)
                goto errout;

        lock_buffer(sbi->s_sbh);
        ext4_clear_feature_resize_inode(sb);
        ext4_set_feature_meta_bg(sb);
        sbi->s_es->s_first_meta_bg =
                cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
        ext4_superblock_csum_set(sb);
        unlock_buffer(sbi->s_sbh);

        err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
        if (err) {
                ext4_std_error(sb, err);
                goto errout;
        }

        if (inode) {
                nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
                ext4_free_blocks(handle, inode, NULL, nr, 1,
                                 EXT4_FREE_BLOCKS_METADATA |
                                 EXT4_FREE_BLOCKS_FORGET);
                ei->i_data[EXT4_DIND_BLOCK] = 0;
                inode->i_blocks = 0;

                err = ext4_mark_inode_dirty(handle, inode);
                if (err)
                        ext4_std_error(sb, err);
        }

errout:
        ret = ext4_journal_stop(handle);
        return err ? err : ret;

invalid_resize_inode:
        ext4_error(sb, "corrupted/inconsistent resize inode");
        return -EINVAL;
}
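
/*
 * Orientation note (based on the wider kernel tree, not on anything in
 * this file): ext4_resize_fs() below is normally reached via the
 * EXT4_IOC_RESIZE_FS ioctl, which is what a current resize2fs issues
 * for online growth.
 */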

/*
 * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
 *
 * @sb: super block of the fs to be resized
 * @n_blocks_count: the number of blocks in the resized fs
 */
int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
{
        struct ext4_new_flex_group_data *flex_gd = NULL;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        struct buffer_head *bh;
        struct inode *resize_inode = NULL;
        ext4_grpblk_t add, offset;
        unsigned long n_desc_blocks;
        unsigned long o_desc_blocks;
        ext4_group_t o_group;
        ext4_group_t n_group;
        ext4_fsblk_t o_blocks_count;
        ext4_fsblk_t n_blocks_count_retry = 0;
        unsigned long last_update_time = 0;
        int err = 0;
        int meta_bg;
        unsigned int flexbg_size = ext4_flex_bg_size(sbi);

        /* See if the device is actually as big as what was requested */
        bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
        if (IS_ERR(bh)) {
                ext4_warning(sb, "can't read last block, resize aborted");
                return -ENOSPC;
        }
        brelse(bh);

        /*
         * For bigalloc, trim the requested size to the nearest cluster
         * boundary to avoid creating an unusable filesystem. We do this
         * silently, instead of returning an error, to avoid breaking
         * callers that blindly resize the filesystem to the full size of
         * the underlying block device.
         */
        if (ext4_has_feature_bigalloc(sb))
                n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);
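        /*
         * Worked example of the mask above (illustrative numbers): with a
         * cluster ratio of 16 blocks, i.e. EXT4_CLUSTER_BITS(sb) == 4, a
         * request for 1000003 blocks is silently trimmed to 1000000, the
         * largest multiple of 16 that fits.
         */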

retry:
        o_blocks_count = ext4_blocks_count(es);

        ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
                 "to %llu blocks", o_blocks_count, n_blocks_count);

        if (n_blocks_count < o_blocks_count) {
                /* On-line shrinking not supported */
                ext4_warning(sb, "can't shrink FS - resize aborted");
                return -EINVAL;
        }

        if (n_blocks_count == o_blocks_count)
                /* Nothing to do */
                return 0;

        n_group = ext4_get_group_number(sb, n_blocks_count - 1);
        if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
                ext4_warning(sb, "resize would cause inodes_count overflow");
                return -EINVAL;
        }
        ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);

        n_desc_blocks = num_desc_blocks(sb, n_group + 1);
        o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);

        meta_bg = ext4_has_feature_meta_bg(sb);

        if (ext4_has_feature_resize_inode(sb)) {
                if (meta_bg) {
                        ext4_error(sb, "resize_inode and meta_bg enabled "
                                   "simultaneously");
                        return -EINVAL;
                }
                if (n_desc_blocks > o_desc_blocks +
                    le16_to_cpu(es->s_reserved_gdt_blocks)) {
                        n_blocks_count_retry = n_blocks_count;
                        n_desc_blocks = o_desc_blocks +
                                le16_to_cpu(es->s_reserved_gdt_blocks);
                        n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
                        n_blocks_count = (ext4_fsblk_t)n_group *
                                EXT4_BLOCKS_PER_GROUP(sb) +
                                le32_to_cpu(es->s_first_data_block);
                        n_group--; /* set to last group number */
                }

                if (!resize_inode)
                        resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
                                                 EXT4_IGET_SPECIAL);
                if (IS_ERR(resize_inode)) {
                        ext4_warning(sb, "Error opening resize inode");
                        return PTR_ERR(resize_inode);
                }
        }

        if ((!resize_inode && !meta_bg && n_desc_blocks > o_desc_blocks) ||
            n_blocks_count == o_blocks_count) {
                err = ext4_convert_meta_bg(sb, resize_inode);
                if (err)
                        goto out;
                if (resize_inode) {
                        iput(resize_inode);
                        resize_inode = NULL;
                }
                if (n_blocks_count_retry) {
                        n_blocks_count = n_blocks_count_retry;
                        n_blocks_count_retry = 0;
                        goto retry;
                }
        }
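        /*
         * A note on the retry flow above: when the reserved GDT blocks
         * cannot cover all the new descriptor blocks, the original target
         * size is parked in n_blocks_count_retry, the filesystem is first
         * grown as far as the resize inode allows (converting to meta_bg
         * once the reserved GDT space is exhausted), and the resize is
         * then restarted toward the parked target.
         */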

        /*
         * Make sure the last group has enough space so that it's
         * guaranteed to have enough space for all metadata blocks
         * that it might need to hold. (We might not need to store
         * the inode table blocks in the last block group, but there
         * will be cases where this might be needed.)
         */
        if ((ext4_group_first_block_no(sb, n_group) +
             ext4_group_overhead_blocks(sb, n_group) + 2 +
             sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
                n_blocks_count = ext4_group_first_block_no(sb, n_group);
                n_group--;
                n_blocks_count_retry = 0;
                if (resize_inode) {
                        iput(resize_inode);
                        resize_inode = NULL;
                }
                goto retry;
        }

        /* extend the last group */
        if (n_group == o_group)
                add = n_blocks_count - o_blocks_count;
        else
                add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
        if (add > 0) {
                err = ext4_group_extend_no_check(sb, o_blocks_count, add);
                if (err)
                        goto out;
        }

        if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0)
                goto out;

        err = ext4_alloc_flex_bg_array(sb, n_group + 1);
        if (err)
                goto out;

        err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
        if (err)
                goto out;

        flex_gd = alloc_flex_gd(flexbg_size, o_group, n_group);
        if (flex_gd == NULL) {
                err = -ENOMEM;
                goto out;
        }

        /* Add flex groups. Note that a regular group is a
         * flex group with 1 group.
         */
        while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count)) {
                if (time_is_before_jiffies(last_update_time + HZ * 10)) {
                        if (last_update_time)
                                ext4_msg(sb, KERN_INFO,
                                         "resized to %llu blocks",
                                         ext4_blocks_count(es));
                        last_update_time = jiffies;
                }
                if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
                        break;
                err = ext4_flex_group_add(sb, resize_inode, flex_gd);
                if (unlikely(err))
                        break;
        }

        if (!err && n_blocks_count_retry) {
                n_blocks_count = n_blocks_count_retry;
                n_blocks_count_retry = 0;
                free_flex_gd(flex_gd);
                flex_gd = NULL;
                if (resize_inode) {
                        iput(resize_inode);
                        resize_inode = NULL;
                }
                goto retry;
        }

out:
        if (flex_gd)
                free_flex_gd(flex_gd);
        if (resize_inode != NULL)
                iput(resize_inode);
        if (err)
                ext4_warning(sb, "error (%d) occurred during "
                             "file system resize", err);
        ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
                 ext4_blocks_count(es));
        return err;
}