// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/resize.c
 *
 * Support for resizing an ext4 filesystem while it is mounted.
 *
 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
 *
 * This could probably be made into a module, because it is not often in use.
 */


#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#include "ext4_jbd2.h"

struct ext4_rcu_ptr {
	struct rcu_head rcu;
	void *ptr;
};

static void ext4_rcu_ptr_callback(struct rcu_head *head)
{
	struct ext4_rcu_ptr *ptr;

	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
	kvfree(ptr->ptr);
	kfree(ptr);
}

void ext4_kvfree_array_rcu(void *to_free)
{
	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);

	if (ptr) {
		ptr->ptr = to_free;
		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
		return;
	}
	synchronize_rcu();
	kvfree(to_free);
}
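
/*
 * Illustrative usage (see add_new_gdb() below): a writer swapping an
 * RCU-protected array does roughly
 *
 *	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
 *	ext4_kvfree_array_rcu(o_group_desc);
 *
 * so the old array is only freed once readers that looked it up under
 * rcu_read_lock() have finished with it.
 */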

int ext4_resize_begin(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int ret = 0;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	/*
	 * If the number of reserved GDT blocks is non-zero, the resize_inode
	 * feature should always be set.
	 */
	if (sbi->s_es->s_reserved_gdt_blocks &&
	    !ext4_has_feature_resize_inode(sb)) {
		ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
		return -EFSCORRUPTED;
	}

	/*
	 * If we are not using the primary superblock/GDT copy don't resize,
	 * because the user tools have no way of handling this.  Probably a
	 * bad time to do it anyway.
	 */
	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
	    le32_to_cpu(sbi->s_es->s_first_data_block)) {
		ext4_warning(sb, "won't resize using backup superblock at %llu",
			(unsigned long long)sbi->s_sbh->b_blocknr);
		return -EPERM;
	}

	/*
	 * We are not allowed to do online-resizing on a filesystem mounted
	 * with errors, because it can destroy the filesystem easily.
	 */
	if (sbi->s_mount_state & EXT4_ERROR_FS) {
		ext4_warning(sb, "There are errors in the filesystem, "
			     "so online resizing is not allowed");
		return -EPERM;
	}

	if (ext4_has_feature_sparse_super2(sb)) {
		ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
		return -EOPNOTSUPP;
	}

	if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
				  &sbi->s_ext4_flags))
		ret = -EBUSY;

	return ret;
}

int ext4_resize_end(struct super_block *sb, bool update_backups)
{
	clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
	smp_mb__after_atomic();
	if (update_backups)
		return ext4_update_overhead(sb, true);
	return 0;
}

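/*
 * Per-group metadata overhead: the group's GDT blocks plus, for groups
 * carrying a superblock backup, the superblock itself and the blocks
 * reserved for future GDT growth.
 */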
static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
						ext4_group_t group) {
	ext4_grpblk_t overhead;
	overhead = ext4_bg_num_gdb(sb, group);
	if (ext4_bg_has_super(sb, group))
		overhead += 1 +
			le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	return overhead;
}

#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))

static int verify_group_input(struct super_block *sb,
			      struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t start = ext4_blocks_count(es);
	ext4_fsblk_t end = start + input->blocks_count;
	ext4_group_t group = input->group;
	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
	unsigned overhead;
	ext4_fsblk_t metaend;
	struct buffer_head *bh = NULL;
	ext4_grpblk_t free_blocks_count, offset;
	int err = -EINVAL;

	if (group != sbi->s_groups_count) {
		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
			     input->group, sbi->s_groups_count);
		return -EINVAL;
	}

	overhead = ext4_group_overhead_blocks(sb, group);
	metaend = start + overhead;
	free_blocks_count = input->blocks_count - 2 - overhead -
			    sbi->s_itb_per_group;
	input->free_clusters_count = EXT4_B2C(sbi, free_blocks_count);

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
		       "(%d free, %u reserved)\n",
		       ext4_bg_has_super(sb, input->group) ? "normal" :
		       "no-super", input->group, input->blocks_count,
		       free_blocks_count, input->reserved_blocks);

	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
	if (offset != 0)
		ext4_warning(sb, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
		ext4_warning(sb, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
		ext4_warning(sb, "Bad blocks count %u",
			     input->blocks_count);
	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
		err = PTR_ERR(bh);
		bh = NULL;
		ext4_warning(sb, "Cannot read last block (%llu)",
			     end - 1);
	} else if (outside(input->block_bitmap, start, end))
		ext4_warning(sb, "Block bitmap not in group (block %llu)",
			     (unsigned long long)input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
			     (unsigned long long)input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
		 outside(itend - 1, start, end))
		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
			     (unsigned long long)input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
			     (unsigned long long)input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Block bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->block_bitmap, start, metaend))
		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_table,
			     itend - 1, start, metaend - 1);
	else
		err = 0;
	brelse(bh);

	return err;
}

/*
 * ext4_new_flex_group_data is used by the 64bit-resize interface to add a
 * flex group each time.
 */
struct ext4_new_flex_group_data {
	struct ext4_new_group_data *groups;	/* new_group_data for groups
						   in the flex group */
	__u16 *bg_flags;			/* block group flags of groups
						   in @groups */
	ext4_group_t resize_bg;			/* number of allocated
						   new_group_data */
	ext4_group_t count;			/* number of groups in @groups
						 */
};

/*
 * Cap the number of groups added per iteration, to avoid memory allocation
 * failures when too many groups are added at once.
 */
#define MAX_RESIZE_BG				16384

/*
 * alloc_flex_gd() allocates an ext4_new_flex_group_data that satisfies the
 * resizing from @o_group to @n_group; its size is typically @flexbg_size.
 *
 * Returns NULL on failure, otherwise the address of the allocated structure.
 */
static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned int flexbg_size,
				ext4_group_t o_group, ext4_group_t n_group)
{
	ext4_group_t last_group;
	unsigned int max_resize_bg;
	struct ext4_new_flex_group_data *flex_gd;

	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
	if (flex_gd == NULL)
		goto out3;

	max_resize_bg = umin(flexbg_size, MAX_RESIZE_BG);
	flex_gd->resize_bg = max_resize_bg;

	/* Avoid allocating a large 'groups' array if it isn't needed */
	last_group = o_group | (flex_gd->resize_bg - 1);
	if (n_group <= last_group)
		flex_gd->resize_bg = 1 << fls(n_group - o_group);
	else if (n_group - last_group < flex_gd->resize_bg)
		flex_gd->resize_bg = 1 << max(fls(last_group - o_group),
					      fls(n_group - last_group));
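
	/*
	 * Worked example (illustrative): with flexbg_size = 16, o_group = 20
	 * and n_group = 25, last_group = 20 | 15 = 31 >= n_group, so
	 * resize_bg becomes 1 << fls(5) = 8: enough groups per iteration to
	 * finish the resize without allocating the full 16-entry array.
	 */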

	if (WARN_ON_ONCE(flex_gd->resize_bg > max_resize_bg))
		flex_gd->resize_bg = max_resize_bg;

	flex_gd->groups = kmalloc_array(flex_gd->resize_bg,
					sizeof(struct ext4_new_group_data),
					GFP_NOFS);
	if (flex_gd->groups == NULL)
		goto out2;

	flex_gd->bg_flags = kmalloc_array(flex_gd->resize_bg, sizeof(__u16),
					  GFP_NOFS);
	if (flex_gd->bg_flags == NULL)
		goto out1;

	return flex_gd;

out1:
	kfree(flex_gd->groups);
out2:
	kfree(flex_gd);
out3:
	return NULL;
}

static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
{
	kfree(flex_gd->bg_flags);
	kfree(flex_gd->groups);
	kfree(flex_gd);
}

/*
 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
 * and inode tables for a flex group.
 *
 * This function is used by 64bit-resize.  Note that this function allocates
 * group tables from the 1st group of groups contained by @flex_gd, which may
 * be a partial flex group.
 *
 * @sb: super block of fs to which the groups belong
 *
 * Returns 0 on a successful allocation of the metadata blocks in the
 * block group.
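 *
 * Layout note (illustrative): for a flex group of four new groups, the four
 * block bitmaps are placed first, then the four inode bitmaps, then the four
 * inode tables, packed contiguously from the first usable block of the first
 * new group (skipping any superblock/GDT overhead).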
 */
static int ext4_alloc_group_tables(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd,
				unsigned int flexbg_size)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t start_blk;
	ext4_fsblk_t last_blk;
	ext4_group_t src_group;
	ext4_group_t bb_index = 0;
	ext4_group_t ib_index = 0;
	ext4_group_t it_index = 0;
	ext4_group_t group;
	ext4_group_t last_group;
	unsigned overhead;
	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);

	src_group = group_data[0].group;
	last_group = src_group + flex_gd->count - 1;

	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
	       (last_group & ~(flexbg_size - 1))));
next_group:
	group = group_data[0].group;
	if (src_group >= group_data[0].group + flex_gd->count)
		return -ENOSPC;
	start_blk = ext4_group_first_block_no(sb, src_group);
	last_blk = start_blk + group_data[src_group - group].blocks_count;

	overhead = ext4_group_overhead_blocks(sb, src_group);

	start_blk += overhead;

	/* We collect contiguous blocks as much as possible. */
	src_group++;
	for (; src_group <= last_group; src_group++) {
		overhead = ext4_group_overhead_blocks(sb, src_group);
		if (overhead == 0)
			last_blk += group_data[src_group - group].blocks_count;
		else
			break;
	}

	/* Allocate block bitmaps */
	for (; bb_index < flex_gd->count; bb_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[bb_index].block_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode bitmaps */
	for (; ib_index < flex_gd->count; ib_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[ib_index].inode_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode tables */
	for (; it_index < flex_gd->count; it_index++) {
		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
		ext4_fsblk_t next_group_start;

		if (start_blk + itb > last_blk)
			goto next_group;
		group_data[it_index].inode_table = start_blk;
		group = ext4_get_group_number(sb, start_blk);
		next_group_start = ext4_group_first_block_no(sb, group + 1);
		group -= group_data[0].group;

		if (start_blk + itb > next_group_start) {
			flex_gd->bg_flags[group + 1] &= uninit_mask;
			overhead = start_blk + itb - next_group_start;
			group_data[group + 1].mdata_blocks += overhead;
			itb -= overhead;
		}

		group_data[group].mdata_blocks += itb;
		flex_gd->bg_flags[group] &= uninit_mask;
		start_blk += EXT4_SB(sb)->s_itb_per_group;
	}

	/* Update free clusters count to exclude metadata blocks */
	for (i = 0; i < flex_gd->count; i++) {
		group_data[i].free_clusters_count -=
				EXT4_NUM_B2C(EXT4_SB(sb),
					     group_data[i].mdata_blocks);
	}

	if (test_opt(sb, DEBUG)) {
		int i;
		group = group_data[0].group;

		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
		       "%u groups, flexbg size is %u:\n", flex_gd->count,
		       flexbg_size);

		for (i = 0; i < flex_gd->count; i++) {
			ext4_debug(
			       "adding %s group %u: %u blocks (%u free, %u mdata blocks)\n",
			       ext4_bg_has_super(sb, group + i) ? "normal" :
			       "no-super", group + i,
			       group_data[i].blocks_count,
			       group_data[i].free_clusters_count,
			       group_data[i].mdata_blocks);
		}
	}
	return 0;
}

static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
				  ext4_fsblk_t blk)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
	if (err) {
		brelse(bh);
		bh = ERR_PTR(err);
	} else {
		memset(bh->b_data, 0, sb->s_blocksize);
		set_buffer_uptodate(bh);
	}

	return bh;
}

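/*
 * Make sure @handle has at least @credits journal credits left, extending
 * or restarting the transaction in EXT4_MAX_TRANS_DATA-sized batches if
 * not (see ext4_journal_ensure_credits_fn()).  Callers treat a negative
 * return as an error; a positive return only means the transaction was
 * restarted.
 */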
static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
{
	return ext4_journal_ensure_credits_fn(handle, credits,
		EXT4_MAX_TRANS_DATA, 0, 0);
}

/*
 * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster]
 * used.
 *
 * Helper function for ext4_setup_new_group_blocks() which sets the bits in
 * the block bitmaps for the given cluster range.
 *
 * @sb: super block
 * @handle: journal handle
 * @flex_gd: flex group data
 */
static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
			struct ext4_new_flex_group_data *flex_gd,
			ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t count = last_cluster - first_cluster + 1;
	ext4_group_t count2;

	ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
		   last_cluster);
	for (; count > 0; count -= count2, first_cluster += count2) {
		ext4_fsblk_t start;
		struct buffer_head *bh;
		ext4_group_t group;
		int err;

		group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
		start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
		group -= flex_gd->groups[0].group;

		count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
		if (count2 > count)
			count2 = count;

		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
			BUG_ON(flex_gd->count > 1);
			continue;
		}

		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			return err;

		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
		if (unlikely(!bh))
			return -ENOMEM;

		BUFFER_TRACE(bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, sb, bh,
						    EXT4_JTR_NONE);
		if (err) {
			brelse(bh);
			return err;
		}
		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
			   first_cluster, first_cluster - start, count2);
		mb_set_bits(bh->b_data, first_cluster - start, count2);

		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (unlikely(err))
			return err;
	}

	return 0;
}

/*
 * Set up the block and inode bitmaps, and the inode table for the new groups.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem.  We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 *
 * setup_new_flex_group_blocks handles a flex group as follows:
 * 1. copy super block and GDT, and initialize group tables if necessary.
 *    In this step, we only set bits in block bitmaps for blocks taken by
 *    super block and GDT.
 * 2. allocate group tables in block bitmaps, that is, set bits in block
 *    bitmap for blocks taken by group tables.
 */
static int setup_new_flex_group_blocks(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
	ext4_fsblk_t start;
	ext4_fsblk_t block;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	__u16 *bg_flags = flex_gd->bg_flags;
	handle_t *handle;
	ext4_group_t group, count;
	struct buffer_head *bh = NULL;
	int reserved_gdb, i, j, err = 0, err2;
	int meta_bg;

	BUG_ON(!flex_gd->count || !group_data ||
	       group_data[0].group != sbi->s_groups_count);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	meta_bg = ext4_has_feature_meta_bg(sb);

	/* This transaction may be extended/restarted along the way */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	group = group_data[0].group;
	for (i = 0; i < flex_gd->count; i++, group++) {
		unsigned long gdblocks;
		ext4_grpblk_t overhead;

		gdblocks = ext4_bg_num_gdb(sb, group);
		start = ext4_group_first_block_no(sb, group);

		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
			goto handle_itb;

		if (meta_bg == 1)
			goto handle_itb;

		block = start + ext4_bg_has_super(sb, group);
		/* Copy all of the GDT blocks into the backup in this group */
		for (j = 0; j < gdblocks; j++, block++) {
			struct buffer_head *gdb;

			ext4_debug("update backup group %#04llx\n", block);
			err = ext4_resize_ensure_credits_batch(handle, 1);
			if (err < 0)
				goto out;

			gdb = sb_getblk(sb, block);
			if (unlikely(!gdb)) {
				err = -ENOMEM;
				goto out;
			}

			BUFFER_TRACE(gdb, "get_write_access");
			err = ext4_journal_get_write_access(handle, sb, gdb,
							    EXT4_JTR_NONE);
			if (err) {
				brelse(gdb);
				goto out;
			}
			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
				s_group_desc, j)->b_data, gdb->b_size);
			set_buffer_uptodate(gdb);

			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
			if (unlikely(err)) {
				brelse(gdb);
				goto out;
			}
			brelse(gdb);
		}

		/* Zero out all of the reserved backup group descriptor
		 * table blocks
		 */
		if (ext4_bg_has_super(sb, group)) {
			err = sb_issue_zeroout(sb, gdblocks + start + 1,
					       reserved_gdb, GFP_NOFS);
			if (err)
				goto out;
		}

handle_itb:
		/* Initialize group tables of the group @group */
		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
			goto handle_bb;

		/* Zero out all of the inode table blocks */
		block = group_data[i].inode_table;
		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
			   block, sbi->s_itb_per_group);
		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
				       GFP_NOFS);
		if (err)
			goto out;

handle_bb:
		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
			goto handle_ib;

		/* Initialize block bitmap of the @group */
		block = group_data[i].block_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;

		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}
		overhead = ext4_group_overhead_blocks(sb, group);
		if (overhead != 0) {
			ext4_debug("mark backup superblock %#04llx (+0)\n",
				   start);
			mb_set_bits(bh->b_data, 0,
				    EXT4_NUM_B2C(sbi, overhead));
		}
		ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;

handle_ib:
		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
			continue;

		/* Initialize inode bitmap of the @group */
		block = group_data[i].inode_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;
		/* Mark unused entries in inode bitmap used */
		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}

		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;
	}

	/* Mark group tables in block bitmap */
	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
		count = group_table_count[j];
		start = (&group_data[0].block_bitmap)[j];
		block = start;
		for (i = 1; i < flex_gd->count; i++) {
			block += group_table_count[j];
			if (block == (&group_data[i].block_bitmap)[j]) {
				count += group_table_count[j];
				continue;
			}
			err = set_flexbg_block_bitmap(sb, handle,
						      flex_gd,
						      EXT4_B2C(sbi, start),
						      EXT4_B2C(sbi,
							       start + count
							       - 1));
			if (err)
				goto out;
			count = group_table_count[j];
			start = (&group_data[i].block_bitmap)[j];
			block = start;
		}

		err = set_flexbg_block_bitmap(sb, handle,
					      flex_gd,
					      EXT4_B2C(sbi, start),
					      EXT4_B2C(sbi,
						       start + count
						       - 1));
		if (err)
			goto out;
	}

out:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	return err;
}

/*
 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
 * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
 * calling this for the first time.  In a sparse filesystem it will be the
 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
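 *
 * Walk-through (illustrative): with three = 1, five = 5, seven = 7, each
 * call returns the smallest counter and multiplies it by its base, so
 * successive calls yield 1 (three -> 3), 3 (three -> 9), 5 (five -> 25),
 * 7 (seven -> 49), 9 (three -> 27), 25, 27, 49, ...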
 */
unsigned int ext4_list_backups(struct super_block *sb, unsigned int *three,
			       unsigned int *five, unsigned int *seven)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned int *min = three;
	int mult = 3;
	unsigned int ret;

	if (ext4_has_feature_sparse_super2(sb)) {
		do {
			if (*min > 2)
				return UINT_MAX;
			ret = le32_to_cpu(es->s_backup_bgs[*min - 1]);
			*min += 1;
		} while (!ret);
		return ret;
	}

	if (!ext4_has_feature_sparse_super(sb)) {
		ret = *min;
		*min += 1;
		return ret;
	}

	if (*five < *min) {
		min = five;
		mult = 5;
	}
	if (*seven < *min) {
		min = seven;
		mult = 7;
	}

	ret = *min;
	*min *= mult;

	return ret;
}

/*
 * Check that all of the backup GDT blocks are held in the primary GDT block.
 * It is assumed that they are stored in group order.  Returns the number of
 * groups in the current filesystem that have BACKUPS, or a negative error
 * code.
 */
static int verify_reserved_gdb(struct super_block *sb,
			       ext4_group_t end,
			       struct buffer_head *primary)
{
	const ext4_fsblk_t blk = primary->b_blocknr;
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned grp;
	__le32 *p = (__le32 *)primary->b_data;
	int gdbackups = 0;

	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
		if (le32_to_cpu(*p++) !=
		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
			ext4_warning(sb, "reserved GDT %llu"
				     " missing grp %d (%llu)",
				     blk, grp,
				     grp *
				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
				     blk);
			return -EINVAL;
		}
		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
			return -EFBIG;
	}

	return gdbackups;
}

/*
 * Called when we need to bring a reserved group descriptor table block into
 * use from the resize inode.  The primary copy of the new GDT block currently
 * is an indirect block (under the double indirect block in the resize inode).
 * The new backup GDT blocks will be stored as leaf blocks in this indirect
 * block, in group order.  Even though we know all the block numbers we need,
 * we check to ensure that the resize inode has actually reserved these blocks.
 *
 * Don't need to update the block bitmaps because the blocks are still in use.
 *
 * We get all of the error cases out of the way, so that we are sure to not
 * fail once we start modifying the data on disk, because JBD has no rollback.
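 *
 * Scale note (illustrative): each GDT block describes
 * EXT4_DESC_PER_BLOCK(sb) groups, e.g. 64 groups per 4KiB block with
 * 64-byte descriptors, so this path runs only once per that many groups
 * added.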
 */
static int add_new_gdb(handle_t *handle, struct inode *inode,
		       ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
	struct buffer_head **o_group_desc, **n_group_desc = NULL;
	struct buffer_head *dind = NULL;
	struct buffer_head *gdb_bh = NULL;
	int gdbackups;
	struct ext4_iloc iloc = { .bh = NULL };
	__le32 *data;
	int err;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG
		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
		       gdb_num);

	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);

	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
	if (gdbackups < 0) {
		err = gdbackups;
		goto errout;
	}

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto errout;
	}

	data = (__le32 *)dind->b_data;
	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
		ext4_warning(sb, "new group %u GDT block %llu not reserved",
			     group, gdblock);
		err = -EINVAL;
		goto errout;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
					    EXT4_JTR_NONE);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(dind, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, dind, EXT4_JTR_NONE);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}

	/* ext4_reserve_inode_write() gets a reference on the iloc */
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (unlikely(err))
		goto errout;

	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		goto errout;
	}

	/*
	 * Finally, we have all of the possible failures behind us...
	 *
	 * Remove new GDT block from inode double-indirect block and clear out
	 * the new GDT block for use (which also "frees" the backup GDT blocks
	 * from the reserved inode).  We don't need to change the bitmaps for
	 * these blocks, because they are marked as in-use from being in the
	 * reserved inode, and will become GDT blocks (primary and backup).
	 */
	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
	err = ext4_handle_dirty_metadata(handle, NULL, dind);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}
	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
			   (9 - EXT4_SB(sb)->s_cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);
	memset(gdb_bh->b_data, 0, sb->s_blocksize);
	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		iloc.bh = NULL;
		goto errout;
	}
	brelse(dind);

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;
	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);

	lock_buffer(EXT4_SB(sb)->s_sbh);
	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
	ext4_superblock_csum_set(sb);
	unlock_buffer(EXT4_SB(sb)->s_sbh);
	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
	if (err)
		ext4_std_error(sb, err);
	return err;
errout:
	kvfree(n_group_desc);
	brelse(iloc.bh);
	brelse(dind);
	brelse(gdb_bh);

	ext4_debug("leaving with error %d\n", err);
	return err;
}

/*
 * If there is no available space in the existing block group descriptors for
 * the new block group, and there are no reserved block group descriptors,
 * then the meta_bg feature will get enabled and es->s_first_meta_bg will get
 * set to the first block group that is managed using meta_bg; s_first_meta_bg
 * must be a multiple of EXT4_DESC_PER_BLOCK(sb).
 * This function is called when the first group of a meta_bg is added, to
 * bring in the group descriptor block of the newly added meta_bg.
 */
static int add_new_gdb_meta_bg(struct super_block *sb,
			       handle_t *handle, ext4_group_t group) {
	ext4_fsblk_t gdblock;
	struct buffer_head *gdb_bh;
	struct buffer_head **o_group_desc, **n_group_desc;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	int err;

	gdblock = ext4_group_first_block_no(sb, group) +
		  ext4_bg_has_super(sb, group);
	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);
	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		brelse(gdb_bh);
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		return err;
	}

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
	if (err) {
		kvfree(n_group_desc);
		brelse(gdb_bh);
		return err;
	}

	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);
	return err;
}

/*
 * Called when we are adding a new group which has a backup copy of each of
 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
 * We need to add these reserved backup GDT blocks to the resize inode, so
 * that they are kept for future resizing and not allocated to files.
 *
 * Each reserved backup GDT block will go into a different indirect block.
 * The indirect blocks are actually the primary reserved GDT blocks,
 * so we know in advance what their block numbers are.  We only get the
 * double-indirect block to verify it is pointing to the primary reserved
 * GDT blocks so we don't overwrite a data block by accident.  The reserved
 * backup GDT blocks are stored in their reserved primary GDT block.
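 *
 * Illustratively: after this group is added, slot @gdbackups of each
 * reserved primary GDT block primary[i] gains one more pointer,
 * group * EXT4_BLOCKS_PER_GROUP(sb) + primary[i]->b_blocknr, the location
 * of that GDT block's backup inside the new group (see the loop below).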
 */
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
			      ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
	struct buffer_head **primary;
	struct buffer_head *dind;
	struct ext4_iloc iloc;
	ext4_fsblk_t blk;
	__le32 *data, *end;
	int gdbackups = 0;
	int res, i;
	int err;

	primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
	if (!primary)
		return -ENOMEM;

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto exit_free;
	}

	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
					 EXT4_ADDR_PER_BLOCK(sb));
	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);

	/* Get each reserved primary GDT block and verify it holds backups */
	for (res = 0; res < reserved_gdb; res++, blk++) {
		if (le32_to_cpu(*data) != blk) {
			ext4_warning(sb, "reserved block %llu"
				     " not at offset %ld",
				     blk,
				     (long)(data - (__le32 *)dind->b_data));
			err = -EINVAL;
			goto exit_bh;
		}
		primary[res] = ext4_sb_bread(sb, blk, 0);
		if (IS_ERR(primary[res])) {
			err = PTR_ERR(primary[res]);
			primary[res] = NULL;
			goto exit_bh;
		}
		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
		if (gdbackups < 0) {
			brelse(primary[res]);
			err = gdbackups;
			goto exit_bh;
		}
		if (++data >= end)
			data = (__le32 *)dind->b_data;
	}

	for (i = 0; i < reserved_gdb; i++) {
		BUFFER_TRACE(primary[i], "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, sb, primary[i],
							 EXT4_JTR_NONE)))
			goto exit_bh;
	}

	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
		goto exit_bh;

	/*
	 * Finally we can add each of the reserved backup GDT blocks from
	 * the new group to its reserved primary GDT block.
	 */
	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
	for (i = 0; i < reserved_gdb; i++) {
		int err2;
		data = (__le32 *)primary[i]->b_data;
		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
		if (!err)
			err = err2;
	}

	inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);

exit_bh:
	while (--res >= 0)
		brelse(primary[res]);
	brelse(dind);

exit_free:
	kfree(primary);

	return err;
}

static inline void ext4_set_block_group_nr(struct super_block *sb, char *data,
					   ext4_group_t group)
{
	struct ext4_super_block *es = (struct ext4_super_block *) data;

	es->s_block_group_nr = cpu_to_le16(group);
	if (ext4_has_feature_metadata_csum(sb))
		es->s_checksum = ext4_superblock_csum(es);
}

/*
 * Update the backup copies of the ext4 metadata.  These don't need to be part
 * of the main resize transaction, because e2fsck will re-write them if there
 * is a problem (basically only OOM will cause a problem).  However, we
 * _should_ update the backups if possible, in case the primary gets trashed
 * for some reason and we need to run e2fsck from a backup superblock.  The
 * important part is that the new block and inode counts are in the backup
 * superblocks, and the location of the new group metadata in the GDT backups.
 *
 * We do not need to take the s_resize_lock for this, because these
 * blocks are not otherwise touched by the filesystem code when it is
 * mounted.  We don't need to worry about last changing from
 * sbi->s_groups_count, because the worst that can happen is that we
 * do not copy the full number of backups at this time.  The resize
 * which changed s_groups_count will backup again.
 */
static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
			   int size, int meta_bg)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t last;
	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	ext4_group_t group = 0;
	int rest = sb->s_blocksize - size;
	handle_t *handle;
	int err = 0, err2;

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle)) {
		group = 1;
		err = PTR_ERR(handle);
		goto exit_err;
	}

	if (meta_bg == 0) {
		group = ext4_list_backups(sb, &three, &five, &seven);
		last = sbi->s_groups_count;
	} else {
		group = ext4_get_group_number(sb, blk_off) + 1;
		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
	}
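
	/*
	 * For meta_bg, the two backup copies of a descriptor block live in
	 * the second and the last group of its meta block group, hence the
	 * group/last bounds computed above.
	 */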

	while (group < sbi->s_groups_count) {
		struct buffer_head *bh;
		ext4_fsblk_t backup_block;
		int has_super = ext4_bg_has_super(sb, group);
		ext4_fsblk_t first_block = ext4_group_first_block_no(sb, group);

		/* Out of journal space, and can't get more - abort - so sad */
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			break;

		if (meta_bg == 0)
			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
		else
			backup_block = first_block + has_super;

		bh = sb_getblk(sb, backup_block);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			break;
		}
		ext4_debug("update metadata backup %llu(+%llu)\n",
			   backup_block, backup_block -
			   ext4_group_first_block_no(sb, group));
		BUFFER_TRACE(bh, "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, sb, bh,
							 EXT4_JTR_NONE))) {
			brelse(bh);
			break;
		}
		lock_buffer(bh);
		memcpy(bh->b_data, data, size);
		if (rest)
			memset(bh->b_data + size, 0, rest);
		if (has_super && (backup_block == first_block))
			ext4_set_block_group_nr(sb, bh->b_data, group);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (unlikely(err))
			ext4_std_error(sb, err);
		brelse(bh);

		if (meta_bg == 0)
			group = ext4_list_backups(sb, &three, &five, &seven);
		else if (group == last)
			break;
		else
			group = last;
	}
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;

	/*
	 * Ugh! Need to have e2fsck write the backup copies.  It is too
	 * late to revert the resize, we shouldn't fail just because of
	 * the backup copies (they are only needed in case of corruption).
	 *
	 * However, if we got here we have a journal problem too, so we
	 * can't really start a transaction to mark the superblock.
	 * Chicken out and just set the flag on the hope it will be written
	 * to disk, and if not - we will simply wait until next fsck.
	 */
exit_err:
	if (err) {
		ext4_warning(sb, "can't update backup for group %u (err %d), "
			     "forcing fsck on next reboot", group, err);
		sbi->s_mount_state &= ~EXT4_VALID_FS;
		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
		mark_buffer_dirty(sbi->s_sbh);
	}
}

/*
 * ext4_add_new_descs() adds @count group descriptors of groups
 * starting at @group
 *
 * @handle: journal handle
 * @sb: super block
 * @group: the group no. of the first group desc to be added
 * @resize_inode: the resize inode
 * @count: number of group descriptors to be added
 */
static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
			      ext4_group_t group, struct inode *resize_inode,
			      ext4_group_t count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *gdb_bh;
	int i, gdb_off, gdb_num, err = 0;
	int meta_bg;

	meta_bg = ext4_has_feature_meta_bg(sb);
	for (i = 0; i < count; i++, group++) {
		int reserved_gdb = ext4_bg_has_super(sb, group) ?
			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * We will only either add reserved group blocks to a backup group
		 * or remove reserved blocks for the first group in a new group block.
		 * Doing both would mean more complex code, and sane people don't
		 * use non-sparse filesystems anymore.  This is already checked above.
		 */
		if (gdb_off) {
			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			BUFFER_TRACE(gdb_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sb, gdb_bh,
							    EXT4_JTR_NONE);

			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
				err = reserve_backup_gdb(handle, resize_inode, group);
		} else if (meta_bg != 0) {
			err = add_new_gdb_meta_bg(sb, handle, group);
		} else {
			err = add_new_gdb(handle, resize_inode, group);
		}
		if (err)
			break;
	}
	return err;
}

static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
{
	struct buffer_head *bh = sb_getblk(sb, block);
	if (unlikely(!bh))
		return NULL;
	if (!bh_uptodate_or_lock(bh)) {
		if (ext4_read_bh(bh, 0, NULL, false) < 0) {
			brelse(bh);
			return NULL;
		}
	}

	return bh;
}

static int ext4_set_bitmap_checksums(struct super_block *sb,
				     struct ext4_group_desc *gdp,
				     struct ext4_new_group_data *group_data)
{
	struct buffer_head *bh;

	if (!ext4_has_feature_metadata_csum(sb))
		return 0;

	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
	if (!bh)
		return -EIO;
	ext4_inode_bitmap_csum_set(sb, gdp, bh);
	brelse(bh);

	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
	if (!bh)
		return -EIO;
	ext4_block_bitmap_csum_set(sb, gdp, bh);
	brelse(bh);

	return 0;
}

/*
 * ext4_setup_new_descs() will set up the group descriptors of a flex bg
 */
static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_group_desc *gdp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *gdb_bh;
	ext4_group_t group;
	__u16 *bg_flags = flex_gd->bg_flags;
	int i, gdb_off, gdb_num, err = 0;


	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
		group = group_data->group;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * get_write_access() has been called on gdb_bh by
		 * ext4_add_new_descs().
		 */
		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
		/* Update group descriptor block for new group */
		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
						 gdb_off * EXT4_DESC_SIZE(sb));

		memset(gdp, 0, EXT4_DESC_SIZE(sb));
		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
		err = ext4_set_bitmap_checksums(sb, gdp, group_data);
		if (err) {
			ext4_std_error(sb, err);
			break;
		}

		ext4_inode_table_set(sb, gdp, group_data->inode_table);
		ext4_free_group_clusters_set(sb, gdp,
					     group_data->free_clusters_count);
		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
		if (ext4_has_group_desc_csum(sb))
			ext4_itable_unused_set(sb, gdp,
					       EXT4_INODES_PER_GROUP(sb));
		gdp->bg_flags = cpu_to_le16(*bg_flags);
		ext4_group_desc_csum_set(sb, group, gdp);

		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
		if (unlikely(err)) {
			ext4_std_error(sb, err);
			break;
		}

		/*
		 * We can allocate memory for mb_alloc based on the new group
		 * descriptor
		 */
		err = ext4_mb_add_groupinfo(sb, group, gdp);
		if (err)
			break;
	}
	return err;
}

static void ext4_add_overhead(struct super_block *sb,
			      const ext4_fsblk_t overhead)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;

	sbi->s_overhead += overhead;
	es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
	smp_wmb();
}

/*
 * ext4_update_super() updates the super block so that the newly added
 * groups can be seen by the filesystem.
 *
 * @sb: super block
 * @flex_gd: new added groups
 */
static void ext4_update_super(struct super_block *sb,
			     struct ext4_new_flex_group_data *flex_gd)
{
	ext4_fsblk_t blocks_count = 0;
	ext4_fsblk_t free_blocks = 0;
	ext4_fsblk_t reserved_blocks = 0;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);
	/*
	 * Make the new blocks and inodes valid next.  We do this before
	 * increasing the group count so that once the group is enabled,
	 * all of its blocks and inodes are already valid.
	 *
	 * We always allocate group-by-group, then block-by-block or
	 * inode-by-inode within a group, so enabling these
	 * blocks/inodes before the group is live won't actually let us
	 * allocate the new space yet.
	 */
	for (i = 0; i < flex_gd->count; i++) {
		blocks_count += group_data[i].blocks_count;
		free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
	}

	reserved_blocks = ext4_r_blocks_count(es) * 100;
	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
	reserved_blocks *= blocks_count;
	do_div(reserved_blocks, 100);
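
	/*
	 * i.e. reserve the same percentage of the newly added blocks as is
	 * currently reserved of the existing blocks.
	 */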
1450 | |
1451 | lock_buffer(bh: sbi->s_sbh); |
1452 | ext4_blocks_count_set(es, blk: ext4_blocks_count(es) + blocks_count); |
1453 | ext4_free_blocks_count_set(es, blk: ext4_free_blocks_count(es) + free_blocks); |
1454 | le32_add_cpu(var: &es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) * |
1455 | flex_gd->count); |
1456 | le32_add_cpu(var: &es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) * |
1457 | flex_gd->count); |
1458 | |
1459 | ext4_debug("free blocks count %llu", ext4_free_blocks_count(es)); |
1460 | /* |
1461 | * We need to protect s_groups_count against other CPUs seeing |
1462 | * inconsistent state in the superblock. |
1463 | * |
1464 | * The precise rules we use are: |
1465 | * |
1466 | * * Writers must perform a smp_wmb() after updating all |
1467 | * dependent data and before modifying the groups count |
1468 | * |
1469 | * * Readers must perform an smp_rmb() after reading the groups |
1470 | * count and before reading any dependent data. |
1471 | * |
1472 | * NB. These rules can be relaxed when checking the group count |
1473 | * while freeing data, as we can only allocate from a block |
1474 | * group after serialising against the group count, and we can |
1475 | * only then free after serialising in turn against that |
1476 | * allocation. |
1477 | */ |
1478 | smp_wmb(); |
1479 | |
1480 | /* Update the global fs size fields */ |
1481 | sbi->s_groups_count += flex_gd->count; |
1482 | sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, |
1483 | (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); |
1484 | |
1485 | /* Update the reserved block counts only once the new group is |
1486 | * active. */ |
1487 | ext4_r_blocks_count_set(es, blk: ext4_r_blocks_count(es) + |
1488 | reserved_blocks); |
1489 | |
1490 | /* Update the free space counts */ |
1491 | percpu_counter_add(fbc: &sbi->s_freeclusters_counter, |
1492 | EXT4_NUM_B2C(sbi, free_blocks)); |
1493 | percpu_counter_add(fbc: &sbi->s_freeinodes_counter, |
1494 | EXT4_INODES_PER_GROUP(sb) * flex_gd->count); |
1495 | |
1496 | ext4_debug("free blocks count %llu", |
1497 | percpu_counter_read(&sbi->s_freeclusters_counter)); |
1498 | if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) { |
1499 | ext4_group_t flex_group; |
1500 | struct flex_groups *fg; |
1501 | |
1502 | flex_group = ext4_flex_group(sbi, block_group: group_data[0].group); |
1503 | fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group); |
1504 | atomic64_add(EXT4_NUM_B2C(sbi, free_blocks), |
1505 | v: &fg->free_clusters); |
1506 | atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, |
1507 | v: &fg->free_inodes); |
1508 | } |
1509 | |
1510 | /* |
1511 | * Update the fs overhead information. |
1512 | * |
1513 | * For bigalloc, if the superblock already has a properly calculated |
1514 | * overhead, update it with a value based on numbers already computed |
1515 | * above for the newly allocated capacity. |
1516 | */ |
1517 | if (ext4_has_feature_bigalloc(sb) && (sbi->s_overhead != 0)) |
1518 | ext4_add_overhead(sb, |
1519 | EXT4_NUM_B2C(sbi, blocks_count - free_blocks)); |
1520 | else |
1521 | ext4_calculate_overhead(sb); |
1522 | es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead); |
1523 | |
1524 | ext4_superblock_csum_set(sb); |
1525 | unlock_buffer(bh: sbi->s_sbh); |
1526 | if (test_opt(sb, DEBUG)) |
1527 | printk(KERN_DEBUG "EXT4-fs: added group %u:" |
1528 | "%llu blocks(%llu free %llu reserved)\n", flex_gd->count, |
1529 | blocks_count, free_blocks, reserved_blocks); |
1530 | } |

/* Add a flex group to an fs. Ensure we handle all possible error conditions
 * _before_ we start modifying the filesystem, because we cannot abort the
 * transaction and not have it write the data to disk.
 */
static int ext4_flex_group_add(struct super_block *sb,
			       struct inode *resize_inode,
			       struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_group_t group;
	handle_t *handle;
	unsigned reserved_gdb;
	int err = 0, err2 = 0, credit;

	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	o_blocks_count = ext4_blocks_count(es);
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);

	err = setup_new_flex_group_blocks(sb, flex_gd);
	if (err)
		goto exit;
	/*
	 * We will always be modifying at least the superblock and GDT
	 * blocks. If we are adding a group past the last current GDT block,
	 * we will also modify the inode and the dindirect block. If we
	 * are adding a group with superblock/GDT backups we will also
	 * modify each of the reserved GDT dindirect blocks.
	 */
	credit = 3;	/* sb, resize inode, resize inode dindirect */
	/* GDT blocks */
	credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
	credit += reserved_gdb;	/* Reserved GDT dindirect blocks */
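	/*
	 * Worked example (illustrative numbers): adding a 16-group flex
	 * group on a 4KiB-block filesystem with 32-byte descriptors
	 * (EXT4_DESC_PER_BLOCK = 128) and 256 reserved GDT blocks yields
	 * credit = 3 + 1 + DIV_ROUND_UP(16, 128) + 256 = 261.
	 */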
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto exit;
	}

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
					    EXT4_JTR_NONE);
	if (err)
		goto exit_journal;

	group = flex_gd->groups[0].group;
	BUG_ON(group != sbi->s_groups_count);
	err = ext4_add_new_descs(handle, sb, group,
				 resize_inode, flex_gd->count);
	if (err)
		goto exit_journal;

	err = ext4_setup_new_descs(handle, sb, flex_gd);
	if (err)
		goto exit_journal;

	ext4_update_super(sb, flex_gd);

	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);

exit_journal:
	err2 = ext4_journal_stop(handle);
	if (!err)
		err = err2;

	if (!err) {
		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
		int gdb_num_end = ((group + flex_gd->count - 1) /
				   EXT4_DESC_PER_BLOCK(sb));
		int meta_bg = ext4_has_feature_meta_bg(sb) &&
			      gdb_num >= le32_to_cpu(es->s_first_meta_bg);
		sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
					  ext4_group_first_block_no(sb, 0);
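		/*
		 * Illustrative note: padding_blocks is the offset of the
		 * primary superblock within group 0; subtracting it below
		 * maps a primary GDT block number to the offset expected
		 * by update_backups(). It is believed to be non-zero only
		 * on 1KiB-block bigalloc filesystems, where group 0 starts
		 * at block 0 but the superblock sits at block 1.
		 */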

		update_backups(sb, ext4_group_first_block_no(sb, 0),
			       (char *)es, sizeof(struct ext4_super_block), 0);
		for (; gdb_num <= gdb_num_end; gdb_num++) {
			struct buffer_head *gdb_bh;

			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
				       gdb_bh->b_data, gdb_bh->b_size, meta_bg);
		}
	}
exit:
	return err;
}

static int ext4_setup_next_flex_gd(struct super_block *sb,
				   struct ext4_new_flex_group_data *flex_gd,
				   ext4_fsblk_t n_blocks_count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t o_blocks_count;
	ext4_group_t n_group;
	ext4_group_t group;
	ext4_group_t last_group;
	ext4_grpblk_t last;
	ext4_grpblk_t clusters_per_group;
	unsigned long i;

	clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);

	o_blocks_count = ext4_blocks_count(es);

	if (o_blocks_count == n_blocks_count)
		return 0;

	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);
	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);

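	/*
	 * Worked example (illustrative): resize_bg is a power of two, so
	 * OR-ing with (resize_bg - 1) rounds group up to the last group
	 * of its flex group; with resize_bg = 64, group 70 gives
	 * 70 | 63 = 127. The result is then clamped to n_group.
	 */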
	last_group = group | (flex_gd->resize_bg - 1);
	if (last_group > n_group)
		last_group = n_group;

	flex_gd->count = last_group - group + 1;

	for (i = 0; i < flex_gd->count; i++) {
		int overhead;

		group_data[i].group = group + i;
		group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
		overhead = ext4_group_overhead_blocks(sb, group + i);
		group_data[i].mdata_blocks = overhead;
		group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
		if (ext4_has_group_desc_csum(sb)) {
			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
					       EXT4_BG_INODE_UNINIT;
			if (!test_opt(sb, INIT_INODE_TABLE))
				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
		} else
			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
	}

	if (last_group == n_group && ext4_has_group_desc_csum(sb))
		/* We need to initialize block bitmap of last group. */
		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;

	if ((last_group == n_group) && (last != clusters_per_group - 1)) {
		group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
		group_data[i - 1].free_clusters_count -= clusters_per_group -
							 last - 1;
	}

	return 1;
}

/* Add group descriptor data to an existing or new group descriptor block.
 * Ensure we handle all possible error conditions _before_ we start modifying
 * the filesystem, because we cannot abort the transaction and not have it
 * write the data to disk.
 *
 * If we are on a GDT block boundary, we need to get the reserved GDT block.
 * Otherwise, we may need to add backup GDT blocks for a sparse group.
 *
 * We only need to hold the superblock lock while we are actually adding
 * in the new group's counts to the superblock. Prior to that we have
 * not really "added" the group at all. We re-check that we are still
 * adding in the last group in case things have changed since verifying.
 */
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
	struct ext4_new_flex_group_data flex_gd;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
	struct inode *inode = NULL;
	int gdb_off;
	int err;
	__u16 bg_flags = 0;

	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);

	if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
		ext4_warning(sb, "Can't resize non-sparse filesystem further");
		return -EPERM;
	}

	if (ext4_blocks_count(es) + input->blocks_count <
	    ext4_blocks_count(es)) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_warning(sb, "inodes_count overflow");
		return -EINVAL;
	}

	if (reserved_gdb || gdb_off == 0) {
		if (!ext4_has_feature_resize_inode(sb) ||
		    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
			ext4_warning(sb,
				     "No reserved GDT blocks, can't resize");
			return -EPERM;
		}
		inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
		if (IS_ERR(inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(inode);
		}
	}

	err = verify_group_input(sb, input);
	if (err)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, input->group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
	if (err)
		goto out;

	flex_gd.count = 1;
	flex_gd.groups = input;
	flex_gd.bg_flags = &bg_flags;
	err = ext4_flex_group_add(sb, inode, &flex_gd);
out:
	iput(inode);
	return err;
} /* ext4_group_add */
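
/*
 * Usage sketch (illustrative only, not part of this file): ext4_group_add()
 * is normally reached from userspace via the EXT4_IOC_GROUP_ADD ioctl, with
 * the caller (e.g. resize2fs) supplying pre-computed metadata locations.
 * The field values below are made up:
 *
 *	struct ext4_new_group_input input = {
 *		.group		 = 42,
 *		.block_bitmap	 = 1376256,
 *		.inode_bitmap	 = 1376257,
 *		.inode_table	 = 1376258,
 *		.blocks_count	 = 32768,
 *		.reserved_blocks = 1638,
 *	};
 *
 *	ioctl(fd, EXT4_IOC_GROUP_ADD, &input);
 */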

/*
 * Extend the last group without re-checking; the caller is assumed to
 * have already validated the request.
 */
static int ext4_group_extend_no_check(struct super_block *sb,
				      ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	handle_t *handle;
	int err = 0, err2;

	/* We will update the superblock, one block bitmap, and
	 * one group descriptor via ext4_group_add_blocks().
	 */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		ext4_warning(sb, "error %d on journal start", err);
		return err;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
					    EXT4_JTR_NONE);
	if (err) {
		ext4_warning(sb, "error %d on journal write access", err);
		goto errout;
	}

	lock_buffer(EXT4_SB(sb)->s_sbh);
	ext4_blocks_count_set(es, o_blocks_count + add);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
	ext4_superblock_csum_set(sb);
	unlock_buffer(EXT4_SB(sb)->s_sbh);
	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
	/* We add the blocks to the bitmap and set the group need init bit */
	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
	if (err)
		goto errout;
	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
errout:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	if (!err) {
		if (test_opt(sb, DEBUG))
			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
			       "blocks\n", ext4_blocks_count(es));
		update_backups(sb, ext4_group_first_block_no(sb, 0),
			       (char *)es, sizeof(struct ext4_super_block), 0);
	}
	return err;
}

/*
 * Extend the filesystem to the new number of blocks specified. This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
 *
 * If we _really_ wanted, we could use default values to call ext4_group_add()
 * allow the "remount" trick to work for arbitrary resizing, assuming enough
 * GDT blocks are reserved to grow to the desired size.
 */
int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
		      ext4_fsblk_t n_blocks_count)
{
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_grpblk_t add;
	struct buffer_head *bh;
	ext4_group_t group;

	o_blocks_count = ext4_blocks_count(es);

	if (test_opt(sb, DEBUG))
		ext4_msg(sb, KERN_DEBUG,
			 "extending last group from %llu to %llu blocks",
			 o_blocks_count, n_blocks_count);

	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
		return 0;

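	/*
	 * Illustrative note: sector_t counts 512-byte sectors, so a
	 * filesystem with 2^b-byte blocks may span at most
	 * ~0ULL >> (b - 9) blocks before block numbers overflow when
	 * converted to sectors; with 4KiB blocks (b = 12) that is
	 * 2^64 / 2^3 = 2^61 blocks.
	 */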
	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem too large to resize to %llu blocks safely",
			 n_blocks_count);
		return -EINVAL;
	}

	if (n_blocks_count < o_blocks_count) {
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	/* Handle the remaining blocks in the last group only. */
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);

	if (last == 0) {
		ext4_warning(sb, "need to use ext2online to resize further");
		return -EPERM;
	}

	add = EXT4_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add < o_blocks_count) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (o_blocks_count + add > n_blocks_count)
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
			     o_blocks_count + add, add);

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	return ext4_group_extend_no_check(sb, o_blocks_count, add);
} /* ext4_group_extend */
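
/*
 * Usage sketch (illustrative): from userspace this path is reached via the
 * EXT4_IOC_GROUP_EXTEND ioctl, which takes the desired total block count,
 * roughly:
 *
 *	unsigned long n_blocks = 1048576;	// hypothetical new size
 *	ioctl(fd, EXT4_IOC_GROUP_EXTEND, &n_blocks);
 *
 * Modern tools generally use EXT4_IOC_RESIZE_FS instead, which ends up in
 * ext4_resize_fs() below.
 */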
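/*
 * Illustrative arithmetic for num_desc_blocks() below: with 4KiB blocks
 * and 64-byte descriptors (the 64bit feature's size), EXT4_DESC_PER_BLOCK
 * is 4096 / 64 = 64, so 100 groups round up to (100 + 63) / 64 = 2
 * descriptor blocks.
 */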
static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
{
	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
}

/*
 * Release the resize inode and drop the resize_inode feature if there
 * are no more reserved gdt blocks, and then convert the file system
 * to enable meta_bg
 */
static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
{
	handle_t *handle;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t nr;
	int i, ret, err = 0;
	int credits = 1;

	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
	if (inode) {
		if (es->s_reserved_gdt_blocks) {
			ext4_error(sb, "Unexpected non-zero "
				   "s_reserved_gdt_blocks");
			return -EPERM;
		}

		/* Do a quick sanity check of the resize inode */
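		/*
		 * Note: i_blocks counts 512-byte units, so a resize inode
		 * owning exactly one block (its double indirect block)
		 * has i_blocks == 1 << (i_blkbits - 9); the s_cluster_bits
		 * term scales that to one cluster on bigalloc filesystems.
		 */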
		if (inode->i_blocks != 1 << (inode->i_blkbits -
					     (9 - sbi->s_cluster_bits)))
			goto invalid_resize_inode;
		for (i = 0; i < EXT4_N_BLOCKS; i++) {
			if (i == EXT4_DIND_BLOCK) {
				if (ei->i_data[i])
					continue;
				else
					goto invalid_resize_inode;
			}
			if (ei->i_data[i])
				goto invalid_resize_inode;
		}
		credits += 3;	/* block bitmap, bg descriptor, resize inode */
	}

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
					    EXT4_JTR_NONE);
	if (err)
		goto errout;

	lock_buffer(sbi->s_sbh);
	ext4_clear_feature_resize_inode(sb);
	ext4_set_feature_meta_bg(sb);
	sbi->s_es->s_first_meta_bg =
		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
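	/*
	 * Descriptor blocks numbered below s_first_meta_bg keep the
	 * traditional layout (clustered after the superblock); those at
	 * or above it use the meta_bg layout, where each descriptor
	 * block lives inside the meta block group it describes (see the
	 * gdb_num >= s_first_meta_bg test in ext4_flex_group_add()).
	 */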
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);

	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
	if (err) {
		ext4_std_error(sb, err);
		goto errout;
	}

	if (inode) {
		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
		ext4_free_blocks(handle, inode, NULL, nr, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
		ei->i_data[EXT4_DIND_BLOCK] = 0;
		inode->i_blocks = 0;

		err = ext4_mark_inode_dirty(handle, inode);
		if (err)
			ext4_std_error(sb, err);
	}

errout:
	ret = ext4_journal_stop(handle);
	return err ? err : ret;

invalid_resize_inode:
	ext4_error(sb, "corrupted/inconsistent resize inode");
	return -EINVAL;
}

/*
 * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
 *
 * @sb: super block of the fs to be resized
 * @n_blocks_count: the number of blocks in the resized fs
 */
int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
{
	struct ext4_new_flex_group_data *flex_gd = NULL;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *bh;
	struct inode *resize_inode = NULL;
	ext4_grpblk_t add, offset;
	unsigned long n_desc_blocks;
	unsigned long o_desc_blocks;
	ext4_group_t o_group;
	ext4_group_t n_group;
	ext4_fsblk_t o_blocks_count;
	ext4_fsblk_t n_blocks_count_retry = 0;
	unsigned long last_update_time = 0;
	int err = 0;
	int meta_bg;
	unsigned int flexbg_size = ext4_flex_bg_size(sbi);

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	/*
	 * For bigalloc, trim the requested size to the nearest cluster
	 * boundary to avoid creating an unusable filesystem. We do this
	 * silently, instead of returning an error, to avoid breaking
	 * callers that blindly resize the filesystem to the full size of
	 * the underlying block device.
	 */
	if (ext4_has_feature_bigalloc(sb))
		n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);
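	/*
	 * Illustrative example: with 16-block clusters
	 * (EXT4_CLUSTER_BITS(sb) == 4) a request for 1000003 blocks is
	 * silently trimmed to 1000000 (1000003 & ~15).
	 */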

retry:
	o_blocks_count = ext4_blocks_count(es);

	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
		 "to %llu blocks", o_blocks_count, n_blocks_count);

	if (n_blocks_count < o_blocks_count) {
		/* On-line shrinking not supported */
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	if (n_blocks_count == o_blocks_count)
		/* Nothing to do */
		return 0;

	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
		ext4_warning(sb, "resize would cause inodes_count overflow");
		return -EINVAL;
	}
	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);

	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);

	meta_bg = ext4_has_feature_meta_bg(sb);

	if (ext4_has_feature_resize_inode(sb)) {
		if (meta_bg) {
			ext4_error(sb, "resize_inode and meta_bg enabled "
				   "simultaneously");
			return -EINVAL;
		}
		if (n_desc_blocks > o_desc_blocks +
		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
			n_blocks_count_retry = n_blocks_count;
			n_desc_blocks = o_desc_blocks +
				le16_to_cpu(es->s_reserved_gdt_blocks);
			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
			n_blocks_count = (ext4_fsblk_t)n_group *
				EXT4_BLOCKS_PER_GROUP(sb) +
				le32_to_cpu(es->s_first_data_block);
			n_group--; /* set to last group number */
		}
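
		/*
		 * Illustrative arithmetic for the clamp above: with 4KiB
		 * blocks and 32-byte descriptors, EXT4_DESC_PER_BLOCK is
		 * 128, so each extra descriptor block maps 128 groups of
		 * 32768 blocks = 16 GiB; with 256 reserved GDT blocks this
		 * pass is capped at about 4 TiB of growth, and the
		 * remainder is retried via n_blocks_count_retry.
		 */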

		if (!resize_inode)
			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
						 EXT4_IGET_SPECIAL);
		if (IS_ERR(resize_inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(resize_inode);
		}
	}

	if ((!resize_inode && !meta_bg && n_desc_blocks > o_desc_blocks) ||
	    n_blocks_count == o_blocks_count) {
		err = ext4_convert_meta_bg(sb, resize_inode);
		if (err)
			goto out;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		if (n_blocks_count_retry) {
			n_blocks_count = n_blocks_count_retry;
			n_blocks_count_retry = 0;
			goto retry;
		}
	}

	/*
	 * Make sure the last group has enough space so that it's
	 * guaranteed to have enough space for all metadata blocks
	 * that it might need to hold. (We might not need to store
	 * the inode table blocks in the last block group, but there
	 * will be cases where this might be needed.)
	 */
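	/*
	 * In the check below, ext4_group_overhead_blocks() covers the
	 * superblock/GDT/reserved-GDT copies, the "+ 2" is the block and
	 * inode bitmaps, s_itb_per_group is the inode table, and
	 * s_cluster_ratio pads the estimate for bigalloc alignment.
	 */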
	if ((ext4_group_first_block_no(sb, n_group) +
	     ext4_group_overhead_blocks(sb, n_group) + 2 +
	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
		n_blocks_count = ext4_group_first_block_no(sb, n_group);
		n_group--;
		n_blocks_count_retry = 0;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

	/* extend the last group */
	if (n_group == o_group)
		add = n_blocks_count - o_blocks_count;
	else
		add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
	if (add > 0) {
		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
		if (err)
			goto out;
	}

	if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
	if (err)
		goto out;

	flex_gd = alloc_flex_gd(flexbg_size, o_group, n_group);
	if (flex_gd == NULL) {
		err = -ENOMEM;
		goto out;
	}

	/* Add flex groups. Note that a regular group is a
	 * flex group with 1 group.
	 */
	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count)) {
		if (time_is_before_jiffies(last_update_time + HZ * 10)) {
			if (last_update_time)
				ext4_msg(sb, KERN_INFO,
					 "resized to %llu blocks",
					 ext4_blocks_count(es));
			last_update_time = jiffies;
		}
		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
			break;
		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
		if (unlikely(err))
			break;
	}

	if (!err && n_blocks_count_retry) {
		n_blocks_count = n_blocks_count_retry;
		n_blocks_count_retry = 0;
		free_flex_gd(flex_gd);
		flex_gd = NULL;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

out:
	if (flex_gd)
		free_flex_gd(flex_gd);
	if (resize_inode != NULL)
		iput(resize_inode);
	if (err)
		ext4_warning(sb, "error (%d) occurred during "
			     "file system resize", err);
	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
		 ext4_blocks_count(es));
	return err;
}