// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test of ext4 multiblock allocation.
 */

#include <kunit/test.h>
#include <kunit/static_stub.h>
#include <linux/random.h>

#include "ext4.h"

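/*
 * Per-group test context: in-memory stand-ins for the on-disk block
 * bitmap and group descriptor of a single block group, handed out by
 * the stubbed bitmap/descriptor helpers below.
 */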
struct mbt_grp_ctx {
	struct buffer_head bitmap_bh;
	/* desc and gd_bh are just placeholders for now */
	struct ext4_group_desc desc;
	struct buffer_head gd_bh;
};

struct mbt_ctx {
	struct mbt_grp_ctx *grp_ctx;
};

struct mbt_ext4_super_block {
	struct ext4_super_block es;
	struct ext4_sb_info sbi;
	struct mbt_ctx mbt_ctx;
};

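/*
 * sb->s_fs_info points at the embedded sbi, so container_of() can
 * recover the enclosing mbt_ext4_super_block and with it the test
 * context.
 */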
#define MBT_SB(_sb) (container_of((_sb)->s_fs_info, struct mbt_ext4_super_block, sbi))
#define MBT_CTX(_sb) (&MBT_SB(_sb)->mbt_ctx)
#define MBT_GRP_CTX(_sb, _group) (&MBT_CTX(_sb)->grp_ctx[_group])

static struct inode *mbt_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = kmalloc(sizeof(struct ext4_inode_info), GFP_KERNEL);
	if (!ei)
		return NULL;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	inode_init_once(&ei->vfs_inode);
	ext4_fc_init_inode(&ei->vfs_inode);

	return &ei->vfs_inode;
}

static void mbt_free_inode(struct inode *inode)
{
	kfree(EXT4_I(inode));
}

static const struct super_operations mbt_sops = {
	.alloc_inode = mbt_alloc_inode,
	.free_inode = mbt_free_inode,
};

static void mbt_kill_sb(struct super_block *sb)
{
	generic_shutdown_super(sb);
}

static struct file_system_type mbt_fs_type = {
	.name = "mballoc test",
	.kill_sb = mbt_kill_sb,
};

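/*
 * ext4_mb_init() touches the block device and the superblock inode
 * list, so fake the minimum it needs: a zeroed bdev/request_queue
 * pair and an empty s_inodes list.
 */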
static int mbt_mb_init(struct super_block *sb)
{
	ext4_fsblk_t block;
	int ret;

	/* needed by ext4_mb_init->bdev_nonrot(sb->s_bdev) */
	sb->s_bdev = kzalloc(sizeof(*sb->s_bdev), GFP_KERNEL);
	if (sb->s_bdev == NULL)
		return -ENOMEM;

	sb->s_bdev->bd_queue = kzalloc(sizeof(struct request_queue), GFP_KERNEL);
	if (sb->s_bdev->bd_queue == NULL) {
		kfree(sb->s_bdev);
		return -ENOMEM;
	}

	/*
	 * needed by ext4_mb_init->ext4_mb_init_backend->
	 * sbi->s_buddy_cache = new_inode(sb);
	 */
	INIT_LIST_HEAD(&sb->s_inodes);
	sb->s_op = &mbt_sops;

	ret = ext4_mb_init(sb);
	if (ret != 0)
		goto err_out;

	block = ext4_count_free_clusters(sb);
	ret = percpu_counter_init(&EXT4_SB(sb)->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_mb_release;

	ret = percpu_counter_init(&EXT4_SB(sb)->s_dirtyclusters_counter, 0,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_freeclusters;

	return 0;

err_freeclusters:
	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
err_mb_release:
	ext4_mb_release(sb);
err_out:
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
	return ret;
}

static void mbt_mb_release(struct super_block *sb)
{
	percpu_counter_destroy(&EXT4_SB(sb)->s_dirtyclusters_counter);
	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
	ext4_mb_release(sb);
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
}

static int mbt_set(struct super_block *sb, void *data)
{
	return 0;
}

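/*
 * Build a bare super_block via sget() without any backing device,
 * then wire up the fake ext4_sb_info embedded in the test wrapper.
 */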
static struct super_block *mbt_ext4_alloc_super_block(void)
{
	struct mbt_ext4_super_block *fsb;
	struct super_block *sb;
	struct ext4_sb_info *sbi;

	fsb = kzalloc(sizeof(*fsb), GFP_KERNEL);
	if (fsb == NULL)
		return NULL;

	sb = sget(&mbt_fs_type, NULL, mbt_set, 0, NULL);
	if (IS_ERR(sb))
		goto out;

	sbi = &fsb->sbi;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_deactivate;

	bgl_lock_init(sbi->s_blockgroup_lock);

	sbi->s_es = &fsb->es;
	sb->s_fs_info = sbi;

	up_write(&sb->s_umount);
	return sb;

out_deactivate:
	deactivate_locked_super(sb);
out:
	kfree(fsb);
	return NULL;
}

static void mbt_ext4_free_super_block(struct super_block *sb)
{
	struct mbt_ext4_super_block *fsb = MBT_SB(sb);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	kfree(sbi->s_blockgroup_lock);
	deactivate_super(sb);
	kfree(fsb);
}

struct mbt_ext4_block_layout {
	unsigned char blocksize_bits;
	unsigned int cluster_bits;
	uint32_t blocks_per_group;
	ext4_group_t group_count;
	uint16_t desc_size;
};

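/*
 * Populate just enough of the superblock geometry (block size,
 * cluster ratio, group count and descriptor size) for mballoc to
 * operate on.
 */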
static void mbt_init_sb_layout(struct super_block *sb,
			       struct mbt_ext4_block_layout *layout)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;

	sb->s_blocksize = 1UL << layout->blocksize_bits;
	sb->s_blocksize_bits = layout->blocksize_bits;

	sbi->s_groups_count = layout->group_count;
	sbi->s_blocks_per_group = layout->blocks_per_group;
	sbi->s_cluster_bits = layout->cluster_bits;
	sbi->s_cluster_ratio = 1U << layout->cluster_bits;
	sbi->s_clusters_per_group = layout->blocks_per_group >>
				    layout->cluster_bits;
	sbi->s_desc_size = layout->desc_size;
	sbi->s_desc_per_block_bits =
		sb->s_blocksize_bits - (fls(layout->desc_size) - 1);
	sbi->s_desc_per_block = 1 << sbi->s_desc_per_block_bits;

	es->s_first_data_block = cpu_to_le32(0);
	es->s_blocks_count_lo = cpu_to_le32(layout->blocks_per_group *
					    layout->group_count);
}

static int mbt_grp_ctx_init(struct super_block *sb,
			    struct mbt_grp_ctx *grp_ctx)
{
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);

	grp_ctx->bitmap_bh.b_data = kzalloc(EXT4_BLOCK_SIZE(sb), GFP_KERNEL);
	if (grp_ctx->bitmap_bh.b_data == NULL)
		return -ENOMEM;
	mb_set_bits(grp_ctx->bitmap_bh.b_data, max, sb->s_blocksize * 8 - max);
	ext4_free_group_clusters_set(sb, &grp_ctx->desc, max);

	return 0;
}

static void mbt_grp_ctx_release(struct mbt_grp_ctx *grp_ctx)
{
	kfree(grp_ctx->bitmap_bh.b_data);
	grp_ctx->bitmap_bh.b_data = NULL;
}

static void mbt_ctx_mark_used(struct super_block *sb, ext4_group_t group,
			      unsigned int start, unsigned int len)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	mb_set_bits(grp_ctx->bitmap_bh.b_data, start, len);
}

static void *mbt_ctx_bitmap(struct super_block *sb, ext4_group_t group)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	return grp_ctx->bitmap_bh.b_data;
}

/* Called after mbt_init_sb_layout() so the group geometry is known. */
static int mbt_ctx_init(struct super_block *sb)
{
	struct mbt_ctx *ctx = MBT_CTX(sb);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	ctx->grp_ctx = kcalloc(ngroups, sizeof(struct mbt_grp_ctx),
			       GFP_KERNEL);
	if (ctx->grp_ctx == NULL)
		return -ENOMEM;

	for (i = 0; i < ngroups; i++)
		if (mbt_grp_ctx_init(sb, &ctx->grp_ctx[i]))
			goto out;

	/*
	 * The first data block (first cluster in the first group) is
	 * used by metadata; mark it used so allocations never hand out
	 * that block, which would fail the ext4_sb_block_valid() check.
	 */
	mb_set_bits(ctx->grp_ctx[0].bitmap_bh.b_data, 0, 1);
	ext4_free_group_clusters_set(sb, &ctx->grp_ctx[0].desc,
				     EXT4_CLUSTERS_PER_GROUP(sb) - 1);

	return 0;
out:
	while (i-- > 0)
		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
	kfree(ctx->grp_ctx);
	return -ENOMEM;
}

static void mbt_ctx_release(struct super_block *sb)
{
	struct mbt_ctx *ctx = MBT_CTX(sb);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++)
		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
	kfree(ctx->grp_ctx);
}

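/*
 * Static stubs: redirect the bitmap-read, bitmap-wait, descriptor and
 * bitmap-mark helpers at the in-memory test context, so mballoc runs
 * without any on-disk structures or real I/O.
 */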
static struct buffer_head *
ext4_read_block_bitmap_nowait_stub(struct super_block *sb, ext4_group_t block_group,
				   bool ignore_locked)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

	/* paired with brelse from caller of ext4_read_block_bitmap_nowait */
	get_bh(&grp_ctx->bitmap_bh);
	return &grp_ctx->bitmap_bh;
}

static int ext4_wait_block_bitmap_stub(struct super_block *sb,
				       ext4_group_t block_group,
				       struct buffer_head *bh)
{
	/*
	 * real ext4_wait_block_bitmap will set these flags and
	 * functions like ext4_mb_init_cache will verify the flags.
	 */
	set_buffer_uptodate(bh);
	set_bitmap_uptodate(bh);
	set_buffer_verified(bh);
	return 0;
}

static struct ext4_group_desc *
ext4_get_group_desc_stub(struct super_block *sb, ext4_group_t block_group,
			 struct buffer_head **bh)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

	if (bh != NULL)
		*bh = &grp_ctx->gd_bh;

	return &grp_ctx->desc;
}

static int
ext4_mb_mark_context_stub(handle_t *handle, struct super_block *sb, bool state,
			  ext4_group_t group, ext4_grpblk_t blkoff,
			  ext4_grpblk_t len, int flags,
			  ext4_grpblk_t *ret_changed)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
	struct buffer_head *bitmap_bh = &grp_ctx->bitmap_bh;

	if (state)
		mb_set_bits(bitmap_bh->b_data, blkoff, len);
	else
		mb_clear_bits(bitmap_bh->b_data, blkoff, len);

	return 0;
}

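/*
 * Per-test setup: build the fake superblock and group contexts,
 * activate the stubs, then run mballoc initialization against them.
 * The stubs must be live before mbt_mb_init() since ext4_mb_init()
 * calls into the stubbed helpers.
 */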
#define TEST_GOAL_GROUP 1
static int mbt_kunit_init(struct kunit *test)
{
	struct mbt_ext4_block_layout *layout =
		(struct mbt_ext4_block_layout *)(test->param_value);
	struct super_block *sb;
	int ret;

	sb = mbt_ext4_alloc_super_block();
	if (sb == NULL)
		return -ENOMEM;

	mbt_init_sb_layout(sb, layout);

	ret = mbt_ctx_init(sb);
	if (ret != 0) {
		mbt_ext4_free_super_block(sb);
		return ret;
	}

	test->priv = sb;
	kunit_activate_static_stub(test,
				   ext4_read_block_bitmap_nowait,
				   ext4_read_block_bitmap_nowait_stub);
	kunit_activate_static_stub(test,
				   ext4_wait_block_bitmap,
				   ext4_wait_block_bitmap_stub);
	kunit_activate_static_stub(test,
				   ext4_get_group_desc,
				   ext4_get_group_desc_stub);
	kunit_activate_static_stub(test,
				   ext4_mb_mark_context,
				   ext4_mb_mark_context_stub);

	/* stub functions will be called in mbt_mb_init->ext4_mb_init */
	if (mbt_mb_init(sb) != 0) {
		mbt_ctx_release(sb);
		mbt_ext4_free_super_block(sb);
		return -ENOMEM;
	}

	return 0;
}

static void mbt_kunit_exit(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;

	mbt_mb_release(sb);
	mbt_ctx_release(sb);
	mbt_ext4_free_super_block(sb);
}

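/*
 * Exercise ext4_mb_new_blocks_simple() through its fallback order:
 * at the goal, after the goal within the goal group, in the next
 * group, wrapping around to an earlier group, and finally failing
 * once every group is full.
 */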
static void test_new_blocks_simple(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct inode *inode;
	struct ext4_allocation_request ar;
	ext4_group_t i, goal_group = TEST_GOAL_GROUP;
	int err = 0;
	ext4_fsblk_t found;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;

	inode->i_sb = sb;
	ar.inode = inode;

	/* get block at goal */
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test, ar.goal, found,
		"failed to alloc block at goal, expected %llu found %llu",
		ar.goal, found);

	/* get block after goal in goal group */
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test, ar.goal + EXT4_C2B(sbi, 1), found,
		"failed to alloc block after goal in goal group, expected %llu found %llu",
		ar.goal + EXT4_C2B(sbi, 1), found);

	/* get block after goal group */
	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test,
		ext4_group_first_block_no(sb, goal_group + 1), found,
		"failed to alloc block after goal group, expected %llu found %llu",
		ext4_group_first_block_no(sb, goal_group + 1), found);

	/* get block before goal group */
	for (i = goal_group; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test,
		ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found,
		"failed to alloc block before goal group, expected %llu found %llu",
		ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found);

	/* no block available, fail to allocate block */
	for (i = 0; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_NE_MSG(test, err, 0,
			    "unexpectedly got a block when none is available");
}

#define TEST_RANGE_COUNT 8

struct test_range {
	ext4_grpblk_t start;
	ext4_grpblk_t len;
};

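/*
 * Split the group into TEST_RANGE_COUNT equal windows and pick one
 * random range per window, so the generated ranges never overlap.
 */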
static void
mbt_generate_test_ranges(struct super_block *sb, struct test_range *ranges,
			 int count)
{
	ext4_grpblk_t start, len, max;
	int i;

	max = EXT4_CLUSTERS_PER_GROUP(sb) / count;
	for (i = 0; i < count; i++) {
		start = get_random_u32() % max;
		len = get_random_u32() % max;
		len = min(len, max - start);

		ranges[i].start = start + i * max;
		ranges[i].len = len;
	}
}

static void
validate_free_blocks_simple(struct kunit *test, struct super_block *sb,
			    ext4_group_t goal_group, ext4_grpblk_t start,
			    ext4_grpblk_t len)
{
	void *bitmap;
	ext4_grpblk_t bit, max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_group_t i;

	for (i = 0; i < ext4_get_groups_count(sb); i++) {
		if (i == goal_group)
			continue;

		bitmap = mbt_ctx_bitmap(sb, i);
		bit = mb_find_next_zero_bit(bitmap, max, 0);
		KUNIT_ASSERT_EQ_MSG(test, bit, max,
				    "free block on unexpected group %d", i);
	}

	bitmap = mbt_ctx_bitmap(sb, goal_group);
	bit = mb_find_next_zero_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, bit, start);

	bit = mb_find_next_bit(bitmap, max, bit + 1);
	KUNIT_ASSERT_EQ(test, bit, start + len);
}

static void
test_free_blocks_simple_range(struct kunit *test, ext4_group_t goal_group,
			      ext4_grpblk_t start, ext4_grpblk_t len)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode;
	ext4_fsblk_t block;

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;
	inode->i_sb = sb;

	if (len == 0)
		return;

	block = ext4_group_first_block_no(sb, goal_group) +
		EXT4_C2B(sbi, start);
	ext4_free_blocks_simple(inode, block, len);
	validate_free_blocks_simple(test, sb, goal_group, start, len);
	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
}

static void test_free_blocks_simple(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_group_t i;
	struct test_range ranges[TEST_RANGE_COUNT];

	for (i = 0; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, max);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_free_blocks_simple_range(test, TEST_GOAL_GROUP,
					      ranges[i].start, ranges[i].len);
}

static void
test_mark_diskspace_used_range(struct kunit *test,
			       struct ext4_allocation_context *ac,
			       ext4_grpblk_t start,
			       ext4_grpblk_t len)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int ret;
	void *bitmap;
	ext4_grpblk_t i, max;

	/* ext4_mb_mark_diskspace_used will BUG if len is 0 */
	if (len == 0)
		return;

	ac->ac_b_ex.fe_group = TEST_GOAL_GROUP;
	ac->ac_b_ex.fe_start = start;
	ac->ac_b_ex.fe_len = len;

	bitmap = mbt_ctx_bitmap(sb, TEST_GOAL_GROUP);
	memset(bitmap, 0, sb->s_blocksize);
	ret = ext4_mb_mark_diskspace_used(ac, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	max = EXT4_CLUSTERS_PER_GROUP(sb);
	i = mb_find_next_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, i, start);
	i = mb_find_next_zero_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, i, start + len);
	i = mb_find_next_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, max, i);
}

static void test_mark_diskspace_used(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct inode *inode;
	struct ext4_allocation_context ac;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;
	inode->i_sb = sb;

	ac.ac_status = AC_STATUS_FOUND;
	ac.ac_sb = sb;
	ac.ac_inode = inode;
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mark_diskspace_used_range(test, &ac, ranges[i].start,
					       ranges[i].len);
}

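/*
 * Reference implementation of buddy generation: the first pass walks
 * the order-0 bitmap and pairs adjacent free clusters into order-1
 * buddy bits, the following passes fold each order into the next one
 * up, and a final scan over the bitmap counts the free fragments.
 * Its result is compared against ext4_mb_generate_buddy() below.
 */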
static void mbt_generate_buddy(struct super_block *sb, void *buddy,
			       void *bitmap, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	uint32_t order, off;
	void *bb, *bb_h;
	int max;

	memset(buddy, 0xff, sb->s_blocksize);
	memset(grp, 0, offsetof(struct ext4_group_info,
				bb_counters[MB_NUM_ORDERS(sb)]));

	bb = bitmap;
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	bb_h = buddy + sbi->s_mb_offsets[1];

	off = mb_find_next_zero_bit(bb, max, 0);
	grp->bb_first_free = off;
	while (off < max) {
		grp->bb_counters[0]++;
		grp->bb_free++;

		if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
			grp->bb_free++;
			grp->bb_counters[0]--;
			mb_clear_bit(off >> 1, bb_h);
			grp->bb_counters[1]++;
			grp->bb_largest_free_order = 1;
			off++;
		}

		off = mb_find_next_zero_bit(bb, max, off + 1);
	}

	for (order = 1; order < MB_NUM_ORDERS(sb) - 1; order++) {
		bb = buddy + sbi->s_mb_offsets[order];
		bb_h = buddy + sbi->s_mb_offsets[order + 1];
		max = max >> 1;
		off = mb_find_next_zero_bit(bb, max, 0);

		while (off < max) {
			if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
				mb_set_bits(bb, off, 2);
				grp->bb_counters[order] -= 2;
				mb_clear_bit(off >> 1, bb_h);
				grp->bb_counters[order + 1]++;
				grp->bb_largest_free_order = order + 1;
				off++;
			}

			off = mb_find_next_zero_bit(bb, max, off + 1);
		}
	}

	max = EXT4_CLUSTERS_PER_GROUP(sb);
	off = mb_find_next_zero_bit(bitmap, max, 0);
	while (off < max) {
		grp->bb_fragments++;

		off = mb_find_next_bit(bitmap, max, off + 1);
		if (off + 1 >= max)
			break;

		off = mb_find_next_zero_bit(bitmap, max, off + 1);
	}
}

static void
mbt_validate_group_info(struct kunit *test, struct ext4_group_info *grp1,
			struct ext4_group_info *grp2)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	KUNIT_ASSERT_EQ(test, grp1->bb_first_free,
			grp2->bb_first_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_fragments,
			grp2->bb_fragments);
	KUNIT_ASSERT_EQ(test, grp1->bb_free, grp2->bb_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_largest_free_order,
			grp2->bb_largest_free_order);

	for (i = 1; i < MB_NUM_ORDERS(sb); i++) {
		KUNIT_ASSERT_EQ_MSG(test, grp1->bb_counters[i],
				    grp2->bb_counters[i],
				    "bb_counters[%d] diffs, expected %d, generated %d",
				    i, grp1->bb_counters[i],
				    grp2->bb_counters[i]);
	}
}

static void
do_test_generate_buddy(struct kunit *test, struct super_block *sb, void *bitmap,
		       void *mbt_buddy, struct ext4_group_info *mbt_grp,
		       void *ext4_buddy, struct ext4_group_info *ext4_grp)
{
	int i;

	mbt_generate_buddy(sb, mbt_buddy, bitmap, mbt_grp);

	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		ext4_grp->bb_counters[i] = 0;
	/* needed by validation in ext4_mb_generate_buddy */
	ext4_grp->bb_free = mbt_grp->bb_free;
	memset(ext4_buddy, 0xff, sb->s_blocksize);
	ext4_mb_generate_buddy(sb, ext4_buddy, bitmap, TEST_GOAL_GROUP,
			       ext4_grp);

	KUNIT_ASSERT_EQ(test, memcmp(mbt_buddy, ext4_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, mbt_grp, ext4_grp);
}

static void test_mb_generate_buddy(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *expected_bb, *generate_bb;
	struct ext4_group_info *expected_grp, *generate_grp;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	expected_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_bb);
	generate_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, generate_bb);
	expected_grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
				     bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_grp);
	generate_grp = ext4_get_group_info(sb, TEST_GOAL_GROUP);
	KUNIT_ASSERT_NOT_NULL(test, generate_grp);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++) {
		mb_set_bits(bitmap, ranges[i].start, ranges[i].len);
		do_test_generate_buddy(test, sb, bitmap, expected_bb,
				       expected_grp, generate_bb, generate_grp);
	}
}

static void
test_mb_mark_used_range(struct kunit *test, struct ext4_buddy *e4b,
			ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_free_extent ex;
	int i;

	/* mb_mark_used only accepts non-zero len */
	if (len == 0)
		return;

	ex.fe_start = start;
	ex.fe_len = len;
	ex.fe_group = TEST_GOAL_GROUP;

	ext4_lock_group(sb, TEST_GOAL_GROUP);
	mb_mark_used(e4b, &ex);
	ext4_unlock_group(sb, TEST_GOAL_GROUP);

	mb_set_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free -= len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);
}

static void test_mb_mark_used(struct kunit *test)
{
	struct ext4_buddy e4b;
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *buddy;
	struct ext4_group_info *grp;
	int ret;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	/* buddy cache assumes that each page contains at least one block */
	if (sb->s_blocksize > PAGE_SIZE)
		kunit_skip(test, "blocksize exceeds pagesize");

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
			    bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);

	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
	KUNIT_ASSERT_EQ(test, ret, 0);

	grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mb_mark_used_range(test, &e4b, ranges[i].start,
					ranges[i].len, bitmap, buddy, grp);

	ext4_mb_unload_buddy(&e4b);
}

static void
test_mb_free_blocks_range(struct kunit *test, struct ext4_buddy *e4b,
			  ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			  void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	/* mb_free_blocks will WARN if len is 0 */
	if (len == 0)
		return;

	ext4_lock_group(sb, e4b->bd_group);
	mb_free_blocks(NULL, e4b, start, len);
	ext4_unlock_group(sb, e4b->bd_group);

	mb_clear_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free += len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);
}

static void test_mb_free_blocks(struct kunit *test)
{
	struct ext4_buddy e4b;
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *buddy;
	struct ext4_group_info *grp;
	struct ext4_free_extent ex;
	int ret;
	int i;
	struct test_range ranges[TEST_RANGE_COUNT];

	/* buddy cache assumes that each page contains at least one block */
	if (sb->s_blocksize > PAGE_SIZE)
		kunit_skip(test, "blocksize exceeds pagesize");

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
			    bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);

	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ex.fe_start = 0;
	ex.fe_len = EXT4_CLUSTERS_PER_GROUP(sb);
	ex.fe_group = TEST_GOAL_GROUP;

	ext4_lock_group(sb, TEST_GOAL_GROUP);
	mb_mark_used(&e4b, &ex);
	ext4_unlock_group(sb, TEST_GOAL_GROUP);

	grp->bb_free = 0;
	memset(bitmap, 0xff, sb->s_blocksize);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mb_free_blocks_range(test, &e4b, ranges[i].start,
					  ranges[i].len, bitmap, buddy, grp);

	ext4_mb_unload_buddy(&e4b);
}

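/*
 * Rough cost estimate for mb_mark_used(): repeatedly mark and free
 * random ranges and accumulate only the marking time. jiffies-level
 * resolution is coarse, so this is an estimate rather than a precise
 * benchmark, which is why the case is tagged KUNIT_SPEED_SLOW.
 */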
#define COUNT_FOR_ESTIMATE 100000
static void test_mb_mark_used_cost(struct kunit *test)
{
	struct ext4_buddy e4b;
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_free_extent ex;
	int ret;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i, j;
	unsigned long start, end, all = 0;

	/* buddy cache assumes that each page contains at least one block */
	if (sb->s_blocksize > PAGE_SIZE)
		kunit_skip(test, "blocksize exceeds pagesize");

	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ex.fe_group = TEST_GOAL_GROUP;
	for (j = 0; j < COUNT_FOR_ESTIMATE; j++) {
		mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
		start = jiffies;
		for (i = 0; i < TEST_RANGE_COUNT; i++) {
			if (ranges[i].len == 0)
				continue;

			ex.fe_start = ranges[i].start;
			ex.fe_len = ranges[i].len;
			ext4_lock_group(sb, TEST_GOAL_GROUP);
			mb_mark_used(&e4b, &ex);
			ext4_unlock_group(sb, TEST_GOAL_GROUP);
		}
		end = jiffies;
		all += (end - start);

		for (i = 0; i < TEST_RANGE_COUNT; i++) {
			if (ranges[i].len == 0)
				continue;

			ext4_lock_group(sb, TEST_GOAL_GROUP);
			mb_free_blocks(NULL, &e4b, ranges[i].start,
				       ranges[i].len);
			ext4_unlock_group(sb, TEST_GOAL_GROUP);
		}
	}

	kunit_info(test, "mark_used cost: %lu jiffies\n", all);
	ext4_mb_unload_buddy(&e4b);
}

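/*
 * Parameterized layouts: 1KiB, 4KiB and 64KiB block sizes, each with
 * 8 blocks per cluster and four groups of 8192 blocks.
 */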
static const struct mbt_ext4_block_layout mbt_test_layouts[] = {
	{
		.blocksize_bits = 10,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
	{
		.blocksize_bits = 12,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
	{
		.blocksize_bits = 16,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
};

static void mbt_show_layout(const struct mbt_ext4_block_layout *layout,
			    char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_bits=%d cluster_bits=%d "
		 "blocks_per_group=%d group_count=%d desc_size=%d\n",
		 layout->blocksize_bits, layout->cluster_bits,
		 layout->blocks_per_group, layout->group_count,
		 layout->desc_size);
}
KUNIT_ARRAY_PARAM(mbt_layouts, mbt_test_layouts, mbt_show_layout);

static struct kunit_case mbt_test_cases[] = {
	KUNIT_CASE_PARAM(test_new_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_free_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_generate_buddy, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_mark_used, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_free_blocks, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mark_diskspace_used, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM_ATTR(test_mb_mark_used_cost, mbt_layouts_gen_params,
			      { .speed = KUNIT_SPEED_SLOW }),
	{}
};

static struct kunit_suite mbt_test_suite = {
	.name = "ext4_mballoc_test",
	.init = mbt_kunit_init,
	.exit = mbt_kunit_exit,
	.test_cases = mbt_test_cases,
};

kunit_test_suites(&mbt_test_suite);

MODULE_LICENSE("GPL");