// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test of ext4 multiblock allocation.
 */

#include <kunit/test.h>
#include <kunit/static_stub.h>
#include <linux/random.h>

#include "ext4.h"

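/*
 * These tests exercise mballoc against a fake, memory-only superblock:
 * each block group gets an in-memory block bitmap and group descriptor
 * (struct mbt_grp_ctx below), and the functions that would normally read
 * them from disk are redirected to these buffers via KUnit static stubs
 * (see mbt_kunit_init). No block device I/O ever happens.
 */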
struct mbt_grp_ctx {
	struct buffer_head bitmap_bh;
	/* desc and gd_bh are just placeholders for now */
	struct ext4_group_desc desc;
	struct buffer_head gd_bh;
};

struct mbt_ctx {
	struct mbt_grp_ctx *grp_ctx;
};

struct mbt_ext4_super_block {
	struct ext4_super_block es;
	struct ext4_sb_info sbi;
	struct mbt_ctx mbt_ctx;
};

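/*
 * sb->s_fs_info points at the embedded sbi, so container_of() recovers
 * the wrapping mbt_ext4_super_block and, through it, the test context.
 */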
#define MBT_SB(_sb) (container_of((_sb)->s_fs_info, struct mbt_ext4_super_block, sbi))
#define MBT_CTX(_sb) (&MBT_SB(_sb)->mbt_ctx)
#define MBT_GRP_CTX(_sb, _group) (&MBT_CTX(_sb)->grp_ctx[_group])

static const struct super_operations mbt_sops = {
};

static void mbt_kill_sb(struct super_block *sb)
{
	generic_shutdown_super(sb);
}

static struct file_system_type mbt_fs_type = {
	.name = "mballoc test",
	.kill_sb = mbt_kill_sb,
};

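/*
 * Bring up just enough of a superblock for ext4_mb_init() to succeed:
 * a zeroed block device and request queue (ext4_mb_init() calls
 * bdev_nonrot(sb->s_bdev)), an empty s_inodes list for new_inode(),
 * and the two percpu cluster counters that mballoc updates.
 */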
static int mbt_mb_init(struct super_block *sb)
{
	ext4_fsblk_t block;
	int ret;

	/* needed by ext4_mb_init->bdev_nonrot(sb->s_bdev) */
	sb->s_bdev = kzalloc(sizeof(*sb->s_bdev), GFP_KERNEL);
	if (sb->s_bdev == NULL)
		return -ENOMEM;

	sb->s_bdev->bd_queue = kzalloc(sizeof(struct request_queue), GFP_KERNEL);
	if (sb->s_bdev->bd_queue == NULL) {
		kfree(sb->s_bdev);
		return -ENOMEM;
	}

	/*
	 * needed by ext4_mb_init->ext4_mb_init_backend->
	 * sbi->s_buddy_cache = new_inode(sb);
	 */
	INIT_LIST_HEAD(&sb->s_inodes);
	sb->s_op = &mbt_sops;

	ret = ext4_mb_init(sb);
	if (ret != 0)
		goto err_out;

	block = ext4_count_free_clusters(sb);
	ret = percpu_counter_init(&EXT4_SB(sb)->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_mb_release;

	ret = percpu_counter_init(&EXT4_SB(sb)->s_dirtyclusters_counter, 0,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_freeclusters;

	return 0;

err_freeclusters:
	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
err_mb_release:
	ext4_mb_release(sb);
err_out:
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
	return ret;
}

static void mbt_mb_release(struct super_block *sb)
{
	percpu_counter_destroy(&EXT4_SB(sb)->s_dirtyclusters_counter);
	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
	ext4_mb_release(sb);
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
}

static int mbt_set(struct super_block *sb, void *data)
{
	return 0;
}

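/*
 * Allocate a superblock through sget() so the VFS bookkeeping (s_umount,
 * the super list) is real, but back it with our own mbt_ext4_super_block
 * instead of reading anything from disk.
 */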
static struct super_block *mbt_ext4_alloc_super_block(void)
{
	struct mbt_ext4_super_block *fsb;
	struct super_block *sb;
	struct ext4_sb_info *sbi;

	fsb = kzalloc(sizeof(*fsb), GFP_KERNEL);
	if (fsb == NULL)
		return NULL;

	sb = sget(&mbt_fs_type, NULL, mbt_set, 0, NULL);
	if (IS_ERR(sb))
		goto out;

	sbi = &fsb->sbi;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_deactivate;

	bgl_lock_init(sbi->s_blockgroup_lock);

	sbi->s_es = &fsb->es;
	sb->s_fs_info = sbi;

	up_write(&sb->s_umount);
	return sb;

out_deactivate:
	deactivate_locked_super(sb);
out:
	kfree(fsb);
	return NULL;
}

static void mbt_ext4_free_super_block(struct super_block *sb)
{
	struct mbt_ext4_super_block *fsb = MBT_SB(sb);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	kfree(sbi->s_blockgroup_lock);
	deactivate_super(sb);
	kfree(fsb);
}

struct mbt_ext4_block_layout {
	unsigned char blocksize_bits;
	unsigned int cluster_bits;
	uint32_t blocks_per_group;
	ext4_group_t group_count;
	uint16_t desc_size;
};

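/*
 * Derive the sbi fields from the chosen layout. For example, with
 * blocksize_bits = 12 and desc_size = 64:
 *   s_desc_per_block_bits = 12 - (fls(64) - 1) = 12 - 6 = 6
 *   s_desc_per_block      = 64 descriptors per 4096-byte block
 * and with cluster_bits = 3, s_cluster_ratio = 8 blocks per cluster.
 */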
static void mbt_init_sb_layout(struct super_block *sb,
			       struct mbt_ext4_block_layout *layout)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;

	sb->s_blocksize = 1UL << layout->blocksize_bits;
	sb->s_blocksize_bits = layout->blocksize_bits;

	sbi->s_groups_count = layout->group_count;
	sbi->s_blocks_per_group = layout->blocks_per_group;
	sbi->s_cluster_bits = layout->cluster_bits;
	sbi->s_cluster_ratio = 1U << layout->cluster_bits;
	sbi->s_clusters_per_group = layout->blocks_per_group >>
				    layout->cluster_bits;
	sbi->s_desc_size = layout->desc_size;
	sbi->s_desc_per_block_bits =
		sb->s_blocksize_bits - (fls(layout->desc_size) - 1);
	sbi->s_desc_per_block = 1 << sbi->s_desc_per_block_bits;

	es->s_first_data_block = cpu_to_le32(0);
	es->s_blocks_count_lo = cpu_to_le32(layout->blocks_per_group *
					    layout->group_count);
}

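/*
 * Build one group's fake bitmap: all clusters start out free, and the
 * tail of the bitmap block beyond EXT4_CLUSTERS_PER_GROUP() is padded
 * with 1s, matching what mballoc expects of an on-disk bitmap.
 */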
static int mbt_grp_ctx_init(struct super_block *sb,
			    struct mbt_grp_ctx *grp_ctx)
{
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);

	grp_ctx->bitmap_bh.b_data = kzalloc(EXT4_BLOCK_SIZE(sb), GFP_KERNEL);
	if (grp_ctx->bitmap_bh.b_data == NULL)
		return -ENOMEM;
	mb_set_bits(grp_ctx->bitmap_bh.b_data, max, sb->s_blocksize * 8 - max);
	ext4_free_group_clusters_set(sb, &grp_ctx->desc, max);

	return 0;
}

static void mbt_grp_ctx_release(struct mbt_grp_ctx *grp_ctx)
{
	kfree(grp_ctx->bitmap_bh.b_data);
	grp_ctx->bitmap_bh.b_data = NULL;
}

static void mbt_ctx_mark_used(struct super_block *sb, ext4_group_t group,
			      unsigned int start, unsigned int len)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	mb_set_bits(grp_ctx->bitmap_bh.b_data, start, len);
}

static void *mbt_ctx_bitmap(struct super_block *sb, ext4_group_t group)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	return grp_ctx->bitmap_bh.b_data;
}

/* called after mbt_init_sb_layout */
static int mbt_ctx_init(struct super_block *sb)
{
	struct mbt_ctx *ctx = MBT_CTX(sb);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	ctx->grp_ctx = kcalloc(ngroups, sizeof(struct mbt_grp_ctx),
			       GFP_KERNEL);
	if (ctx->grp_ctx == NULL)
		return -ENOMEM;

	for (i = 0; i < ngroups; i++)
		if (mbt_grp_ctx_init(sb, &ctx->grp_ctx[i]))
			goto out;

	/*
	 * The first data block (the first cluster in the first group) is
	 * used by metadata; mark it used so no allocation hands out block
	 * 0, which would fail the ext4_sb_block_valid() check.
	 */
	mb_set_bits(ctx->grp_ctx[0].bitmap_bh.b_data, 0, 1);
	ext4_free_group_clusters_set(sb, &ctx->grp_ctx[0].desc,
				     EXT4_CLUSTERS_PER_GROUP(sb) - 1);

	return 0;
out:
	while (i-- > 0)
		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
	kfree(ctx->grp_ctx);
	return -ENOMEM;
}

static void mbt_ctx_release(struct super_block *sb)
{
	struct mbt_ctx *ctx = MBT_CTX(sb);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++)
		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
	kfree(ctx->grp_ctx);
}

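/*
 * The stubs below stand in for the real bitmap/descriptor I/O paths.
 * mbt_kunit_init() activates them with kunit_activate_static_stub(), so
 * mballoc code running under the test reads and writes the in-memory
 * mbt_grp_ctx buffers instead of buffer heads backed by a disk.
 */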
static struct buffer_head *
ext4_read_block_bitmap_nowait_stub(struct super_block *sb, ext4_group_t block_group,
				   bool ignore_locked)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

	/* paired with brelse from caller of ext4_read_block_bitmap_nowait */
	get_bh(&grp_ctx->bitmap_bh);
	return &grp_ctx->bitmap_bh;
}

static int ext4_wait_block_bitmap_stub(struct super_block *sb,
				       ext4_group_t block_group,
				       struct buffer_head *bh)
{
	/*
	 * The real ext4_wait_block_bitmap() sets these flags, and
	 * functions like ext4_mb_init_cache() verify them.
	 */
	set_buffer_uptodate(bh);
	set_bitmap_uptodate(bh);
	set_buffer_verified(bh);
	return 0;
}

static struct ext4_group_desc *
ext4_get_group_desc_stub(struct super_block *sb, ext4_group_t block_group,
			 struct buffer_head **bh)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

	if (bh != NULL)
		*bh = &grp_ctx->gd_bh;

	return &grp_ctx->desc;
}

static int
ext4_mb_mark_context_stub(handle_t *handle, struct super_block *sb, bool state,
			  ext4_group_t group, ext4_grpblk_t blkoff,
			  ext4_grpblk_t len, int flags,
			  ext4_grpblk_t *ret_changed)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
	struct buffer_head *bitmap_bh = &grp_ctx->bitmap_bh;

	if (state)
		mb_set_bits(bitmap_bh->b_data, blkoff, len);
	else
		mb_clear_bits(bitmap_bh->b_data, blkoff, len);

	return 0;
}

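/* group 1, not 0: cluster 0 is premarked used in mbt_ctx_init() */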
#define TEST_GOAL_GROUP 1
static int mbt_kunit_init(struct kunit *test)
{
	struct mbt_ext4_block_layout *layout =
		(struct mbt_ext4_block_layout *)(test->param_value);
	struct super_block *sb;
	int ret;

	sb = mbt_ext4_alloc_super_block();
	if (sb == NULL)
		return -ENOMEM;

	mbt_init_sb_layout(sb, layout);

	ret = mbt_ctx_init(sb);
	if (ret != 0) {
		mbt_ext4_free_super_block(sb);
		return ret;
	}

	test->priv = sb;
	kunit_activate_static_stub(test,
				   ext4_read_block_bitmap_nowait,
				   ext4_read_block_bitmap_nowait_stub);
	kunit_activate_static_stub(test,
				   ext4_wait_block_bitmap,
				   ext4_wait_block_bitmap_stub);
	kunit_activate_static_stub(test,
				   ext4_get_group_desc,
				   ext4_get_group_desc_stub);
	kunit_activate_static_stub(test,
				   ext4_mb_mark_context,
				   ext4_mb_mark_context_stub);

	/* stub functions will be called from mbt_mb_init->ext4_mb_init */
	if (mbt_mb_init(sb) != 0) {
		mbt_ctx_release(sb);
		mbt_ext4_free_super_block(sb);
		return -ENOMEM;
	}

	return 0;
}

static void mbt_kunit_exit(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;

	mbt_mb_release(sb);
	mbt_ctx_release(sb);
	mbt_ext4_free_super_block(sb);
}

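/*
 * Walk ext4_mb_new_blocks_simple() through its search order: hit the
 * goal exactly, then the next free cluster in the goal group, then the
 * first free cluster of the next group, then wrap around to the lowest
 * free cluster, and finally fail once every cluster is in use.
 */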
static void test_new_blocks_simple(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct inode *inode;
	struct ext4_allocation_request ar;
	ext4_group_t i, goal_group = TEST_GOAL_GROUP;
	int err = 0;
	ext4_fsblk_t found;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;

	inode->i_sb = sb;
	ar.inode = inode;

	/* get block at goal */
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test, ar.goal, found,
		"failed to alloc block at goal, expected %llu found %llu",
		ar.goal, found);

	/* get block after goal in goal group */
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test, ar.goal + EXT4_C2B(sbi, 1), found,
		"failed to alloc block after goal in goal group, expected %llu found %llu",
		ar.goal + EXT4_C2B(sbi, 1), found);

	/* get block after goal group */
	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test,
		ext4_group_first_block_no(sb, goal_group + 1), found,
		"failed to alloc block after goal group, expected %llu found %llu",
		ext4_group_first_block_no(sb, goal_group + 1), found);

	/* get block before goal group */
	for (i = goal_group; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test,
		ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found,
		"failed to alloc block before goal group, expected %llu found %llu",
		ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found);

	/* no block available, allocation should fail */
	for (i = 0; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_NE_MSG(test, err, 0,
		"unexpectedly got a block when none is available");
}

#define TEST_RANGE_COUNT 8

struct test_range {
	ext4_grpblk_t start;
	ext4_grpblk_t len;
};

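/*
 * Split the group into 'count' equal windows and pick one random,
 * possibly empty, range inside each, so the ranges never overlap.
 */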
static void
mbt_generate_test_ranges(struct super_block *sb, struct test_range *ranges,
			 int count)
{
	ext4_grpblk_t start, len, max;
	int i;

	max = EXT4_CLUSTERS_PER_GROUP(sb) / count;
	for (i = 0; i < count; i++) {
		start = get_random_u32() % max;
		len = get_random_u32() % max;
		len = min(len, max - start);

		ranges[i].start = start + i * max;
		ranges[i].len = len;
	}
}

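/*
 * After freeing [start, start + len) in the goal group, every other
 * group must still be fully used and the goal group's only free extent
 * must be exactly that range.
 */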
static void
validate_free_blocks_simple(struct kunit *test, struct super_block *sb,
			    ext4_group_t goal_group, ext4_grpblk_t start,
			    ext4_grpblk_t len)
{
	void *bitmap;
	ext4_grpblk_t bit, max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_group_t i;

	for (i = 0; i < ext4_get_groups_count(sb); i++) {
		if (i == goal_group)
			continue;

		bitmap = mbt_ctx_bitmap(sb, i);
		bit = mb_find_next_zero_bit(bitmap, max, 0);
		KUNIT_ASSERT_EQ_MSG(test, bit, max,
				    "free block on unexpected group %d", i);
	}

	bitmap = mbt_ctx_bitmap(sb, goal_group);
	bit = mb_find_next_zero_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, bit, start);

	bit = mb_find_next_bit(bitmap, max, bit + 1);
	KUNIT_ASSERT_EQ(test, bit, start + len);
}

static void
test_free_blocks_simple_range(struct kunit *test, ext4_group_t goal_group,
			      ext4_grpblk_t start, ext4_grpblk_t len)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode;
	ext4_fsblk_t block;

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;
	inode->i_sb = sb;

	if (len == 0)
		return;

	block = ext4_group_first_block_no(sb, goal_group) +
		EXT4_C2B(sbi, start);
	ext4_free_blocks_simple(inode, block, len);
	validate_free_blocks_simple(test, sb, goal_group, start, len);
	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
}

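/*
 * Start from a completely used filesystem, then free and re-validate a
 * batch of random ranges one at a time (each range is re-marked used
 * before the next round).
 */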
static void test_free_blocks_simple(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_group_t i;
	struct test_range ranges[TEST_RANGE_COUNT];

	for (i = 0; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, max);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_free_blocks_simple_range(test, TEST_GOAL_GROUP,
					      ranges[i].start, ranges[i].len);
}

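/*
 * Let ext4_mb_mark_diskspace_used() mark the best-found extent on a
 * zeroed bitmap, then check that exactly [start, start + len) is set.
 */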
static void
test_mark_diskspace_used_range(struct kunit *test,
			       struct ext4_allocation_context *ac,
			       ext4_grpblk_t start,
			       ext4_grpblk_t len)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int ret;
	void *bitmap;
	ext4_grpblk_t i, max;

	/* ext4_mb_mark_diskspace_used will BUG if len is 0 */
	if (len == 0)
		return;

	ac->ac_b_ex.fe_group = TEST_GOAL_GROUP;
	ac->ac_b_ex.fe_start = start;
	ac->ac_b_ex.fe_len = len;

	bitmap = mbt_ctx_bitmap(sb, TEST_GOAL_GROUP);
	memset(bitmap, 0, sb->s_blocksize);
	ret = ext4_mb_mark_diskspace_used(ac, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	max = EXT4_CLUSTERS_PER_GROUP(sb);
	i = mb_find_next_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, i, start);
	i = mb_find_next_zero_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, i, start + len);
	i = mb_find_next_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, max, i);
}

static void test_mark_diskspace_used(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct inode *inode;
	struct ext4_allocation_context ac;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;
	inode->i_sb = sb;

	ac.ac_status = AC_STATUS_FOUND;
	ac.ac_sb = sb;
	ac.ac_inode = inode;
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mark_diskspace_used_range(test, &ac, ranges[i].start,
					       ranges[i].len);
}

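/*
 * Reference implementation of buddy generation, kept deliberately simple
 * so it can cross-check ext4_mb_generate_buddy():
 *
 *  1. Scan the order-0 (cluster) bitmap. Every free cluster bumps
 *     bb_free; when an even/odd pair of clusters is both free, the pair
 *     is promoted to one free order-1 buddy instead.
 *  2. Repeat per order: whenever two adjacent free order-n buddies share
 *     a parent (the lower one sits at an even offset), mark both used at
 *     order n and free their parent at order n + 1.
 *  3. Re-scan the cluster bitmap to count fragments (runs of free
 *     clusters).
 *
 * E.g. with clusters 0-3 free: step 1 frees order-1 buddies 0 and 1,
 * step 2 merges them into order-2 buddy 0, and step 3 counts one
 * fragment.
 */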
static void mbt_generate_buddy(struct super_block *sb, void *buddy,
			       void *bitmap, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	uint32_t order, off;
	void *bb, *bb_h;
	int max;

	memset(buddy, 0xff, sb->s_blocksize);
	memset(grp, 0, offsetof(struct ext4_group_info,
				bb_counters[MB_NUM_ORDERS(sb)]));

	bb = bitmap;
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	bb_h = buddy + sbi->s_mb_offsets[1];

	off = mb_find_next_zero_bit(bb, max, 0);
	grp->bb_first_free = off;
	while (off < max) {
		grp->bb_counters[0]++;
		grp->bb_free++;

		if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
			grp->bb_free++;
			grp->bb_counters[0]--;
			mb_clear_bit(off >> 1, bb_h);
			grp->bb_counters[1]++;
			grp->bb_largest_free_order = 1;
			off++;
		}

		off = mb_find_next_zero_bit(bb, max, off + 1);
	}

	for (order = 1; order < MB_NUM_ORDERS(sb) - 1; order++) {
		bb = buddy + sbi->s_mb_offsets[order];
		bb_h = buddy + sbi->s_mb_offsets[order + 1];
		max = max >> 1;
		off = mb_find_next_zero_bit(bb, max, 0);

		while (off < max) {
			if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
				mb_set_bits(bb, off, 2);
				grp->bb_counters[order] -= 2;
				mb_clear_bit(off >> 1, bb_h);
				grp->bb_counters[order + 1]++;
				grp->bb_largest_free_order = order + 1;
				off++;
			}

			off = mb_find_next_zero_bit(bb, max, off + 1);
		}
	}

	max = EXT4_CLUSTERS_PER_GROUP(sb);
	off = mb_find_next_zero_bit(bitmap, max, 0);
	while (off < max) {
		grp->bb_fragments++;

		off = mb_find_next_bit(bitmap, max, off + 1);
		if (off + 1 >= max)
			break;

		off = mb_find_next_zero_bit(bitmap, max, off + 1);
	}
}

static void
mbt_validate_group_info(struct kunit *test, struct ext4_group_info *grp1,
			struct ext4_group_info *grp2)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	KUNIT_ASSERT_EQ(test, grp1->bb_first_free,
			grp2->bb_first_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_fragments,
			grp2->bb_fragments);
	KUNIT_ASSERT_EQ(test, grp1->bb_free, grp2->bb_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_largest_free_order,
			grp2->bb_largest_free_order);

	for (i = 1; i < MB_NUM_ORDERS(sb); i++) {
		KUNIT_ASSERT_EQ_MSG(test, grp1->bb_counters[i],
				    grp2->bb_counters[i],
				    "bb_counters[%d] differs, expected %d, generated %d",
				    i, grp1->bb_counters[i],
				    grp2->bb_counters[i]);
	}
}

static void
do_test_generate_buddy(struct kunit *test, struct super_block *sb, void *bitmap,
		       void *mbt_buddy, struct ext4_group_info *mbt_grp,
		       void *ext4_buddy, struct ext4_group_info *ext4_grp)
{
	int i;

	mbt_generate_buddy(sb, mbt_buddy, bitmap, mbt_grp);

	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		ext4_grp->bb_counters[i] = 0;
	/* needed by validation in ext4_mb_generate_buddy */
	ext4_grp->bb_free = mbt_grp->bb_free;
	memset(ext4_buddy, 0xff, sb->s_blocksize);
	ext4_mb_generate_buddy(sb, ext4_buddy, bitmap, TEST_GOAL_GROUP,
			       ext4_grp);

	KUNIT_ASSERT_EQ(test, memcmp(mbt_buddy, ext4_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, mbt_grp, ext4_grp);
}

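/*
 * Feed the same, progressively more fragmented, bitmap to both the
 * reference generator above and ext4_mb_generate_buddy(), and require
 * identical buddy bitmaps and group statistics each round.
 */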
static void test_mb_generate_buddy(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *expected_bb, *generate_bb;
	struct ext4_group_info *expected_grp, *generate_grp;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	expected_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_bb);
	generate_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, generate_bb);
	expected_grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
				     bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_grp);
	generate_grp = ext4_get_group_info(sb, TEST_GOAL_GROUP);
	KUNIT_ASSERT_NOT_NULL(test, generate_grp);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++) {
		mb_set_bits(bitmap, ranges[i].start, ranges[i].len);
		do_test_generate_buddy(test, sb, bitmap, expected_bb,
				       expected_grp, generate_bb, generate_grp);
	}
}

static void
test_mb_mark_used_range(struct kunit *test, struct ext4_buddy *e4b,
			ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_free_extent ex;
	int i;

	/* mb_mark_used only accepts non-zero len */
	if (len == 0)
		return;

	ex.fe_start = start;
	ex.fe_len = len;
	ex.fe_group = TEST_GOAL_GROUP;

	ext4_lock_group(sb, TEST_GOAL_GROUP);
	mb_mark_used(e4b, &ex);
	ext4_unlock_group(sb, TEST_GOAL_GROUP);

	mb_set_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free -= len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);
}

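/*
 * Mark random ranges used through mb_mark_used() on a loaded buddy and
 * verify, after each step, that the live buddy matches one regenerated
 * from scratch off the shadow bitmap.
 */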
static void test_mb_mark_used(struct kunit *test)
{
	struct ext4_buddy e4b;
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *buddy;
	struct ext4_group_info *grp;
	int ret;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	/* buddy cache assumes that each page contains at least one block */
	if (sb->s_blocksize > PAGE_SIZE)
		kunit_skip(test, "blocksize exceeds pagesize");

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
				bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);

	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
	KUNIT_ASSERT_EQ(test, ret, 0);

	grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mb_mark_used_range(test, &e4b, ranges[i].start,
					ranges[i].len, bitmap, buddy, grp);

	ext4_mb_unload_buddy(&e4b);
}

static void
test_mb_free_blocks_range(struct kunit *test, struct ext4_buddy *e4b,
			  ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			  void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	/* mb_free_blocks will WARN if len is 0 */
	if (len == 0)
		return;

	ext4_lock_group(sb, e4b->bd_group);
	mb_free_blocks(NULL, e4b, start, len);
	ext4_unlock_group(sb, e4b->bd_group);

	mb_clear_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free += len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);
}

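/*
 * The inverse of test_mb_mark_used: start with the whole group marked
 * used, free random ranges through mb_free_blocks(), and validate the
 * buddy against a regenerated reference each time.
 */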
static void test_mb_free_blocks(struct kunit *test)
{
	struct ext4_buddy e4b;
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *buddy;
	struct ext4_group_info *grp;
	struct ext4_free_extent ex;
	int ret;
	int i;
	struct test_range ranges[TEST_RANGE_COUNT];

	/* buddy cache assumes that each page contains at least one block */
	if (sb->s_blocksize > PAGE_SIZE)
		kunit_skip(test, "blocksize exceeds pagesize");

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
				bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);

	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ex.fe_start = 0;
	ex.fe_len = EXT4_CLUSTERS_PER_GROUP(sb);
	ex.fe_group = TEST_GOAL_GROUP;

	ext4_lock_group(sb, TEST_GOAL_GROUP);
	mb_mark_used(&e4b, &ex);
	ext4_unlock_group(sb, TEST_GOAL_GROUP);

	grp->bb_free = 0;
	memset(bitmap, 0xff, sb->s_blocksize);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mb_free_blocks_range(test, &e4b, ranges[i].start,
					  ranges[i].len, bitmap, buddy, grp);

	ext4_mb_unload_buddy(&e4b);
}

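/*
 * Parameterized layouts: 1 KiB, 4 KiB and 64 KiB block sizes, all with
 * 8 blocks per cluster. On systems with 4 KiB pages, the 64 KiB case
 * exercises the kunit_skip() paths in the buddy tests above.
 */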
static const struct mbt_ext4_block_layout mbt_test_layouts[] = {
	{
		.blocksize_bits = 10,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
	{
		.blocksize_bits = 12,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
	{
		.blocksize_bits = 16,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
};

static void mbt_show_layout(const struct mbt_ext4_block_layout *layout,
			    char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_bits=%d cluster_bits=%d "
		 "blocks_per_group=%d group_count=%d desc_size=%d\n",
		 layout->blocksize_bits, layout->cluster_bits,
		 layout->blocks_per_group, layout->group_count,
		 layout->desc_size);
}
KUNIT_ARRAY_PARAM(mbt_layouts, mbt_test_layouts, mbt_show_layout);

static struct kunit_case mbt_test_cases[] = {
	KUNIT_CASE_PARAM(test_new_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_free_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_generate_buddy, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_mark_used, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_free_blocks, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mark_diskspace_used, mbt_layouts_gen_params),
	{}
};

static struct kunit_suite mbt_test_suite = {
	.name = "ext4_mballoc_test",
	.init = mbt_kunit_init,
	.exit = mbt_kunit_exit,
	.test_cases = mbt_test_cases,
};

kunit_test_suites(&mbt_test_suite);

MODULE_LICENSE("GPL");
