Lines matching refs: sb

36 static void mbt_kill_sb(struct super_block *sb)
38 generic_shutdown_super(sb);
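For context, the kill_sb hook matched at line 36 is a one-liner; a minimal reconstruction (everything outside the two listed lines is assumed):

static void mbt_kill_sb(struct super_block *sb)
{
	generic_shutdown_super(sb);
}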
46 static int mbt_mb_init(struct super_block *sb)
51 /* needed by ext4_mb_init->bdev_nonrot(sb->s_bdev) */
52 sb->s_bdev = kzalloc(sizeof(*sb->s_bdev), GFP_KERNEL);
53 if (sb->s_bdev == NULL)
56 sb->s_bdev->bd_queue = kzalloc(sizeof(struct request_queue), GFP_KERNEL);
57 if (sb->s_bdev->bd_queue == NULL) {
58 kfree(sb->s_bdev);
64 * new_inode(sb);
66 INIT_LIST_HEAD(&sb->s_inodes);
67 sb->s_op = &mbt_sops;
69 ret = ext4_mb_init(sb);
73 block = ext4_count_free_clusters(sb);
74 ret = percpu_counter_init(&EXT4_SB(sb)->s_freeclusters_counter, block,
79 ret = percpu_counter_init(&EXT4_SB(sb)->s_dirtyclusters_counter, 0,
87 percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
89 ext4_mb_release(sb);
91 kfree(sb->s_bdev->bd_queue);
92 kfree(sb->s_bdev);
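Lines 46-92 are the setup and unwind of the fake block device and per-sb counters that ext4_mb_init() needs. A sketch of the full pairing they imply; the goto labels and return values are assumptions, only the calls themselves appear in the listing:

static int mbt_mb_init(struct super_block *sb)
{
	ext4_fsblk_t block;
	int ret;

	/* needed by ext4_mb_init->bdev_nonrot(sb->s_bdev) */
	sb->s_bdev = kzalloc(sizeof(*sb->s_bdev), GFP_KERNEL);
	if (sb->s_bdev == NULL)
		return -ENOMEM;

	sb->s_bdev->bd_queue = kzalloc(sizeof(struct request_queue), GFP_KERNEL);
	if (sb->s_bdev->bd_queue == NULL) {
		kfree(sb->s_bdev);
		return -ENOMEM;
	}

	/*
	 * needed by ext4_mb_init->ext4_mb_init_backend->
	 * sbi->s_buddy_cache = new_inode(sb);
	 */
	INIT_LIST_HEAD(&sb->s_inodes);
	sb->s_op = &mbt_sops;

	ret = ext4_mb_init(sb);
	if (ret != 0)
		goto err_out;

	block = ext4_count_free_clusters(sb);
	ret = percpu_counter_init(&EXT4_SB(sb)->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_mb_release;

	ret = percpu_counter_init(&EXT4_SB(sb)->s_dirtyclusters_counter, 0,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_freeclusters;

	return 0;

err_freeclusters:
	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
err_mb_release:
	ext4_mb_release(sb);
err_out:
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
	return ret;
}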
96 static void mbt_mb_release(struct super_block *sb)
98 percpu_counter_destroy(&EXT4_SB(sb)->s_dirtyclusters_counter);
99 percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
100 ext4_mb_release(sb);
101 kfree(sb->s_bdev->bd_queue);
102 kfree(sb->s_bdev);
105 static int mbt_set(struct super_block *sb, void *data)
113 struct super_block *sb;
120 sb = sget(&mbt_fs_type, NULL, mbt_set, 0, NULL);
121 if (IS_ERR(sb))
134 sb->s_fs_info = sbi;
136 up_write(&sb->s_umount);
137 return sb;
140 deactivate_locked_super(sb);
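Lines 105-140 mint the super block through the VFS with sget() instead of mounting anything. A sketch of the surrounding allocator; the fsb container, its sbi field, and the out_free label are assumptions:

static struct super_block *mbt_ext4_alloc_super_block(void)
{
	struct mbt_ext4_super_block *fsb;
	struct super_block *sb;

	fsb = kzalloc(sizeof(*fsb), GFP_KERNEL);
	if (fsb == NULL)
		return NULL;

	/* sget() returns the sb with s_umount held for write */
	sb = sget(&mbt_fs_type, NULL, mbt_set, 0, NULL);
	if (IS_ERR(sb))
		goto out_free;

	/* ... remaining ext4_sb_info wiring, not matched by "sb" ... */
	sb->s_fs_info = &fsb->sbi;

	up_write(&sb->s_umount);
	return sb;
	/* a failure after sget() would unwind via the line 140 match,
	 * deactivate_locked_super(sb) */

out_free:
	kfree(fsb);
	return NULL;
}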
146 static void mbt_ext4_free_super_block(struct super_block *sb)
148 struct mbt_ext4_super_block *fsb = MBT_SB(sb);
149 struct ext4_sb_info *sbi = EXT4_SB(sb);
152 deactivate_super(sb);
164 static void mbt_init_sb_layout(struct super_block *sb,
167 struct ext4_sb_info *sbi = EXT4_SB(sb);
170 sb->s_blocksize = 1UL << layout->blocksize_bits;
171 sb->s_blocksize_bits = layout->blocksize_bits;
181 sb->s_blocksize_bits - (fls(layout->desc_size) - 1);
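The layout helper at lines 164-181 turns a bits-based description into the sizes ext4 reads back everywhere else. A worked example of the arithmetic, assuming 4K blocks and 64-byte group descriptors:

/*
 * blocksize_bits = 12, desc_size = 64:
 *   s_blocksize           = 1UL << 12          = 4096
 *   s_desc_per_block_bits = 12 - (fls(64) - 1) = 12 - 6 = 6
 * i.e. 2^6 = 64 descriptors fit in one 4K block.
 */
sb->s_blocksize = 1UL << layout->blocksize_bits;
sb->s_blocksize_bits = layout->blocksize_bits;
sbi->s_desc_per_block_bits =
	sb->s_blocksize_bits - (fls(layout->desc_size) - 1);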
189 static int mbt_grp_ctx_init(struct super_block *sb,
192 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
194 grp_ctx->bitmap_bh.b_data = kzalloc(EXT4_BLOCK_SIZE(sb), GFP_KERNEL);
197 mb_set_bits(grp_ctx->bitmap_bh.b_data, max, sb->s_blocksize * 8 - max);
198 ext4_free_group_clusters_set(sb, &grp_ctx->desc, max);
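Each fake group gets one block's worth of bitmap; the bits past the group's last cluster are pre-set so the allocator can never hand out padding. A sketch of the helper around lines 189-198, with the error handling assumed:

static int mbt_grp_ctx_init(struct super_block *sb,
			    struct mbt_grp_ctx *grp_ctx)
{
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);

	grp_ctx->bitmap_bh.b_data = kzalloc(EXT4_BLOCK_SIZE(sb), GFP_KERNEL);
	if (grp_ctx->bitmap_bh.b_data == NULL)
		return -ENOMEM;
	/* mark the tail of the bitmap block, past the last cluster, used */
	mb_set_bits(grp_ctx->bitmap_bh.b_data, max, sb->s_blocksize * 8 - max);
	ext4_free_group_clusters_set(sb, &grp_ctx->desc, max);

	return 0;
}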
209 static void mbt_ctx_mark_used(struct super_block *sb, ext4_group_t group,
212 struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
217 static void *mbt_ctx_bitmap(struct super_block *sb, ext4_group_t group)
219 struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
225 static int mbt_ctx_init(struct super_block *sb)
227 struct mbt_ctx *ctx = MBT_CTX(sb);
228 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
236 if (mbt_grp_ctx_init(sb, &ctx->grp_ctx[i]))
245 ext4_free_group_clusters_set(sb, &ctx->grp_ctx[0].desc,
246 EXT4_CLUSTERS_PER_GROUP(sb) - 1);
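Lines 245-246 shrink group 0 by one cluster. The matching bitmap update contains no "sb", so it is absent from the listing; a sketch of what pairs with the counter change, marking cluster 0 used so the first data block is never allocated:

/* reserve the first cluster of group 0, e.g. for super block metadata */
mb_set_bits(ctx->grp_ctx[0].bitmap_bh.b_data, 0, 1);
ext4_free_group_clusters_set(sb, &ctx->grp_ctx[0].desc,
			     EXT4_CLUSTERS_PER_GROUP(sb) - 1);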
256 static void mbt_ctx_release(struct super_block *sb)
258 struct mbt_ctx *ctx = MBT_CTX(sb);
259 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
267 ext4_read_block_bitmap_nowait_stub(struct super_block *sb, ext4_group_t block_group,
270 struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);
277 static int ext4_wait_block_bitmap_stub(struct super_block *sb,
292 ext4_get_group_desc_stub(struct super_block *sb, ext4_group_t block_group,
295 struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);
304 ext4_mb_mark_context_stub(handle_t *handle, struct super_block *sb, bool state,
309 struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
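The *_stub functions at lines 267-309 replace ext4's real bitmap and descriptor I/O so the allocator reads the in-memory group context instead of a disk. They would be wired in with KUnit's static-stub support; a sketch, where mbt_activate_stubs is a hypothetical grouping helper and the actual activation site is assumed:

#include <kunit/static_stub.h>

static void mbt_activate_stubs(struct kunit *test)	/* hypothetical helper */
{
	kunit_activate_static_stub(test, ext4_read_block_bitmap_nowait,
				   ext4_read_block_bitmap_nowait_stub);
	kunit_activate_static_stub(test, ext4_wait_block_bitmap,
				   ext4_wait_block_bitmap_stub);
	kunit_activate_static_stub(test, ext4_get_group_desc,
				   ext4_get_group_desc_stub);
	kunit_activate_static_stub(test, ext4_mb_mark_context,
				   ext4_mb_mark_context_stub);
}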
325 struct super_block *sb;
328 sb = mbt_ext4_alloc_super_block();
329 if (sb == NULL)
332 mbt_init_sb_layout(sb, layout);
334 ret = mbt_ctx_init(sb);
336 mbt_ext4_free_super_block(sb);
340 test->priv = sb;
355 if (mbt_mb_init(sb) != 0) {
356 mbt_ctx_release(sb);
357 mbt_ext4_free_super_block(sb);
366 struct super_block *sb = (struct super_block *)test->priv;
368 mbt_mb_release(sb);
369 mbt_ctx_release(sb);
370 mbt_ext4_free_super_block(sb);
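Lines 325-370 show the KUnit fixture: init builds the fake sb, exit tears it down in reverse order. A sketch of the init side; the param_value cast and anything beyond the listed fragments are assumptions:

static int mbt_kunit_init(struct kunit *test)
{
	struct mbt_ext4_block_layout *layout =
		(struct mbt_ext4_block_layout *)(test->param_value);
	struct super_block *sb;
	int ret;

	sb = mbt_ext4_alloc_super_block();
	if (sb == NULL)
		return -ENOMEM;

	mbt_init_sb_layout(sb, layout);

	ret = mbt_ctx_init(sb);
	if (ret != 0) {
		mbt_ext4_free_super_block(sb);
		return ret;
	}

	test->priv = sb;
	/* static stubs (lines 267-309) would be activated here */

	if (mbt_mb_init(sb) != 0) {
		mbt_ctx_release(sb);
		mbt_ext4_free_super_block(sb);
		return -ENOMEM;
	}

	return 0;
}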
375 struct super_block *sb = (struct super_block *)test->priv;
381 struct ext4_sb_info *sbi = EXT4_SB(sb);
387 inode->i_sb = sb;
391 ar.goal = ext4_group_first_block_no(sb, goal_group);
398 ar.goal = ext4_group_first_block_no(sb, goal_group);
405 mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
406 ar.goal = ext4_group_first_block_no(sb, goal_group);
409 ext4_group_first_block_no(sb, goal_group + 1), found,
411 ext4_group_first_block_no(sb, goal_group + 1), found);
414 for (i = goal_group; i < ext4_get_groups_count(sb); i++)
415 mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
416 ar.goal = ext4_group_first_block_no(sb, goal_group);
419 ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found,
421 ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found);
424 for (i = 0; i < ext4_get_groups_count(sb); i++)
425 mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
426 ar.goal = ext4_group_first_block_no(sb, goal_group);
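test_new_blocks_simple (lines 366-426) walks three scenarios: a free goal block is returned as-is, a fully used goal group pushes the hit to the next group's first block, and a fully used tail wraps the search around to group 0. A sketch of the baseline step; request fields other than ar.goal are assumptions:

struct ext4_allocation_request ar = {
	.inode = inode,	/* the kunit-allocated fake inode */
	.len   = 1,	/* assumed: one cluster per request */
};
ext4_fsblk_t found;
int err = 0;

/* baseline: a free goal block comes back unchanged */
ar.goal = ext4_group_first_block_no(sb, goal_group);
found = ext4_mb_new_blocks_simple(&ar, &err);
KUNIT_ASSERT_EQ(test, ar.goal, found);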
440 mbt_generate_test_ranges(struct super_block *sb, struct test_range *ranges,
446 max = EXT4_CLUSTERS_PER_GROUP(sb) / count;
458 validate_free_blocks_simple(struct kunit *test, struct super_block *sb,
463 ext4_grpblk_t bit, max = EXT4_CLUSTERS_PER_GROUP(sb);
466 for (i = 0; i < ext4_get_groups_count(sb); i++) {
470 bitmap = mbt_ctx_bitmap(sb, i);
476 bitmap = mbt_ctx_bitmap(sb, goal_group);
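validate_free_blocks_simple (lines 458-476) asserts that every other group is fully used and that exactly [start, start+len) is free in the goal group. A sketch of the goal-group check using mballoc's internal bit helpers, visible here because the test apparently shares mballoc's translation unit; the exact assertion form is assumed:

bitmap = mbt_ctx_bitmap(sb, goal_group);
bit = mb_find_next_zero_bit(bitmap, max, 0);
KUNIT_ASSERT_EQ(test, bit, start);		/* free run starts at start */
bit = mb_find_next_bit(bitmap, max, bit + 1);
KUNIT_ASSERT_EQ(test, bit, start + len);	/* and ends after len bits */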
488 struct super_block *sb = (struct super_block *)test->priv;
489 struct ext4_sb_info *sbi = EXT4_SB(sb);
496 inode->i_sb = sb;
501 block = ext4_group_first_block_no(sb, goal_group) +
504 validate_free_blocks_simple(test, sb, goal_group, start, len);
505 mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
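The call that actually frees sits between lines 501 and 504 and contains no "sb", so it is absent from the listing. Reconstructed, with the EXT4_C2B continuation of line 501 assumed:

block = ext4_group_first_block_no(sb, goal_group) +
	EXT4_C2B(sbi, start);
ext4_free_blocks_simple(inode, block, len);
validate_free_blocks_simple(test, sb, goal_group, start, len);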
510 struct super_block *sb = (struct super_block *)test->priv;
511 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
515 for (i = 0; i < ext4_get_groups_count(sb); i++)
516 mbt_ctx_mark_used(sb, i, 0, max);
518 mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
530 struct super_block *sb = (struct super_block *)test->priv;
543 bitmap = mbt_ctx_bitmap(sb, TEST_GOAL_GROUP);
544 memset(bitmap, 0, sb->s_blocksize);
548 max = EXT4_CLUSTERS_PER_GROUP(sb);
559 struct super_block *sb = (struct super_block *)test->priv;
565 mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
570 inode->i_sb = sb;
573 ac.ac_sb = sb;
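Line 573's ac.ac_sb assignment is one field of a hand-built allocation context that drives ext4_mb_mark_diskspace_used(). A sketch of the full setup; the ac_status and ac_inode values are assumptions consistent with an extent having already been found:

struct ext4_allocation_context ac = {
	.ac_status = AC_STATUS_FOUND,	/* pretend the scan already succeeded */
	.ac_sb	   = sb,
	.ac_inode  = inode,
};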
580 static void mbt_generate_buddy(struct super_block *sb, void *buddy,
583 struct ext4_sb_info *sbi = EXT4_SB(sb);
588 memset(buddy, 0xff, sb->s_blocksize);
590 bb_counters[MB_NUM_ORDERS(sb)]));
593 max = EXT4_CLUSTERS_PER_GROUP(sb);
614 for (order = 1; order < MB_NUM_ORDERS(sb) - 1; order++) {
634 max = EXT4_CLUSTERS_PER_GROUP(sb);
651 struct super_block *sb = (struct super_block *)test->priv;
662 for (i = 1; i < MB_NUM_ORDERS(sb); i++) {
672 do_test_generate_buddy(struct kunit *test, struct super_block *sb, void *bitmap,
678 mbt_generate_buddy(sb, mbt_buddy, bitmap, mbt_grp);
680 for (i = 0; i < MB_NUM_ORDERS(sb); i++)
684 memset(ext4_buddy, 0xff, sb->s_blocksize);
685 ext4_mb_generate_buddy(sb, ext4_buddy, bitmap, TEST_GOAL_GROUP,
688 KUNIT_ASSERT_EQ(test, memcmp(mbt_buddy, ext4_buddy, sb->s_blocksize),
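do_test_generate_buddy (lines 672-688) builds the buddy twice, once with the test's reference generator and once with ext4_mb_generate_buddy(), and the memcmp at line 688 demands bitwise equality. Beyond that, the per-order free-extent counters should also agree; a sketch of that check, where the second group-info name and the exact assertion form are assumptions:

for (i = 1; i < MB_NUM_ORDERS(sb); i++)
	KUNIT_ASSERT_EQ(test, mbt_grp->bb_counters[i],
			ext4_grp->bb_counters[i]);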
695 struct super_block *sb = (struct super_block *)test->priv;
701 bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
703 expected_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
705 generate_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
708 bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
710 generate_grp = ext4_get_group_info(sb, TEST_GOAL_GROUP);
713 mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
716 do_test_generate_buddy(test, sb, bitmap, expected_bb,
726 struct super_block *sb = (struct super_block *)test->priv;
738 ext4_lock_group(sb, TEST_GOAL_GROUP);
740 ext4_unlock_group(sb, TEST_GOAL_GROUP);
745 memset(buddy, 0xff, sb->s_blocksize);
746 for (i = 0; i < MB_NUM_ORDERS(sb); i++)
748 ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);
750 KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
758 struct super_block *sb = (struct super_block *)test->priv;
766 if (sb->s_blocksize > PAGE_SIZE)
769 bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
771 buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
774 bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
776 ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
779 grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
780 mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
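test_mb_mark_used (lines 758-780) loads the real buddy for the goal group and replays each generated range through mb_mark_used(). A sketch of the per-range step that follows the listing; the extent setup is assumed, and the validation afterwards regenerates the buddy from the shadow bitmap and memcmp()s it, as in lines 726-750:

struct ext4_free_extent ex = {
	.fe_start = ranges[i].start,
	.fe_len	  = ranges[i].len,
	.fe_group = TEST_GOAL_GROUP,
};

ext4_lock_group(sb, TEST_GOAL_GROUP);
mb_mark_used(&e4b, &ex);
ext4_unlock_group(sb, TEST_GOAL_GROUP);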
793 struct super_block *sb = (struct super_block *)test->priv;
800 ext4_lock_group(sb, e4b->bd_group);
802 ext4_unlock_group(sb, e4b->bd_group);
807 memset(buddy, 0xff, sb->s_blocksize);
808 for (i = 0; i < MB_NUM_ORDERS(sb); i++)
810 ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);
812 KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
821 struct super_block *sb = (struct super_block *)test->priv;
830 if (sb->s_blocksize > PAGE_SIZE)
833 bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
835 buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
838 bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
840 ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
844 ex.fe_len = EXT4_CLUSTERS_PER_GROUP(sb);
847 ext4_lock_group(sb, TEST_GOAL_GROUP);
849 ext4_unlock_group(sb, TEST_GOAL_GROUP);
852 memset(bitmap, 0xff, sb->s_blocksize);
854 mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
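test_mb_free_blocks (lines 821-854) inverts the previous test: lines 844-852 first consume the whole group through one full-group extent, then each generated range is handed back and the buddy revalidated. A sketch of the freeing step that follows the listing; passing a NULL inode is assumed acceptable here, on the reading that the helper only uses it for error reporting:

ext4_lock_group(sb, e4b.bd_group);
mb_free_blocks(NULL, &e4b, ranges[i].start, ranges[i].len);
ext4_unlock_group(sb, e4b.bd_group);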