Lines matching refs: block_group (identifier search over fs/btrfs/free-space-cache.c in the Linux kernel)

132 struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
135 struct btrfs_fs_info *fs_info = block_group->fs_info;
139 spin_lock(&block_group->lock);
140 if (block_group->inode)
141 inode = igrab(block_group->inode);
142 spin_unlock(&block_group->lock);
147 block_group->start);
151 spin_lock(&block_group->lock);
156 block_group->disk_cache_state = BTRFS_DC_CLEAR;
159 if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags))
160 block_group->inode = igrab(inode);
161 spin_unlock(&block_group->lock);
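
The group above (lookup_free_space_inode, lines 132-161) shows the cache-inode fast path: reuse a stashed inode under block_group->lock via igrab(), and use test_and_set_bit(BLOCK_GROUP_FLAG_IREF, ...) so only one long-lived reference is ever stored in the block group. Below is a minimal user-space sketch of that grab-under-lock, stash-once idiom; the pthread mutex stands in for the spinlock, the refcount for igrab(), and all names (igrab_model, lookup_cached, ...) are illustrative, not btrfs API.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct inode { atomic_int refs; };          /* stand-in for struct inode */

    struct block_group {
        pthread_mutex_t lock;                   /* models block_group->lock        */
        struct inode *inode;                    /* models the cached inode pointer */
        atomic_flag iref;                       /* models BLOCK_GROUP_FLAG_IREF    */
    };

    /* Model of igrab(): take another reference on a live inode. */
    static struct inode *igrab_model(struct inode *inode)
    {
        atomic_fetch_add(&inode->refs, 1);
        return inode;
    }

    /* Fast path first: reuse the cached inode; else stash "fresh" exactly once. */
    static struct inode *lookup_cached(struct block_group *bg, struct inode *fresh)
    {
        struct inode *inode = NULL;

        pthread_mutex_lock(&bg->lock);
        if (bg->inode)
            inode = igrab_model(bg->inode);
        pthread_mutex_unlock(&bg->lock);
        if (inode)
            return inode;

        pthread_mutex_lock(&bg->lock);
        if (!atomic_flag_test_and_set(&bg->iref))   /* only the first caller stashes */
            bg->inode = igrab_model(fresh);
        pthread_mutex_unlock(&bg->lock);
        return fresh;                               /* caller keeps its own reference */
    }

    int main(void)
    {
        struct inode disk = { .refs = 1 };
        struct block_group bg = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .inode = NULL,
            .iref = ATOMIC_FLAG_INIT,
        };

        struct inode *i1 = lookup_cached(&bg, &disk);   /* slow path, stashes   */
        struct inode *i2 = lookup_cached(&bg, &disk);   /* fast path, cache hit */
        printf("refs=%d same=%d\n", atomic_load(&i1->refs), i1 == i2);
        return 0;
    }
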
226 struct btrfs_block_group *block_group,
237 ino, block_group->start);
247 struct btrfs_block_group *block_group)
258 inode = lookup_free_space_inode(block_group, path);
271 spin_lock(&block_group->lock);
272 if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) {
273 block_group->inode = NULL;
274 spin_unlock(&block_group->lock);
277 spin_unlock(&block_group->lock);
284 key.offset = block_group->start;
299 struct btrfs_block_group *block_group,
315 if (block_group) {
324 if (!list_empty(&block_group->io_list)) {
325 list_del_init(&block_group->io_list);
327 btrfs_wait_cache_io(trans, block_group, path);
328 btrfs_put_block_group(block_group);
335 spin_lock(&block_group->lock);
336 block_group->disk_cache_state = BTRFS_DC_CLEAR;
337 spin_unlock(&block_group->lock);
694 struct btrfs_block_group *block_group = ctl->block_group;
698 u64 size = block_group->length;
705 btrfs_err(block_group->fs_info,
707 block_group->start, block_group->length,
900 static int copy_free_space_cache(struct btrfs_block_group *block_group,
916 ret = btrfs_add_free_space(block_group, offset, bytes);
926 ret = btrfs_add_free_space(block_group, offset,
941 int load_free_space_cache(struct btrfs_block_group *block_group)
943 struct btrfs_fs_info *fs_info = block_group->fs_info;
944 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
950 u64 used = block_group->used;
957 btrfs_init_free_space_ctl(block_group, &tmp_ctl);
963 spin_lock(&block_group->lock);
964 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
965 spin_unlock(&block_group->lock);
968 spin_unlock(&block_group->lock);
995 inode = lookup_free_space_inode(block_group, path);
1002 spin_lock(&block_group->lock);
1003 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
1004 spin_unlock(&block_group->lock);
1008 spin_unlock(&block_group->lock);
1019 path, block_group->start);
1024 matched = (tmp_ctl.free_space == (block_group->length - used -
1025 block_group->bytes_super));
1029 ret = copy_free_space_cache(block_group, &tmp_ctl);
1047 block_group->start);
1053 spin_lock(&block_group->lock);
1054 block_group->disk_cache_state = BTRFS_DC_CLEAR;
1055 spin_unlock(&block_group->lock);
1060 block_group->start);
1064 btrfs_discard_update_discardable(block_group);
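
load_free_space_cache (lines 941-1064) sanity-checks the loaded cache before trusting it: the free space recorded on disk must equal the block group length minus the allocated bytes minus the space reserved for superblock copies (lines 1024-1025); otherwise disk_cache_state is set back to BTRFS_DC_CLEAR and the cache is rebuilt. A small standalone sketch of that check, with made-up sizes:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Model of the "matched" test in load_free_space_cache(): a cache that
     * disagrees with the block group accounting is thrown away.
     */
    static bool cache_matches(uint64_t cached_free, uint64_t length,
                              uint64_t used, uint64_t bytes_super)
    {
        return cached_free == length - used - bytes_super;
    }

    int main(void)
    {
        uint64_t length = 1024ULL << 20;   /* 1 GiB block group (made up) */
        uint64_t used = 300ULL << 20;      /* 300 MiB allocated           */
        uint64_t bytes_super = 0;          /* no superblock copy here     */

        printf("good cache:  %d\n",
               cache_matches(724ULL << 20, length, used, bytes_super));
        printf("stale cache: %d\n",
               cache_matches(700ULL << 20, length, used, bytes_super));
        return 0;
    }
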
1073 struct btrfs_block_group *block_group,
1083 /* Get the cluster for this block_group if it exists */
1084 if (block_group && !list_empty(&block_group->cluster_list)) {
1085 cluster = list_entry(block_group->cluster_list.next,
1202 struct btrfs_block_group *block_group,
1210 if (!block_group)
1222 start = block_group->start;
1224 while (start < block_group->start + block_group->length) {
1231 if (extent_start >= block_group->start + block_group->length)
1235 extent_end = min(block_group->start + block_group->length,
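
write_pinned_extent_entries (lines 1202-1235) walks the filesystem-wide pinned extent ranges and clips each one to the block group's [start, start + length) window before emitting a cache entry, stopping once a range begins past the end of the group. A compact standalone model of that clipping loop, with made-up ranges:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }
    static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

    int main(void)
    {
        uint64_t bg_start = 1000, bg_len = 500;          /* made-up units */
        uint64_t bg_end = bg_start + bg_len;
        /* filesystem-wide pinned extents: {start, end} pairs, sorted */
        uint64_t pinned[][2] = { {900, 1100}, {1200, 1300}, {1450, 1700} };

        for (unsigned i = 0; i < sizeof(pinned) / sizeof(pinned[0]); i++) {
            uint64_t es = pinned[i][0], ee = pinned[i][1];

            if (es >= bg_end)                 /* starts past the group: stop */
                break;
            es = max_u64(es, bg_start);       /* clip the head */
            ee = min_u64(ee, bg_end);         /* clip the tail */
            printf("write entry [%llu, %llu)\n",
                   (unsigned long long)es, (unsigned long long)ee);
        }
        return 0;
    }
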
1300 struct btrfs_block_group *block_group,
1322 if (block_group)
1325 block_group->start, ret);
1329 if (block_group) {
1334 spin_lock(&block_group->lock);
1341 if (!ret && list_empty(&block_group->dirty_list))
1342 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
1344 block_group->disk_cache_state = BTRFS_DC_ERROR;
1346 spin_unlock(&block_group->lock);
1357 struct btrfs_block_group *block_group,
1360 return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
1361 block_group, &block_group->io_ctl,
1362 path, block_group->start);
1370 * @block_group: block_group for this cache if it belongs to a block_group
1380 struct btrfs_block_group *block_group,
1399 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
1400 down_write(&block_group->data_rwsem);
1401 spin_lock(&block_group->lock);
1402 if (block_group->delalloc_bytes) {
1403 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
1404 spin_unlock(&block_group->lock);
1405 up_write(&block_group->data_rwsem);
1411 spin_unlock(&block_group->lock);
1428 block_group, &entries, &bitmaps,
1441 ret = write_pinned_extent_entries(trans, block_group, io_ctl, &entries);
1466 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
1467 up_write(&block_group->data_rwsem);
1501 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
1502 up_write(&block_group->data_rwsem);
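
In __btrfs_write_out_cache (lines 1370-1502), a data block group with outstanding delayed allocation is not written: under data_rwsem and block_group->lock, a nonzero delalloc_bytes makes the function bail out early (lines 1399-1405), presumably because the free space could still change under writeback and the cache would go stale. A minimal sketch of that bail-out predicate; struct bg_model and the flag value are stand-ins:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_GROUP_DATA 0x1   /* stand-in for BTRFS_BLOCK_GROUP_DATA */

    struct bg_model {
        uint64_t flags;
        uint64_t delalloc_bytes;   /* bytes with delayed allocation pending */
    };

    /* Skip the cache writeout for data groups with delalloc in flight. */
    static bool should_skip_cache_writeout(const struct bg_model *bg)
    {
        return (bg->flags & BLOCK_GROUP_DATA) && bg->delalloc_bytes != 0;
    }

    int main(void)
    {
        struct bg_model busy = { BLOCK_GROUP_DATA, 4096 };
        struct bg_model idle = { BLOCK_GROUP_DATA, 0 };
        printf("busy: skip=%d\n", should_skip_cache_writeout(&busy));
        printf("idle: skip=%d\n", should_skip_cache_writeout(&idle));
        return 0;
    }
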
1518 struct btrfs_block_group *block_group,
1522 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1526 spin_lock(&block_group->lock);
1527 if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
1528 spin_unlock(&block_group->lock);
1531 spin_unlock(&block_group->lock);
1533 inode = lookup_free_space_inode(block_group, path);
1537 ret = __btrfs_write_out_cache(inode, ctl, block_group,
1538 &block_group->io_ctl, trans);
1542 block_group->start, ret);
1543 spin_lock(&block_group->lock);
1544 block_group->disk_cache_state = BTRFS_DC_ERROR;
1545 spin_unlock(&block_group->lock);
1547 block_group->io_ctl.inode = NULL;
2261 struct btrfs_block_group *block_group = ctl->block_group;
2262 struct btrfs_fs_info *fs_info = block_group->fs_info;
2266 if (btrfs_should_fragment_free_space(block_group))
2302 if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length)
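
Line 2302 is the sizing heuristic for bitmaps: a single bitmap entry covers BITS_PER_BITMAP * ctl->unit bytes, and a block group smaller than half of that coverage is tracked with plain extent entries instead. A worked standalone example, assuming 4 KiB pages (so BITS_PER_BITMAP = 32768) and a 4 KiB sectorsize, which puts one bitmap's coverage at 128 MiB and the cutoff at 64 MiB:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t bits_per_bitmap = 4096 * 8;    /* PAGE_SIZE * 8, 4 KiB pages */
        uint64_t unit = 4096;                   /* sectorsize                 */
        uint64_t coverage = bits_per_bitmap * unit;   /* 128 MiB */

        uint64_t lengths[] = { 32ULL << 20, 64ULL << 20, 256ULL << 20 };
        for (unsigned i = 0; i < 3; i++) {
            bool too_small = (coverage >> 1) > lengths[i];
            printf("%4llu MiB block group: %s bitmaps\n",
                   (unsigned long long)(lengths[i] >> 20),
                   too_small ? "avoid" : "allow");
        }
        return 0;
    }
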
2316 struct btrfs_block_group *block_group = NULL;
2330 block_group = ctl->block_group;
2337 if (block_group && !list_empty(&block_group->cluster_list)) {
2342 cluster = list_entry(block_group->cluster_list.next,
2622 static int __btrfs_add_free_space(struct btrfs_block_group *block_group,
2626 struct btrfs_fs_info *fs_info = block_group->fs_info;
2627 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2676 btrfs_discard_update_discardable(block_group);
2685 btrfs_discard_check_filter(block_group, filter_bytes);
2686 btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
2692 static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
2695 struct btrfs_space_info *sinfo = block_group->space_info;
2696 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2697 u64 offset = bytenr - block_group->start;
2700 bool initial = (size == block_group->length);
2703 WARN_ON(!initial && offset + size > block_group->zone_capacity);
2712 to_free = block_group->zone_capacity;
2713 else if (offset >= block_group->alloc_offset)
2715 else if (offset + size <= block_group->alloc_offset)
2718 to_free = offset + size - block_group->alloc_offset;
2726 if (!block_group->ro)
2727 block_group->zone_unusable += to_unusable;
2730 spin_lock(&block_group->lock);
2731 block_group->alloc_offset -= size;
2732 spin_unlock(&block_group->lock);
2735 reclaimable_unusable = block_group->zone_unusable -
2736 (block_group->length - block_group->zone_capacity);
2738 if (block_group->zone_unusable == block_group->length) {
2739 btrfs_mark_bg_unused(block_group);
2742 mult_perc(block_group->zone_capacity, bg_reclaim_threshold)) {
2743 btrfs_mark_bg_to_reclaim(block_group);
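
__btrfs_add_free_space_zoned (lines 2692-2743) splits a freed range around the zone write pointer: bytes below alloc_offset cannot be rewritten in place on a zoned device, so they become zone_unusable, and only the part at or above alloc_offset counts as free space; when reclaimable_unusable (zone_unusable minus the capacity shortfall, lines 2735-2736) crosses the reclaim threshold, the group is queued for reclaim. A standalone sketch of the steady-state split (the !used and initial special cases from the source are omitted):

    #include <stdint.h>
    #include <stdio.h>

    /* Only the part of the freed range at or above alloc_offset is free. */
    static uint64_t freed_to_free_space(uint64_t offset, uint64_t size,
                                        uint64_t alloc_offset)
    {
        if (offset >= alloc_offset)            /* entirely above write pointer */
            return size;
        if (offset + size <= alloc_offset)     /* entirely below write pointer */
            return 0;
        return offset + size - alloc_offset;   /* straddles the write pointer  */
    }

    int main(void)
    {
        uint64_t alloc_offset = 100;    /* made up, bytes from bg start */
        struct { uint64_t off, size; } cases[] = { {120, 30}, {40, 20}, {90, 40} };

        for (unsigned i = 0; i < 3; i++) {
            uint64_t to_free = freed_to_free_space(cases[i].off, cases[i].size,
                                                   alloc_offset);
            printf("free [%llu,+%llu): to_free=%llu to_unusable=%llu\n",
                   (unsigned long long)cases[i].off,
                   (unsigned long long)cases[i].size,
                   (unsigned long long)to_free,
                   (unsigned long long)(cases[i].size - to_free));
        }
        return 0;
    }
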
2749 int btrfs_add_free_space(struct btrfs_block_group *block_group,
2754 if (btrfs_is_zoned(block_group->fs_info))
2755 return __btrfs_add_free_space_zoned(block_group, bytenr, size,
2758 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC))
2761 return __btrfs_add_free_space(block_group, bytenr, size, trim_state);
2764 int btrfs_add_free_space_unused(struct btrfs_block_group *block_group,
2767 if (btrfs_is_zoned(block_group->fs_info))
2768 return __btrfs_add_free_space_zoned(block_group, bytenr, size,
2771 return btrfs_add_free_space(block_group, bytenr, size);
2779 int btrfs_add_free_space_async_trimmed(struct btrfs_block_group *block_group,
2784 if (btrfs_is_zoned(block_group->fs_info))
2785 return __btrfs_add_free_space_zoned(block_group, bytenr, size,
2788 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) ||
2789 btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
2792 return __btrfs_add_free_space(block_group, bytenr, size, trim_state);
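
The three btrfs_add_free_space*() wrappers (lines 2749-2792) differ only in how they derive the trim state from the mount options before calling __btrfs_add_free_space(), with zoned filesystems diverted to the zoned path unconditionally. A small decision-table sketch; pick_trim_state and its parameters are illustrative names, not btrfs API:

    #include <stdbool.h>
    #include <stdio.h>

    enum trim_state { TRIM_UNTRIMMED, TRIM_TRIMMED };  /* models btrfs_trim_state */

    /*
     * With -o discard (sync) every freed range is discarded on the spot, so
     * it re-enters the tree already trimmed; the _async_trimmed variant also
     * treats -o discard=async ranges as trimmed.
     */
    static enum trim_state pick_trim_state(bool discard_sync, bool discard_async,
                                           bool async_counts_as_trimmed)
    {
        if (discard_sync)
            return TRIM_TRIMMED;
        if (discard_async && async_counts_as_trimmed)
            return TRIM_TRIMMED;
        return TRIM_UNTRIMMED;
    }

    int main(void)
    {
        printf("add_free_space, -o discard:       %d\n",
               pick_trim_state(true, false, false));
        printf("add_free_space, -o discard=async: %d\n",
               pick_trim_state(false, true, false));
        printf("async_trimmed,  -o discard=async: %d\n",
               pick_trim_state(false, true, true));
        return 0;
    }
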
2795 int btrfs_remove_free_space(struct btrfs_block_group *block_group,
2798 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2803 if (btrfs_is_zoned(block_group->fs_info)) {
2815 if (block_group->start + block_group->alloc_offset <
2817 block_group->alloc_offset =
2818 offset + bytes - block_group->start;
2887 ret = __btrfs_add_free_space(block_group,
2902 btrfs_discard_update_discardable(block_group);
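
The zoned branch of btrfs_remove_free_space (lines 2803-2818) treats removal as advancing the zone write pointer: if the removed range reaches past start + alloc_offset, alloc_offset is bumped to the end of that range. A standalone sketch of that bump, with made-up offsets:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t advance_alloc_offset(uint64_t bg_start, uint64_t alloc_offset,
                                         uint64_t offset, uint64_t bytes)
    {
        /* Removed range ends past the write pointer: move the pointer up. */
        if (bg_start + alloc_offset < offset + bytes)
            alloc_offset = offset + bytes - bg_start;
        return alloc_offset;
    }

    int main(void)
    {
        uint64_t bg_start = 1 << 20;    /* made-up values */
        uint64_t ao = 4096;

        ao = advance_alloc_offset(bg_start, ao, bg_start + 4096, 8192);
        printf("alloc_offset=%llu\n", (unsigned long long)ao);  /* 12288 */
        return 0;
    }
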
2908 void btrfs_dump_free_space(struct btrfs_block_group *block_group,
2911 struct btrfs_fs_info *fs_info = block_group->fs_info;
2912 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2923 block_group->zone_capacity - block_group->alloc_offset,
2925 &block_group->runtime_flags));
2932 if (info->bytes >= bytes && !block_group->ro)
2940 list_empty(&block_group->cluster_list) ? "no" : "yes");
2946 void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group,
2949 struct btrfs_fs_info *fs_info = block_group->fs_info;
2953 ctl->start = block_group->start;
2954 ctl->block_group = block_group;
2975 struct btrfs_block_group *block_group,
2978 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2984 if (cluster->block_group != block_group) {
2989 cluster->block_group = NULL;
3026 btrfs_put_block_group(block_group);
3029 void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
3031 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3036 while ((head = block_group->cluster_list.next) !=
3037 &block_group->cluster_list) {
3041 WARN_ON(cluster->block_group != block_group);
3042 __btrfs_return_cluster_to_free_space(block_group, cluster);
3047 btrfs_discard_update_discardable(block_group);
3053 * Walk @block_group's free space rb_tree to determine if everything is trimmed.
3055 bool btrfs_is_free_space_trimmed(struct btrfs_block_group *block_group)
3057 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3080 u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
3084 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3086 &block_group->fs_info->discard_ctl;
3093 bool use_bytes_index = (offset == block_group->start);
3095 ASSERT(!btrfs_is_zoned(block_group->fs_info));
3099 block_group->full_stripe_len, max_extent_size,
3132 btrfs_discard_update_discardable(block_group);
3136 __btrfs_add_free_space(block_group, align_gap, align_gap_len,
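
btrfs_find_space_for_alloc (lines 3080-3136) may have to round the start of a free extent up to an alignment boundary (the full stripe length is passed in at line 3099); the skipped head of the extent, align_gap of length align_gap_len, is handed straight back via __btrfs_add_free_space() at line 3136. A standalone sketch of that round-up and give-back, with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t entry_offset = 70000;  /* start of the free extent (made up) */
        uint64_t align = 65536;         /* e.g. full stripe length            */

        /* Round up to the next alignment boundary. */
        uint64_t aligned = (entry_offset + align - 1) / align * align;
        uint64_t gap_start = entry_offset;
        uint64_t gap_len = aligned - entry_offset;

        printf("allocate at %llu, give back [%llu, +%llu)\n",
               (unsigned long long)aligned,
               (unsigned long long)gap_start,
               (unsigned long long)gap_len);
        return 0;
    }
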
3150 struct btrfs_block_group *block_group,
3157 if (!block_group) {
3158 block_group = cluster->block_group;
3159 if (!block_group) {
3163 } else if (cluster->block_group != block_group) {
3168 btrfs_get_block_group(block_group);
3171 ctl = block_group->free_space_ctl;
3175 __btrfs_return_cluster_to_free_space(block_group, cluster);
3178 btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group);
3181 btrfs_put_block_group(block_group);
3184 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group,
3190 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3217 u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
3221 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3223 &block_group->fs_info->discard_ctl;
3228 ASSERT(!btrfs_is_zoned(block_group->fs_info));
3234 if (cluster->block_group != block_group)
3258 ret = btrfs_alloc_from_bitmap(block_group,
3316 static int btrfs_bitmap_cluster(struct btrfs_block_group *block_group,
3322 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3399 trace_btrfs_setup_cluster(block_group, cluster,
3410 setup_cluster_no_bitmap(struct btrfs_block_group *block_group,
3415 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3494 trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
3503 setup_cluster_bitmap(struct btrfs_block_group *block_group,
3508 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3532 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
3553 int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
3557 struct btrfs_fs_info *fs_info = block_group->fs_info;
3558 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3574 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
3596 if (cluster->block_group) {
3601 trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
3604 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
3608 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
3617 btrfs_get_block_group(block_group);
3619 &block_group->cluster_list);
3620 cluster->block_group = block_group;
3622 trace_btrfs_failed_cluster_setup(block_group);
3642 cluster->block_group = NULL;
3645 static int do_trimming(struct btrfs_block_group *block_group,
3651 struct btrfs_space_info *space_info = block_group->space_info;
3652 struct btrfs_fs_info *fs_info = block_group->fs_info;
3653 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3662 spin_lock(&block_group->lock);
3663 if (!block_group->ro) {
3664 block_group->reserved += reserved_bytes;
3668 spin_unlock(&block_group->lock);
3679 __btrfs_add_free_space(block_group, reserved_start,
3683 __btrfs_add_free_space(block_group, end, reserved_end - end,
3685 __btrfs_add_free_space(block_group, start, bytes, trim_state);
3691 spin_lock(&block_group->lock);
3692 if (block_group->ro)
3694 block_group->reserved -= reserved_bytes;
3696 spin_unlock(&block_group->lock);
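
do_trimming (lines 3645-3696) reserves a whole free extent [reserved_start, reserved_end) under block_group->lock so the allocator cannot hand it out, discards only [start, start + bytes), and then re-adds the three pieces (lines 3679-3685): the untouched head and tail with their previous trim state, the middle marked trimmed. A standalone sketch of that give-back, with made-up ranges:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t reserved_start = 1000, reserved_end = 5000;  /* made up      */
        uint64_t start = 2048, bytes = 2048;                  /* discard range */
        uint64_t end = start + bytes;

        if (reserved_start < start)
            printf("re-add head   [%llu, %llu) previous trim state\n",
                   (unsigned long long)reserved_start, (unsigned long long)start);
        if (end < reserved_end)
            printf("re-add tail   [%llu, %llu) previous trim state\n",
                   (unsigned long long)end, (unsigned long long)reserved_end);
        printf("re-add middle [%llu, %llu) trimmed\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
    }
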
3706 static int trim_no_bitmap(struct btrfs_block_group *block_group,
3711 &block_group->fs_info->discard_ctl;
3712 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3795 ret = do_trimming(block_group, total_trimmed, start, bytes,
3799 block_group->discard_cursor = start + bytes;
3804 block_group->discard_cursor = start;
3819 block_group->discard_cursor = btrfs_block_group_end(block_group);
3872 static int trim_bitmaps(struct btrfs_block_group *block_group,
3877 &block_group->fs_info->discard_ctl;
3878 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3894 block_group->discard_cursor =
3895 btrfs_block_group_end(block_group);
3982 ret = do_trimming(block_group, total_trimmed, start, bytes,
3986 block_group->discard_cursor =
3987 btrfs_block_group_end(block_group);
3997 block_group->discard_cursor = start;
4010 block_group->discard_cursor = end;
4016 int btrfs_trim_block_group(struct btrfs_block_group *block_group,
4019 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
4023 ASSERT(!btrfs_is_zoned(block_group->fs_info));
4027 spin_lock(&block_group->lock);
4028 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
4029 spin_unlock(&block_group->lock);
4032 btrfs_freeze_block_group(block_group);
4033 spin_unlock(&block_group->lock);
4035 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, false);
4039 ret = trim_bitmaps(block_group, trimmed, start, end, minlen, 0, false);
4045 btrfs_unfreeze_block_group(block_group);
4049 int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,
4057 spin_lock(&block_group->lock);
4058 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
4059 spin_unlock(&block_group->lock);
4062 btrfs_freeze_block_group(block_group);
4063 spin_unlock(&block_group->lock);
4065 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, async);
4066 btrfs_unfreeze_block_group(block_group);
4071 int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
4079 spin_lock(&block_group->lock);
4080 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
4081 spin_unlock(&block_group->lock);
4084 btrfs_freeze_block_group(block_group);
4085 spin_unlock(&block_group->lock);
4087 ret = trim_bitmaps(block_group, trimmed, start, end, minlen, maxlen,
4090 btrfs_unfreeze_block_group(block_group);
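
btrfs_trim_block_group and its _extents/_bitmaps variants (lines 4016-4090) share one skeleton: under block_group->lock, bail out if the BLOCK_GROUP_FLAG_REMOVED bit is set, otherwise freeze the group, drop the lock for the slow trim work, and unfreeze afterwards so a concurrent removal cannot free the group mid-trim. A user-space model of that skeleton; struct bg_model and trim_one are stand-ins, and the real freeze/unfreeze helpers do their own locking:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct bg_model {
        pthread_mutex_t lock;
        bool removed;          /* models BLOCK_GROUP_FLAG_REMOVED */
        int frozen;            /* models the freeze count         */
    };

    static int trim_one(struct bg_model *bg)
    {
        pthread_mutex_lock(&bg->lock);
        if (bg->removed) {
            pthread_mutex_unlock(&bg->lock);
            return 0;                     /* group is gone, nothing to trim */
        }
        bg->frozen++;                     /* btrfs_freeze_block_group()     */
        pthread_mutex_unlock(&bg->lock);

        printf("trimming...\n");          /* trim_no_bitmap()/trim_bitmaps() */

        pthread_mutex_lock(&bg->lock);
        bg->frozen--;                     /* btrfs_unfreeze_block_group()   */
        pthread_mutex_unlock(&bg->lock);
        return 0;
    }

    int main(void)
    {
        struct bg_model bg = { PTHREAD_MUTEX_INITIALIZER, false, 0 };
        return trim_one(&bg);
    }
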
4103 struct btrfs_block_group *block_group;
4111 block_group = rb_entry(node, struct btrfs_block_group, cache_node);
4112 ret = btrfs_remove_free_space_inode(trans, NULL, block_group);