Lines Matching refs:map

1821 struct btrfs_chunk_map *map;
1823 map = rb_entry(n, struct btrfs_chunk_map, rb_node);
1824 ret = map->start + map->chunk_len;
3061 struct btrfs_chunk_map *map;
3065 map = rb_entry(node, struct btrfs_chunk_map, rb_node);
3067 prev_map = map;
3069 if (logical < map->start) {
3071 } else if (logical >= map->start + map->chunk_len) {
3074 refcount_inc(&map->refs);
3075 return map;
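
The lookup at 3061-3075 walks the mapping tree comparing the logical address against each node's [start, start + chunk_len) range and takes a reference on a hit. A minimal userspace sketch of that shape, assuming a plain binary search tree in place of the kernel's cached rbtree and an int in place of refcount_t (all names here are hypothetical):

#include <stdint.h>
#include <stdio.h>

struct chunk_map {
	uint64_t start;
	uint64_t chunk_len;
	int refs;
	struct chunk_map *left, *right;
};

/* Descend by comparing logical against each node's [start, start + len). */
static struct chunk_map *find_chunk_map(struct chunk_map *node, uint64_t logical)
{
	while (node) {
		if (logical < node->start)
			node = node->left;
		else if (logical >= node->start + node->chunk_len)
			node = node->right;
		else {
			node->refs++;	/* caller must drop this reference */
			return node;
		}
	}
	return NULL;
}

int main(void)
{
	struct chunk_map a = { .start = 1024, .chunk_len = 512, .refs = 1 };

	printf("%s\n", find_chunk_map(&a, 1200) ? "found" : "miss");	/* found */
	return 0;
}

The real function also remembers the last node visited (prev_map at 3067) so callers can find neighbors on a miss; the sketch omits that.
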
3121 struct btrfs_chunk_map *map;
3124 map = btrfs_find_chunk_map_nolock(fs_info, logical, length);
3127 return map;
3141 struct btrfs_chunk_map *map;
3143 map = btrfs_find_chunk_map(fs_info, logical, length);
3145 if (unlikely(!map)) {
3147 "unable to find chunk map for logical %llu length %llu",
3152 if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) {
3154 "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
3155 logical, logical + length, map->start,
3156 map->start + map->chunk_len);
3157 btrfs_free_chunk_map(map);
3162 return map;
3166 struct btrfs_chunk_map *map, u64 chunk_offset)
3177 for (i = 0; i < map->num_stripes; i++) {
3180 ret = btrfs_update_device(trans, map->stripes[i].dev);
3191 struct btrfs_chunk_map *map;
3196 map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3197 if (IS_ERR(map)) {
3204 return PTR_ERR(map);
3218 for (i = 0; i < map->num_stripes; i++) {
3219 struct btrfs_device *device = map->stripes[i].dev;
3221 map->stripes[i].physical,
3250 * that replaces the device object associated with the map's stripes,
3264 check_system_chunk(trans, map->type);
3266 ret = remove_chunk_item(trans, map, chunk_offset);
3298 ret = remove_chunk_item(trans, map, chunk_offset);
3308 trace_btrfs_chunk_free(fs_info, map, chunk_offset, map->chunk_len);
3310 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3327 ret = btrfs_remove_block_group(trans, map);
3339 btrfs_free_chunk_map(map);
5241 /* sub_stripes info for map */
5535 static void chunk_map_device_set_bits(struct btrfs_chunk_map *map, unsigned int bits)
5537 for (int i = 0; i < map->num_stripes; i++) {
5538 struct btrfs_io_stripe *stripe = &map->stripes[i];
5542 stripe->physical + map->stripe_size - 1,
5547 static void chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned int bits)
5549 for (int i = 0; i < map->num_stripes; i++) {
5550 struct btrfs_io_stripe *stripe = &map->stripes[i];
5554 stripe->physical + map->stripe_size - 1,
5560 void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
5563 rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
5564 RB_CLEAR_NODE(&map->rb_node);
5565 chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
5569 btrfs_free_chunk_map(map);
5573 int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map)
5587 if (map->start < entry->start) {
5589 } else if (map->start > entry->start) {
5597 rb_link_node(&map->rb_node, parent, p);
5598 rb_insert_color_cached(&map->rb_node, &fs_info->mapping_tree, leftmost);
5599 chunk_map_device_set_bits(map, CHUNK_ALLOCATED);
5600 chunk_map_device_clear_bits(map, CHUNK_TRIMMED);
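
The insert at 5573-5600 is a standard rbtree descent keyed on map->start that rejects duplicates, then links the node and flips the per-device allocation bits. Reusing struct chunk_map from the sketch above, the descent looks roughly like this, with a plain BST standing in for rb_link_node()/rb_insert_color_cached() and -1 standing in for -EEXIST:

static int add_chunk_map(struct chunk_map **root, struct chunk_map *map)
{
	struct chunk_map **p = root;

	while (*p) {
		if (map->start < (*p)->start)
			p = &(*p)->left;
		else if (map->start > (*p)->start)
			p = &(*p)->right;
		else
			return -1;	/* a map with this start already exists */
	}
	map->left = map->right = NULL;
	*p = map;	/* kernel: rb_link_node() + rb_insert_color_cached() */
	return 0;
}
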
5609 struct btrfs_chunk_map *map;
5611 map = kmalloc(btrfs_chunk_map_size(num_stripes), gfp);
5612 if (!map)
5615 refcount_set(&map->refs, 1);
5616 RB_CLEAR_NODE(&map->rb_node);
5618 return map;
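
btrfs_chunk_map_size() at 5611 presumably sizes the header plus a flexible stripes[] array, one btrfs_io_stripe per stripe. A hedged standalone equivalent of that allocation pattern (the kernel uses kmalloc and initializes fields explicitly; calloc keeps the sketch short, and the struct names are invented):

#include <stdlib.h>

struct io_stripe {
	void *dev;
	unsigned long long physical;
};

struct chunk_map_fx {
	int num_stripes;
	struct io_stripe stripes[];	/* flexible array member */
};

static struct chunk_map_fx *alloc_chunk_map(int num_stripes)
{
	/* header plus num_stripes trailing stripe slots, zeroed */
	struct chunk_map_fx *map =
		calloc(1, sizeof(*map) + num_stripes * sizeof(map->stripes[0]));

	if (map)
		map->num_stripes = num_stripes;
	return map;
}
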
5626 struct btrfs_chunk_map *map;
5634 map = btrfs_alloc_chunk_map(ctl->num_stripes, GFP_NOFS);
5635 if (!map)
5638 map->start = start;
5639 map->chunk_len = ctl->chunk_size;
5640 map->stripe_size = ctl->stripe_size;
5641 map->type = type;
5642 map->io_align = BTRFS_STRIPE_LEN;
5643 map->io_width = BTRFS_STRIPE_LEN;
5644 map->sub_stripes = ctl->sub_stripes;
5645 map->num_stripes = ctl->num_stripes;
5650 map->stripes[s].dev = devices_info[i].dev;
5651 map->stripes[s].physical = devices_info[i].dev_offset +
5656 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5658 ret = btrfs_add_chunk_map(info, map);
5660 btrfs_free_chunk_map(map);
5666 btrfs_remove_chunk_map(info, map);
5670 for (int i = 0; i < map->num_stripes; i++) {
5671 struct btrfs_device *dev = map->stripes[i].dev;
5680 atomic64_sub(ctl->stripe_size * map->num_stripes,
5762 struct btrfs_chunk_map *map;
5776 * that replaces the device object associated with the map's stripes,
5791 map = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
5792 if (IS_ERR(map)) {
5793 ret = PTR_ERR(map);
5798 item_size = btrfs_chunk_item_size(map->num_stripes);
5807 for (i = 0; i < map->num_stripes; i++) {
5808 struct btrfs_device *device = map->stripes[i].dev;
5816 for (i = 0; i < map->num_stripes; i++) {
5817 struct btrfs_device *device = map->stripes[i].dev;
5818 const u64 dev_offset = map->stripes[i].physical;
5829 btrfs_set_stack_chunk_type(chunk, map->type);
5830 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5834 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5846 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5854 btrfs_free_chunk_map(map);
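
item_size at 5798 comes from btrfs_chunk_item_size(); as far as I recall, the on-disk chunk item embeds its first btrfs_stripe, so the size is the header plus num_stripes - 1 extra stripes. A sketch of that sizing over hypothetical, heavily trimmed stand-ins for the on-disk structures:

#include <stddef.h>

struct stripe_disk { unsigned long long devid, offset; unsigned char dev_uuid[16]; };
struct chunk_disk {
	unsigned long long length;	/* ...most header fields elided */
	unsigned short num_stripes;
	struct stripe_disk stripe;	/* first stripe embedded in the item */
};

static size_t chunk_item_size(int num_stripes)
{
	return sizeof(struct chunk_disk) +
	       (num_stripes - 1) * sizeof(struct stripe_disk);
}
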
5899 static inline int btrfs_chunk_max_errors(struct btrfs_chunk_map *map)
5901 const int index = btrfs_bg_flags_to_raid_index(map->type);
5908 struct btrfs_chunk_map *map;
5913 map = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5914 if (IS_ERR(map))
5917 for (i = 0; i < map->num_stripes; i++) {
5919 &map->stripes[i].dev->dev_state)) {
5924 &map->stripes[i].dev->dev_state)) {
5934 if (miss_ndevs > btrfs_chunk_max_errors(map))
5937 btrfs_free_chunk_map(map);
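
btrfs_chunk_max_errors() at 5899-5901 indexes btrfs_raid_array[] by profile, and the check at 5934 compares the count of missing devices against that bound. A condensed table of the tolerated whole-device failures per profile, matching my understanding of the kernel values (the enum here is invented for the sketch):

enum raid_profile {
	R_SINGLE, R_DUP, R_RAID0,	/* no whole-device loss survivable */
	R_RAID1, R_RAID10, R_RAID5,	/* one device */
	R_RAID1C3, R_RAID6,		/* two devices */
	R_RAID1C4,			/* three devices */
};

static int tolerated_failures(enum raid_profile p)
{
	switch (p) {
	case R_RAID1: case R_RAID10: case R_RAID5:
		return 1;
	case R_RAID1C3: case R_RAID6:
		return 2;
	case R_RAID1C4:
		return 3;
	default:
		return 0;	/* DUP: both copies live on one device */
	}
}
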
5945 struct btrfs_chunk_map *map;
5949 map = rb_entry(node, struct btrfs_chunk_map, rb_node);
5950 rb_erase_cached(&map->rb_node, &fs_info->mapping_tree);
5951 RB_CLEAR_NODE(&map->rb_node);
5952 chunk_map_device_clear_bits(map, CHUNK_ALLOCATED);
5954 btrfs_free_chunk_map(map);
5962 struct btrfs_chunk_map *map;
5966 map = btrfs_get_chunk_map(fs_info, logical, len);
5967 if (IS_ERR(map))
5976 index = btrfs_bg_flags_to_raid_index(map->type);
5979 if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK))
5981 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5983 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5991 ret = map->num_stripes;
5992 btrfs_free_chunk_map(map);
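
In the 5962-5991 snippet the copy count comes from btrfs_raid_array for non-parity profiles, while RAID5 reads as 2 (the data stripe, or a rebuild from parity) and RAID6 reads as num_stripes so repair can retry with a different stripe excluded each time. My reading of that branch, condensed (names hypothetical):

static int num_copies_sketch(int is_raid5, int is_raid6,
			     int ncopies, int num_stripes)
{
	if (is_raid5)
		return 2;		/* data stripe, or rebuild from parity */
	if (is_raid6)
		return num_stripes;	/* retry excluding a different stripe each time */
	return ncopies;			/* raid1: 2, raid1c3: 3, dup: 2, single: 1 */
}
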
5999 struct btrfs_chunk_map *map;
6005 map = btrfs_get_chunk_map(fs_info, logical, len);
6007 if (!WARN_ON(IS_ERR(map))) {
6008 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
6009 len = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
6010 btrfs_free_chunk_map(map);
6017 struct btrfs_chunk_map *map;
6023 map = btrfs_get_chunk_map(fs_info, logical, len);
6025 if (!WARN_ON(IS_ERR(map))) {
6026 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
6028 btrfs_free_chunk_map(map);
6034 struct btrfs_chunk_map *map, int first,
6044 ASSERT((map->type &
6047 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
6048 num_stripes = map->sub_stripes;
6050 num_stripes = map->num_stripes;
6077 if (map->stripes[preferred_mirror].dev->bdev &&
6078 (tolerance || map->stripes[preferred_mirror].dev != srcdev))
6081 if (map->stripes[i].dev->bdev &&
6082 (tolerance || map->stripes[i].dev != srcdev))
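
find_live_mirror() at 6034-6082 makes up to two passes: with tolerance 0 it refuses the dev-replace source device, with tolerance 1 it accepts it rather than return nothing. A self-contained model of that selection loop (struct mirror and its fields are hypothetical stand-ins for the device state checks):

struct mirror {
	int has_bdev;		/* device present and open */
	int is_replace_src;	/* device being replaced; avoid when possible */
};

static int pick_live_mirror(const struct mirror *m, int num, int preferred)
{
	for (int tolerance = 0; tolerance < 2; tolerance++) {
		/* try the preferred mirror first on each pass */
		if (m[preferred].has_bdev &&
		    (tolerance || !m[preferred].is_replace_src))
			return preferred;
		for (int i = 0; i < num; i++) {
			if (i == preferred)
				continue;
			if (m[i].has_bdev &&
			    (tolerance || !m[i].is_replace_src))
				return i;
		}
	}
	return preferred;	/* nothing usable; the I/O will surface the error */
}
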
6141 struct btrfs_chunk_map *map;
6159 map = btrfs_get_chunk_map(fs_info, logical, length);
6160 if (IS_ERR(map))
6161 return ERR_CAST(map);
6164 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6169 offset = logical - map->start;
6170 length = min_t(u64, map->start + map->chunk_len - logical, length);
6194 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6196 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
6199 sub_stripes = map->sub_stripes;
6201 factor = map->num_stripes / sub_stripes;
6202 *num_stripes = min_t(u64, map->num_stripes,
6211 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
6213 *num_stripes = map->num_stripes;
6215 stripe_index = stripe_nr % map->num_stripes;
6216 stripe_nr /= map->num_stripes;
6227 map->stripes[stripe_index].physical +
6229 stripes[i].dev = map->stripes[stripe_index].dev;
6231 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6261 if (stripe_index == map->num_stripes) {
6267 btrfs_free_chunk_map(map);
6270 btrfs_free_chunk_map(map);
6368 static u64 btrfs_max_io_len(struct btrfs_chunk_map *map, u64 offset,
6379 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6381 btrfs_stripe_nr_to_offset(nr_data_stripes(map));
6393 rounddown(io_geom->stripe_nr, nr_data_stripes(map)));
6409 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK)
6416 struct btrfs_chunk_map *map,
6419 dst->dev = map->stripes[io_geom->stripe_index].dev;
6422 btrfs_need_stripe_tree_update(fs_info, map->type))
6424 map->type,
6427 dst->physical = map->stripes[io_geom->stripe_index].physical +
6435 const struct btrfs_chunk_map *map,
6445 if (btrfs_need_stripe_tree_update(fs_info, map->type) && op != BTRFS_MAP_READ)
6448 if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1)
6454 static void map_blocks_raid0(const struct btrfs_chunk_map *map,
6457 io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
6458 io_geom->stripe_nr /= map->num_stripes;
6464 struct btrfs_chunk_map *map,
6469 io_geom->num_stripes = map->num_stripes;
6478 io_geom->stripe_index = find_live_mirror(fs_info, map, 0,
6483 static void map_blocks_dup(const struct btrfs_chunk_map *map,
6487 io_geom->num_stripes = map->num_stripes;
6500 struct btrfs_chunk_map *map,
6504 u32 factor = map->num_stripes / map->sub_stripes;
6507 io_geom->stripe_index = (io_geom->stripe_nr % factor) * map->sub_stripes;
6511 io_geom->num_stripes = map->sub_stripes;
6521 io_geom->stripe_index = find_live_mirror(fs_info, map,
6527 static void map_blocks_raid56_write(struct btrfs_chunk_map *map,
6531 int data_stripes = nr_data_stripes(map);
6546 io_geom->num_stripes = map->num_stripes;
6547 io_geom->max_errors = btrfs_chunk_max_errors(map);
6551 io_geom->raid56_full_stripe_start + map->start +
6558 static void map_blocks_raid56_read(struct btrfs_chunk_map *map,
6561 int data_stripes = nr_data_stripes(map);
6570 (io_geom->stripe_nr + io_geom->stripe_index) % map->num_stripes;
6576 static void map_blocks_single(const struct btrfs_chunk_map *map,
6579 io_geom->stripe_index = io_geom->stripe_nr % map->num_stripes;
6580 io_geom->stripe_nr /= map->num_stripes;
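
The map_blocks_* helpers above reduce to modular arithmetic on stripe_nr, the chunk offset divided by the stripe length. Worked numbers for the RAID0, RAID10, and RAID5 full-stripe cases, assuming 64 KiB stripes (BTRFS_STRIPE_LEN) and a 4-device chunk:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t stripe_len = 64 * 1024;		/* BTRFS_STRIPE_LEN */
	const int num_stripes = 4;			/* devices in the chunk */
	uint64_t offset = 5 * stripe_len + 123;		/* offset inside the chunk */
	uint32_t stripe_nr = offset / stripe_len;	/* = 5 */

	/* RAID0: round-robin across all four stripes */
	printf("raid0:  index=%u nr=%u\n",
	       stripe_nr % num_stripes, stripe_nr / num_stripes);	/* 1, 1 */

	/* RAID10, sub_stripes = 2: two mirrored pairs */
	const int sub_stripes = 2, factor = num_stripes / sub_stripes;
	printf("raid10: index=%u nr=%u\n",
	       (stripe_nr % factor) * sub_stripes, stripe_nr / factor);	/* 2, 2 */

	/* RAID5: 3 data stripes; the full stripe starts on a multiple of
	 * data_stripes * stripe_len, cf. raid56_full_stripe_start */
	const int data_stripes = num_stripes - 1;
	uint64_t full = offset / (data_stripes * stripe_len) * (data_stripes * stripe_len);
	printf("raid5:  full_stripe_start=%llu\n", (unsigned long long)full);	/* 196608 */
	return 0;
}
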
6598 * If the map request can be fulfilled by one single
6624 struct btrfs_chunk_map *map;
6647 map = btrfs_get_chunk_map(fs_info, logical, *length);
6648 if (IS_ERR(map))
6649 return PTR_ERR(map);
6651 map_offset = logical - map->start;
6653 max_len = btrfs_max_io_len(map, map_offset, &io_geom);
6654 *length = min_t(u64, map->chunk_len - map_offset, max_len);
6665 switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6667 map_blocks_raid0(map, &io_geom);
6672 map_blocks_raid1(fs_info, map, &io_geom, dev_replace_is_ongoing);
6675 map_blocks_dup(map, &io_geom);
6678 map_blocks_raid10(fs_info, map, &io_geom, dev_replace_is_ongoing);
6683 map_blocks_raid56_write(map, &io_geom, logical, length);
6685 map_blocks_raid56_read(map, &io_geom);
6693 map_blocks_single(map, &io_geom);
6696 if (io_geom.stripe_index >= map->num_stripes) {
6699 io_geom.stripe_index, map->num_stripes);
6721 if (is_single_device_io(fs_info, smap, map, num_alloc_stripes, op,
6723 ret = set_io_stripe(fs_info, logical, length, smap, map, &io_geom);
6735 bioc->map_type = map->type;
6738 * For RAID56 full map, we need to make sure the stripes[] follows the
6744 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
6754 bioc->full_stripe_logical = map->start +
6756 nr_data_stripes(map));
6762 dst->dev = map->stripes[stripe_index].dev;
6764 map->stripes[stripe_index].physical +
6775 &bioc->stripes[i], map, &io_geom);
6789 io_geom.max_errors = btrfs_chunk_max_errors(map);
6808 btrfs_free_chunk_map(map);
6980 u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map)
6982 const int data_stripes = calc_data_stripes(map->type, map->num_stripes);
6984 return div_u64(map->chunk_len, data_stripes);
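
calc_data_stripes() at 6982 reduces num_stripes to the stripes that actually hold data; my understanding is that parity profiles subtract nparity while mirrored profiles divide by ncopies. A sketch of that reduction:

/* raid5: n - 1, raid6: n - 2, raid1/raid10: n / 2, single/raid0: n */
static unsigned long long data_stripes(int num_stripes, int ncopies, int nparity)
{
	if (nparity)
		return num_stripes - nparity;
	return num_stripes / ncopies;
}

So for a 1 GiB raid5 chunk over 4 devices, data_stripes is 3 and btrfs_calc_stripe_length() yields 1 GiB / 3, about 341 MiB per device.
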
7053 struct btrfs_chunk_map *map;
7087 map = btrfs_find_chunk_map(fs_info, logical, 1);
7090 if (map && map->start <= logical && map->start + map->chunk_len > logical) {
7091 btrfs_free_chunk_map(map);
7093 } else if (map) {
7094 btrfs_free_chunk_map(map);
7097 map = btrfs_alloc_chunk_map(num_stripes, GFP_NOFS);
7098 if (!map)
7101 map->start = logical;
7102 map->chunk_len = length;
7103 map->num_stripes = num_stripes;
7104 map->io_width = btrfs_chunk_io_width(leaf, chunk);
7105 map->io_align = btrfs_chunk_io_align(leaf, chunk);
7106 map->type = type;
7115 map->sub_stripes = btrfs_raid_array[index].sub_stripes;
7116 map->verified_stripes = 0;
7117 map->stripe_size = btrfs_calc_stripe_length(map);
7119 map->stripes[i].physical =
7127 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args);
7128 if (!map->stripes[i].dev) {
7129 map->stripes[i].dev = handle_missing_device(fs_info,
7131 if (IS_ERR(map->stripes[i].dev)) {
7132 ret = PTR_ERR(map->stripes[i].dev);
7133 btrfs_free_chunk_map(map);
7139 &(map->stripes[i].dev->dev_state));
7142 ret = btrfs_add_chunk_map(fs_info, map);
7145 "failed to add chunk map, start=%llu len=%llu: %d",
7146 map->start, map->chunk_len, ret);
7457 struct btrfs_chunk_map *map;
7461 map = btrfs_find_chunk_map(fs_info, 0, U64_MAX);
7463 if (!map) {
7467 while (map) {
7474 map->type);
7475 for (i = 0; i < map->num_stripes; i++) {
7476 struct btrfs_device *dev = map->stripes[i].dev;
7489 map->start, missing, max_tolerated);
7490 btrfs_free_chunk_map(map);
7494 next_start = map->start + map->chunk_len;
7495 btrfs_free_chunk_map(map);
7497 map = btrfs_find_chunk_map(fs_info, next_start, U64_MAX - next_start);
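
The loop at 7457-7497 scans every chunk map by re-running the range lookup from map->start + chunk_len, rather than holding the tree locked across the whole walk. A standalone model of that iteration pattern, with a sorted array standing in for the mapping tree and find_map() standing in for btrfs_find_chunk_map():

#include <stdint.h>
#include <stdio.h>

struct map { uint64_t start, len; };

/* stand-in for btrfs_find_chunk_map(): first map overlapping [start, U64_MAX) */
static const struct map *find_map(const struct map *maps, int n, uint64_t start)
{
	for (int i = 0; i < n; i++)
		if (maps[i].start + maps[i].len > start)
			return &maps[i];
	return NULL;
}

int main(void)
{
	const struct map maps[] = { { 0, 1024 }, { 4096, 2048 }, { 16384, 1024 } };
	uint64_t next_start = 0;

	for (const struct map *m = find_map(maps, 3, next_start); m;
	     m = find_map(maps, 3, next_start)) {
		printf("chunk %llu-%llu\n", (unsigned long long)m->start,
		       (unsigned long long)(m->start + m->len));
		next_start = m->start + m->len;	/* resume past this chunk */
	}
	return 0;
}
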
7990 struct btrfs_chunk_map *map;
7997 map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
7998 if (!map) {
8006 stripe_len = btrfs_calc_stripe_length(map);
8010 physical_offset, devid, map->start, physical_len,
8026 for (i = 0; i < map->num_stripes; i++) {
8027 if (map->stripes[i].dev->devid == devid &&
8028 map->stripes[i].physical == physical_offset) {
8030 if (map->verified_stripes >= map->num_stripes) {
8033 map->start);
8037 map->verified_stripes++;
8079 btrfs_free_chunk_map(map);
8090 struct btrfs_chunk_map *map;
8092 map = rb_entry(node, struct btrfs_chunk_map, rb_node);
8093 if (map->num_stripes != map->verified_stripes) {
8096 map->start, map->verified_stripes, map->num_stripes);
8358 /* The map range should not cross stripe boundary. */