--- metaslab_impl.h (332547)
+++ metaslab_impl.h (339105)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
- * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 */

#ifndef _SYS_METASLAB_IMPL_H
#define	_SYS_METASLAB_IMPL_H

#include <sys/metaslab.h>
#include <sys/space_map.h>
#include <sys/range_tree.h>
#include <sys/vdev.h>
#include <sys/txg.h>
#include <sys/avl.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * Metaslab allocation tracing record.
 */
typedef struct metaslab_alloc_trace {
	list_node_t		mat_list_node;
	metaslab_group_t	*mat_mg;
	metaslab_t		*mat_msp;
	uint64_t		mat_size;
	uint64_t		mat_weight;
	uint32_t		mat_dva_id;
	uint64_t		mat_offset;
+	int			mat_allocator;
} metaslab_alloc_trace_t;

/*
 * Used by the metaslab allocation tracing facility to indicate
 * error conditions. These errors are stored to the offset member
 * of the metaslab_alloc_trace_t record and displayed by mdb.
 */
typedef enum trace_alloc_type {
	TRACE_ALLOC_FAILURE	= -1ULL,
	TRACE_TOO_SMALL		= -2ULL,
	TRACE_FORCE_GANG	= -3ULL,
	TRACE_NOT_ALLOCATABLE	= -4ULL,
	TRACE_GROUP_FAILURE	= -5ULL,
	TRACE_ENOSPC		= -6ULL,
	TRACE_CONDENSING	= -7ULL,
	TRACE_VDEV_ERROR	= -8ULL
} trace_alloc_type_t;

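Because a successful allocation stores a device offset in mat_offset, the failure codes above are chosen from the very top of the uint64_t range so the two uses can share one field. A minimal user-space sketch of that convention; the record_trace() helper is hypothetical, not the kernel interface:

    #include <stdint.h>
    #include <stdio.h>

    #define TRACE_ENOSPC    (-6ULL)    /* same value as the enum above */

    /*
     * Hypothetical helper: on failure, the trace record's offset field
     * carries a TRACE_* sentinel instead of a device offset.
     */
    static void
    record_trace(uint64_t *mat_offset, uint64_t result)
    {
        *mat_offset = result;
    }

    int
    main(void)
    {
        uint64_t off;

        record_trace(&off, TRACE_ENOSPC);
        /* Prints 0xfffffffffffffffa; mdb decodes this back to TRACE_ENOSPC. */
        printf("mat_offset = 0x%llx\n", (unsigned long long)off);
        return (0);
    }
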
#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
-#define	METASLAB_WEIGHT_TYPE		(1ULL << 61)
+#define	METASLAB_WEIGHT_CLAIM		(1ULL << 61)
+#define	METASLAB_WEIGHT_TYPE		(1ULL << 60)
-#define	METASLAB_ACTIVE_MASK		\
-	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
+#define	METASLAB_ACTIVE_MASK		\
+	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY | \
+	METASLAB_WEIGHT_CLAIM)

/*
 * The metaslab weight is used to encode the amount of free space in a
 * metaslab, such that the "best" metaslab appears first when sorting the
 * metaslabs by weight. The weight (and therefore the "best" metaslab) can
 * be determined in two different ways: by computing a weighted sum of all
 * the free space in the metaslab (a space-based weight) or by counting only
 * the free segments of the largest size (a segment-based weight). We prefer
 * the segment-based weight because it reflects how the free space is
 * composed, but we cannot always use it -- legacy pools do not have the
 * space map histogram information necessary to determine the largest
 * contiguous regions. Pools that have the space map histogram determine
 * the segment weight by looking at each bucket in the histogram and
 * determining the free space whose size in bytes is in the range:
 *	[2^i, 2^(i+1))
 * We then encode the largest index, i, that contains regions into the
 * segment-weighted value.
 *
 * Space-based weight:
 *
 *	64	56	48	40	32	24	16	8	0
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
- *	|PS1|                   weighted-free space                     |
+ *	|PSC1|                  weighted-free space                     |
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *
 *	PS - indicates primary and secondary activation
+ *	C - indicates activation for claimed block zio
 *	space - the fragmentation-weighted space
 *
 * Segment-based weight:
 *
 *	64	56	48	40	32	24	16	8	0
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
- *	|PS0| idx|              count of segments in region             |
+ *	|PSC0| idx|             count of segments in region             |
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *
 *	PS - indicates primary and secondary activation
+ *	C - indicates activation for claimed block zio
 *	idx - index for the highest bucket in the histogram
 *	count - number of segments in the specified bucket
 */
-#define	WEIGHT_GET_ACTIVE(weight)	BF64_GET((weight), 62, 2)
-#define	WEIGHT_SET_ACTIVE(weight, x)	BF64_SET((weight), 62, 2, x)
+#define	WEIGHT_GET_ACTIVE(weight)	BF64_GET((weight), 61, 3)
+#define	WEIGHT_SET_ACTIVE(weight, x)	BF64_SET((weight), 61, 3, x)

-#define	WEIGHT_IS_SPACEBASED(weight)	\
-	((weight) == 0 || BF64_GET((weight), 61, 1))
-#define	WEIGHT_SET_SPACEBASED(weight)	BF64_SET((weight), 61, 1, 1)
+#define	WEIGHT_IS_SPACEBASED(weight)	\
+	((weight) == 0 || BF64_GET((weight), 60, 1))
+#define	WEIGHT_SET_SPACEBASED(weight)	BF64_SET((weight), 60, 1, 1)

/*
 * These macros are only applicable to segment-based weighting.
 */
-#define	WEIGHT_GET_INDEX(weight)	BF64_GET((weight), 55, 6)
-#define	WEIGHT_SET_INDEX(weight, x)	BF64_SET((weight), 55, 6, x)
-#define	WEIGHT_GET_COUNT(weight)	BF64_GET((weight), 0, 55)
-#define	WEIGHT_SET_COUNT(weight, x)	BF64_SET((weight), 0, 55, x)
+#define	WEIGHT_GET_INDEX(weight)	BF64_GET((weight), 54, 6)
+#define	WEIGHT_SET_INDEX(weight, x)	BF64_SET((weight), 54, 6, x)
+#define	WEIGHT_GET_COUNT(weight)	BF64_GET((weight), 0, 54)
+#define	WEIGHT_SET_COUNT(weight, x)	BF64_SET((weight), 0, 54, x)
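Taking bit 61 for METASLAB_WEIGHT_CLAIM widens the active field to three bits and shifts the space-based flag, index, and count down by one bit each. A minimal user-space sketch of the new segment-based layout; BF64_GET is redefined here as a stand-in for the bitfield macro in the kernel headers:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for the kernel's BF64_GET bitfield macro. */
    #define BF64_GET(x, low, len)   (((x) >> (low)) & ((1ULL << (len)) - 1))

    int
    main(void)
    {
        uint64_t w = 0;

        /* Segment-based weight: highest bucket index 24, 100 segments. */
        w |= 24ULL << 54;       /* WEIGHT_SET_INDEX(w, 24) */
        w |= 100ULL;            /* WEIGHT_SET_COUNT(w, 100) */
        w |= 1ULL << 63;        /* METASLAB_WEIGHT_PRIMARY */

        /* active prints 4 (0b100): primary set, secondary and claim clear. */
        printf("active=%llu spacebased=%llu idx=%llu count=%llu\n",
            (unsigned long long)BF64_GET(w, 61, 3),
            (unsigned long long)BF64_GET(w, 60, 1),
            (unsigned long long)BF64_GET(w, 54, 6),
            (unsigned long long)BF64_GET(w, 0, 54));
        return (0);
    }
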

/*
 * A metaslab class encompasses a category of allocatable top-level vdevs.
 * Each top-level vdev is associated with a metaslab group which defines
 * the allocatable region for that vdev. Examples of these categories include
 * "normal" for data block allocations (i.e. main pool allocations) or "log"
 * for allocations designated for intent log devices (i.e. slog devices).
 * When a block allocation is requested from the SPA it is associated with a
 * metaslab_class_t, and only top-level vdevs (i.e. metaslab groups) belonging
 * to the class can be used to satisfy that request. Allocations are done
 * by traversing the metaslab groups that are linked off of the mc_rotor field.
 * This rotor points to the next metaslab group where allocations will be
 * attempted. Allocating a block is a 3-step process -- select the metaslab
 * group, select the metaslab, and then allocate the block. The metaslab
 * class defines the low-level block allocator that will be used as the
 * final step in allocation. These allocators are pluggable, allowing each
 * class to use a block allocator that best suits that class.
 */
struct metaslab_class {
	kmutex_t		mc_lock;
	spa_t			*mc_spa;
	metaslab_group_t	*mc_rotor;
	metaslab_ops_t		*mc_ops;
	uint64_t		mc_aliquot;

	/*
	 * Track the number of metaslab groups that have been initialized
	 * and can accept allocations. An initialized metaslab group is
	 * one that has been completely added to the config (i.e. we have
	 * updated the MOS config and the space has been added to the pool).
	 */
	uint64_t		mc_groups;

	/*
	 * Toggle to enable/disable the allocation throttle.
	 */
	boolean_t		mc_alloc_throttle_enabled;

	/*
	 * The allocation throttle works on a reservation system. Whenever
	 * an asynchronous zio wants to perform an allocation it must
	 * first reserve the number of blocks that it wants to allocate.
	 * If there aren't sufficient slots available for the pending zio
	 * then that I/O is throttled until more slots free up. The current
	 * number of reserved allocations is maintained by the mc_alloc_slots
	 * refcount. The mc_alloc_max_slots value determines the maximum
	 * number of allocations that the system allows. Gang blocks are
	 * allowed to reserve slots even if we've reached the maximum
	 * number of allocations allowed.
	 */
-	uint64_t		mc_alloc_max_slots;
-	refcount_t		mc_alloc_slots;
+	uint64_t		*mc_alloc_max_slots;
+	refcount_t		*mc_alloc_slots;

	uint64_t		mc_alloc_groups; /* # of allocatable groups */

	uint64_t		mc_alloc;	/* total allocated space */
	uint64_t		mc_deferred;	/* total deferred frees */
	uint64_t		mc_space;	/* total space (alloc + free) */
	uint64_t		mc_dspace;	/* total deflated space */
	uint64_t		mc_minblocksize;
	uint64_t		mc_histogram[RANGE_TREE_HISTOGRAM_SIZE];
};
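With mc_alloc_max_slots and mc_alloc_slots now pointers, the reservation system described in the comment above operates per allocator rather than on a single class-wide count. A hypothetical sketch of that scheme, with a plain counter standing in for refcount_t; the names and the exact policy are illustrative, not the kernel interface:

    #include <stdint.h>
    #include <stdbool.h>

    typedef struct {
        uint64_t    *slots_used;    /* stands in for mc_alloc_slots */
        uint64_t    *max_slots;     /* stands in for mc_alloc_max_slots */
    } throttle_t;

    /* Reserve slots for an async zio on one allocator, or refuse. */
    static bool
    throttle_reserve(throttle_t *t, int allocator, uint64_t want, bool is_gang)
    {
        /* Gang blocks may reserve even past the maximum. */
        if (!is_gang &&
            t->slots_used[allocator] + want > t->max_slots[allocator])
            return (false); /* caller throttles the zio until slots free up */
        t->slots_used[allocator] += want;
        return (true);
    }

    /* Release the reservation once the allocation has completed. */
    static void
    throttle_release(throttle_t *t, int allocator, uint64_t held)
    {
        t->slots_used[allocator] -= held;
    }
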

/*
 * Metaslab groups encapsulate all the allocatable regions (i.e. metaslabs)
 * of a top-level vdev. They are linked together to form a circular linked
 * list and can belong to only one metaslab class. Metaslab groups may become
 * ineligible for allocations for a number of reasons such as limited free
 * space, fragmentation, or going offline. When this happens the allocator will
 * simply find the next metaslab group in the linked list and attempt
 * to allocate from that group instead.
 */
struct metaslab_group {
	kmutex_t		mg_lock;
+	metaslab_t		**mg_primaries;
+	metaslab_t		**mg_secondaries;
	avl_tree_t		mg_metaslab_tree;
	uint64_t		mg_aliquot;
	boolean_t		mg_allocatable;		/* can we allocate? */
+	uint64_t		mg_ms_ready;

	/*
	 * A metaslab group is considered to be initialized only after
	 * we have updated the MOS config and added the space to the pool.
	 * We only allow allocation attempts to a metaslab group if it
	 * has been initialized.
	 */
	boolean_t		mg_initialized;

	uint64_t		mg_free_capacity;	/* percentage free */
	int64_t			mg_bias;
	int64_t			mg_activation_count;
	metaslab_class_t	*mg_class;
	vdev_t			*mg_vd;
	taskq_t			*mg_taskq;
	metaslab_group_t	*mg_prev;
	metaslab_group_t	*mg_next;

-	/*
-	 * Each metaslab group can handle mg_max_alloc_queue_depth allocations
-	 * which are tracked by mg_alloc_queue_depth. It's possible for a
-	 * metaslab group to handle more allocations than its max. This
-	 * can occur when gang blocks are required or when other groups
-	 * are unable to handle their share of allocations.
-	 */
+	/*
+	 * In order for the allocation throttle to function properly, we cannot
+	 * have too many IOs going to each disk by default; the throttle
+	 * operates by allocating more work to disks that finish quickly, so
+	 * allocating larger chunks to each disk reduces its effectiveness.
+	 * However, if the number of IOs going to each allocator is too small,
+	 * we will not perform proper aggregation at the vdev_queue layer,
+	 * also resulting in decreased performance. Therefore, we will use a
+	 * ramp-up strategy.
+	 *
+	 * Each allocator in each metaslab group has a current queue depth
+	 * (mg_alloc_queue_depth[allocator]) and a current max queue depth
+	 * (mg_cur_max_alloc_queue_depth[allocator]), and each metaslab group
+	 * has an absolute max queue depth (mg_max_alloc_queue_depth). We
+	 * add IOs to an allocator until the mg_alloc_queue_depth for that
+	 * allocator hits the cur_max. Every time an IO completes for a given
+	 * allocator on a given metaslab group, we increment its cur_max until
+	 * it reaches mg_max_alloc_queue_depth. The cur_max resets every txg to
+	 * help protect against disks that decrease in performance over time.
+	 *
+	 * It's possible for an allocator to handle more allocations than
+	 * its max. This can occur when gang blocks are required or when other
+	 * groups are unable to handle their share of allocations.
+	 */
	uint64_t		mg_max_alloc_queue_depth;
-	refcount_t		mg_alloc_queue_depth;
+	uint64_t		*mg_cur_max_alloc_queue_depth;
+	refcount_t		*mg_alloc_queue_depth;
+	int			mg_allocators;

	/*
	 * A metaslab group that can no longer allocate the minimum block
	 * size will set mg_no_free_space. Once a metaslab group is out
	 * of space then its share of work must be distributed to other
	 * groups.
	 */
	boolean_t		mg_no_free_space;

	uint64_t		mg_allocations;
	uint64_t		mg_failed_allocations;
	uint64_t		mg_fragmentation;
	uint64_t		mg_histogram[RANGE_TREE_HISTOGRAM_SIZE];
};
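The ramp-up strategy in the new comment is easiest to see in miniature. A hypothetical user-space sketch (the type and function names are illustrative): every IO completion nudges that allocator's cur_max toward the absolute cap, and each txg resets it so a disk that has slowed down is re-probed from a low depth.

    #include <stdint.h>

    typedef struct {
        uint64_t    max_alloc_queue_depth;      /* absolute cap */
        uint64_t    *cur_max_alloc_queue_depth; /* one entry per allocator */
        int         allocators;
    } mg_queue_sketch_t;

    /* An IO completed quickly enough to be retired: allow one more in flight. */
    static void
    io_complete(mg_queue_sketch_t *mg, int allocator)
    {
        if (mg->cur_max_alloc_queue_depth[allocator] <
            mg->max_alloc_queue_depth)
            mg->cur_max_alloc_queue_depth[allocator]++;
    }

    /* At each txg boundary, restart the ramp from a conservative depth. */
    static void
    txg_reset(mg_queue_sketch_t *mg, uint64_t initial_depth)
    {
        for (int i = 0; i < mg->allocators; i++)
            mg->cur_max_alloc_queue_depth[i] = initial_depth;
    }
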

/*
 * This value defines the number of elements in the ms_lbas array. The value
 * of 64 was chosen as it covers all power of 2 buckets up to UINT64_MAX.
 * This is the equivalent of highbit(UINT64_MAX).
 */
#define	MAX_LBAS	64

/*
 * Each metaslab maintains a set of in-core trees to track metaslab
 * operations. The in-core free tree (ms_allocatable) contains the list of
 * free segments which are eligible for allocation. As blocks are
 * allocated, the allocated segments are removed from the ms_allocatable and
 * added to a per-txg allocation tree (ms_allocating). As blocks are
 * freed, they are added to the free tree (ms_freeing). These trees
 * allow us to process all allocations and frees in syncing context
 * where it is safe to update the on-disk space maps. An additional set
 * of in-core trees is maintained to track deferred frees
 * (ms_defer). Once a block is freed it will move from the
 * ms_freed to the ms_defer tree. A deferred free means that a block
 * has been freed but cannot be used by the pool until TXG_DEFER_SIZE
 * transaction groups later. For example, a block that is freed in txg
 * 50 will not be available for reallocation until txg 52 (50 +
 * TXG_DEFER_SIZE). This provides a safety net for uberblock rollback.
 * A pool could be safely rolled back TXG_DEFER_SIZE transaction
 * groups with the assurance that no block has been reallocated.
 *
 * The simplified transition diagram looks like this:
 *
 *
 *	ALLOCATE
 *	   |
 *	   V
 *    free segment (ms_allocatable) ---> ms_allocating[4] ---> (write to space map)
 *	   ^
 *	   |                        ms_freeing <--- FREE
 *	   |                             |
 *	   |                             v
 *	   |                         ms_freed
 *	   |                             |
 *	   +-------- ms_defer[2] <-------+-------> (write to space map)
 *
 *
 * Each metaslab's space is tracked in a single space map in the MOS,
 * which is only updated in syncing context. Each time we sync a txg,
 * we append the allocs and frees from that txg to the space map. The
 * pool space is only updated once all metaslabs have finished syncing.
 *
 * To load the in-core free tree we read the space map from disk. This
 * object contains a series of alloc and free records that are combined
 * to make up the list of all free segments in this metaslab. These
 * segments are represented in-core by the ms_allocatable and are stored
 * in an AVL tree.
 *
 * As the space map grows (as a result of the appends) it will
 * eventually become space-inefficient. When the metaslab's in-core
 * free tree is zfs_condense_pct/100 times the size of the minimal
 * on-disk representation, we rewrite it in its minimized form. If a
 * metaslab needs to condense then we must set the ms_condensing flag to
 * ensure that allocations are not performed on the metaslab that is
 * being written.
 */
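The defer arithmetic above is just modular indexing over a small ring of trees. A user-space sketch of the numbers only (the real ms_defer entries are range trees, not integers):

    #include <stdint.h>
    #include <stdio.h>

    #define TXG_DEFER_SIZE  2   /* matches the value in <sys/txg.h> */

    int
    main(void)
    {
        uint64_t freed_txg = 50;
        uint64_t slot = freed_txg % TXG_DEFER_SIZE;
        uint64_t reusable_txg = freed_txg + TXG_DEFER_SIZE;

        /* Prints: freed in txg 50 -> ms_defer[0], reusable in txg 52 */
        printf("freed in txg %llu -> ms_defer[%llu], reusable in txg %llu\n",
            (unsigned long long)freed_txg, (unsigned long long)slot,
            (unsigned long long)reusable_txg);
        return (0);
    }
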
struct metaslab {
	kmutex_t	ms_lock;
	kmutex_t	ms_sync_lock;
	kcondvar_t	ms_load_cv;
	space_map_t	*ms_sm;
	uint64_t	ms_id;
	uint64_t	ms_start;
	uint64_t	ms_size;
	uint64_t	ms_fragmentation;

	range_tree_t	*ms_allocating[TXG_SIZE];
	range_tree_t	*ms_allocatable;

	/*
	 * The following range trees are accessed only from syncing context.
	 * The ms_free* trees only have entries while syncing, and are empty
	 * between syncs.
	 */
	range_tree_t	*ms_freeing;	/* to free this syncing txg */
	range_tree_t	*ms_freed;	/* already freed this syncing txg */
	range_tree_t	*ms_defer[TXG_DEFER_SIZE];
	range_tree_t	*ms_checkpointing; /* to add to the checkpoint */

	boolean_t	ms_condensing;	/* condensing? */
	boolean_t	ms_condense_wanted;
	uint64_t	ms_condense_checked_txg;

	/*
	 * We must hold both ms_lock and ms_group->mg_lock in order to
	 * modify ms_loaded.
	 */
	boolean_t	ms_loaded;
	boolean_t	ms_loading;

	int64_t		ms_deferspace;	/* sum of ms_defermap[] space */
	uint64_t	ms_weight;	/* weight vs. others in group */
	uint64_t	ms_activation_weight;	/* activation weight */

	/*
	 * Tracks whenever a metaslab is selected for loading or allocation.
	 * We use this value to determine how long the metaslab should
	 * stay cached.
	 */
	uint64_t	ms_selected_txg;

	uint64_t	ms_alloc_txg;	/* last successful alloc (debug only) */
	uint64_t	ms_max_size;	/* maximum allocatable size */

+	/*
+	 * -1 if it's not active in an allocator, otherwise set to the allocator
+	 * this metaslab is active for.
+	 */
+	int		ms_allocator;
+	boolean_t	ms_primary;	/* Only valid if ms_allocator is not -1 */
+
	/*
	 * The metaslab block allocators can optionally use a size-ordered
	 * range tree and/or an array of LBAs. Not all allocators use
	 * this functionality. The ms_allocatable_by_size should always
	 * contain the same number of segments as the ms_allocatable. The
	 * only difference is that the ms_allocatable_by_size is ordered by
	 * segment sizes.
	 */
	avl_tree_t	ms_allocatable_by_size;
	uint64_t	ms_lbas[MAX_LBAS];

	metaslab_group_t *ms_group;	/* metaslab group */
	avl_node_t	ms_group_node;	/* node in metaslab group tree */
	txg_node_t	ms_txg_node;	/* per-txg dirty metaslab links */
+
+	boolean_t	ms_new;
};

#ifdef	__cplusplus
}
#endif

#endif /* _SYS_METASLAB_IMPL_H */
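Taken together, the new fields (the mc_alloc_slots arrays, mg_primaries/mg_secondaries, mg_allocators, ms_allocator, ms_primary) split the allocation path into several independent allocators per class, each holding its own primary and secondary metaslab in every group. A hypothetical sketch of that bookkeeping only; how a zio is mapped to an allocator index, and when slots are stolen or swapped, is not visible in this header:

    /* Illustrative stand-ins, not the kernel types. */
    typedef struct ms_sketch {
        int     ms_allocator;   /* -1 when inactive */
        int     ms_primary;     /* meaningful only while active */
    } ms_sketch_t;

    typedef struct mg_active_sketch {
        ms_sketch_t **mg_primaries;     /* one slot per allocator */
        ms_sketch_t **mg_secondaries;   /* one slot per allocator */
        int         mg_allocators;
    } mg_active_sketch_t;

    /* Activate a metaslab into one allocator's primary or secondary slot. */
    static void
    activate(mg_active_sketch_t *mg, ms_sketch_t *ms, int allocator,
        int primary)
    {
        ms->ms_allocator = allocator;
        ms->ms_primary = primary;
        if (primary)
            mg->mg_primaries[allocator] = ms;
        else
            mg->mg_secondaries[allocator] = ms;
    }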