/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */
28168404Spjd
29168404Spjd#include <sys/zfs_context.h>
30168404Spjd#include <sys/spa.h>
31168404Spjd#include <sys/dmu.h>
32262093Savg#include <sys/dmu_tx.h>
33262093Savg#include <sys/dnode.h>
34262093Savg#include <sys/dsl_pool.h>
35168404Spjd#include <sys/zio.h>
36168404Spjd#include <sys/space_map.h>
37262093Savg#include <sys/refcount.h>
38262093Savg#include <sys/zfeature.h>
39168404Spjd
40273983SdelphijSYSCTL_DECL(_vfs_zfs);
41273983Sdelphij
/*
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer i/o operations, but they also cause the
 * DMU to keep more data in-core, and also to waste more i/o bandwidth
 * when only a few blocks have changed since the last transaction group.
 *
 * The default is one 4KB block (1 << 12); space_map_alloc() uses this
 * value as the data block size for newly allocated space map objects.
 */
int space_map_blksz = (1 << 12);
/*
 * NOTE(review): the description says "greater than 4096" but the default
 * is exactly 4096 — presumably ">= 4096" is meant; confirm before
 * tightening any validation of this tunable.
 */
SYSCTL_INT(_vfs_zfs, OID_AUTO, space_map_blksz, CTLFLAG_RDTUN, &space_map_blksz, 0,
    "Maximum block size for space map.  Must be power of 2 and greater than 4096.");
51168404Spjd
/*
 * Load the space map disk into the specified range tree. Segments of maptype
 * are added to the range tree, other segment types are removed.
 *
 * Returns 0 on success or the error from dmu_read(); on failure the
 * range tree is vacated so the caller never sees a partial load.
 *
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	end = space_map_length(sm);
	space = space_map_allocated(sm);

	/* The caller must supply an empty range tree. */
	VERIFY0(range_tree_space(rt));

	/*
	 * When loading free space, seed the tree with the entire range;
	 * replayed ALLOC entries then carve space out and FREE entries
	 * put it back.  "space" becomes the expected final free space.
	 */
	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	entry_map = zio_buf_alloc(bufsize);

	/*
	 * Kick off an async prefetch of everything past the first buffer
	 * so the dmu_read() loop below mostly hits cached data.
	 */
	mutex_exit(sm->sm_lock);
	if (end > bufsize) {
		dmu_prefetch(sm->sm_os, space_map_object(sm), 0, bufsize,
		    end - bufsize, ZIO_PRIORITY_SYNC_READ);
	}
	mutex_enter(sm->sm_lock);

	/* Read and replay the on-disk log, one buffer at a time. */
	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);
		ASSERT3U(sm->sm_blksz, !=, 0);

		dprintf("object=%llu  offset=%llx  size=%llx\n",
		    space_map_object(sm), offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
		    entry_map, DMU_READ_PREFETCH);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;
			/* Note: these shadow the outer offset/size. */
			uint64_t offset, size;

			if (SM_DEBUG_DECODE(e))		/* Skip debug entries */
				continue;

			/* Decode from sm_shift units back to bytes. */
			offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
			    sm->sm_start;
			size = SM_RUN_DECODE(e) << sm->sm_shift;

			/* Sanity: entry must lie within the map's range. */
			VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
			VERIFY3U(offset, >=, sm->sm_start);
			VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
			if (SM_TYPE_DECODE(e) == maptype) {
				VERIFY3U(range_tree_space(rt) + size, <=,
				    sm->sm_size);
				range_tree_add(rt, offset, size);
			} else {
				range_tree_remove(rt, offset, size);
			}
		}
	}

	/*
	 * On success the replayed tree must account for exactly the
	 * expected amount of space; on error leave the tree empty.
	 */
	if (error == 0)
		VERIFY3U(range_tree_space(rt), ==, space);
	else
		range_tree_vacate(rt, NULL, NULL);

	zio_buf_free(entry_map, bufsize);
	return (error);
}
138168404Spjd
139262093Savgvoid
140262093Savgspace_map_histogram_clear(space_map_t *sm)
141262093Savg{
142262093Savg	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
143262093Savg		return;
144168404Spjd
145262093Savg	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
146262093Savg}
147168404Spjd
148262093Savgboolean_t
149262093Savgspace_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
150262093Savg{
151262093Savg	/*
152262093Savg	 * Verify that the in-core range tree does not have any
153262093Savg	 * ranges smaller than our sm_shift size.
154262093Savg	 */
155262093Savg	for (int i = 0; i < sm->sm_shift; i++) {
156262093Savg		if (rt->rt_histogram[i] != 0)
157262093Savg			return (B_FALSE);
158262093Savg	}
159262093Savg	return (B_TRUE);
160168404Spjd}
161168404Spjd
/*
 * Fold the range tree's histogram into the space map's on-disk
 * histogram.  This is a no-op for old-format (V0) space maps, whose
 * bonus buffer has no room for a histogram.
 */
void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(MUTEX_HELD(rt->rt_lock));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	/* V0 bonus buffers carry no histogram; nothing to update. */
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));

	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets ranging
	 * between 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. Assume
		 * the range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would normalize this to 5 * 2^4 (16).
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}
213209962Smm
214209962Smmuint64_t
215262093Savgspace_map_entries(space_map_t *sm, range_tree_t *rt)
216168404Spjd{
217262093Savg	avl_tree_t *t = &rt->rt_root;
218262093Savg	range_seg_t *rs;
219262093Savg	uint64_t size, entries;
220168404Spjd
221262093Savg	/*
222262093Savg	 * All space_maps always have a debug entry so account for it here.
223262093Savg	 */
224262093Savg	entries = 1;
225262093Savg
226262093Savg	/*
227262093Savg	 * Traverse the range tree and calculate the number of space map
228262093Savg	 * entries that would be required to write out the range tree.
229262093Savg	 */
230262093Savg	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
231262093Savg		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
232262093Savg		entries += howmany(size, SM_RUN_MAX);
233262093Savg	}
234262093Savg	return (entries);
235168404Spjd}
236168404Spjd
/*
 * Append the contents of the range tree to the space map object on
 * disk, as entries of the given maptype, and fold the net space delta
 * into smp_alloc.
 *
 * Note: space_map_write() will drop rt_lock across dmu_write() calls.
 * (The header previously said sm_lock; the code drops rt->rt_lock.)
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, total, rt_space, nodes;
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t expected_entries, actual_entries = 1;	/* counts debug entry */

	ASSERT(MUTEX_HELD(rt->rt_lock));
	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number but is maintained for backwards
	 * compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	/* Nothing to write; don't even emit a debug entry. */
	if (range_tree_space(rt) == 0) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	expected_entries = space_map_entries(sm, rt);

	entry_map = zio_buf_alloc(sm->sm_blksz);
	entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
	entry = entry_map;

	/* Every write starts with a debug entry (action, sync pass, txg). */
	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	total = 0;
	nodes = avl_numnodes(&rt->rt_root);
	rt_space = range_tree_space(rt);
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t start;

		/* Scale the segment down to sm_shift units. */
		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;

		total += size << sm->sm_shift;

		/* Segments longer than SM_RUN_MAX are split into runs. */
		while (size != 0) {
			uint64_t run_len;

			run_len = MIN(size, SM_RUN_MAX);

			/* Buffer full: flush one block's worth of entries. */
			if (entry == entry_map_end) {
				mutex_exit(rt->rt_lock);
				dmu_write(os, space_map_object(sm),
				    sm->sm_phys->smp_objsize, sm->sm_blksz,
				    entry_map, tx);
				mutex_enter(rt->rt_lock);
				sm->sm_phys->smp_objsize += sm->sm_blksz;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
			actual_entries++;
		}
	}

	/* Flush any remaining partial block of entries. */
	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(rt->rt_lock);
		dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
		    size, entry_map, tx);
		mutex_enter(rt->rt_lock);
		sm->sm_phys->smp_objsize += size;
	}
	ASSERT3U(expected_entries, ==, actual_entries);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
	VERIFY3U(range_tree_space(rt), ==, total);

	zio_buf_free(entry_map, sm->sm_blksz);
}
341168404Spjd
342262093Savgstatic int
343262093Savgspace_map_open_impl(space_map_t *sm)
344168404Spjd{
345262093Savg	int error;
346262093Savg	u_longlong_t blocks;
347168404Spjd
348262093Savg	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
349262093Savg	if (error)
350262093Savg		return (error);
351262093Savg
352262093Savg	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
353262093Savg	sm->sm_phys = sm->sm_dbuf->db_data;
354262093Savg	return (0);
355168404Spjd}
356209962Smm
357262093Savgint
358262093Savgspace_map_open(space_map_t **smp, objset_t *os, uint64_t object,
359262093Savg    uint64_t start, uint64_t size, uint8_t shift, kmutex_t *lp)
360209962Smm{
361262093Savg	space_map_t *sm;
362262093Savg	int error;
363209962Smm
364262093Savg	ASSERT(*smp == NULL);
365262093Savg	ASSERT(os != NULL);
366262093Savg	ASSERT(object != 0);
367209962Smm
368262093Savg	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);
369209962Smm
370262093Savg	sm->sm_start = start;
371262093Savg	sm->sm_size = size;
372262093Savg	sm->sm_shift = shift;
373262093Savg	sm->sm_lock = lp;
374262093Savg	sm->sm_os = os;
375262093Savg	sm->sm_object = object;
376262093Savg
377262093Savg	error = space_map_open_impl(sm);
378262093Savg	if (error != 0) {
379262093Savg		space_map_close(sm);
380262093Savg		return (error);
381262093Savg	}
382262093Savg
383262093Savg	*smp = sm;
384262093Savg
385209962Smm	return (0);
386209962Smm}
387209962Smm
388209962Smmvoid
389262093Savgspace_map_close(space_map_t *sm)
390209962Smm{
391262093Savg	if (sm == NULL)
392262093Savg		return;
393209962Smm
394262093Savg	if (sm->sm_dbuf != NULL)
395262093Savg		dmu_buf_rele(sm->sm_dbuf, sm);
396262093Savg	sm->sm_dbuf = NULL;
397262093Savg	sm->sm_phys = NULL;
398209962Smm
399262093Savg	kmem_free(sm, sizeof (*sm));
400209962Smm}
401209962Smm
/*
 * Reset the space map to an empty on-disk state for the next sync.
 * Depending on whether the object's bonus/block sizes still match what
 * we would allocate today, this either truncates the existing object
 * or frees and reallocates it (see below).
 */
void
space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != space_map_blksz) {
		zfs_dbgmsg("txg %llu, spa %s, reallocating: "
		    "old bonus %u, old blocksz %u", dmu_tx_get_txg(tx),
		    spa_name(spa), doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		/* Re-open the freshly allocated object in place. */
		sm->sm_object = space_map_alloc(sm->sm_os, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset.  Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	/* Either way, the map now holds no entries and no allocated space. */
	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}
450209962Smm
451209962Smm/*
452262093Savg * Update the in-core space_map allocation and length values.
453209962Smm */
454209962Smmvoid
455262093Savgspace_map_update(space_map_t *sm)
456209962Smm{
457262093Savg	if (sm == NULL)
458262093Savg		return;
459209962Smm
460209962Smm	ASSERT(MUTEX_HELD(sm->sm_lock));
461209962Smm
462262093Savg	sm->sm_alloc = sm->sm_phys->smp_alloc;
463262093Savg	sm->sm_length = sm->sm_phys->smp_objsize;
464209962Smm}
465209962Smm
466262093Savguint64_t
467262093Savgspace_map_alloc(objset_t *os, dmu_tx_t *tx)
468262093Savg{
469262093Savg	spa_t *spa = dmu_objset_spa(os);
470262093Savg	uint64_t object;
471262093Savg	int bonuslen;
472262093Savg
473263390Sdelphij	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
474263390Sdelphij		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
475262093Savg		bonuslen = sizeof (space_map_phys_t);
476262093Savg		ASSERT3U(bonuslen, <=, dmu_bonus_max());
477262093Savg	} else {
478262093Savg		bonuslen = SPACE_MAP_SIZE_V0;
479262093Savg	}
480262093Savg
481262093Savg	object = dmu_object_alloc(os,
482273341Sdelphij	    DMU_OT_SPACE_MAP, space_map_blksz,
483262093Savg	    DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);
484262093Savg
485262093Savg	return (object);
486262093Savg}
487262093Savg
488209962Smmvoid
489262093Savgspace_map_free(space_map_t *sm, dmu_tx_t *tx)
490209962Smm{
491262093Savg	spa_t *spa;
492209962Smm
493262093Savg	if (sm == NULL)
494262093Savg		return;
495209962Smm
496262093Savg	spa = dmu_objset_spa(sm->sm_os);
497263390Sdelphij	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
498262093Savg		dmu_object_info_t doi;
499209962Smm
500262093Savg		dmu_object_info_from_db(sm->sm_dbuf, &doi);
501262093Savg		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
502263390Sdelphij			VERIFY(spa_feature_is_active(spa,
503263390Sdelphij			    SPA_FEATURE_SPACEMAP_HISTOGRAM));
504263390Sdelphij			spa_feature_decr(spa,
505263390Sdelphij			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
506209962Smm		}
507209962Smm	}
508262093Savg
509262093Savg	VERIFY3U(dmu_object_free(sm->sm_os, space_map_object(sm), tx), ==, 0);
510262093Savg	sm->sm_object = 0;
511209962Smm}
512262093Savg
513262093Savguint64_t
514262093Savgspace_map_object(space_map_t *sm)
515262093Savg{
516262093Savg	return (sm != NULL ? sm->sm_object : 0);
517262093Savg}
518262093Savg
519262093Savg/*
520262093Savg * Returns the already synced, on-disk allocated space.
521262093Savg */
522262093Savguint64_t
523262093Savgspace_map_allocated(space_map_t *sm)
524262093Savg{
525262093Savg	return (sm != NULL ? sm->sm_alloc : 0);
526262093Savg}
527262093Savg
528262093Savg/*
529262093Savg * Returns the already synced, on-disk length;
530262093Savg */
531262093Savguint64_t
532262093Savgspace_map_length(space_map_t *sm)
533262093Savg{
534262093Savg	return (sm != NULL ? sm->sm_length : 0);
535262093Savg}
536262093Savg
537262093Savg/*
538262093Savg * Returns the allocated space that is currently syncing.
539262093Savg */
540262093Savgint64_t
541262093Savgspace_map_alloc_delta(space_map_t *sm)
542262093Savg{
543262093Savg	if (sm == NULL)
544262093Savg		return (0);
545262093Savg	ASSERT(sm->sm_dbuf != NULL);
546262093Savg	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
547262093Savg}
548