/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

SYSCTL_DECL(_vfs_zfs);

/*
 * Note on space map block size:
 *
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer I/O operations, but they also cause the
 * DMU to keep more data in-core, and also to waste more I/O bandwidth
 * when only a few blocks have changed since the last transaction group.
 */

/*
 * Enabled whenever we want to stress test the use of double-word
 * space map entries.
 */
boolean_t zfs_force_some_double_word_sm_entries = B_FALSE;

/*
 * Override the default indirect block size of 128K, instead using 16K for
 * spacemaps (2^14 bytes).  This dramatically reduces write inflation since
 * appending to a spacemap typically has to write one data block (4KB) and one
 * or two indirect blocks (16K-32K, rather than 128K).
 */
int space_map_ibs = 14;

SYSCTL_INT(_vfs_zfs, OID_AUTO, space_map_ibs, CTLFLAG_RWTUN,
    &space_map_ibs, 0, "Space map indirect block shift");

boolean_t
sm_entry_is_debug(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM_DEBUG_PREFIX);
}

boolean_t
sm_entry_is_single_word(uint64_t e)
{
	uint8_t prefix = SM_PREFIX_DECODE(e);
	return (prefix != SM_DEBUG_PREFIX && prefix != SM2_PREFIX);
}

boolean_t
sm_entry_is_double_word(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM2_PREFIX);
}
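/*
 * Illustrative sketch (not part of the original source, compiled out):
 * classifying a raw word with the predicates above and dumping it via
 * zfs_dbgmsg().  The SM*_DECODE macros are assumed to come from
 * sys/space_map.h; the helper name is hypothetical.
 */
#if 0
static void
sm_entry_dbgmsg(uint64_t e)
{
	if (sm_entry_is_debug(e)) {
		/* debug entries carry action/syncpass/txg metadata */
		zfs_dbgmsg("debug entry: txg %llu",
		    (u_longlong_t)SM_DEBUG_TXG_DECODE(e));
	} else if (sm_entry_is_single_word(e)) {
		/* offset and run are in units of 2^sm_shift bytes */
		zfs_dbgmsg("one-word entry: type %d offset %llu run %llu",
		    (int)SM_TYPE_DECODE(e),
		    (u_longlong_t)SM_OFFSET_DECODE(e),
		    (u_longlong_t)SM_RUN_DECODE(e));
	} else {
		/* first word of a pair; type/offset live in the second */
		ASSERT(sm_entry_is_double_word(e));
		zfs_dbgmsg("two-word entry: run %llu vdev %llu",
		    (u_longlong_t)SM2_RUN_DECODE(e),
		    (u_longlong_t)SM2_VDEV_DECODE(e));
	}
}
#endif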
/*
 * Iterate through the space map, invoking the callback on each (non-debug)
 * space map entry.
 */
int
space_map_iterate(space_map_t *sm, sm_cb_t callback, void *arg)
{
	uint64_t sm_len = space_map_length(sm);
	ASSERT3U(sm->sm_blksz, !=, 0);

	dmu_prefetch(sm->sm_os, space_map_object(sm), 0, 0, sm_len,
	    ZIO_PRIORITY_SYNC_READ);

	uint64_t blksz = sm->sm_blksz;
	int error = 0;
	for (uint64_t block_base = 0; block_base < sm_len && error == 0;
	    block_base += blksz) {
		dmu_buf_t *db;
		error = dmu_buf_hold(sm->sm_os, space_map_object(sm),
		    block_base, FTAG, &db, DMU_READ_PREFETCH);
		if (error != 0)
			return (error);

		uint64_t *block_start = db->db_data;
		uint64_t block_length = MIN(sm_len - block_base, blksz);
		uint64_t *block_end = block_start +
		    (block_length / sizeof (uint64_t));

		VERIFY0(P2PHASE(block_length, sizeof (uint64_t)));
		VERIFY3U(block_length, !=, 0);
		ASSERT3U(blksz, ==, db->db_size);

		for (uint64_t *block_cursor = block_start;
		    block_cursor < block_end && error == 0; block_cursor++) {
			uint64_t e = *block_cursor;

			if (sm_entry_is_debug(e))	/* Skip debug entries */
				continue;

			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				/* it is a two-word entry */
				ASSERT(sm_entry_is_double_word(e));
				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/*
				 * Move on to the second word.  Two-word
				 * entries never straddle a block boundary
				 * (the writer pads with a debug entry
				 * instead), so verify before dereferencing.
				 */
				block_cursor++;
				VERIFY3P(block_cursor, <, block_end);
				e = *block_cursor;

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset = (raw_offset << sm->sm_shift) +
			    sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			ASSERT3U(entry_offset, >=, sm->sm_start);
			ASSERT3U(entry_offset, <, sm->sm_start + sm->sm_size);
			ASSERT3U(entry_run, <=, sm->sm_size);
			ASSERT3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
				.sme_type = type,
				.sme_vdev = vdev_id,
				.sme_offset = entry_offset,
				.sme_run = entry_run
			};
			error = callback(&sme, arg);
		}
		dmu_buf_rele(db, FTAG);
	}
	return (error);
}
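/*
 * Illustrative sketch (hypothetical helper, compiled out): counting the
 * entries of a space map with space_map_iterate().  Returning non-zero
 * from the callback stops the iteration and is propagated to the caller.
 */
#if 0
static int
sm_count_entries_cb(space_map_entry_t *sme, void *arg)
{
	uint64_t *count = arg;

	ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
	(*count)++;
	return (0);
}

/* usage: uint64_t n = 0; int err = space_map_iterate(sm, sm_count_entries_cb, &n); */
#endif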
/*
 * Reads the entries from the last block of the space map into
 * buf in reverse order.  Populates nwords with the number of words
 * in the last block.
 *
 * Refer to the block comment within space_map_incremental_destroy()
 * to understand why this function is needed.
 */
static int
space_map_reversed_last_block_entries(space_map_t *sm, uint64_t *buf,
    uint64_t bufsz, uint64_t *nwords)
{
	int error = 0;
	dmu_buf_t *db;

	/*
	 * Find the offset of the last word in the space map and use
	 * that to read the last block of the space map with
	 * dmu_buf_hold().
	 */
	uint64_t last_word_offset =
	    sm->sm_phys->smp_objsize - sizeof (uint64_t);
	error = dmu_buf_hold(sm->sm_os, space_map_object(sm), last_word_offset,
	    FTAG, &db, DMU_READ_NO_PREFETCH);
	if (error != 0)
		return (error);

	ASSERT3U(sm->sm_object, ==, db->db_object);
	ASSERT3U(sm->sm_blksz, ==, db->db_size);
	ASSERT3U(bufsz, >=, db->db_size);
	ASSERT(nwords != NULL);

	uint64_t *words = db->db_data;
	*nwords =
	    (sm->sm_phys->smp_objsize - db->db_offset) / sizeof (uint64_t);

	ASSERT3U(*nwords, <=, bufsz / sizeof (uint64_t));

	uint64_t n = *nwords;
	uint64_t j = n - 1;
	for (uint64_t i = 0; i < n; i++) {
		uint64_t entry = words[i];
		if (sm_entry_is_double_word(entry)) {
			/*
			 * Since we are populating the buffer backwards
			 * we have to be extra careful and add the two
			 * words of the double-word entry in the right
			 * order.
			 */
			ASSERT3U(j, >, 0);
			buf[j - 1] = entry;

			i++;
			ASSERT3U(i, <, n);
			entry = words[i];
			buf[j] = entry;
			j -= 2;
		} else {
			ASSERT(sm_entry_is_debug(entry) ||
			    sm_entry_is_single_word(entry));
			buf[j] = entry;
			j--;
		}
	}

	/*
	 * Assert that we wrote backwards all the
	 * way to the beginning of the buffer.
	 */
	ASSERT3S(j, ==, -1);

	dmu_buf_rele(db, FTAG);
	return (error);
}
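/*
 * Worked example of the reversal above (illustrative): if the last block
 * holds the words [A, B1, B2, C], where A and C are one-word entries and
 * (B1, B2) is a two-word entry, the function produces [C, B1, B2, A].
 * Entry order is reversed, but each two-word pair keeps its internal
 * word order so it can still be decoded front-to-back.
 */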
/*
 * Note: This function performs destructive actions - specifically
 * it deletes entries from the end of the space map.  Thus, callers
 * should ensure that they are holding the appropriate locks for
 * the space map that they provide.
 */
int
space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
    dmu_tx_t *tx)
{
	uint64_t bufsz = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	uint64_t *buf = zio_buf_alloc(bufsz);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * Ideally we would want to iterate from the beginning of the
	 * space map to the end in incremental steps.  The issue with this
	 * approach is that we don't have any field on-disk that points
	 * us where to start between each step.  We could try zeroing out
	 * entries that we've destroyed, but this doesn't work either as
	 * an entry that is 0 is a valid one (ALLOC for range [0x0:0x200]).
	 *
	 * As a result, we destroy the space map's entries incrementally
	 * starting from the end after applying the callback to each of
	 * them.
	 *
	 * The problem with this approach is that we cannot literally
	 * iterate through the words in the space map backwards as we
	 * can't distinguish two-word space map entries from their second
	 * word.  Thus we do the following:
	 *
	 * 1] We get all the entries from the last block of the space map
	 *    and put them into a buffer in reverse order.  This way the
	 *    last entry comes first in the buffer, the second to last is
	 *    second, etc.
	 * 2] We iterate through the entries in the buffer and we apply
	 *    the callback to each one.  As we move from entry to entry
	 *    we decrease the size of the space map, effectively deleting
	 *    each entry.
	 * 3] If there are no more entries in the space map or the callback
	 *    returns a value other than 0, we stop iterating over the
	 *    space map.  If there are entries remaining and the callback
	 *    returned 0, we go back to step [1].
	 */
	int error = 0;
	while (space_map_length(sm) > 0 && error == 0) {
		uint64_t nwords = 0;
		error = space_map_reversed_last_block_entries(sm, buf, bufsz,
		    &nwords);
		if (error != 0)
			break;

		ASSERT3U(nwords, <=, bufsz / sizeof (uint64_t));

		for (uint64_t i = 0; i < nwords; i++) {
			uint64_t e = buf[i];

			if (sm_entry_is_debug(e)) {
				sm->sm_phys->smp_objsize -= sizeof (uint64_t);
				space_map_update(sm);
				continue;
			}

			int words = 1;
			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				ASSERT(sm_entry_is_double_word(e));
				words = 2;

				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move to the second word */
				i++;
				ASSERT3U(i, <, nwords);
				e = buf[i];

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset =
			    (raw_offset << sm->sm_shift) + sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			VERIFY3U(entry_offset, >=, sm->sm_start);
			VERIFY3U(entry_offset, <, sm->sm_start + sm->sm_size);
			VERIFY3U(entry_run, <=, sm->sm_size);
			VERIFY3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
				.sme_type = type,
				.sme_vdev = vdev_id,
				.sme_offset = entry_offset,
				.sme_run = entry_run
			};
			error = callback(&sme, arg);
			if (error != 0)
				break;

			if (type == SM_ALLOC)
				sm->sm_phys->smp_alloc -= entry_run;
			else
				sm->sm_phys->smp_alloc += entry_run;
			sm->sm_phys->smp_objsize -= words * sizeof (uint64_t);
			space_map_update(sm);
		}
	}

	if (space_map_length(sm) == 0) {
		ASSERT0(error);
		ASSERT0(sm->sm_phys->smp_objsize);
		ASSERT0(sm->sm_alloc);
	}

	zio_buf_free(buf, bufsz);
	return (error);
}

typedef struct space_map_load_arg {
	space_map_t	*smla_sm;
	range_tree_t	*smla_rt;
	maptype_t	smla_type;
} space_map_load_arg_t;

static int
space_map_load_callback(space_map_entry_t *sme, void *arg)
{
	space_map_load_arg_t *smla = arg;
	if (sme->sme_type == smla->smla_type) {
		VERIFY3U(range_tree_space(smla->smla_rt) + sme->sme_run, <=,
		    smla->smla_sm->sm_size);
		range_tree_add(smla->smla_rt, sme->sme_offset, sme->sme_run);
	} else {
		range_tree_remove(smla->smla_rt, sme->sme_offset, sme->sme_run);
	}

	return (0);
}

/*
 * Load the space map from disk into the specified range tree.
 * Segments of maptype are added to the range tree; other segment
 * types are removed.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t space;
	int err;
	space_map_load_arg_t smla;

	VERIFY0(range_tree_space(rt));
	space = space_map_allocated(sm);

	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	smla.smla_rt = rt;
	smla.smla_sm = sm;
	smla.smla_type = maptype;
	err = space_map_iterate(sm, space_map_load_callback, &smla);

	if (err == 0) {
		VERIFY3U(range_tree_space(rt), ==, space);
	} else {
		range_tree_vacate(rt, NULL, NULL);
	}

	return (err);
}
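/*
 * Illustrative sketch (compiled out): the typical load/unload pattern.
 * It assumes the pre-ZoL-0.8 range_tree_create(ops, arg) signature used
 * by code of this vintage; 'sm' is an already-open space map.
 */
#if 0
	range_tree_t *rt = range_tree_create(NULL, NULL);

	/* populate 'rt' with every free segment tracked by 'sm' */
	int err = space_map_load(sm, rt, SM_FREE);
	if (err == 0) {
		/* ... consume the segments ... */
		range_tree_vacate(rt, NULL, NULL);
	}
	range_tree_destroy(rt);
#endif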
void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram.  The space map histogram contains 32 buckets
	 * ranging from 2^sm_shift to 2^(32+sm_shift-1).  The range tree,
	 * however, can represent ranges from 2^0 to 2^63.  Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than
	 * 2^sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size.  For
		 * example given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges.  Assume
		 * the range tree had a count of 5 in the 2^44 (16TB) bucket;
		 * the calculation below would normalize this to 5 * 2^4 = 80
		 * (each 16TB segment counts as sixteen 1TB units).
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size.  Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}
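/*
 * Worked example for the transfer above (illustrative): with sm_shift = 9,
 * range tree bucket 9 (512B segments) maps to space map bucket 0, bucket 10
 * to bucket 1, and so on up to range tree bucket 40 mapping to space map
 * bucket 31.  Anything in range tree buckets 41 and above is folded into
 * space map bucket 31, scaled by 2^(i - 31 - 9).
 */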
static void
space_map_write_intro_debug(space_map_t *sm, maptype_t maptype, dmu_tx_t *tx)
{
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	uint64_t dentry = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(tx->tx_pool->dp_spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	dmu_write(sm->sm_os, space_map_object(sm), sm->sm_phys->smp_objsize,
	    sizeof (dentry), &dentry, tx);

	sm->sm_phys->smp_objsize += sizeof (dentry);
}

/*
 * Writes one or more entries given a segment.
 *
 * Note: The function may release the dbuf from the pointer initially
 * passed to it, and return a different dbuf.  Also, the space map's
 * dbuf must be dirty for the changes in sm_phys to take effect.
 */
static void
space_map_write_seg(space_map_t *sm, range_seg_t *rs, maptype_t maptype,
    uint64_t vdev_id, uint8_t words, dmu_buf_t **dbp, void *tag, dmu_tx_t *tx)
{
	ASSERT3U(words, !=, 0);
	ASSERT3U(words, <=, 2);

	/* ensure the vdev_id can be represented by the space map */
	ASSERT3U(vdev_id, <=, SM_NO_VDEVID);

	/*
	 * If this is a single word entry, ensure that no vdev was
	 * specified.
	 */
	IMPLY(words == 1, vdev_id == SM_NO_VDEVID);

	dmu_buf_t *db = *dbp;
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	uint64_t *block_base = db->db_data;
	uint64_t *block_end = block_base + (sm->sm_blksz / sizeof (uint64_t));
	uint64_t *block_cursor = block_base +
	    (sm->sm_phys->smp_objsize - db->db_offset) / sizeof (uint64_t);

	ASSERT3P(block_cursor, <=, block_end);

	uint64_t size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
	uint64_t start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
	uint64_t run_max = (words == 2) ? SM2_RUN_MAX : SM_RUN_MAX;

	ASSERT3U(rs->rs_start, >=, sm->sm_start);
	ASSERT3U(rs->rs_start, <, sm->sm_start + sm->sm_size);
	ASSERT3U(rs->rs_end - rs->rs_start, <=, sm->sm_size);
	ASSERT3U(rs->rs_end, <=, sm->sm_start + sm->sm_size);

	while (size != 0) {
		ASSERT3P(block_cursor, <=, block_end);

		/*
		 * If we are at the end of this block, flush it and start
		 * writing again from the beginning.
		 */
		if (block_cursor == block_end) {
			dmu_buf_rele(db, tag);

			uint64_t next_word_offset = sm->sm_phys->smp_objsize;
			VERIFY0(dmu_buf_hold(sm->sm_os,
			    space_map_object(sm), next_word_offset,
			    tag, &db, DMU_READ_PREFETCH));
			dmu_buf_will_dirty(db, tx);

			/* update caller's dbuf */
			*dbp = db;

			ASSERT3U(db->db_size, ==, sm->sm_blksz);

			block_base = db->db_data;
			block_cursor = block_base;
			block_end = block_base +
			    (db->db_size / sizeof (uint64_t));
		}

		/*
		 * If we are writing a two-word entry and we only have one
		 * word left on this block, just pad it with an empty debug
		 * entry and write the two-word entry in the next block.
		 */
		uint64_t *next_entry = block_cursor + 1;
		if (next_entry == block_end && words > 1) {
			ASSERT3U(words, ==, 2);
			*block_cursor = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
			    SM_DEBUG_ACTION_ENCODE(0) |
			    SM_DEBUG_SYNCPASS_ENCODE(0) |
			    SM_DEBUG_TXG_ENCODE(0);
			block_cursor++;
			sm->sm_phys->smp_objsize += sizeof (uint64_t);
			ASSERT3P(block_cursor, ==, block_end);
			continue;
		}

		uint64_t run_len = MIN(size, run_max);
		switch (words) {
		case 1:
			*block_cursor = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);
			block_cursor++;
			break;
		case 2:
			/* write the first word of the entry */
			*block_cursor = SM_PREFIX_ENCODE(SM2_PREFIX) |
			    SM2_RUN_ENCODE(run_len) |
			    SM2_VDEV_ENCODE(vdev_id);
			block_cursor++;

			/* move on to the second word of the entry */
			ASSERT3P(block_cursor, <, block_end);
			*block_cursor = SM2_TYPE_ENCODE(maptype) |
			    SM2_OFFSET_ENCODE(start);
			block_cursor++;
			break;
		default:
			panic("%d-word space map entries are not supported",
			    words);
			break;
		}
		sm->sm_phys->smp_objsize += words * sizeof (uint64_t);

		start += run_len;
		size -= run_len;
	}
	ASSERT0(size);
}
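/*
 * Worked example for the loop above (illustrative): a segment whose size
 * in sm_shift units exceeds run_max is emitted as multiple consecutive
 * entries, e.g. size = 2 * run_max + 7 becomes three entries with runs
 * run_max, run_max, and 7, each at the appropriately advanced offset.
 * range_tree_add() coalesces the adjacent pieces again at load time, so
 * splitting is purely an encoding concern.
 */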
/*
 * Note: The space map's dbuf must be dirty for the changes in sm_phys to
 * take effect.
 */
static void
space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dmu_buf_t *db;

	space_map_write_intro_debug(sm, maptype, tx);

#ifdef DEBUG
	/*
	 * We do this right after we write the intro debug entry
	 * because the estimate does not take it into account.
	 */
	uint64_t initial_objsize = sm->sm_phys->smp_objsize;
	uint64_t estimated_growth =
	    space_map_estimate_optimal_size(sm, rt, SM_NO_VDEVID);
	uint64_t estimated_final_objsize = initial_objsize + estimated_growth;
#endif

	/*
	 * Find the offset right after the last word in the space map
	 * and use that to get a hold of the last block, so we can
	 * start appending to it.
	 */
	uint64_t next_word_offset = sm->sm_phys->smp_objsize;
	VERIFY0(dmu_buf_hold(sm->sm_os, space_map_object(sm),
	    next_word_offset, FTAG, &db, DMU_READ_PREFETCH));
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	dmu_buf_will_dirty(db, tx);

	avl_tree_t *t = &rt->rt_root;
	for (range_seg_t *rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t offset = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
		uint64_t length = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		uint8_t words = 1;

		/*
		 * We only write two-word entries when both of the following
		 * are true:
		 *
		 * [1] The feature is enabled.
		 * [2] The offset or run is too big for a single-word entry,
		 *     or the vdev_id is set (meaning not equal to
		 *     SM_NO_VDEVID).
		 *
		 * Note that for purposes of testing we've added the case that
		 * we write two-word entries occasionally when the feature is
		 * enabled and zfs_force_some_double_word_sm_entries has been
		 * set.
		 */
		if (spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    (offset >= (1ULL << SM_OFFSET_BITS) ||
		    length > SM_RUN_MAX ||
		    vdev_id != SM_NO_VDEVID ||
		    (zfs_force_some_double_word_sm_entries &&
		    spa_get_random(100) == 0)))
			words = 2;

		space_map_write_seg(sm, rs, maptype, vdev_id, words,
		    &db, FTAG, tx);
	}

	dmu_buf_rele(db, FTAG);

#ifdef DEBUG
	/*
	 * We expect our estimation to be based on the worst case
	 * scenario [see comment in space_map_estimate_optimal_size()].
	 * Therefore we expect the actual objsize to be equal to or less
	 * than whatever we estimated it to be.
	 */
	ASSERT3U(estimated_final_objsize, >=, sm->sm_phys->smp_objsize);
#endif
}
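/*
 * Illustrative sketch (hypothetical helper, compiled out): the two-word
 * decision from the loop above, factored out without the test-forcing
 * clause.  'offset' and 'length' are in sm_shift units, exactly as
 * computed in space_map_write_impl().
 */
#if 0
static boolean_t
sm_seg_needs_two_words(uint64_t offset, uint64_t length, uint64_t vdev_id)
{
	/* a one-word entry cannot encode any of these */
	return (offset >= (1ULL << SM_OFFSET_BITS) ||
	    length > SM_RUN_MAX ||
	    vdev_id != SM_NO_VDEVID);
}
#endif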
/*
 * Note: This function manipulates the state of the given space map but
 * does not hold any locks implicitly.  Thus the caller is responsible
 * for synchronizing writes to the space map.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number, but it is maintained for
	 * backwards compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_is_empty(rt)) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	uint64_t nodes = avl_numnodes(&rt->rt_root);
	uint64_t rt_space = range_tree_space(rt);

	space_map_write_impl(sm, rt, maptype, vdev_id, tx);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
}
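/*
 * Illustrative sketch (hypothetical caller, compiled out): persisting a
 * range tree of freed segments during spa sync.  'sm', 'rt', and 'tx'
 * are assumed to be set up by the caller; metaslab space maps pass
 * SM_NO_VDEVID since each one describes a single vdev.
 */
#if 0
	ASSERT(dmu_tx_is_syncing(tx));
	space_map_write(sm, rt, SM_FREE, SM_NO_VDEVID, tx);
	range_tree_vacate(rt, NULL, NULL);
#endif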
static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_os = os;
	sm->sm_object = object;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}
	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}
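/*
 * Illustrative sketch (compiled out): opening a space map by object
 * number and closing it again.  'smobj', 'start', 'size', and 'shift'
 * are hypothetical values normally read from the owning structure
 * (e.g. a metaslab's configuration).
 */
#if 0
	space_map_t *sm = NULL;
	int err = space_map_open(&sm, os, smobj, start, size, shift);
	if (err == 0) {
		/* ... space_map_load(), space_map_write(), etc. ... */
		space_map_close(sm);
	}
#endif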
void
space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != blocksize ||
	    doi.doi_metadata_block_size != 1 << space_map_ibs) {
		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
		    "object[%llu]: old bonus %u, old blocksz %u",
		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
		    doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset.  Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}

/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
	if (sm == NULL)
		return;

	sm->sm_alloc = sm->sm_phys->smp_alloc;
	sm->sm_length = sm->sm_phys->smp_objsize;
}

uint64_t
space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc_ibs(os, DMU_OT_SPACE_MAP, blocksize,
	    space_map_ibs, DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(os, smobj, &doi));
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY0(dmu_object_free(os, smobj, tx));
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	if (sm == NULL)
		return;

	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
	sm->sm_object = 0;
}
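/*
 * Illustrative sketch (hypothetical caller, compiled out): allocating a
 * fresh space map object during sync and releasing it later.  'blocksize'
 * and the place where 'smobj' gets persisted are assumptions of this
 * example.
 */
#if 0
	uint64_t smobj = space_map_alloc(os, blocksize, tx);
	/* ... store smobj on disk so it can later be space_map_open()ed ... */
	space_map_free_obj(os, smobj, tx);
#endif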
/*
 * Given a range tree, make a worst-case estimate of how much space
 * the tree's segments would take if they were written to the given
 * space map.
 */
uint64_t
space_map_estimate_optimal_size(space_map_t *sm, range_tree_t *rt,
    uint64_t vdev_id)
{
	spa_t *spa = dmu_objset_spa(sm->sm_os);
	uint64_t shift = sm->sm_shift;
	uint64_t *histogram = rt->rt_histogram;
	uint64_t entries_for_seg = 0;

	/*
	 * In order to get a quick estimate of the optimal size that this
	 * range tree would have on-disk as a space map, we iterate through
	 * its histogram buckets instead of iterating through its nodes.
	 *
	 * Note that this is a highest-bound/worst-case estimate for the
	 * following reasons:
	 *
	 * 1] We assume that we always add a debug padding for each block
	 *    we write and we also assume that we start at the last word
	 *    of a block attempting to write a two-word entry.
	 * 2] Rounding up errors due to the way segments are distributed
	 *    in the buckets of the range tree's histogram.
	 * 3] The activation of zfs_force_some_double_word_sm_entries
	 *    (tunable) when testing.
	 *
	 * = Math and Rounding Errors =
	 *
	 * The rt_histogram[i] bucket of a range tree represents the number
	 * of entries in [2^i, (2^(i+1))-1] of that range_tree.  Given
	 * that, we want to divide the buckets into groups: buckets that
	 * can be represented using a single-word entry, ones that can
	 * be represented with a double-word entry, and ones that can
	 * only be represented with multiple two-word entries.
	 *
	 * [Note that if the new encoding feature is not enabled there
	 * are only two groups: single-word entry buckets and multiple
	 * single-word entry buckets.  The information below assumes
	 * two-word entries are enabled, but it can easily be applied
	 * when the feature is not enabled.]
	 *
	 * To find the highest bucket that can be represented with a
	 * single-word entry we look at the maximum run that such an entry
	 * can have, which is 2^(SM_RUN_BITS + sm_shift) [remember that
	 * the run of a space map entry is shifted by sm_shift, thus we
	 * add it to the exponent].  This way, excluding the value of the
	 * maximum run that can be represented by a single-word entry,
	 * all runs that are smaller exist in buckets 0 to
	 * SM_RUN_BITS + shift - 1.
	 *
	 * To find the highest bucket that can be represented with a
	 * double-word entry, we follow the same approach.  Finally, any
	 * bucket higher than that is represented with multiple two-word
	 * entries.  To be more specific, if the highest bucket whose
	 * segments can be represented with a single two-word entry is X,
	 * then bucket X+1 will need 2 two-word entries for each of its
	 * segments, X+2 will need 4, X+3 will need 8, ...etc.
	 *
	 * With all of the above we make our estimation based on bucket
	 * groups.  There is a rounding error though.  As we mentioned in
	 * the example with the one-word entry, the maximum run that can
	 * be represented in a one-word entry, 2^(SM_RUN_BITS + shift), is
	 * not part of bucket SM_RUN_BITS + shift - 1.  Thus, segments of
	 * that length fall into the next bucket (and bucket group) where
	 * we start counting two-word entries and this is one more reason
	 * why the estimated size may end up being bigger than the actual
	 * size written.
	 */
	uint64_t size = 0;
	uint64_t idx = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) ||
	    (vdev_id == SM_NO_VDEVID && sm->sm_size < SM_OFFSET_MAX)) {

		/*
		 * If we are trying to force some double word entries just
		 * assume the worst-case of every single word entry being
		 * written as a double word entry.
		 */
		uint64_t entry_size =
		    (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    zfs_force_some_double_word_sm_entries) ?
		    (2 * sizeof (uint64_t)) : sizeof (uint64_t);

		uint64_t single_entry_max_bucket = SM_RUN_BITS + shift - 1;
		for (; idx <= single_entry_max_bucket; idx++)
			size += histogram[idx] * entry_size;

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2)) {
			for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
				ASSERT3U(idx, >=, single_entry_max_bucket);
				entries_for_seg =
				    1ULL << (idx - single_entry_max_bucket);
				size += histogram[idx] *
				    entries_for_seg * entry_size;
			}
			return (size);
		}
	}

	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2));

	uint64_t double_entry_max_bucket = SM2_RUN_BITS + shift - 1;
	for (; idx <= double_entry_max_bucket; idx++)
		size += histogram[idx] * 2 * sizeof (uint64_t);

	for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
		ASSERT3U(idx, >=, double_entry_max_bucket);
		entries_for_seg = 1ULL << (idx - double_entry_max_bucket);
		size += histogram[idx] *
		    entries_for_seg * 2 * sizeof (uint64_t);
	}

	/*
	 * Assume the worst case where we start with the padding at the end
	 * of the current block and we add an extra padding entry at the end
	 * of all subsequent blocks.
	 */
	size += ((size / sm->sm_blksz) + 1) * sizeof (uint64_t);

	return (size);
}
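/*
 * Worked example for the estimate above (illustrative): with sm_shift = 9
 * and histogram[12] = 10 as the only nonzero bucket (ten 4K-8K segments),
 * every segment fits in one word, so the entry cost is 10 * 8 = 80 bytes.
 * With SPACEMAP_V2 disabled the function returns exactly 80 via the early
 * return; with it enabled (and vdev_id == SM_NO_VDEVID, no forced double
 * words) the worst-case padding adds (80 / sm_blksz) + 1 = 1 extra word,
 * for a total of 88 bytes.
 */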
uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	ASSERT(sm->sm_dbuf != NULL);
	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}
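/*
 * Illustrative note: since the delta is smp_alloc - sm_alloc, the total
 * allocated space including the currently syncing txg is
 * space_map_allocated(sm) + space_map_alloc_delta(sm) == smp_alloc.
 */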