dmu_tx.c revision 1.5
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>	/* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h>	/* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>	/* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);


dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
	refcount_create(&txh->txh_space_towrite);
	refcount_create(&txh->txh_space_tofree);
	refcount_create(&txh->txh_space_tooverwrite);
	refcount_create(&txh->txh_space_tounref);
	refcount_create(&txh->txh_memory_tohold);
	refcount_create(&txh->txh_fudge);
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable) {
		(void) refcount_add_many(&txh->txh_space_tooverwrite,
		    space, FTAG);
	} else {
		(void) refcount_add_many(&txh->txh_space_towrite,
		    space, FTAG);
	}

	if (bp) {
		(void) refcount_add_many(&txh->txh_space_tounref,
		    bp_get_dsize(os->os_spa, bp), FTAG);
	}

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = highbit64(txh->txh_tx->tx_objset->os_recordsize) - 1;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
		} else {
			/*
			 * The blocksize can increase up to the recordsize,
			 * or if it is already more than the recordsize,
			 * up to the next power of 2.
			 */
			min_bs = highbit64(dn->dn_datablksz - 1);
			max_bs = MAX(max_bs, highbit64(dn->dn_datablksz - 1));
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start,
			    FALSE, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs) {
					(void) refcount_add_many(
					    &txh->txh_fudge,
					    1ULL << max_ibs, FTAG);
				}
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	(void) refcount_add_many(&txh->txh_space_towrite,
	    end - start + 1, FTAG);

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		(void) refcount_add_many(&txh->txh_space_towrite,
		    (end - start + 1) << max_ibs, FTAG);
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			(void) refcount_add_many(&txh->txh_space_towrite,
			    1ULL << max_ibs, FTAG);
		}
	}

out:
	if (refcount_count(&txh->txh_space_towrite) +
	    refcount_count(&txh->txh_space_tooverwrite) >
	    2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (err)
		txh->txh_tx->tx_err = err;
}
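/*
 * Worked example for the indirect-block accounting in
 * dmu_tx_count_write() above (illustrative only; the block sizes are
 * assumptions, not values taken from this file): for a 128K record
 * (min_bs = max_bs = 17) with 16K indirect blocks (min_ibs = 14),
 * epbs = 14 - SPA_BLKPTRSHIFT = 14 - 7 = 7, so each indirect level
 * fans out into 2^7 = 128 block pointers.  The final loop then runs
 * with bits = 47, 40, 33, ..., charging one max_ibs-sized indirect
 * block per level that the [start, end] range can touch, plus an
 * extra blkid=0 indirect per level when the write does not start at
 * the beginning of the object.
 */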
static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((uint64_t)(mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		(void) refcount_add_many(&txh->txh_space_tooverwrite,
		    space, FTAG);
		(void) refcount_add_many(&txh->txh_space_tounref, space, FTAG);
	} else {
		(void) refcount_add_many(&txh->txh_space_towrite, space, FTAG);
		if (dn && dn->dn_dbuf->db_blkptr) {
			(void) refcount_add_many(&txh->txh_space_tounref,
			    space, FTAG);
		}
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid > dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid + 1;

	}
	l0span = nblks;	/* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs,
		    FALSE, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		(void) refcount_add_many(&txh->txh_memory_tohold,
		    dbuf->db.db_size, FTAG);

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(&bp[i]);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVEL, but calculate it with the
		 * given datablkshift and indblkshift. This makes the
		 * difference between 19 and 8 on large files.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		while (level++ < maxlevel) {
			(void) refcount_add_many(&txh->txh_memory_tohold,
			    MAX(MIN(blkcnt, nl1blks), 1) << dn->dn_indblkshift,
			    FTAG);
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		(void) refcount_add_many(&txh->txh_fudge,
		    skipped << dn->dn_indblkshift, FTAG);
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		(void) refcount_add_many(&txh->txh_memory_tohold,
		    skipped << dn->dn_indblkshift, FTAG);
	}
	(void) refcount_add_many(&txh->txh_space_tofree, space, FTAG);
	(void) refcount_add_many(&txh->txh_space_tounref, unref, FTAG);
}
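/*
 * Worked example for the maxlevel computation in dmu_tx_count_free()
 * above (illustrative; the shifts are assumptions): with
 * DN_MAX_OFFSET_SHIFT = 64, 128K data blocks (datablkshift = 17) and
 * 128K indirect blocks (indblkshift = 17, so epbs = 17 - 7 = 10),
 * maxlevel = 2 + (64 - 17) / 10 = 6.  Deriving the level count from
 * the actual shifts charges memory for far fewer indirect levels than
 * a fixed worst-case constant would.
 */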
/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_FREE, 0, 0);

	/*
	 * Pretend that this operation will free 1GB of space.  This
	 * should be large enough to cancel out the largest write.
	 * We don't want to use something like UINT64_MAX, because that would
	 * cause overflows when doing math with these values (e.g. in
	 * dmu_tx_try_assign()).
	 */
	(void) refcount_add_many(&txh->txh_space_tofree,
	    1024 * 1024 * 1024, FTAG);
	(void) refcount_add_many(&txh->txh_space_tounref,
	    1024 * 1024 * 1024, FTAG);
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	int err;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;
	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;


	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off+len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	int err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block  (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth)) {
			(void) refcount_add_many(&txh->txh_space_tooverwrite,
			    MZAP_MAX_BLKSZ, FTAG);
		} else {
			(void) refcount_add_many(&txh->txh_space_towrite,
			    MZAP_MAX_BLKSZ, FTAG);
		}
		if (!BP_IS_HOLE(bp)) {
			(void) refcount_add_many(&txh->txh_space_tounref,
			    MZAP_MAX_BLKSZ, FTAG);
		}
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write_by_dnode(dn, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.  We can make
	 * modifications at up to 3 locations:
	 *  - header block at the beginning of the object
	 *  - target leaf block
	 *  - end of the object, where we might need to write:
	 *	- a new leaf block if the target block needs to be split
	 *	- the new pointer table, if it is growing
	 *	- the new cookie table, if it is growing
	 */
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dsl_dataset_phys_t *ds_phys =
	    dsl_dataset_phys(dn->dn_objset->os_dsl_dataset);
	for (int lvl = 1; lvl < dn->dn_nlevels; lvl++) {
		uint64_t num_indirects = 1 + (dn->dn_maxblkid >> (epbs * lvl));
		uint64_t spc = MIN(3, num_indirects) << dn->dn_indblkshift;
		if (ds_phys->ds_prev_snap_obj != 0) {
			(void) refcount_add_many(&txh->txh_space_towrite,
			    spc, FTAG);
		} else {
			(void) refcount_add_many(&txh->txh_space_tooverwrite,
			    spc, FTAG);
		}
	}
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	(void) refcount_add_many(&txh->txh_space_towrite, space, FTAG);
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.  Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                           *  +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint) *    +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->        *****         +
 *       |                                             ****             |
 *       +                                          ****                +
 * 100us +                                        **                    +
 *       +                                       *                      +
 *       |                                      *                       |
 *       +                                      *                       +
 *  10us +                                     *                        +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
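/*
 * Worked example of the curve above (illustrative; assumes the common
 * defaults zfs_delay_scale = 500,000 and zfs_delay_min_dirty_percent
 * = 60): with zfs_dirty_data_max = 4GB, delaying starts at
 * delay_min_bytes = 2.4GB.  At dirty = 3.2GB, the midpoint between
 * 2.4GB and 4GB,
 *	min_time = 500000 * (3.2G - 2.4G) / (4G - 3.2G) = 500000ns
 * i.e. the 500us midpoint delay mentioned above, or roughly 2000 IOPS.
 */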
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)
		return;

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	now = gethrtime();
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	if (now > tx->tx_start + min_tx_time)
		return;

	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

#ifdef _KERNEL
#ifdef illumos
	mutex_enter(&curthread->t_delay_lock);
	while (cv_timedwait_hires(&curthread->t_delay_cv,
	    &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
	    CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
		continue;
	mutex_exit(&curthread->t_delay_lock);
#endif
#ifdef __FreeBSD__
	pause_sbt("dmu_tx_delay", wakeup * SBT_1NS,
	    zfs_delay_resolution_ns * SBT_1NS, C_ABSOLUTE);
#endif
#ifdef __NetBSD__
	int timo = (wakeup - now) * hz / 1000000000;

	if (timo == 0)
		timo = 1;
	kpause("dmu_tx_delay", false, timo, NULL);
#endif
#else
	hrtime_t delta = wakeup - gethrtime();
	struct timespec ts;
	ts.tv_sec = delta / NANOSEC;
	ts.tv_nsec = delta % NANOSEC;
	(void) nanosleep(&ts, NULL);
#endif
}

static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_waited &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += refcount_count(&txh->txh_space_towrite);
		tofree += refcount_count(&txh->txh_space_tofree);
		tooverwrite += refcount_count(&txh->txh_space_tooverwrite);
		tounref += refcount_count(&txh->txh_space_tounref);
		tohold += refcount_count(&txh->txh_memory_tohold);
		fudge += refcount_count(&txh->txh_fudge);
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unref'd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	TXG_WAITED.  Like TXG_NOWAIT, but indicates that dmu_tx_wait()
 *	has already been called on behalf of this operation (though
 *	most likely on a different tx).
 */
int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
	    txg_how == TXG_WAITED);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));

	if (txg_how == TXG_WAITED)
		tx->tx_waited = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
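/*
 * A typical caller of the TXG_NOWAIT path described above (a sketch
 * for illustration only; "os", "object", "off" and "len" are
 * placeholders, not names from this file):
 *
 * top:
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (error == ERESTART) {
 *		... drop locks ...
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		goto top;		// rebuild the holds and retry
 *	} else if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	... modify the data covered by the holds ...
 *	dmu_tx_commit(tx);
 */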
void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	if (tx->tx_wait_dirty) {
		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		uint64_t dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_waited only has effect if the caller
		 * used TXG_WAIT.  Otherwise they are going to destroy
		 * this tx and try again.  The common case, zfs_write(),
		 * uses TXG_WAIT.
		 */
		tx->tx_waited = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		/*
		 * A dnode is assigned to the quiescing txg.  Wait for its
		 * transaction to complete.
		 */
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
/* FreeBSD r318821, illumos 7793 ztest fails assertion in dmu_tx_willuse_space
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
*/
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

static void
dmu_tx_destroy(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		refcount_destroy_many(&txh->txh_space_towrite,
		    refcount_count(&txh->txh_space_towrite));
		refcount_destroy_many(&txh->txh_space_tofree,
		    refcount_count(&txh->txh_space_tofree));
		refcount_destroy_many(&txh->txh_space_tooverwrite,
		    refcount_count(&txh->txh_space_tooverwrite));
		refcount_destroy_many(&txh->txh_space_tounref,
		    refcount_count(&txh->txh_space_tounref));
		refcount_destroy_many(&txh->txh_memory_tohold,
		    refcount_count(&txh->txh_memory_tohold));
		refcount_destroy_many(&txh->txh_fudge,
		    refcount_count(&txh->txh_fudge));
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;

		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
#endif
	dmu_tx_destroy(tx);
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg == 0);

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	dmu_tx_destroy(tx);
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}


void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while ((dcb = list_head(cb_list)) != NULL) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}
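/*
 * Example use of the commit-callback interface above (a sketch for
 * illustration; my_cb_func and my_cb_arg_t are hypothetical names):
 *
 *	static void
 *	my_cb_func(void *arg, int error)
 *	{
 *		// error is 0 once the txg has committed, or ECANCELED
 *		// if the transaction was aborted instead
 *		kmem_free(arg, sizeof (my_cb_arg_t));
 *	}
 *
 *	dmu_tx_callback_register(tx, my_cb_func, arg);
 *	dmu_tx_commit(tx);	// my_cb_func runs after the txg syncs
 */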
1528 */ 1529 if (!list_is_empty(&tx->tx_callbacks)) 1530 dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED); 1531 1532 dmu_tx_destroy(tx); 1533} 1534 1535uint64_t 1536dmu_tx_get_txg(dmu_tx_t *tx) 1537{ 1538 ASSERT(tx->tx_txg != 0); 1539 return (tx->tx_txg); 1540} 1541 1542dsl_pool_t * 1543dmu_tx_pool(dmu_tx_t *tx) 1544{ 1545 ASSERT(tx->tx_pool != NULL); 1546 return (tx->tx_pool); 1547} 1548 1549 1550void 1551dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data) 1552{ 1553 dmu_tx_callback_t *dcb; 1554 1555 dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP); 1556 1557 dcb->dcb_func = func; 1558 dcb->dcb_data = data; 1559 1560 list_insert_tail(&tx->tx_callbacks, dcb); 1561} 1562 1563/* 1564 * Call all the commit callbacks on a list, with a given error code. 1565 */ 1566void 1567dmu_tx_do_callbacks(list_t *cb_list, int error) 1568{ 1569 dmu_tx_callback_t *dcb; 1570 1571 while ((dcb = list_head(cb_list)) != NULL) { 1572 list_remove(cb_list, dcb); 1573 dcb->dcb_func(dcb->dcb_data, error); 1574 kmem_free(dcb, sizeof (dmu_tx_callback_t)); 1575 } 1576} 1577 1578/* 1579 * Interface to hold a bunch of attributes. 1580 * used for creating new files. 1581 * attrsize is the total size of all attributes 1582 * to be added during object creation 1583 * 1584 * For updating/adding a single attribute dmu_tx_hold_sa() should be used. 1585 */ 1586 1587/* 1588 * hold necessary attribute name for attribute registration. 1589 * should be a very rare case where this is needed. If it does 1590 * happen it would only happen on the first write to the file system. 1591 */ 1592static void 1593dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx) 1594{ 1595 int i; 1596 1597 if (!sa->sa_need_attr_registration) 1598 return; 1599 1600 for (i = 0; i != sa->sa_num_attrs; i++) { 1601 if (!sa->sa_attr_table[i].sa_registered) { 1602 if (sa->sa_reg_attr_obj) 1603 dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj, 1604 B_TRUE, sa->sa_attr_table[i].sa_name); 1605 else 1606 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, 1607 B_TRUE, sa->sa_attr_table[i].sa_name); 1608 } 1609 } 1610} 1611 1612 1613void 1614dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object) 1615{ 1616 dnode_t *dn; 1617 dmu_tx_hold_t *txh; 1618 1619 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object, 1620 THT_SPILL, 0, 0); 1621 1622 dn = txh->txh_dnode; 1623 1624 if (dn == NULL) 1625 return; 1626 1627 /* If blkptr doesn't exist then add space to towrite */ 1628 if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) { 1629 (void) refcount_add_many(&txh->txh_space_towrite, 1630 SPA_OLD_MAXBLOCKSIZE, FTAG); 1631 } else { 1632 blkptr_t *bp; 1633 1634 bp = &dn->dn_phys->dn_spill; 1635 if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset, 1636 bp, bp->blk_birth)) { 1637 (void) refcount_add_many(&txh->txh_space_tooverwrite, 1638 SPA_OLD_MAXBLOCKSIZE, FTAG); 1639 } else { 1640 (void) refcount_add_many(&txh->txh_space_towrite, 1641 SPA_OLD_MAXBLOCKSIZE, FTAG); 1642 } 1643 if (!BP_IS_HOLE(bp)) { 1644 (void) refcount_add_many(&txh->txh_space_tounref, 1645 SPA_OLD_MAXBLOCKSIZE, FTAG); 1646 } 1647 } 1648} 1649 1650void 1651dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize) 1652{ 1653 sa_os_t *sa = tx->tx_objset->os_sa; 1654 1655 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); 1656 1657 if (tx->tx_objset->os_sa->sa_master_obj == 0) 1658 return; 1659 1660 if (tx->tx_objset->os_sa->sa_layout_attr_obj) 1661 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL); 1662 else { 1663 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS); 1664 dmu_tx_hold_zap(tx, 
sa->sa_master_obj, B_TRUE, SA_REGISTRY); 1665 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); 1666 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); 1667 } 1668 1669 dmu_tx_sa_registration_hold(sa, tx); 1670 1671 if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill) 1672 return; 1673 1674 (void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT, 1675 THT_SPILL, 0, 0); 1676} 1677 1678/* 1679 * Hold SA attribute 1680 * 1681 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *, attribute, add, size) 1682 * 1683 * variable_size is the total size of all variable sized attributes 1684 * passed to this function. It is not the total size of all 1685 * variable size attributes that *may* exist on this object. 1686 */ 1687void 1688dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow) 1689{ 1690 uint64_t object; 1691 sa_os_t *sa = tx->tx_objset->os_sa; 1692 1693 ASSERT(hdl != NULL); 1694 1695 object = sa_handle_object(hdl); 1696 1697 dmu_tx_hold_bonus(tx, object); 1698 1699 if (tx->tx_objset->os_sa->sa_master_obj == 0) 1700 return; 1701 1702 if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 || 1703 tx->tx_objset->os_sa->sa_layout_attr_obj == 0) { 1704 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS); 1705 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY); 1706 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); 1707 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); 1708 } 1709 1710 dmu_tx_sa_registration_hold(sa, tx); 1711 1712 if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj) 1713 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL); 1714 1715 if (sa->sa_force_spill || may_grow || hdl->sa_spill) { 1716 ASSERT(tx->tx_txg == 0); 1717 dmu_tx_hold_spill(tx, object); 1718 } else { 1719 dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus; 1720 dnode_t *dn; 1721 1722 DB_DNODE_ENTER(db); 1723 dn = DB_DNODE(db); 1724 if (dn->dn_have_spill) { 1725 ASSERT(tx->tx_txg == 0); 1726 dmu_tx_hold_spill(tx, object); 1727 } 1728 DB_DNODE_EXIT(db); 1729 } 1730} 1731