dmu_tx.c revision 296519
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);


dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}
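
/*
 * Example (a sketch, not taken verbatim from any particular caller): the
 * typical lifecycle of a tx created against an objset.  `os', `obj', `off',
 * `len' and `buf' are placeholders for the caller's own state:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, obj, off, len);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, obj, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 */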

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = highbit64(txh->txh_tx->tx_objset->os_recordsize) - 1;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
		} else {
			/*
			 * The blocksize can increase up to the recordsize,
			 * or if it is already more than the recordsize,
			 * up to the next power of 2.
			 */
			min_bs = highbit64(dn->dn_datablksz - 1);
			max_bs = MAX(max_bs, highbit64(dn->dn_datablksz - 1));
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start,
			    FALSE, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (err)
		txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid > dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid + 1;

	}
	l0span = nblks;	/* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs,
		    FALSE, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(bp);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVEL, but calculate it with the
		 * given datablkshift and indblkshift.  This makes the
		 * difference between 19 and 8 on large files.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		while (level++ < maxlevel) {
			txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
			    << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_FREE, 0, 0);

	/*
	 * Pretend that this operation will free 1GB of space.  This
	 * should be large enough to cancel out the largest write.
	 * We don't want to use something like UINT64_MAX, because that would
	 * cause overflows when doing math with these values (e.g. in
	 * dmu_tx_try_assign()).
	 */
	txh->txh_space_tofree = txh->txh_space_tounref = 1024 * 1024 * 1024;
}
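
/*
 * Example (a sketch modeled on dmu_free_long_object()): freeing an entire
 * object is expected to be a net free, so the free holds are paired with
 * dmu_tx_mark_netfree() to keep refquotas from rejecting the free:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_bonus(tx, object);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_mark_netfree(tx);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 */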

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	int err;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;
	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off+len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	dsl_dataset_phys_t *ds_phys;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block  (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += MZAP_MAX_BLKSZ;
		else
			txh->txh_space_towrite += MZAP_MAX_BLKSZ;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += MZAP_MAX_BLKSZ;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	ds_phys = dsl_dataset_phys(dn->dn_objset->os_dsl_dataset);
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}
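
/*
 * Example (a sketch in the style of the ZPL callers; `zfsvfs', `dzp' and
 * `name' are hypothetical): adding a directory entry dirties the
 * directory's ZAP object, so the caller holds it with add == TRUE along
 * with the bonus of any object whose attributes will change:
 *
 *	tx = dmu_tx_create(zfsvfs->z_os);
 *	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
 *	dmu_tx_hold_bonus(tx, dzp->z_id);
 */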

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.  Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
993258632Savg * 994258632Savg * delay 995258632Savg * 10ms +-------------------------------------------------------------*+ 996258632Savg * | *| 997258632Savg * 9ms + *+ 998258632Savg * | *| 999258632Savg * 8ms + *+ 1000258632Savg * | * | 1001258632Savg * 7ms + * + 1002258632Savg * | * | 1003258632Savg * 6ms + * + 1004258632Savg * | * | 1005258632Savg * 5ms + * + 1006258632Savg * | * | 1007258632Savg * 4ms + * + 1008258632Savg * | * | 1009258632Savg * 3ms + * + 1010258632Savg * | * | 1011258632Savg * 2ms + (midpoint) * + 1012258632Savg * | | ** | 1013258632Savg * 1ms + v *** + 1014258632Savg * | zfs_delay_scale ----------> ******** | 1015258632Savg * 0 +-------------------------------------*********----------------+ 1016258632Savg * 0% <- zfs_dirty_data_max -> 100% 1017258632Savg * 1018258632Savg * Note that since the delay is added to the outstanding time remaining on the 1019258632Savg * most recent transaction, the delay is effectively the inverse of IOPS. 1020258632Savg * Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve 1021258632Savg * was chosen such that small changes in the amount of accumulated dirty data 1022258632Savg * in the first 3/4 of the curve yield relatively small differences in the 1023258632Savg * amount of delay. 1024258632Savg * 1025258632Savg * The effects can be easier to understand when the amount of delay is 1026258632Savg * represented on a log scale: 1027258632Savg * 1028258632Savg * delay 1029258632Savg * 100ms +-------------------------------------------------------------++ 1030258632Savg * + + 1031258632Savg * | | 1032258632Savg * + *+ 1033258632Savg * 10ms + *+ 1034258632Savg * + ** + 1035258632Savg * | (midpoint) ** | 1036258632Savg * + | ** + 1037258632Savg * 1ms + v **** + 1038258632Savg * + zfs_delay_scale ----------> ***** + 1039258632Savg * | **** | 1040258632Savg * + **** + 1041258632Savg * 100us + ** + 1042258632Savg * + * + 1043258632Savg * | * | 1044258632Savg * + * + 1045258632Savg * 10us + * + 1046258632Savg * + + 1047258632Savg * | | 1048258632Savg * + + 1049258632Savg * +--------------------------------------------------------------+ 1050258632Savg * 0% <- zfs_dirty_data_max -> 100% 1051258632Savg * 1052258632Savg * Note here that only as the amount of dirty data approaches its limit does 1053258632Savg * the delay start to increase rapidly. The goal of a properly tuned system 1054258632Savg * should be to keep the amount of dirty data out of that range by first 1055258632Savg * ensuring that the appropriate limits are set for the I/O scheduler to reach 1056258632Savg * optimal throughput on the backend storage, and then by changing the value 1057258632Savg * of zfs_delay_scale to increase the steepness of the curve. 1058258632Savg */ 1059258632Savgstatic void 1060258632Savgdmu_tx_delay(dmu_tx_t *tx, uint64_t dirty) 1061258632Savg{ 1062258632Savg dsl_pool_t *dp = tx->tx_pool; 1063258632Savg uint64_t delay_min_bytes = 1064258632Savg zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100; 1065258632Savg hrtime_t wakeup, min_tx_time, now; 1066258632Savg 1067258632Savg if (dirty <= delay_min_bytes) 1068258632Savg return; 1069258632Savg 1070258632Savg /* 1071258632Savg * The caller has already waited until we are under the max. 1072258632Savg * We make them pass us the amount of dirty data so we don't 1073258632Savg * have to handle the case of it being >= the max, which could 1074258632Savg * cause a divide-by-zero if it's == the max. 
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)
		return;

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	now = gethrtime();
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	if (now > tx->tx_start + min_tx_time)
		return;

	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

#ifdef _KERNEL
#ifdef illumos
	mutex_enter(&curthread->t_delay_lock);
	while (cv_timedwait_hires(&curthread->t_delay_cv,
	    &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
	    CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
		continue;
	mutex_exit(&curthread->t_delay_lock);
#else
	pause_sbt("dmu_tx_delay", wakeup * SBT_1NS,
	    zfs_delay_resolution_ns * SBT_1NS, C_ABSOLUTE);
#endif
#else
	hrtime_t delta = wakeup - gethrtime();
	struct timespec ts;
	ts.tv_sec = delta / NANOSEC;
	ts.tv_nsec = delta % NANOSEC;
	(void) nanosleep(&ts, NULL);
#endif
}

static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_waited &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	TXG_WAITED.  Like TXG_NOWAIT, but indicates that dmu_tx_wait()
 *	has already been called on behalf of this operation (though
 *	most likely on a different tx).
 */
int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
	    txg_how == TXG_WAITED);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));

	if (txg_how == TXG_WAITED)
		tx->tx_waited = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
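
/*
 * A sketch of the TXG_NOWAIT convention described above, in the style of
 * the ZPL callers (the `top' label and the locks are the caller's own):
 *
 *	top:
 *		acquire locks;
 *		tx = dmu_tx_create(os);
 *		dmu_tx_hold_*(tx, ...);
 *		error = dmu_tx_assign(tx, TXG_NOWAIT);
 *		if (error != 0) {
 *			drop locks;
 *			if (error == ERESTART) {
 *				dmu_tx_wait(tx);
 *				dmu_tx_abort(tx);
 *				goto top;
 *			}
 *			dmu_tx_abort(tx);
 *			return (error);
 *		}
 *		...
 *		dmu_tx_commit(tx);
 */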

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	if (tx->tx_wait_dirty) {
		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		uint64_t dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_waited only has effect if the caller
		 * used TXG_WAIT.  Otherwise they are going to destroy
		 * this tx and try again.  The common case, zfs_write(),
		 * uses TXG_WAIT.
		 */
		tx->tx_waited = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		/*
		 * A dnode is assigned to the quiescing txg.  Wait for its
		 * transaction to complete.
		 */
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}


void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while (dcb = list_head(cb_list)) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}
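
/*
 * Example (a sketch; `my_cb' and `my_state' are hypothetical): a caller
 * that must learn when its changes reach stable storage registers a
 * callback before committing.  The callback runs with error == 0 once the
 * txg syncs, or with ECANCELED if the tx is aborted instead:
 *
 *	static void
 *	my_cb(void *my_state, int error)
 *	{
 *		...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_cb, my_state);
 *	dmu_tx_commit(tx);
 */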
/*
 * Interface to hold a bunch of attributes; used for creating new files.
 * attrsize is the total size of all attributes to be added during
 * object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If the spill block pointer doesn't exist, add space to towrite. */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_OLD_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_OLD_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	} else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}
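
/*
 * Illustrative sketch, not compiled into this file (#if 0): a creation
 * path sizing its SA hold.  The attribute sizes and function name are
 * hypothetical; a real caller (e.g. the ZPL) totals every attribute the
 * new object will be born with.
 */
#if 0
static void
example_hold_create(dmu_tx_t *tx)
{
	int attrsize = 0;

	attrsize += 2 * sizeof (uint64_t);	/* e.g. a timestamp [2] */
	attrsize += sizeof (uint64_t);		/* e.g. a size attribute */

	/*
	 * If attrsize exceeds DN_MAX_BONUSLEN (or a spill block is
	 * forced), dmu_tx_hold_sa_create() also holds a spill block.
	 */
	dmu_tx_hold_sa_create(tx, attrsize);
}
#endif
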
/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow should be set when the write may add attributes or grow
 * existing variable-sized attributes, since either can change the SA
 * layout or push the attributes into a spill block.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}
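
/*
 * Illustrative sketch, not compiled into this file (#if 0): updating a
 * single fixed-size attribute through an existing handle.  The attribute
 * id SA_EXAMPLE_ATTR and the function name are hypothetical.
 */
#if 0
static int
example_sa_update(objset_t *os, sa_handle_t *hdl, uint64_t val)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	/* B_FALSE: the attribute already exists and will not grow. */
	dmu_tx_hold_sa(tx, hdl, B_FALSE);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	err = sa_update(hdl, SA_EXAMPLE_ATTR, &val, sizeof (val), tx);
	dmu_tx_commit(tx);
	return (err);
}
#endif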