dmu_tx.c revision 286705
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);


dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}
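
/*
 * A sketch of the typical consumer-side lifecycle, along the lines of
 * the example documented in dmu.h; illustrative only, with names and
 * error handling simplified:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, object, offset, size, buf, tx);
 *	dmu_tx_commit(tx);
 */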

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz :
	    (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = highbit64(txh->txh_tx->tx_objset->os_recordsize) - 1;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
		} else {
			/*
			 * The blocksize can increase up to the recordsize,
			 * or if it is already more than the recordsize,
			 * up to the next power of 2.
			 */
			min_bs = highbit64(dn->dn_datablksz - 1);
			max_bs = MAX(max_bs, highbit64(dn->dn_datablksz - 1));
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start,
			    FALSE, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (err)
		txh->txh_tx->tx_err = err;
}
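
/*
 * An illustrative reading of the worst-case math above (the numbers are
 * an assumption for exposition, not from the original source): for an
 * object with 128K data blocks (min_bs = max_bs = 17) and 16K indirect
 * blocks holding 128 block pointers each (min_ibs = max_ibs = 14,
 * epbs = 14 - SPA_BLKPTRSHIFT = 7), even a 4K write at offset 0 is
 * charged one full 128K data block plus one 16K indirect block for
 * each of the seven possible indirect levels (bits = 47, 40, ..., 5),
 * roughly 240K of towrite before spa_get_asize() further inflates it
 * for mirror/raidz overhead in dmu_tx_try_assign().
 */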

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid > dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid + 1;

	}
	l0span = nblks;	/* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs,
		    FALSE, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(bp);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVEL, but calculate it with the
		 * given datablkshift and indblkshift. This makes the
		 * difference between 19 and 8 on large files.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		while (level++ < maxlevel) {
			txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
			    << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_FREE, 0, 0);

	/*
	 * Pretend that this operation will free 1GB of space.  This
	 * should be large enough to cancel out the largest write.
	 * We don't want to use something like UINT64_MAX, because that would
	 * cause overflows when doing math with these values (e.g. in
	 * dmu_tx_try_assign()).
	 */
	txh->txh_space_tofree = txh->txh_space_tounref = 1024 * 1024 * 1024;
}
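
/*
 * A sketch of how a caller deleting data might combine this with the
 * free holds (illustrative; not part of this revision):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_mark_netfree(tx);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *
 * The marking lets the free proceed even when the dataset is over its
 * refquota, since the operation should release more space than it
 * consumes.
 */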

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	int err;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;
	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off+len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	dsl_dataset_phys_t *ds_phys;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += MZAP_MAX_BLKSZ;
		else
			txh->txh_space_towrite += MZAP_MAX_BLKSZ;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += MZAP_MAX_BLKSZ;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	ds_phys = dsl_dataset_phys(dn->dn_objset->os_dsl_dataset);
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
	/*	return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent. This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                           *  +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint) *    +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->        *****         +
 *       |                                             ****             |
 *       +                                          ****                +
 * 100us +                                        **                    +
 *       +                                       *                      +
 *       |                                      *                       |
 *       +                                     *                        +
 *  10us +                                     *                        +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
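
/*
 * A worked example of the min_time formula (illustrative; assumes the
 * common defaults of zfs_delay_scale = 500000ns and
 * zfs_delay_min_dirty_percent = 60): with zfs_dirty_data_max = 1GB, a
 * transaction arriving when dirty data stands at 80% of the limit sees
 *
 *	min_tx_time = 500000 * (0.8GB - 0.6GB) / (1.0GB - 0.8GB)
 *		    = 500000ns = 500us
 *
 * which is the midpoint delay shown in the graphs above, i.e. about
 * 2000 transactions per second per waiter.
 */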
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)
		return;

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	now = gethrtime();
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	if (now > tx->tx_start + min_tx_time)
		return;

	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

#ifdef _KERNEL
#ifdef illumos
	mutex_enter(&curthread->t_delay_lock);
	while (cv_timedwait_hires(&curthread->t_delay_cv,
	    &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
	    CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
		continue;
	mutex_exit(&curthread->t_delay_lock);
#else
	pause_sbt("dmu_tx_delay", wakeup * SBT_1NS,
	    zfs_delay_resolution_ns * SBT_1NS, C_ABSOLUTE);
#endif
#else
	hrtime_t delta = wakeup - gethrtime();
	struct timespec ts;
	ts.tv_sec = delta / NANOSEC;
	ts.tv_nsec = delta % NANOSEC;
	(void) nanosleep(&ts, NULL);
#endif
}

static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_waited &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	TXG_WAITED.  Like TXG_NOWAIT, but indicates that dmu_tx_wait()
 *	has already been called on behalf of this operation (though
 *	most likely on a different tx).
 */
int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
	    txg_how == TXG_WAITED);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));

	if (txg_how == TXG_WAITED)
		tx->tx_waited = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
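
/*
 * A sketch of the TXG_NOWAIT retry pattern described above, as used by
 * callers that hold locks (illustrative; lock handling is elided):
 *
 * top:
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (error == ERESTART) {
 *		... drop locks ...
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		goto top;
 *	}
 */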

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	if (tx->tx_wait_dirty) {
		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		uint64_t dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_waited only has effect if the caller
		 * used TXG_WAIT.  Otherwise they are going to destroy
		 * this tx and try again.  The common case, zfs_write(),
		 * uses TXG_WAIT.
		 */
		tx->tx_waited = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		/*
		 * A dnode is assigned to the quiescing txg.  Wait for its
		 * transaction to complete.
		 */
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}


void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while (dcb = list_head(cb_list)) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}
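
/*
 * A sketch of commit-callback usage (illustrative; my_commit_cb and
 * cb_arg are hypothetical): a callback registered on a tx fires with
 * error == 0 once the txg the tx was assigned to syncs, or with
 * ECANCELED if the tx is aborted:
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, cb_arg);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	...
 *	dmu_tx_commit(tx);
 */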

/*
 * Interface to hold a bunch of attributes; used when creating new
 * files.  attrsize is the total size of all attributes to be added
 * during object creation.
 *
 * For updating or adding a single attribute, dmu_tx_hold_sa() should
 * be used instead.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If the blkptr doesn't exist yet, add the space to towrite. */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_OLD_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_OLD_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	} else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}
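
/*
 * An illustrative sketch of a file-create path reserving SA space
 * before assignment; "attrsize" here is a hypothetical total of the
 * initial attributes to be written:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_sa_create(tx, attrsize);
 *	... other holds, dmu_tx_assign(), object creation ...
 */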

/*
 * Hold an SA attribute.
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * Pass may_grow as B_TRUE when this update can increase the size of
 * an attribute, since growth may force a layout change or a move into
 * the spill block; size-preserving overwrites do not need it.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}
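
/*
 * An illustrative sketch of updating one attribute through an SA
 * handle; "attr" and "val" are hypothetical.  may_grow is B_TRUE here
 * because the update could enlarge the attribute:
 *
 *	dmu_tx_hold_sa(tx, hdl, B_TRUE);
 *	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
 *		error = sa_update(hdl, attr, &val, sizeof (val), tx);
 *		dmu_tx_commit(tx);
 *	}
 */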