/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>	/* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h>	/* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>	/* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);
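
/*
 * Each hold added to a tx records a worst-case estimate of the resources
 * the operation may consume: bytes of new write space (txh_space_towrite),
 * bytes that overwrite existing freeable blocks (txh_space_tooverwrite),
 * bytes freed (txh_space_tofree), bytes unreferenced (txh_space_tounref),
 * and memory held in dirty buffers (txh_memory_tohold, plus txh_fudge
 * slop).  dmu_tx_try_assign() sums these across all holds and reserves
 * the space via dsl_dir_tempreserve_space() before the tx is assigned
 * to an open txg.
 */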
dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}
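
/*
 * Walk the indirect chain from a level-0 block up toward the root,
 * charging each block not already seen (per the per-level history array)
 * to either tooverwrite (if it is freeable) or towrite, and charging its
 * on-disk size to tounref.
 */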
static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}
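
/*
 * Estimate the space a write of 'len' bytes at offset 'off' will consume,
 * in the worst case.  Reads the existing level-0 and level-1 blocks in
 * the range to catch i/o errors early, charges overwritten freeable
 * blocks to tooverwrite, and charges everything else (including any new
 * indirect blocks that may be needed) to towrite.
 */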
/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1
		 * blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
			min_ibs = max_ibs = dn->dn_indblkshift;
		} else if (dn->dn_indblkshift > max_ibs) {
			/*
			 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
			 * the code will still work correctly on older pools.
			 */
			min_ibs = max_ibs = dn->dn_indblkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = EFBIG;

	if (err)
		txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}
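
/*
 * Estimate the space reclaimed by freeing the range [off, off+len): walk
 * the level-1 indirect blocks covering the range, sum the freeable
 * level-0 blocks into tofree/tounref, and charge the indirect blocks
 * that must be read (and possibly dirtied) to memory_tohold.
 */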
static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid >= dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid;

	}
	l0span = nblks;	/* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(bp);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVELS, but calculate it with the
		 * given datablkshift and indblkshift.  This makes the
		 * difference between 19 and 8 on large files.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		while (level++ < maxlevel) {
			txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
			    << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The above count_write's
	 * have already taken care of the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}
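
/*
 * Hold a ZAP object for modification.  'add' is TRUE if the caller
 * intends to add a new entry; 'name' is the name of the entry to be
 * added or removed, or NULL if it is not known in advance.
 */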
void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
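/*
 * Debug-only verification that the dbuf being dirtied is covered by some
 * hold on this tx, matching both the object and the offset range; panic
 * if it is not.
 */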
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif
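
/*
 * Try to assign the tx to the currently open txg: take a tx hold on each
 * dnode involved, total up the worst-case space estimates from all holds,
 * and temporarily reserve that space in the dsl_dir.  Returns ERESTART
 * (or EIO, for a suspended pool in failmode=continue) if the tx cannot
 * be assigned right now.
 */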
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (EIO);

		return (ERESTART);
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (ERESTART);
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * NB: This check must be after we've held the dnodes, so that
	 * the dmu_tx_unassign() logic will work properly
	 */
	if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg)
		return (ERESTART);

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}
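
/*
 * Undo the work of dmu_tx_try_assign(): drop the dnode tx holds taken so
 * far (up to, but not including, the hold that failed) and release this
 * tx's hold on the open txg.
 */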
static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	A specific txg.  Use this if you need to ensure that multiple
 *	transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *	returns ERESTART if it can't assign you into the requested txg.
 */
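/*
 * A typical TXG_NOWAIT caller looks like this (sketch; locking and the
 * caller's own modifications omitted):
 *
 *	top:
 *		tx = dmu_tx_create(os);
 *		dmu_tx_hold_write(tx, object, off, len);
 *		err = dmu_tx_assign(tx, TXG_NOWAIT);
 *		if (err != 0) {
 *			if (err == ERESTART) {
 *				dmu_tx_wait(tx);
 *				dmu_tx_abort(tx);
 *				goto top;
 *			}
 *			dmu_tx_abort(tx);
 *			return (err);
 *		}
 *		... modify the held object (e.g. dmu_write()) ...
 *		dmu_tx_commit(tx);
 */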
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how != 0);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
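
/*
 * Wait until it is likely that another dmu_tx_assign() attempt will
 * succeed: until the pool resumes from suspension, until the dnode hold
 * that blocked us is released, or until the next txg opens.
 */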
void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx.  If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}
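
/*
 * Example (sketch): a caller that needs to know when its changes reach
 * stable storage might register a callback before committing; 'my_done'
 * and 'arg' below are hypothetical:
 *
 *	static void
 *	my_done(void *arg, int error)
 *	{
 *		... error is 0 once the txg commits, ECANCELED if the
 *		tx is aborted ...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_done, arg);
 *	dmu_tx_commit(tx);
 */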

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while (dcb = list_head(cb_list)) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}

/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}
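
/*
 * Account for dirtying the object's spill block: charge SPA_MAXBLOCKSIZE
 * to tooverwrite if an existing spill block is freeable, otherwise to
 * towrite (and to tounref if a spill block already exists).
 */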
void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow indicates that the attribute data may grow, in which case the
 * layout attribute ZAP and (possibly) the spill block must be held as
 * well.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}