dmu_objset.c revision 331612
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2015, STRATO AG, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/sa.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/vdev.h>

/*
 * Needed to close a window in dnode_move() that allows the objset to be freed
 * before it can be safely accessed.
 */
krwlock_t os_lock;

/*
 * Tunable to override the maximum number of threads for the parallelization
 * of dmu_objset_find_dp, needed to speed up the import of pools with many
 * datasets.
 * Default is 4 times the number of leaf vdevs.
 */
int dmu_find_threads = 0;

static void dmu_objset_find_dp_cb(void *arg);

void
dmu_objset_init(void)
{
	rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}

void
dmu_objset_fini(void)
{
	rw_destroy(&os_lock);
}

spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}

zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
	return (os->os_sync);
}

zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
	return (os->os_logbias);
}

static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	os->os_compress = zio_compress_select(os->os_spa, newval,
	    ZIO_COMPRESS_ON);
}

static void
copies_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval > 0);
	ASSERT(newval <= spa_max_replication(os->os_spa));

	os->os_copies = newval;
}

static void
dedup_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;
	spa_t *spa = os->os_spa;
	enum zio_checksum checksum;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);

	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}

static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_primary_cache = newval;
}

static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_secondary_cache = newval;
}

static void
sync_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
	    newval == ZFS_SYNC_DISABLED);

	os->os_sync = newval;
	if (os->os_zil)
		zil_set_sync(os->os_zil, newval);
}
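
/*
 * These *_changed_cb callbacks are registered in dmu_objset_open_impl()
 * below; dsl_prop_register() fires each callback once before it returns,
 * seeding the objset fields from the current property values, e.g.:
 *
 *	err = dsl_prop_register(ds,
 *	    zfs_prop_to_name(ZFS_PROP_SYNC), sync_changed_cb, os);
 */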

static void
redundant_metadata_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
	    newval == ZFS_REDUNDANT_METADATA_MOST);

	os->os_redundant_metadata = newval;
}

static void
logbias_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
	    newval == ZFS_LOGBIAS_THROUGHPUT);
	os->os_logbias = newval;
	if (os->os_zil)
		zil_set_logbias(os->os_zil, newval);
}

static void
recordsize_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	os->os_recordsize = newval;
}

void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
	osp->os_flags = BSWAP_64(osp->os_flags);
	if (size == sizeof (objset_phys_t)) {
		dnode_byteswap(&osp->os_userused_dnode);
		dnode_byteswap(&osp->os_groupused_dnode);
	}
}

int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
	objset_t *os;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
	os->os_dsl_dataset = ds;
	os->os_spa = spa;
	os->os_rootbp = bp;
	if (!BP_IS_HOLE(os->os_rootbp)) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		zbookmark_phys_t zb;
		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

		if (DMU_OS_IS_L2CACHEABLE(os))
			aflags |= ARC_FLAG_L2CACHE;

		dprintf_bp(os->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, os->os_rootbp,
		    arc_getbuf_func, &os->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err != 0) {
			kmem_free(os, sizeof (objset_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = SET_ERROR(EIO);
			return (err);
		}

		/* Increase the blocksize if we are permitted. */
		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
			arc_buf_t *buf = arc_alloc_buf(spa,
			    sizeof (objset_phys_t), &os->os_phys_buf,
			    ARC_BUFC_METADATA);
			bzero(buf->b_data, sizeof (objset_phys_t));
			bcopy(os->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(os->os_phys_buf));
			arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
			os->os_phys_buf = buf;
		}

		os->os_phys = os->os_phys_buf->b_data;
		os->os_flags = os->os_phys->os_flags;
	} else {
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
		os->os_phys_buf = arc_alloc_buf(spa, size,
		    &os->os_phys_buf, ARC_BUFC_METADATA);
		os->os_phys = os->os_phys_buf->b_data;
		bzero(os->os_phys, size);
	}
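
	/*
	 * At this point os_phys is sized either as a full objset_phys_t
	 * (SPA_VERSION_USERSPACE and later) or as OBJSET_OLD_PHYS_SIZE;
	 * the arc_buf_size() check below, before the user/group accounting
	 * dnodes are opened, keys off that size.
	 */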

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off). Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds != NULL) {
		boolean_t needlock = B_FALSE;

		/*
		 * Note: it's valid to open the objset if the dataset is
		 * long-held, in which case the pool_config lock will not
		 * be held.
		 */
		if (!dsl_pool_config_held(dmu_objset_pool(os))) {
			needlock = B_TRUE;
			dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
		}
		err = dsl_prop_register(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os);
		if (err == 0) {
			err = dsl_prop_register(ds,
			    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
			    secondary_cache_changed_cb, os);
		}
		if (!ds->ds_is_snapshot) {
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
				    checksum_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    compression_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COPIES),
				    copies_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_DEDUP),
				    dedup_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
				    logbias_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_SYNC),
				    sync_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(
				    ZFS_PROP_REDUNDANT_METADATA),
				    redundant_metadata_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
				    recordsize_changed_cb, os);
			}
		}
		if (needlock)
			dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
		if (err != 0) {
			arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	} else {
		/* It's the meta-objset. */
		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		os->os_compress = ZIO_COMPRESS_ON;
		os->os_copies = spa_max_replication(spa);
		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
		os->os_dedup_verify = B_FALSE;
		os->os_logbias = ZFS_LOGBIAS_LATENCY;
		os->os_sync = ZFS_SYNC_STANDARD;
		os->os_primary_cache = ZFS_CACHE_ALL;
		os->os_secondary_cache = ZFS_CACHE_ALL;
	}

	if (ds == NULL || !ds->ds_is_snapshot)
		os->os_zil_header = os->os_phys->os_zil_header;
	os->os_zil = zil_alloc(os, &os->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&os->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	dnode_special_open(os, &os->os_phys->os_meta_dnode,
	    DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
		dnode_special_open(os, &os->os_phys->os_userused_dnode,
		    DMU_USERUSED_OBJECT, &os->os_userused_dnode);
		dnode_special_open(os, &os->os_phys->os_groupused_dnode,
		    DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
	}

	*osp = os;
	return (0);
}
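
/*
 * A minimal usage sketch of the holds API defined below ("pool/fs" is a
 * hypothetical dataset name). Short-lived readers pair dmu_objset_hold()
 * with dmu_objset_rele(); long-lived consumers pair dmu_objset_own()
 * with dmu_objset_disown():
 *
 *	objset_t *os;
 *	int err = dmu_objset_hold("pool/fs", FTAG, &os);
 *	if (err == 0) {
 *		... read from os ...
 *		dmu_objset_rele(os, FTAG);
 *	}
 */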

int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	/*
	 * We shouldn't be doing anything with dsl_dataset_t's unless the
	 * pool_config lock is held, or the dataset is long-held.
	 */
	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool) ||
	    dsl_dataset_long_held(ds));

	mutex_enter(&ds->ds_opening_lock);
	if (ds->ds_objset == NULL) {
		objset_t *os;
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), &os);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		if (err == 0) {
			mutex_enter(&ds->ds_lock);
			ASSERT(ds->ds_objset == NULL);
			ds->ds_objset = os;
			mutex_exit(&ds->ds_lock);
		}
	}
	*osp = ds->ds_objset;
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}

/*
 * Holds the pool while the objset is held. Therefore only one objset
 * can be held at a time.
 */
int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, tag, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_hold(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	err = dmu_objset_from_ds(ds, osp);
	if (err != 0) {
		dsl_dataset_rele(ds, tag);
		dsl_pool_rele(dp, tag);
	}

	return (err);
}

static int
dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	int err;

	err = dmu_objset_from_ds(ds, osp);
	if (err != 0) {
		dsl_dataset_disown(ds, tag);
	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EINVAL));
	} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EROFS));
	}
	return (err);
}

/*
 * dsl_pool must not be held when this is called.
 * Upon successful return, there will be a longhold on the dataset,
 * and the dsl_pool will not be held.
 */
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_own(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}
	err = dmu_objset_own_impl(ds, type, readonly, tag, osp);
	dsl_pool_rele(dp, FTAG);

	return (err);
}

int
dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_own_obj(dp, obj, tag, &ds);
	if (err != 0)
		return (err);

	return (dmu_objset_own_impl(ds, type, readonly, tag, osp));
}

void
dmu_objset_rele(objset_t *os, void *tag)
{
	dsl_pool_t *dp = dmu_objset_pool(os);
	dsl_dataset_rele(os->os_dsl_dataset, tag);
	dsl_pool_rele(dp, tag);
}

/*
 * When we are called, os MUST refer to an objset associated with a dataset
 * that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
 * == tag. We will then release and reacquire ownership of the dataset while
 * holding the pool config_rwlock so that no intervening namespace or
 * ownership changes may occur.
612 * 613 * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to 614 * release the hold on its dataset and acquire a new one on the dataset of the 615 * same name so that it can be partially torn down and reconstructed. 616 */ 617void 618dmu_objset_refresh_ownership(dsl_dataset_t *ds, dsl_dataset_t **newds, 619 void *tag) 620{ 621 dsl_pool_t *dp; 622 char name[ZFS_MAX_DATASET_NAME_LEN]; 623 624 VERIFY3P(ds, !=, NULL); 625 VERIFY3P(ds->ds_owner, ==, tag); 626 VERIFY(dsl_dataset_long_held(ds)); 627 628 dsl_dataset_name(ds, name); 629 dp = ds->ds_dir->dd_pool; 630 dsl_pool_config_enter(dp, FTAG); 631 dsl_dataset_disown(ds, tag); 632 VERIFY0(dsl_dataset_own(dp, name, tag, newds)); 633 dsl_pool_config_exit(dp, FTAG); 634} 635 636void 637dmu_objset_disown(objset_t *os, void *tag) 638{ 639 dsl_dataset_disown(os->os_dsl_dataset, tag); 640} 641 642void 643dmu_objset_evict_dbufs(objset_t *os) 644{ 645 dnode_t dn_marker; 646 dnode_t *dn; 647 648 mutex_enter(&os->os_lock); 649 dn = list_head(&os->os_dnodes); 650 while (dn != NULL) { 651 /* 652 * Skip dnodes without holds. We have to do this dance 653 * because dnode_add_ref() only works if there is already a 654 * hold. If the dnode has no holds, then it has no dbufs. 655 */ 656 if (dnode_add_ref(dn, FTAG)) { 657 list_insert_after(&os->os_dnodes, dn, &dn_marker); 658 mutex_exit(&os->os_lock); 659 660 dnode_evict_dbufs(dn); 661 dnode_rele(dn, FTAG); 662 663 mutex_enter(&os->os_lock); 664 dn = list_next(&os->os_dnodes, &dn_marker); 665 list_remove(&os->os_dnodes, &dn_marker); 666 } else { 667 dn = list_next(&os->os_dnodes, dn); 668 } 669 } 670 mutex_exit(&os->os_lock); 671 672 if (DMU_USERUSED_DNODE(os) != NULL) { 673 dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os)); 674 dnode_evict_dbufs(DMU_USERUSED_DNODE(os)); 675 } 676 dnode_evict_dbufs(DMU_META_DNODE(os)); 677} 678 679/* 680 * Objset eviction processing is split into into two pieces. 681 * The first marks the objset as evicting, evicts any dbufs that 682 * have a refcount of zero, and then queues up the objset for the 683 * second phase of eviction. Once os->os_dnodes has been cleared by 684 * dnode_buf_pageout()->dnode_destroy(), the second phase is executed. 685 * The second phase closes the special dnodes, dequeues the objset from 686 * the list of those undergoing eviction, and finally frees the objset. 687 * 688 * NOTE: Due to asynchronous eviction processing (invocation of 689 * dnode_buf_pageout()), it is possible for the meta dnode for the 690 * objset to have no holds even though os->os_dnodes is not empty. 
691 */ 692void 693dmu_objset_evict(objset_t *os) 694{ 695 dsl_dataset_t *ds = os->os_dsl_dataset; 696 697 for (int t = 0; t < TXG_SIZE; t++) 698 ASSERT(!dmu_objset_is_dirty(os, t)); 699 700 if (ds) 701 dsl_prop_unregister_all(ds, os); 702 703 if (os->os_sa) 704 sa_tear_down(os); 705 706 dmu_objset_evict_dbufs(os); 707 708 mutex_enter(&os->os_lock); 709 spa_evicting_os_register(os->os_spa, os); 710 if (list_is_empty(&os->os_dnodes)) { 711 mutex_exit(&os->os_lock); 712 dmu_objset_evict_done(os); 713 } else { 714 mutex_exit(&os->os_lock); 715 } 716} 717 718void 719dmu_objset_evict_done(objset_t *os) 720{ 721 ASSERT3P(list_head(&os->os_dnodes), ==, NULL); 722 723 dnode_special_close(&os->os_meta_dnode); 724 if (DMU_USERUSED_DNODE(os)) { 725 dnode_special_close(&os->os_userused_dnode); 726 dnode_special_close(&os->os_groupused_dnode); 727 } 728 zil_free(os->os_zil); 729 730 arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf); 731 732 /* 733 * This is a barrier to prevent the objset from going away in 734 * dnode_move() until we can safely ensure that the objset is still in 735 * use. We consider the objset valid before the barrier and invalid 736 * after the barrier. 737 */ 738 rw_enter(&os_lock, RW_READER); 739 rw_exit(&os_lock); 740 741 mutex_destroy(&os->os_lock); 742 mutex_destroy(&os->os_obj_lock); 743 mutex_destroy(&os->os_user_ptr_lock); 744 spa_evicting_os_deregister(os->os_spa, os); 745 kmem_free(os, sizeof (objset_t)); 746} 747 748timestruc_t 749dmu_objset_snap_cmtime(objset_t *os) 750{ 751 return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir)); 752} 753 754/* called from dsl for meta-objset */ 755objset_t * 756dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp, 757 dmu_objset_type_t type, dmu_tx_t *tx) 758{ 759 objset_t *os; 760 dnode_t *mdn; 761 762 ASSERT(dmu_tx_is_syncing(tx)); 763 764 if (ds != NULL) 765 VERIFY0(dmu_objset_from_ds(ds, &os)); 766 else 767 VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os)); 768 769 mdn = DMU_META_DNODE(os); 770 771 dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT, 772 DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx); 773 774 /* 775 * We don't want to have to increase the meta-dnode's nlevels 776 * later, because then we could do it in quescing context while 777 * we are also accessing it in open context. 778 * 779 * This precaution is not necessary for the MOS (ds == NULL), 780 * because the MOS is only updated in syncing context. 781 * This is most fortunate: the MOS is the only objset that 782 * needs to be synced multiple times as spa_sync() iterates 783 * to convergence, so minimizing its dn_nlevels matters. 784 */ 785 if (ds != NULL) { 786 int levels = 1; 787 788 /* 789 * Determine the number of levels necessary for the meta-dnode 790 * to contain DN_MAX_OBJECT dnodes. Note that in order to 791 * ensure that we do not overflow 64 bits, there has to be 792 * a nlevels that gives us a number of blocks > DN_MAX_OBJECT 793 * but < 2^64. Therefore, 794 * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT) (10) must be 795 * less than (64 - log2(DN_MAX_OBJECT)) (16). 
796 */ 797 while ((uint64_t)mdn->dn_nblkptr << 798 (mdn->dn_datablkshift - DNODE_SHIFT + 799 (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) < 800 DN_MAX_OBJECT) 801 levels++; 802 803 mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] = 804 mdn->dn_nlevels = levels; 805 } 806 807 ASSERT(type != DMU_OST_NONE); 808 ASSERT(type != DMU_OST_ANY); 809 ASSERT(type < DMU_OST_NUMTYPES); 810 os->os_phys->os_type = type; 811 if (dmu_objset_userused_enabled(os)) { 812 os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE; 813 os->os_flags = os->os_phys->os_flags; 814 } 815 816 dsl_dataset_dirty(ds, tx); 817 818 return (os); 819} 820 821typedef struct dmu_objset_create_arg { 822 const char *doca_name; 823 cred_t *doca_cred; 824 void (*doca_userfunc)(objset_t *os, void *arg, 825 cred_t *cr, dmu_tx_t *tx); 826 void *doca_userarg; 827 dmu_objset_type_t doca_type; 828 uint64_t doca_flags; 829} dmu_objset_create_arg_t; 830 831/*ARGSUSED*/ 832static int 833dmu_objset_create_check(void *arg, dmu_tx_t *tx) 834{ 835 dmu_objset_create_arg_t *doca = arg; 836 dsl_pool_t *dp = dmu_tx_pool(tx); 837 dsl_dir_t *pdd; 838 const char *tail; 839 int error; 840 841 if (strchr(doca->doca_name, '@') != NULL) 842 return (SET_ERROR(EINVAL)); 843 844 if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN) 845 return (SET_ERROR(ENAMETOOLONG)); 846 847 error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail); 848 if (error != 0) 849 return (error); 850 if (tail == NULL) { 851 dsl_dir_rele(pdd, FTAG); 852 return (SET_ERROR(EEXIST)); 853 } 854 error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL, 855 doca->doca_cred); 856 dsl_dir_rele(pdd, FTAG); 857 858 return (error); 859} 860 861static void 862dmu_objset_create_sync(void *arg, dmu_tx_t *tx) 863{ 864 dmu_objset_create_arg_t *doca = arg; 865 dsl_pool_t *dp = dmu_tx_pool(tx); 866 dsl_dir_t *pdd; 867 const char *tail; 868 dsl_dataset_t *ds; 869 uint64_t obj; 870 blkptr_t *bp; 871 objset_t *os; 872 873 VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail)); 874 875 obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags, 876 doca->doca_cred, tx); 877 878 VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds)); 879 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); 880 bp = dsl_dataset_get_blkptr(ds); 881 os = dmu_objset_create_impl(pdd->dd_pool->dp_spa, 882 ds, bp, doca->doca_type, tx); 883 rrw_exit(&ds->ds_bp_rwlock, FTAG); 884 885 if (doca->doca_userfunc != NULL) { 886 doca->doca_userfunc(os, doca->doca_userarg, 887 doca->doca_cred, tx); 888 } 889 890 spa_history_log_internal_ds(ds, "create", tx, ""); 891 dsl_dataset_rele(ds, FTAG); 892 dsl_dir_rele(pdd, FTAG); 893} 894 895int 896dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags, 897 void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg) 898{ 899 dmu_objset_create_arg_t doca; 900 901 doca.doca_name = name; 902 doca.doca_cred = CRED(); 903 doca.doca_flags = flags; 904 doca.doca_userfunc = func; 905 doca.doca_userarg = arg; 906 doca.doca_type = type; 907 908 return (dsl_sync_task(name, 909 dmu_objset_create_check, dmu_objset_create_sync, &doca, 910 5, ZFS_SPACE_CHECK_NORMAL)); 911} 912 913typedef struct dmu_objset_clone_arg { 914 const char *doca_clone; 915 const char *doca_origin; 916 cred_t *doca_cred; 917} dmu_objset_clone_arg_t; 918 919/*ARGSUSED*/ 920static int 921dmu_objset_clone_check(void *arg, dmu_tx_t *tx) 922{ 923 dmu_objset_clone_arg_t *doca = arg; 924 dsl_dir_t *pdd; 925 const char *tail; 926 int error; 927 dsl_dataset_t *origin; 
	dsl_pool_t *dp = dmu_tx_pool(tx);

	if (strchr(doca->doca_clone, '@') != NULL)
		return (SET_ERROR(EINVAL));

	if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));

	error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}

	error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
	    doca->doca_cred);
	if (error != 0) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EDQUOT));
	}
	dsl_dir_rele(pdd, FTAG);

	error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
	if (error != 0)
		return (error);

	/* You can only clone snapshots, not the head datasets. */
	if (!origin->ds_is_snapshot) {
		dsl_dataset_rele(origin, FTAG);
		return (SET_ERROR(EINVAL));
	}
	dsl_dataset_rele(origin, FTAG);

	return (0);
}

static void
dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *origin, *ds;
	uint64_t obj;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];

	VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
	VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));

	obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
	    doca->doca_cred, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	dsl_dataset_name(origin, namebuf);
	spa_history_log_internal_ds(ds, "clone", tx,
	    "origin=%s (%llu)", namebuf, origin->ds_object);
	dsl_dataset_rele(ds, FTAG);
	dsl_dataset_rele(origin, FTAG);
	dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_clone(const char *clone, const char *origin)
{
	dmu_objset_clone_arg_t doca;

	doca.doca_clone = clone;
	doca.doca_origin = origin;
	doca.doca_cred = CRED();

	return (dsl_sync_task(clone,
	    dmu_objset_clone_check, dmu_objset_clone_sync, &doca,
	    5, ZFS_SPACE_CHECK_NORMAL));
}

int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
	int err;
	char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
	nvlist_t *snaps = fnvlist_alloc();

	fnvlist_add_boolean(snaps, longsnap);
	strfree(longsnap);
	err = dsl_dataset_snapshot(snaps, NULL, NULL);
	fnvlist_free(snaps);
	return (err);
}

static void
dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
{
	dnode_t *dn;

	while (dn = list_head(list)) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync() because the
		 * meta-dnode needs to set it outside dnode_sync().
1031 */ 1032 dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio; 1033 ASSERT(dn->dn_zio); 1034 1035 ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS); 1036 list_remove(list, dn); 1037 1038 if (newlist) { 1039 (void) dnode_add_ref(dn, newlist); 1040 list_insert_tail(newlist, dn); 1041 } 1042 1043 dnode_sync(dn, tx); 1044 } 1045} 1046 1047/* ARGSUSED */ 1048static void 1049dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg) 1050{ 1051 blkptr_t *bp = zio->io_bp; 1052 objset_t *os = arg; 1053 dnode_phys_t *dnp = &os->os_phys->os_meta_dnode; 1054 1055 ASSERT(!BP_IS_EMBEDDED(bp)); 1056 ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET); 1057 ASSERT0(BP_GET_LEVEL(bp)); 1058 1059 /* 1060 * Update rootbp fill count: it should be the number of objects 1061 * allocated in the object set (not counting the "special" 1062 * objects that are stored in the objset_phys_t -- the meta 1063 * dnode and user/group accounting objects). 1064 */ 1065 bp->blk_fill = 0; 1066 for (int i = 0; i < dnp->dn_nblkptr; i++) 1067 bp->blk_fill += BP_GET_FILL(&dnp->dn_blkptr[i]); 1068 if (os->os_dsl_dataset != NULL) 1069 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_WRITER, FTAG); 1070 *os->os_rootbp = *bp; 1071 if (os->os_dsl_dataset != NULL) 1072 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); 1073} 1074 1075/* ARGSUSED */ 1076static void 1077dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg) 1078{ 1079 blkptr_t *bp = zio->io_bp; 1080 blkptr_t *bp_orig = &zio->io_bp_orig; 1081 objset_t *os = arg; 1082 1083 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { 1084 ASSERT(BP_EQUAL(bp, bp_orig)); 1085 } else { 1086 dsl_dataset_t *ds = os->os_dsl_dataset; 1087 dmu_tx_t *tx = os->os_synctx; 1088 1089 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 1090 dsl_dataset_block_born(ds, bp, tx); 1091 } 1092 kmem_free(bp, sizeof (*bp)); 1093} 1094 1095/* called from dsl */ 1096void 1097dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx) 1098{ 1099 int txgoff; 1100 zbookmark_phys_t zb; 1101 zio_prop_t zp; 1102 zio_t *zio; 1103 list_t *list; 1104 list_t *newlist = NULL; 1105 dbuf_dirty_record_t *dr; 1106 blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP); 1107 *blkptr_copy = *os->os_rootbp; 1108 1109 dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg); 1110 1111 ASSERT(dmu_tx_is_syncing(tx)); 1112 /* XXX the write_done callback should really give us the tx... */ 1113 os->os_synctx = tx; 1114 1115 if (os->os_dsl_dataset == NULL) { 1116 /* 1117 * This is the MOS. If we have upgraded, 1118 * spa_max_replication() could change, so reset 1119 * os_copies here. 1120 */ 1121 os->os_copies = spa_max_replication(os->os_spa); 1122 } 1123 1124 /* 1125 * Create the root block IO 1126 */ 1127 SET_BOOKMARK(&zb, os->os_dsl_dataset ? 
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	arc_release(os->os_phys_buf, &os->os_phys_buf);

	dmu_write_policy(os, NULL, 0, 0, &zp);

	zio = arc_write(pio, os->os_spa, tx->tx_txg,
	    blkptr_copy, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
	    &zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done,
	    os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);

	/*
	 * Sync special dnodes - the parent IO for the sync is the root block
	 */
	DMU_META_DNODE(os)->dn_zio = zio;
	dnode_sync(DMU_META_DNODE(os), tx);

	os->os_phys->os_flags = os->os_flags;

	if (DMU_USERUSED_DNODE(os) &&
	    DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
		DMU_USERUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_USERUSED_DNODE(os), tx);
		DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
	}

	txgoff = tx->tx_txg & TXG_MASK;

	if (dmu_objset_userused_enabled(os)) {
		newlist = &os->os_synced_dnodes;
		/*
		 * We must create the list here because it uses the
		 * dn_dirty_link[] of this txg.
		 */
		list_create(newlist, sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[txgoff]));
	}

	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);

	list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
	while (dr = list_head(list)) {
		ASSERT0(dr->dr_dbuf->db_level);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}
	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	os->os_phys->os_zil_header = os->os_zil_header;
	zio_nowait(zio);
}

boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
	return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) ||
	    !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK]));
}

static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];

void
dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
{
	used_cbs[ost] = cb;
}

boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
	    used_cbs[os->os_phys->os_type] != NULL &&
	    DMU_USERUSED_DNODE(os) != NULL);
}

typedef struct userquota_node {
	uint64_t uqn_id;
	int64_t uqn_delta;
	avl_node_t uqn_node;
} userquota_node_t;

typedef struct userquota_cache {
	avl_tree_t uqc_user_deltas;
	avl_tree_t uqc_group_deltas;
} userquota_cache_t;

static int
userquota_compare(const void *l, const void *r)
{
	const userquota_node_t *luqn = l;
	const userquota_node_t *ruqn = r;

	if (luqn->uqn_id < ruqn->uqn_id)
		return (-1);
	if (luqn->uqn_id > ruqn->uqn_id)
		return (1);
	return (0);
}
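
/*
 * The AVL trees declared above batch per-id space deltas so that each
 * user/group id is written to its ZAP object at most once per sync, no
 * matter how many dnodes changed. Sketch of the accumulate-then-flush
 * flow (values hypothetical):
 *
 *	userquota_update_cache(&cache->uqc_user_deltas, uid, 512);
 *	userquota_update_cache(&cache->uqc_user_deltas, uid, -128);
 *	...
 *	flush: a single zap_increment_int(os, DMU_USERUSED_OBJECT,
 *	    uid, 384, tx)
 */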

static void
do_userquota_cacheflush(objset_t *os, userquota_cache_t *cache, dmu_tx_t *tx)
{
	void *cookie;
	userquota_node_t *uqn;

	ASSERT(dmu_tx_is_syncing(tx));

	cookie = NULL;
	while ((uqn = avl_destroy_nodes(&cache->uqc_user_deltas,
	    &cookie)) != NULL) {
		VERIFY0(zap_increment_int(os, DMU_USERUSED_OBJECT,
		    uqn->uqn_id, uqn->uqn_delta, tx));
		kmem_free(uqn, sizeof (*uqn));
	}
	avl_destroy(&cache->uqc_user_deltas);

	cookie = NULL;
	while ((uqn = avl_destroy_nodes(&cache->uqc_group_deltas,
	    &cookie)) != NULL) {
		VERIFY0(zap_increment_int(os, DMU_GROUPUSED_OBJECT,
		    uqn->uqn_id, uqn->uqn_delta, tx));
		kmem_free(uqn, sizeof (*uqn));
	}
	avl_destroy(&cache->uqc_group_deltas);
}

static void
userquota_update_cache(avl_tree_t *avl, uint64_t id, int64_t delta)
{
	userquota_node_t search = { .uqn_id = id };
	avl_index_t idx;

	userquota_node_t *uqn = avl_find(avl, &search, &idx);
	if (uqn == NULL) {
		uqn = kmem_zalloc(sizeof (*uqn), KM_SLEEP);
		uqn->uqn_id = id;
		avl_insert(avl, uqn, idx);
	}
	uqn->uqn_delta += delta;
}

static void
do_userquota_update(userquota_cache_t *cache, uint64_t used, uint64_t flags,
    uint64_t user, uint64_t group, boolean_t subtract)
{
	if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
		int64_t delta = DNODE_SIZE + used;
		if (subtract)
			delta = -delta;

		userquota_update_cache(&cache->uqc_user_deltas, user, delta);
		userquota_update_cache(&cache->uqc_group_deltas, group, delta);
	}
}

void
dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
{
	dnode_t *dn;
	list_t *list = &os->os_synced_dnodes;
	userquota_cache_t cache = { 0 };

	ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));

	avl_create(&cache.uqc_user_deltas, userquota_compare,
	    sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
	avl_create(&cache.uqc_group_deltas, userquota_compare,
	    sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));

	while (dn = list_head(list)) {
		int flags;
		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
		    dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED);

		/* Allocate the user/groupused objects if necessary. */
		if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
			VERIFY0(zap_create_claim(os,
			    DMU_USERUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
			VERIFY0(zap_create_claim(os,
			    DMU_GROUPUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
		}

		flags = dn->dn_id_flags;
		ASSERT(flags);
		if (flags & DN_ID_OLD_EXIST) {
			do_userquota_update(&cache,
			    dn->dn_oldused, dn->dn_oldflags,
			    dn->dn_olduid, dn->dn_oldgid, B_TRUE);
		}
		if (flags & DN_ID_NEW_EXIST) {
			do_userquota_update(&cache,
			    DN_USED_BYTES(dn->dn_phys),
			    dn->dn_phys->dn_flags, dn->dn_newuid,
			    dn->dn_newgid, B_FALSE);
		}

		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = 0;
		dn->dn_oldflags = 0;
		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
			dn->dn_olduid = dn->dn_newuid;
			dn->dn_oldgid = dn->dn_newgid;
			dn->dn_id_flags |= DN_ID_OLD_EXIST;
			if (dn->dn_bonuslen == 0)
				dn->dn_id_flags |= DN_ID_CHKED_SPILL;
			else
				dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		}
		dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
		mutex_exit(&dn->dn_mtx);

		list_remove(list, dn);
		dnode_rele(dn, list);
	}
	do_userquota_cacheflush(os, &cache, tx);
}
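
/*
 * The accounting above only runs when dmu_objset_userused_enabled() is
 * true, which requires a per-type callback in used_cbs[]; consumers
 * install one via dmu_objset_register_type(), e.g. the ZPL registers
 * its callback for DMU_OST_ZFS at initialization, roughly
 * (see zfs_vfsops.c):
 *
 *	dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
 */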
1360 */ 1361static void * 1362dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx) 1363{ 1364 dbuf_dirty_record_t *dr, **drp; 1365 void *data; 1366 1367 if (db->db_dirtycnt == 0) 1368 return (db->db.db_data); /* Nothing is changing */ 1369 1370 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) 1371 if (dr->dr_txg == tx->tx_txg) 1372 break; 1373 1374 if (dr == NULL) { 1375 data = NULL; 1376 } else { 1377 dnode_t *dn; 1378 1379 DB_DNODE_ENTER(dr->dr_dbuf); 1380 dn = DB_DNODE(dr->dr_dbuf); 1381 1382 if (dn->dn_bonuslen == 0 && 1383 dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID) 1384 data = dr->dt.dl.dr_data->b_data; 1385 else 1386 data = dr->dt.dl.dr_data; 1387 1388 DB_DNODE_EXIT(dr->dr_dbuf); 1389 } 1390 1391 return (data); 1392} 1393 1394void 1395dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx) 1396{ 1397 objset_t *os = dn->dn_objset; 1398 void *data = NULL; 1399 dmu_buf_impl_t *db = NULL; 1400 uint64_t *user = NULL; 1401 uint64_t *group = NULL; 1402 int flags = dn->dn_id_flags; 1403 int error; 1404 boolean_t have_spill = B_FALSE; 1405 1406 if (!dmu_objset_userused_enabled(dn->dn_objset)) 1407 return; 1408 1409 if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST| 1410 DN_ID_CHKED_SPILL))) 1411 return; 1412 1413 if (before && dn->dn_bonuslen != 0) 1414 data = DN_BONUS(dn->dn_phys); 1415 else if (!before && dn->dn_bonuslen != 0) { 1416 if (dn->dn_bonus) { 1417 db = dn->dn_bonus; 1418 mutex_enter(&db->db_mtx); 1419 data = dmu_objset_userquota_find_data(db, tx); 1420 } else { 1421 data = DN_BONUS(dn->dn_phys); 1422 } 1423 } else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) { 1424 int rf = 0; 1425 1426 if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) 1427 rf |= DB_RF_HAVESTRUCT; 1428 error = dmu_spill_hold_by_dnode(dn, 1429 rf | DB_RF_MUST_SUCCEED, 1430 FTAG, (dmu_buf_t **)&db); 1431 ASSERT(error == 0); 1432 mutex_enter(&db->db_mtx); 1433 data = (before) ? db->db.db_data : 1434 dmu_objset_userquota_find_data(db, tx); 1435 have_spill = B_TRUE; 1436 } else { 1437 mutex_enter(&dn->dn_mtx); 1438 dn->dn_id_flags |= DN_ID_CHKED_BONUS; 1439 mutex_exit(&dn->dn_mtx); 1440 return; 1441 } 1442 1443 if (before) { 1444 ASSERT(data); 1445 user = &dn->dn_olduid; 1446 group = &dn->dn_oldgid; 1447 } else if (data) { 1448 user = &dn->dn_newuid; 1449 group = &dn->dn_newgid; 1450 } 1451 1452 /* 1453 * Must always call the callback in case the object 1454 * type has changed and that type isn't an object type to track 1455 */ 1456 error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data, 1457 user, group); 1458 1459 /* 1460 * Preserve existing uid/gid when the callback can't determine 1461 * what the new uid/gid are and the callback returned EEXIST. 1462 * The EEXIST error tells us to just use the existing uid/gid. 1463 * If we don't know what the old values are then just assign 1464 * them to 0, since that is a new file being created. 
1465 */ 1466 if (!before && data == NULL && error == EEXIST) { 1467 if (flags & DN_ID_OLD_EXIST) { 1468 dn->dn_newuid = dn->dn_olduid; 1469 dn->dn_newgid = dn->dn_oldgid; 1470 } else { 1471 dn->dn_newuid = 0; 1472 dn->dn_newgid = 0; 1473 } 1474 error = 0; 1475 } 1476 1477 if (db) 1478 mutex_exit(&db->db_mtx); 1479 1480 mutex_enter(&dn->dn_mtx); 1481 if (error == 0 && before) 1482 dn->dn_id_flags |= DN_ID_OLD_EXIST; 1483 if (error == 0 && !before) 1484 dn->dn_id_flags |= DN_ID_NEW_EXIST; 1485 1486 if (have_spill) { 1487 dn->dn_id_flags |= DN_ID_CHKED_SPILL; 1488 } else { 1489 dn->dn_id_flags |= DN_ID_CHKED_BONUS; 1490 } 1491 mutex_exit(&dn->dn_mtx); 1492 if (have_spill) 1493 dmu_buf_rele((dmu_buf_t *)db, FTAG); 1494} 1495 1496boolean_t 1497dmu_objset_userspace_present(objset_t *os) 1498{ 1499 return (os->os_phys->os_flags & 1500 OBJSET_FLAG_USERACCOUNTING_COMPLETE); 1501} 1502 1503int 1504dmu_objset_userspace_upgrade(objset_t *os) 1505{ 1506 uint64_t obj; 1507 int err = 0; 1508 1509 if (dmu_objset_userspace_present(os)) 1510 return (0); 1511 if (!dmu_objset_userused_enabled(os)) 1512 return (SET_ERROR(ENOTSUP)); 1513 if (dmu_objset_is_snapshot(os)) 1514 return (SET_ERROR(EINVAL)); 1515 1516 /* 1517 * We simply need to mark every object dirty, so that it will be 1518 * synced out and now accounted. If this is called 1519 * concurrently, or if we already did some work before crashing, 1520 * that's fine, since we track each object's accounted state 1521 * independently. 1522 */ 1523 1524 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) { 1525 dmu_tx_t *tx; 1526 dmu_buf_t *db; 1527 int objerr; 1528 1529 if (issig(JUSTLOOKING) && issig(FORREAL)) 1530 return (SET_ERROR(EINTR)); 1531 1532 objerr = dmu_bonus_hold(os, obj, FTAG, &db); 1533 if (objerr != 0) 1534 continue; 1535 tx = dmu_tx_create(os); 1536 dmu_tx_hold_bonus(tx, obj); 1537 objerr = dmu_tx_assign(tx, TXG_WAIT); 1538 if (objerr != 0) { 1539 dmu_tx_abort(tx); 1540 continue; 1541 } 1542 dmu_buf_will_dirty(db, tx); 1543 dmu_buf_rele(db, FTAG); 1544 dmu_tx_commit(tx); 1545 } 1546 1547 os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE; 1548 txg_wait_synced(dmu_objset_pool(os), 0); 1549 return (0); 1550} 1551 1552void 1553dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp, 1554 uint64_t *usedobjsp, uint64_t *availobjsp) 1555{ 1556 dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp, 1557 usedobjsp, availobjsp); 1558} 1559 1560uint64_t 1561dmu_objset_fsid_guid(objset_t *os) 1562{ 1563 return (dsl_dataset_fsid_guid(os->os_dsl_dataset)); 1564} 1565 1566void 1567dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat) 1568{ 1569 stat->dds_type = os->os_phys->os_type; 1570 if (os->os_dsl_dataset) 1571 dsl_dataset_fast_stat(os->os_dsl_dataset, stat); 1572} 1573 1574void 1575dmu_objset_stats(objset_t *os, nvlist_t *nv) 1576{ 1577 ASSERT(os->os_dsl_dataset || 1578 os->os_phys->os_type == DMU_OST_META); 1579 1580 if (os->os_dsl_dataset != NULL) 1581 dsl_dataset_stats(os->os_dsl_dataset, nv); 1582 1583 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE, 1584 os->os_phys->os_type); 1585 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING, 1586 dmu_objset_userspace_present(os)); 1587} 1588 1589int 1590dmu_objset_is_snapshot(objset_t *os) 1591{ 1592 if (os->os_dsl_dataset != NULL) 1593 return (os->os_dsl_dataset->ds_is_snapshot); 1594 else 1595 return (B_FALSE); 1596} 1597 1598int 1599dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen, 1600 boolean_t *conflict) 1601{ 1602 
	dsl_dataset_t *ds = os->os_dsl_dataset;
	uint64_t ignored;

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
	    MT_FIRST, real, maxlen, conflict));
}

int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	if (case_conflict)
		*case_conflict = attr.za_normalization_conflict;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os_dsl_dataset->ds_object !=
	    dsl_dir_phys(dd)->dd_head_dataset_obj)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
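
/*
 * Both listing functions above keep a serialized ZAP cursor in *offp so
 * a caller can resume where it left off; a sketch of snapshot
 * enumeration (buffer name hypothetical; the pool config lock must be
 * held, per the ASSERT in dmu_snapshot_list_next()):
 *
 *	uint64_t off = 0;
 *	char snap[ZFS_MAX_DATASET_NAME_LEN];
 *	while (dmu_snapshot_list_next(os, sizeof (snap), snap,
 *	    NULL, &off, NULL) == 0) {
 *		... visit snapshot "snap" ...
 *	}
 */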

typedef struct dmu_objset_find_ctx {
	taskq_t		*dc_tq;
	dsl_pool_t	*dc_dp;
	uint64_t	dc_ddobj;
	int		(*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *);
	void		*dc_arg;
	int		dc_flags;
	kmutex_t	*dc_error_lock;
	int		*dc_error;
} dmu_objset_find_ctx_t;

static void
dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
{
	dsl_pool_t *dp = dcp->dc_dp;
	dmu_objset_find_ctx_t *child_dcp;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	uint64_t thisobj;
	int err = 0;

	/* don't process if there already was an error */
	if (*dcp->dc_error != 0)
		goto out;

	err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, NULL, FTAG, &dd);
	if (err != 0)
		goto out;

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		goto out;
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (dcp->dc_flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			child_dcp = kmem_alloc(sizeof (*child_dcp), KM_SLEEP);
			*child_dcp = *dcp;
			child_dcp->dc_ddobj = attr->za_first_integer;
			if (dcp->dc_tq != NULL)
				(void) taskq_dispatch(dcp->dc_tq,
				    dmu_objset_find_dp_cb, child_dcp,
				    TQ_SLEEP);
			else
				dmu_objset_find_dp_impl(child_dcp);
		}
		zap_cursor_fini(&zc);
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (dcp->dc_flags & DS_FIND_SNAPSHOTS) {
		dsl_dataset_t *ds;
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				err = dsl_dataset_hold_obj(dp,
				    attr->za_first_integer, FTAG, &ds);
				if (err != 0)
					break;
				err = dcp->dc_func(dp, ds, dcp->dc_arg);
				dsl_dataset_rele(ds, FTAG);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));

	if (err != 0)
		goto out;

	/*
	 * Apply to self.
	 */
	err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
	if (err != 0)
		goto out;
	err = dcp->dc_func(dp, ds, dcp->dc_arg);
	dsl_dataset_rele(ds, FTAG);

out:
	if (err != 0) {
		mutex_enter(dcp->dc_error_lock);
		/* only keep first error */
		if (*dcp->dc_error == 0)
			*dcp->dc_error = err;
		mutex_exit(dcp->dc_error_lock);
	}

	kmem_free(dcp, sizeof (*dcp));
}

static void
dmu_objset_find_dp_cb(void *arg)
{
	dmu_objset_find_ctx_t *dcp = arg;
	dsl_pool_t *dp = dcp->dc_dp;

	/*
	 * We need to get a pool_config_lock here, as there are several
	 * assert(pool_config_held) down the stack. Getting a lock via
	 * dsl_pool_config_enter is risky, as it might be stalled by a
	 * pending writer. This would deadlock, as the write lock can
	 * only be granted when our parent thread gives up the lock.
	 * The _prio interface gives us priority over a pending writer.
	 */
	dsl_pool_config_enter_prio(dp, FTAG);

	dmu_objset_find_dp_impl(dcp);

	dsl_pool_config_exit(dp, FTAG);
}

/*
 * Find objsets under and including ddobj, call func(ds) on each.
 * The order for the enumeration is completely undefined.
 * func is called with dsl_pool_config held.
1838 */ 1839int 1840dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj, 1841 int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags) 1842{ 1843 int error = 0; 1844 taskq_t *tq = NULL; 1845 int ntasks; 1846 dmu_objset_find_ctx_t *dcp; 1847 kmutex_t err_lock; 1848 1849 mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL); 1850 dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP); 1851 dcp->dc_tq = NULL; 1852 dcp->dc_dp = dp; 1853 dcp->dc_ddobj = ddobj; 1854 dcp->dc_func = func; 1855 dcp->dc_arg = arg; 1856 dcp->dc_flags = flags; 1857 dcp->dc_error_lock = &err_lock; 1858 dcp->dc_error = &error; 1859 1860 if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) { 1861 /* 1862 * In case a write lock is held we can't make use of 1863 * parallelism, as down the stack of the worker threads 1864 * the lock is asserted via dsl_pool_config_held. 1865 * In case of a read lock this is solved by getting a read 1866 * lock in each worker thread, which isn't possible in case 1867 * of a writer lock. So we fall back to the synchronous path 1868 * here. 1869 * In the future it might be possible to get some magic into 1870 * dsl_pool_config_held in a way that it returns true for 1871 * the worker threads so that a single lock held from this 1872 * thread suffices. For now, stay single threaded. 1873 */ 1874 dmu_objset_find_dp_impl(dcp); 1875 mutex_destroy(&err_lock); 1876 1877 return (error); 1878 } 1879 1880 ntasks = dmu_find_threads; 1881 if (ntasks == 0) 1882 ntasks = vdev_count_leaves(dp->dp_spa) * 4; 1883 tq = taskq_create("dmu_objset_find", ntasks, minclsyspri, ntasks, 1884 INT_MAX, 0); 1885 if (tq == NULL) { 1886 kmem_free(dcp, sizeof (*dcp)); 1887 mutex_destroy(&err_lock); 1888 1889 return (SET_ERROR(ENOMEM)); 1890 } 1891 dcp->dc_tq = tq; 1892 1893 /* dcp will be freed by task */ 1894 (void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP); 1895 1896 /* 1897 * PORTING: this code relies on the property of taskq_wait to wait 1898 * until no more tasks are queued and no more tasks are active. As 1899 * we always queue new tasks from within other tasks, task_wait 1900 * reliably waits for the full recursion to finish, even though we 1901 * enqueue new tasks after taskq_wait has been called. 1902 * On platforms other than illumos, taskq_wait may not have this 1903 * property. 1904 */ 1905 taskq_wait(tq); 1906 taskq_destroy(tq); 1907 mutex_destroy(&err_lock); 1908 1909 return (error); 1910} 1911 1912/* 1913 * Find all objsets under name, and for each, call 'func(child_name, arg)'. 1914 * The dp_config_rwlock must not be held when this is called, and it 1915 * will not be held when the callback is called. 1916 * Therefore this function should only be used when the pool is not changing 1917 * (e.g. in syncing context), or the callback can deal with the possible races. 1918 */ 1919static int 1920dmu_objset_find_impl(spa_t *spa, const char *name, 1921 int func(const char *, void *), void *arg, int flags) 1922{ 1923 dsl_dir_t *dd; 1924 dsl_pool_t *dp = spa_get_dsl(spa); 1925 dsl_dataset_t *ds; 1926 zap_cursor_t zc; 1927 zap_attribute_t *attr; 1928 char *child; 1929 uint64_t thisobj; 1930 int err; 1931 1932 dsl_pool_config_enter(dp, FTAG); 1933 1934 err = dsl_dir_hold(dp, name, FTAG, &dd, NULL); 1935 if (err != 0) { 1936 dsl_pool_config_exit(dp, FTAG); 1937 return (err); 1938 } 1939 1940 /* Don't visit hidden ($MOS & $ORIGIN) objsets. 

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (0);
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			child = kmem_asprintf("%s/%s", name, attr->za_name);
			dsl_pool_config_exit(dp, FTAG);
			err = dmu_objset_find_impl(spa, child,
			    func, arg, flags);
			dsl_pool_config_enter(dp, FTAG);
			strfree(child);
			if (err != 0)
				break;
		}
		zap_cursor_fini(&zc);

		if (err != 0) {
			dsl_dir_rele(dd, FTAG);
			dsl_pool_config_exit(dp, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				child = kmem_asprintf("%s@%s",
				    name, attr->za_name);
				dsl_pool_config_exit(dp, FTAG);
				err = func(child, arg);
				dsl_pool_config_enter(dp, FTAG);
				strfree(child);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));
	dsl_pool_config_exit(dp, FTAG);

	if (err != 0)
		return (err);

	/* Apply to self. */
	return (func(name, arg));
}

/*
 * See comment above dmu_objset_find_impl().
 */
int
dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	error = dmu_objset_find_impl(spa, name, func, arg, flags);
	spa_close(spa, FTAG);
	return (error);
}

void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	return (os->os_user_ptr);
}

/*
 * Determine name of filesystem, given name of snapshot.
 * buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes
 */
int
dmu_fsname(const char *snapname, char *buf)
{
	char *atp = strchr(snapname, '@');
	if (atp == NULL)
		return (SET_ERROR(EINVAL));
	if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strlcpy(buf, snapname, atp - snapname + 1);
	return (0);
}
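
/*
 * E.g. (illustrative): given snapname "pool/fs@snap", dmu_fsname()
 * copies "pool/fs" into buf:
 *
 *	char buf[ZFS_MAX_DATASET_NAME_LEN];
 *	VERIFY0(dmu_fsname("pool/fs@snap", buf));
 */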