/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/dsl_dataset.h>

/*
 * Each of the concurrent object allocators will grab
 * 2^dmu_object_alloc_chunk_shift dnode slots at a time. The default is to
 * grab 128 slots, which is 4 blocks worth. This was experimentally
 * determined to be the lowest value that eliminates the measurable effect
 * of lock contention from this code path.
 */
int dmu_object_alloc_chunk_shift = 7;

static uint64_t
dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
    int indirect_blockshift, dmu_object_type_t bonustype, int bonuslen,
    int dnodesize, dmu_tx_t *tx)
{
	uint64_t object;
	uint64_t L1_dnode_count = DNODES_PER_BLOCK <<
	    (DMU_META_DNODE(os)->dn_indblkshift - SPA_BLKPTRSHIFT);
	dnode_t *dn = NULL;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	boolean_t restarted = B_FALSE;
	uint64_t *cpuobj = &os->os_obj_next_percpu[CPU_SEQID %
	    os->os_obj_next_percpu_len];
	int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
	int error;

	if (dn_slots == 0) {
		dn_slots = DNODE_MIN_SLOTS;
	} else {
		ASSERT3S(dn_slots, >=, DNODE_MIN_SLOTS);
		ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);
	}

	/*
	 * The "chunk" of dnodes that is assigned to a CPU-specific
	 * allocator needs to be at least one block's worth, to avoid
	 * lock contention on the dbuf. It can be at most one L1 block's
	 * worth, so that the "rescan after polishing off a L1's worth"
	 * logic below will be sure to kick in.
	 */
	if (dnodes_per_chunk < DNODES_PER_BLOCK)
		dnodes_per_chunk = DNODES_PER_BLOCK;
	if (dnodes_per_chunk > L1_dnode_count)
		dnodes_per_chunk = L1_dnode_count;

#ifdef __FreeBSD__
	object = atomic_load_64(cpuobj);
#else
	object = *cpuobj;
#endif

	for (;;) {
		/*
		 * If we finished a chunk of dnodes, get a new one from
		 * the global allocator.
		 */
		if ((P2PHASE(object, dnodes_per_chunk) == 0) ||
		    (P2PHASE(object + dn_slots - 1, dnodes_per_chunk) <
		    dn_slots)) {
			DNODE_STAT_BUMP(dnode_alloc_next_chunk);
			mutex_enter(&os->os_obj_lock);
			ASSERT0(P2PHASE(os->os_obj_next_chunk,
			    dnodes_per_chunk));
			object = os->os_obj_next_chunk;

			/*
			 * Each time we polish off a L1 bp worth of dnodes
			 * (2^12 objects), move to another L1 bp that's
			 * still reasonably sparse (at most 1/4 full). Look
			 * from the beginning at most once per txg. If we
			 * still can't allocate from that L1 block, search
			 * for an empty L0 block, which will quickly skip
			 * to the end of the metadnode if no nearby L0
			 * blocks are empty. This fallback avoids a
			 * pathology where full dnode blocks containing
			 * large dnodes appear sparse because they have a
			 * low blk_fill, leading to many failed allocation
			 * attempts. In the long term a better mechanism to
			 * search for sparse metadnode regions, such as
			 * spacemaps, could be implemented.
			 *
			 * os_rescan_dnodes is set during txg sync if enough
			 * objects have been freed since the previous
			 * rescan to justify backfilling again.
			 *
			 * Note that dmu_traverse depends on the behavior
			 * that we use multiple blocks of the dnode object
			 * before going back to reuse objects. Any change
			 * to this algorithm should preserve that property
			 * or find another solution to the issues described
			 * in traverse_visitbp.
			 */
			if (P2PHASE(object, L1_dnode_count) == 0) {
				uint64_t offset;
				uint64_t blkfill;
				int minlvl;
				if (os->os_rescan_dnodes) {
					offset = 0;
					os->os_rescan_dnodes = B_FALSE;
				} else {
					offset = object << DNODE_SHIFT;
				}
				blkfill = restarted ? 1 : DNODES_PER_BLOCK >> 2;
				minlvl = restarted ? 1 : 2;
				restarted = B_TRUE;
				error = dnode_next_offset(DMU_META_DNODE(os),
				    DNODE_FIND_HOLE, &offset, minlvl,
				    blkfill, 0);
				if (error == 0) {
					object = offset >> DNODE_SHIFT;
				}
			}
			/*
			 * Note: if "restarted", we may find a L0 that
			 * is not suitably aligned.
			 */
			os->os_obj_next_chunk =
			    P2ALIGN(object, dnodes_per_chunk) +
			    dnodes_per_chunk;
			(void) atomic_swap_64(cpuobj, object);
			mutex_exit(&os->os_obj_lock);
		}

		/*
		 * The value of (*cpuobj) before adding dn_slots is the object
		 * ID assigned to us. The value afterwards is the object ID
		 * assigned to whoever wants to do an allocation next.
		 */
		object = atomic_add_64_nv(cpuobj, dn_slots) - dn_slots;

		/*
		 * XXX We should check for an i/o error here and return
		 * up to our caller. Actually we should pre-read it in
		 * dmu_tx_assign(), but there is currently no mechanism
		 * to do so.
		 */
		error = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE,
		    dn_slots, FTAG, &dn);
		if (error == 0) {
			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
			/*
			 * Another thread could have allocated it; check
			 * again now that we have the struct lock.
			 */
			if (dn->dn_type == DMU_OT_NONE) {
				dnode_allocate(dn, ot, blocksize,
				    indirect_blockshift, bonustype, bonuslen,
				    dn_slots, tx);
				rw_exit(&dn->dn_struct_rwlock);
				dmu_tx_add_new_object(tx, dn);
				dnode_rele(dn, FTAG);
				return (object);
			}
			rw_exit(&dn->dn_struct_rwlock);
			dnode_rele(dn, FTAG);
			DNODE_STAT_BUMP(dnode_alloc_race);
		}

		/*
		 * Skip to next known valid starting point on error. This
		 * is the start of the next block of dnodes.
		 */
		if (dmu_object_next(os, &object, B_TRUE, 0) != 0) {
			object = P2ROUNDUP(object + 1, DNODES_PER_BLOCK);
			DNODE_STAT_BUMP(dnode_alloc_next_block);
		}
		(void) atomic_swap_64(cpuobj, object);
	}
}

uint64_t
dmu_object_alloc(objset_t *os, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_alloc_impl(os, ot, blocksize, 0, bonustype,
	    bonuslen, 0, tx));
}

uint64_t
dmu_object_alloc_ibs(objset_t *os, dmu_object_type_t ot, int blocksize,
    int indirect_blockshift, dmu_object_type_t bonustype, int bonuslen,
    dmu_tx_t *tx)
{
	return (dmu_object_alloc_impl(os, ot, blocksize, indirect_blockshift,
	    bonustype, bonuslen, 0, tx));
}

uint64_t
dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
	return (dmu_object_alloc_impl(os, ot, blocksize, 0, bonustype,
	    bonuslen, dnodesize, tx));
}

int
dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_claim_dnsize(os, object, ot, blocksize, bonustype,
	    bonuslen, 0, tx));
}

int
dmu_object_claim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen,
    int dnodesize, dmu_tx_t *tx)
{
	dnode_t *dn;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	int err;

	if (dn_slots == 0)
		dn_slots = DNODE_MIN_SLOTS;
	ASSERT3S(dn_slots, >=, DNODE_MIN_SLOTS);
	ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);

	if (object == DMU_META_DNODE_OBJECT && !dmu_tx_private_ok(tx))
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, dn_slots,
	    FTAG, &dn);
	if (err)
		return (err);
	dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, dn_slots, tx);
	dmu_tx_add_new_object(tx, dn);

	dnode_rele(dn, FTAG);

	return (0);
}

int
dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_reclaim_dnsize(os, object, ot, blocksize, bonustype,
	    bonuslen, DNODE_MIN_SIZE, tx));
}

int
dmu_object_reclaim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, int dnodesize,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	int err;

	if (dn_slots == 0)
		dn_slots = DNODE_MIN_SLOTS;

	if (object == DMU_META_DNODE_OBJECT)
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
	    FTAG, &dn);
	if (err)
		return (err);

	dnode_reallocate(dn, ot, blocksize, bonustype, bonuslen, dn_slots, tx);

	dnode_rele(dn, FTAG);
	return (err);
}


int
dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	ASSERT(object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
	    FTAG, &dn);
	if (err)
		return (err);

	ASSERT(dn->dn_type != DMU_OT_NONE);
	/*
	 * If we don't create this free range, we'll leak indirect blocks when
	 * we get to freeing the dnode in syncing context.
	 */
	dnode_free_range(dn, 0, DMU_OBJECT_END, tx);
	dnode_free(dn, tx);
	dnode_rele(dn, FTAG);

	return (0);
}

/*
 * Return (in *objectp) the next object which is allocated (or a hole)
 * after *object, taking into account only objects that may have been modified
 * after the specified txg.
 */
int
dmu_object_next(objset_t *os, uint64_t *objectp, boolean_t hole, uint64_t txg)
{
	uint64_t offset;
	uint64_t start_obj;
	struct dsl_dataset *ds = os->os_dsl_dataset;
	int error;

	if (*objectp == 0) {
		start_obj = 1;
	} else if (ds && ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE]) {
		uint64_t i = *objectp + 1;
		uint64_t last_obj = *objectp | (DNODES_PER_BLOCK - 1);
		dmu_object_info_t doi;

		/*
		 * Scan through the remaining meta dnode block. The contents
		 * of each slot in the block are known so it can be quickly
		 * checked. If the block is exhausted without a match then
		 * hand off to dnode_next_offset() for further scanning.
		 */
		while (i <= last_obj) {
			error = dmu_object_info(os, i, &doi);
			if (error == ENOENT) {
				if (hole) {
					*objectp = i;
					return (0);
				} else {
					i++;
				}
			} else if (error == EEXIST) {
				i++;
			} else if (error == 0) {
				if (hole) {
					i += doi.doi_dnodesize >> DNODE_SHIFT;
				} else {
					*objectp = i;
					return (0);
				}
			} else {
				return (error);
			}
		}

		start_obj = i;
	} else {
		start_obj = *objectp + 1;
	}

	offset = start_obj << DNODE_SHIFT;

	error = dnode_next_offset(DMU_META_DNODE(os),
	    (hole ? DNODE_FIND_HOLE : 0), &offset, 0, DNODES_PER_BLOCK, txg);

	*objectp = offset >> DNODE_SHIFT;

	return (error);
}

/*
 * Turn this object from old_type into DMU_OTN_ZAP_METADATA, and bump the
 * refcount on SPA_FEATURE_EXTENSIBLE_DATASET.
 *
 * Only for use from syncing context, on MOS objects.
 */
void
dmu_object_zapify(objset_t *mos, uint64_t object, dmu_object_type_t old_type,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	if (dn->dn_type == DMU_OTN_ZAP_METADATA) {
		dnode_rele(dn, FTAG);
		return;
	}
	ASSERT3U(dn->dn_type, ==, old_type);
	ASSERT0(dn->dn_maxblkid);

	/*
	 * We must initialize the ZAP data before changing the type,
	 * so that concurrent calls to *_is_zapified() can determine if
	 * the object has been completely zapified by checking the type.
	 */
	mzap_create_impl(mos, object, 0, 0, tx);

	dn->dn_next_type[tx->tx_txg & TXG_MASK] = dn->dn_type =
	    DMU_OTN_ZAP_METADATA;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);

	spa_feature_incr(dmu_objset_spa(mos),
	    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
}

void
dmu_object_free_zapified(objset_t *mos, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	dmu_object_type_t t;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	t = dn->dn_type;
	dnode_rele(dn, FTAG);

	if (t == DMU_OTN_ZAP_METADATA) {
		spa_feature_decr(dmu_objset_spa(mos),
		    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
	}
	VERIFY0(dmu_object_free(mos, object, tx));
}
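
/*
 * Illustrative usage sketch, not part of this file's interface: a typical
 * DMU consumer allocates a new object inside an assigned transaction along
 * the following lines. The object type, block size, and bonus settings here
 * are arbitrary examples, error handling is elided, and "os" is assumed to
 * be a held objset.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 *	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
 *	uint64_t obj = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0,
 *	    DMU_OT_NONE, 0, tx);
 *	dmu_tx_commit(tx);
 */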