Lines Matching refs:dn (only in /freebsd-12-stable/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/)


161 dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
167 blkid = dbuf_whichblock(dn, 0, offset);
168 rw_enter(&dn->dn_struct_rwlock, RW_READER);
169 db = dbuf_hold(dn, blkid, tag);
170 rw_exit(&dn->dn_struct_rwlock);
184 dnode_t *dn;
189 err = dnode_hold(os, object, FTAG, &dn);
192 blkid = dbuf_whichblock(dn, 0, offset);
193 rw_enter(&dn->dn_struct_rwlock, RW_READER);
194 db = dbuf_hold(dn, blkid, tag);
195 rw_exit(&dn->dn_struct_rwlock);
196 dnode_rele(dn, FTAG);
208 dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
217 err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
263 dnode_t *dn;
267 dn = DB_DNODE(db);
269 if (dn->dn_bonus != db) {
274 dnode_setbonuslen(dn, newsize, tx);
286 dnode_t *dn;
290 dn = DB_DNODE(db);
294 } else if (dn->dn_bonus != db) {
297 dnode_setbonus_type(dn, type, tx);
309 dnode_t *dn;
313 dn = DB_DNODE(db);
314 type = dn->dn_bonustype;
323 dnode_t *dn;
326 error = dnode_hold(os, object, FTAG, &dn);
327 dbuf_rm_spill(dn, tx);
328 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
329 dnode_rm_spill(dn, tx);
330 rw_exit(&dn->dn_struct_rwlock);
331 dnode_rele(dn, FTAG);
341 dnode_t *dn;
345 error = dnode_hold(os, object, FTAG, &dn);
349 rw_enter(&dn->dn_struct_rwlock, RW_READER);
350 if (dn->dn_bonus == NULL) {
351 rw_exit(&dn->dn_struct_rwlock);
352 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
353 if (dn->dn_bonus == NULL)
354 dbuf_create_bonus(dn);
356 db = dn->dn_bonus;
360 VERIFY(dnode_add_ref(dn, db));
361 atomic_inc_32(&dn->dn_dbufs_count);
369 rw_exit(&dn->dn_struct_rwlock);
371 dnode_rele(dn, FTAG);
389 dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
395 rw_enter(&dn->dn_struct_rwlock, RW_READER);
397 db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);
400 rw_exit(&dn->dn_struct_rwlock);
415 dnode_t *dn;
419 dn = DB_DNODE(db);
421 if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
424 rw_enter(&dn->dn_struct_rwlock, RW_READER);
426 if (!dn->dn_have_spill) {
429 err = dmu_spill_hold_by_dnode(dn,
433 rw_exit(&dn->dn_struct_rwlock);
444 dnode_t *dn;
448 dn = DB_DNODE(db);
449 err = dmu_spill_hold_by_dnode(dn, DB_RF_CANFAIL, tag, dbp);
462 dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
481 rw_enter(&dn->dn_struct_rwlock, RW_READER);
482 if (dn->dn_datablkshift) {
483 int blkshift = dn->dn_datablkshift;
487 if (offset + length > dn->dn_datablksz) {
490 (longlong_t)dn->dn_objset->
492 (longlong_t)dn->dn_object, dn->dn_datablksz,
494 rw_exit(&dn->dn_struct_rwlock);
510 zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
511 blkid = dbuf_whichblock(dn, 0, offset);
513 dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
515 rw_exit(&dn->dn_struct_rwlock);
532 DNODE_META_IS_CACHEABLE(dn) && length <= zfetch_array_rd_sz) {
533 dmu_zfetch(&dn->dn_zfetch, blkid, nblks,
534 read && DNODE_IS_CACHEABLE(dn));
536 rw_exit(&dn->dn_struct_rwlock);
572 dnode_t *dn;
575 err = dnode_hold(os, object, FTAG, &dn);
579 err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
582 dnode_rele(dn, FTAG);
593 dnode_t *dn;
597 dn = DB_DNODE(db);
598 err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
634 dnode_t *dn;
639 dn = DMU_META_DNODE(os);
644 rw_enter(&dn->dn_struct_rwlock, RW_READER);
645 blkid = dbuf_whichblock(dn, level,
647 dbuf_prefetch(dn, level, blkid, pri, 0);
648 rw_exit(&dn->dn_struct_rwlock);
662 err = dnode_hold(os, object, FTAG, &dn);
666 rw_enter(&dn->dn_struct_rwlock, RW_READER);
669 * is the first. Then dbuf_whichblock(dn, level, off + len - 1) is the
670 * last block we want to prefetch, and dbuf_whichblock(dn, level,
674 if (level > 0 || dn->dn_datablkshift != 0) {
675 nblks = dbuf_whichblock(dn, level, offset + len - 1) -
676 dbuf_whichblock(dn, level, offset) + 1;
678 nblks = (offset < dn->dn_datablksz);
682 blkid = dbuf_whichblock(dn, level, offset);
684 dbuf_prefetch(dn, level, blkid + i, pri, 0);
687 rw_exit(&dn->dn_struct_rwlock);
689 dnode_rele(dn, FTAG);
704 get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum, uint64_t *l1blks)
707 uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
710 dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
741 err = dnode_next_offset(dn,
780 dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
783 uint64_t object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
805 if (dmu_objset_zfs_unmounting(dn->dn_objset))
811 err = get_next_chunk(dn, &chunk_begin, offset, &l1blks);
820 dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);
864 l1blks << dn->dn_indblkshift;
869 dnode_free_range(dn, chunk_begin, chunk_len, tx);
881 dnode_t *dn;
884 err = dnode_hold(os, object, FTAG, &dn);
887 err = dmu_free_long_range_impl(os, dn, offset, length);
896 dn->dn_maxblkid = 0;
898 dnode_rele(dn, FTAG);
931 dnode_t *dn;
932 int err = dnode_hold(os, object, FTAG, &dn);
937 dnode_free_range(dn, offset, size, tx);
938 dnode_rele(dn, FTAG);
943 dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
954 if (dn->dn_maxblkid == 0) {
955 int newsz = offset > dn->dn_datablksz ? 0 :
956 MIN(size, dn->dn_datablksz - offset);
969 err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
999 dnode_t *dn;
1002 err = dnode_hold(os, object, FTAG, &dn);
1006 err = dmu_read_impl(dn, offset, size, buf, flags);
1007 dnode_rele(dn, FTAG);
1012 dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
1015 return (dmu_read_impl(dn, offset, size, buf, flags));
1069 dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
1078 VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
1085 dmu_object_remap_one_indirect(objset_t *os, dnode_t *dn,
1088 uint64_t l1blkid = dbuf_whichblock(dn, 1, offset);
1091 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1092 dmu_buf_impl_t *dbuf = dbuf_hold_level(dn, 1, l1blkid, FTAG);
1103 rw_exit(&dn->dn_struct_rwlock);
1113 dmu_tx_hold_remap_l1indirect(tx, dn->dn_object);
1147 dnode_t *dn;
1149 err = dnode_hold(os, object, FTAG, &dn);
1154 if (dn->dn_nlevels <= 1) {
1164 if (err == 0 && dnode_needs_remap(dn)) {
1166 dmu_tx_hold_bonus(tx, dn->dn_object);
1168 dnode_setdirty(dn, tx);
1175 dnode_rele(dn, FTAG);
1180 l1span = 1ULL << (dn->dn_indblkshift - SPA_BLKPTRSHIFT +
1181 dn->dn_datablkshift);
1185 while (dnode_next_offset(dn, 0, &offset, 2, 1, 0) == 0) {
1190 if ((err = dmu_object_remap_one_indirect(os, dn,
1197 dnode_rele(dn, FTAG);
1366 dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size)
1376 err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
1442 dnode_t *dn;
1449 dn = DB_DNODE(db);
1450 err = dmu_read_uio_dnode(dn, uio, size);
1464 dnode_t *dn;
1470 err = dnode_hold(os, object, FTAG, &dn);
1474 err = dmu_read_uio_dnode(dn, uio, size);
1476 dnode_rele(dn, FTAG);
1482 dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
1489 err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
1552 dnode_t *dn;
1559 dn = DB_DNODE(db);
1560 err = dmu_write_uio_dnode(dn, uio, size, tx);
1575 dnode_t *dn;
1581 err = dnode_hold(os, object, FTAG, &dn);
1585 err = dmu_write_uio_dnode(dn, uio, size, tx);
1587 dnode_rele(dn, FTAG);
1925 dmu_assign_arcbuf_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
1932 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1933 blkid = dbuf_whichblock(dn, 0, offset);
1934 VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
1935 rw_exit(&dn->dn_struct_rwlock);
1963 os = dn->dn_objset;
1964 object = dn->dn_object;
2206 dnode_t *dn;
2215 dn = DB_DNODE(db);
2216 dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
2303 dn = DB_DNODE(db);
2304 if (dr->dr_next != NULL || dnode_block_freed(dn, db->db_blkid))
2342 dnode_t *dn;
2345 err = dnode_hold(os, object, FTAG, &dn);
2348 err = dnode_set_blksz(dn, size, ibs, tx);
2349 dnode_rele(dn, FTAG);
2357 dnode_t *dn;
2366 VERIFY0(dnode_hold(os, object, FTAG, &dn));
2368 dn->dn_checksum = checksum;
2369 dnode_setdirty(dn, tx);
2370 dnode_rele(dn, FTAG);
2377 dnode_t *dn;
2386 VERIFY0(dnode_hold(os, object, FTAG, &dn));
2387 dn->dn_compress = compress;
2388 dnode_setdirty(dn, tx);
2389 dnode_rele(dn, FTAG);
2403 dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
2405 dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
2467 compress = zio_compress_select(os->os_spa, dn->dn_compress,
2471 zio_checksum_select(dn->dn_checksum, checksum) :
2505 zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
2518 dnode_t *dn;
2530 err = dnode_hold(os, object, FTAG, &dn);
2535 err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
2536 dnode_rele(dn, FTAG);
2551 dnode_t *dn;
2554 error = dnode_hold(os, object, FTAG, &dn);
2560 if (list_link_active(&dn->dn_dirty_link[i])) {
2564 dnode_rele(dn, FTAG);
2573 __dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
2575 dnode_phys_t *dnp = dn->dn_phys;
2577 doi->doi_data_block_size = dn->dn_datablksz;
2578 doi->doi_metadata_block_size = dn->dn_indblkshift ?
2579 1ULL << dn->dn_indblkshift : 0;
2580 doi->doi_type = dn->dn_type;
2581 doi->doi_bonus_type = dn->dn_bonustype;
2582 doi->doi_bonus_size = dn->dn_bonuslen;
2583 doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT;
2584 doi->doi_indirection = dn->dn_nlevels;
2585 doi->doi_checksum = dn->dn_checksum;
2586 doi->doi_compress = dn->dn_compress;
2587 doi->doi_nblkptr = dn->dn_nblkptr;
2589 doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
2596 dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
2598 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2599 mutex_enter(&dn->dn_mtx);
2601 __dmu_object_info_from_dnode(dn, doi);
2603 mutex_exit(&dn->dn_mtx);
2604 rw_exit(&dn->dn_struct_rwlock);
2614 dnode_t *dn;
2615 int err = dnode_hold(os, object, FTAG, &dn);
2621 dmu_object_info_from_dnode(dn, doi);
2623 dnode_rele(dn, FTAG);
2649 dnode_t *dn;
2652 dn = DB_DNODE(db);
2654 *blksize = dn->dn_datablksz;
2656 *nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
2657 SPA_MINBLOCKSHIFT) + dn->dn_num_slots;
2665 dnode_t *dn;
2668 dn = DB_DNODE(db);
2669 *dnsize = dn->dn_num_slots << DNODE_SHIFT;
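
The matches above repeat one core access pattern: hold the dnode with dnode_hold(), take dn_struct_rwlock as reader, translate the byte offset into a block id with dbuf_whichblock(), take a hold on that dbuf with dbuf_hold(), drop the lock, and finally release the dnode with dnode_rele() (see, for example, lines 161-170 and 184-196). The sketch below strings those calls together in one place for reference. It is a minimal illustration written against only the interfaces visible in the matches, not code copied from the file; the helper name example_buf_hold and the exact header list are assumptions.

/*
 * Hypothetical helper showing the hold pattern from the matches above:
 * byte offset -> block id -> dbuf hold, all under dn_struct_rwlock.
 * Error handling is abbreviated; this is a sketch, not code from the file.
 */
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>

static int
example_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	dmu_buf_impl_t *db;
	uint64_t blkid;
	int err;

	/* Hold the dnode so it cannot go away while we use it. */
	err = dnode_hold(os, object, tag, &dn);
	if (err != 0)
		return (err);

	/*
	 * dn_struct_rwlock protects the dnode fields (block size,
	 * indirection) that dbuf_whichblock() and dbuf_hold() consult.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);

	/* The dbuf hold keeps the buffer alive; the dnode hold can go. */
	dnode_rele(dn, tag);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (0);
}

Two related idioms are also visible in the listing. Lines 350-354 drop the reader lock, re-take dn_struct_rwlock as writer, and repeat the dn->dn_bonus == NULL test after the upgrade, because another thread may have created the bonus dbuf while the lock was not held. The property setters at lines 2357-2370 and 2377-2389 use the simpler bracket of dnode_hold(), field assignment, dnode_setdirty(), and dnode_rele() without touching dn_struct_rwlock at all.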