/freebsd-11-stable/stand/libsa/zfs/
  zfs.c
     363: vdev_read(vdev_t *vdev, void *priv, off_t offset, void *buf, size_t bytes)  [argument]
     775: zfs_fmtdev(void *vdev)  [argument]
     779: struct zfs_devdesc *dev = (struct zfs_devdesc *)vdev;
/freebsd-11-stable/cddl/contrib/opensolaris/cmd/zdb/
  zdb.c
      45: #include <sys/vdev.h>
     144: "[-U <cache>]\n\t\t<poolname> [<vdev> [<metaslab> ...]]\n"
     147: "\t\t<poolname> <vdev>:<offset>:<size>[:<flags>]\n"
     201: "-e to specify path to vdev dir\n");
     845: " %010llx-%010llx size: %06llx vdev: %06llu words: %u\n",
    1068: (void) fatal("bad vdev id: %llu", (u_longlong_t)c);
    2955: (void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n",
    3011: * a device being removed. Therefore, the vdev that
    3013: * vdev.
    3155: * Since the vdev_checkpoint_sm exists in the vdev level
    4656: zdb_vdev_lookup(vdev_t *vdev, const char *path)  [argument]
    4743: const char *s, *vdev;  [local]
    [all ...]
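The zdb.c usage strings above (source lines 144 and 147) show the <poolname> <vdev>:<offset>:<size>[:<flags>] argument form that zdb accepts for reading a raw range from a specific vdev. Below is a minimal sketch of splitting such a spec; parse_vdev_spec and its error handling are hypothetical and are not the parser zdb itself uses.

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Hypothetical parser for a "vdev:offset:size[:flags]" spec, in the
     * spirit of the zdb usage string quoted above.  Numbers may be decimal
     * or 0x-prefixed hex; the optional flags field is returned as a copy.
     * Returns 0 on success, -1 on malformed input.
     */
    static int
    parse_vdev_spec(const char *spec, uint64_t *vdev, uint64_t *offset,
        uint64_t *size, char **flags)
    {
        char *copy = strdup(spec), *s = copy, *tok, *end;
        uint64_t *fields[3] = { vdev, offset, size };

        for (int i = 0; i < 3; i++) {
            if ((tok = strsep(&s, ":")) == NULL || *tok == '\0')
                goto bad;
            *fields[i] = strtoull(tok, &end, 0);
            if (*end != '\0')
                goto bad;
        }
        *flags = (s != NULL) ? strdup(s) : NULL;   /* optional trailing flags */
        free(copy);
        return (0);
    bad:
        free(copy);
        return (-1);
    }

For example, parse_vdev_spec("0:0x400000:0x1000:r", ...) would yield vdev 0, offset 0x400000, size 0x1000, and flags "r".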
/freebsd-11-stable/sys/dev/vxge/
  vxge.h
     327: vxge_dev_t *vdev;  [member in struct _vxge_vpath_t]
/freebsd-11-stable/cddl/contrib/opensolaris/cmd/zhack/
  zhack.c
      43: #include <sys/vdev.h>
/freebsd-11-stable/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/
  dsl_dataset.h
     336: void dsl_dataset_block_remapped(dsl_dataset_t *ds, uint64_t vdev,
/freebsd-11-stable/cddl/contrib/opensolaris/lib/libzfs/common/
  libzfs_pool.c
     152: * vdev's guid then get it from the zhp config nvlist.
    1120: * Create the named pool, using the provided vdev list. It is assumed
    1340: * necessary verification to ensure that the vdev specification is well-formed.
    2014: /* translate vdev names to guids */
    2114: * Find a vdev that matches the search criteria specified. We use the
    2164: * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
    2217: * Determine our vdev type, keeping in mind
    2219: * vdev id pair (i.e. mirror-4).
    2251: * Now verify that we have the correct vdev id.
    2278: * vdev, no
    3126: nvlist_t **mchild, *vdev;  [local]
    [all ...]
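The libzfs_pool.c comments above (source lines 2114-2251) describe looking up a vdev either by GUID, by path, or by a top-level name that may carry a type/id pair such as "mirror-4". The helper below only illustrates splitting that name form; the real lookup walks the pool's config nvlist and compares against ZPOOL_CONFIG_TYPE and ZPOOL_CONFIG_ID, and split_vdev_name is an invented name.

    #include <ctype.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Illustrative helper (not the libzfs implementation): split a
     * top-level vdev name of the form "<type>-<id>", e.g. "mirror-4",
     * into its type string and numeric id.  Returns -1 if the name has
     * no numeric "-<id>" suffix (e.g. a plain "mirror" or a disk name).
     */
    static int
    split_vdev_name(const char *name, char *type, size_t typelen, uint64_t *idp)
    {
        const char *dash = strrchr(name, '-');

        if (dash == NULL || !isdigit((unsigned char)dash[1]))
            return (-1);
        if ((size_t)(dash - name) >= typelen)
            return (-1);                    /* type buffer too small */

        memcpy(type, name, dash - name);
        type[dash - name] = '\0';
        *idp = strtoull(dash + 1, NULL, 10);
        return (0);
    }

split_vdev_name("mirror-4", buf, sizeof (buf), &id) leaves "mirror" in buf and 4 in id.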
/freebsd-11-stable/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/
  spa_misc.c
     102: * - RW_WRITER to change the vdev config
     161: * vdev state without altering the vdev tree's topology (e.g. online/offline),
     178: * Protects changes to the vdev tree topology, such as vdev
     183: * Protects changes to pool state and vdev state, such as vdev
     193: * to prevent changes to the vdev tree. The bp-level zio implicitly
     194: * protects all of its vdev child zios, which do not hold SCL_ZIO.
     204: * Held as reader to prevent changes to the vdev tree
    2003: uint64_t vdev = DVA_GET_VDEV(dva);  [local]
    [all ...]
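The spa_misc.c comments above summarize the SPA config locks (SCL_*): held as writer to change the vdev config or the vdev tree's topology, and as reader to keep the tree stable while it is examined. A rough sketch of that usage pattern follows, assuming the in-kernel spa_config_enter()/spa_config_exit() interface from sys/spa.h; the function bodies are placeholders, not real operations.

    /* Sketch only; assumes ZFS kernel headers (sys/spa.h). */
    static void
    example_vdev_topology_change(spa_t *spa)
    {
        /* Writer: nothing else may walk or change the vdev tree. */
        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
        /* ... attach, detach, or remove a vdev here ... */
        spa_config_exit(spa, SCL_ALL, FTAG);
    }

    static void
    example_vdev_tree_walk(spa_t *spa)
    {
        /* Reader: the topology cannot change while the lock is held. */
        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
        /* ... read per-vdev state ... */
        spa_config_exit(spa, SCL_VDEV, FTAG);
    }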
  zfs_fm.c
      32: #include <sys/vdev.h>
      57: * gets very complicated due to RAID-Z, gang blocks, and vdev caching. We want
      73: * | Physical I/O | no logical data. Issued as part of vdev caching
      91: * will use the same ENA. For vdev cache fill and queue aggregation I/O,
     159: * If the vdev has already been marked as failing due
     161: * errors, as the DE will automatically fault the vdev
     341: * If we have a vdev but no zio, this is a device fault, and the
     343: * vdev.
     839: * The 'resource.fs.zfs.removed' event is an internal signal that the given vdev
     853: * handled by higher level logic, and no vdev fault
    [all ...]
  vdev.c
      57: SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV");
      64: * The limit for ZFS to automatically increase a top-level vdev's ashift
      73: * On pool creation or the addition of a new top-level vdev, ZFS will
      74: * increase the ashift of the top-level vdev to 2048 as limited by
      83: * On pool creation or the addition of a new top-level vdev, ZFS will
      84: * increase the ashift of the top-level vdev to 4096 to match the
      93: * On pool creation or the addition of a new top-level vdev, ZFS will
      94: * increase the ashift of the top-level vdev to 4096 to match the
     166: /* target number of metaslabs per top-level vdev */
     170: "Target number of metaslabs per top-level vdev");
     380: vdev_lookup_top(spa_t *spa, uint64_t vdev)  [argument]
    [all ...]
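The vdev.c comment blocks above describe how, on pool creation or the addition of a new top-level vdev, ZFS raises the top-level vdev's ashift toward the children's physical sector size, bounded by the min/max auto-ashift tunables (the vfs.zfs.*_auto_ashift sysctls). The clamp below is a simplified sketch of that decision, not the actual vdev code; the variable and function names are illustrative.

    #include <stdint.h>

    /* Illustrative stand-ins for the auto-ashift tunables (log2 of sector size). */
    static uint64_t min_auto_ashift = 9;     /* 2^9  = 512-byte sectors */
    static uint64_t max_auto_ashift = 13;    /* 2^13 = 8K sectors */

    /*
     * Sketch: choose the ashift for a new top-level vdev from its logical
     * ashift and the largest physical ashift reported by its children,
     * clamped to the [min, max] auto-ashift window.
     */
    static uint64_t
    choose_top_level_ashift(uint64_t logical_ashift, uint64_t physical_ashift)
    {
        uint64_t ashift = physical_ashift;

        if (ashift > max_auto_ashift)
            ashift = max_auto_ashift;
        if (ashift < min_auto_ashift)
            ashift = min_auto_ashift;
        if (ashift < logical_ashift)
            ashift = logical_ashift;    /* never below the logical sector size */
        return (ashift);
    }

With 512e children reporting a physical ashift of 12 and a logical ashift of 9, this yields 12 (4096-byte alignment), matching the "increase ... to 4096" cases in the comments; capping max_auto_ashift at 11 would instead limit the result to 11 (2048).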
  dsl_dataset.c
      52: #include <sys/vdev.h>
     173: * longer referenced in the head dataset. The vdev must be indirect.
     179: dsl_dataset_block_remapped(dsl_dataset_t *ds, uint64_t vdev, uint64_t offset,  [argument]
     189: spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
     204: DVA_SET_VDEV(dva, vdev);
  dbuf.c
      50: #include <sys/vdev.h>
    3618: dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,  [argument]
    3629: spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
    3631: dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,  [local]
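The dsl_dataset.c and dbuf.c matches above belong to the device-removal remap path: a callback receives the old (vdev, offset, size) triple, marks that range obsolete on the now-indirect vdev via spa_vdev_indirect_mark_obsolete(), and dsl_dataset_block_remapped() records the remap, filling in a DVA with DVA_SET_VDEV() as at source line 204. The fragment below only illustrates setting a DVA's fields with the DVA_SET_* accessors from sys/spa.h; remap_entry_t and fill_dva() are hypothetical names, not part of that path.

    /* Assumes ZFS kernel headers (sys/spa.h) for dva_t and the DVA_SET_* macros. */
    typedef struct remap_entry {
        uint64_t re_vdev;      /* top-level vdev id */
        uint64_t re_offset;    /* offset within that vdev */
        uint64_t re_asize;     /* allocated size */
    } remap_entry_t;

    static void
    fill_dva(dva_t *dva, const remap_entry_t *re)
    {
        DVA_SET_VDEV(dva, re->re_vdev);
        DVA_SET_OFFSET(dva, re->re_offset);
        DVA_SET_ASIZE(dva, re->re_asize);
    }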
  vdev_label.c
      31: * The vdev label serves several distinct purposes:
      42: * toplevel vdev of which it is a part.
      80: * 1. For each vdev, update 'L1' to the new label
      82: * 3. For each vdev, update 'L2' to the new label
      95: * on another vdev.
     101: * The vdev label consists of two distinct parts, and is wrapped within the
     106: * properties, per-vdev properties, and configuration information. It is
     112: * vdev for the 'best' uberblock.
     118: * The nvlist describing the pool and vdev contains the following elements:
     125: * vdev_tree    An nvlist describing vdev tree
    [all ...]
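The vdev_label.c comments above (source lines 80-95) outline the label update ordering: every vdev's 'L1' labels are rewritten first, then the uberblocks, then every vdev's 'L2' labels, so that a crash at any point leaves either the old or the new configuration intact on each device. The sketch below mirrors that ordering only; write_label(), write_uberblock(), and flush_all() are hypothetical helpers, and the real work is done by vdev_config_sync() in this file.

    /*
     * Sketch of the two-pass label update described above (not the actual
     * vdev_config_sync()).  Each pass is flushed before the next begins, so
     * a crash leaves either the old or the new label pair valid.
     */
    static int
    sync_labels(vdev_t **vdevs, int nvdevs, const void *newlabel)
    {
        for (int i = 0; i < nvdevs; i++)            /* 1. update L1 everywhere */
            if (write_label(vdevs[i], 0, newlabel) != 0)
                return (-1);
        flush_all(vdevs, nvdevs);

        for (int i = 0; i < nvdevs; i++)            /* 2. update the uberblocks */
            if (write_uberblock(vdevs[i], newlabel) != 0)
                return (-1);
        flush_all(vdevs, nvdevs);

        for (int i = 0; i < nvdevs; i++)            /* 3. update L2 everywhere */
            if (write_label(vdevs[i], 1, newlabel) != 0)
                return (-1);
        flush_all(vdevs, nvdevs);
        return (0);
    }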
  spa.c
     190: * to get the vdev stats associated with the imported devices.
     195: * For debugging purposes: print out vdev tree during pool import.
     206: * With 1 missing vdev we should be able to import the pool and mount all
     250: "print out vdev tree during pool import");
     587: * Make sure the vdev config is bootable
     873: * the root vdev's guid, our own pool guid, and then mark all of our
    1234: offsetof(struct vdev, vdev_txg_node));
    1328: * Verify a pool configuration, and construct the vdev tree appropriately. This
    1329: * will create all the necessary vdevs in the appropriate layout, with each vdev
    1331: * All vdev validation
    2226: spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)  [argument]
    [all ...]
  dmu_objset.c
      56: #include <sys/vdev.h>
  arc.c
     105: * The L2ARC uses the l2ad_mtx on each vdev for the following:
     263: #include <sys/vdev.h>
    1439: vdev_t *l2ad_vdev; /* vdev */
    5729: * 1. The L2ARC vdev was previously cached.
    5733: * also have invalidated the vdev.
    7016: * | vdev | | vdev |
    7027: * 2) vdev cache of L2ARC devices
    7029: * 4) vdev cache of disks
    7097: * the vdev queue
    [all ...]
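The arc.c comments above (source lines 7016-7029) sketch the L2ARC read hierarchy: a read is satisfied from the in-memory ARC when possible, then from the L2ARC devices (and their vdev cache), and only then from the pool disks. The fallback chain below is only a schematic of that ordering; arc_hit(), l2arc_hit(), and disk_read() are invented names, not ARC functions.

    #include <stdbool.h>
    #include <stdint.h>

    /* Invented helpers standing in for the real ARC/L2ARC lookups. */
    bool arc_hit(uint64_t offset, uint64_t size, void *buf);
    bool l2arc_hit(uint64_t offset, uint64_t size, void *buf);
    int  disk_read(uint64_t vdev, uint64_t offset, uint64_t size, void *buf);

    /* Schematic of the read priority: ARC, then L2ARC, then the pool disks. */
    static int
    read_block(uint64_t vdev, uint64_t offset, uint64_t size, void *buf)
    {
        if (arc_hit(offset, size, buf))
            return (0);                 /* in-memory ARC */
        if (l2arc_hit(offset, size, buf))
            return (0);                 /* L2ARC device (behind its vdev cache) */
        return (disk_read(vdev, offset, size, buf));
    }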
  zfs_ioctl.c
     167: #include <sys/vdev.h>
    2004: * zc_guid guid of vdev to remove
/freebsd-11-stable/usr.sbin/bsdinstall/scripts/
  zfsboot
      63: # Default Virtual Device (vdev) type to create
     173: # If interactive and the user has not explicitly chosen a vdev type or disks,
     474: # must select enough disks to satisfy the chosen vdev type.
     495: # Warn the user if vdev type is not valid
     506: # Calculate size of vdev menu once only
     548: # disks are selected to satisfy the chosen vdev type or user wants to
     555: # Confirm the vdev type that was selected
     579: # Determine the number of disks needed for this vdev type
    1301: for vdev in $zroot_vdevs; do
    1306: raid10_vdevs="$raid10_vdevs $vdev"
    [all ...]
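The zfsboot matches above reference a step that determines how many disks the chosen vdev type needs and refuses to proceed until enough are selected. The helper below restates the usual minimums (stripe 1, mirror 2, raidz1 3, raidz2 4, raidz3 5) as a small C table for illustration; the authoritative numbers are the ones in the script itself.

    #include <string.h>

    /*
     * Illustrative minimum disk counts per vdev type; see zfsboot for the
     * values the installer actually enforces.  Returns -1 for unknown types.
     */
    static int
    min_disks_for_vdev_type(const char *type)
    {
        if (strcmp(type, "stripe") == 0)
            return (1);
        if (strcmp(type, "mirror") == 0)
            return (2);
        if (strcmp(type, "raidz1") == 0)
            return (3);
        if (strcmp(type, "raidz2") == 0)
            return (4);
        if (strcmp(type, "raidz3") == 0)
            return (5);
        return (-1);
    }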
/freebsd-11-stable/cddl/contrib/opensolaris/cmd/ztest/
  ztest.c
     595: "\t[-f dir (default: %s)] file directory for vdev files\n"
     847: uint64_t vdev;  [local]
     857: vdev = ztest_shared->zs_vdev_aux;
     861: aux, vdev);
     863: vdev = ztest_shared->zs_vdev_next_leaf++;
     866: pool == NULL ? ztest_opts.zo_pool : pool, vdev);
    2376: * or create a pool with a bad vdev spec.
    2736: * vdev state first to make sure we handle removal
    2884: * If a vdev is in the process of being removed, its removal may
    2901: * Pick a random top-level vdev
    [all ...]