Searched refs:vdev (Results 1 - 25 of 32) sorted by relevance

/opensolaris-onvv-gate/usr/src/uts/common/fs/zfs/sys/
vdev_file.h:32 #include <sys/vdev.h>
uberblock.h:30 #include <sys/vdev.h>
vdev_disk.h:31 #include <sys/vdev.h>
metaslab_impl.h:31 #include <sys/vdev.h>
vdev_impl.h:33 #include <sys/vdev.h>
111 struct vdev { struct
113 * Common to all vdev types.
115 uint64_t vdev_id; /* child number in vdev parent */
116 uint64_t vdev_guid; /* unique ID for this vdev */
123 uint64_t vdev_prevstate; /* used when reopening a vdev */
124 vdev_ops_t *vdev_ops; /* vdev operations */
125 spa_t *vdev_spa; /* spa for this vdev */
129 vdev_t *vdev_top; /* top-level vdev */
130 vdev_t *vdev_parent; /* parent vdev */
[all...]
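The struct vdev fields above carry the pool topology: every vdev records its child index (vdev_id), a pool-wide unique vdev_guid, a back pointer to its parent, and a cached pointer to its top-level ancestor (vdev_top). A minimal sketch of those relationships; the stand-in type and the find_top() helper are hypothetical, not part of vdev_impl.h:

/*
 * Minimal sketch, not the real vdev_t: how vdev_id, vdev_parent, and
 * vdev_top tie a leaf into the tree. find_top() recomputes what the
 * real code caches in vdev_top (the ancestor just below the root).
 */
#include <stdio.h>
#include <stdint.h>

typedef struct vdev_sketch {
	uint64_t            vdev_id;     /* child number in vdev parent */
	uint64_t            vdev_guid;   /* unique ID for this vdev */
	struct vdev_sketch *vdev_parent; /* parent vdev, NULL at the root */
	struct vdev_sketch *vdev_top;    /* cached top-level vdev */
} vdev_sketch_t;

static vdev_sketch_t *
find_top(vdev_sketch_t *vd)
{
	while (vd->vdev_parent != NULL && vd->vdev_parent->vdev_parent != NULL)
		vd = vd->vdev_parent;
	return (vd);
}

int
main(void)
{
	vdev_sketch_t root   = { 0, 100, NULL, NULL };
	vdev_sketch_t mirror = { 0, 200, &root, NULL };
	vdev_sketch_t disk   = { 1, 300, &mirror, NULL };

	disk.vdev_top = find_top(&disk);	/* resolves to the mirror */
	printf("disk guid %llu sits under top-level guid %llu\n",
	    (unsigned long long)disk.vdev_guid,
	    (unsigned long long)disk.vdev_top->vdev_guid);
	return (0);
}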
spa_impl.h:29 #include <sys/vdev.h>
136 txg_list_t spa_vdev_txg_list; /* per-txg dirty vdev list */
137 vdev_t *spa_root_vdev; /* top-level vdev container */
186 vdev_t *spa_pending_vdev; /* pending vdev additions */
vdev.h:59 extern vdev_t *vdev_lookup_top(spa_t *spa, uint64_t vdev);
spa.h:43 typedef struct vdev vdev_t;
164 * vdev virtual device ID
545 /* Pool vdev add/remove lock */
552 /* Pool vdev state change lock */
669 /* vdev cache */
685 extern void spa_event_notify(spa_t *spa, vdev_t *vdev, const char *name);
/opensolaris-onvv-gate/usr/src/lib/libzfs_jni/common/
libzfs_jni_pool.c:420 nvlist_t *vdev, uint64_t *p_vdev_id, VirtualDeviceBean_t *bean)
429 result = populate_DeviceStatsBean(env, vdev, stats, object);
439 /* Set parent vdev index */
446 result = nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &vdev_id);
462 nvlist_t *vdev, uint64_t *p_vdev_id, LeafVirtualDeviceBean_t *bean)
465 env, zhp, vdev, p_vdev_id, (VirtualDeviceBean_t *)bean));
470 nvlist_t *vdev, uint64_t *p_vdev_id, DiskVirtualDeviceBean_t *bean)
474 env, zhp, vdev, p_vdev_id, (LeafVirtualDeviceBean_t *)bean);
482 result = nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path);
541 nvlist_t *vdev, uint64_
419 populate_VirtualDeviceBean(JNIEnv *env, zpool_handle_t *zhp, nvlist_t *vdev, uint64_t *p_vdev_id, VirtualDeviceBean_t *bean) argument
461 populate_LeafVirtualDeviceBean(JNIEnv *env, zpool_handle_t *zhp, nvlist_t *vdev, uint64_t *p_vdev_id, LeafVirtualDeviceBean_t *bean) argument
469 populate_DiskVirtualDeviceBean(JNIEnv *env, zpool_handle_t *zhp, nvlist_t *vdev, uint64_t *p_vdev_id, DiskVirtualDeviceBean_t *bean) argument
540 populate_SliceVirtualDeviceBean(JNIEnv *env, zpool_handle_t *zhp, nvlist_t *vdev, uint64_t *p_vdev_id, SliceVirtualDeviceBean_t *bean) argument
570 populate_FileVirtualDeviceBean(JNIEnv *env, zpool_handle_t *zhp, nvlist_t *vdev, uint64_t *p_vdev_id, FileVirtualDeviceBean_t *bean) argument
599 populate_RAIDVirtualDeviceBean(JNIEnv *env, zpool_handle_t *zhp, nvlist_t *vdev, uint64_t *p_vdev_id, RAIDVirtualDeviceBean_t *bean) argument
607 populate_MirrorVirtualDeviceBean(JNIEnv *env, zpool_handle_t *zhp, nvlist_t *vdev, uint64_t *p_vdev_id, MirrorVirtualDeviceBean_t *bean) argument
634 create_DiskVirtualDeviceBean(JNIEnv *env, zpool_handle_t *zhp, nvlist_t *vdev, uint64_t *p_vdev_id) argument
655 create_SliceVirtualDeviceBean(JNIEnv *env, zpool_handle_t *zhp, nvlist_t *vdev, uint64_t *p_vdev_id) argument
676 create_FileVirtualDeviceBean(JNIEnv *env, zpool_handle_t *zhp, nvlist_t *vdev, uint64_t *p_vdev_id) argument
697 create_RAIDVirtualDeviceBean(JNIEnv *env, zpool_handle_t *zhp, nvlist_t *vdev, uint64_t *p_vdev_id) argument
734 create_MirrorVirtualDeviceBean(JNIEnv *env, zpool_handle_t *zhp, nvlist_t *vdev, uint64_t *p_vdev_id) argument
938 zjni_get_VirtualDevice_from_vdev(JNIEnv *env, zpool_handle_t *zhp, nvlist_t *vdev, uint64_t *p_vdev_id) argument
1067 populate_DeviceStatsBean(JNIEnv *env, nvlist_t *vdev, DeviceStatsBean_t *bean, zjni_Object_t *object) argument
[all...]
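The populate_*Bean() functions listed above all follow one pattern: pull named fields out of the vdev's nvlist config with nvlist_lookup_*() and treat a missing entry as non-fatal. A hedged, self-contained sketch of that lookup pattern using plain libnvpair calls; the literal "guid" and "path" keys mirror ZPOOL_CONFIG_GUID and ZPOOL_CONFIG_PATH from sys/fs/zfs.h (compile with -lnvpair on illumos/Solaris):

#include <stdio.h>
#include <libnvpair.h>

int
main(void)
{
	nvlist_t *vdev;
	uint64_t guid;
	char *path;

	/* Stand-in for one leaf entry of a pool's vdev config tree. */
	if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
		return (1);
	(void) nvlist_add_string(vdev, "path", "/dev/dsk/c0t0d0s0");
	(void) nvlist_add_uint64(vdev, "guid", 12345);

	/* The lookup side, as in populate_VirtualDeviceBean(). */
	if (nvlist_lookup_uint64(vdev, "guid", &guid) == 0)
		printf("guid = %llu\n", (unsigned long long)guid);
	if (nvlist_lookup_string(vdev, "path", &path) == 0)
		printf("path = %s\n", path);

	nvlist_free(vdev);
	return (0);
}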
libzfs_jni_main.c:331 jobject vdev = NULL; local
345 vdev = zjni_get_VirtualDevice_from_vdev(
353 return (vdev);
/opensolaris-onvv-gate/usr/src/cmd/fm/dicts/
ZFS.dict:44 fault.fs.zfs.vdev.io=14
45 fault.fs.zfs.vdev.checksum=15
/opensolaris-onvv-gate/usr/src/cmd/fm/modules/common/zfs-retire/
zfs_retire.c:32 * marking the vdev FAULTY (for I/O errors) or DEGRADED (for checksum errors).
91 * Find a vdev within a tree with a matching GUID.
137 * Given a (pool, vdev) GUID pair, find the matching pool and vdev.
148 * Find the corresponding pool and make sure the vdev still exists.
198 * Given a FRU FMRI, find the matching pool and vdev.
215 * Given a vdev, attempt to replace it with every known spare until one
219 replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev) argument
243 dev_name = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);
269 * Repair this vdev i
365 nvlist_t *vdev; local
[all...]
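zfs_retire.c:91 above describes finding "a vdev within a tree with a matching GUID". A sketch of that depth-first nvlist walk follows; the function name and the bare "guid"/"children" keys (standing in for ZPOOL_CONFIG_GUID and ZPOOL_CONFIG_CHILDREN) are illustrative rather than the zfs_retire.c original:

#include <stdio.h>
#include <libnvpair.h>

/* Depth-first search for a vdev config nvlist with a matching GUID. */
static nvlist_t *
find_vdev_by_guid(nvlist_t *nv, uint64_t search_guid)
{
	uint64_t guid;
	nvlist_t **child, *ret;
	uint_t c, children;

	if (nvlist_lookup_uint64(nv, "guid", &guid) == 0 &&
	    guid == search_guid)
		return (nv);

	if (nvlist_lookup_nvlist_array(nv, "children", &child,
	    &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++)
		if ((ret = find_vdev_by_guid(child[c], search_guid)) != NULL)
			return (ret);
	return (NULL);
}

int
main(void)
{
	nvlist_t *root, *child;

	(void) nvlist_alloc(&child, NV_UNIQUE_NAME, 0);
	(void) nvlist_add_uint64(child, "guid", 42);
	(void) nvlist_alloc(&root, NV_UNIQUE_NAME, 0);
	(void) nvlist_add_uint64(root, "guid", 1);
	(void) nvlist_add_nvlist_array(root, "children", &child, 1);

	printf("%s\n", find_vdev_by_guid(root, 42) != NULL ?
	    "found" : "not found");
	nvlist_free(child);
	nvlist_free(root);
	return (0);
}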
/opensolaris-onvv-gate/usr/src/lib/libzpool/common/
llib-lzpool:41 #include <sys/vdev.h>
/opensolaris-onvv-gate/usr/src/lib/libzfs/common/
libzfs_status.c:117 find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t)) argument
125 * Ignore problems within a 'replacing' vdev, since we're presumably in
130 verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
134 if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
140 verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
193 * Currently resilvering a vdev
libzfs_pool.c:146 * vdev's guid then get it from the zhp config nvlist.
872 * Create the named pool, using the provided vdev list. It is assumed
1050 * necessary verification to ensure that the vdev specification is well-formed.
1667 * Find a vdev that matches the search criteria specified. We use the
1716 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1763 * Determine our vdev type, keeping in mind
1765 * vdev id pair (i.e. mirror-4).
1800 * Now verify that we have the correct vdev id.
1827 * vdev, not the leaf vdevs. So we always lookup the
1828 * log device from the root of the vdev tre
2672 nvlist_t **mchild, *vdev; local
[all...]
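find_vdev_problem() at libzfs_status.c:117 applies a caller-supplied predicate to each vdev's read/write/checksum error counters and recurses into the children, skipping 'replacing' vdevs whose errors are expected. A stand-in sketch of that shape, with plain structs in place of the real ZPOOL_CONFIG_VDEV_STATS nvlist array:

#include <stdio.h>
#include <stdint.h>

typedef struct fake_vdev {
	uint64_t read_errors, write_errors, cksum_errors;
	struct fake_vdev *child;	/* first child */
	struct fake_vdev *sibling;	/* next sibling */
} fake_vdev_t;

/* Return nonzero if any vdev in the tree trips the predicate. */
static int
find_vdev_problem(fake_vdev_t *vd,
    int (*func)(uint64_t, uint64_t, uint64_t))
{
	if (func(vd->read_errors, vd->write_errors, vd->cksum_errors))
		return (1);
	for (fake_vdev_t *c = vd->child; c != NULL; c = c->sibling)
		if (find_vdev_problem(c, func))
			return (1);
	return (0);
}

/* Example predicate: any checksum error at all counts as a problem. */
static int
vdev_cksum(uint64_t read, uint64_t write, uint64_t cksum)
{
	(void) read; (void) write;
	return (cksum != 0);
}

int
main(void)
{
	fake_vdev_t leaf = { 0, 0, 3, NULL, NULL };
	fake_vdev_t root = { 0, 0, 0, &leaf, NULL };

	printf("problem: %s\n",
	    find_vdev_problem(&root, vdev_cksum) ? "yes" : "no");
	return (0);
}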
/opensolaris-onvv-gate/usr/src/uts/common/xen/io/
xpvd.c:728 int *domain, *vdev; local
743 * Use "domain" and "vdev" properties (backend drivers).
747 DDI_PROP_DONTPASS, "vdev", &vdev, &nvdev)
754 (void) snprintf(addr, addrlen, "%d,%d", domain[0], vdev[0]);
755 ddi_prop_free(vdev);
876 /* Backend format is "<domain>,<vdev>". */
882 /* Frontend format is "<vdev>". */
917 int vdev; local
919 if (!i_xpvd_parse_devname(arg, &devclass, &dom, &vdev)) {
[all...]
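The xpvd.c snippets above spell out the unit-address convention: backend nodes are addressed as "<domain>,<vdev>", frontends as just "<vdev>". A small sketch of telling the two apart; the parse function name is hypothetical, and i_xpvd_parse_devname() in the source does more than this:

#include <stdio.h>

/* Hypothetical parser; returns 0 on success, -1 on a malformed address. */
static int
parse_xpvd_addr(const char *addr, int *domain, int *vdev)
{
	if (sscanf(addr, "%d,%d", domain, vdev) == 2)
		return (0);		/* backend: "<domain>,<vdev>" */
	if (sscanf(addr, "%d", vdev) == 1) {
		*domain = -1;		/* frontend: "<vdev>" only */
		return (0);
	}
	return (-1);
}

int
main(void)
{
	int dom, vdev;

	if (parse_xpvd_addr("0,51712", &dom, &vdev) == 0)
		printf("domain %d, vdev %d\n", dom, vdev);
	return (0);
}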
/opensolaris-onvv-gate/usr/src/cmd/syseventd/modules/zfs_mod/
zfs_mod.c:49 * 6. If the pool has the 'autoreplace' property set, and the matching vdev
132 * The device associated with the given vdev (either by devid or physical path)
147 zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t isdisk) argument
157 if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
160 (void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
161 (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
224 * Construct the root vdev to pass to zpool_vdev_attach(). While adding
225 * the entire vdev structure is harmless, we construct a reduced set of
258 * Utility functions to find a vdev matching given criteria.
368 * Given a physical device path, iterate over all (pool, vdev) pair
[all...]
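zfs_mod.c:224 notes that zpool_vdev_attach() only needs a reduced root vdev rather than the full pool config. A hedged sketch of building that reduced nvlist: a "root" node whose children array holds the single replacement leaf. The bare "type", "path", "whole_disk", and "children" keys mirror the ZPOOL_CONFIG_* names; error handling is elided:

#include <stdio.h>
#include <libnvpair.h>

static nvlist_t *
make_replacement_root(const char *path)
{
	nvlist_t *leaf, *nvroot;

	(void) nvlist_alloc(&leaf, NV_UNIQUE_NAME, 0);
	(void) nvlist_add_string(leaf, "type", "disk");
	(void) nvlist_add_string(leaf, "path", path);
	(void) nvlist_add_uint64(leaf, "whole_disk", 1);

	(void) nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0);
	(void) nvlist_add_string(nvroot, "type", "root");
	(void) nvlist_add_nvlist_array(nvroot, "children", &leaf, 1);
	nvlist_free(leaf);	/* the array add took a copy */

	return (nvroot);
}

int
main(void)
{
	nvlist_t *nvroot = make_replacement_root("/dev/dsk/c1t0d0s0");

	nvlist_print(stdout, nvroot);	/* libnvpair debug dump */
	nvlist_free(nvroot);
	return (0);
}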
/opensolaris-onvv-gate/usr/src/cmd/zpool/
zpool_vdev.c:46 * the same level as the root of the vdev tree.
51 * 1. Construct the vdev specification. Performs syntax validation and
85 * For any given vdev specification, we can have multiple errors. The
99 (void) fprintf(stderr, gettext("invalid vdev specification\n"));
381 * Create a leaf vdev. Determine if this is a file or a device. If it's a
383 * leaf vdev are:
394 nvlist_t *vdev = NULL; local
399 * Determine what type of vdev this is, and put the full path into
467 * acceptable to use. Construct the nvlist to describe this vdev. All
470 verify(nvlist_alloc(&vdev, NV_UNIQUE_NAM
[all...]
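zpool_vdev.c:381 says a leaf vdev is classified as either a file or a device. A sketch of that decision via stat(2); the function name is illustrative, and the returned strings match the VDEV_TYPE_FILE/VDEV_TYPE_DISK config names:

#include <sys/stat.h>
#include <stdio.h>

static const char *
leaf_vdev_type(const char *path)
{
	struct stat st;

	if (stat(path, &st) != 0)
		return (NULL);		/* caller reports the error */
	if (S_ISREG(st.st_mode))
		return ("file");	/* VDEV_TYPE_FILE */
	if (S_ISBLK(st.st_mode) || S_ISCHR(st.st_mode))
		return ("disk");	/* VDEV_TYPE_DISK */
	return (NULL);			/* invalid vdev specification */
}

int
main(int argc, char **argv)
{
	const char *t = leaf_vdev_type(argc > 1 ? argv[1] : "/dev/null");

	printf("vdev type: %s\n", t != NULL ? t : "unknown");
	return (0);
}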
/opensolaris-onvv-gate/usr/src/cmd/mdb/common/modules/zfs/
zfs.c:1015 * -v Print vdev state
1016 * -e Print vdev error stats
1127 * ::vdev
1136 * With '-e', the statistics associated with the vdev are printed as well.
1142 vdev_t vdev; local
1148 if (mdb_vread(&vdev, sizeof (vdev), (uintptr_t)addr) == -1) {
1156 if (vdev.vdev_path != NULL) {
1158 (uintptr_t)vdev.vdev_path) == -1) {
1160 vdev
[all...]
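The ::vdev dcmd above follows the standard MDB module pattern: copy the in-kernel structure at the given address into local memory with mdb_vread(), then chase embedded pointers with mdb_readstr(). A sketch against the MDB module API, with a stub struct standing in for the real vdev_t (this compiles as part of an MDB module, not as a standalone program):

#include <sys/mdb_modapi.h>
#include <sys/param.h>

/* Stand-in for vdev_t; only the field this sketch follows. */
typedef struct vdev_stub {
	char *vdev_path;	/* pointer valid in the target, not here */
} vdev_stub_t;

static int
vdev_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	vdev_stub_t vdev;
	char path[MAXPATHLEN];

	(void) flags; (void) argc; (void) argv;

	if (mdb_vread(&vdev, sizeof (vdev), addr) == -1) {
		mdb_warn("failed to read vdev_t at %p", addr);
		return (DCMD_ERR);
	}
	if (vdev.vdev_path != NULL &&
	    mdb_readstr(path, sizeof (path), (uintptr_t)vdev.vdev_path) != -1)
		mdb_printf("%p %s\n", addr, path);
	return (DCMD_OK);
}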
/opensolaris-onvv-gate/usr/src/uts/common/xen/os/
xvdi.c:252 DDI_PROP_DONTPASS, "vdev", VDEV_NOXS);
326 * and is contained in the 'vdev' property.
732 domid_t dom, int vdev)
748 if (vdev != VDEV_NOXS) {
751 "%s/%d", xdcp->xs_path_fe, vdev);
756 "%s/%d/%d", xdcp->xs_path_be, dom, vdev);
807 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "vdev", vdev);
943 domid_t dom, int vdev)
956 /* Console and soft devices have no vdev
731 xvdi_create_dev(dev_info_t *parent, xendev_devclass_t devclass, domid_t dom, int vdev) argument
942 xvdi_find_dev(dev_info_t *parent, xendev_devclass_t devclass, domid_t dom, int vdev) argument
1865 int vdev; local
2336 int i, vdev, circ; local
[all...]
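The xvdi.c snippets show the xenstore path layout: frontend device nodes live at <fe-base>/<vdev> and backend nodes at <be-base>/<domain>/<vdev>, with VDEV_NOXS marking devices that have no xenstore node at all. A sketch with example base paths for a virtual block device:

#include <stdio.h>

#define	VDEV_NOXS	(-1)

static void
format_xs_paths(int dom, int vdev)
{
	char fe[64], be[64];

	if (vdev == VDEV_NOXS)
		return;	/* console and soft devices have no vdev */

	(void) snprintf(fe, sizeof (fe), "%s/%d", "device/vbd", vdev);
	(void) snprintf(be, sizeof (be), "%s/%d/%d", "backend/vbd", dom, vdev);
	printf("frontend: %s\nbackend:  %s\n", fe, be);
}

int
main(void)
{
	format_xs_paths(3, 51712);	/* domain 3, vbd 51712 (xvda) */
	return (0);
}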
/opensolaris-onvv-gate/usr/src/cmd/zdb/
zdb.c:39 #include <sys/vdev.h>
107 " %s -m [-L] poolname [vdev [metaslab...]]\n"
108 " %s -R poolname vdev:offset:size[:flags]\n"
152 "-e to specify path to vdev dir\n");
610 (void) fatal("bad vdev id: %llu", (u_longlong_t)c);
2029 (void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n",
2579 * RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 .
2582 zdb_vdev_lookup(vdev_t *vdev, char *path) argument
2587 if (vdev == NULL)
2594 if (i < 0 || i >= vdev
2661 char *s, *p, *dup, *vdev, *flagstr; local
[all...]
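zdb.c:2579 explains the dotted vdev notation: "0.1" names child 1 of top-level vdev 0, which is how you pick one half of a split RAID-Z in the -R example. A sketch of that lookup over a stand-in tree type:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct tvdev {
	struct tvdev **children;
	int nchildren;
} tvdev_t;

/* Descend one child index per '.'-separated component; NULL on bad path. */
static tvdev_t *
vdev_lookup_path(tvdev_t *vd, char *path)
{
	char *tok, *last;

	for (tok = strtok_r(path, ".", &last); tok != NULL && vd != NULL;
	    tok = strtok_r(NULL, ".", &last)) {
		long i = strtol(tok, NULL, 10);

		if (i < 0 || i >= vd->nchildren)
			return (NULL);
		vd = vd->children[i];
	}
	return (vd);
}

int
main(void)
{
	tvdev_t leaf0 = { NULL, 0 }, leaf1 = { NULL, 0 };
	tvdev_t *kids[] = { &leaf0, &leaf1 };
	tvdev_t top = { kids, 2 };
	tvdev_t *tops[] = { &top };
	tvdev_t root = { tops, 1 };
	char path[] = "0.1";	/* child 1 of top-level vdev 0 */

	printf("found leaf1: %s\n",
	    vdev_lookup_path(&root, path) == &leaf1 ? "yes" : "no");
	return (0);
}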
/opensolaris-onvv-gate/usr/src/grub/grub-0.97/stage2/
fsys_zfs.c:1112 * Check if this vdev is online and is in a good state.
1131 * Get a valid vdev pathname/devid from the boot device.
1157 /* for a spare vdev, pick the disk labeled with "is_spare" */
1204 * Check the disk label information and retrieve needed vdev name-value pairs.
1214 vdev_phys_t *vdev; local
1222 /* Read in the vdev name-value pair list (112K). */
1226 vdev = (vdev_phys_t *)stack;
1229 if (nvlist_unpack(vdev->vp_nvlist, &nvlist))
/opensolaris-onvv-gate/usr/src/uts/common/fs/zfs/
metaslab.c:217 * because we're done, and possibly removing the vdev.
1029 * allocmaps and freemaps and add its capacity to the vdev.
1266 * consecutive vdevs. If we're forced to reuse a vdev before we've
1268 * that vdev as much as possible. If it turns out to not be possible,
1272 * able to reason about. Otherwise, any two top-level vdev failures
1274 * only two adjacent top-level vdev failures will result in data loss.
1277 * ourselves on the same vdev as our gang block header. That
1285 * It's possible the vdev we're using as the hint no
1334 * Avoid writing single-copy data to a failing vdev
1358 * figure out whether the corresponding vdev i
1422 uint64_t vdev = DVA_GET_VDEV(dva); local
1470 uint64_t vdev = DVA_GET_VDEV(dva); local
[all...]
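The two "uint64_t vdev = DVA_GET_VDEV(dva)" locals above extract the top-level vdev id that a DVA carries. A simplified sketch of that decode; the onnv-era spa.h packs the vdev id into the upper bits of the DVA's first word, but treat the exact shift and width here as illustrative, not normative:

#include <stdio.h>
#include <stdint.h>

typedef struct dva {
	uint64_t dva_word[2];
} dva_t;

/* Illustrative decode: vdev id in the upper 32 bits of word 0. */
static uint64_t
dva_get_vdev(const dva_t *dva)
{
	return (dva->dva_word[0] >> 32);
}

int
main(void)
{
	dva_t d = { { (7ULL << 32) | 0x2000, 0 } };	/* vdev 7 */

	printf("vdev = %llu\n", (unsigned long long)dva_get_vdev(&d));
	return (0);
}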
vdev.c:61 /* maximum scrub/resilver I/O queue per leaf vdev */
65 * Given a vdev type, return the appropriate ops vector.
99 * the vdev's asize rounded to the nearest metaslab. This allows us to
116 * The top-level vdev just returns the allocatable size rounded
123 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
142 vdev_lookup_top(spa_t *spa, uint64_t vdev) argument
148 if (vdev < rvd->vdev_children) {
149 ASSERT(rvd->vdev_child[vdev] != NULL);
150 return (rvd->vdev_child[vdev]);
294 * The root vdev'
[all...]
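vdev.c:123 states the raidz sizing rule: the allocatable space of a raidz vdev is N * sizeof(smallest child), so one undersized disk caps the whole group. A small worked sketch (names illustrative):

#include <stdio.h>
#include <stdint.h>

/* N * sizeof(smallest child), per the vdev.c comment above. */
static uint64_t
raidz_asize(const uint64_t *child_asize, int children)
{
	uint64_t smallest = child_asize[0];

	for (int c = 1; c < children; c++)
		if (child_asize[c] < smallest)
			smallest = child_asize[c];
	return (smallest * (uint64_t)children);
}

int
main(void)
{
	uint64_t sizes[] = { 500, 500, 400 };	/* e.g. GB per disk */

	/* 3 * 400 = 1200, not 1400: the smallest disk caps the group. */
	printf("%llu\n", (unsigned long long)raidz_asize(sizes, 3));
	return (0);
}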
zfs_fm.c:28 #include <sys/vdev.h>
53 * gets very complicated due to RAID-Z, gang blocks, and vdev caching. We want
69 * | Physical I/O | no logical data. Issued as part of vdev caching
87 * will use the same ENA. For vdev cache fill and queue aggregation I/O,
155 * If the vdev has already been marked as failing due
157 * errors, as the DE will automatically fault the vdev
337 * If we have a vdev but no zio, this is a device fault, and the
339 * vdev.
831 * The 'resource.fs.zfs.removed' event is an internal signal that the given vdev
845 * handled by higher level logic, and no vdev faul
[all...]

Completed in 251 milliseconds
