982a983,986
> typedef struct spare_cbdata {
> 	uint64_t	cb_guid;
> 	zpool_handle_t	*cb_zhp;
> } spare_cbdata_t;
983a988,1027
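> /*
>  * Walk the vdev tree rooted at 'nv' and return B_TRUE if any vdev in
>  * it carries the given GUID.
>  */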
> static boolean_t
> find_vdev(nvlist_t *nv, uint64_t search)
> {
> 	uint64_t guid;
> 	nvlist_t **child;
> 	uint_t c, children;
>
> 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
> 	    search == guid)
> 		return (B_TRUE);
>
> 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
> 	    &child, &children) == 0) {
> 		for (c = 0; c < children; c++)
> 			if (find_vdev(child[c], search))
> 				return (B_TRUE);
> 	}
>
> 	return (B_FALSE);
> }
>
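> /*
>  * zpool_iter() callback.  If the pool's vdev tree contains the GUID in
>  * the spare_cbdata_t, stash the handle in cb_zhp and return nonzero to
>  * stop the iteration; otherwise close the handle and keep looking.
>  */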
> static int
> find_spare(zpool_handle_t *zhp, void *data)
> {
> 	spare_cbdata_t *cbp = data;
> 	nvlist_t *config, *nvroot;
>
> 	config = zpool_get_config(zhp, NULL);
> 	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
> 	    &nvroot) == 0);
>
> 	if (find_vdev(nvroot, cbp->cb_guid)) {
> 		cbp->cb_zhp = zhp;
> 		return (1);
> 	}
>
> 	zpool_close(zhp);
> 	return (0);
> }
>
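Taken together, these two helpers follow the usual libzfs iteration contract: zpool_iter() hands the callback one open handle per imported pool, a nonzero return stops the walk and becomes zpool_iter()'s return value, and the callback must close any handle it does not keep. A minimal sketch of how the pair locates the pool that is using a given spare, mirroring the VDEV_AUX_SPARED case later in this diff (example_locate_spare and spare_guid are illustrative names, not part of the change):

	static void
	example_locate_spare(libzfs_handle_t *hdl, uint64_t spare_guid)
	{
		spare_cbdata_t cb;

		cb.cb_guid = spare_guid;	/* GUID of the spare vdev */
		cb.cb_zhp = NULL;

		/* find_spare() returns 1 and keeps the handle on a match */
		if (zpool_iter(hdl, find_spare, &cb) == 1) {
			(void) printf("spare in use by pool '%s'\n",
			    zpool_get_name(cb.cb_zhp));
			zpool_close(cb.cb_zhp);
		}
	}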
984a1029,1160
>  * Print out configuration state as requested by status_callback.
>  */
> void
> print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
>     int namewidth, int depth, boolean_t isspare)
> {
> 	nvlist_t **child;
> 	uint_t c, children;
> 	vdev_stat_t *vs;
> 	char rbuf[6], wbuf[6], cbuf[6], repaired[7];
> 	char *vname;
> 	uint64_t notpresent;
> 	spare_cbdata_t cb;
> 	char *state;
>
> 	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
> 	    (uint64_t **)&vs, &c) == 0);
>
> 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
> 	    &child, &children) != 0)
> 		children = 0;
>
> 	state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
> 	if (isspare) {
> 		/*
> 		 * For hot spares, we use the terms 'INUSE' and 'AVAIL' for
> 		 * online drives.
> 		 */
> 		if (vs->vs_aux == VDEV_AUX_SPARED)
> 			state = "INUSE";
> 		else if (vs->vs_state == VDEV_STATE_HEALTHY)
> 			state = "AVAIL";
> 	}
>
> (void) printf("\t%*s%-*s %-8s", depth, "", namewidth - depth,
> name, state);
>
> if (!isspare) {
> zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
> zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
> zfs_nicenum(vs->vs_checksum_errors, cbuf, sizeof (cbuf));
> (void) printf(" %5s %5s %5s", rbuf, wbuf, cbuf);
> }
>
> if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
> &notpresent) == 0) {
> char *path;
> verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
> (void) printf(" was %s", path);
> } else if (vs->vs_aux != 0) {
> (void) printf(" ");
>
> switch (vs->vs_aux) {
> case VDEV_AUX_OPEN_FAILED:
> (void) printf(gettext("cannot open"));
> break;
>
> case VDEV_AUX_BAD_GUID_SUM:
> (void) printf(gettext("missing device"));
> break;
>
> case VDEV_AUX_NO_REPLICAS:
> (void) printf(gettext("insufficient replicas"));
> break;
>
> case VDEV_AUX_VERSION_NEWER:
> (void) printf(gettext("newer version"));
> break;
>
> case VDEV_AUX_SPARED:
> verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
> &cb.cb_guid) == 0);
> if (zpool_iter(g_zfs, find_spare, &cb) == 1) {
> if (strcmp(zpool_get_name(cb.cb_zhp),
> zpool_get_name(zhp)) == 0)
> (void) printf(gettext("currently in "
> "use"));
> else
> (void) printf(gettext("in use by "
> "pool '%s'"),
> zpool_get_name(cb.cb_zhp));
> zpool_close(cb.cb_zhp);
> } else {
> (void) printf(gettext("currently in use"));
> }
> break;
>
> case VDEV_AUX_ERR_EXCEEDED:
> (void) printf(gettext("too many errors"));
> break;
>
> case VDEV_AUX_IO_FAILURE:
> (void) printf(gettext("experienced I/O failures"));
> break;
>
> case VDEV_AUX_BAD_LOG:
> (void) printf(gettext("bad intent log"));
> break;
>
> default:
> (void) printf(gettext("corrupted data"));
> break;
> }
> } else if (vs->vs_scrub_repaired != 0 && children == 0) {
> /*
> * Report bytes resilvered/repaired on leaf devices.
> */
> zfs_nicenum(vs->vs_scrub_repaired, repaired, sizeof (repaired));
> (void) printf(gettext(" %s %s"), repaired,
> (vs->vs_scrub_type == POOL_SCRUB_RESILVER) ?
> "resilvered" : "repaired");
> }
>
> (void) printf("\n");
>
> for (c = 0; c < children; c++) {
> uint64_t is_log = B_FALSE;
>
> /* Don't print logs here */
> (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
> &is_log);
> if (is_log)
> continue;
> vname = zpool_vdev_name(g_zfs, zhp, child[c]);
> print_status_config(zhp, vname, child[c],
> namewidth, depth + 2, isspare);
> free(vname);
> }
> }
>
> /*
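The column layout above relies on printf's dynamic field widths: "%*s" with an empty string argument emits `depth' spaces of indentation, and "%-*s" left-justifies the vdev name in a field of `namewidth - depth', so nested children shift right while the STATE column stays aligned. A self-contained sketch, with made-up names and widths:

	#include <stdio.h>

	int
	main(void)
	{
		int namewidth = 12;	/* widest vdev name in the tree */

		/* same format string print_status_config() uses above */
		(void) printf("\t%*s%-*s %-8s\n", 0, "", namewidth - 0,
		    "tank", "ONLINE");
		(void) printf("\t%*s%-*s %-8s\n", 2, "", namewidth - 2,
		    "mirror", "DEGRADED");
		(void) printf("\t%*s%-*s %-8s\n", 4, "", namewidth - 4,
		    "c1t0d0", "ONLINE");
		return (0);
	}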
989,990c1165
< print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth,
<     boolean_t print_logs)
---
> print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth)
1047c1222
< 		if ((is_log && !print_logs) || (!is_log && print_logs))
---
> 		if (is_log)
1051,1052c1226
< 		print_import_config(vname, child[c],
< 		    namewidth, depth + 2, B_FALSE);
---
> 		print_import_config(vname, child[c], namewidth, depth + 2);
1077a1252,1288
>  * Print log vdevs.
>  * Logs are recorded as top-level vdevs in the main pool child array,
>  * but with "is_log" set to 1.  We use either print_status_config() or
>  * print_import_config() to print the top-level logs, then any log
>  * children (e.g. mirrored slogs) are printed recursively, which works
>  * because only the top-level vdev is marked "is_log".
>  */
> static void
> print_logs(zpool_handle_t *zhp, nvlist_t *nv, int namewidth, boolean_t verbose)
> {
> 	uint_t c, children;
> 	nvlist_t **child;
>
> 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
> 	    &children) != 0)
> 		return;
>
> 	(void) printf(gettext("\tlogs\n"));
>
> 	for (c = 0; c < children; c++) {
> 		uint64_t is_log = B_FALSE;
> 		char *name;
>
> 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
> 		    &is_log);
> 		if (!is_log)
> 			continue;
> 		name = zpool_vdev_name(g_zfs, zhp, child[c]);
> 		if (verbose)
> 			print_status_config(zhp, name, child[c], namewidth,
> 			    2, B_FALSE);
> 		else
> 			print_import_config(name, child[c], namewidth, 2);
> 		free(name);
> 	}
> }
>
> /*
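For reference, the config shape print_logs() walks: a log device is an ordinary top-level child of the root vdev whose ZPOOL_CONFIG_IS_LOG value is 1, and leaves beneath a mirrored slog never carry the flag, which is why the recursion only filters at the top level. A sketch of building such a child (illustration only: example_mirrored_slog is a made-up helper and error handling is omitted):

	#include <libnvpair.h>
	#include <sys/fs/zfs.h>

	static nvlist_t *
	example_mirrored_slog(nvlist_t *disk_a, nvlist_t *disk_b)
	{
		nvlist_t *mirror;
		nvlist_t *kids[2];

		(void) nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0);
		(void) nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_MIRROR);

		/* only the top-level vdev carries the flag tested above */
		(void) nvlist_add_uint64(mirror, ZPOOL_CONFIG_IS_LOG, 1ULL);

		kids[0] = disk_a;	/* leaf vdevs: "is_log" stays unset */
		kids[1] = disk_b;
		(void) nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
		    kids, 2);
		return (mirror);
	}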
1245,1249c1456,1458
< 	print_import_config(name, nvroot, namewidth, 0, B_FALSE);
< 	if (num_logs(nvroot) > 0) {
< 		(void) printf(gettext("\tlogs\n"));
< 		print_import_config(name, nvroot, namewidth, 0, B_TRUE);
< 	}
---
> 	print_import_config(name, nvroot, namewidth, 0);
> 	if (num_logs(nvroot) > 0)
> 		print_logs(NULL, nvroot, namewidth, B_FALSE);
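This caller shows the intended division of labor for the sketch above: during import no pool handle is open, so print_logs() receives NULL and verbose == B_FALSE, rendering each top-level log via print_import_config(); the status path at the end of this diff passes the live handle with B_TRUE to get the full READ/WRITE/CKSUM columns from print_status_config().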
2720,2895d2928
< typedef struct spare_cbdata {
< 	uint64_t	cb_guid;
< 	zpool_handle_t	*cb_zhp;
< } spare_cbdata_t;
<
< static boolean_t
< find_vdev(nvlist_t *nv, uint64_t search)
< {
< 	uint64_t guid;
< 	nvlist_t **child;
< 	uint_t c, children;
<
< 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
< 	    search == guid)
< 		return (B_TRUE);
<
< 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
< 	    &child, &children) == 0) {
< 		for (c = 0; c < children; c++)
< 			if (find_vdev(child[c], search))
< 				return (B_TRUE);
< 	}
<
< 	return (B_FALSE);
< }
<
< static int
< find_spare(zpool_handle_t *zhp, void *data)
< {
< 	spare_cbdata_t *cbp = data;
< 	nvlist_t *config, *nvroot;
<
< 	config = zpool_get_config(zhp, NULL);
< 	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
< 	    &nvroot) == 0);
<
< 	if (find_vdev(nvroot, cbp->cb_guid)) {
< 		cbp->cb_zhp = zhp;
< 		return (1);
< 	}
<
< 	zpool_close(zhp);
< 	return (0);
< }
<
< /*
<  * Print out configuration state as requested by status_callback.
<  */
< void
< print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
<     int namewidth, int depth, boolean_t isspare)
< {
< 	nvlist_t **child;
< 	uint_t c, children;
< 	vdev_stat_t *vs;
< 	char rbuf[6], wbuf[6], cbuf[6], repaired[7];
< 	char *vname;
< 	uint64_t notpresent;
< 	spare_cbdata_t cb;
< 	char *state;
<
< 	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
< 	    (uint64_t **)&vs, &c) == 0);
<
< 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
< 	    &child, &children) != 0)
< 		children = 0;
<
< 	state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
< 	if (isspare) {
< 		/*
< 		 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for
< 		 * online drives.
< 		 */
< 		if (vs->vs_aux == VDEV_AUX_SPARED)
< 			state = "INUSE";
< 		else if (vs->vs_state == VDEV_STATE_HEALTHY)
< 			state = "AVAIL";
< 	}
<
< 	(void) printf("\t%*s%-*s %-8s", depth, "", namewidth - depth,
< 	    name, state);
<
< 	if (!isspare) {
< 		zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
< 		zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
< 		zfs_nicenum(vs->vs_checksum_errors, cbuf, sizeof (cbuf));
< 		(void) printf(" %5s %5s %5s", rbuf, wbuf, cbuf);
< 	}
<
< 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
< 	    &notpresent) == 0) {
< 		char *path;
< 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
< 		(void) printf(" was %s", path);
< 	} else if (vs->vs_aux != 0) {
< 		(void) printf(" ");
<
< 		switch (vs->vs_aux) {
< 		case VDEV_AUX_OPEN_FAILED:
< 			(void) printf(gettext("cannot open"));
< 			break;
<
< 		case VDEV_AUX_BAD_GUID_SUM:
< 			(void) printf(gettext("missing device"));
< 			break;
<
< 		case VDEV_AUX_NO_REPLICAS:
< 			(void) printf(gettext("insufficient replicas"));
< 			break;
<
< 		case VDEV_AUX_VERSION_NEWER:
< 			(void) printf(gettext("newer version"));
< 			break;
<
< 		case VDEV_AUX_SPARED:
< 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
< 			    &cb.cb_guid) == 0);
< 			if (zpool_iter(g_zfs, find_spare, &cb) == 1) {
< 				if (strcmp(zpool_get_name(cb.cb_zhp),
< 				    zpool_get_name(zhp)) == 0)
< 					(void) printf(gettext("currently in "
< 					    "use"));
< 				else
< 					(void) printf(gettext("in use by "
< 					    "pool '%s'"),
< 					    zpool_get_name(cb.cb_zhp));
< 				zpool_close(cb.cb_zhp);
< 			} else {
< 				(void) printf(gettext("currently in use"));
< 			}
< 			break;
<
< 		case VDEV_AUX_ERR_EXCEEDED:
< 			(void) printf(gettext("too many errors"));
< 			break;
<
< 		case VDEV_AUX_IO_FAILURE:
< 			(void) printf(gettext("experienced I/O failures"));
< 			break;
<
< 		case VDEV_AUX_BAD_LOG:
< 			(void) printf(gettext("bad intent log"));
< 			break;
<
< 		default:
< 			(void) printf(gettext("corrupted data"));
< 			break;
< 		}
< 	} else if (vs->vs_scrub_repaired != 0 && children == 0) {
< 		/*
< 		 * Report bytes resilvered/repaired on leaf devices.
< 		 */
< 		zfs_nicenum(vs->vs_scrub_repaired, repaired, sizeof (repaired));
< 		(void) printf(gettext(" %s %s"), repaired,
< 		    (vs->vs_scrub_type == POOL_SCRUB_RESILVER) ?
< 		    "resilvered" : "repaired");
< 	}
<
< 	(void) printf("\n");
<
< 	for (c = 0; c < children; c++) {
< 		uint64_t is_log = B_FALSE;
<
< 		/* Don't print logs here */
< 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
< 		    &is_log);
< 		if (is_log)
< 			continue;
< 		vname = zpool_vdev_name(g_zfs, zhp, child[c]);
< 		print_status_config(zhp, vname, child[c],
< 		    namewidth, depth + 2, isspare);
< 		free(vname);
< 	}
< }
<
2972,3004d3004
<  * Print log vdevs.
<  * Logs are recorded as top level vdevs in the main pool child array but with
<  * "is_log" set to 1. We use print_status_config() to print the top level logs
<  * then any log children (eg mirrored slogs) are printed recursively - which
<  * works because only the top level vdev is marked "is_log"
<  */
< static void
< print_logs(zpool_handle_t *zhp, nvlist_t *nv, int namewidth)
< {
< 	uint_t c, children;
< 	nvlist_t **child;
<
< 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
< 	    &children) != 0)
< 		return;
<
< 	(void) printf(gettext("\tlogs\n"));
<
< 	for (c = 0; c < children; c++) {
< 		uint64_t is_log = B_FALSE;
< 		char *name;
<
< 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
< 		    &is_log);
< 		if (!is_log)
< 			continue;
< 		name = zpool_vdev_name(g_zfs, zhp, child[c]);
< 		print_status_config(zhp, name, child[c], namewidth, 2, B_FALSE);
< 		free(name);
< 	}
< }
<
< /*
3232c3232
< 			print_logs(zhp, nvroot, namewidth);
---
> 			print_logs(zhp, nvroot, namewidth, B_TRUE);
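With the verbose path wired up, zpool status output for a pool with a mirrored slog should look roughly like this (spacing depends on namewidth; names and values are illustrative):

	logs
	  mirror    ONLINE       0     0     0
	    c1t2d0  ONLINE       0     0     0
	    c1t3d0  ONLINE       0     0     0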