libzfs_pool.c revision 185029
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <dirent.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <zone.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio.h>
#include <strings.h>
#include <umem.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

/*
 * ====================================================================
 *   zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
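
/*
 * Example usage (illustrative sketch; assumes 'zhp' is an open zpool
 * handle):
 *
 *	uint64_t version;
 *	zprop_source_t src;
 *
 *	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
 *	if (src == ZPROP_SRC_DEFAULT)
 *		(void) printf("version %llu (default)\n",
 *		    (u_longlong_t)version);
 */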

/*
 * Map VDEV state to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		if (prop == ZPOOL_PROP_NAME)
			(void) strlcpy(buf, zpool_get_name(zhp), len);
		else if (prop == ZPOOL_PROP_HEALTH)
			(void) strlcpy(buf, "FAULTED", len);
		else
			(void) strlcpy(buf, "-", len);
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_USED:
		case ZPOOL_PROP_AVAILABLE:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
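
/*
 * Example usage (illustrative sketch; assumes 'zhp' is an open zpool
 * handle):
 *
 *	char health[ZFS_MAXPROPLEN];
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health,
 *	    sizeof (health), NULL) == 0)
 *		(void) printf("%s: %s\n", zpool_get_name(zhp), health);
 */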

/*
 * Check that the bootfs name has the same pool name as the pool it is
 * being set on.  Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
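
/*
 * For example, with pool "tank", "tank" and "tank/ROOT/default" pass the
 * check above, while "rpool/ROOT" fails because the leading pool-name
 * component does not match.  (The dataset names are hypothetical.)
 */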

#if defined(sun)
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}
#endif

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value must be a dataset name,
			 * and the dataset must reside in the pool the
			 * property is set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(sun)
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
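
/*
 * Illustrative sketch of the expected calling pattern ('hdl', 'errbuf',
 * and the pool name are hypothetical):
 *
 *	nvlist_t *props, *valid;
 *
 *	verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), "/mnt") == 0);
 *	valid = zpool_valid_proplist(hdl, "tank", props, SPA_VERSION,
 *	    B_TRUE, errbuf);
 *	nvlist_free(props);
 */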

/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
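
/*
 * Example usage (illustrative sketch; assumes an open handle 'zhp'):
 *
 *	if (zpool_set_prop(zhp, "autoreplace", "on") != 0)
 *		(void) fprintf(stderr, "failed to set autoreplace\n");
 */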

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Validate the given pool name, optionally reporting an extended error
 * message through 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;

			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
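
/*
 * For example, "tank" and "data01" are accepted, while "mirror", "raidz",
 * "spare0", "log", and names that do not begin with a letter are rejected
 * during create or import.
 */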

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
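
/*
 * Typical open/close pattern (illustrative sketch; assumes a libzfs
 * handle 'hdl' obtained from libzfs_init() and a hypothetical pool name):
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) == NULL)
 *		return (1);
 *	... operate on the pool ...
 *	zpool_close(zhp);
 */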

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	char *altroot;
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
	    &altroot) == 0) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
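
/*
 * Illustrative sketch of a minimal caller (the device path is
 * hypothetical; a real caller builds the vdev tree via make_root_vdev()
 * in zpool(8) and sets additional config pairs):
 *
 *	nvlist_t *vdev, *root;
 *	nvlist_t *kids[1];
 *
 *	verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, "/dev/da0") == 0);
 *	kids[0] = vdev;
 *	verify(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(root, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    kids, 1) == 0);
 *	(void) zpool_create(hdl, "tank", root, NULL, NULL);
 */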

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}
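
/*
 * Example (illustrative): a plain "zpool export tank" corresponds to
 * zpool_export(zhp, B_FALSE); passing B_TRUE mirrors "zpool export -f".
 */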

/*
 * zpool_import() is a contracted interface.  It should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
	if (props)
		nvlist_free(props);
	return (ret);
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, boolean_t importfaulted)
{
	zfs_cmd_t zc = { 0 };
	char *thename;
	char *origname;
	int ret;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, B_TRUE, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = (uint64_t)importfaulted;
	ret = 0;
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		char desc[1024];
		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, desc);
		}

		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
			ret = -1;
		} else if (zhp != NULL) {
			ret = zpool_create_zvol_links(zhp);
			zpool_close(zhp);
		}

	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}
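
/*
 * Illustrative sketch ('config' would come from zpool_find_import()):
 *
 *	nvlist_t *props;
 *
 *	verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), "/a") == 0);
 *	(void) zpool_import_props(hdl, config, NULL, props, B_FALSE);
 *	nvlist_free(props);
 */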

/*
 * Scrub the pool.
 */
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = type;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

	if (errno == EBUSY)
		return (zfs_error(hdl, EZFS_RESILVERING, msg));
	else
		return (zpool_standard_error(hdl, errno, msg));
}
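
/*
 * Example (illustrative): "zpool scrub tank" maps to
 * zpool_scrub(zhp, POOL_SCRUB_EVERYTHING), and "zpool scrub -s tank"
 * maps to zpool_scrub(zhp, POOL_SCRUB_NONE).
 */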

/*
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; it is set to FALSE if the guid refers to an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	uint64_t theguid, present;
	char *path;
	uint64_t wholedisk = 0;
	nvlist_t *ret;
	uint64_t is_log;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);

	if (search == NULL &&
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
		/*
		 * If the device has never been present since import, the only
		 * reliable way to match the vdev is by GUID.
		 */
		if (theguid == guid)
			return (nv);
	} else if (search != NULL &&
	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (wholedisk) {
			/*
			 * For whole disks, the internal path has 's0', but the
			 * path passed in by the user doesn't.
			 */
			if (strlen(search) == strlen(path) - 2 &&
			    strncmp(search, path, strlen(search)) == 0)
				return (nv);
		} else if (strcmp(search, path) == 0) {
			return (nv);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs.  So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char buf[MAXPATHLEN];
	const char *search;
	char *end;
	nvlist_t *nvroot;
	uint64_t guid;

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		search = NULL;
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path);
		search = buf;
	} else {
		search = path;
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare,
	    l2cache, log));
}
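
/*
 * Example usage (illustrative sketch; the device name is hypothetical,
 * and bare names are prefixed with _PATH_DEV as above):
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt;
 *
 *	if ((tgt = zpool_find_vdev(zhp, "da0", &spare, &l2cache,
 *	    &log)) != NULL && !spare && !l2cache)
 *		... the nvlist describes an ordinary pool vdev ...
 */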

static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}

/*
 * Get phys_path for a root pool.
 * Returns 0 on success; non-zero on failure.
 */
1440185029Spjdint
1441185029Spjdzpool_get_physpath(zpool_handle_t *zhp, char *physpath)
1442185029Spjd{
1443185029Spjd	char bootfs[ZPOOL_MAXNAMELEN];
1444185029Spjd	nvlist_t *vdev_root;
1445185029Spjd	nvlist_t **child;
1446185029Spjd	uint_t count;
1447185029Spjd	int i;
1448185029Spjd
1449185029Spjd	/*
1450185029Spjd	 * Make sure this is a root pool, as phys_path doesn't mean
1451185029Spjd	 * anything to a non-root pool.
1452185029Spjd	 */
1453185029Spjd	if (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
1454185029Spjd	    sizeof (bootfs), NULL) != 0)
1455185029Spjd		return (-1);
1456185029Spjd
1457185029Spjd	verify(nvlist_lookup_nvlist(zhp->zpool_config,
1458185029Spjd	    ZPOOL_CONFIG_VDEV_TREE, &vdev_root) == 0);
1459185029Spjd
1460185029Spjd	if (nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
1461185029Spjd	    &child, &count) != 0)
1462185029Spjd		return (-2);
1463185029Spjd
1464185029Spjd	for (i = 0; i < count; i++) {
1465185029Spjd		nvlist_t **child2;
1466185029Spjd		uint_t count2;
1467185029Spjd		char *type;
1468185029Spjd		char *tmppath;
1469185029Spjd		int j;
1470185029Spjd
1471185029Spjd		if (nvlist_lookup_string(child[i], ZPOOL_CONFIG_TYPE, &type)
1472185029Spjd		    != 0)
1473185029Spjd			return (-3);
1474185029Spjd
1475185029Spjd		if (strcmp(type, VDEV_TYPE_DISK) == 0) {
1476185029Spjd			if (!vdev_online(child[i]))
1477185029Spjd				return (-8);
1478185029Spjd			verify(nvlist_lookup_string(child[i],
1479185029Spjd			    ZPOOL_CONFIG_PHYS_PATH, &tmppath) == 0);
1480185029Spjd			(void) strncpy(physpath, tmppath, strlen(tmppath));
1481185029Spjd		} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0) {
1482185029Spjd			if (nvlist_lookup_nvlist_array(child[i],
1483185029Spjd			    ZPOOL_CONFIG_CHILDREN, &child2, &count2) != 0)
1484185029Spjd				return (-4);
1485185029Spjd
1486185029Spjd			for (j = 0; j < count2; j++) {
1487185029Spjd				if (!vdev_online(child2[j]))
1488185029Spjd					return (-8);
1489185029Spjd				if (nvlist_lookup_string(child2[j],
1490185029Spjd				    ZPOOL_CONFIG_PHYS_PATH, &tmppath) != 0)
1491185029Spjd					return (-5);
1492185029Spjd
1493185029Spjd				if ((strlen(physpath) + strlen(tmppath)) >
1494185029Spjd				    MAXNAMELEN)
1495185029Spjd					return (-6);
1496185029Spjd
1497185029Spjd				if (strlen(physpath) == 0) {
1498185029Spjd					(void) strncpy(physpath, tmppath,
1499185029Spjd					    strlen(tmppath));
1500185029Spjd				} else {
1501185029Spjd					(void) strcat(physpath, " ");
1502185029Spjd					(void) strcat(physpath, tmppath);
1503185029Spjd				}
1504185029Spjd			}
1505185029Spjd		} else {
1506185029Spjd			return (-7);
1507185029Spjd		}
1508185029Spjd	}
1509185029Spjd
1510185029Spjd	return (0);
1511185029Spjd}
1512185029Spjd
1513185029Spjd/*
1514185029Spjd * Returns TRUE if the given guid corresponds to the given type.
1515185029Spjd * This is used to check for hot spares (INUSE or not), and level 2 cache
1516185029Spjd * devices.
1517185029Spjd */
1518168404Spjdstatic boolean_t
1519185029Spjdis_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
1520168404Spjd{
1521185029Spjd	uint64_t target_guid;
1522168404Spjd	nvlist_t *nvroot;
1523185029Spjd	nvlist_t **list;
1524185029Spjd	uint_t count;
1525168404Spjd	int i;
1526168404Spjd
1527168404Spjd	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1528168404Spjd	    &nvroot) == 0);
1529185029Spjd	if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
1530185029Spjd		for (i = 0; i < count; i++) {
1531185029Spjd			verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
1532185029Spjd			    &target_guid) == 0);
1533185029Spjd			if (guid == target_guid)
1534168404Spjd				return (B_TRUE);
1535168404Spjd		}
1536168404Spjd	}
1537168404Spjd
1538168404Spjd	return (B_FALSE);
1539168404Spjd}
1540168404Spjd
1541168404Spjd/*
1542185029Spjd * Bring the specified vdev online.  The 'flags' parameter is a set of the
1543185029Spjd * ZFS_ONLINE_* flags.
1544168404Spjd */
1545168404Spjdint
1546185029Spjdzpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
1547185029Spjd    vdev_state_t *newstate)
1548168404Spjd{
1549168404Spjd	zfs_cmd_t zc = { 0 };
1550168404Spjd	char msg[1024];
1551168404Spjd	nvlist_t *tgt;
1552185029Spjd	boolean_t avail_spare, l2cache;
1553168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1554168404Spjd
1555168404Spjd	(void) snprintf(msg, sizeof (msg),
1556168404Spjd	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
1557168404Spjd
1558168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1559185029Spjd	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1560185029Spjd	    NULL)) == NULL)
1561168404Spjd		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1562168404Spjd
1563168404Spjd	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1564168404Spjd
1565185029Spjd	if (avail_spare ||
1566185029Spjd	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1567168404Spjd		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1568168404Spjd
1569185029Spjd	zc.zc_cookie = VDEV_STATE_ONLINE;
1570185029Spjd	zc.zc_obj = flags;
1571168404Spjd
1572185029Spjd	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
1573185029Spjd		return (zpool_standard_error(hdl, errno, msg));
1574185029Spjd
1575185029Spjd	*newstate = zc.zc_cookie;
1576185029Spjd	return (0);
1577168404Spjd}
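
/*
 * Example usage (illustrative; 'zhp' is assumed to be an open pool handle):
 * bring a previously offlined disk back online and check the state it came
 * back in.  Passing 0 for 'flags' requests no special online behavior.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "c1t0d0", 0, &newstate) == 0 &&
 *	    newstate == VDEV_STATE_HEALTHY)
 *		(void) printf("device is healthy\n");
 */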
1578168404Spjd
1579168404Spjd/*
1580168404Spjd * Take the specified vdev offline
1581168404Spjd */
1582168404Spjdint
1583185029Spjdzpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
1584168404Spjd{
1585168404Spjd	zfs_cmd_t zc = { 0 };
1586168404Spjd	char msg[1024];
1587168404Spjd	nvlist_t *tgt;
1588185029Spjd	boolean_t avail_spare, l2cache;
1589168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1590168404Spjd
1591168404Spjd	(void) snprintf(msg, sizeof (msg),
1592168404Spjd	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
1593168404Spjd
1594168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1595185029Spjd	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1596185029Spjd	    NULL)) == NULL)
1597168404Spjd		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1598168404Spjd
1599168404Spjd	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1600168404Spjd
1601185029Spjd	if (avail_spare ||
1602185029Spjd	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1603168404Spjd		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1604168404Spjd
1605185029Spjd	zc.zc_cookie = VDEV_STATE_OFFLINE;
1606185029Spjd	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
1607168404Spjd
1608185029Spjd	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1609168404Spjd		return (0);
1610168404Spjd
1611168404Spjd	switch (errno) {
1612168404Spjd	case EBUSY:
1613168404Spjd
1614168404Spjd		/*
1615168404Spjd		 * There are no other replicas of this device.
1616168404Spjd		 */
1617168404Spjd		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1618168404Spjd
1619168404Spjd	default:
1620168404Spjd		return (zpool_standard_error(hdl, errno, msg));
1621168404Spjd	}
1622168404Spjd}
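
/*
 * Example usage (illustrative): take a disk offline only until the next
 * reboot by passing B_TRUE for 'istmp'; B_FALSE makes the offline state
 * persistent.
 *
 *	if (zpool_vdev_offline(zhp, "c1t0d0", B_TRUE) != 0)
 *		return (-1);
 */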
1623168404Spjd
1624168404Spjd/*
1625185029Spjd * Mark the given vdev faulted.
1626185029Spjd */
1627185029Spjdint
1628185029Spjdzpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
1629185029Spjd{
1630185029Spjd	zfs_cmd_t zc = { 0 };
1631185029Spjd	char msg[1024];
1632185029Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1633185029Spjd
1634185029Spjd	(void) snprintf(msg, sizeof (msg),
1635185029Spjd	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
1636185029Spjd
1637185029Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1638185029Spjd	zc.zc_guid = guid;
1639185029Spjd	zc.zc_cookie = VDEV_STATE_FAULTED;
1640185029Spjd
1641185029Spjd	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1642185029Spjd		return (0);
1643185029Spjd
1644185029Spjd	switch (errno) {
1645185029Spjd	case EBUSY:
1646185029Spjd
1647185029Spjd		/*
1648185029Spjd		 * There are no other replicas of this device.
1649185029Spjd		 */
1650185029Spjd		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1651185029Spjd
1652185029Spjd	default:
1653185029Spjd		return (zpool_standard_error(hdl, errno, msg));
1654185029Spjd	}
1656185029Spjd}
1657185029Spjd
1658185029Spjd/*
1659185029Spjd * Mark the given vdev degraded.
1660185029Spjd */
1661185029Spjdint
1662185029Spjdzpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
1663185029Spjd{
1664185029Spjd	zfs_cmd_t zc = { 0 };
1665185029Spjd	char msg[1024];
1666185029Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1667185029Spjd
1668185029Spjd	(void) snprintf(msg, sizeof (msg),
1669185029Spjd	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
1670185029Spjd
1671185029Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1672185029Spjd	zc.zc_guid = guid;
1673185029Spjd	zc.zc_cookie = VDEV_STATE_DEGRADED;
1674185029Spjd
1675185029Spjd	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1676185029Spjd		return (0);
1677185029Spjd
1678185029Spjd	return (zpool_standard_error(hdl, errno, msg));
1679185029Spjd}
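
/*
 * Example usage (an illustrative sketch, not part of the original source):
 * fault or degrade a vdev by guid, as fmd does.  Here the guid is looked
 * up from the vdev's config entry; error checking (including a NULL 'tgt')
 * is elided.
 *
 *	boolean_t spare, l2cache;
 *	nvlist_t *tgt;
 *	uint64_t guid;
 *
 *	tgt = zpool_find_vdev(zhp, "c1t0d0", &spare, &l2cache, NULL);
 *	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
 *	(void) zpool_vdev_fault(zhp, guid);
 */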
1680185029Spjd
1681185029Spjd/*
1682168404Spjd * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1683168404Spjd * a hot spare.
1684168404Spjd */
1685168404Spjdstatic boolean_t
1686168404Spjdis_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
1687168404Spjd{
1688168404Spjd	nvlist_t **child;
1689168404Spjd	uint_t c, children;
1690168404Spjd	char *type;
1691168404Spjd
1692168404Spjd	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
1693168404Spjd	    &children) == 0) {
1694168404Spjd		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
1695168404Spjd		    &type) == 0);
1696168404Spjd
1697168404Spjd		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
1698168404Spjd		    children == 2 && child[which] == tgt)
1699168404Spjd			return (B_TRUE);
1700168404Spjd
1701168404Spjd		for (c = 0; c < children; c++)
1702168404Spjd			if (is_replacing_spare(child[c], tgt, which))
1703168404Spjd				return (B_TRUE);
1704168404Spjd	}
1705168404Spjd
1706168404Spjd	return (B_FALSE);
1707168404Spjd}
1708168404Spjd
1709168404Spjd/*
1710168404Spjd * Attach new_disk (fully described by nvroot) to old_disk.
1711185029Spjd * If 'replacing' is specified, the new disk will replace the old one.
1712168404Spjd */
1713168404Spjdint
1714168404Spjdzpool_vdev_attach(zpool_handle_t *zhp,
1715168404Spjd    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
1716168404Spjd{
1717168404Spjd	zfs_cmd_t zc = { 0 };
1718168404Spjd	char msg[1024];
1719168404Spjd	int ret;
1720168404Spjd	nvlist_t *tgt;
1721185029Spjd	boolean_t avail_spare, l2cache, islog;
1722168404Spjd	uint64_t val;
1723185029Spjd	char *path, *newname;
1724168404Spjd	nvlist_t **child;
1725168404Spjd	uint_t children;
1726168404Spjd	nvlist_t *config_root;
1727168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1728168404Spjd
1729168404Spjd	if (replacing)
1730168404Spjd		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1731168404Spjd		    "cannot replace %s with %s"), old_disk, new_disk);
1732168404Spjd	else
1733168404Spjd		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1734168404Spjd		    "cannot attach %s to %s"), new_disk, old_disk);
1735168404Spjd
1736168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1737185029Spjd	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
1738185029Spjd	    &islog)) == 0)
1739168404Spjd		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1740168404Spjd
1741168404Spjd	if (avail_spare)
1742168404Spjd		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1743168404Spjd
1744185029Spjd	if (l2cache)
1745185029Spjd		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1746185029Spjd
1747168404Spjd	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1748168404Spjd	zc.zc_cookie = replacing;
1749168404Spjd
1750168404Spjd	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1751168404Spjd	    &child, &children) != 0 || children != 1) {
1752168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1753168404Spjd		    "new device must be a single disk"));
1754168404Spjd		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1755168404Spjd	}
1756168404Spjd
1757168404Spjd	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1758168404Spjd	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1759168404Spjd
1760185029Spjd	if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)
1761185029Spjd		return (-1);
1762185029Spjd
1763168404Spjd	/*
1764168404Spjd	 * If the target is a hot spare that has been swapped in, we can only
1765168404Spjd	 * replace it with another hot spare.
1766168404Spjd	 */
1767168404Spjd	if (replacing &&
1768168404Spjd	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1769185029Spjd	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
1770185029Spjd	    NULL) == NULL || !avail_spare) &&
1771185029Spjd	    is_replacing_spare(config_root, tgt, 1)) {
1772168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1773168404Spjd		    "can only be replaced by another hot spare"));
1774185029Spjd		free(newname);
1775168404Spjd		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1776168404Spjd	}
1777168404Spjd
1778168404Spjd	/*
1779168404Spjd	 * If we are attempting to replace a spare, it cannot be applied to an
1780168404Spjd	 * already spared device.
1781168404Spjd	 */
1782168404Spjd	if (replacing &&
1783168404Spjd	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1784185029Spjd	    zpool_find_vdev(zhp, newname, &avail_spare,
1785185029Spjd	    &l2cache, NULL) != NULL && avail_spare &&
1786168404Spjd	    is_replacing_spare(config_root, tgt, 0)) {
1787168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1788168404Spjd		    "device has already been replaced with a spare"));
1789185029Spjd		free(newname);
1790168404Spjd		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1791168404Spjd	}
1792168404Spjd
1793185029Spjd	free(newname);
1794185029Spjd
1795185029Spjd	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1796168404Spjd		return (-1);
1797168404Spjd
1798185029Spjd	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
1799168404Spjd
1800168404Spjd	zcmd_free_nvlists(&zc);
1801168404Spjd
1802168404Spjd	if (ret == 0)
1803168404Spjd		return (0);
1804168404Spjd
1805168404Spjd	switch (errno) {
1806168404Spjd	case ENOTSUP:
1807168404Spjd		/*
1808168404Spjd		 * Can't attach to or replace this type of vdev.
1809168404Spjd		 */
1810185029Spjd		if (replacing) {
1811185029Spjd			if (islog)
1812185029Spjd				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1813185029Spjd				    "cannot replace a log with a spare"));
1814185029Spjd			else
1815185029Spjd				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1816185029Spjd				    "cannot replace a replacing device"));
1817185029Spjd		} else {
1818168404Spjd			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1819168404Spjd			    "can only attach to mirrors and top-level "
1820168404Spjd			    "disks"));
1821185029Spjd		}
1822168404Spjd		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1823168404Spjd		break;
1824168404Spjd
1825168404Spjd	case EINVAL:
1826168404Spjd		/*
1827168404Spjd		 * The new device must be a single disk.
1828168404Spjd		 */
1829168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1830168404Spjd		    "new device must be a single disk"));
1831168404Spjd		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1832168404Spjd		break;
1833168404Spjd
1834168404Spjd	case EBUSY:
1835168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1836168404Spjd		    new_disk);
1837168404Spjd		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1838168404Spjd		break;
1839168404Spjd
1840168404Spjd	case EOVERFLOW:
1841168404Spjd		/*
1842168404Spjd		 * The new device is too small.
1843168404Spjd		 */
1844168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1845168404Spjd		    "device is too small"));
1846168404Spjd		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1847168404Spjd		break;
1848168404Spjd
1849168404Spjd	case EDOM:
1850168404Spjd		/*
1851168404Spjd		 * The new device has a different alignment requirement.
1852168404Spjd		 */
1853168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1854168404Spjd		    "devices have different sector alignment"));
1855168404Spjd		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1856168404Spjd		break;
1857168404Spjd
1858168404Spjd	case ENAMETOOLONG:
1859168404Spjd		/*
1860168404Spjd		 * The resulting top-level vdev spec won't fit in the label.
1861168404Spjd		 */
1862168404Spjd		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1863168404Spjd		break;
1864168404Spjd
1865168404Spjd	default:
1866168404Spjd		(void) zpool_standard_error(hdl, errno, msg);
1867168404Spjd	}
1868168404Spjd
1869168404Spjd	return (-1);
1870168404Spjd}
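
/*
 * Example usage (an illustrative sketch, not part of the original source):
 * replace 'c1t0d0' with 'c2t0d0'.  The new device is described by an
 * nvlist rooted at a VDEV_TYPE_ROOT node with a single disk child; the
 * zpool(1M) command normally constructs this (and labels the disk) via
 * make_root_vdev().  Error checking is elided.
 *
 *	nvlist_t *nvroot, *disk;
 *
 *	(void) nvlist_alloc(&disk, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c2t0d0s0");
 *	(void) nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	(void) nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1);
 *
 *	(void) zpool_vdev_attach(zhp, "c1t0d0", "c2t0d0", nvroot, 1);
 *	nvlist_free(disk);
 *	nvlist_free(nvroot);
 *
 * Since nvlist_add_nvlist_array() copies 'disk' into 'nvroot', both lists
 * are freed by the caller.
 */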
1871168404Spjd
1872168404Spjd/*
1873168404Spjd * Detach the specified device.
1874168404Spjd */
1875168404Spjdint
1876168404Spjdzpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1877168404Spjd{
1878168404Spjd	zfs_cmd_t zc = { 0 };
1879168404Spjd	char msg[1024];
1880168404Spjd	nvlist_t *tgt;
1881185029Spjd	boolean_t avail_spare, l2cache;
1882168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1883168404Spjd
1884168404Spjd	(void) snprintf(msg, sizeof (msg),
1885168404Spjd	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1886168404Spjd
1887168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1888185029Spjd	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1889185029Spjd	    NULL)) == 0)
1890168404Spjd		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1891168404Spjd
1892168404Spjd	if (avail_spare)
1893168404Spjd		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1894168404Spjd
1895185029Spjd	if (l2cache)
1896185029Spjd		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1897185029Spjd
1898168404Spjd	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1899168404Spjd
1900185029Spjd	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1901168404Spjd		return (0);
1902168404Spjd
1903168404Spjd	switch (errno) {
1904168404Spjd
1905168404Spjd	case ENOTSUP:
1906168404Spjd		/*
1907168404Spjd		 * Can't detach from this type of vdev.
1908168404Spjd		 */
1909168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1910168404Spjd		    "applicable to mirror and replacing vdevs"));
1911168404Spjd		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
1912168404Spjd		break;
1913168404Spjd
1914168404Spjd	case EBUSY:
1915168404Spjd		/*
1916168404Spjd		 * There are no other replicas of this device.
1917168404Spjd		 */
1918168404Spjd		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1919168404Spjd		break;
1920168404Spjd
1921168404Spjd	default:
1922168404Spjd		(void) zpool_standard_error(hdl, errno, msg);
1923168404Spjd	}
1924168404Spjd
1925168404Spjd	return (-1);
1926168404Spjd}
1927168404Spjd
1928168404Spjd/*
1929185029Spjd * Remove the given device.  Currently, this is supported only for hot spares
1930185029Spjd * and level 2 cache devices.
1931168404Spjd */
1932168404Spjdint
1933168404Spjdzpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1934168404Spjd{
1935168404Spjd	zfs_cmd_t zc = { 0 };
1936168404Spjd	char msg[1024];
1937168404Spjd	nvlist_t *tgt;
1938185029Spjd	boolean_t avail_spare, l2cache;
1939168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1940168404Spjd
1941168404Spjd	(void) snprintf(msg, sizeof (msg),
1942168404Spjd	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1943168404Spjd
1944168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1945185029Spjd	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1946185029Spjd	    NULL)) == 0)
1947168404Spjd		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1948168404Spjd
1949185029Spjd	if (!avail_spare && !l2cache) {
1950168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1951185029Spjd		    "only inactive hot spares or cache devices "
1952185029Spjd		    "can be removed"));
1953168404Spjd		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1954168404Spjd	}
1955168404Spjd
1956168404Spjd	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1957168404Spjd
1958185029Spjd	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1959168404Spjd		return (0);
1960168404Spjd
1961168404Spjd	return (zpool_standard_error(hdl, errno, msg));
1962168404Spjd}
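
/*
 * Example usage (illustrative; 'zhp' and 'hdl' are assumed to be valid
 * handles): remove an inactive hot spare or cache device from the pool.
 *
 *	if (zpool_vdev_remove(zhp, "c3t0d0") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */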
1963168404Spjd
1964168404Spjd/*
1965168404Spjd * Clear the errors for the pool, or the particular device if specified.
1966168404Spjd */
1967168404Spjdint
1968168404Spjdzpool_clear(zpool_handle_t *zhp, const char *path)
1969168404Spjd{
1970168404Spjd	zfs_cmd_t zc = { 0 };
1971168404Spjd	char msg[1024];
1972168404Spjd	nvlist_t *tgt;
1973185029Spjd	boolean_t avail_spare, l2cache;
1974168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1975168404Spjd
1976168404Spjd	if (path)
1977168404Spjd		(void) snprintf(msg, sizeof (msg),
1978168404Spjd		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1979168404Spjd		    path);
1980168404Spjd	else
1981168404Spjd		(void) snprintf(msg, sizeof (msg),
1982168404Spjd		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1983168404Spjd		    zhp->zpool_name);
1984168404Spjd
1985168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1986168404Spjd	if (path) {
1987185029Spjd		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
1988185029Spjd		    &l2cache, NULL)) == 0)
1989168404Spjd			return (zfs_error(hdl, EZFS_NODEVICE, msg));
1990168404Spjd
1991185029Spjd		/*
1992185029Spjd		 * Don't allow error clearing for hot spares.  Do allow
1993185029Spjd		 * error clearing for l2cache devices.
1994185029Spjd		 */
1995168404Spjd		if (avail_spare)
1996168404Spjd			return (zfs_error(hdl, EZFS_ISSPARE, msg));
1997168404Spjd
1998168404Spjd		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1999168404Spjd		    &zc.zc_guid) == 0);
2000168404Spjd	}
2001168404Spjd
2002185029Spjd	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
2003185029Spjd		return (0);
2004185029Spjd
2005185029Spjd	return (zpool_standard_error(hdl, errno, msg));
2006185029Spjd}
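
/*
 * Example usage (illustrative): pass a NULL path to clear the error counts
 * of every device in the pool, or name a device to clear just that vdev.
 *
 *	(void) zpool_clear(zhp, NULL);
 *	(void) zpool_clear(zhp, "c1t0d0");
 */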
2007185029Spjd
2008185029Spjd/*
2009185029Spjd * Similar to zpool_clear(), but takes a GUID (used by fmd).
2010185029Spjd */
2011185029Spjdint
2012185029Spjdzpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2013185029Spjd{
2014185029Spjd	zfs_cmd_t zc = { 0 };
2015185029Spjd	char msg[1024];
2016185029Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
2017185029Spjd
2018185029Spjd	(void) snprintf(msg, sizeof (msg),
2019185029Spjd	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
2020185029Spjd	    guid);
2021185029Spjd
2022185029Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2023185029Spjd	zc.zc_guid = guid;
2024185029Spjd
2025168404Spjd	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2026168404Spjd		return (0);
2027168404Spjd
2028168404Spjd	return (zpool_standard_error(hdl, errno, msg));
2029168404Spjd}
2030168404Spjd
2031168404Spjd/*
2032168404Spjd * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
2033168404Spjd * hierarchy.
2034168404Spjd */
2035168404Spjdint
2036168404Spjdzpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
2037168404Spjd    void *data)
2038168404Spjd{
2039168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
2040168404Spjd	char (*paths)[MAXPATHLEN];
2041168404Spjd	char path[MAXPATHLEN];
2042168404Spjd	size_t size = 4;
2043168404Spjd	int curr, fd, base, ret = 0;
2044168404Spjd	DIR *dirp;
2045168404Spjd	struct dirent *dp;
2046168404Spjd	struct stat st;
2047168404Spjd
2048168404Spjd	if ((base = open(ZVOL_FULL_DEV_DIR, O_RDONLY)) < 0)
2049168404Spjd		return (errno == ENOENT ? 0 : -1);
2050168404Spjd
2051168404Spjd	(void) snprintf(path, sizeof (path), "%s/%s", ZVOL_FULL_DEV_DIR,
2052168404Spjd	    zhp->zpool_name);
2053168404Spjd	if (stat(path, &st) != 0) {
2054168404Spjd		int err = errno;
2055168404Spjd		(void) close(base);
2056168404Spjd		return (err == ENOENT ? 0 : -1);
2057168404Spjd	}
2058168404Spjd
2059168404Spjd	/*
2060168404Spjd	 * Oddly this wasn't a directory -- ignore that failure since we
2061168404Spjd	 * know there are no links lower in the (non-existent) hierarchy.
2062168404Spjd	 */
2063168404Spjd	if (!S_ISDIR(st.st_mode)) {
2064168404Spjd		(void) close(base);
2065168404Spjd		return (0);
2066168404Spjd	}
2067168404Spjd
2068168404Spjd	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
2069168404Spjd		(void) close(base);
2070168404Spjd		return (-1);
2071168404Spjd	}
2072168404Spjd
2073168404Spjd	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
2074168404Spjd	curr = 0;
2075168404Spjd
2076168404Spjd	while (curr >= 0) {
2077168404Spjd		(void) snprintf(path, sizeof (path), "%s/%s", ZVOL_FULL_DEV_DIR,
2078168404Spjd		    paths[curr]);
2079168404Spjd		if (lstat(path, &st) != 0)
2080168404Spjd			goto err;
2081168404Spjd
2082168404Spjd		if (S_ISDIR(st.st_mode)) {
2083168404Spjd			if ((dirp = opendir(path)) == NULL) {
2084168404Spjd				goto err;
2085168404Spjd			}
2086168404Spjd
2087168404Spjd			while ((dp = readdir(dirp)) != NULL) {
2088168404Spjd				if (dp->d_name[0] == '.')
2089168404Spjd					continue;
2090168404Spjd
2091168404Spjd				if (curr + 1 == size) {
2092168404Spjd					paths = zfs_realloc(hdl, paths,
2093168404Spjd					    size * sizeof (paths[0]),
2094168404Spjd					    size * 2 * sizeof (paths[0]));
2095168404Spjd					if (paths == NULL) {
2096168404Spjd						(void) closedir(dirp);
2097168404Spjd						goto err;
2098168404Spjd					}
2099168404Spjd
2100168404Spjd					size *= 2;
2101168404Spjd				}
2102168404Spjd
2103168404Spjd				(void) strlcpy(paths[curr + 1], paths[curr],
2104168404Spjd				    sizeof (paths[curr + 1]));
2105168404Spjd				(void) strlcat(paths[curr], "/",
2106168404Spjd				    sizeof (paths[curr]));
2107168404Spjd				(void) strlcat(paths[curr], dp->d_name,
2108168404Spjd				    sizeof (paths[curr]));
2109168404Spjd				curr++;
2110168404Spjd			}
2111168404Spjd
2112168404Spjd			(void) closedir(dirp);
2113168404Spjd
2114168404Spjd		} else {
2115168404Spjd			if ((ret = cb(paths[curr], data)) != 0)
2116168404Spjd				break;
2117168404Spjd		}
2118168404Spjd
2119168404Spjd		curr--;
2120168404Spjd	}
2121168404Spjd
2122168404Spjd	free(paths);
2123168404Spjd	(void) close(base);
2124168404Spjd
2125168404Spjd	return (ret);
2126168404Spjd
2127168404Spjderr:
2128168404Spjd	free(paths);
2129168404Spjd	(void) close(base);
2130168404Spjd	return (-1);
2131168404Spjd}
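
/*
 * Example callback (an illustrative sketch, not part of the original
 * source): print the name of every zvol link found under the pool's
 * /dev/zvol/dsk hierarchy.
 *
 *	static int
 *	print_zvol_cb(const char *dataset, void *data)
 *	{
 *		(void) printf("%s\n", dataset);
 *		return (0);
 *	}
 *
 *	...
 *	(void) zpool_iter_zvol(zhp, print_zvol_cb, NULL);
 */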
2132168404Spjd
2133168404Spjdtypedef struct zvol_cb {
2134168404Spjd	zpool_handle_t *zcb_pool;
2135168404Spjd	boolean_t zcb_create;
2136168404Spjd} zvol_cb_t;
2137168404Spjd
2138168404Spjd/*ARGSUSED*/
2139168404Spjdstatic int
2140168404Spjddo_zvol_create(zfs_handle_t *zhp, void *data)
2141168404Spjd{
2142185029Spjd	int ret = 0;
2143168404Spjd
2144185029Spjd	if (ZFS_IS_VOLUME(zhp)) {
2145168404Spjd		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
2146185029Spjd		ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
2147185029Spjd	}
2148168404Spjd
2149185029Spjd	if (ret == 0)
2150185029Spjd		ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);
2151168404Spjd
2152168404Spjd	zfs_close(zhp);
2153168404Spjd
2154168404Spjd	return (ret);
2155168404Spjd}
2156168404Spjd
2157168404Spjd/*
2158168404Spjd * Iterate over all zvols in the pool and make any necessary minor nodes.
2159168404Spjd */
2160168404Spjdint
2161168404Spjdzpool_create_zvol_links(zpool_handle_t *zhp)
2162168404Spjd{
2163168404Spjd	zfs_handle_t *zfp;
2164168404Spjd	int ret;
2165168404Spjd
2166168404Spjd	/*
2167168404Spjd	 * If the pool is unavailable, just return success.
2168168404Spjd	 */
2169168404Spjd	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
2170168404Spjd	    zhp->zpool_name)) == NULL)
2171168404Spjd		return (0);
2172168404Spjd
2173185029Spjd	ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);
2174168404Spjd
2175168404Spjd	zfs_close(zfp);
2176168404Spjd	return (ret);
2177168404Spjd}
2178168404Spjd
2179168404Spjdstatic int
2180168404Spjddo_zvol_remove(const char *dataset, void *data)
2181168404Spjd{
2182168404Spjd	zpool_handle_t *zhp = data;
2183168404Spjd
2184168404Spjd	return (zvol_remove_link(zhp->zpool_hdl, dataset));
2185168404Spjd}
2186168404Spjd
2187168404Spjd/*
2188168404Spjd * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
2189168404Spjd * by examining the /dev links so that a corrupted pool doesn't impede this
2190168404Spjd * operation.
2191168404Spjd */
2192168404Spjdint
2193168404Spjdzpool_remove_zvol_links(zpool_handle_t *zhp)
2194168404Spjd{
2195168404Spjd	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
2196168404Spjd}
2197168404Spjd
2198168404Spjd/*
2199168404Spjd * Convert from a devid string to a path.
2200168404Spjd */
2201168404Spjdstatic char *
2202168404Spjddevid_to_path(char *devid_str)
2203168404Spjd{
2204168404Spjd	ddi_devid_t devid;
2205168404Spjd	char *minor;
2206168404Spjd	char *path;
2207168404Spjd	devid_nmlist_t *list = NULL;
2208168404Spjd	int ret;
2209168404Spjd
2210168404Spjd	if (devid_str_decode(devid_str, &devid, &minor) != 0)
2211168404Spjd		return (NULL);
2212168404Spjd
2213168404Spjd	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2214168404Spjd
2215168404Spjd	devid_str_free(minor);
2216168404Spjd	devid_free(devid);
2217168404Spjd
2218168404Spjd	if (ret != 0)
2219168404Spjd		return (NULL);
2220168404Spjd
2221168404Spjd	if ((path = strdup(list[0].devname)) == NULL)
2222168404Spjd		return (NULL);
2223168404Spjd
2224168404Spjd	devid_free_nmlist(list);
2225168404Spjd
2226168404Spjd	return (path);
2227168404Spjd}
2228168404Spjd
2229168404Spjd/*
2230168404Spjd * Convert from a path to a devid string.
2231168404Spjd */
2232168404Spjdstatic char *
2233168404Spjdpath_to_devid(const char *path)
2234168404Spjd{
2235168404Spjd	int fd;
2236168404Spjd	ddi_devid_t devid;
2237168404Spjd	char *minor, *ret;
2238168404Spjd
2239168404Spjd	if ((fd = open(path, O_RDONLY)) < 0)
2240168404Spjd		return (NULL);
2241168404Spjd
2242168404Spjd	minor = NULL;
2243168404Spjd	ret = NULL;
2244168404Spjd	if (devid_get(fd, &devid) == 0) {
2245168404Spjd		if (devid_get_minor_name(fd, &minor) == 0)
2246168404Spjd			ret = devid_str_encode(devid, minor);
2247168404Spjd		if (minor != NULL)
2248168404Spjd			devid_str_free(minor);
2249168404Spjd		devid_free(devid);
2250168404Spjd	}
2251168404Spjd	(void) close(fd);
2252168404Spjd
2253168404Spjd	return (ret);
2254168404Spjd}
2255168404Spjd
2256168404Spjd/*
2257168404Spjd * Issue the necessary ioctl() to update the stored path value for the vdev.  We
2258168404Spjd * ignore any failure here, since a common case is for an unprivileged user to
2259168404Spjd * type 'zpool status', and we'll display the correct information anyway.
2260168404Spjd */
2261168404Spjdstatic void
2262168404Spjdset_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2263168404Spjd{
2264168404Spjd	zfs_cmd_t zc = { 0 };
2265168404Spjd
2266168404Spjd	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2267168404Spjd	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
2268168404Spjd	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2269168404Spjd	    &zc.zc_guid) == 0);
2270168404Spjd
2271168404Spjd	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
2272168404Spjd}
2273168404Spjd
2274168404Spjd/*
2275168404Spjd * Given a vdev, return the name to display in iostat.  If the vdev has a path,
2276168404Spjd * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2277168404Spjd * We also check if this is a whole disk, in which case we strip off the
2278168404Spjd * trailing 's0' slice name.
2279168404Spjd *
2280168404Spjd * This routine is also responsible for identifying when disks have been
2281168404Spjd * reconfigured in a new location.  The kernel will have opened the device by
2282168404Spjd * devid, but the path will still refer to the old location.  To catch this, we
2283168404Spjd * first do a path -> devid translation (which is fast for the common case).  If
2284168404Spjd * the devid matches, we're done.  If not, we do a reverse devid -> path
2285168404Spjd * translation and issue the appropriate ioctl() to update the path of the vdev.
2286168404Spjd * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2287168404Spjd * of these checks.
2288168404Spjd */
2289168404Spjdchar *
2290168404Spjdzpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
2291168404Spjd{
2292168404Spjd	char *path, *devid;
2293168404Spjd	uint64_t value;
2294168404Spjd	char buf[64];
2295185029Spjd	vdev_stat_t *vs;
2296185029Spjd	uint_t vsc;
2297168404Spjd
2298168404Spjd	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2299168404Spjd	    &value) == 0) {
2300168404Spjd		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2301168404Spjd		    &value) == 0);
2302168404Spjd		(void) snprintf(buf, sizeof (buf), "%llu",
2303168404Spjd		    (u_longlong_t)value);
2304168404Spjd		path = buf;
2305168404Spjd	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2306168404Spjd
2307185029Spjd		/*
2308185029Spjd		 * If the device is dead (faulted, offline, etc) then don't
2309185029Spjd		 * bother opening it.  Otherwise we may be forcing the user to
2310185029Spjd		 * open a misbehaving device, which can have undesirable
2311185029Spjd		 * effects.
2312185029Spjd		 */
2313185029Spjd		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2314185029Spjd		    (uint64_t **)&vs, &vsc) != 0 ||
2315185029Spjd		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
2316185029Spjd		    zhp != NULL &&
2317168404Spjd		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2318168404Spjd			/*
2319168404Spjd			 * Determine if the current path is correct.
2320168404Spjd			 */
2321168404Spjd			char *newdevid = path_to_devid(path);
2322168404Spjd
2323168404Spjd			if (newdevid == NULL ||
2324168404Spjd			    strcmp(devid, newdevid) != 0) {
2325168404Spjd				char *newpath;
2326168404Spjd
2327168404Spjd				if ((newpath = devid_to_path(devid)) != NULL) {
2328168404Spjd					/*
2329168404Spjd					 * Update the path appropriately.
2330168404Spjd					 */
2331168404Spjd					set_path(zhp, nv, newpath);
2332168404Spjd					if (nvlist_add_string(nv,
2333168404Spjd					    ZPOOL_CONFIG_PATH, newpath) == 0)
2334168404Spjd						verify(nvlist_lookup_string(nv,
2335168404Spjd						    ZPOOL_CONFIG_PATH,
2336168404Spjd						    &path) == 0);
2337168404Spjd					free(newpath);
2338168404Spjd				}
2339168404Spjd			}
2340168404Spjd
2341168404Spjd			if (newdevid)
2342168404Spjd				devid_str_free(newdevid);
2343168404Spjd		}
2344168404Spjd
2345168404Spjd		if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
2346168404Spjd			path += sizeof(_PATH_DEV) - 1;
2347168404Spjd
2348168404Spjd		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2349168404Spjd		    &value) == 0 && value) {
2350168404Spjd			char *tmp = zfs_strdup(hdl, path);
2351168404Spjd			if (tmp == NULL)
2352168404Spjd				return (NULL);
2353168404Spjd			tmp[strlen(path) - 2] = '\0';
2354168404Spjd			return (tmp);
2355168404Spjd		}
2356168404Spjd	} else {
2357168404Spjd		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
2358168404Spjd
2359168404Spjd		/*
2360168404Spjd		 * If it's a raidz device, we need to stick in the parity level.
2361168404Spjd		 */
2362168404Spjd		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
2363168404Spjd			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
2364168404Spjd			    &value) == 0);
2365168404Spjd			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
2366168404Spjd			    (u_longlong_t)value);
2367168404Spjd			path = buf;
2368168404Spjd		}
2369168404Spjd	}
2370168404Spjd
2371168404Spjd	return (zfs_strdup(hdl, path));
2372168404Spjd}
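
/*
 * Example usage (an illustrative sketch; 'hdl' and 'zhp' are assumed to be
 * valid handles): print the display name of each top-level vdev.  The
 * returned string is allocated and must be freed by the caller.
 *
 *	nvlist_t *nvroot, **child;
 *	uint_t c, children;
 *	char *name;
 *
 *	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
 *	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
 *	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &child, &children) == 0);
 *	for (c = 0; c < children; c++) {
 *		name = zpool_vdev_name(hdl, zhp, child[c]);
 *		(void) printf("%s\n", name);
 *		free(name);
 *	}
 */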
2373168404Spjd
2374168404Spjdstatic int
2375168404Spjdzbookmark_compare(const void *a, const void *b)
2376168404Spjd{
2377168404Spjd	return (memcmp(a, b, sizeof (zbookmark_t)));
2378168404Spjd}
2379168404Spjd
2380168404Spjd/*
2381168404Spjd * Retrieve the persistent error log, uniquify the members, and return to the
2382168404Spjd * caller.
2383168404Spjd */
2384168404Spjdint
2385168404Spjdzpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
2386168404Spjd{
2387168404Spjd	zfs_cmd_t zc = { 0 };
2388168404Spjd	uint64_t count;
2389168404Spjd	zbookmark_t *zb = NULL;
2390168404Spjd	int i;
2391168404Spjd
2392168404Spjd	/*
2393168404Spjd	 * Retrieve the raw error list from the kernel.  If the number of errors
2394168404Spjd	 * has increased, allocate more space and continue until we get the
2395168404Spjd	 * entire list.
2396168404Spjd	 */
2397168404Spjd	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
2398168404Spjd	    &count) == 0);
2399185029Spjd	if (count == 0)
2400185029Spjd		return (0);
2401168404Spjd	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
2402168404Spjd	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
2403168404Spjd		return (-1);
2404168404Spjd	zc.zc_nvlist_dst_size = count;
2405168404Spjd	(void) strcpy(zc.zc_name, zhp->zpool_name);
2406168404Spjd	for (;;) {
2407168404Spjd		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
2408168404Spjd		    &zc) != 0) {
2409168404Spjd			free((void *)(uintptr_t)zc.zc_nvlist_dst);
2410168404Spjd			if (errno == ENOMEM) {
2411168404Spjd				count = zc.zc_nvlist_dst_size;
2412168404Spjd				if ((zc.zc_nvlist_dst = (uintptr_t)
2413168404Spjd				    zfs_alloc(zhp->zpool_hdl, count *
2414168404Spjd				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
2415168404Spjd					return (-1);
2416168404Spjd			} else {
2417168404Spjd				return (-1);
2418168404Spjd			}
2419168404Spjd		} else {
2420168404Spjd			break;
2421168404Spjd		}
2422168404Spjd	}
2423168404Spjd
2424168404Spjd	/*
2425168404Spjd	 * Sort the resulting bookmarks.  This is a little confusing due to the
2426168404Spjd	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
2427168404Spjd	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
2428168404Spjd	 * _not_ copied as part of the process.  So we point the start of our
2429168404Spjd	 * array appropriately and decrement the total number of elements.
2430168404Spjd	 */
2431168404Spjd	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
2432168404Spjd	    zc.zc_nvlist_dst_size;
2433168404Spjd	count -= zc.zc_nvlist_dst_size;
2434168404Spjd
2435168404Spjd	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
2436168404Spjd
2437168404Spjd	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
2438168404Spjd
2439168404Spjd	/*
2440168404Spjd	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
2441168404Spjd	 */
2442168404Spjd	for (i = 0; i < count; i++) {
2443168404Spjd		nvlist_t *nv;
2444168404Spjd
2445168404Spjd		/* ignoring zb_blkid and zb_level for now */
2446168404Spjd		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
2447168404Spjd		    zb[i-1].zb_object == zb[i].zb_object)
2448168404Spjd			continue;
2449168404Spjd
2450168404Spjd		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
2451168404Spjd			goto nomem;
2452168404Spjd		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
2453168404Spjd		    zb[i].zb_objset) != 0) {
2454168404Spjd			nvlist_free(nv);
2455168404Spjd			goto nomem;
2456168404Spjd		}
2457168404Spjd		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
2458168404Spjd		    zb[i].zb_object) != 0) {
2459168404Spjd			nvlist_free(nv);
2460168404Spjd			goto nomem;
2461168404Spjd		}
2462168404Spjd		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
2463168404Spjd			nvlist_free(nv);
2464168404Spjd			goto nomem;
2465168404Spjd		}
2466168404Spjd		nvlist_free(nv);
2467168404Spjd	}
2468168404Spjd
2469168404Spjd	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2470168404Spjd	return (0);
2471168404Spjd
2472168404Spjdnomem:
2473168404Spjd	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2474168404Spjd	return (no_memory(zhp->zpool_hdl));
2475168404Spjd}
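
/*
 * Example usage (an illustrative sketch): walk the uniquified error list
 * and print a pathname for each damaged object, much as 'zpool status -v'
 * does.  Note that when the error count is zero the list is left
 * untouched, so it must be initialized to NULL.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *	nvlist_t *nv;
 *	uint64_t dsobj, obj;
 *	char pathname[MAXPATHLEN];
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0 && nverrlist != NULL) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *			    &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *			    &obj) == 0);
 *			zpool_obj_to_path(zhp, dsobj, obj, pathname,
 *			    sizeof (pathname));
 *			(void) printf("%s\n", pathname);
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */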
2476168404Spjd
2477168404Spjd/*
2478168404Spjd * Upgrade a ZFS pool to the latest on-disk version.
2479168404Spjd */
2480168404Spjdint
2481185029Spjdzpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
2482168404Spjd{
2483168404Spjd	zfs_cmd_t zc = { 0 };
2484168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
2485168404Spjd
2486168404Spjd	(void) strcpy(zc.zc_name, zhp->zpool_name);
2487185029Spjd	zc.zc_cookie = new_version;
2488185029Spjd
2489185029Spjd	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
2490168404Spjd		return (zpool_standard_error_fmt(hdl, errno,
2491168404Spjd		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
2492168404Spjd		    zhp->zpool_name));
2493168404Spjd	return (0);
2494168404Spjd}
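
/*
 * Example usage (illustrative): upgrade a pool to the newest on-disk
 * version supported by this library.
 *
 *	if (zpool_upgrade(zhp, SPA_VERSION) != 0)
 *		return (-1);
 */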
2495168404Spjd
2496168404Spjdvoid
2497185029Spjdzpool_set_history_str(const char *subcommand, int argc, char **argv,
2498185029Spjd    char *history_str)
2499168404Spjd{
2500168404Spjd	int i;
2501168404Spjd
2502185029Spjd	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
2503185029Spjd	for (i = 1; i < argc; i++) {
2504185029Spjd		if (strlen(history_str) + 1 + strlen(argv[i]) >
2505185029Spjd		    HIS_MAX_RECORD_LEN)
2506168404Spjd			break;
2507185029Spjd		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
2508185029Spjd		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
2509168404Spjd	}
2510185029Spjd}
2511168404Spjd
2512185029Spjd/*
2513185029Spjd * Stage command history for logging.
2514185029Spjd */
2515185029Spjdint
2516185029Spjdzpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
2517185029Spjd{
2518185029Spjd	if (history_str == NULL)
2519185029Spjd		return (EINVAL);
2520168404Spjd
2521185029Spjd	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
2522185029Spjd		return (EINVAL);
2523168404Spjd
2524185029Spjd	if (hdl->libzfs_log_str != NULL)
2525185029Spjd		free(hdl->libzfs_log_str);
2526168404Spjd
2527185029Spjd	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
2528185029Spjd		return (no_memory(hdl));
2529185029Spjd
2530185029Spjd	return (0);
2531168404Spjd}
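
/*
 * Example usage (illustrative): stage the invoking command line so the
 * kernel can log it with the pool history, as zpool(1M) does at startup.
 *
 *	char history_str[HIS_MAX_RECORD_LEN];
 *
 *	zpool_set_history_str("zpool", argc, argv, history_str);
 *	(void) zpool_stage_history(hdl, history_str);
 */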
2532168404Spjd
2533168404Spjd/*
2534168404Spjd * Perform ioctl to get some command history of a pool.
2535168404Spjd *
2536168404Spjd * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
2537168404Spjd * logical offset of the history buffer to start reading from.
2538168404Spjd *
2539168404Spjd * Upon return, 'off' is the next logical offset to read from and
2540168404Spjd * 'len' is the actual amount of bytes read into 'buf'.
2541168404Spjd */
2542168404Spjdstatic int
2543168404Spjdget_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
2544168404Spjd{
2545168404Spjd	zfs_cmd_t zc = { 0 };
2546168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
2547168404Spjd
2548168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2549168404Spjd
2550168404Spjd	zc.zc_history = (uint64_t)(uintptr_t)buf;
2551168404Spjd	zc.zc_history_len = *len;
2552168404Spjd	zc.zc_history_offset = *off;
2553168404Spjd
2554168404Spjd	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
2555168404Spjd		switch (errno) {
2556168404Spjd		case EPERM:
2557168404Spjd			return (zfs_error_fmt(hdl, EZFS_PERM,
2558168404Spjd			    dgettext(TEXT_DOMAIN,
2559168404Spjd			    "cannot show history for pool '%s'"),
2560168404Spjd			    zhp->zpool_name));
2561168404Spjd		case ENOENT:
2562168404Spjd			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
2563168404Spjd			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2564168404Spjd			    "'%s'"), zhp->zpool_name));
2565168404Spjd		case ENOTSUP:
2566168404Spjd			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
2567168404Spjd			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2568168404Spjd			    "'%s', pool must be upgraded"), zhp->zpool_name));
2569168404Spjd		default:
2570168404Spjd			return (zpool_standard_error_fmt(hdl, errno,
2571168404Spjd			    dgettext(TEXT_DOMAIN,
2572168404Spjd			    "cannot get history for '%s'"), zhp->zpool_name));
2573168404Spjd		}
2574168404Spjd	}
2575168404Spjd
2576168404Spjd	*len = zc.zc_history_len;
2577168404Spjd	*off = zc.zc_history_offset;
2578168404Spjd
2579168404Spjd	return (0);
2580168404Spjd}
2581168404Spjd
2582168404Spjd/*
2583168404Spjd * Process the buffer of nvlists, unpacking and storing each nvlist record
2584168404Spjd * into 'records'.  'leftover' is set to the number of bytes that weren't
2585168404Spjd * processed as there wasn't a complete record.
2586168404Spjd */
2587168404Spjdstatic int
2588168404Spjdzpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
2589168404Spjd    nvlist_t ***records, uint_t *numrecords)
2590168404Spjd{
2591168404Spjd	uint64_t reclen;
2592168404Spjd	nvlist_t *nv;
2593168404Spjd	int i;
2594168404Spjd
2595168404Spjd	while (bytes_read > sizeof (reclen)) {
2596168404Spjd
2597168404Spjd		/* get length of packed record (stored as little endian) */
2598168404Spjd		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
2599168404Spjd			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
2600168404Spjd
2601168404Spjd		if (bytes_read < sizeof (reclen) + reclen)
2602168404Spjd			break;
2603168404Spjd
2604168404Spjd		/* unpack record */
2605168404Spjd		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
2606168404Spjd			return (ENOMEM);
2607168404Spjd		bytes_read -= sizeof (reclen) + reclen;
2608168404Spjd		buf += sizeof (reclen) + reclen;
2609168404Spjd
2610168404Spjd		/* add record to nvlist array */
2611168404Spjd		(*numrecords)++;
2612168404Spjd		if (ISP2(*numrecords + 1)) {
2613168404Spjd			*records = realloc(*records,
2614168404Spjd			    *numrecords * 2 * sizeof (nvlist_t *));
2615168404Spjd		}
2616168404Spjd		(*records)[*numrecords - 1] = nv;
2617168404Spjd	}
2618168404Spjd
2619168404Spjd	*leftover = bytes_read;
2620168404Spjd	return (0);
2621168404Spjd}
2622168404Spjd
2623168404Spjd#define	HIS_BUF_LEN	(128*1024)
2624168404Spjd
2625168404Spjd/*
2626168404Spjd * Retrieve the command history of a pool.
2627168404Spjd */
2628168404Spjdint
2629168404Spjdzpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
2630168404Spjd{
2631168404Spjd	char buf[HIS_BUF_LEN];
2632168404Spjd	uint64_t off = 0;
2633168404Spjd	nvlist_t **records = NULL;
2634168404Spjd	uint_t numrecords = 0;
2635168404Spjd	int err, i;
2636168404Spjd
2637168404Spjd	do {
2638168404Spjd		uint64_t bytes_read = sizeof (buf);
2639168404Spjd		uint64_t leftover;
2640168404Spjd
2641168404Spjd		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
2642168404Spjd			break;
2643168404Spjd
2644168404Spjd		/* if nothing else was read in, we're at EOF, just return */
2645168404Spjd		if (!bytes_read)
2646168404Spjd			break;
2647168404Spjd
2648168404Spjd		if ((err = zpool_history_unpack(buf, bytes_read,
2649168404Spjd		    &leftover, &records, &numrecords)) != 0)
2650168404Spjd			break;
2651168404Spjd		off -= leftover;
2652168404Spjd
2653168404Spjd		/* CONSTCOND */
2654168404Spjd	} while (1);
2655168404Spjd
2656168404Spjd	if (!err) {
2657168404Spjd		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
2658168404Spjd		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
2659168404Spjd		    records, numrecords) == 0);
2660168404Spjd	}
2661168404Spjd	for (i = 0; i < numrecords; i++)
2662168404Spjd		nvlist_free(records[i]);
2663168404Spjd	free(records);
2664168404Spjd
2665168404Spjd	return (err);
2666168404Spjd}
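
/*
 * Example usage (an illustrative sketch): print the logged command of each
 * history record, roughly what 'zpool history' does.  Error checking is
 * elided.
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t numrecords, i;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis,
 *		    ZPOOL_HIST_RECORD, &records, &numrecords) == 0);
 *		for (i = 0; i < numrecords; i++) {
 *			if (nvlist_lookup_string(records[i],
 *			    ZPOOL_HIST_CMD, &cmd) == 0)
 *				(void) printf("%s\n", cmd);
 *		}
 *		nvlist_free(nvhis);
 *	}
 */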
2667168404Spjd
2668168404Spjdvoid
2669168404Spjdzpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
2670168404Spjd    char *pathname, size_t len)
2671168404Spjd{
2672168404Spjd	zfs_cmd_t zc = { 0 };
2673168404Spjd	boolean_t mounted = B_FALSE;
2674168404Spjd	char *mntpnt = NULL;
2675168404Spjd	char dsname[MAXNAMELEN];
2676168404Spjd
2677168404Spjd	if (dsobj == 0) {
2678168404Spjd		/* special case for the MOS */
2679168404Spjd		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
2680168404Spjd		return;
2681168404Spjd	}
2682168404Spjd
2683168404Spjd	/* get the dataset's name */
2684168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2685168404Spjd	zc.zc_obj = dsobj;
2686168404Spjd	if (ioctl(zhp->zpool_hdl->libzfs_fd,
2687168404Spjd	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
2688168404Spjd		/* just write out a path of two object numbers */
2689168404Spjd		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
2690168404Spjd		    dsobj, obj);
2691168404Spjd		return;
2692168404Spjd	}
2693168404Spjd	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
2694168404Spjd
2695168404Spjd	/* find out if the dataset is mounted */
2696168404Spjd	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
2697168404Spjd
2698168404Spjd	/* get the corrupted object's path */
2699168404Spjd	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
2700168404Spjd	zc.zc_obj = obj;
2701168404Spjd	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
2702168404Spjd	    &zc) == 0) {
2703168404Spjd		if (mounted) {
2704168404Spjd			(void) snprintf(pathname, len, "%s%s", mntpnt,
2705168404Spjd			    zc.zc_value);
2706168404Spjd		} else {
2707168404Spjd			(void) snprintf(pathname, len, "%s:%s",
2708168404Spjd			    dsname, zc.zc_value);
2709168404Spjd		}
2710168404Spjd	} else {
2711168404Spjd		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
2712168404Spjd	}
2713168404Spjd	free(mntpnt);
2714168404Spjd}
2715168404Spjd
2716185029Spjd#define	RDISK_ROOT	"/dev/rdsk"
2717185029Spjd#define	BACKUP_SLICE	"s2"
2718185029Spjd/*
2719185029Spjd * Don't start the slice at the default block of 34; many storage
2720185029Spjd * devices will use a stripe width of 128k, so start there instead.
2721185029Spjd */
2722185029Spjd#define	NEW_START_BLOCK	256
2723185029Spjd
2724185029Spjd#if defined(sun)
2725185029Spjd/*
2726185029Spjd * Read the EFI label from the config, if a label does not exist then
2727185029Spjd * pass back the error to the caller. If the caller has passed a non-NULL
2728185029Spjd * diskaddr argument then we set it to the starting address of the EFI
2729185029Spjd * partition.
2730185029Spjd */
2731185029Spjdstatic int
2732185029Spjdread_efi_label(nvlist_t *config, diskaddr_t *sb)
2733168404Spjd{
2734185029Spjd	char *path;
2735185029Spjd	int fd;
2736185029Spjd	char diskname[MAXPATHLEN];
2737185029Spjd	int err = -1;
2738168404Spjd
2739185029Spjd	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
2740185029Spjd		return (err);
2741168404Spjd
2742185029Spjd	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
2743185029Spjd	    strrchr(path, '/'));
2744185029Spjd	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2745185029Spjd		struct dk_gpt *vtoc;
2746185029Spjd
2747185029Spjd		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
2748185029Spjd			if (sb != NULL)
2749185029Spjd				*sb = vtoc->efi_parts[0].p_start;
2750185029Spjd			efi_free(vtoc);
2751185029Spjd		}
2752185029Spjd		(void) close(fd);
2753168404Spjd	}
2754185029Spjd	return (err);
2755185029Spjd}
2756168404Spjd
2757185029Spjd/*
2758185029Spjd * Determine where a partition starts on a disk in the current
2759185029Spjd * configuration.
2760185029Spjd */
2761185029Spjdstatic diskaddr_t
2762185029Spjdfind_start_block(nvlist_t *config)
2763185029Spjd{
2764185029Spjd	nvlist_t **child;
2765185029Spjd	uint_t c, children;
2766185029Spjd	diskaddr_t sb = MAXOFFSET_T;
2767185029Spjd	uint64_t wholedisk;
2768168404Spjd
2769185029Spjd	if (nvlist_lookup_nvlist_array(config,
2770185029Spjd	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2771185029Spjd		if (nvlist_lookup_uint64(config,
2772185029Spjd		    ZPOOL_CONFIG_WHOLE_DISK,
2773185029Spjd		    &wholedisk) != 0 || !wholedisk) {
2774185029Spjd			return (MAXOFFSET_T);
2775185029Spjd		}
2776185029Spjd		if (read_efi_label(config, &sb) < 0)
2777185029Spjd			sb = MAXOFFSET_T;
2778185029Spjd		return (sb);
2779168404Spjd	}
2780168404Spjd
2781185029Spjd	for (c = 0; c < children; c++) {
2782185029Spjd		sb = find_start_block(child[c]);
2783185029Spjd		if (sb != MAXOFFSET_T) {
2784185029Spjd			return (sb);
2785185029Spjd		}
2786168404Spjd	}
2787185029Spjd	return (MAXOFFSET_T);
2788185029Spjd}
2789185029Spjd#endif /* sun */
2790168404Spjd
2791185029Spjd/*
2792185029Spjd * Label an individual disk.  The name provided is the short name,
2793185029Spjd * stripped of any leading /dev path.
2794185029Spjd */
2795185029Spjdint
2796185029Spjdzpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
2797185029Spjd{
2798185029Spjd#if defined(sun)
2799185029Spjd	char path[MAXPATHLEN];
2800185029Spjd	struct dk_gpt *vtoc;
2801185029Spjd	int fd;
2802185029Spjd	size_t resv = EFI_MIN_RESV_SIZE;
2803185029Spjd	uint64_t slice_size;
2804185029Spjd	diskaddr_t start_block;
2805185029Spjd	char errbuf[1024];
2806168404Spjd
2807185029Spjd	/* prepare an error message just in case */
2808185029Spjd	(void) snprintf(errbuf, sizeof (errbuf),
2809185029Spjd	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
2810168404Spjd
2811185029Spjd	if (zhp) {
2812185029Spjd		nvlist_t *nvroot;
2813168404Spjd
2814185029Spjd		verify(nvlist_lookup_nvlist(zhp->zpool_config,
2815185029Spjd		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2816168404Spjd
2817185029Spjd		if (zhp->zpool_start_block == 0)
2818185029Spjd			start_block = find_start_block(nvroot);
2819185029Spjd		else
2820185029Spjd			start_block = zhp->zpool_start_block;
2821185029Spjd		zhp->zpool_start_block = start_block;
2822185029Spjd	} else {
2823185029Spjd		/* new pool */
2824185029Spjd		start_block = NEW_START_BLOCK;
2825185029Spjd	}
2826168404Spjd
2827185029Spjd	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
2828185029Spjd	    BACKUP_SLICE);
2829168404Spjd
2830185029Spjd	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2831185029Spjd		/*
2832185029Spjd		 * This shouldn't happen.  We've long since verified that this
2833185029Spjd		 * is a valid device.
2834185029Spjd		 */
2835185029Spjd		zfs_error_aux(hdl,
2836185029Spjd		    dgettext(TEXT_DOMAIN, "unable to open device"));
2837185029Spjd		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2838185029Spjd	}
2839168404Spjd
2840185029Spjd	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
2841185029Spjd		/*
2842185029Spjd		 * The only way this can fail is if we run out of memory, or we
2843185029Spjd		 * were unable to read the disk's capacity
2844185029Spjd		 */
2845185029Spjd		if (errno == ENOMEM)
2846185029Spjd			(void) no_memory(hdl);
2847168404Spjd
2848185029Spjd		(void) close(fd);
2849185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2850185029Spjd		    "unable to read disk capacity"));
2851185029Spjd
2852185029Spjd		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2853168404Spjd	}
2854168404Spjd
2855185029Spjd	slice_size = vtoc->efi_last_u_lba + 1;
2856185029Spjd	slice_size -= EFI_MIN_RESV_SIZE;
2857185029Spjd	if (start_block == MAXOFFSET_T)
2858185029Spjd		start_block = NEW_START_BLOCK;
2859185029Spjd	slice_size -= start_block;
2860168404Spjd
2861185029Spjd	vtoc->efi_parts[0].p_start = start_block;
2862185029Spjd	vtoc->efi_parts[0].p_size = slice_size;
2863185029Spjd
2864168404Spjd	/*
2865185029Spjd	 * Why we use V_USR: V_BACKUP confuses users, and is considered
2866185029Spjd	 * disposable by some EFI utilities (since EFI doesn't have a backup
2867185029Spjd	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
2868185029Spjd	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
2869185029Spjd	 * etc. were all pretty specific.  V_USR is as close to reality as we
2870185029Spjd	 * can get, in the absence of V_OTHER.
2871168404Spjd	 */
2872185029Spjd	vtoc->efi_parts[0].p_tag = V_USR;
2873185029Spjd	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
2874168404Spjd
2875185029Spjd	vtoc->efi_parts[8].p_start = slice_size + start_block;
2876185029Spjd	vtoc->efi_parts[8].p_size = resv;
2877185029Spjd	vtoc->efi_parts[8].p_tag = V_RESERVED;
2878168404Spjd
2879185029Spjd	if (efi_write(fd, vtoc) != 0) {
2880185029Spjd		/*
2881185029Spjd		 * Some block drivers (like pcata) may not support EFI
2882185029Spjd		 * GPT labels.  Print out a helpful error message
2883185029Spjd		 * directing the user to manually label the disk and
2884185029Spjd		 * give a specific slice.
2885185029Spjd		 */
2886185029Spjd		(void) close(fd);
2887185029Spjd		efi_free(vtoc);
2888168404Spjd
2889185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2890185029Spjd		    "try using fdisk(1M) and then provide a specific slice"));
2891185029Spjd		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2892168404Spjd	}
2893185029Spjd
2894185029Spjd	(void) close(fd);
2895185029Spjd	efi_free(vtoc);
2896185029Spjd#endif /* sun */
2897168404Spjd	return (0);
2898168404Spjd}
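
/*
 * Example usage (illustrative): write an EFI label onto a whole disk
 * before handing it to the pool; the name passed in is the short disk
 * name with any leading /dev path already stripped.
 *
 *	if (zpool_label_disk(hdl, zhp, "c1t0d0") != 0)
 *		return (-1);
 */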
2899168404Spjd
2900185029Spjdstatic boolean_t
2901185029Spjdsupported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
2902168404Spjd{
2903185029Spjd	char *type;
2904185029Spjd	nvlist_t **child;
2905185029Spjd	uint_t children, c;
2906185029Spjd
2907185029Spjd	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
2908185029Spjd	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2909185029Spjd	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
2910185029Spjd	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
2911185029Spjd	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
2912185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2913185029Spjd		    "vdev type '%s' is not supported"), type);
2914185029Spjd		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
2915185029Spjd		return (B_FALSE);
2916185029Spjd	}
2917185029Spjd	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
2918185029Spjd	    &child, &children) == 0) {
2919185029Spjd		for (c = 0; c < children; c++) {
2920185029Spjd			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
2921185029Spjd				return (B_FALSE);
2922185029Spjd		}
2923185029Spjd	}
2924185029Spjd	return (B_TRUE);
2925168404Spjd}
2926168404Spjd
2927185029Spjd/*
2928185029Spjd * Check whether this zvol is allowable for use as a dump device; returns
2929185029Spjd * zero if it is, > 0 if it isn't, and < 0 if it isn't a zvol.
2930185029Spjd */
2931168404Spjdint
2932185029Spjdzvol_check_dump_config(char *arg)
2933168404Spjd{
2934185029Spjd	zpool_handle_t *zhp = NULL;
2935185029Spjd	nvlist_t *config, *nvroot;
2936185029Spjd	char *p, *volname;
2937185029Spjd	nvlist_t **top;
2938185029Spjd	uint_t toplevels;
2939185029Spjd	libzfs_handle_t *hdl;
2940185029Spjd	char errbuf[1024];
2941185029Spjd	char poolname[ZPOOL_MAXNAMELEN];
2942185029Spjd	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
2943185029Spjd	int ret = 1;
2944168404Spjd
2945185029Spjd	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
2946168404Spjd		return (-1);
2947185029Spjd	}
2948168404Spjd
2949185029Spjd	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2950185029Spjd	    "dump is not supported on device '%s'"), arg);
2951168404Spjd
2952185029Spjd	if ((hdl = libzfs_init()) == NULL)
2953185029Spjd		return (1);
2954185029Spjd	libzfs_print_on_error(hdl, B_TRUE);
2955168404Spjd
2956185029Spjd	volname = arg + pathlen;
2957185029Spjd
2958185029Spjd	/* check the configuration of the pool */
2959185029Spjd	if ((p = strchr(volname, '/')) == NULL) {
2960185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2961185029Spjd		    "malformed dataset name"));
2962185029Spjd		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
2963185029Spjd		goto out;
2964185029Spjd	} else if (p - volname >= ZFS_MAXNAMELEN) {
2965185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2966185029Spjd		    "dataset name is too long"));
2967185029Spjd		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
2968185029Spjd		goto out;
2969185029Spjd	} else {
2970185029Spjd		(void) strncpy(poolname, volname, p - volname);
2971185029Spjd		poolname[p - volname] = '\0';
2972168404Spjd	}
2973168404Spjd
2974185029Spjd	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
2975185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2976185029Spjd		    "could not open pool '%s'"), poolname);
2977185029Spjd		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
2978185029Spjd		goto out;
2979185029Spjd	}
2980185029Spjd	config = zpool_get_config(zhp, NULL);
2981185029Spjd	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2982185029Spjd	    &nvroot) != 0) {
2983185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2984185029Spjd		    "could not obtain vdev configuration for '%s'"), poolname);
2985185029Spjd		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
2986185029Spjd		goto out;
2987185029Spjd	}
2988185029Spjd
2989185029Spjd	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2990185029Spjd	    &top, &toplevels) == 0);
2991185029Spjd	if (toplevels != 1) {
2992185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2993185029Spjd		    "'%s' has multiple top level vdevs"), poolname);
2994185029Spjd		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
2995185029Spjd		goto out;
2996185029Spjd	}
2997185029Spjd
2998185029Spjd	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
2999185029Spjd		goto out;
3000185029Spjd	}
3001185029Spjd	ret = 0;
3002185029Spjd
3003185029Spjdout:
3004185029Spjd	if (zhp)
3005185029Spjd		zpool_close(zhp);
3006185029Spjd	libzfs_fini(hdl);
3007185029Spjd	return (ret);
3008168404Spjd}
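
/*
 * Example usage (illustrative; the pool and volume names are
 * hypothetical): validate a zvol as a dump device, as dumpadm(1M) would.
 *
 *	if (zvol_check_dump_config("/dev/zvol/dsk/tank/dump") == 0)
 *		(void) printf("usable as a dump device\n");
 */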
3009