libzfs_pool.c revision 207670
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <dirent.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <zone.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio.h>
#include <umem.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

/*
 * ====================================================================
 *   zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
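
/*
 * Example (illustrative sketch, not part of the original code): a typical
 * consumer reads a numeric pool property and its source like this, where
 * 'zhp' is assumed to come from zpool_open():
 *
 *	zprop_source_t src;
 *	uint64_t version;
 *
 *	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
 *	if (src == ZPROP_SRC_DEFAULT)
 *		... the pool has no explicit setting for this property ...
 */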

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}
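
/*
 * Example (illustrative sketch): combining the state and aux fields of a
 * vdev_stat_t, as the status-reporting callers below do.  'vs' is assumed
 * to have been pulled out of a ZPOOL_CONFIG_STATS array:
 *
 *	char *health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
 *	(void) printf("state: %s\n", health);
 */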

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		if (prop == ZPOOL_PROP_NAME)
			(void) strlcpy(buf, zpool_get_name(zhp), len);
		else if (prop == ZPOOL_PROP_HEALTH)
			(void) strlcpy(buf, "FAULTED", len);
		else
			(void) strlcpy(buf, "-", len);
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_USED:
		case ZPOOL_PROP_AVAILABLE:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		default:
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
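
/*
 * Example (illustrative sketch): fetching the formatted health string into
 * a caller-supplied buffer:
 *
 *	char health[ZFS_MAXPROPLEN];
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health,
 *	    sizeof (health), NULL) == 0)
 *		(void) printf("%s\n", health);
 */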

/*
 * Check that the bootfs name has the same pool name as the pool it is
 * being set on.  'bootfs' is assumed to already be a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
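
/*
 * Example (illustrative): for a pool named "tank",
 *
 *	bootfs_name_valid("tank", "tank")		returns B_TRUE
 *	bootfs_name_valid("tank", "tank/root")		returns B_TRUE
 *	bootfs_name_valid("tank", "tankx/root")		returns B_FALSE
 */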

#if defined(sun)
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}
#endif

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc.) if they
 * are specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value must be a dataset name,
			 * and that dataset must reside in the same pool the
			 * property is being set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(sun)
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
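
/*
 * Example (illustrative sketch): disabling the cache file for a pool,
 * which the validation above accepts as the literal string "none".  On
 * failure the error has already been recorded against the handle:
 *
 *	if (zpool_set_prop(zhp, "cachefile", "none") != 0)
 *		... report via libzfs_error_description() or similar ...
 */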

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Validate the given pool name, optionally recording an extended error
 * message via 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
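
/*
 * Example (illustrative): at create/import time (isopen == B_FALSE) the
 * reserved-prefix check above rejects names like "mirror", "raidz1" and
 * "spare0", while an ordinary name passes:
 *
 *	zpool_name_valid(hdl, B_FALSE, "tank")		returns B_TRUE
 *	zpool_name_valid(hdl, B_FALSE, "mirror")	returns B_FALSE
 */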

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools
 * (because the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}
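
/*
 * Example (illustrative sketch): the iteration pattern zpool_open_silent()
 * is meant for, where a stale cache entry yields *ret == NULL rather than
 * an error:
 *
 *	zpool_handle_t *zhp;
 *
 *	if (zpool_open_silent(hdl, name, &zhp) != 0)
 *		return (-1);		// real failure
 *	if (zhp == NULL)
 *		continue;		// pool vanished; skip it
 */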

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	char *altroot;
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
	    &altroot) == 0) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool cannot have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
	return (zpool_export_common(zhp, force, B_FALSE));
}

int
zpool_export_force(zpool_handle_t *zhp)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
}
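
/*
 * Example (illustrative): a normal export versus a hard-forced one.  The
 * latter sets both 'force' and 'hardforce' through the common path above:
 *
 *	(void) zpool_export(zhp, B_FALSE);	// may fail if datasets busy
 *	(void) zpool_export_force(zhp);		// force + hardforce
 */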

/*
 * zpool_import() is a contracted interface.  It should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
	if (props)
		nvlist_free(props);
	return (ret);
}
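
/*
 * Example (illustrative sketch): importing under an alternate root, with
 * 'config' assumed to have been obtained from zpool_find_import():
 *
 *	char altroot[] = "/mnt";
 *
 *	if (zpool_import(hdl, config, NULL, altroot) != 0)
 *		... error already recorded against 'hdl' ...
 */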

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, boolean_t importfaulted)
{
	zfs_cmd_t zc = { 0 };
	char *thename;
	char *origname;
	int ret;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, B_TRUE, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = (uint64_t)importfaulted;
	ret = 0;
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		char desc[1024];
		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, desc);
		}

		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
			ret = -1;
		} else if (zhp != NULL) {
			ret = zpool_create_zvol_links(zhp);
			zpool_close(zhp);
		}
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}

/*
 * Scrub the pool.
 */
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = type;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

	if (errno == EBUSY)
		return (zfs_error(hdl, EZFS_RESILVERING, msg));
	else
		return (zpool_standard_error(hdl, errno, msg));
}
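
/*
 * Example (illustrative sketch): kicking off a full scrub; passing
 * POOL_SCRUB_NONE instead stops one that is in progress:
 *
 *	if (zpool_scrub(zhp, POOL_SCRUB_EVERYTHING) != 0)
 *		... EBUSY maps to EZFS_RESILVERING above ...
 */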

/*
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare, but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	uint64_t theguid, present;
	char *path;
	uint64_t wholedisk = 0;
	nvlist_t *ret;
	uint64_t is_log;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);

	if (search == NULL &&
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
		/*
		 * If the device has never been present since import, the only
		 * reliable way to match the vdev is by GUID.
		 */
		if (theguid == guid)
			return (nv);
	} else if (search != NULL &&
	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (wholedisk) {
			/*
			 * For whole disks, the internal path has 's0', but the
			 * path passed in by the user doesn't.
			 */
			if (strlen(search) == strlen(path) - 2 &&
			    strncmp(search, path, strlen(search)) == 0)
				return (nv);
		} else if (strcmp(search, path) == 0) {
			return (nv);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs.  So we always look up the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char buf[MAXPATHLEN];
	const char *search;
	char *end;
	nvlist_t *nvroot;
	uint64_t guid;

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		search = NULL;
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path);
		search = buf;
	} else {
		search = path;
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare,
	    l2cache, log));
}
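
/*
 * Example (illustrative sketch): resolving a device path (or GUID string)
 * to its config nvlist and classifying it; "da0" is a hypothetical device:
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt;
 *
 *	if ((tgt = zpool_find_vdev(zhp, "da0", &spare, &l2cache,
 *	    &log)) == NULL)
 *		... no such device in this pool ...
 */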

static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}
1448185029Spjd
1449168404Spjd/*
1450185029Spjd * Get phys_path for a root pool
1451185029Spjd * Return 0 on success; non-zeron on failure.
1452168404Spjd */
1453185029Spjdint
1454185029Spjdzpool_get_physpath(zpool_handle_t *zhp, char *physpath)
1455185029Spjd{
1456185029Spjd	char bootfs[ZPOOL_MAXNAMELEN];
1457185029Spjd	nvlist_t *vdev_root;
1458185029Spjd	nvlist_t **child;
1459185029Spjd	uint_t count;
1460185029Spjd	int i;
1461185029Spjd
1462185029Spjd	/*
1463185029Spjd	 * Make sure this is a root pool, as phys_path doesn't mean
1464185029Spjd	 * anything to a non-root pool.
1465185029Spjd	 */
1466185029Spjd	if (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
1467185029Spjd	    sizeof (bootfs), NULL) != 0)
1468185029Spjd		return (-1);
1469185029Spjd
1470185029Spjd	verify(nvlist_lookup_nvlist(zhp->zpool_config,
1471185029Spjd	    ZPOOL_CONFIG_VDEV_TREE, &vdev_root) == 0);
1472185029Spjd
1473185029Spjd	if (nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
1474185029Spjd	    &child, &count) != 0)
1475185029Spjd		return (-2);
1476185029Spjd
1477185029Spjd	for (i = 0; i < count; i++) {
1478185029Spjd		nvlist_t **child2;
1479185029Spjd		uint_t count2;
1480185029Spjd		char *type;
1481185029Spjd		char *tmppath;
1482185029Spjd		int j;
1483185029Spjd
1484185029Spjd		if (nvlist_lookup_string(child[i], ZPOOL_CONFIG_TYPE, &type)
1485185029Spjd		    != 0)
1486185029Spjd			return (-3);
1487185029Spjd
1488185029Spjd		if (strcmp(type, VDEV_TYPE_DISK) == 0) {
1489185029Spjd			if (!vdev_online(child[i]))
1490185029Spjd				return (-8);
1491185029Spjd			verify(nvlist_lookup_string(child[i],
1492185029Spjd			    ZPOOL_CONFIG_PHYS_PATH, &tmppath) == 0);
1493185029Spjd			(void) strncpy(physpath, tmppath, strlen(tmppath));
1494185029Spjd		} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0) {
1495185029Spjd			if (nvlist_lookup_nvlist_array(child[i],
1496185029Spjd			    ZPOOL_CONFIG_CHILDREN, &child2, &count2) != 0)
1497185029Spjd				return (-4);
1498185029Spjd
1499185029Spjd			for (j = 0; j < count2; j++) {
1500185029Spjd				if (!vdev_online(child2[j]))
1501185029Spjd					return (-8);
1502185029Spjd				if (nvlist_lookup_string(child2[j],
1503185029Spjd				    ZPOOL_CONFIG_PHYS_PATH, &tmppath) != 0)
1504185029Spjd					return (-5);
1505185029Spjd
1506185029Spjd				if ((strlen(physpath) + strlen(tmppath)) >
1507185029Spjd				    MAXNAMELEN)
1508185029Spjd					return (-6);
1509185029Spjd
1510185029Spjd				if (strlen(physpath) == 0) {
1511185029Spjd					(void) strncpy(physpath, tmppath,
1512185029Spjd					    strlen(tmppath));
1513185029Spjd				} else {
1514185029Spjd					(void) strcat(physpath, " ");
1515185029Spjd					(void) strcat(physpath, tmppath);
1516185029Spjd				}
1517185029Spjd			}
1518185029Spjd		} else {
1519185029Spjd			return (-7);
1520185029Spjd		}
1521185029Spjd	}
1522185029Spjd
1523185029Spjd	return (0);
1524185029Spjd}
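
/*
 * Illustrative usage sketch for zpool_get_physpath(), kept under #if 0.
 * The pool name "rpool" is hypothetical, and the buffer contract (an
 * initially empty buffer of MAXNAMELEN bytes) is inferred from the
 * bounds check above.
 */
#if 0
static void
example_print_physpath(libzfs_handle_t *hdl)
{
	char physpath[MAXNAMELEN] = { 0 };	/* must start empty */
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(hdl, "rpool")) == NULL)
		return;
	if (zpool_get_physpath(zhp, physpath) == 0)
		(void) printf("phys_path: %s\n", physpath);
	zpool_close(zhp);
}
#endif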
1525185029Spjd
1526185029Spjd/*
1527185029Spjd * Returns TRUE if the given guid corresponds to the given type.
1528185029Spjd * This is used to check for hot spares (INUSE or not), and level 2 cache
1529185029Spjd * devices.
1530185029Spjd */
1531168404Spjdstatic boolean_t
1532185029Spjdis_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
1533168404Spjd{
1534185029Spjd	uint64_t target_guid;
1535168404Spjd	nvlist_t *nvroot;
1536185029Spjd	nvlist_t **list;
1537185029Spjd	uint_t count;
1538168404Spjd	int i;
1539168404Spjd
1540168404Spjd	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1541168404Spjd	    &nvroot) == 0);
1542185029Spjd	if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
1543185029Spjd		for (i = 0; i < count; i++) {
1544185029Spjd			verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
1545185029Spjd			    &target_guid) == 0);
1546185029Spjd			if (guid == target_guid)
1547168404Spjd				return (B_TRUE);
1548168404Spjd		}
1549168404Spjd	}
1550168404Spjd
1551168404Spjd	return (B_FALSE);
1552168404Spjd}
1553168404Spjd
1554168404Spjd/*
1555185029Spjd * Bring the specified vdev online.  The 'flags' parameter is a set of the
1556185029Spjd * ZFS_ONLINE_* flags.
1557168404Spjd */
1558168404Spjdint
1559185029Spjdzpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
1560185029Spjd    vdev_state_t *newstate)
1561168404Spjd{
1562168404Spjd	zfs_cmd_t zc = { 0 };
1563168404Spjd	char msg[1024];
1564168404Spjd	nvlist_t *tgt;
1565185029Spjd	boolean_t avail_spare, l2cache;
1566168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1567168404Spjd
1568168404Spjd	(void) snprintf(msg, sizeof (msg),
1569168404Spjd	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
1570168404Spjd
1571168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1572185029Spjd	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1573185029Spjd	    NULL)) == NULL)
1574168404Spjd		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1575168404Spjd
1576168404Spjd	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1577168404Spjd
1578185029Spjd	if (avail_spare ||
1579185029Spjd	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1580168404Spjd		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1581168404Spjd
1582185029Spjd	zc.zc_cookie = VDEV_STATE_ONLINE;
1583185029Spjd	zc.zc_obj = flags;
1584168404Spjd
1585185029Spjd	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
1586185029Spjd		return (zpool_standard_error(hdl, errno, msg));
1587185029Spjd
1588185029Spjd	*newstate = zc.zc_cookie;
1589185029Spjd	return (0);
1590168404Spjd}
1591168404Spjd
1592168404Spjd/*
1593168404Spjd * Take the specified vdev offline
1594168404Spjd */
1595168404Spjdint
1596185029Spjdzpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
1597168404Spjd{
1598168404Spjd	zfs_cmd_t zc = { 0 };
1599168404Spjd	char msg[1024];
1600168404Spjd	nvlist_t *tgt;
1601185029Spjd	boolean_t avail_spare, l2cache;
1602168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1603168404Spjd
1604168404Spjd	(void) snprintf(msg, sizeof (msg),
1605168404Spjd	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
1606168404Spjd
1607168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1608185029Spjd	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1609185029Spjd	    NULL)) == NULL)
1610168404Spjd		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1611168404Spjd
1612168404Spjd	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1613168404Spjd
1614185029Spjd	if (avail_spare ||
1615185029Spjd	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1616168404Spjd		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1617168404Spjd
1618185029Spjd	zc.zc_cookie = VDEV_STATE_OFFLINE;
1619185029Spjd	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
1620168404Spjd
1621185029Spjd	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1622168404Spjd		return (0);
1623168404Spjd
1624168404Spjd	switch (errno) {
1625168404Spjd	case EBUSY:
1626168404Spjd
1627168404Spjd		/*
1628168404Spjd		 * There are no other replicas of this device.
1629168404Spjd		 */
1630168404Spjd		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1631168404Spjd
1632168404Spjd	default:
1633168404Spjd		return (zpool_standard_error(hdl, errno, msg));
1634168404Spjd	}
1635168404Spjd}
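
/*
 * Illustrative sketch pairing zpool_vdev_offline() and
 * zpool_vdev_online(), kept under #if 0.  The device name "c1t0d0" is
 * hypothetical and error reporting is elided.
 */
#if 0
static int
example_cycle_device(zpool_handle_t *zhp)
{
	vdev_state_t newstate;

	/* take the device offline temporarily (cleared by reboot) */
	if (zpool_vdev_offline(zhp, "c1t0d0", B_TRUE) != 0)
		return (-1);

	/* bring it back online with no special flags */
	if (zpool_vdev_online(zhp, "c1t0d0", 0, &newstate) != 0)
		return (-1);

	return (newstate == VDEV_STATE_ONLINE ? 0 : -1);
}
#endif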
1636168404Spjd
1637168404Spjd/*
1638185029Spjd * Mark the given vdev faulted.
1639185029Spjd */
1640185029Spjdint
1641185029Spjdzpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
1642185029Spjd{
1643185029Spjd	zfs_cmd_t zc = { 0 };
1644185029Spjd	char msg[1024];
1645185029Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1646185029Spjd
1647185029Spjd	(void) snprintf(msg, sizeof (msg),
1648185029Spjd	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
1649185029Spjd
1650185029Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1651185029Spjd	zc.zc_guid = guid;
1652185029Spjd	zc.zc_cookie = VDEV_STATE_FAULTED;
1653185029Spjd
1654185029Spjd	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1655185029Spjd		return (0);
1656185029Spjd
1657185029Spjd	switch (errno) {
1658185029Spjd	case EBUSY:
1659185029Spjd
1660185029Spjd		/*
1661185029Spjd		 * There are no other replicas of this device.
1662185029Spjd		 */
1663185029Spjd		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1664185029Spjd
1665185029Spjd	default:
1666185029Spjd		return (zpool_standard_error(hdl, errno, msg));
1667185029Spjd	}
1669185029Spjd}
1670185029Spjd
1671185029Spjd/*
1672185029Spjd * Mark the given vdev degraded.
1673185029Spjd */
1674185029Spjdint
1675185029Spjdzpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
1676185029Spjd{
1677185029Spjd	zfs_cmd_t zc = { 0 };
1678185029Spjd	char msg[1024];
1679185029Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1680185029Spjd
1681185029Spjd	(void) snprintf(msg, sizeof (msg),
1682185029Spjd	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
1683185029Spjd
1684185029Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1685185029Spjd	zc.zc_guid = guid;
1686185029Spjd	zc.zc_cookie = VDEV_STATE_DEGRADED;
1687185029Spjd
1688185029Spjd	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1689185029Spjd		return (0);
1690185029Spjd
1691185029Spjd	return (zpool_standard_error(hdl, errno, msg));
1692185029Spjd}
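
/*
 * Illustrative sketch for the GUID-based state changers, kept under
 * #if 0.  The device name "c1t0d0" is hypothetical; the GUID is
 * recovered from the vdev's config nvlist via zpool_find_vdev().
 */
#if 0
static int
example_fault_device(zpool_handle_t *zhp)
{
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	uint64_t guid;

	if ((tgt = zpool_find_vdev(zhp, "c1t0d0", &avail_spare,
	    &l2cache, NULL)) == NULL)
		return (-1);

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
	return (zpool_vdev_fault(zhp, guid));
}
#endif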
1693185029Spjd
1694185029Spjd/*
1695168404Spjd * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1696168404Spjd * a hot spare.
1697168404Spjd */
1698168404Spjdstatic boolean_t
1699168404Spjdis_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
1700168404Spjd{
1701168404Spjd	nvlist_t **child;
1702168404Spjd	uint_t c, children;
1703168404Spjd	char *type;
1704168404Spjd
1705168404Spjd	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
1706168404Spjd	    &children) == 0) {
1707168404Spjd		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
1708168404Spjd		    &type) == 0);
1709168404Spjd
1710168404Spjd		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
1711168404Spjd		    children == 2 && child[which] == tgt)
1712168404Spjd			return (B_TRUE);
1713168404Spjd
1714168404Spjd		for (c = 0; c < children; c++)
1715168404Spjd			if (is_replacing_spare(child[c], tgt, which))
1716168404Spjd				return (B_TRUE);
1717168404Spjd	}
1718168404Spjd
1719168404Spjd	return (B_FALSE);
1720168404Spjd}
1721168404Spjd
1722168404Spjd/*
1723168404Spjd * Attach new_disk (fully described by nvroot) to old_disk.
1724185029Spjd * If 'replacing' is specified, the new disk will replace the old one.
1725168404Spjd */
1726168404Spjdint
1727168404Spjdzpool_vdev_attach(zpool_handle_t *zhp,
1728168404Spjd    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
1729168404Spjd{
1730168404Spjd	zfs_cmd_t zc = { 0 };
1731168404Spjd	char msg[1024];
1732168404Spjd	int ret;
1733168404Spjd	nvlist_t *tgt;
1734185029Spjd	boolean_t avail_spare, l2cache, islog;
1735168404Spjd	uint64_t val;
1736185029Spjd	char *path, *newname;
1737168404Spjd	nvlist_t **child;
1738168404Spjd	uint_t children;
1739168404Spjd	nvlist_t *config_root;
1740168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1741168404Spjd
1742168404Spjd	if (replacing)
1743168404Spjd		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1744168404Spjd		    "cannot replace %s with %s"), old_disk, new_disk);
1745168404Spjd	else
1746168404Spjd		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1747168404Spjd		    "cannot attach %s to %s"), new_disk, old_disk);
1748168404Spjd
1749168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1750185029Spjd	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
1751185029Spjd	    &islog)) == 0)
1752168404Spjd		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1753168404Spjd
1754168404Spjd	if (avail_spare)
1755168404Spjd		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1756168404Spjd
1757185029Spjd	if (l2cache)
1758185029Spjd		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1759185029Spjd
1760168404Spjd	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1761168404Spjd	zc.zc_cookie = replacing;
1762168404Spjd
1763168404Spjd	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1764168404Spjd	    &child, &children) != 0 || children != 1) {
1765168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1766168404Spjd		    "new device must be a single disk"));
1767168404Spjd		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1768168404Spjd	}
1769168404Spjd
1770168404Spjd	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1771168404Spjd	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1772168404Spjd
1773185029Spjd	if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)
1774185029Spjd		return (-1);
1775185029Spjd
1776168404Spjd	/*
1777168404Spjd	 * If the target is a hot spare that has been swapped in, we can only
1778168404Spjd	 * replace it with another hot spare.
1779168404Spjd	 */
1780168404Spjd	if (replacing &&
1781168404Spjd	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1782185029Spjd	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
1783185029Spjd	    NULL) == NULL || !avail_spare) &&
1784185029Spjd	    is_replacing_spare(config_root, tgt, 1)) {
1785168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1786168404Spjd		    "can only be replaced by another hot spare"));
1787185029Spjd		free(newname);
1788168404Spjd		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1789168404Spjd	}
1790168404Spjd
1791168404Spjd	/*
1792168404Spjd	 * If we are attempting to replace a spare, it cannot be applied to an
1793168404Spjd	 * already spared device.
1794168404Spjd	 */
1795168404Spjd	if (replacing &&
1796168404Spjd	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1797185029Spjd	    zpool_find_vdev(zhp, newname, &avail_spare,
1798185029Spjd	    &l2cache, NULL) != NULL && avail_spare &&
1799168404Spjd	    is_replacing_spare(config_root, tgt, 0)) {
1800168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1801168404Spjd		    "device has already been replaced with a spare"));
1802185029Spjd		free(newname);
1803168404Spjd		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1804168404Spjd	}
1805168404Spjd
1806185029Spjd	free(newname);
1807185029Spjd
1808185029Spjd	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1809168404Spjd		return (-1);
1810168404Spjd
1811185029Spjd	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
1812168404Spjd
1813168404Spjd	zcmd_free_nvlists(&zc);
1814168404Spjd
1815168404Spjd	if (ret == 0)
1816168404Spjd		return (0);
1817168404Spjd
1818168404Spjd	switch (errno) {
1819168404Spjd	case ENOTSUP:
1820168404Spjd		/*
1821168404Spjd		 * Can't attach to or replace this type of vdev.
1822168404Spjd		 */
1823185029Spjd		if (replacing) {
1824185029Spjd			if (islog)
1825185029Spjd				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1826185029Spjd				    "cannot replace a log with a spare"));
1827185029Spjd			else
1828185029Spjd				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1829185029Spjd				    "cannot replace a replacing device"));
1830185029Spjd		} else {
1831168404Spjd			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1832168404Spjd			    "can only attach to mirrors and top-level "
1833168404Spjd			    "disks"));
1834185029Spjd		}
1835168404Spjd		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1836168404Spjd		break;
1837168404Spjd
1838168404Spjd	case EINVAL:
1839168404Spjd		/*
1840168404Spjd		 * The new device must be a single disk.
1841168404Spjd		 */
1842168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1843168404Spjd		    "new device must be a single disk"));
1844168404Spjd		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1845168404Spjd		break;
1846168404Spjd
1847168404Spjd	case EBUSY:
1848168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1849168404Spjd		    new_disk);
1850168404Spjd		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1851168404Spjd		break;
1852168404Spjd
1853168404Spjd	case EOVERFLOW:
1854168404Spjd		/*
1855168404Spjd		 * The new device is too small.
1856168404Spjd		 */
1857168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1858168404Spjd		    "device is too small"));
1859168404Spjd		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1860168404Spjd		break;
1861168404Spjd
1862168404Spjd	case EDOM:
1863168404Spjd		/*
1864168404Spjd		 * The new device has a different alignment requirement.
1865168404Spjd		 */
1866168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1867168404Spjd		    "devices have different sector alignment"));
1868168404Spjd		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1869168404Spjd		break;
1870168404Spjd
1871168404Spjd	case ENAMETOOLONG:
1872168404Spjd		/*
1873168404Spjd		 * The resulting top-level vdev spec won't fit in the label.
1874168404Spjd		 */
1875168404Spjd		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1876168404Spjd		break;
1877168404Spjd
1878168404Spjd	default:
1879168404Spjd		(void) zpool_standard_error(hdl, errno, msg);
1880168404Spjd	}
1881168404Spjd
1882168404Spjd	return (-1);
1883168404Spjd}
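
/*
 * Illustrative sketch of the 'nvroot' argument expected by
 * zpool_vdev_attach(), kept under #if 0.  The device paths are
 * hypothetical, error checking is elided, and a real caller would
 * typically build this tree with higher-level helpers (the kernel may
 * require additional pairs, e.g. ZPOOL_CONFIG_WHOLE_DISK).
 */
#if 0
static int
example_replace_disk(zpool_handle_t *zhp)
{
	nvlist_t *nvroot, *disk;
	int ret;

	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
	    "/dev/dsk/c2t0d0s0") == 0);

	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) == 0);

	/* replace c1t0d0s0 with c2t0d0s0 */
	ret = zpool_vdev_attach(zhp, "c1t0d0s0", "c2t0d0s0", nvroot, 1);

	nvlist_free(disk);
	nvlist_free(nvroot);
	return (ret);
}
#endif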
1884168404Spjd
1885168404Spjd/*
1886168404Spjd * Detach the specified device.
1887168404Spjd */
1888168404Spjdint
1889168404Spjdzpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1890168404Spjd{
1891168404Spjd	zfs_cmd_t zc = { 0 };
1892168404Spjd	char msg[1024];
1893168404Spjd	nvlist_t *tgt;
1894185029Spjd	boolean_t avail_spare, l2cache;
1895168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1896168404Spjd
1897168404Spjd	(void) snprintf(msg, sizeof (msg),
1898168404Spjd	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1899168404Spjd
1900168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1901185029Spjd	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1902185029Spjd	    NULL)) == 0)
1903168404Spjd		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1904168404Spjd
1905168404Spjd	if (avail_spare)
1906168404Spjd		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1907168404Spjd
1908185029Spjd	if (l2cache)
1909185029Spjd		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1910185029Spjd
1911168404Spjd	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1912168404Spjd
1913185029Spjd	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1914168404Spjd		return (0);
1915168404Spjd
1916168404Spjd	switch (errno) {
1917168404Spjd
1918168404Spjd	case ENOTSUP:
1919168404Spjd		/*
1920168404Spjd		 * Can't detach from this type of vdev.
1921168404Spjd		 */
1922168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1923168404Spjd		    "applicable to mirror and replacing vdevs"));
1924168404Spjd		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
1925168404Spjd		break;
1926168404Spjd
1927168404Spjd	case EBUSY:
1928168404Spjd		/*
1929168404Spjd		 * There are no other replicas of this device.
1930168404Spjd		 */
1931168404Spjd		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1932168404Spjd		break;
1933168404Spjd
1934168404Spjd	default:
1935168404Spjd		(void) zpool_standard_error(hdl, errno, msg);
1936168404Spjd	}
1937168404Spjd
1938168404Spjd	return (-1);
1939168404Spjd}
1940168404Spjd
1941168404Spjd/*
1942185029Spjd * Remove the given device.  Currently, this is supported only for hot spares
1943185029Spjd * and level 2 cache devices.
1944168404Spjd */
1945168404Spjdint
1946168404Spjdzpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1947168404Spjd{
1948168404Spjd	zfs_cmd_t zc = { 0 };
1949168404Spjd	char msg[1024];
1950168404Spjd	nvlist_t *tgt;
1951185029Spjd	boolean_t avail_spare, l2cache;
1952168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1953168404Spjd
1954168404Spjd	(void) snprintf(msg, sizeof (msg),
1955168404Spjd	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1956168404Spjd
1957168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1958185029Spjd	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1959185029Spjd	    NULL)) == 0)
1960168404Spjd		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1961168404Spjd
1962185029Spjd	if (!avail_spare && !l2cache) {
1963168404Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1964185029Spjd		    "only inactive hot spares or cache devices "
1965185029Spjd		    "can be removed"));
1966168404Spjd		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1967168404Spjd	}
1968168404Spjd
1969168404Spjd	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1970168404Spjd
1971185029Spjd	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1972168404Spjd		return (0);
1973168404Spjd
1974168404Spjd	return (zpool_standard_error(hdl, errno, msg));
1975168404Spjd}
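
/*
 * Illustrative usage (hypothetical device name; "c3t0d0" would have to
 * be an inactive hot spare or cache device):
 *
 *	(void) zpool_vdev_remove(zhp, "c3t0d0");
 */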
1976168404Spjd
1977168404Spjd/*
1978168404Spjd * Clear the errors for the pool, or the particular device if specified.
1979168404Spjd */
1980168404Spjdint
1981168404Spjdzpool_clear(zpool_handle_t *zhp, const char *path)
1982168404Spjd{
1983168404Spjd	zfs_cmd_t zc = { 0 };
1984168404Spjd	char msg[1024];
1985168404Spjd	nvlist_t *tgt;
1986185029Spjd	boolean_t avail_spare, l2cache;
1987168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
1988168404Spjd
1989168404Spjd	if (path)
1990168404Spjd		(void) snprintf(msg, sizeof (msg),
1991168404Spjd		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1992168404Spjd		    path);
1993168404Spjd	else
1994168404Spjd		(void) snprintf(msg, sizeof (msg),
1995168404Spjd		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1996168404Spjd		    zhp->zpool_name);
1997168404Spjd
1998168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1999168404Spjd	if (path) {
2000185029Spjd		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
2001185029Spjd		    &l2cache, NULL)) == 0)
2002168404Spjd			return (zfs_error(hdl, EZFS_NODEVICE, msg));
2003168404Spjd
2004185029Spjd		/*
2005185029Spjd		 * Don't allow error clearing for hot spares.  Do allow
2006185029Spjd		 * error clearing for l2cache devices.
2007185029Spjd		 */
2008168404Spjd		if (avail_spare)
2009168404Spjd			return (zfs_error(hdl, EZFS_ISSPARE, msg));
2010168404Spjd
2011168404Spjd		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2012168404Spjd		    &zc.zc_guid) == 0);
2013168404Spjd	}
2014168404Spjd
2015185029Spjd	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
2016185029Spjd		return (0);
2017185029Spjd
2018185029Spjd	return (zpool_standard_error(hdl, errno, msg));
2019185029Spjd}
2020185029Spjd
2021185029Spjd/*
2022185029Spjd * Similar to zpool_clear(), but takes a GUID (used by fmd).
2023185029Spjd */
2024185029Spjdint
2025185029Spjdzpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2026185029Spjd{
2027185029Spjd	zfs_cmd_t zc = { 0 };
2028185029Spjd	char msg[1024];
2029185029Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
2030185029Spjd
2031185029Spjd	(void) snprintf(msg, sizeof (msg),
2032185029Spjd	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
2033185029Spjd	    (u_longlong_t)guid);
2034185029Spjd
2035185029Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2036185029Spjd	zc.zc_guid = guid;
2037185029Spjd
2038168404Spjd	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2039168404Spjd		return (0);
2040168404Spjd
2041168404Spjd	return (zpool_standard_error(hdl, errno, msg));
2042168404Spjd}
2043168404Spjd
2044168404Spjd/*
2045168404Spjd * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
2046168404Spjd * hierarchy.
2047168404Spjd */
2048168404Spjdint
2049168404Spjdzpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
2050168404Spjd    void *data)
2051168404Spjd{
2052168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
2053168404Spjd	char (*paths)[MAXPATHLEN];
2054168404Spjd	char path[MAXPATHLEN];
2055168404Spjd	size_t size = 4;
2056168404Spjd	int curr, fd, base, ret = 0;
2057168404Spjd	DIR *dirp;
2058168404Spjd	struct dirent *dp;
2059168404Spjd	struct stat st;
2060168404Spjd
2061168404Spjd	if ((base = open(ZVOL_FULL_DEV_DIR, O_RDONLY)) < 0)
2062168404Spjd		return (errno == ENOENT ? 0 : -1);
2063168404Spjd
2064168404Spjd	(void) snprintf(path, sizeof (path), "%s/%s", ZVOL_FULL_DEV_DIR,
2065168404Spjd	    zhp->zpool_name);
2066168404Spjd	if (stat(path, &st) != 0) {
2067168404Spjd		int err = errno;
2068168404Spjd		(void) close(base);
2069168404Spjd		return (err == ENOENT ? 0 : -1);
2070168404Spjd	}
2071168404Spjd
2072168404Spjd	/*
2073168404Spjd	 * Oddly this wasn't a directory -- ignore that failure since we
2074168404Spjd	 * know there are no links lower in the (non-existent) hierarchy.
2075168404Spjd	 */
2076168404Spjd	if (!S_ISDIR(st.st_mode)) {
2077168404Spjd		(void) close(base);
2078168404Spjd		return (0);
2079168404Spjd	}
2080168404Spjd
2081168404Spjd	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
2082168404Spjd		(void) close(base);
2083168404Spjd		return (-1);
2084168404Spjd	}
2085168404Spjd
2086168404Spjd	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
2087168404Spjd	curr = 0;
2088168404Spjd
2089168404Spjd	while (curr >= 0) {
2090168404Spjd		(void) snprintf(path, sizeof (path), "%s/%s", ZVOL_FULL_DEV_DIR,
2091168404Spjd		    paths[curr]);
2092168404Spjd		if (lstat(path, &st) != 0)
2093168404Spjd			goto err;
2094168404Spjd
2095168404Spjd		if (S_ISDIR(st.st_mode)) {
2096168404Spjd			if ((dirp = opendir(path)) == NULL) {
2097168404Spjd				goto err;
2098168404Spjd			}
2099168404Spjd
2100168404Spjd			while ((dp = readdir(dirp)) != NULL) {
2101168404Spjd				if (dp->d_name[0] == '.')
2102168404Spjd					continue;
2103168404Spjd
2104168404Spjd				if (curr + 1 == size) {
2105168404Spjd					paths = zfs_realloc(hdl, paths,
2106168404Spjd					    size * sizeof (paths[0]),
2107168404Spjd					    size * 2 * sizeof (paths[0]));
2108168404Spjd					if (paths == NULL) {
2109168404Spjd						(void) closedir(dirp);
2110168404Spjd						goto err;
2111168404Spjd					}
2112168404Spjd
2113168404Spjd					size *= 2;
2114168404Spjd				}
2115168404Spjd
2116168404Spjd				(void) strlcpy(paths[curr + 1], paths[curr],
2117168404Spjd				    sizeof (paths[curr + 1]));
2118168404Spjd				(void) strlcat(paths[curr], "/",
2119168404Spjd				    sizeof (paths[curr]));
2120168404Spjd				(void) strlcat(paths[curr], dp->d_name,
2121168404Spjd				    sizeof (paths[curr]));
2122168404Spjd				curr++;
2123168404Spjd			}
2124168404Spjd
2125168404Spjd			(void) closedir(dirp);
2126168404Spjd
2127168404Spjd		} else {
2128168404Spjd			if ((ret = cb(paths[curr], data)) != 0)
2129168404Spjd				break;
2130168404Spjd		}
2131168404Spjd
2132168404Spjd		curr--;
2133168404Spjd	}
2134168404Spjd
2135168404Spjd	free(paths);
2136168404Spjd	(void) close(base);
2137168404Spjd
2138168404Spjd	return (ret);
2139168404Spjd
2140168404Spjderr:
2141168404Spjd	free(paths);
2142168404Spjd	(void) close(base);
2143168404Spjd	return (-1);
2144168404Spjd}
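
/*
 * Illustrative callback for zpool_iter_zvol(), kept under #if 0: print
 * each zvol name.  Returning non-zero from the callback stops the walk
 * early, and that value is passed back to the caller.
 */
#if 0
static int
example_print_zvol(const char *dataset, void *data)
{
	(void) printf("zvol: %s\n", dataset);
	return (0);
}

/* ... ret = zpool_iter_zvol(zhp, example_print_zvol, NULL); ... */
#endif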
2145168404Spjd
2146168404Spjdtypedef struct zvol_cb {
2147168404Spjd	zpool_handle_t *zcb_pool;
2148168404Spjd	boolean_t zcb_create;
2149168404Spjd} zvol_cb_t;
2150168404Spjd
2151168404Spjd/*ARGSUSED*/
2152168404Spjdstatic int
2153168404Spjddo_zvol_create(zfs_handle_t *zhp, void *data)
2154168404Spjd{
2155185029Spjd	int ret = 0;
2156168404Spjd
2157185029Spjd	if (ZFS_IS_VOLUME(zhp)) {
2158168404Spjd		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
2159185029Spjd		ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
2160185029Spjd	}
2161168404Spjd
2162185029Spjd	if (ret == 0)
2163185029Spjd		ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);
2164168404Spjd
2165168404Spjd	zfs_close(zhp);
2166168404Spjd
2167168404Spjd	return (ret);
2168168404Spjd}
2169168404Spjd
2170168404Spjd/*
2171168404Spjd * Iterate over all zvols in the pool and make any necessary minor nodes.
2172168404Spjd */
2173168404Spjdint
2174168404Spjdzpool_create_zvol_links(zpool_handle_t *zhp)
2175168404Spjd{
2176168404Spjd	zfs_handle_t *zfp;
2177168404Spjd	int ret;
2178168404Spjd
2179168404Spjd	/*
2180168404Spjd	 * If the pool is unavailable, just return success.
2181168404Spjd	 */
2182168404Spjd	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
2183168404Spjd	    zhp->zpool_name)) == NULL)
2184168404Spjd		return (0);
2185168404Spjd
2186185029Spjd	ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);
2187168404Spjd
2188168404Spjd	zfs_close(zfp);
2189168404Spjd	return (ret);
2190168404Spjd}
2191168404Spjd
2192168404Spjdstatic int
2193168404Spjddo_zvol_remove(const char *dataset, void *data)
2194168404Spjd{
2195168404Spjd	zpool_handle_t *zhp = data;
2196168404Spjd
2197168404Spjd	return (zvol_remove_link(zhp->zpool_hdl, dataset));
2198168404Spjd}
2199168404Spjd
2200168404Spjd/*
2201168404Spjd * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
2202168404Spjd * by examining the /dev links so that a corrupted pool doesn't impede this
2203168404Spjd * operation.
2204168404Spjd */
2205168404Spjdint
2206168404Spjdzpool_remove_zvol_links(zpool_handle_t *zhp)
2207168404Spjd{
2208168404Spjd	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
2209168404Spjd}
2210168404Spjd
2211168404Spjd/*
2212168404Spjd * Convert from a devid string to a path.
2213168404Spjd */
2214168404Spjdstatic char *
2215168404Spjddevid_to_path(char *devid_str)
2216168404Spjd{
2217168404Spjd	ddi_devid_t devid;
2218168404Spjd	char *minor;
2219168404Spjd	char *path;
2220168404Spjd	devid_nmlist_t *list = NULL;
2221168404Spjd	int ret;
2222168404Spjd
2223168404Spjd	if (devid_str_decode(devid_str, &devid, &minor) != 0)
2224168404Spjd		return (NULL);
2225168404Spjd
2226168404Spjd	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2227168404Spjd
2228168404Spjd	devid_str_free(minor);
2229168404Spjd	devid_free(devid);
2230168404Spjd
2231168404Spjd	if (ret != 0)
2232168404Spjd		return (NULL);
2233168404Spjd
2234168404Spjd	if ((path = strdup(list[0].devname)) == NULL)
2235168404Spjd		return (NULL);
2236168404Spjd
2237168404Spjd	devid_free_nmlist(list);
2238168404Spjd
2239168404Spjd	return (path);
2240168404Spjd}
2241168404Spjd
2242168404Spjd/*
2243168404Spjd * Convert from a path to a devid string.
2244168404Spjd */
2245168404Spjdstatic char *
2246168404Spjdpath_to_devid(const char *path)
2247168404Spjd{
2248168404Spjd	int fd;
2249168404Spjd	ddi_devid_t devid;
2250168404Spjd	char *minor, *ret;
2251168404Spjd
2252168404Spjd	if ((fd = open(path, O_RDONLY)) < 0)
2253168404Spjd		return (NULL);
2254168404Spjd
2255168404Spjd	minor = NULL;
2256168404Spjd	ret = NULL;
2257168404Spjd	if (devid_get(fd, &devid) == 0) {
2258168404Spjd		if (devid_get_minor_name(fd, &minor) == 0)
2259168404Spjd			ret = devid_str_encode(devid, minor);
2260168404Spjd		if (minor != NULL)
2261168404Spjd			devid_str_free(minor);
2262168404Spjd		devid_free(devid);
2263168404Spjd	}
2264168404Spjd	(void) close(fd);
2265168404Spjd
2266168404Spjd	return (ret);
2267168404Spjd}
2268168404Spjd
2269168404Spjd/*
2270168404Spjd * Issue the necessary ioctl() to update the stored path value for the vdev.  We
2271168404Spjd * ignore any failure here, since a common case is for an unprivileged user to
2272168404Spjd * type 'zpool status', and we'll display the correct information anyway.
2273168404Spjd */
2274168404Spjdstatic void
2275168404Spjdset_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2276168404Spjd{
2277168404Spjd	zfs_cmd_t zc = { 0 };
2278168404Spjd
2279168404Spjd	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2280168404Spjd	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
2281168404Spjd	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2282168404Spjd	    &zc.zc_guid) == 0);
2283168404Spjd
2284168404Spjd	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
2285168404Spjd}
2286168404Spjd
2287168404Spjd/*
2288168404Spjd * Given a vdev, return the name to display in iostat.  If the vdev has a path,
2289168404Spjd * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2290168404Spjd * We also check if this is a whole disk, in which case we strip off the
2291168404Spjd * trailing 's0' slice name.
2292168404Spjd *
2293168404Spjd * This routine is also responsible for identifying when disks have been
2294168404Spjd * reconfigured in a new location.  The kernel will have opened the device by
2295168404Spjd * devid, but the path will still refer to the old location.  To catch this, we
2296168404Spjd * first do a path -> devid translation (which is fast for the common case).  If
2297168404Spjd * the devid matches, we're done.  If not, we do a reverse devid -> path
2298168404Spjd * translation and issue the appropriate ioctl() to update the path of the vdev.
2299168404Spjd * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2300168404Spjd * of these checks.
2301168404Spjd */
2302168404Spjdchar *
2303168404Spjdzpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
2304168404Spjd{
2305168404Spjd	char *path, *devid;
2306168404Spjd	uint64_t value;
2307168404Spjd	char buf[64];
2308185029Spjd	vdev_stat_t *vs;
2309185029Spjd	uint_t vsc;
2310168404Spjd
2311168404Spjd	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2312168404Spjd	    &value) == 0) {
2313168404Spjd		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2314168404Spjd		    &value) == 0);
2315168404Spjd		(void) snprintf(buf, sizeof (buf), "%llu",
2316168404Spjd		    (u_longlong_t)value);
2317168404Spjd		path = buf;
2318168404Spjd	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2319168404Spjd
2320185029Spjd		/*
2321185029Spjd		 * If the device is dead (faulted, offline, etc) then don't
2322185029Spjd		 * bother opening it.  Otherwise we may be forcing the user to
2323185029Spjd		 * open a misbehaving device, which can have undesirable
2324185029Spjd		 * effects.
2325185029Spjd		 */
2326185029Spjd		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2327185029Spjd		    (uint64_t **)&vs, &vsc) != 0 ||
2328185029Spjd		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
2329185029Spjd		    zhp != NULL &&
2330168404Spjd		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2331168404Spjd			/*
2332168404Spjd			 * Determine if the current path is correct.
2333168404Spjd			 */
2334168404Spjd			char *newdevid = path_to_devid(path);
2335168404Spjd
2336168404Spjd			if (newdevid == NULL ||
2337168404Spjd			    strcmp(devid, newdevid) != 0) {
2338168404Spjd				char *newpath;
2339168404Spjd
2340168404Spjd				if ((newpath = devid_to_path(devid)) != NULL) {
2341168404Spjd					/*
2342168404Spjd					 * Update the path appropriately.
2343168404Spjd					 */
2344168404Spjd					set_path(zhp, nv, newpath);
2345168404Spjd					if (nvlist_add_string(nv,
2346168404Spjd					    ZPOOL_CONFIG_PATH, newpath) == 0)
2347168404Spjd						verify(nvlist_lookup_string(nv,
2348168404Spjd						    ZPOOL_CONFIG_PATH,
2349168404Spjd						    &path) == 0);
2350168404Spjd					free(newpath);
2351168404Spjd				}
2352168404Spjd			}
2353168404Spjd
2354168404Spjd			if (newdevid)
2355168404Spjd				devid_str_free(newdevid);
2356168404Spjd		}
2357168404Spjd
2358168404Spjd		if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
2359168404Spjd			path += sizeof(_PATH_DEV) - 1;
2360168404Spjd
2361168404Spjd		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2362168404Spjd		    &value) == 0 && value) {
2363168404Spjd			char *tmp = zfs_strdup(hdl, path);
2364168404Spjd			if (tmp == NULL)
2365168404Spjd				return (NULL);
2366168404Spjd			tmp[strlen(path) - 2] = '\0';
2367168404Spjd			return (tmp);
2368168404Spjd		}
2369168404Spjd	} else {
2370168404Spjd		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
2371168404Spjd
2372168404Spjd		/*
2373168404Spjd		 * If it's a raidz device, we need to stick in the parity level.
2374168404Spjd		 */
2375168404Spjd		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
2376168404Spjd			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
2377168404Spjd			    &value) == 0);
2378168404Spjd			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
2379168404Spjd			    (u_longlong_t)value);
2380168404Spjd			path = buf;
2381168404Spjd		}
2382168404Spjd	}
2383168404Spjd
2384168404Spjd	return (zfs_strdup(hdl, path));
2385168404Spjd}
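
/*
 * Illustrative sketch for zpool_vdev_name(), kept under #if 0: print
 * the display name of each top-level vdev.  The returned string is
 * allocated with zfs_strdup() and must be freed by the caller.
 */
#if 0
static void
example_print_vdev_names(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	nvlist_t *nvroot, **child;
	uint_t c, children;
	char *name;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		if ((name = zpool_vdev_name(hdl, zhp, child[c])) != NULL) {
			(void) printf("  %s\n", name);
			free(name);
		}
	}
}
#endif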
2386168404Spjd
2387168404Spjdstatic int
2388168404Spjdzbookmark_compare(const void *a, const void *b)
2389168404Spjd{
2390168404Spjd	return (memcmp(a, b, sizeof (zbookmark_t)));
2391168404Spjd}
2392168404Spjd
2393168404Spjd/*
2394168404Spjd * Retrieve the persistent error log, uniquify the members, and return to the
2395168404Spjd * caller.
2396168404Spjd */
2397168404Spjdint
2398168404Spjdzpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
2399168404Spjd{
2400168404Spjd	zfs_cmd_t zc = { 0 };
2401168404Spjd	uint64_t count;
2402168404Spjd	zbookmark_t *zb = NULL;
2403168404Spjd	int i;
2404168404Spjd
2405168404Spjd	/*
2406168404Spjd	 * Retrieve the raw error list from the kernel.  If the number of errors
2407168404Spjd	 * has increased, allocate more space and continue until we get the
2408168404Spjd	 * entire list.
2409168404Spjd	 */
2410168404Spjd	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
2411168404Spjd	    &count) == 0);
2412185029Spjd	if (count == 0)
2413185029Spjd		return (0);
2414168404Spjd	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
2415168404Spjd	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
2416168404Spjd		return (-1);
2417168404Spjd	zc.zc_nvlist_dst_size = count;
2418168404Spjd	(void) strcpy(zc.zc_name, zhp->zpool_name);
2419168404Spjd	for (;;) {
2420168404Spjd		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
2421168404Spjd		    &zc) != 0) {
2422168404Spjd			free((void *)(uintptr_t)zc.zc_nvlist_dst);
2423168404Spjd			if (errno == ENOMEM) {
2424168404Spjd				count = zc.zc_nvlist_dst_size;
2425168404Spjd				if ((zc.zc_nvlist_dst = (uintptr_t)
2426168404Spjd				    zfs_alloc(zhp->zpool_hdl, count *
2427168404Spjd				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
2428168404Spjd					return (-1);
2429168404Spjd			} else {
2430168404Spjd				return (-1);
2431168404Spjd			}
2432168404Spjd		} else {
2433168404Spjd			break;
2434168404Spjd		}
2435168404Spjd	}
2436168404Spjd
2437168404Spjd	/*
2438168404Spjd	 * Sort the resulting bookmarks.  This is a little confusing due to the
2439168404Spjd	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
2440168404Spjd	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
2441168404Spjd	 * _not_ copied as part of the process.  So we point the start of our
2442168404Spjd	 * array appropriately and decrement the total number of elements.
2443168404Spjd	 */
2444168404Spjd	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
2445168404Spjd	    zc.zc_nvlist_dst_size;
2446168404Spjd	count -= zc.zc_nvlist_dst_size;
2447168404Spjd
2448168404Spjd	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
2449168404Spjd
2450168404Spjd	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
2451168404Spjd
2452168404Spjd	/*
2453168404Spjd	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
2454168404Spjd	 */
2455168404Spjd	for (i = 0; i < count; i++) {
2456168404Spjd		nvlist_t *nv;
2457168404Spjd
2458168404Spjd		/* ignoring zb_blkid and zb_level for now */
2459168404Spjd		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
2460168404Spjd		    zb[i-1].zb_object == zb[i].zb_object)
2461168404Spjd			continue;
2462168404Spjd
2463168404Spjd		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
2464168404Spjd			goto nomem;
2465168404Spjd		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
2466168404Spjd		    zb[i].zb_objset) != 0) {
2467168404Spjd			nvlist_free(nv);
2468168404Spjd			goto nomem;
2469168404Spjd		}
2470168404Spjd		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
2471168404Spjd		    zb[i].zb_object) != 0) {
2472168404Spjd			nvlist_free(nv);
2473168404Spjd			goto nomem;
2474168404Spjd		}
2475168404Spjd		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
2476168404Spjd			nvlist_free(nv);
2477168404Spjd			goto nomem;
2478168404Spjd		}
2479168404Spjd		nvlist_free(nv);
2480168404Spjd	}
2481168404Spjd
2482168404Spjd	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2483168404Spjd	return (0);
2484168404Spjd
2485168404Spjdnomem:
2486168404Spjd	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2487168404Spjd	return (no_memory(zhp->zpool_hdl));
2488168404Spjd}
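
/*
 * Illustrative sketch for zpool_get_errlog(), kept under #if 0.  Each
 * element of the returned nvlist wraps a dataset/object pair that
 * zpool_obj_to_path() (below) can turn into a pathname.  Note that the
 * function returns 0 without touching '*nverrlistp' when the pool
 * reports no errors, hence the NULL initialization.
 */
#if 0
static void
example_print_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL, *nv;
	nvpair_t *elem = NULL;
	uint64_t dsobj, obj;
	char pathname[MAXPATHLEN * 2];

	if (zpool_get_errlog(zhp, &nverrlist) != 0 || nverrlist == NULL)
		return;

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
		    &obj) == 0);
		zpool_obj_to_path(zhp, dsobj, obj, pathname,
		    sizeof (pathname));
		(void) printf("%s\n", pathname);
	}
	nvlist_free(nverrlist);
}
#endif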
2489168404Spjd
2490168404Spjd/*
2491168404Spjd * Upgrade a ZFS pool to the latest on-disk version.
2492168404Spjd */
2493168404Spjdint
2494185029Spjdzpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
2495168404Spjd{
2496168404Spjd	zfs_cmd_t zc = { 0 };
2497168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
2498168404Spjd
2499168404Spjd	(void) strcpy(zc.zc_name, zhp->zpool_name);
2500185029Spjd	zc.zc_cookie = new_version;
2501185029Spjd
2502185029Spjd	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
2503168404Spjd		return (zpool_standard_error_fmt(hdl, errno,
2504168404Spjd		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
2505168404Spjd		    zhp->zpool_name));
2506168404Spjd	return (0);
2507168404Spjd}
2508168404Spjd
2509168404Spjdvoid
2510185029Spjdzpool_set_history_str(const char *subcommand, int argc, char **argv,
2511185029Spjd    char *history_str)
2512168404Spjd{
2513168404Spjd	int i;
2514168404Spjd
2515185029Spjd	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
2516185029Spjd	for (i = 1; i < argc; i++) {
2517185029Spjd		if (strlen(history_str) + 1 + strlen(argv[i]) >
2518185029Spjd		    HIS_MAX_RECORD_LEN)
2519168404Spjd			break;
2520185029Spjd		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
2521185029Spjd		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
2522168404Spjd	}
2523185029Spjd}
2524168404Spjd
2525185029Spjd/*
2526185029Spjd * Stage command history for logging.
2527185029Spjd */
2528185029Spjdint
2529185029Spjdzpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
2530185029Spjd{
2531185029Spjd	if (history_str == NULL)
2532185029Spjd		return (EINVAL);
2533168404Spjd
2534185029Spjd	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
2535185029Spjd		return (EINVAL);
2536168404Spjd
2537185029Spjd	if (hdl->libzfs_log_str != NULL)
2538185029Spjd		free(hdl->libzfs_log_str);
2539168404Spjd
2540185029Spjd	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
2541185029Spjd		return (no_memory(hdl));
2542185029Spjd
2543185029Spjd	return (0);
2544168404Spjd}
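
/*
 * Illustrative sketch of how a command-line consumer strings the two
 * helpers together, kept under #if 0.  The subcommand name "zpool" is
 * an assumption about the caller; argc/argv are the caller's own.
 */
#if 0
static void
example_log_command(libzfs_handle_t *hdl, int argc, char **argv)
{
	char history_str[HIS_MAX_RECORD_LEN];

	zpool_set_history_str("zpool", argc, argv, history_str);
	(void) zpool_stage_history(hdl, history_str);
}
#endif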
2545168404Spjd
2546168404Spjd/*
2547168404Spjd * Perform ioctl to get some command history of a pool.
2548168404Spjd *
2549168404Spjd * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
2550168404Spjd * logical offset of the history buffer to start reading from.
2551168404Spjd *
2552168404Spjd * Upon return, 'off' is the next logical offset to read from and
2553168404Spjd * 'len' is the actual amount of bytes read into 'buf'.
2554168404Spjd */
2555168404Spjdstatic int
2556168404Spjdget_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
2557168404Spjd{
2558168404Spjd	zfs_cmd_t zc = { 0 };
2559168404Spjd	libzfs_handle_t *hdl = zhp->zpool_hdl;
2560168404Spjd
2561168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2562168404Spjd
2563168404Spjd	zc.zc_history = (uint64_t)(uintptr_t)buf;
2564168404Spjd	zc.zc_history_len = *len;
2565168404Spjd	zc.zc_history_offset = *off;
2566168404Spjd
2567168404Spjd	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
2568168404Spjd		switch (errno) {
2569168404Spjd		case EPERM:
2570168404Spjd			return (zfs_error_fmt(hdl, EZFS_PERM,
2571168404Spjd			    dgettext(TEXT_DOMAIN,
2572168404Spjd			    "cannot show history for pool '%s'"),
2573168404Spjd			    zhp->zpool_name));
2574168404Spjd		case ENOENT:
2575168404Spjd			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
2576168404Spjd			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2577168404Spjd			    "'%s'"), zhp->zpool_name));
2578168404Spjd		case ENOTSUP:
2579168404Spjd			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
2580168404Spjd			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2581168404Spjd			    "'%s', pool must be upgraded"), zhp->zpool_name));
2582168404Spjd		default:
2583168404Spjd			return (zpool_standard_error_fmt(hdl, errno,
2584168404Spjd			    dgettext(TEXT_DOMAIN,
2585168404Spjd			    "cannot get history for '%s'"), zhp->zpool_name));
2586168404Spjd		}
2587168404Spjd	}
2588168404Spjd
2589168404Spjd	*len = zc.zc_history_len;
2590168404Spjd	*off = zc.zc_history_offset;
2591168404Spjd
2592168404Spjd	return (0);
2593168404Spjd}
2594168404Spjd
2595168404Spjd/*
2596168404Spjd * Process the buffer of nvlists, unpacking and storing each nvlist record
2597168404Spjd * into 'records'.  'leftover' is set to the number of bytes that weren't
2598168404Spjd * processed as there wasn't a complete record.
2599168404Spjd */
2600168404Spjdstatic int
2601168404Spjdzpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
2602168404Spjd    nvlist_t ***records, uint_t *numrecords)
2603168404Spjd{
2604168404Spjd	uint64_t reclen;
2605168404Spjd	nvlist_t *nv;
2606168404Spjd	int i;
2607168404Spjd
2608168404Spjd	while (bytes_read > sizeof (reclen)) {
2609168404Spjd
2610168404Spjd		/* get length of packed record (stored as little endian) */
2611168404Spjd		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
2612168404Spjd			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
2613168404Spjd
2614168404Spjd		if (bytes_read < sizeof (reclen) + reclen)
2615168404Spjd			break;
2616168404Spjd
2617168404Spjd		/* unpack record */
2618168404Spjd		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
2619168404Spjd			return (ENOMEM);
2620168404Spjd		bytes_read -= sizeof (reclen) + reclen;
2621168404Spjd		buf += sizeof (reclen) + reclen;
2622168404Spjd
2623168404Spjd		/* add record to nvlist array */
2624168404Spjd		(*numrecords)++;
2625168404Spjd		if (ISP2(*numrecords + 1)) {
2626168404Spjd			nvlist_t **tmp = realloc(*records,
2627168404Spjd			    *numrecords * 2 * sizeof (nvlist_t *));

			/*
			 * Don't lose the old array (or dereference NULL
			 * below) if the reallocation fails; back out the
			 * count so the caller frees only stored records.
			 */
			if (tmp == NULL) {
				(*numrecords)--;
				nvlist_free(nv);
				return (ENOMEM);
			}
			*records = tmp;
2628168404Spjd		}
2629168404Spjd		(*records)[*numrecords - 1] = nv;
2630168404Spjd	}
2631168404Spjd
2632168404Spjd	*leftover = bytes_read;
2633168404Spjd	return (0);
2634168404Spjd}
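
/*
 * Worked example of the record framing decoded above: each record is a
 * 64-bit little-endian length followed by that many bytes of packed
 * nvlist.  A record whose packed nvlist occupies 0x123 bytes is laid
 * out as
 *
 *	offset:	 0  1  2  3  4  5  6  7   8 ...
 *	byte:	23 01 00 00 00 00 00 00   <0x123 bytes of packed nvlist>
 *
 * and the loop reconstructs reclen as
 *
 *	0x23 + (0x01 << 8) + (0x00 << 16) + ... = 0x123
 */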
2635168404Spjd
2636168404Spjd#define	HIS_BUF_LEN	(128*1024)
2637168404Spjd
2638168404Spjd/*
2639168404Spjd * Retrieve the command history of a pool.
2640168404Spjd */
2641168404Spjdint
2642168404Spjdzpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
2643168404Spjd{
2644168404Spjd	char buf[HIS_BUF_LEN];
2645168404Spjd	uint64_t off = 0;
2646168404Spjd	nvlist_t **records = NULL;
2647168404Spjd	uint_t numrecords = 0;
2648168404Spjd	int err, i;
2649168404Spjd
2650168404Spjd	do {
2651168404Spjd		uint64_t bytes_read = sizeof (buf);
2652168404Spjd		uint64_t leftover;
2653168404Spjd
2654168404Spjd		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
2655168404Spjd			break;
2656168404Spjd
2657168404Spjd		/* if nothing else was read in, we're at EOF, just return */
2658168404Spjd		if (!bytes_read)
2659168404Spjd			break;
2660168404Spjd
2661168404Spjd		if ((err = zpool_history_unpack(buf, bytes_read,
2662168404Spjd		    &leftover, &records, &numrecords)) != 0)
2663168404Spjd			break;
2664168404Spjd		off -= leftover;
2665168404Spjd
2666168404Spjd		/* CONSTCOND */
2667168404Spjd	} while (1);
2668168404Spjd
2669168404Spjd	if (!err) {
2670168404Spjd		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
2671168404Spjd		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
2672168404Spjd		    records, numrecords) == 0);
2673168404Spjd	}
2674168404Spjd	for (i = 0; i < numrecords; i++)
2675168404Spjd		nvlist_free(records[i]);
2676168404Spjd	free(records);
2677168404Spjd
2678168404Spjd	return (err);
2679168404Spjd}
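
/*
 * Illustrative sketch for zpool_get_history(), kept under #if 0.  The
 * per-record pair names (ZPOOL_HIST_TIME, ZPOOL_HIST_CMD) are
 * assumptions here; not every record need carry a command string, so
 * the lookups are treated as optional.
 */
#if 0
static void
example_print_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis, **records;
	uint_t numrecords, i;
	uint64_t tsec;
	char *cmd;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return;

	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0);
	for (i = 0; i < numrecords; i++) {
		if (nvlist_lookup_uint64(records[i], ZPOOL_HIST_TIME,
		    &tsec) == 0 &&
		    nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0)
			(void) printf("%llu %s\n", (u_longlong_t)tsec, cmd);
	}
	nvlist_free(nvhis);
}
#endif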
2680168404Spjd
2681168404Spjdvoid
2682168404Spjdzpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
2683168404Spjd    char *pathname, size_t len)
2684168404Spjd{
2685168404Spjd	zfs_cmd_t zc = { 0 };
2686168404Spjd	boolean_t mounted = B_FALSE;
2687168404Spjd	char *mntpnt = NULL;
2688168404Spjd	char dsname[MAXNAMELEN];
2689168404Spjd
2690168404Spjd	if (dsobj == 0) {
2691168404Spjd		/* special case for the MOS */
2692168404Spjd		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
2693168404Spjd		return;
2694168404Spjd	}
2695168404Spjd
2696168404Spjd	/* get the dataset's name */
2697168404Spjd	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2698168404Spjd	zc.zc_obj = dsobj;
2699168404Spjd	if (ioctl(zhp->zpool_hdl->libzfs_fd,
2700168404Spjd	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
2701168404Spjd		/* just write out a path of two object numbers */
2702168404Spjd		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
2703168404Spjd		    dsobj, obj);
2704168404Spjd		return;
2705168404Spjd	}
2706168404Spjd	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
2707168404Spjd
2708168404Spjd	/* find out if the dataset is mounted */
2709168404Spjd	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
2710168404Spjd
2711168404Spjd	/* get the corrupted object's path */
2712168404Spjd	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
2713168404Spjd	zc.zc_obj = obj;
2714168404Spjd	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
2715168404Spjd	    &zc) == 0) {
2716168404Spjd		if (mounted) {
2717168404Spjd			(void) snprintf(pathname, len, "%s%s", mntpnt,
2718168404Spjd			    zc.zc_value);
2719168404Spjd		} else {
2720168404Spjd			(void) snprintf(pathname, len, "%s:%s",
2721168404Spjd			    dsname, zc.zc_value);
2722168404Spjd		}
2723168404Spjd	} else {
2724168404Spjd		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
2725168404Spjd	}
2726168404Spjd	free(mntpnt);
2727168404Spjd}
2728168404Spjd
2729185029Spjd#define	RDISK_ROOT	"/dev/rdsk"
2730185029Spjd#define	BACKUP_SLICE	"s2"
2731185029Spjd/*
2732185029Spjd * Don't start the slice at the default block of 34; many storage
2733185029Spjd * devices will use a stripe width of 128k, so start there instead.
2734185029Spjd */
2735185029Spjd#define	NEW_START_BLOCK	256
2736185029Spjd
2737185029Spjd#if defined(sun)
2738185029Spjd/*
2739185029Spjd * Read the EFI label from the config, if a label does not exist then
2740185029Spjd * pass back the error to the caller. If the caller has passed a non-NULL
2741185029Spjd * diskaddr argument then we set it to the starting address of the EFI
2742185029Spjd * partition.
2743185029Spjd */
2744185029Spjdstatic int
2745185029Spjdread_efi_label(nvlist_t *config, diskaddr_t *sb)
2746168404Spjd{
2747185029Spjd	char *path;
2748185029Spjd	int fd;
2749185029Spjd	char diskname[MAXPATHLEN];
2750185029Spjd	int err = -1;
2751168404Spjd
2752185029Spjd	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
2753185029Spjd		return (err);
2754168404Spjd
2755185029Spjd	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
2756185029Spjd	    strrchr(path, '/'));
2757185029Spjd	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2758185029Spjd		struct dk_gpt *vtoc;
2759185029Spjd
2760185029Spjd		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
2761185029Spjd			if (sb != NULL)
2762185029Spjd				*sb = vtoc->efi_parts[0].p_start;
2763185029Spjd			efi_free(vtoc);
2764185029Spjd		}
2765185029Spjd		(void) close(fd);
2766168404Spjd	}
2767185029Spjd	return (err);
2768185029Spjd}
2769168404Spjd
2770185029Spjd/*
2771185029Spjd * determine where a partition starts on a disk in the current
2772185029Spjd * configuration
2773185029Spjd */
2774185029Spjdstatic diskaddr_t
2775185029Spjdfind_start_block(nvlist_t *config)
2776185029Spjd{
2777185029Spjd	nvlist_t **child;
2778185029Spjd	uint_t c, children;
2779185029Spjd	diskaddr_t sb = MAXOFFSET_T;
2780185029Spjd	uint64_t wholedisk;
2781168404Spjd
2782185029Spjd	if (nvlist_lookup_nvlist_array(config,
2783185029Spjd	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2784185029Spjd		if (nvlist_lookup_uint64(config,
2785185029Spjd		    ZPOOL_CONFIG_WHOLE_DISK,
2786185029Spjd		    &wholedisk) != 0 || !wholedisk) {
2787185029Spjd			return (MAXOFFSET_T);
2788185029Spjd		}
2789185029Spjd		if (read_efi_label(config, &sb) < 0)
2790185029Spjd			sb = MAXOFFSET_T;
2791185029Spjd		return (sb);
2792168404Spjd	}
2793168404Spjd
2794185029Spjd	for (c = 0; c < children; c++) {
2795185029Spjd		sb = find_start_block(child[c]);
2796185029Spjd		if (sb != MAXOFFSET_T) {
2797185029Spjd			return (sb);
2798185029Spjd		}
2799168404Spjd	}
2800185029Spjd	return (MAXOFFSET_T);
2801185029Spjd}
2802185029Spjd#endif /* sun */
2803168404Spjd
2804185029Spjd/*
2805185029Spjd * Label an individual disk.  The name provided is the short name,
2806185029Spjd * stripped of any leading /dev path.
2807185029Spjd */
2808185029Spjdint
2809185029Spjdzpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
2810185029Spjd{
2811185029Spjd#if defined(sun)
2812185029Spjd	char path[MAXPATHLEN];
2813185029Spjd	struct dk_gpt *vtoc;
2814185029Spjd	int fd;
2815185029Spjd	size_t resv = EFI_MIN_RESV_SIZE;
2816185029Spjd	uint64_t slice_size;
2817185029Spjd	diskaddr_t start_block;
2818185029Spjd	char errbuf[1024];
2819168404Spjd
2820185029Spjd	/* prepare an error message just in case */
2821185029Spjd	(void) snprintf(errbuf, sizeof (errbuf),
2822185029Spjd	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
2823168404Spjd
2824185029Spjd	if (zhp) {
2825185029Spjd		nvlist_t *nvroot;
2826168404Spjd
2827185029Spjd		verify(nvlist_lookup_nvlist(zhp->zpool_config,
2828185029Spjd		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2829168404Spjd
2830185029Spjd		if (zhp->zpool_start_block == 0)
2831185029Spjd			start_block = find_start_block(nvroot);
2832185029Spjd		else
2833185029Spjd			start_block = zhp->zpool_start_block;
2834185029Spjd		zhp->zpool_start_block = start_block;
2835185029Spjd	} else {
2836185029Spjd		/* new pool */
2837185029Spjd		start_block = NEW_START_BLOCK;
2838185029Spjd	}
2839168404Spjd
2840185029Spjd	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
2841185029Spjd	    BACKUP_SLICE);
2842168404Spjd
2843185029Spjd	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2844185029Spjd		/*
2845185029Spjd		 * This shouldn't happen.  We've long since verified that this
2846185029Spjd		 * is a valid device.
2847185029Spjd		 */
2848185029Spjd		zfs_error_aux(hdl,
2849185029Spjd		    dgettext(TEXT_DOMAIN, "unable to open device"));
2850185029Spjd		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2851185029Spjd	}
2852168404Spjd
2853185029Spjd	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
2854185029Spjd		/*
2855185029Spjd		 * The only way this can fail is if we run out of memory, or we
2856185029Spjd		 * were unable to read the disk's capacity
2857185029Spjd		 */
2858185029Spjd		if (errno == ENOMEM)
2859185029Spjd			(void) no_memory(hdl);
2860168404Spjd
2861185029Spjd		(void) close(fd);
2862185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2863185029Spjd		    "unable to read disk capacity of '%s'"), name);
2864185029Spjd
2865185029Spjd		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2866168404Spjd	}
2867168404Spjd
2868185029Spjd	slice_size = vtoc->efi_last_u_lba + 1;
2869185029Spjd	slice_size -= EFI_MIN_RESV_SIZE;
2870185029Spjd	if (start_block == MAXOFFSET_T)
2871185029Spjd		start_block = NEW_START_BLOCK;
2872185029Spjd	slice_size -= start_block;
2873168404Spjd
2874185029Spjd	vtoc->efi_parts[0].p_start = start_block;
2875185029Spjd	vtoc->efi_parts[0].p_size = slice_size;
2876185029Spjd
2877168404Spjd	/*
2878185029Spjd	 * Why we use V_USR: V_BACKUP confuses users, and is considered
2879185029Spjd	 * disposable by some EFI utilities (since EFI doesn't have a backup
2880185029Spjd	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
2881185029Spjd	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
2882185029Spjd	 * etc. were all pretty specific.  V_USR is as close to reality as we
2883185029Spjd	 * can get, in the absence of V_OTHER.
2884168404Spjd	 */
2885185029Spjd	vtoc->efi_parts[0].p_tag = V_USR;
2886185029Spjd	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
2887168404Spjd
2888185029Spjd	vtoc->efi_parts[8].p_start = slice_size + start_block;
2889185029Spjd	vtoc->efi_parts[8].p_size = resv;
2890185029Spjd	vtoc->efi_parts[8].p_tag = V_RESERVED;
2891168404Spjd
2892185029Spjd	if (efi_write(fd, vtoc) != 0) {
2893185029Spjd		/*
2894185029Spjd		 * Some block drivers (like pcata) may not support EFI
2895185029Spjd		 * GPT labels.  Print out a helpful error message directing
2896185029Spjd		 * the user to manually label the disk and give a specific
2897185029Spjd		 * slice.
2898185029Spjd		 */
2899185029Spjd		(void) close(fd);
2900185029Spjd		efi_free(vtoc);
2901168404Spjd
2902185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2903185029Spjd		    "try using fdisk(1M) and then provide a specific slice"));
2904185029Spjd		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2905168404Spjd	}
2906185029Spjd
2907185029Spjd	(void) close(fd);
2908185029Spjd	efi_free(vtoc);
2909185029Spjd#endif /* sun */
2910168404Spjd	return (0);
2911168404Spjd}
2912168404Spjd
2913185029Spjdstatic boolean_t
2914185029Spjdsupported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
2915168404Spjd{
2916185029Spjd	char *type;
2917185029Spjd	nvlist_t **child;
2918185029Spjd	uint_t children, c;
2919185029Spjd
2920185029Spjd	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
2921185029Spjd	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2922185029Spjd	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
2923185029Spjd	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
2924185029Spjd	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
2925185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2926185029Spjd		    "vdev type '%s' is not supported"), type);
2927185029Spjd		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
2928185029Spjd		return (B_FALSE);
2929185029Spjd	}
2930185029Spjd	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
2931185029Spjd	    &child, &children) == 0) {
2932185029Spjd		for (c = 0; c < children; c++) {
2933185029Spjd			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
2934185029Spjd				return (B_FALSE);
2935185029Spjd		}
2936185029Spjd	}
2937185029Spjd	return (B_TRUE);
2938168404Spjd}
2939168404Spjd
2940185029Spjd/*
2941185029Spjd * Check whether this zvol is allowable for use as a dump device: returns
2942185029Spjd * zero if it is, > 0 if it isn't, and < 0 if it isn't a zvol at all.
2943185029Spjd */
2944168404Spjdint
2945185029Spjdzvol_check_dump_config(char *arg)
2946168404Spjd{
2947185029Spjd	zpool_handle_t *zhp = NULL;
2948185029Spjd	nvlist_t *config, *nvroot;
2949185029Spjd	char *p, *volname;
2950185029Spjd	nvlist_t **top;
2951185029Spjd	uint_t toplevels;
2952185029Spjd	libzfs_handle_t *hdl;
2953185029Spjd	char errbuf[1024];
2954185029Spjd	char poolname[ZPOOL_MAXNAMELEN];
2955185029Spjd	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
2956185029Spjd	int ret = 1;
2957168404Spjd
2958185029Spjd	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
2959168404Spjd		return (-1);
2960185029Spjd	}
2961168404Spjd
2962185029Spjd	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2963185029Spjd	    "dump is not supported on device '%s'"), arg);
2964168404Spjd
2965185029Spjd	if ((hdl = libzfs_init()) == NULL)
2966185029Spjd		return (1);
2967185029Spjd	libzfs_print_on_error(hdl, B_TRUE);
2968168404Spjd
2969185029Spjd	volname = arg + pathlen;
2970185029Spjd
2971185029Spjd	/* check the configuration of the pool */
2972185029Spjd	if ((p = strchr(volname, '/')) == NULL) {
2973185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2974185029Spjd		    "malformed dataset name"));
2975185029Spjd		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
2976185029Spjd		goto out;
2977185029Spjd	} else if (p - volname >= ZFS_MAXNAMELEN) {
2978185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2979185029Spjd		    "dataset name is too long"));
2980185029Spjd		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
2981185029Spjd		goto out;
2982185029Spjd	} else {
2983185029Spjd		(void) strncpy(poolname, volname, p - volname);
2984185029Spjd		poolname[p - volname] = '\0';
2985168404Spjd	}
2986168404Spjd
2987185029Spjd	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
2988185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2989185029Spjd		    "could not open pool '%s'"), poolname);
2990185029Spjd		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
2991185029Spjd		goto out;
2992185029Spjd	}
2993185029Spjd	config = zpool_get_config(zhp, NULL);
2994185029Spjd	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2995185029Spjd	    &nvroot) != 0) {
2996185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2997185029Spjd		    "could not obtain vdev configuration for '%s'"), poolname);
2998185029Spjd		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
2999185029Spjd		goto out;
3000185029Spjd	}
3001185029Spjd
3002185029Spjd	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3003185029Spjd	    &top, &toplevels) == 0);
3004185029Spjd	if (toplevels != 1) {
3005185029Spjd		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3006185029Spjd		    "'%s' has multiple top level vdevs"), poolname);
3007185029Spjd		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
3008185029Spjd		goto out;
3009185029Spjd	}
3010185029Spjd
3011185029Spjd	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
3012185029Spjd		goto out;
3013185029Spjd	}
3014185029Spjd	ret = 0;
3015185029Spjd
3016185029Spjdout:
3017185029Spjd	if (zhp)
3018185029Spjd		zpool_close(zhp);
3019185029Spjd	libzfs_fini(hdl);
3020185029Spjd	return (ret);
3021168404Spjd}
3022