Deleted Added
full compact
libzfs_pool.c (177698) libzfs_pool.c (185029)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE

--- 6 unchanged lines hidden (view full) ---

15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE

--- 6 unchanged lines hidden (view full) ---

15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
24 * Use is subject to license terms.
25 */
26
27#pragma ident "%Z%%M% %I% %E% SMI"
28
29#include <sys/types.h>
30#include <sys/stat.h>
31#include <assert.h>
32#include <ctype.h>
33#include <errno.h>
34#include <devid.h>
35#include <dirent.h>
36#include <fcntl.h>
37#include <libintl.h>
38#include <stdio.h>
39#include <stdlib.h>
40#include <strings.h>
41#include <unistd.h>
27#include <sys/types.h>
28#include <sys/stat.h>
29#include <assert.h>
30#include <ctype.h>
31#include <errno.h>
32#include <devid.h>
33#include <dirent.h>
34#include <fcntl.h>
35#include <libintl.h>
36#include <stdio.h>
37#include <stdlib.h>
38#include <strings.h>
39#include <unistd.h>
40#include <zone.h>
42#include <sys/zfs_ioctl.h>
43#include <sys/zio.h>
44#include <strings.h>
45#include <umem.h>
46
47#include "zfs_namecheck.h"
48#include "zfs_prop.h"
49#include "libzfs_impl.h"
50
41#include <sys/zfs_ioctl.h>
42#include <sys/zio.h>
43#include <strings.h>
44#include <umem.h>
45
46#include "zfs_namecheck.h"
47#include "zfs_prop.h"
48#include "libzfs_impl.h"
49
50static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
51
51/*
52/*
53 * ====================================================================
54 * zpool property functions
55 * ====================================================================
56 */
57
58static int
59zpool_get_all_props(zpool_handle_t *zhp)
60{
61 zfs_cmd_t zc = { 0 };
62 libzfs_handle_t *hdl = zhp->zpool_hdl;
63
64 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
65
66 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
67 return (-1);
68
69 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
70 if (errno == ENOMEM) {
71 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
72 zcmd_free_nvlists(&zc);
73 return (-1);
74 }
75 } else {
76 zcmd_free_nvlists(&zc);
77 return (-1);
78 }
79 }
80
81 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
82 zcmd_free_nvlists(&zc);
83 return (-1);
84 }
85
86 zcmd_free_nvlists(&zc);
87
88 return (0);
89}
90
91static int
92zpool_props_refresh(zpool_handle_t *zhp)
93{
94 nvlist_t *old_props;
95
96 old_props = zhp->zpool_props;
97
98 if (zpool_get_all_props(zhp) != 0)
99 return (-1);
100
101 nvlist_free(old_props);
102 return (0);
103}
104
105static char *
106zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
107 zprop_source_t *src)
108{
109 nvlist_t *nv, *nvl;
110 uint64_t ival;
111 char *value;
112 zprop_source_t source;
113
114 nvl = zhp->zpool_props;
115 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
116 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
117 source = ival;
118 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
119 } else {
120 source = ZPROP_SRC_DEFAULT;
121 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
122 value = "-";
123 }
124
125 if (src)
126 *src = source;
127
128 return (value);
129}
130
131uint64_t
132zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
133{
134 nvlist_t *nv, *nvl;
135 uint64_t value;
136 zprop_source_t source;
137
138 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
139 /*
140 * zpool_get_all_props() has most likely failed because
141 * the pool is faulted, but if all we need is the top level
142 * vdev's guid then get it from the zhp config nvlist.
143 */
144 if ((prop == ZPOOL_PROP_GUID) &&
145 (nvlist_lookup_nvlist(zhp->zpool_config,
146 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
147 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
148 == 0)) {
149 return (value);
150 }
151 return (zpool_prop_default_numeric(prop));
152 }
153
154 nvl = zhp->zpool_props;
155 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
156 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
157 source = value;
158 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
159 } else {
160 source = ZPROP_SRC_DEFAULT;
161 value = zpool_prop_default_numeric(prop);
162 }
163
164 if (src)
165 *src = source;
166
167 return (value);
168}
169
170/*
171 * Map VDEV STATE to printed strings.
172 */
173char *
174zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
175{
176 switch (state) {
177 case VDEV_STATE_CLOSED:
178 case VDEV_STATE_OFFLINE:
179 return (gettext("OFFLINE"));
180 case VDEV_STATE_REMOVED:
181 return (gettext("REMOVED"));
182 case VDEV_STATE_CANT_OPEN:
183 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
184 return (gettext("FAULTED"));
185 else
186 return (gettext("UNAVAIL"));
187 case VDEV_STATE_FAULTED:
188 return (gettext("FAULTED"));
189 case VDEV_STATE_DEGRADED:
190 return (gettext("DEGRADED"));
191 case VDEV_STATE_HEALTHY:
192 return (gettext("ONLINE"));
193 }
194
195 return (gettext("UNKNOWN"));
196}
197
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 *
 * 'buf'/'len' receive the formatted value; when 'srctype' is non-NULL
 * it receives the property source.  Returns 0 on success, -1 on failure.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	/*
	 * An unavailable pool can only report its name and health
	 * ("FAULTED"); every other property is rendered as "-".
	 */
	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		if (prop == ZPOOL_PROP_NAME)
			(void) strlcpy(buf, zpool_get_name(zhp), len);
		else if (prop == ZPOOL_PROP_HEALTH)
			(void) strlcpy(buf, "FAULTED", len);
		else
			(void) strlcpy(buf, "-", len);
		return (0);
	}

	/* The name is known even when property retrieval fails. */
	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	/* Format the value according to the property's declared type. */
	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_USED:
		case ZPOOL_PROP_AVAILABLE:
			/* Byte counts get human-readable formatting. */
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_HEALTH:
			/*
			 * Health is derived from the root vdev's state and
			 * aux code in the current config, not from the
			 * property nvlist.
			 */
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		/* Index properties are stored numerically; print the name. */
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
279
280/*
281 * Check if the bootfs name has the same pool name as it is set to.
282 * Assuming bootfs is a valid dataset name.
283 */
284static boolean_t
285bootfs_name_valid(const char *pool, char *bootfs)
286{
287 int len = strlen(pool);
288
289 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
290 return (B_FALSE);
291
292 if (strncmp(pool, bootfs, len) == 0 &&
293 (bootfs[len] == '/' || bootfs[len] == '\0'))
294 return (B_TRUE);
295
296 return (B_FALSE);
297}
298
#if defined(sun)
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **kids;
	uint_t i, nkids;

	/* A leaf vdev: probe it directly for an EFI label. */
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &kids, &nkids) != 0)
		return (read_efi_label(config, NULL) >= 0);

	/* An interior vdev: recurse into every child. */
	for (i = 0; i < nkids; i++) {
		if (pool_uses_efi(kids[i]))
			return (B_TRUE);
	}

	return (B_FALSE);
}
#endif
321
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 *
 * Returns a newly allocated nvlist of validated/parsed properties, or NULL
 * on failure (with an error already reported through 'hdl'/'errbuf').
 * 'version' is the pool's current version; 'create_or_import' selects which
 * properties may be set in this context.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		/* Parse strings into numbers/indices and add to retprops. */
		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			/* May only upgrade, and never past SPA_VERSION. */
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * bootfs property value has to be a dataset name and
			 * the dataset has to be in the same pool as it sets to.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			/* Open even a faulted pool to inspect its vdevs. */
			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(sun)
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			/* Empty value and "none" are both accepted as-is. */
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			/* The final component must be a plausible filename. */
			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			/*
			 * Temporarily truncate at the last '/' so the parent
			 * directory can be stat'ed; restored below.
			 */
			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
504
/*
 * Set zpool property : propname=propval.
 *
 * Validates the property via zpool_valid_proplist(), issues
 * ZFS_IOC_POOL_SET_PROPS, and refreshes the cached properties on
 * success.  Returns 0 on success, non-zero on failure (with the error
 * already reported through the libzfs handle).
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	/* Current properties (notably version) are needed for validation. */
	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));

	/* Wrap the single name=value pair in an nvlist for validation. */
	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	/* Replace the raw nvlist with the validated/parsed one. */
	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	/* On success the cached properties are now stale; refresh them. */
	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
565
566int
567zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
568{
569 libzfs_handle_t *hdl = zhp->zpool_hdl;
570 zprop_list_t *entry;
571 char buf[ZFS_MAXPROPLEN];
572
573 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
574 return (-1);
575
576 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
577
578 if (entry->pl_fixed)
579 continue;
580
581 if (entry->pl_prop != ZPROP_INVAL &&
582 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
583 NULL) == 0) {
584 if (strlen(buf) > entry->pl_width)
585 entry->pl_width = strlen(buf);
586 }
587 }
588
589 return (0);
590}
591
592
593/*
52 * Validate the given pool name, optionally putting an extended error message in
53 * 'buf'.
54 */
594 * Validate the given pool name, optionally putting an extended error message in
595 * 'buf'.
596 */
55static boolean_t
597boolean_t
56zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
57{
58 namecheck_err_t why;
59 char what;
60 int ret;
61
62 ret = pool_namecheck(pool, &why, &what);
63
64 /*
65 * The rules for reserved pool names were extended at a later point.
66 * But we need to support users with existing pools that may now be
67 * invalid. So we only check for this expanded set of names during a
68 * create (or import), and only in userland.
69 */
70 if (ret == 0 && !isopen &&
71 (strncmp(pool, "mirror", 6) == 0 ||
72 strncmp(pool, "raidz", 5) == 0 ||
598zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
599{
600 namecheck_err_t why;
601 char what;
602 int ret;
603
604 ret = pool_namecheck(pool, &why, &what);
605
606 /*
607 * The rules for reserved pool names were extended at a later point.
608 * But we need to support users with existing pools that may now be
609 * invalid. So we only check for this expanded set of names during a
610 * create (or import), and only in userland.
611 */
612 if (ret == 0 && !isopen &&
613 (strncmp(pool, "mirror", 6) == 0 ||
614 strncmp(pool, "raidz", 5) == 0 ||
73 strncmp(pool, "spare", 5) == 0)) {
74 zfs_error_aux(hdl,
75 dgettext(TEXT_DOMAIN, "name is reserved"));
615 strncmp(pool, "spare", 5) == 0 ||
616 strcmp(pool, "log") == 0)) {
617 if (hdl != NULL)
618 zfs_error_aux(hdl,
619 dgettext(TEXT_DOMAIN, "name is reserved"));
76 return (B_FALSE);
77 }
78
79
80 if (ret != 0) {
81 if (hdl != NULL) {
82 switch (why) {
83 case NAME_ERR_TOOLONG:

--- 45 unchanged lines hidden (view full) ---

129 }
130 }
131 return (B_FALSE);
132 }
133
134 return (B_TRUE);
135}
136
620 return (B_FALSE);
621 }
622
623
624 if (ret != 0) {
625 if (hdl != NULL) {
626 switch (why) {
627 case NAME_ERR_TOOLONG:

--- 45 unchanged lines hidden (view full) ---

673 }
674 }
675 return (B_FALSE);
676 }
677
678 return (B_TRUE);
679}
680
137static int
138zpool_get_all_props(zpool_handle_t *zhp)
139{
140 zfs_cmd_t zc = { 0 };
141 libzfs_handle_t *hdl = zhp->zpool_hdl;
142
143 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
144
145 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
146 return (-1);
147
148 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
149 if (errno == ENOMEM) {
150 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
151 zcmd_free_nvlists(&zc);
152 return (-1);
153 }
154 } else {
155 zcmd_free_nvlists(&zc);
156 return (-1);
157 }
158 }
159
160 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
161 zcmd_free_nvlists(&zc);
162 return (-1);
163 }
164
165 zcmd_free_nvlists(&zc);
166
167 return (0);
168}
169
170/*
171 * Open a handle to the given pool, even if the pool is currently in the FAULTED
172 * state.
173 */
174zpool_handle_t *
175zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
176{
177 zpool_handle_t *zhp;

--- 16 unchanged lines hidden (view full) ---

194 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
195
196 if (zpool_refresh_stats(zhp, &missing) != 0) {
197 zpool_close(zhp);
198 return (NULL);
199 }
200
201 if (missing) {
681/*
682 * Open a handle to the given pool, even if the pool is currently in the FAULTED
683 * state.
684 */
685zpool_handle_t *
686zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
687{
688 zpool_handle_t *zhp;

--- 16 unchanged lines hidden (view full) ---

705 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
706
707 if (zpool_refresh_stats(zhp, &missing) != 0) {
708 zpool_close(zhp);
709 return (NULL);
710 }
711
712 if (missing) {
202 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
203 "no such pool"));
713 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
204 (void) zfs_error_fmt(hdl, EZFS_NOENT,
714 (void) zfs_error_fmt(hdl, EZFS_NOENT,
205 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
206 pool);
715 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
207 zpool_close(zhp);
208 return (NULL);
209 }
210
211 return (zhp);
212}
213
214/*

--- 68 unchanged lines hidden (view full) ---

283 * Return the name of the pool.
284 */
285const char *
286zpool_get_name(zpool_handle_t *zhp)
287{
288 return (zhp->zpool_name);
289}
290
716 zpool_close(zhp);
717 return (NULL);
718 }
719
720 return (zhp);
721}
722
723/*

--- 68 unchanged lines hidden (view full) ---

792 * Return the name of the pool.
793 */
794const char *
795zpool_get_name(zpool_handle_t *zhp)
796{
797 return (zhp->zpool_name);
798}
799
291/*
292 * Return the GUID of the pool.
293 */
294uint64_t
295zpool_get_guid(zpool_handle_t *zhp)
296{
297 uint64_t guid;
298
800
299 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
300 &guid) == 0);
301 return (guid);
302}
303
304/*
801/*
305 * Return the version of the pool.
306 */
307uint64_t
308zpool_get_version(zpool_handle_t *zhp)
309{
310 uint64_t version;
311
312 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
313 &version) == 0);
314
315 return (version);
316}
317
318/*
319 * Return the amount of space currently consumed by the pool.
320 */
321uint64_t
322zpool_get_space_used(zpool_handle_t *zhp)
323{
324 nvlist_t *nvroot;
325 vdev_stat_t *vs;
326 uint_t vsc;
327
328 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
329 &nvroot) == 0);
330 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
331 (uint64_t **)&vs, &vsc) == 0);
332
333 return (vs->vs_alloc);
334}
335
336/*
337 * Return the total space in the pool.
338 */
339uint64_t
340zpool_get_space_total(zpool_handle_t *zhp)
341{
342 nvlist_t *nvroot;
343 vdev_stat_t *vs;
344 uint_t vsc;
345
346 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
347 &nvroot) == 0);
348 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
349 (uint64_t **)&vs, &vsc) == 0);
350
351 return (vs->vs_space);
352}
353
354/*
355 * Return the alternate root for this pool, if any.
356 */
357int
358zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
359{
360 zfs_cmd_t zc = { 0 };
361
362 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
363 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
364 zc.zc_value[0] == '\0')
365 return (-1);
366
367 (void) strlcpy(buf, zc.zc_value, buflen);
368
369 return (0);
370}
371
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 *
 * Simply returns the state cached on the handle; no ioctl is issued.
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
380
381/*
382 * Create the named pool, using the provided vdev list. It is assumed
383 * that the consumer has already validated the contents of the nvlist, so we
384 * don't have to worry about error semantics.
385 */
386int
387zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
802 * Return the state of the pool (ACTIVE or UNAVAILABLE)
803 */
804int
805zpool_get_state(zpool_handle_t *zhp)
806{
807 return (zhp->zpool_state);
808}
809
810/*
811 * Create the named pool, using the provided vdev list. It is assumed
812 * that the consumer has already validated the contents of the nvlist, so we
813 * don't have to worry about error semantics.
814 */
815int
816zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
388 const char *altroot)
817 nvlist_t *props, nvlist_t *fsprops)
389{
390 zfs_cmd_t zc = { 0 };
818{
819 zfs_cmd_t zc = { 0 };
820 nvlist_t *zc_fsprops = NULL;
821 nvlist_t *zc_props = NULL;
391 char msg[1024];
822 char msg[1024];
823 char *altroot;
824 int ret = -1;
392
393 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
394 "cannot create '%s'"), pool);
395
396 if (!zpool_name_valid(hdl, B_FALSE, pool))
397 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
398
825
826 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
827 "cannot create '%s'"), pool);
828
829 if (!zpool_name_valid(hdl, B_FALSE, pool))
830 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
831
399 if (altroot != NULL && altroot[0] != '/')
400 return (zfs_error_fmt(hdl, EZFS_BADPATH,
401 dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));
402
403 if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
832 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
404 return (-1);
405
833 return (-1);
834
835 if (props) {
836 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
837 SPA_VERSION_1, B_TRUE, msg)) == NULL) {
838 goto create_failed;
839 }
840 }
841
842 if (fsprops) {
843 uint64_t zoned;
844 char *zonestr;
845
846 zoned = ((nvlist_lookup_string(fsprops,
847 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
848 strcmp(zonestr, "on") == 0);
849
850 if ((zc_fsprops = zfs_valid_proplist(hdl,
851 ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
852 goto create_failed;
853 }
854 if (!zc_props &&
855 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
856 goto create_failed;
857 }
858 if (nvlist_add_nvlist(zc_props,
859 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
860 goto create_failed;
861 }
862 }
863
864 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
865 goto create_failed;
866
406 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
407
867 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
868
408 if (altroot != NULL)
409 (void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
869 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
410
870
411 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
412 zcmd_free_nvlists(&zc);
871 zcmd_free_nvlists(&zc);
872 nvlist_free(zc_props);
873 nvlist_free(zc_fsprops);
413
414 switch (errno) {
415 case EBUSY:
416 /*
417 * This can happen if the user has specified the same
418 * device multiple times. We can't reliably detect this
419 * until we try to add it and see we already have a
420 * label.

--- 20 unchanged lines hidden (view full) ---

441 }
442 return (zfs_error(hdl, EZFS_BADDEV, msg));
443
444 case ENOSPC:
445 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
446 "one or more devices is out of space"));
447 return (zfs_error(hdl, EZFS_BADDEV, msg));
448
874
875 switch (errno) {
876 case EBUSY:
877 /*
878 * This can happen if the user has specified the same
879 * device multiple times. We can't reliably detect this
880 * until we try to add it and see we already have a
881 * label.

--- 20 unchanged lines hidden (view full) ---

902 }
903 return (zfs_error(hdl, EZFS_BADDEV, msg));
904
905 case ENOSPC:
906 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
907 "one or more devices is out of space"));
908 return (zfs_error(hdl, EZFS_BADDEV, msg));
909
910 case ENOTBLK:
911 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
912 "cache device must be a disk or disk slice"));
913 return (zfs_error(hdl, EZFS_BADDEV, msg));
914
449 default:
450 return (zpool_standard_error(hdl, errno, msg));
451 }
452 }
453
915 default:
916 return (zpool_standard_error(hdl, errno, msg));
917 }
918 }
919
454 zcmd_free_nvlists(&zc);
455
456 /*
457 * If this is an alternate root pool, then we automatically set the
458 * mountpoint of the root dataset to be '/'.
459 */
920 /*
921 * If this is an alternate root pool, then we automatically set the
922 * mountpoint of the root dataset to be '/'.
923 */
460 if (altroot != NULL) {
924 if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
925 &altroot) == 0) {
461 zfs_handle_t *zhp;
462
926 zfs_handle_t *zhp;
927
463 verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
928 verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
464 verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
465 "/") == 0);
466
467 zfs_close(zhp);
468 }
469
929 verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
930 "/") == 0);
931
932 zfs_close(zhp);
933 }
934
470 return (0);
935create_failed:
936 zcmd_free_nvlists(&zc);
937 nvlist_free(zc_props);
938 nvlist_free(zc_fsprops);
939 return (ret);
471}
472
473/*
474 * Destroy the given pool. It is up to the caller to ensure that there are no
475 * datasets left in the pool.
476 */
477int
478zpool_destroy(zpool_handle_t *zhp)

--- 8 unchanged lines hidden (view full) ---

487 ZFS_TYPE_FILESYSTEM)) == NULL)
488 return (-1);
489
490 if (zpool_remove_zvol_links(zhp) != 0)
491 return (-1);
492
493 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
494
940}
941
942/*
943 * Destroy the given pool. It is up to the caller to ensure that there are no
944 * datasets left in the pool.
945 */
946int
947zpool_destroy(zpool_handle_t *zhp)

--- 8 unchanged lines hidden (view full) ---

956 ZFS_TYPE_FILESYSTEM)) == NULL)
957 return (-1);
958
959 if (zpool_remove_zvol_links(zhp) != 0)
960 return (-1);
961
962 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
963
495 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
964 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
496 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
497 "cannot destroy '%s'"), zhp->zpool_name);
498
499 if (errno == EROFS) {
500 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
501 "one or more devices is read only"));
502 (void) zfs_error(hdl, EZFS_BADDEV, msg);
503 } else {

--- 19 unchanged lines hidden (view full) ---

523 */
524int
525zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
526{
527 zfs_cmd_t zc = { 0 };
528 int ret;
529 libzfs_handle_t *hdl = zhp->zpool_hdl;
530 char msg[1024];
965 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
966 "cannot destroy '%s'"), zhp->zpool_name);
967
968 if (errno == EROFS) {
969 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
970 "one or more devices is read only"));
971 (void) zfs_error(hdl, EZFS_BADDEV, msg);
972 } else {

--- 19 unchanged lines hidden (view full) ---

992 */
993int
994zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
995{
996 zfs_cmd_t zc = { 0 };
997 int ret;
998 libzfs_handle_t *hdl = zhp->zpool_hdl;
999 char msg[1024];
531 nvlist_t **spares;
532 uint_t nspares;
1000 nvlist_t **spares, **l2cache;
1001 uint_t nspares, nl2cache;
533
534 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
535 "cannot add to '%s'"), zhp->zpool_name);
536
1002
1003 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1004 "cannot add to '%s'"), zhp->zpool_name);
1005
537 if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
1006 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1007 SPA_VERSION_SPARES &&
538 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
539 &spares, &nspares) == 0) {
540 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
541 "upgraded to add hot spares"));
542 return (zfs_error(hdl, EZFS_BADVERSION, msg));
543 }
544
1008 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1009 &spares, &nspares) == 0) {
1010 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1011 "upgraded to add hot spares"));
1012 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1013 }
1014
545 if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
1015 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1016 SPA_VERSION_L2CACHE &&
1017 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1018 &l2cache, &nl2cache) == 0) {
1019 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1020 "upgraded to add cache devices"));
1021 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1022 }
1023
1024 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
546 return (-1);
547 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
548
1025 return (-1);
1026 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1027
549 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1028 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
550 switch (errno) {
551 case EBUSY:
552 /*
553 * This can happen if the user has specified the same
554 * device multiple times. We can't reliably detect this
555 * until we try to add it and see we already have a
556 * label.
557 */

--- 18 unchanged lines hidden (view full) ---

576 "device is less than the minimum "
577 "size (%s)"), buf);
578 }
579 (void) zfs_error(hdl, EZFS_BADDEV, msg);
580 break;
581
582 case ENOTSUP:
583 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1029 switch (errno) {
1030 case EBUSY:
1031 /*
1032 * This can happen if the user has specified the same
1033 * device multiple times. We can't reliably detect this
1034 * until we try to add it and see we already have a
1035 * label.
1036 */

--- 18 unchanged lines hidden (view full) ---

1055 "device is less than the minimum "
1056 "size (%s)"), buf);
1057 }
1058 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1059 break;
1060
1061 case ENOTSUP:
1062 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
584 "pool must be upgraded to add raidz2 vdevs"));
1063 "pool must be upgraded to add these vdevs"));
585 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
586 break;
587
588 case EDOM:
589 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1064 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1065 break;
1066
1067 case EDOM:
1068 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
590 "root pool can not have concatenated devices"));
1069 "root pool can not have multiple vdevs"
1070 " or separate logs"));
591 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
592 break;
593
1071 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1072 break;
1073
1074 case ENOTBLK:
1075 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1076 "cache device must be a disk or disk slice"));
1077 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1078 break;
1079
594 default:
595 (void) zpool_standard_error(hdl, errno, msg);
596 }
597
598 ret = -1;
599 } else {
600 ret = 0;
601 }
602
603 zcmd_free_nvlists(&zc);
604
605 return (ret);
606}
607
608/*
609 * Exports the pool from the system. The caller must ensure that there are no
610 * mounted datasets in the pool.
611 */
612int
1080 default:
1081 (void) zpool_standard_error(hdl, errno, msg);
1082 }
1083
1084 ret = -1;
1085 } else {
1086 ret = 0;
1087 }
1088
1089 zcmd_free_nvlists(&zc);
1090
1091 return (ret);
1092}
1093
1094/*
1095 * Exports the pool from the system. The caller must ensure that there are no
1096 * mounted datasets in the pool.
1097 */
1098int
613zpool_export(zpool_handle_t *zhp)
1099zpool_export(zpool_handle_t *zhp, boolean_t force)
614{
615 zfs_cmd_t zc = { 0 };
1100{
1101 zfs_cmd_t zc = { 0 };
1102 char msg[1024];
616
617 if (zpool_remove_zvol_links(zhp) != 0)
618 return (-1);
619
1103
1104 if (zpool_remove_zvol_links(zhp) != 0)
1105 return (-1);
1106
1107 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1108 "cannot export '%s'"), zhp->zpool_name);
1109
620 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1110 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1111 zc.zc_cookie = force;
621
1112
622 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0)
623 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
624 dgettext(TEXT_DOMAIN, "cannot export '%s'"),
625 zhp->zpool_name));
1113 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1114 switch (errno) {
1115 case EXDEV:
1116 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1117 "use '-f' to override the following errors:\n"
1118 "'%s' has an active shared spare which could be"
1119 " used by other pools once '%s' is exported."),
1120 zhp->zpool_name, zhp->zpool_name);
1121 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1122 msg));
1123 default:
1124 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1125 msg));
1126 }
1127 }
1128
626 return (0);
627}
628
629/*
1129 return (0);
1130}
1131
1132/*
630 * Import the given pool using the known configuration. The configuration
631 * should have come from zpool_find_import(). The 'newname' and 'altroot'
632 * parameters control whether the pool is imported with a different name or with
633 * an alternate root, respectively.
1133 * zpool_import() is a contracted interface. Should be kept the same
1134 * if possible.
1135 *
1136 * Applications should use zpool_import_props() to import a pool with
1137 * new properties value to be set.
634 */
635int
636zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1138 */
1139int
1140zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
637 const char *altroot)
1141 char *altroot)
638{
1142{
1143 nvlist_t *props = NULL;
1144 int ret;
1145
1146 if (altroot != NULL) {
1147 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1148 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1149 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1150 newname));
1151 }
1152
1153 if (nvlist_add_string(props,
1154 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0) {
1155 nvlist_free(props);
1156 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1157 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1158 newname));
1159 }
1160 }
1161
1162 ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
1163 if (props)
1164 nvlist_free(props);
1165 return (ret);
1166}
1167
1168/*
1169 * Import the given pool using the known configuration and a list of
1170 * properties to be set. The configuration should have come from
1171 * zpool_find_import(). The 'newname' parameters control whether the pool
1172 * is imported with a different name.
1173 */
1174int
1175zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1176 nvlist_t *props, boolean_t importfaulted)
1177{
639 zfs_cmd_t zc = { 0 };
640 char *thename;
641 char *origname;
642 int ret;
1178 zfs_cmd_t zc = { 0 };
1179 char *thename;
1180 char *origname;
1181 int ret;
1182 char errbuf[1024];
643
644 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
645 &origname) == 0);
646
1183
1184 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1185 &origname) == 0);
1186
1187 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1188 "cannot import pool '%s'"), origname);
1189
647 if (newname != NULL) {
648 if (!zpool_name_valid(hdl, B_FALSE, newname))
649 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
650 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
651 newname));
652 thename = (char *)newname;
653 } else {
654 thename = origname;
655 }
656
1190 if (newname != NULL) {
1191 if (!zpool_name_valid(hdl, B_FALSE, newname))
1192 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1193 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1194 newname));
1195 thename = (char *)newname;
1196 } else {
1197 thename = origname;
1198 }
1199
657 if (altroot != NULL && altroot[0] != '/')
658 return (zfs_error_fmt(hdl, EZFS_BADPATH,
659 dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
660 altroot));
1200 if (props) {
1201 uint64_t version;
661
1202
662 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1203 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1204 &version) == 0);
663
1205
664 if (altroot != NULL)
665 (void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
666 else
667 zc.zc_value[0] = '\0';
1206 if ((props = zpool_valid_proplist(hdl, origname,
1207 props, version, B_TRUE, errbuf)) == NULL) {
1208 return (-1);
1209 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1210 nvlist_free(props);
1211 return (-1);
1212 }
1213 }
668
1214
1215 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1216
669 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
670 &zc.zc_guid) == 0);
671
1217 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1218 &zc.zc_guid) == 0);
1219
672 if (zcmd_write_src_nvlist(hdl, &zc, config, NULL) != 0)
1220 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1221 nvlist_free(props);
673 return (-1);
1222 return (-1);
1223 }
674
1224
1225 zc.zc_cookie = (uint64_t)importfaulted;
675 ret = 0;
1226 ret = 0;
676 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
1227 if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
677 char desc[1024];
678 if (newname == NULL)
679 (void) snprintf(desc, sizeof (desc),
680 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
681 thename);
682 else
683 (void) snprintf(desc, sizeof (desc),
684 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),

--- 13 unchanged lines hidden (view full) ---

698
699 default:
700 (void) zpool_standard_error(hdl, errno, desc);
701 }
702
703 ret = -1;
704 } else {
705 zpool_handle_t *zhp;
1228 char desc[1024];
1229 if (newname == NULL)
1230 (void) snprintf(desc, sizeof (desc),
1231 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1232 thename);
1233 else
1234 (void) snprintf(desc, sizeof (desc),
1235 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),

--- 13 unchanged lines hidden (view full) ---

1249
1250 default:
1251 (void) zpool_standard_error(hdl, errno, desc);
1252 }
1253
1254 ret = -1;
1255 } else {
1256 zpool_handle_t *zhp;
1257
706 /*
707 * This should never fail, but play it safe anyway.
708 */
709 if (zpool_open_silent(hdl, thename, &zhp) != 0) {
710 ret = -1;
711 } else if (zhp != NULL) {
712 ret = zpool_create_zvol_links(zhp);
713 zpool_close(zhp);
714 }
1258 /*
1259 * This should never fail, but play it safe anyway.
1260 */
1261 if (zpool_open_silent(hdl, thename, &zhp) != 0) {
1262 ret = -1;
1263 } else if (zhp != NULL) {
1264 ret = zpool_create_zvol_links(zhp);
1265 zpool_close(zhp);
1266 }
1267
715 }
716
717 zcmd_free_nvlists(&zc);
1268 }
1269
1270 zcmd_free_nvlists(&zc);
1271 nvlist_free(props);
1272
718 return (ret);
719}
720
721/*
722 * Scrub the pool.
723 */
724int
725zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
726{
727 zfs_cmd_t zc = { 0 };
728 char msg[1024];
729 libzfs_handle_t *hdl = zhp->zpool_hdl;
730
731 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
732 zc.zc_cookie = type;
733
1273 return (ret);
1274}
1275
1276/*
1277 * Scrub the pool.
1278 */
1279int
1280zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
1281{
1282 zfs_cmd_t zc = { 0 };
1283 char msg[1024];
1284 libzfs_handle_t *hdl = zhp->zpool_hdl;
1285
1286 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1287 zc.zc_cookie = type;
1288
734 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
1289 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
735 return (0);
736
737 (void) snprintf(msg, sizeof (msg),
738 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
739
740 if (errno == EBUSY)
741 return (zfs_error(hdl, EZFS_RESILVERING, msg));
742 else
743 return (zpool_standard_error(hdl, errno, msg));
744}
745
746/*
747 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
748 * spare; but FALSE if its an INUSE spare.
749 */
750static nvlist_t *
751vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
1290 return (0);
1291
1292 (void) snprintf(msg, sizeof (msg),
1293 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1294
1295 if (errno == EBUSY)
1296 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1297 else
1298 return (zpool_standard_error(hdl, errno, msg));
1299}
1300
1301/*
1302 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1303 * spare; but FALSE if its an INUSE spare.
1304 */
1305static nvlist_t *
1306vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
752 boolean_t *avail_spare)
1307 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
753{
754 uint_t c, children;
755 nvlist_t **child;
756 uint64_t theguid, present;
757 char *path;
758 uint64_t wholedisk = 0;
759 nvlist_t *ret;
1308{
1309 uint_t c, children;
1310 nvlist_t **child;
1311 uint64_t theguid, present;
1312 char *path;
1313 uint64_t wholedisk = 0;
1314 nvlist_t *ret;
1315 uint64_t is_log;
760
761 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
762
763 if (search == NULL &&
764 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
765 /*
766 * If the device has never been present since import, the only
767 * reliable way to match the vdev is by GUID.

--- 16 unchanged lines hidden (view full) ---

784 return (nv);
785 }
786 }
787
788 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
789 &child, &children) != 0)
790 return (NULL);
791
1316
1317 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
1318
1319 if (search == NULL &&
1320 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
1321 /*
1322 * If the device has never been present since import, the only
1323 * reliable way to match the vdev is by GUID.

--- 16 unchanged lines hidden (view full) ---

1340 return (nv);
1341 }
1342 }
1343
1344 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1345 &child, &children) != 0)
1346 return (NULL);
1347
792 for (c = 0; c < children; c++)
1348 for (c = 0; c < children; c++) {
793 if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1349 if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
794 avail_spare)) != NULL)
1350 avail_spare, l2cache, NULL)) != NULL) {
1351 /*
1352 * The 'is_log' value is only set for the toplevel
1353 * vdev, not the leaf vdevs. So we always lookup the
1354 * log device from the root of the vdev tree (where
1355 * 'log' is non-NULL).
1356 */
1357 if (log != NULL &&
1358 nvlist_lookup_uint64(child[c],
1359 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
1360 is_log) {
1361 *log = B_TRUE;
1362 }
795 return (ret);
1363 return (ret);
1364 }
1365 }
796
797 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
798 &child, &children) == 0) {
799 for (c = 0; c < children; c++) {
800 if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1366
1367 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1368 &child, &children) == 0) {
1369 for (c = 0; c < children; c++) {
1370 if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
801 avail_spare)) != NULL) {
1371 avail_spare, l2cache, NULL)) != NULL) {
802 *avail_spare = B_TRUE;
803 return (ret);
804 }
805 }
806 }
807
1372 *avail_spare = B_TRUE;
1373 return (ret);
1374 }
1375 }
1376 }
1377
1378 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1379 &child, &children) == 0) {
1380 for (c = 0; c < children; c++) {
1381 if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1382 avail_spare, l2cache, NULL)) != NULL) {
1383 *l2cache = B_TRUE;
1384 return (ret);
1385 }
1386 }
1387 }
1388
808 return (NULL);
809}
810
811nvlist_t *
1389 return (NULL);
1390}
1391
1392nvlist_t *
812zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
1393zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
1394 boolean_t *l2cache, boolean_t *log)
813{
814 char buf[MAXPATHLEN];
815 const char *search;
816 char *end;
817 nvlist_t *nvroot;
818 uint64_t guid;
819
820 guid = strtoull(path, &end, 10);

--- 5 unchanged lines hidden (view full) ---

826 } else {
827 search = path;
828 }
829
830 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
831 &nvroot) == 0);
832
833 *avail_spare = B_FALSE;
1395{
1396 char buf[MAXPATHLEN];
1397 const char *search;
1398 char *end;
1399 nvlist_t *nvroot;
1400 uint64_t guid;
1401
1402 guid = strtoull(path, &end, 10);

--- 5 unchanged lines hidden (view full) ---

1408 } else {
1409 search = path;
1410 }
1411
1412 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1413 &nvroot) == 0);
1414
1415 *avail_spare = B_FALSE;
834 return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
1416 *l2cache = B_FALSE;
1417 if (log != NULL)
1418 *log = B_FALSE;
1419 return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare,
1420 l2cache, log));
835}
836
1421}
1422
1423static int
1424vdev_online(nvlist_t *nv)
1425{
1426 uint64_t ival;
1427
1428 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
1429 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
1430 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
1431 return (0);
1432
1433 return (1);
1434}
1435
837/*
1436/*
838 * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
1437 * Get phys_path for a root pool
1438 * Return 0 on success; non-zeron on failure.
839 */
1439 */
1440int
1441zpool_get_physpath(zpool_handle_t *zhp, char *physpath)
1442{
1443 char bootfs[ZPOOL_MAXNAMELEN];
1444 nvlist_t *vdev_root;
1445 nvlist_t **child;
1446 uint_t count;
1447 int i;
1448
1449 /*
1450 * Make sure this is a root pool, as phys_path doesn't mean
1451 * anything to a non-root pool.
1452 */
1453 if (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
1454 sizeof (bootfs), NULL) != 0)
1455 return (-1);
1456
1457 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1458 ZPOOL_CONFIG_VDEV_TREE, &vdev_root) == 0);
1459
1460 if (nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
1461 &child, &count) != 0)
1462 return (-2);
1463
1464 for (i = 0; i < count; i++) {
1465 nvlist_t **child2;
1466 uint_t count2;
1467 char *type;
1468 char *tmppath;
1469 int j;
1470
1471 if (nvlist_lookup_string(child[i], ZPOOL_CONFIG_TYPE, &type)
1472 != 0)
1473 return (-3);
1474
1475 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
1476 if (!vdev_online(child[i]))
1477 return (-8);
1478 verify(nvlist_lookup_string(child[i],
1479 ZPOOL_CONFIG_PHYS_PATH, &tmppath) == 0);
1480 (void) strncpy(physpath, tmppath, strlen(tmppath));
1481 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0) {
1482 if (nvlist_lookup_nvlist_array(child[i],
1483 ZPOOL_CONFIG_CHILDREN, &child2, &count2) != 0)
1484 return (-4);
1485
1486 for (j = 0; j < count2; j++) {
1487 if (!vdev_online(child2[j]))
1488 return (-8);
1489 if (nvlist_lookup_string(child2[j],
1490 ZPOOL_CONFIG_PHYS_PATH, &tmppath) != 0)
1491 return (-5);
1492
1493 if ((strlen(physpath) + strlen(tmppath)) >
1494 MAXNAMELEN)
1495 return (-6);
1496
1497 if (strlen(physpath) == 0) {
1498 (void) strncpy(physpath, tmppath,
1499 strlen(tmppath));
1500 } else {
1501 (void) strcat(physpath, " ");
1502 (void) strcat(physpath, tmppath);
1503 }
1504 }
1505 } else {
1506 return (-7);
1507 }
1508 }
1509
1510 return (0);
1511}
1512
1513/*
1514 * Returns TRUE if the given guid corresponds to the given type.
1515 * This is used to check for hot spares (INUSE or not), and level 2 cache
1516 * devices.
1517 */
840static boolean_t
1518static boolean_t
841is_spare(zpool_handle_t *zhp, uint64_t guid)
1519is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
842{
1520{
843 uint64_t spare_guid;
1521 uint64_t target_guid;
844 nvlist_t *nvroot;
1522 nvlist_t *nvroot;
845 nvlist_t **spares;
846 uint_t nspares;
1523 nvlist_t **list;
1524 uint_t count;
847 int i;
848
849 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
850 &nvroot) == 0);
1525 int i;
1526
1527 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1528 &nvroot) == 0);
851 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
852 &spares, &nspares) == 0) {
853 for (i = 0; i < nspares; i++) {
854 verify(nvlist_lookup_uint64(spares[i],
855 ZPOOL_CONFIG_GUID, &spare_guid) == 0);
856 if (guid == spare_guid)
1529 if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
1530 for (i = 0; i < count; i++) {
1531 verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
1532 &target_guid) == 0);
1533 if (guid == target_guid)
857 return (B_TRUE);
858 }
859 }
860
861 return (B_FALSE);
862}
863
864/*
1534 return (B_TRUE);
1535 }
1536 }
1537
1538 return (B_FALSE);
1539}
1540
1541/*
865 * Bring the specified vdev online
1542 * Bring the specified vdev online. The 'flags' parameter is a set of the
1543 * ZFS_ONLINE_* flags.
866 */
867int
1544 */
1545int
868zpool_vdev_online(zpool_handle_t *zhp, const char *path)
1546zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
1547 vdev_state_t *newstate)
869{
870 zfs_cmd_t zc = { 0 };
871 char msg[1024];
872 nvlist_t *tgt;
1548{
1549 zfs_cmd_t zc = { 0 };
1550 char msg[1024];
1551 nvlist_t *tgt;
873 boolean_t avail_spare;
1552 boolean_t avail_spare, l2cache;
874 libzfs_handle_t *hdl = zhp->zpool_hdl;
875
876 (void) snprintf(msg, sizeof (msg),
877 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
878
879 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1553 libzfs_handle_t *hdl = zhp->zpool_hdl;
1554
1555 (void) snprintf(msg, sizeof (msg),
1556 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
1557
1558 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
880 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1559 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1560 NULL)) == NULL)
881 return (zfs_error(hdl, EZFS_NODEVICE, msg));
882
883 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
884
1561 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1562
1563 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1564
885 if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
1565 if (avail_spare ||
1566 is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
886 return (zfs_error(hdl, EZFS_ISSPARE, msg));
887
1567 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1568
888 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
889 return (0);
1569 zc.zc_cookie = VDEV_STATE_ONLINE;
1570 zc.zc_obj = flags;
890
1571
891 return (zpool_standard_error(hdl, errno, msg));
1572 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
1573 return (zpool_standard_error(hdl, errno, msg));
1574
1575 *newstate = zc.zc_cookie;
1576 return (0);
892}
893
894/*
895 * Take the specified vdev offline
896 */
897int
1577}
1578
1579/*
1580 * Take the specified vdev offline
1581 */
1582int
898zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
1583zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
899{
900 zfs_cmd_t zc = { 0 };
901 char msg[1024];
902 nvlist_t *tgt;
1584{
1585 zfs_cmd_t zc = { 0 };
1586 char msg[1024];
1587 nvlist_t *tgt;
903 boolean_t avail_spare;
1588 boolean_t avail_spare, l2cache;
904 libzfs_handle_t *hdl = zhp->zpool_hdl;
905
906 (void) snprintf(msg, sizeof (msg),
907 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
908
909 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1589 libzfs_handle_t *hdl = zhp->zpool_hdl;
1590
1591 (void) snprintf(msg, sizeof (msg),
1592 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
1593
1594 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
910 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1595 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1596 NULL)) == NULL)
911 return (zfs_error(hdl, EZFS_NODEVICE, msg));
912
913 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
914
1597 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1598
1599 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1600
915 if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
1601 if (avail_spare ||
1602 is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
916 return (zfs_error(hdl, EZFS_ISSPARE, msg));
917
1603 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1604
918 zc.zc_cookie = istmp;
1605 zc.zc_cookie = VDEV_STATE_OFFLINE;
1606 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
919
1607
920 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
1608 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
921 return (0);
922
923 switch (errno) {
924 case EBUSY:
925
926 /*
927 * There are no other replicas of this device.
928 */
929 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
930
931 default:
932 return (zpool_standard_error(hdl, errno, msg));
933 }
934}
935
936/*
1609 return (0);
1610
1611 switch (errno) {
1612 case EBUSY:
1613
1614 /*
1615 * There are no other replicas of this device.
1616 */
1617 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1618
1619 default:
1620 return (zpool_standard_error(hdl, errno, msg));
1621 }
1622}
1623
1624/*
1625 * Mark the given vdev faulted.
1626 */
1627int
1628zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
1629{
1630 zfs_cmd_t zc = { 0 };
1631 char msg[1024];
1632 libzfs_handle_t *hdl = zhp->zpool_hdl;
1633
1634 (void) snprintf(msg, sizeof (msg),
1635 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
1636
1637 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1638 zc.zc_guid = guid;
1639 zc.zc_cookie = VDEV_STATE_FAULTED;
1640
1641 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1642 return (0);
1643
1644 switch (errno) {
1645 case EBUSY:
1646
1647 /*
1648 * There are no other replicas of this device.
1649 */
1650 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1651
1652 default:
1653 return (zpool_standard_error(hdl, errno, msg));
1654 }
1655
1656}
1657
1658/*
1659 * Mark the given vdev degraded.
1660 */
1661int
1662zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
1663{
1664 zfs_cmd_t zc = { 0 };
1665 char msg[1024];
1666 libzfs_handle_t *hdl = zhp->zpool_hdl;
1667
1668 (void) snprintf(msg, sizeof (msg),
1669 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
1670
1671 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1672 zc.zc_guid = guid;
1673 zc.zc_cookie = VDEV_STATE_DEGRADED;
1674
1675 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1676 return (0);
1677
1678 return (zpool_standard_error(hdl, errno, msg));
1679}
1680
1681/*
937 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
938 * a hot spare.
939 */
940static boolean_t
941is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
942{
943 nvlist_t **child;
944 uint_t c, children;

--- 13 unchanged lines hidden (view full) ---

958 return (B_TRUE);
959 }
960
961 return (B_FALSE);
962}
963
964/*
965 * Attach new_disk (fully described by nvroot) to old_disk.
1682 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1683 * a hot spare.
1684 */
1685static boolean_t
1686is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
1687{
1688 nvlist_t **child;
1689 uint_t c, children;

--- 13 unchanged lines hidden (view full) ---

1703 return (B_TRUE);
1704 }
1705
1706 return (B_FALSE);
1707}
1708
1709/*
1710 * Attach new_disk (fully described by nvroot) to old_disk.
966 * If 'replacing' is specified, tne new disk will replace the old one.
1711 * If 'replacing' is specified, the new disk will replace the old one.
967 */
968int
969zpool_vdev_attach(zpool_handle_t *zhp,
970 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
971{
972 zfs_cmd_t zc = { 0 };
973 char msg[1024];
974 int ret;
975 nvlist_t *tgt;
1712 */
1713int
1714zpool_vdev_attach(zpool_handle_t *zhp,
1715 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
1716{
1717 zfs_cmd_t zc = { 0 };
1718 char msg[1024];
1719 int ret;
1720 nvlist_t *tgt;
976 boolean_t avail_spare;
1721 boolean_t avail_spare, l2cache, islog;
977 uint64_t val;
1722 uint64_t val;
978 char *path;
1723 char *path, *newname;
979 nvlist_t **child;
980 uint_t children;
981 nvlist_t *config_root;
982 libzfs_handle_t *hdl = zhp->zpool_hdl;
983
984 if (replacing)
985 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
986 "cannot replace %s with %s"), old_disk, new_disk);
987 else
988 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
989 "cannot attach %s to %s"), new_disk, old_disk);
990
991 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1724 nvlist_t **child;
1725 uint_t children;
1726 nvlist_t *config_root;
1727 libzfs_handle_t *hdl = zhp->zpool_hdl;
1728
1729 if (replacing)
1730 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1731 "cannot replace %s with %s"), old_disk, new_disk);
1732 else
1733 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1734 "cannot attach %s to %s"), new_disk, old_disk);
1735
1736 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
992 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == 0)
1737 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
1738 &islog)) == 0)
993 return (zfs_error(hdl, EZFS_NODEVICE, msg));
994
995 if (avail_spare)
996 return (zfs_error(hdl, EZFS_ISSPARE, msg));
997
1739 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1740
1741 if (avail_spare)
1742 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1743
1744 if (l2cache)
1745 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1746
998 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
999 zc.zc_cookie = replacing;
1000
1001 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1002 &child, &children) != 0 || children != 1) {
1003 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1004 "new device must be a single disk"));
1005 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1006 }
1007
1008 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1009 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1010
1747 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1748 zc.zc_cookie = replacing;
1749
1750 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1751 &child, &children) != 0 || children != 1) {
1752 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1753 "new device must be a single disk"));
1754 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1755 }
1756
1757 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1758 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1759
1760 if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)
1761 return (-1);
1762
1011 /*
1012 * If the target is a hot spare that has been swapped in, we can only
1013 * replace it with another hot spare.
1014 */
1015 if (replacing &&
1016 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1763 /*
1764 * If the target is a hot spare that has been swapped in, we can only
1765 * replace it with another hot spare.
1766 */
1767 if (replacing &&
1768 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1017 nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1018 (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
1019 !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
1769 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
1770 NULL) == NULL || !avail_spare) &&
1771 is_replacing_spare(config_root, tgt, 1)) {
1020 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1021 "can only be replaced by another hot spare"));
1772 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1773 "can only be replaced by another hot spare"));
1774 free(newname);
1022 return (zfs_error(hdl, EZFS_BADTARGET, msg));
1023 }
1024
1025 /*
1026 * If we are attempting to replace a spare, it canot be applied to an
1027 * already spared device.
1028 */
1029 if (replacing &&
1030 nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1775 return (zfs_error(hdl, EZFS_BADTARGET, msg));
1776 }
1777
1778 /*
1779 * If we are attempting to replace a spare, it canot be applied to an
1780 * already spared device.
1781 */
1782 if (replacing &&
1783 nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1031 zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
1784 zpool_find_vdev(zhp, newname, &avail_spare,
1785 &l2cache, NULL) != NULL && avail_spare &&
1032 is_replacing_spare(config_root, tgt, 0)) {
1033 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1034 "device has already been replaced with a spare"));
1786 is_replacing_spare(config_root, tgt, 0)) {
1787 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1788 "device has already been replaced with a spare"));
1789 free(newname);
1035 return (zfs_error(hdl, EZFS_BADTARGET, msg));
1036 }
1037
1790 return (zfs_error(hdl, EZFS_BADTARGET, msg));
1791 }
1792
1038 if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
1793 free(newname);
1794
1795 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1039 return (-1);
1040
1796 return (-1);
1797
1041 ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);
1798 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
1042
1043 zcmd_free_nvlists(&zc);
1044
1045 if (ret == 0)
1046 return (0);
1047
1048 switch (errno) {
1049 case ENOTSUP:
1050 /*
1051 * Can't attach to or replace this type of vdev.
1052 */
1799
1800 zcmd_free_nvlists(&zc);
1801
1802 if (ret == 0)
1803 return (0);
1804
1805 switch (errno) {
1806 case ENOTSUP:
1807 /*
1808 * Can't attach to or replace this type of vdev.
1809 */
1053 if (replacing)
1810 if (replacing) {
1811 if (islog)
1812 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1813 "cannot replace a log with a spare"));
1814 else
1815 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1816 "cannot replace a replacing device"));
1817 } else {
1054 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1818 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1055 "cannot replace a replacing device"));
1056 else
1057 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1058 "can only attach to mirrors and top-level "
1059 "disks"));
1819 "can only attach to mirrors and top-level "
1820 "disks"));
1821 }
1060 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
1061 break;
1062
1063 case EINVAL:
1064 /*
1065 * The new device must be a single disk.
1066 */
1067 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,

--- 43 unchanged lines hidden (view full) ---

1111 * Detach the specified device.
1112 */
1113int
1114zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1115{
1116 zfs_cmd_t zc = { 0 };
1117 char msg[1024];
1118 nvlist_t *tgt;
1822 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
1823 break;
1824
1825 case EINVAL:
1826 /*
1827 * The new device must be a single disk.
1828 */
1829 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,

--- 43 unchanged lines hidden (view full) ---

1873 * Detach the specified device.
1874 */
1875int
1876zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1877{
1878 zfs_cmd_t zc = { 0 };
1879 char msg[1024];
1880 nvlist_t *tgt;
1119 boolean_t avail_spare;
1881 boolean_t avail_spare, l2cache;
1120 libzfs_handle_t *hdl = zhp->zpool_hdl;
1121
1122 (void) snprintf(msg, sizeof (msg),
1123 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1124
1125 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1882 libzfs_handle_t *hdl = zhp->zpool_hdl;
1883
1884 (void) snprintf(msg, sizeof (msg),
1885 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1886
1887 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1126 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1888 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1889 NULL)) == 0)
1127 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1128
1129 if (avail_spare)
1130 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1131
1890 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1891
1892 if (avail_spare)
1893 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1894
1895 if (l2cache)
1896 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1897
1132 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1133
1898 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1899
1134 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1900 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1135 return (0);
1136
1137 switch (errno) {
1138
1139 case ENOTSUP:
1140 /*
1141 * Can't detach from this type of vdev.
1142 */

--- 12 unchanged lines hidden (view full) ---

1155 default:
1156 (void) zpool_standard_error(hdl, errno, msg);
1157 }
1158
1159 return (-1);
1160}
1161
1162/*
1901 return (0);
1902
1903 switch (errno) {
1904
1905 case ENOTSUP:
1906 /*
1907 * Can't detach from this type of vdev.
1908 */

--- 12 unchanged lines hidden (view full) ---

1921 default:
1922 (void) zpool_standard_error(hdl, errno, msg);
1923 }
1924
1925 return (-1);
1926}
1927
1928/*
1163 * Remove the given device. Currently, this is supported only for hot spares.
1929 * Remove the given device. Currently, this is supported only for hot spares
1930 * and level 2 cache devices.
1164 */
1165int
1166zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1167{
1168 zfs_cmd_t zc = { 0 };
1169 char msg[1024];
1170 nvlist_t *tgt;
1931 */
1932int
1933zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1934{
1935 zfs_cmd_t zc = { 0 };
1936 char msg[1024];
1937 nvlist_t *tgt;
1171 boolean_t avail_spare;
1938 boolean_t avail_spare, l2cache;
1172 libzfs_handle_t *hdl = zhp->zpool_hdl;
1173
1174 (void) snprintf(msg, sizeof (msg),
1175 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1176
1177 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1939 libzfs_handle_t *hdl = zhp->zpool_hdl;
1940
1941 (void) snprintf(msg, sizeof (msg),
1942 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1943
1944 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1178 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1945 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1946 NULL)) == 0)
1179 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1180
1947 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1948
1181 if (!avail_spare) {
1949 if (!avail_spare && !l2cache) {
1182 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1950 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1183 "only inactive hot spares can be removed"));
1951 "only inactive hot spares or cache devices "
1952 "can be removed"));
1184 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1185 }
1186
1187 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1188
1953 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1954 }
1955
1956 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1957
1189 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1958 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1190 return (0);
1191
1192 return (zpool_standard_error(hdl, errno, msg));
1193}
1194
1195/*
1196 * Clear the errors for the pool, or the particular device if specified.
1197 */
1198int
1199zpool_clear(zpool_handle_t *zhp, const char *path)
1200{
1201 zfs_cmd_t zc = { 0 };
1202 char msg[1024];
1203 nvlist_t *tgt;
1959 return (0);
1960
1961 return (zpool_standard_error(hdl, errno, msg));
1962}
1963
1964/*
1965 * Clear the errors for the pool, or the particular device if specified.
1966 */
1967int
1968zpool_clear(zpool_handle_t *zhp, const char *path)
1969{
1970 zfs_cmd_t zc = { 0 };
1971 char msg[1024];
1972 nvlist_t *tgt;
1204 boolean_t avail_spare;
1973 boolean_t avail_spare, l2cache;
1205 libzfs_handle_t *hdl = zhp->zpool_hdl;
1206
1207 if (path)
1208 (void) snprintf(msg, sizeof (msg),
1209 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1210 path);
1211 else
1212 (void) snprintf(msg, sizeof (msg),
1213 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1214 zhp->zpool_name);
1215
1216 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1217 if (path) {
1974 libzfs_handle_t *hdl = zhp->zpool_hdl;
1975
1976 if (path)
1977 (void) snprintf(msg, sizeof (msg),
1978 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1979 path);
1980 else
1981 (void) snprintf(msg, sizeof (msg),
1982 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1983 zhp->zpool_name);
1984
1985 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1986 if (path) {
1218 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1987 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
1988 &l2cache, NULL)) == 0)
1219 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1220
1989 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1990
1991 /*
1992 * Don't allow error clearing for hot spares. Do allow
1993 * error clearing for l2cache devices.
1994 */
1221 if (avail_spare)
1222 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1223
1224 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1225 &zc.zc_guid) == 0);
1226 }
1227
1995 if (avail_spare)
1996 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1997
1998 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1999 &zc.zc_guid) == 0);
2000 }
2001
2002 if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
2003 return (0);
2004
2005 return (zpool_standard_error(hdl, errno, msg));
2006}
2007
2008/*
2009 * Similar to zpool_clear(), but takes a GUID (used by fmd).
2010 */
2011int
2012zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2013{
2014 zfs_cmd_t zc = { 0 };
2015 char msg[1024];
2016 libzfs_handle_t *hdl = zhp->zpool_hdl;
2017
2018 (void) snprintf(msg, sizeof (msg),
2019 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
2020 guid);
2021
2022 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2023 zc.zc_guid = guid;
2024
1228 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1229 return (0);
1230
1231 return (zpool_standard_error(hdl, errno, msg));
1232}
1233
1234/*
1235 * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>

--- 101 unchanged lines hidden (view full) ---

1337 zpool_handle_t *zcb_pool;
1338 boolean_t zcb_create;
1339} zvol_cb_t;
1340
1341/*ARGSUSED*/
1342static int
1343do_zvol_create(zfs_handle_t *zhp, void *data)
1344{
2025 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2026 return (0);
2027
2028 return (zpool_standard_error(hdl, errno, msg));
2029}
2030
2031/*
2032 * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>

--- 101 unchanged lines hidden (view full) ---

2134 zpool_handle_t *zcb_pool;
2135 boolean_t zcb_create;
2136} zvol_cb_t;
2137
2138/*ARGSUSED*/
2139static int
2140do_zvol_create(zfs_handle_t *zhp, void *data)
2141{
1345 int ret;
2142 int ret = 0;
1346
2143
1347 if (ZFS_IS_VOLUME(zhp))
2144 if (ZFS_IS_VOLUME(zhp)) {
1348 (void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
2145 (void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
2146 ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
2147 }
1349
2148
1350 ret = zfs_iter_children(zhp, do_zvol_create, NULL);
2149 if (ret == 0)
2150 ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);
1351
1352 zfs_close(zhp);
1353
1354 return (ret);
1355}
1356
1357/*
1358 * Iterate over all zvols in the pool and make any necessary minor nodes.

--- 6 unchanged lines hidden (view full) ---

1365
1366 /*
1367 * If the pool is unavailable, just return success.
1368 */
1369 if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1370 zhp->zpool_name)) == NULL)
1371 return (0);
1372
2151
2152 zfs_close(zhp);
2153
2154 return (ret);
2155}
2156
2157/*
2158 * Iterate over all zvols in the pool and make any necessary minor nodes.

--- 6 unchanged lines hidden (view full) ---

2165
2166 /*
2167 * If the pool is unavailable, just return success.
2168 */
2169 if ((zfp = make_dataset_handle(zhp->zpool_hdl,
2170 zhp->zpool_name)) == NULL)
2171 return (0);
2172
1373 ret = zfs_iter_children(zfp, do_zvol_create, NULL);
2173 ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);
1374
1375 zfs_close(zfp);
1376 return (ret);
1377}
1378
1379static int
1380do_zvol_remove(const char *dataset, void *data)
1381{

--- 105 unchanged lines hidden (view full) ---

1487 * of these checks.
1488 */
1489char *
1490zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
1491{
1492 char *path, *devid;
1493 uint64_t value;
1494 char buf[64];
2174
2175 zfs_close(zfp);
2176 return (ret);
2177}
2178
2179static int
2180do_zvol_remove(const char *dataset, void *data)
2181{

--- 105 unchanged lines hidden (view full) ---

2287 * of these checks.
2288 */
2289char *
2290zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
2291{
2292 char *path, *devid;
2293 uint64_t value;
2294 char buf[64];
2295 vdev_stat_t *vs;
2296 uint_t vsc;
1495
1496 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1497 &value) == 0) {
1498 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1499 &value) == 0);
1500 (void) snprintf(buf, sizeof (buf), "%llu",
1501 (u_longlong_t)value);
1502 path = buf;
1503 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1504
2297
2298 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2299 &value) == 0) {
2300 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2301 &value) == 0);
2302 (void) snprintf(buf, sizeof (buf), "%llu",
2303 (u_longlong_t)value);
2304 path = buf;
2305 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2306
1505 if (zhp != NULL &&
2307 /*
2308 * If the device is dead (faulted, offline, etc) then don't
2309 * bother opening it. Otherwise we may be forcing the user to
2310 * open a misbehaving device, which can have undesirable
2311 * effects.
2312 */
2313 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2314 (uint64_t **)&vs, &vsc) != 0 ||
2315 vs->vs_state >= VDEV_STATE_DEGRADED) &&
2316 zhp != NULL &&
1506 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
1507 /*
1508 * Determine if the current path is correct.
1509 */
1510 char *newdevid = path_to_devid(path);
1511
1512 if (newdevid == NULL ||
1513 strcmp(devid, newdevid) != 0) {

--- 66 unchanged lines hidden (view full) ---

1580
1581 /*
1582 * Retrieve the raw error list from the kernel. If the number of errors
1583 * has increased, allocate more space and continue until we get the
1584 * entire list.
1585 */
1586 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
1587 &count) == 0);
2317 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2318 /*
2319 * Determine if the current path is correct.
2320 */
2321 char *newdevid = path_to_devid(path);
2322
2323 if (newdevid == NULL ||
2324 strcmp(devid, newdevid) != 0) {

--- 66 unchanged lines hidden (view full) ---

2391
2392 /*
2393 * Retrieve the raw error list from the kernel. If the number of errors
2394 * has increased, allocate more space and continue until we get the
2395 * entire list.
2396 */
2397 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
2398 &count) == 0);
2399 if (count == 0)
2400 return (0);
1588 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
1589 count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
1590 return (-1);
1591 zc.zc_nvlist_dst_size = count;
1592 (void) strcpy(zc.zc_name, zhp->zpool_name);
1593 for (;;) {
1594 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
1595 &zc) != 0) {

--- 64 unchanged lines hidden (view full) ---

1660 free((void *)(uintptr_t)zc.zc_nvlist_dst);
1661 return (no_memory(zhp->zpool_hdl));
1662}
1663
1664/*
1665 * Upgrade a ZFS pool to the latest on-disk version.
1666 */
1667int
2401 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
2402 count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
2403 return (-1);
2404 zc.zc_nvlist_dst_size = count;
2405 (void) strcpy(zc.zc_name, zhp->zpool_name);
2406 for (;;) {
2407 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
2408 &zc) != 0) {

--- 64 unchanged lines hidden (view full) ---

2473 free((void *)(uintptr_t)zc.zc_nvlist_dst);
2474 return (no_memory(zhp->zpool_hdl));
2475}
2476
2477/*
2478 * Upgrade a ZFS pool to the latest on-disk version.
2479 */
2480int
1668zpool_upgrade(zpool_handle_t *zhp)
2481zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
1669{
1670 zfs_cmd_t zc = { 0 };
1671 libzfs_handle_t *hdl = zhp->zpool_hdl;
1672
1673 (void) strcpy(zc.zc_name, zhp->zpool_name);
2482{
2483 zfs_cmd_t zc = { 0 };
2484 libzfs_handle_t *hdl = zhp->zpool_hdl;
2485
2486 (void) strcpy(zc.zc_name, zhp->zpool_name);
1674 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
2487 zc.zc_cookie = new_version;
2488
2489 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
1675 return (zpool_standard_error_fmt(hdl, errno,
1676 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
1677 zhp->zpool_name));
2490 return (zpool_standard_error_fmt(hdl, errno,
2491 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
2492 zhp->zpool_name));
1678
1679 return (0);
1680}
1681
2493 return (0);
2494}
2495
1682/*
1683 * Log command history.
1684 *
1685 * 'pool' is B_TRUE if we are logging a command for 'zpool'; B_FALSE
1686 * otherwise ('zfs'). 'pool_create' is B_TRUE if we are logging the creation
1687 * of the pool; B_FALSE otherwise. 'path' is the pathanme containing the
1688 * poolname. 'argc' and 'argv' are used to construct the command string.
1689 */
1690void
2496void
1691zpool_log_history(libzfs_handle_t *hdl, int argc, char **argv, const char *path,
1692 boolean_t pool, boolean_t pool_create)
2497zpool_set_history_str(const char *subcommand, int argc, char **argv,
2498 char *history_str)
1693{
2499{
1694 char cmd_buf[HIS_MAX_RECORD_LEN];
1695 char *dspath;
1696 zfs_cmd_t zc = { 0 };
1697 int i;
1698
2500 int i;
2501
1699 /* construct the command string */
1700 (void) strcpy(cmd_buf, pool ? "zpool" : "zfs");
1701 for (i = 0; i < argc; i++) {
1702 if (strlen(cmd_buf) + 1 + strlen(argv[i]) > HIS_MAX_RECORD_LEN)
2502 (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
2503 for (i = 1; i < argc; i++) {
2504 if (strlen(history_str) + 1 + strlen(argv[i]) >
2505 HIS_MAX_RECORD_LEN)
1703 break;
2506 break;
1704 (void) strcat(cmd_buf, " ");
1705 (void) strcat(cmd_buf, argv[i]);
2507 (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
2508 (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
1706 }
2509 }
2510}
1707
2511
1708 /* figure out the poolname */
1709 dspath = strpbrk(path, "/@");
1710 if (dspath == NULL) {
1711 (void) strcpy(zc.zc_name, path);
1712 } else {
1713 (void) strncpy(zc.zc_name, path, dspath - path);
1714 zc.zc_name[dspath-path] = '\0';
1715 }
2512/*
2513 * Stage command history for logging.
2514 */
2515int
2516zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
2517{
2518 if (history_str == NULL)
2519 return (EINVAL);
1716
2520
1717 zc.zc_history = (uint64_t)(uintptr_t)cmd_buf;
1718 zc.zc_history_len = strlen(cmd_buf);
2521 if (strlen(history_str) > HIS_MAX_RECORD_LEN)
2522 return (EINVAL);
1719
2523
1720 /* overloading zc_history_offset */
1721 zc.zc_history_offset = pool_create;
2524 if (hdl->libzfs_log_str != NULL)
2525 free(hdl->libzfs_log_str);
1722
2526
1723 (void) ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_LOG_HISTORY, &zc);
2527 if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
2528 return (no_memory(hdl));
2529
2530 return (0);
1724}
1725
1726/*
1727 * Perform ioctl to get some command history of a pool.
1728 *
1729 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
1730 * logical offset of the history buffer to start reading from.
1731 *

--- 169 unchanged lines hidden (view full) ---

1901 dsname, zc.zc_value);
1902 }
1903 } else {
1904 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
1905 }
1906 free(mntpnt);
1907}
1908
2531}
2532
2533/*
2534 * Perform ioctl to get some command history of a pool.
2535 *
2536 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
2537 * logical offset of the history buffer to start reading from.
2538 *

--- 169 unchanged lines hidden (view full) ---

2708 dsname, zc.zc_value);
2709 }
2710 } else {
2711 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
2712 }
2713 free(mntpnt);
2714}
2715
1909int
1910zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
2716#define RDISK_ROOT "/dev/rdsk"
2717#define BACKUP_SLICE "s2"
2718/*
2719 * Don't start the slice at the default block of 34; many storage
2720 * devices will use a stripe width of 128k, so start there instead.
2721 */
2722#define NEW_START_BLOCK 256
2723
2724#if defined(sun)
2725/*
2726 * Read the EFI label from the config, if a label does not exist then
2727 * pass back the error to the caller. If the caller has passed a non-NULL
2728 * diskaddr argument then we set it to the starting address of the EFI
2729 * partition.
2730 */
2731static int
2732read_efi_label(nvlist_t *config, diskaddr_t *sb)
1911{
2733{
1912 zfs_cmd_t zc = { 0 };
1913 int ret = -1;
1914 char errbuf[1024];
1915 nvlist_t *nvl = NULL;
1916 nvlist_t *realprops;
2734 char *path;
2735 int fd;
2736 char diskname[MAXPATHLEN];
2737 int err = -1;
1917
2738
1918 (void) snprintf(errbuf, sizeof (errbuf),
1919 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
1920 zhp->zpool_name);
2739 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
2740 return (err);
1921
2741
1922 if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
1923 zfs_error_aux(zhp->zpool_hdl,
1924 dgettext(TEXT_DOMAIN, "pool must be "
1925 "upgraded to support pool properties"));
1926 return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, errbuf));
2742 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
2743 strrchr(path, '/'));
2744 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2745 struct dk_gpt *vtoc;
2746
2747 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
2748 if (sb != NULL)
2749 *sb = vtoc->efi_parts[0].p_start;
2750 efi_free(vtoc);
2751 }
2752 (void) close(fd);
1927 }
2753 }
2754 return (err);
2755}
1928
2756
1929 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
1930 return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
2757/*
2758 * determine where a partition starts on a disk in the current
2759 * configuration
2760 */
2761static diskaddr_t
2762find_start_block(nvlist_t *config)
2763{
2764 nvlist_t **child;
2765 uint_t c, children;
2766 diskaddr_t sb = MAXOFFSET_T;
2767 uint64_t wholedisk;
1931
2768
1932 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
1933 nvlist_add_string(nvl, propname, propval) != 0) {
1934 return (no_memory(zhp->zpool_hdl));
2769 if (nvlist_lookup_nvlist_array(config,
2770 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2771 if (nvlist_lookup_uint64(config,
2772 ZPOOL_CONFIG_WHOLE_DISK,
2773 &wholedisk) != 0 || !wholedisk) {
2774 return (MAXOFFSET_T);
2775 }
2776 if (read_efi_label(config, &sb) < 0)
2777 sb = MAXOFFSET_T;
2778 return (sb);
1935 }
1936
2779 }
2780
1937 if ((realprops = zfs_validate_properties(zhp->zpool_hdl, ZFS_TYPE_POOL,
1938 zhp->zpool_name, nvl, 0, NULL, errbuf)) == NULL) {
1939 nvlist_free(nvl);
1940 return (-1);
2781 for (c = 0; c < children; c++) {
2782 sb = find_start_block(child[c]);
2783 if (sb != MAXOFFSET_T) {
2784 return (sb);
2785 }
1941 }
2786 }
2787 return (MAXOFFSET_T);
2788}
2789#endif /* sun */
1942
2790
1943 nvlist_free(nvl);
1944 nvl = realprops;
2791/*
2792 * Label an individual disk. The name provided is the short name,
2793 * stripped of any leading /dev path.
2794 */
2795int
2796zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
2797{
2798#if defined(sun)
2799 char path[MAXPATHLEN];
2800 struct dk_gpt *vtoc;
2801 int fd;
2802 size_t resv = EFI_MIN_RESV_SIZE;
2803 uint64_t slice_size;
2804 diskaddr_t start_block;
2805 char errbuf[1024];
1945
2806
1946 /*
1947 * Execute the corresponding ioctl() to set this property.
1948 */
1949 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2807 /* prepare an error message just in case */
2808 (void) snprintf(errbuf, sizeof (errbuf),
2809 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
1950
2810
1951 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl, NULL) != 0)
1952 return (-1);
2811 if (zhp) {
2812 nvlist_t *nvroot;
1953
2813
1954 ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SET_PROPS, &zc);
1955 zcmd_free_nvlists(&zc);
2814 verify(nvlist_lookup_nvlist(zhp->zpool_config,
2815 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1956
2816
1957 if (ret)
1958 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
2817 if (zhp->zpool_start_block == 0)
2818 start_block = find_start_block(nvroot);
2819 else
2820 start_block = zhp->zpool_start_block;
2821 zhp->zpool_start_block = start_block;
2822 } else {
2823 /* new pool */
2824 start_block = NEW_START_BLOCK;
2825 }
1959
2826
1960 return (ret);
1961}
2827 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
2828 BACKUP_SLICE);
1962
2829
1963int
1964zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *propbuf,
1965 size_t proplen, zfs_source_t *srctype)
1966{
1967 uint64_t value;
1968 char msg[1024], *strvalue;
1969 nvlist_t *nvp;
1970 zfs_source_t src = ZFS_SRC_NONE;
2830 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2831 /*
2832 * This shouldn't happen. We've long since verified that this
2833 * is a valid device.
2834 */
2835 zfs_error_aux(hdl,
2836 dgettext(TEXT_DOMAIN, "unable to open device"));
2837 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2838 }
1971
2839
1972 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1973 "cannot get property '%s'"), zpool_prop_to_name(prop));
2840 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
2841 /*
2842 * The only way this can fail is if we run out of memory, or we
2843 * were unable to read the disk's capacity
2844 */
2845 if (errno == ENOMEM)
2846 (void) no_memory(hdl);
1974
2847
1975 if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
1976 zfs_error_aux(zhp->zpool_hdl,
1977 dgettext(TEXT_DOMAIN, "pool must be "
1978 "upgraded to support pool properties"));
1979 return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, msg));
2848 (void) close(fd);
2849 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2850 "unable to read disk capacity"), name);
2851
2852 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
1980 }
1981
2853 }
2854
1982 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
1983 return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, msg));
2855 slice_size = vtoc->efi_last_u_lba + 1;
2856 slice_size -= EFI_MIN_RESV_SIZE;
2857 if (start_block == MAXOFFSET_T)
2858 start_block = NEW_START_BLOCK;
2859 slice_size -= start_block;
1984
2860
2861 vtoc->efi_parts[0].p_start = start_block;
2862 vtoc->efi_parts[0].p_size = slice_size;
2863
1985 /*
2864 /*
1986 * the "name" property is special cased
2865 * Why we use V_USR: V_BACKUP confuses users, and is considered
2866 * disposable by some EFI utilities (since EFI doesn't have a backup
2867 * slice). V_UNASSIGNED is supposed to be used only for zero size
2868 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
2869 * etc. were all pretty specific. V_USR is as close to reality as we
2870 * can get, in the absence of V_OTHER.
1987 */
2871 */
1988 if (!zfs_prop_valid_for_type(prop, ZFS_TYPE_POOL) &&
1989 prop != ZFS_PROP_NAME)
1990 return (-1);
2872 vtoc->efi_parts[0].p_tag = V_USR;
2873 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
1991
2874
1992 switch (prop) {
1993 case ZFS_PROP_NAME:
1994 (void) strlcpy(propbuf, zhp->zpool_name, proplen);
1995 break;
2875 vtoc->efi_parts[8].p_start = slice_size + start_block;
2876 vtoc->efi_parts[8].p_size = resv;
2877 vtoc->efi_parts[8].p_tag = V_RESERVED;
1996
2878
1997 case ZFS_PROP_BOOTFS:
1998 if (nvlist_lookup_nvlist(zhp->zpool_props,
1999 zpool_prop_to_name(prop), &nvp) != 0) {
2000 strvalue = (char *)zfs_prop_default_string(prop);
2001 if (strvalue == NULL)
2002 strvalue = "-";
2003 src = ZFS_SRC_DEFAULT;
2004 } else {
2005 VERIFY(nvlist_lookup_uint64(nvp,
2006 ZFS_PROP_SOURCE, &value) == 0);
2007 src = value;
2008 VERIFY(nvlist_lookup_string(nvp, ZFS_PROP_VALUE,
2009 &strvalue) == 0);
2010 if (strlen(strvalue) >= proplen)
2011 return (-1);
2012 }
2013 (void) strcpy(propbuf, strvalue);
2014 break;
2879 if (efi_write(fd, vtoc) != 0) {
2880 /*
2881 * Some block drivers (like pcata) may not support EFI
2882 * GPT labels. Print out a helpful error message dir-
2883 * ecting the user to manually label the disk and give
2884 * a specific slice.
2885 */
2886 (void) close(fd);
2887 efi_free(vtoc);
2015
2888
2016 default:
2017 return (-1);
2889 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2890 "try using fdisk(1M) and then provide a specific slice"));
2891 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2018 }
2892 }
2019 if (srctype)
2020 *srctype = src;
2893
2894 (void) close(fd);
2895 efi_free(vtoc);
2896#endif /* sun */
2021 return (0);
2022}
2023
2897 return (0);
2898}
2899
2024int
2025zpool_get_proplist(libzfs_handle_t *hdl, char *fields, zpool_proplist_t **listp)
2900static boolean_t
2901supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
2026{
2902{
2027 return (zfs_get_proplist_common(hdl, fields, listp, ZFS_TYPE_POOL));
2903 char *type;
2904 nvlist_t **child;
2905 uint_t children, c;
2906
2907 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
2908 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2909 strcmp(type, VDEV_TYPE_FILE) == 0 ||
2910 strcmp(type, VDEV_TYPE_LOG) == 0 ||
2911 strcmp(type, VDEV_TYPE_MISSING) == 0) {
2912 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2913 "vdev type '%s' is not supported"), type);
2914 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
2915 return (B_FALSE);
2916 }
2917 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
2918 &child, &children) == 0) {
2919 for (c = 0; c < children; c++) {
2920 if (!supported_dump_vdev_type(hdl, child[c], errbuf))
2921 return (B_FALSE);
2922 }
2923 }
2924 return (B_TRUE);
2028}
2029
2925}
2926
2030
2927/*
2928 * check if this zvol is allowable for use as a dump device; zero if
2929 * it is, > 0 if it isn't, < 0 if it isn't a zvol
2930 */
2031int
2931int
2032zpool_expand_proplist(zpool_handle_t *zhp, zpool_proplist_t **plp)
2932zvol_check_dump_config(char *arg)
2033{
2933{
2034 libzfs_handle_t *hdl = zhp->zpool_hdl;
2035 zpool_proplist_t *entry;
2036 char buf[ZFS_MAXPROPLEN];
2934 zpool_handle_t *zhp = NULL;
2935 nvlist_t *config, *nvroot;
2936 char *p, *volname;
2937 nvlist_t **top;
2938 uint_t toplevels;
2939 libzfs_handle_t *hdl;
2940 char errbuf[1024];
2941 char poolname[ZPOOL_MAXNAMELEN];
2942 int pathlen = strlen(ZVOL_FULL_DEV_DIR);
2943 int ret = 1;
2037
2944
2038 if (zfs_expand_proplist_common(hdl, plp, ZFS_TYPE_POOL) != 0)
2945 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
2039 return (-1);
2946 return (-1);
2947 }
2040
2948
2041 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
2949 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2950 "dump is not supported on device '%s'"), arg);
2042
2951
2043 if (entry->pl_fixed)
2044 continue;
2952 if ((hdl = libzfs_init()) == NULL)
2953 return (1);
2954 libzfs_print_on_error(hdl, B_TRUE);
2045
2955
2046 if (entry->pl_prop != ZFS_PROP_INVAL &&
2047 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
2048 NULL) == 0) {
2049 if (strlen(buf) > entry->pl_width)
2050 entry->pl_width = strlen(buf);
2051 }
2956 volname = arg + pathlen;
2957
2958 /* check the configuration of the pool */
2959 if ((p = strchr(volname, '/')) == NULL) {
2960 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2961 "malformed dataset name"));
2962 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
2963 return (1);
2964 } else if (p - volname >= ZFS_MAXNAMELEN) {
2965 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2966 "dataset name is too long"));
2967 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
2968 return (1);
2969 } else {
2970 (void) strncpy(poolname, volname, p - volname);
2971 poolname[p - volname] = '\0';
2052 }
2053
2972 }
2973
2054 return (0);
2974 if ((zhp = zpool_open(hdl, poolname)) == NULL) {
2975 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2976 "could not open pool '%s'"), poolname);
2977 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
2978 goto out;
2979 }
2980 config = zpool_get_config(zhp, NULL);
2981 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2982 &nvroot) != 0) {
2983 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2984 "could not obtain vdev configuration for '%s'"), poolname);
2985 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
2986 goto out;
2987 }
2988
2989 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2990 &top, &toplevels) == 0);
2991 if (toplevels != 1) {
2992 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2993 "'%s' has multiple top level vdevs"), poolname);
2994 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
2995 goto out;
2996 }
2997
2998 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
2999 goto out;
3000 }
3001 ret = 0;
3002
3003out:
3004 if (zhp)
3005 zpool_close(zhp);
3006 libzfs_fini(hdl);
3007 return (ret);
2055}
3008}