1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22/* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 25 * Copyright (c) 2012 by Delphix. All rights reserved.
26 */ 27 28#include <sys/types.h> 29#include <sys/stat.h> 30#include <ctype.h> 31#include <errno.h> 32#include <devid.h> 33#include <fcntl.h> 34#include <libintl.h> 35#include <stdio.h> 36#include <stdlib.h> 37#include <strings.h> 38#include <unistd.h> 39#include <sys/zfs_ioctl.h> 40#include <dlfcn.h> 41 42#include "zfs_namecheck.h" 43#include "zfs_prop.h" 44#include "libzfs_impl.h" 45#include "zfs_comutil.h" 46 47static int read_efi_label(nvlist_t *config, diskaddr_t *sb); 48 49#define DISK_ROOT "/dev/dsk" 50#define RDISK_ROOT "/dev/rdsk" 51#define BACKUP_SLICE "s2" 52 53typedef struct prop_flags { 54 int create:1; /* Validate property on creation */ 55 int import:1; /* Validate property on import */ 56} prop_flags_t; 57 58/* 59 * ==================================================================== 60 * zpool property functions 61 * ==================================================================== 62 */ 63 64static int 65zpool_get_all_props(zpool_handle_t *zhp) 66{ 67 zfs_cmd_t zc = { 0 }; 68 libzfs_handle_t *hdl = zhp->zpool_hdl; 69 70 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 71 72 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 73 return (-1); 74 75 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 76 if (errno == ENOMEM) { 77 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 78 zcmd_free_nvlists(&zc); 79 return (-1); 80 } 81 } else { 82 zcmd_free_nvlists(&zc); 83 return (-1); 84 } 85 } 86 87 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 88 zcmd_free_nvlists(&zc); 89 return (-1); 90 } 91 92 zcmd_free_nvlists(&zc); 93 94 return (0); 95} 96 97static int 98zpool_props_refresh(zpool_handle_t *zhp) 99{ 100 nvlist_t *old_props; 101 102 old_props = zhp->zpool_props; 103 104 if (zpool_get_all_props(zhp) != 0) 105 return (-1); 106 107 nvlist_free(old_props); 108 return (0); 109} 110 111static char * 112zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 113 zprop_source_t *src) 114{ 115 nvlist_t *nv, *nvl; 116 
uint64_t ival; 117 char *value; 118 zprop_source_t source; 119 120 nvl = zhp->zpool_props; 121 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 122 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 123 source = ival; 124 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 125 } else { 126 source = ZPROP_SRC_DEFAULT; 127 if ((value = (char *)zpool_prop_default_string(prop)) == NULL) 128 value = "-"; 129 } 130 131 if (src) 132 *src = source; 133 134 return (value); 135} 136 137uint64_t 138zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 139{ 140 nvlist_t *nv, *nvl; 141 uint64_t value; 142 zprop_source_t source; 143 144 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 145 /* 146 * zpool_get_all_props() has most likely failed because 147 * the pool is faulted, but if all we need is the top level 148 * vdev's guid then get it from the zhp config nvlist. 149 */ 150 if ((prop == ZPOOL_PROP_GUID) && 151 (nvlist_lookup_nvlist(zhp->zpool_config, 152 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 153 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 154 == 0)) { 155 return (value); 156 } 157 return (zpool_prop_default_numeric(prop)); 158 } 159 160 nvl = zhp->zpool_props; 161 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 162 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 163 source = value; 164 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 165 } else { 166 source = ZPROP_SRC_DEFAULT; 167 value = zpool_prop_default_numeric(prop); 168 } 169 170 if (src) 171 *src = source; 172 173 return (value); 174} 175 176/* 177 * Map VDEV STATE to printed strings. 
178 */ 179const char * 180zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 181{ 182 switch (state) { 183 case VDEV_STATE_CLOSED: 184 case VDEV_STATE_OFFLINE: 185 return (gettext("OFFLINE")); 186 case VDEV_STATE_REMOVED: 187 return (gettext("REMOVED")); 188 case VDEV_STATE_CANT_OPEN: 189 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 190 return (gettext("FAULTED")); 191 else if (aux == VDEV_AUX_SPLIT_POOL) 192 return (gettext("SPLIT")); 193 else 194 return (gettext("UNAVAIL")); 195 case VDEV_STATE_FAULTED: 196 return (gettext("FAULTED")); 197 case VDEV_STATE_DEGRADED: 198 return (gettext("DEGRADED")); 199 case VDEV_STATE_HEALTHY: 200 return (gettext("ONLINE")); 201 } 202 203 return (gettext("UNKNOWN")); 204} 205 206/* 207 * Map POOL STATE to printed strings. 208 */ 209const char * 210zpool_pool_state_to_name(pool_state_t state) 211{ 212 switch (state) { 213 case POOL_STATE_ACTIVE: 214 return (gettext("ACTIVE")); 215 case POOL_STATE_EXPORTED: 216 return (gettext("EXPORTED")); 217 case POOL_STATE_DESTROYED: 218 return (gettext("DESTROYED")); 219 case POOL_STATE_SPARE: 220 return (gettext("SPARE")); 221 case POOL_STATE_L2CACHE: 222 return (gettext("L2CACHE")); 223 case POOL_STATE_UNINITIALIZED: 224 return (gettext("UNINITIALIZED")); 225 case POOL_STATE_UNAVAIL: 226 return (gettext("UNAVAIL")); 227 case POOL_STATE_POTENTIALLY_ACTIVE: 228 return (gettext("POTENTIALLY_ACTIVE")); 229 } 230 231 return (gettext("UNKNOWN")); 232} 233 234/* 235 * Get a zpool property value for 'prop' and return the value in 236 * a pre-allocated buffer. 
237 */ 238int 239zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, 240 zprop_source_t *srctype) 241{ 242 uint64_t intval; 243 const char *strval; 244 zprop_source_t src = ZPROP_SRC_NONE; 245 nvlist_t *nvroot; 246 vdev_stat_t *vs; 247 uint_t vsc; 248 249 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 250 switch (prop) { 251 case ZPOOL_PROP_NAME: 252 (void) strlcpy(buf, zpool_get_name(zhp), len); 253 break; 254 255 case ZPOOL_PROP_HEALTH: 256 (void) strlcpy(buf, "FAULTED", len); 257 break; 258 259 case ZPOOL_PROP_GUID: 260 intval = zpool_get_prop_int(zhp, prop, &src); 261 (void) snprintf(buf, len, "%llu", intval); 262 break; 263 264 case ZPOOL_PROP_ALTROOT: 265 case ZPOOL_PROP_CACHEFILE: 266 case ZPOOL_PROP_COMMENT: 267 if (zhp->zpool_props != NULL || 268 zpool_get_all_props(zhp) == 0) { 269 (void) strlcpy(buf, 270 zpool_get_prop_string(zhp, prop, &src), 271 len); 272 if (srctype != NULL) 273 *srctype = src; 274 return (0); 275 } 276 /* FALLTHROUGH */ 277 default: 278 (void) strlcpy(buf, "-", len); 279 break; 280 } 281 282 if (srctype != NULL) 283 *srctype = src; 284 return (0); 285 } 286 287 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && 288 prop != ZPOOL_PROP_NAME) 289 return (-1); 290 291 switch (zpool_prop_get_type(prop)) { 292 case PROP_TYPE_STRING: 293 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), 294 len); 295 break; 296 297 case PROP_TYPE_NUMBER: 298 intval = zpool_get_prop_int(zhp, prop, &src); 299 300 switch (prop) { 301 case ZPOOL_PROP_SIZE: 302 case ZPOOL_PROP_ALLOCATED: 303 case ZPOOL_PROP_FREE:
|
304 case ZPOOL_PROP_EXPANDSZ: |
305 (void) zfs_nicenum(intval, buf, len); 306 break; 307 308 case ZPOOL_PROP_CAPACITY: 309 (void) snprintf(buf, len, "%llu%%", 310 (u_longlong_t)intval); 311 break; 312 313 case ZPOOL_PROP_DEDUPRATIO: 314 (void) snprintf(buf, len, "%llu.%02llux", 315 (u_longlong_t)(intval / 100), 316 (u_longlong_t)(intval % 100)); 317 break; 318 319 case ZPOOL_PROP_HEALTH: 320 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 321 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 322 verify(nvlist_lookup_uint64_array(nvroot, 323 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 324 == 0); 325 326 (void) strlcpy(buf, zpool_state_to_name(intval, 327 vs->vs_aux), len); 328 break; 329 default: 330 (void) snprintf(buf, len, "%llu", intval); 331 } 332 break; 333 334 case PROP_TYPE_INDEX: 335 intval = zpool_get_prop_int(zhp, prop, &src); 336 if (zpool_prop_index_to_string(prop, intval, &strval) 337 != 0) 338 return (-1); 339 (void) strlcpy(buf, strval, len); 340 break; 341 342 default: 343 abort(); 344 } 345 346 if (srctype) 347 *srctype = src; 348 349 return (0); 350} 351 352/* 353 * Check if the bootfs name has the same pool name as it is set to. 354 * Assuming bootfs is a valid dataset name. 355 */ 356static boolean_t 357bootfs_name_valid(const char *pool, char *bootfs) 358{ 359 int len = strlen(pool); 360 361 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 362 return (B_FALSE); 363 364 if (strncmp(pool, bootfs, len) == 0 && 365 (bootfs[len] == '/' || bootfs[len] == '\0')) 366 return (B_TRUE); 367 368 return (B_FALSE); 369} 370 371/* 372 * Inspect the configuration to determine if any of the devices contain 373 * an EFI label. 
374 */ 375static boolean_t 376pool_uses_efi(nvlist_t *config) 377{ 378#ifdef sun 379 nvlist_t **child; 380 uint_t c, children; 381 382 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 383 &child, &children) != 0) 384 return (read_efi_label(config, NULL) >= 0); 385 386 for (c = 0; c < children; c++) { 387 if (pool_uses_efi(child[c])) 388 return (B_TRUE); 389 } 390#endif /* sun */ 391 return (B_FALSE); 392} 393
394boolean_t 395zpool_is_bootable(zpool_handle_t *zhp)
396{ 397 char bootfs[ZPOOL_MAXNAMELEN]; 398 399 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 400 sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-", 401 sizeof (bootfs)) != 0); 402} 403 404 405/* 406 * Given an nvlist of zpool properties to be set, validate that they are 407 * correct, and parse any numeric properties (index, boolean, etc) if they are 408 * specified as strings. 409 */ 410static nvlist_t * 411zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 412 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 413{ 414 nvpair_t *elem; 415 nvlist_t *retprops; 416 zpool_prop_t prop; 417 char *strval; 418 uint64_t intval; 419 char *slash, *check; 420 struct stat64 statbuf; 421 zpool_handle_t *zhp; 422 nvlist_t *nvroot; 423 424 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 425 (void) no_memory(hdl); 426 return (NULL); 427 } 428 429 elem = NULL; 430 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 431 const char *propname = nvpair_name(elem); 432 433 /* 434 * Make sure this property is valid and applies to this type. 435 */ 436 if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) { 437 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 438 "invalid property '%s'"), propname); 439 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 440 goto error; 441 } 442 443 if (zpool_prop_readonly(prop)) { 444 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 445 "is readonly"), propname); 446 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); 447 goto error; 448 } 449 450 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, 451 &strval, &intval, errbuf) != 0) 452 goto error; 453 454 /* 455 * Perform additional checking for specific properties. 
456 */ 457 switch (prop) { 458 case ZPOOL_PROP_VERSION: 459 if (intval < version || intval > SPA_VERSION) { 460 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 461 "property '%s' number %d is invalid."), 462 propname, intval); 463 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 464 goto error; 465 } 466 break; 467 468 case ZPOOL_PROP_BOOTFS: 469 if (flags.create || flags.import) { 470 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 471 "property '%s' cannot be set at creation " 472 "or import time"), propname); 473 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 474 goto error; 475 } 476 477 if (version < SPA_VERSION_BOOTFS) { 478 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 479 "pool must be upgraded to support " 480 "'%s' property"), propname); 481 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 482 goto error; 483 } 484 485 /* 486 * bootfs property value has to be a dataset name and 487 * the dataset has to be in the same pool as it sets to. 488 */ 489 if (strval[0] != '\0' && !bootfs_name_valid(poolname, 490 strval)) { 491 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 492 "is an invalid name"), strval); 493 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 494 goto error; 495 } 496 497 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { 498 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 499 "could not open pool '%s'"), poolname); 500 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 501 goto error; 502 } 503 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 504 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 505 506#ifdef sun 507 /* 508 * bootfs property cannot be set on a disk which has 509 * been EFI labeled. 
510 */ 511 if (pool_uses_efi(nvroot)) { 512 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 513 "property '%s' not supported on " 514 "EFI labeled devices"), propname); 515 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf); 516 zpool_close(zhp); 517 goto error; 518 } 519#endif /* sun */ 520 zpool_close(zhp); 521 break; 522 523 case ZPOOL_PROP_ALTROOT: 524 if (!flags.create && !flags.import) { 525 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 526 "property '%s' can only be set during pool " 527 "creation or import"), propname); 528 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 529 goto error; 530 } 531 532 if (strval[0] != '/') { 533 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 534 "bad alternate root '%s'"), strval); 535 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 536 goto error; 537 } 538 break; 539 540 case ZPOOL_PROP_CACHEFILE: 541 if (strval[0] == '\0') 542 break; 543 544 if (strcmp(strval, "none") == 0) 545 break; 546 547 if (strval[0] != '/') { 548 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 549 "property '%s' must be empty, an " 550 "absolute path, or 'none'"), propname); 551 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 552 goto error; 553 } 554 555 slash = strrchr(strval, '/'); 556 557 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 558 strcmp(slash, "/..") == 0) { 559 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 560 "'%s' is not a valid file"), strval); 561 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 562 goto error; 563 } 564 565 *slash = '\0'; 566 567 if (strval[0] != '\0' && 568 (stat64(strval, &statbuf) != 0 || 569 !S_ISDIR(statbuf.st_mode))) { 570 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 571 "'%s' is not a valid directory"), 572 strval); 573 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 574 goto error; 575 } 576 577 *slash = '/'; 578 break; 579 580 case ZPOOL_PROP_COMMENT: 581 for (check = strval; *check != '\0'; check++) { 582 if (!isprint(*check)) { 583 zfs_error_aux(hdl, 584 dgettext(TEXT_DOMAIN, 585 "comment may only have printable " 586 "characters")); 587 (void) 
zfs_error(hdl, EZFS_BADPROP, 588 errbuf); 589 goto error; 590 } 591 } 592 if (strlen(strval) > ZPROP_MAX_COMMENT) { 593 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 594 "comment must not exceed %d characters"), 595 ZPROP_MAX_COMMENT); 596 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 597 goto error; 598 } 599 break; 600 case ZPOOL_PROP_READONLY: 601 if (!flags.import) { 602 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 603 "property '%s' can only be set at " 604 "import time"), propname); 605 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 606 goto error; 607 } 608 break; 609 } 610 } 611 612 return (retprops); 613error: 614 nvlist_free(retprops); 615 return (NULL); 616} 617 618/* 619 * Set zpool property : propname=propval. 620 */ 621int 622zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 623{ 624 zfs_cmd_t zc = { 0 }; 625 int ret = -1; 626 char errbuf[1024]; 627 nvlist_t *nvl = NULL; 628 nvlist_t *realprops; 629 uint64_t version; 630 prop_flags_t flags = { 0 }; 631 632 (void) snprintf(errbuf, sizeof (errbuf), 633 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 634 zhp->zpool_name); 635 636 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 637 return (no_memory(zhp->zpool_hdl)); 638 639 if (nvlist_add_string(nvl, propname, propval) != 0) { 640 nvlist_free(nvl); 641 return (no_memory(zhp->zpool_hdl)); 642 } 643 644 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 645 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 646 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 647 nvlist_free(nvl); 648 return (-1); 649 } 650 651 nvlist_free(nvl); 652 nvl = realprops; 653 654 /* 655 * Execute the corresponding ioctl() to set this property. 
656 */ 657 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 658 659 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 660 nvlist_free(nvl); 661 return (-1); 662 } 663 664 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 665 666 zcmd_free_nvlists(&zc); 667 nvlist_free(nvl); 668 669 if (ret) 670 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 671 else 672 (void) zpool_props_refresh(zhp); 673 674 return (ret); 675} 676 677int 678zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) 679{ 680 libzfs_handle_t *hdl = zhp->zpool_hdl; 681 zprop_list_t *entry; 682 char buf[ZFS_MAXPROPLEN]; 683 684 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 685 return (-1); 686 687 for (entry = *plp; entry != NULL; entry = entry->pl_next) { 688 689 if (entry->pl_fixed) 690 continue; 691 692 if (entry->pl_prop != ZPROP_INVAL && 693 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), 694 NULL) == 0) { 695 if (strlen(buf) > entry->pl_width) 696 entry->pl_width = strlen(buf); 697 } 698 } 699 700 return (0); 701} 702 703 704/* 705 * Don't start the slice at the default block of 34; many storage 706 * devices will use a stripe width of 128k, so start there instead. 707 */ 708#define NEW_START_BLOCK 256 709 710/* 711 * Validate the given pool name, optionally putting an extended error message in 712 * 'buf'. 713 */ 714boolean_t 715zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 716{ 717 namecheck_err_t why; 718 char what; 719 int ret; 720 721 ret = pool_namecheck(pool, &why, &what); 722 723 /* 724 * The rules for reserved pool names were extended at a later point. 725 * But we need to support users with existing pools that may now be 726 * invalid. So we only check for this expanded set of names during a 727 * create (or import), and only in userland. 
728 */ 729 if (ret == 0 && !isopen && 730 (strncmp(pool, "mirror", 6) == 0 || 731 strncmp(pool, "raidz", 5) == 0 || 732 strncmp(pool, "spare", 5) == 0 || 733 strcmp(pool, "log") == 0)) { 734 if (hdl != NULL) 735 zfs_error_aux(hdl, 736 dgettext(TEXT_DOMAIN, "name is reserved")); 737 return (B_FALSE); 738 } 739 740 741 if (ret != 0) { 742 if (hdl != NULL) { 743 switch (why) { 744 case NAME_ERR_TOOLONG: 745 zfs_error_aux(hdl, 746 dgettext(TEXT_DOMAIN, "name is too long")); 747 break; 748 749 case NAME_ERR_INVALCHAR: 750 zfs_error_aux(hdl, 751 dgettext(TEXT_DOMAIN, "invalid character " 752 "'%c' in pool name"), what); 753 break; 754 755 case NAME_ERR_NOLETTER: 756 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 757 "name must begin with a letter")); 758 break; 759 760 case NAME_ERR_RESERVED: 761 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 762 "name is reserved")); 763 break; 764 765 case NAME_ERR_DISKLIKE: 766 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 767 "pool name is reserved")); 768 break; 769 770 case NAME_ERR_LEADING_SLASH: 771 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 772 "leading slash in name")); 773 break; 774 775 case NAME_ERR_EMPTY_COMPONENT: 776 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 777 "empty component in name")); 778 break; 779 780 case NAME_ERR_TRAILING_SLASH: 781 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 782 "trailing slash in name")); 783 break; 784 785 case NAME_ERR_MULTIPLE_AT: 786 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 787 "multiple '@' delimiters in name")); 788 break; 789 790 } 791 } 792 return (B_FALSE); 793 } 794 795 return (B_TRUE); 796} 797 798/* 799 * Open a handle to the given pool, even if the pool is currently in the FAULTED 800 * state. 801 */ 802zpool_handle_t * 803zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 804{ 805 zpool_handle_t *zhp; 806 boolean_t missing; 807 808 /* 809 * Make sure the pool name is valid. 
810 */ 811 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 812 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 813 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 814 pool); 815 return (NULL); 816 } 817 818 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 819 return (NULL); 820 821 zhp->zpool_hdl = hdl; 822 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 823 824 if (zpool_refresh_stats(zhp, &missing) != 0) { 825 zpool_close(zhp); 826 return (NULL); 827 } 828 829 if (missing) { 830 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 831 (void) zfs_error_fmt(hdl, EZFS_NOENT, 832 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 833 zpool_close(zhp); 834 return (NULL); 835 } 836 837 return (zhp); 838} 839 840/* 841 * Like the above, but silent on error. Used when iterating over pools (because 842 * the configuration cache may be out of date). 843 */ 844int 845zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 846{ 847 zpool_handle_t *zhp; 848 boolean_t missing; 849 850 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 851 return (-1); 852 853 zhp->zpool_hdl = hdl; 854 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 855 856 if (zpool_refresh_stats(zhp, &missing) != 0) { 857 zpool_close(zhp); 858 return (-1); 859 } 860 861 if (missing) { 862 zpool_close(zhp); 863 *ret = NULL; 864 return (0); 865 } 866 867 *ret = zhp; 868 return (0); 869} 870 871/* 872 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 873 * state. 
874 */ 875zpool_handle_t * 876zpool_open(libzfs_handle_t *hdl, const char *pool) 877{ 878 zpool_handle_t *zhp; 879 880 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 881 return (NULL); 882 883 if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 884 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 885 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 886 zpool_close(zhp); 887 return (NULL); 888 } 889 890 return (zhp); 891} 892 893/* 894 * Close the handle. Simply frees the memory associated with the handle. 895 */ 896void 897zpool_close(zpool_handle_t *zhp) 898{ 899 if (zhp->zpool_config) 900 nvlist_free(zhp->zpool_config); 901 if (zhp->zpool_old_config) 902 nvlist_free(zhp->zpool_old_config); 903 if (zhp->zpool_props) 904 nvlist_free(zhp->zpool_props); 905 free(zhp); 906} 907 908/* 909 * Return the name of the pool. 910 */ 911const char * 912zpool_get_name(zpool_handle_t *zhp) 913{ 914 return (zhp->zpool_name); 915} 916 917 918/* 919 * Return the state of the pool (ACTIVE or UNAVAILABLE) 920 */ 921int 922zpool_get_state(zpool_handle_t *zhp) 923{ 924 return (zhp->zpool_state); 925} 926 927/* 928 * Create the named pool, using the provided vdev list. It is assumed 929 * that the consumer has already validated the contents of the nvlist, so we 930 * don't have to worry about error semantics. 
931 */ 932int 933zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 934 nvlist_t *props, nvlist_t *fsprops) 935{ 936 zfs_cmd_t zc = { 0 }; 937 nvlist_t *zc_fsprops = NULL; 938 nvlist_t *zc_props = NULL; 939 char msg[1024]; 940 char *altroot; 941 int ret = -1; 942 943 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 944 "cannot create '%s'"), pool); 945 946 if (!zpool_name_valid(hdl, B_FALSE, pool)) 947 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 948 949 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 950 return (-1); 951 952 if (props) { 953 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 954 955 if ((zc_props = zpool_valid_proplist(hdl, pool, props, 956 SPA_VERSION_1, flags, msg)) == NULL) { 957 goto create_failed; 958 } 959 } 960 961 if (fsprops) { 962 uint64_t zoned; 963 char *zonestr; 964 965 zoned = ((nvlist_lookup_string(fsprops, 966 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 967 strcmp(zonestr, "on") == 0); 968 969 if ((zc_fsprops = zfs_valid_proplist(hdl, 970 ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) { 971 goto create_failed; 972 } 973 if (!zc_props && 974 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 975 goto create_failed; 976 } 977 if (nvlist_add_nvlist(zc_props, 978 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 979 goto create_failed; 980 } 981 } 982 983 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 984 goto create_failed; 985 986 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 987 988 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 989 990 zcmd_free_nvlists(&zc); 991 nvlist_free(zc_props); 992 nvlist_free(zc_fsprops); 993 994 switch (errno) { 995 case EBUSY: 996 /* 997 * This can happen if the user has specified the same 998 * device multiple times. We can't reliably detect this 999 * until we try to add it and see we already have a 1000 * label. 
1001 */ 1002 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1003 "one or more vdevs refer to the same device")); 1004 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1005 1006 case EOVERFLOW: 1007 /* 1008 * This occurs when one of the devices is below 1009 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1010 * device was the problem device since there's no 1011 * reliable way to determine device size from userland. 1012 */ 1013 { 1014 char buf[64]; 1015 1016 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1017 1018 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1019 "one or more devices is less than the " 1020 "minimum size (%s)"), buf); 1021 } 1022 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1023 1024 case ENOSPC: 1025 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1026 "one or more devices is out of space")); 1027 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1028 1029 case ENOTBLK: 1030 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1031 "cache device must be a disk or disk slice")); 1032 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1033 1034 default: 1035 return (zpool_standard_error(hdl, errno, msg)); 1036 } 1037 } 1038 1039 /* 1040 * If this is an alternate root pool, then we automatically set the 1041 * mountpoint of the root dataset to be '/'. 1042 */ 1043 if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT), 1044 &altroot) == 0) { 1045 zfs_handle_t *zhp; 1046 1047 verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL); 1048 verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1049 "/") == 0); 1050 1051 zfs_close(zhp); 1052 } 1053 1054create_failed: 1055 zcmd_free_nvlists(&zc); 1056 nvlist_free(zc_props); 1057 nvlist_free(zc_fsprops); 1058 return (ret); 1059} 1060 1061/* 1062 * Destroy the given pool. It is up to the caller to ensure that there are no 1063 * datasets left in the pool. 
1064 */ 1065int 1066zpool_destroy(zpool_handle_t *zhp) 1067{ 1068 zfs_cmd_t zc = { 0 }; 1069 zfs_handle_t *zfp = NULL; 1070 libzfs_handle_t *hdl = zhp->zpool_hdl; 1071 char msg[1024]; 1072 1073 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1074 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1075 return (-1); 1076 1077 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1078 1079 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1080 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1081 "cannot destroy '%s'"), zhp->zpool_name); 1082 1083 if (errno == EROFS) { 1084 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1085 "one or more devices is read only")); 1086 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1087 } else { 1088 (void) zpool_standard_error(hdl, errno, msg); 1089 } 1090 1091 if (zfp) 1092 zfs_close(zfp); 1093 return (-1); 1094 } 1095 1096 if (zfp) { 1097 remove_mountpoint(zfp); 1098 zfs_close(zfp); 1099 } 1100 1101 return (0); 1102} 1103 1104/* 1105 * Add the given vdevs to the pool. The caller must have already performed the 1106 * necessary verification to ensure that the vdev specification is well-formed. 1107 */ 1108int 1109zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) 1110{ 1111 zfs_cmd_t zc = { 0 }; 1112 int ret; 1113 libzfs_handle_t *hdl = zhp->zpool_hdl; 1114 char msg[1024]; 1115 nvlist_t **spares, **l2cache; 1116 uint_t nspares, nl2cache; 1117 1118 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1119 "cannot add to '%s'"), zhp->zpool_name); 1120 1121 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1122 SPA_VERSION_SPARES && 1123 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1124 &spares, &nspares) == 0) { 1125 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1126 "upgraded to add hot spares")); 1127 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1128 } 1129
1130	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1131 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) { 1132 uint64_t s; 1133 1134 for (s = 0; s < nspares; s++) { 1135 char *path; 1136 1137 if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH, 1138 &path) == 0 && pool_uses_efi(spares[s])) { 1139 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1140 "device '%s' contains an EFI label and " 1141 "cannot be used on root pools."), 1142 zpool_vdev_name(hdl, NULL, spares[s], 1143 B_FALSE)); 1144 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 1145 } 1146 } 1147 } 1148 1149 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1150 SPA_VERSION_L2CACHE && 1151 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1152 &l2cache, &nl2cache) == 0) { 1153 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1154 "upgraded to add cache devices")); 1155 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1156 } 1157 1158 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1159 return (-1); 1160 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1161 1162 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) { 1163 switch (errno) { 1164 case EBUSY: 1165 /* 1166 * This can happen if the user has specified the same 1167 * device multiple times. We can't reliably detect this 1168 * until we try to add it and see we already have a 1169 * label. 1170 */ 1171 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1172 "one or more vdevs refer to the same device")); 1173 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1174 break; 1175 1176 case EOVERFLOW: 1177 /* 1178 * This occurrs when one of the devices is below 1179 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1180 * device was the problem device since there's no 1181 * reliable way to determine device size from userland. 
1182 */ 1183 { 1184 char buf[64]; 1185 1186 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1187 1188 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1189 "device is less than the minimum " 1190 "size (%s)"), buf); 1191 } 1192 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1193 break; 1194 1195 case ENOTSUP: 1196 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1197 "pool must be upgraded to add these vdevs")); 1198 (void) zfs_error(hdl, EZFS_BADVERSION, msg); 1199 break; 1200 1201 case EDOM: 1202 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1203 "root pool can not have multiple vdevs" 1204 " or separate logs")); 1205 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); 1206 break; 1207 1208 case ENOTBLK: 1209 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1210 "cache device must be a disk or disk slice")); 1211 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1212 break; 1213 1214 default: 1215 (void) zpool_standard_error(hdl, errno, msg); 1216 } 1217 1218 ret = -1; 1219 } else { 1220 ret = 0; 1221 } 1222 1223 zcmd_free_nvlists(&zc); 1224 1225 return (ret); 1226} 1227 1228/* 1229 * Exports the pool from the system. The caller must ensure that there are no 1230 * mounted datasets in the pool. 
 */
int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	/* zc_cookie carries the force flag, zc_guid the hardforce flag */
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			/*
			 * The kernel refuses the export because a shared
			 * spare would be orphaned; tell the user how to
			 * override.
			 */
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

/*
 * Export the pool; fails if datasets are busy unless 'force' is set.
 */
int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
	return (zpool_export_common(zhp, force, B_FALSE));
}

/*
 * Export the pool unconditionally (force and hardforce both set).
 */
int
zpool_export_force(zpool_handle_t *zhp)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
}

/*
 * Print a human-readable summary of what a pool rewind did (or, for a
 * dry run, would do): the txg timestamp rewound to and roughly how much
 * transaction time is discarded.  Silent unless error printing is
 * enabled and the config carries rewind load info.
 */
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;	/* seconds of transactions lost; -1 = unknown */
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0)
		return;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	/*
	 * NOTE(review): passing 0 as the strftime() format relies on a
	 * Solaris-specific default format; this is undefined behavior per
	 * ISO C -- confirm before porting to other libc implementations.
	 */
	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, 0, &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		/* Round to minutes when more than two minutes were lost. */
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded", loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

/*
 * Explain to the user how a damaged pool might be recovered by rewinding
 * to an earlier txg, including the estimated data loss, or tell them to
 * restore from backup when no rewind target is available.  'reason' >= 0
 * selects "action:" prefix (zpool status style); negative selects a
 * tab-indented continuation line (import error style).
 */
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;	/* persistent data errors after rewind */
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	/* NOTE(review): same Solaris-specific strftime(0) idiom as above. */
	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, 0, &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "), (loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "), loss);
	}
	/* edata == UINT64_MAX means the error count was not reported. */
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new properties value to be set.
1408 */ 1409int 1410zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1411 char *altroot) 1412{ 1413 nvlist_t *props = NULL; 1414 int ret; 1415 1416 if (altroot != NULL) { 1417 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1418 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1419 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1420 newname)); 1421 } 1422 1423 if (nvlist_add_string(props, 1424 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1425 nvlist_add_string(props, 1426 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1427 nvlist_free(props); 1428 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1429 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1430 newname)); 1431 } 1432 } 1433 1434 ret = zpool_import_props(hdl, config, newname, props, 1435 ZFS_IMPORT_NORMAL); 1436 if (props) 1437 nvlist_free(props); 1438 return (ret); 1439} 1440 1441static void 1442print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1443 int indent) 1444{ 1445 nvlist_t **child; 1446 uint_t c, children; 1447 char *vname; 1448 uint64_t is_log = 0; 1449 1450 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1451 &is_log); 1452 1453 if (name != NULL) 1454 (void) printf("\t%*s%s%s\n", indent, "", name, 1455 is_log ? " [log]" : ""); 1456 1457 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1458 &child, &children) != 0) 1459 return; 1460 1461 for (c = 0; c < children; c++) { 1462 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); 1463 print_vdev_tree(hdl, vname, child[c], indent + 2); 1464 free(vname); 1465 } 1466} 1467 1468/* 1469 * Import the given pool using the known configuration and a list of 1470 * properties to be set. The configuration should have come from 1471 * zpool_find_import(). The 'newname' parameters control whether the pool 1472 * is imported with a different name. 
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = { 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;		/* nvlist returned by the import ioctl */
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;			/* name the pool is imported under */
	char *origname;			/* name recorded in the config */
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;
		/* NOTE: this local 'flags' shadows the int parameter above */
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		/*
		 * zpool_valid_proplist() returns a freshly allocated,
		 * validated copy; 'props' now refers to that copy and must
		 * be freed by this function.
		 */
		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = flags;
	/* Grow the destination buffer until the kernel's reply fits. */
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			/*
			 * NOTE(review): 'props' is leaked on this path (only
			 * the zc nvlists are freed) -- confirm and plumb a
			 * common cleanup path.
			 */
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			/*
			 * NOTE(review): this early return leaks 'props' and
			 * the zc nvlists -- confirm.
			 */
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			/*
			 * Missing devices: if the kernel told us which ones,
			 * list them and suggest 'zpool import -m'.
			 */
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		/*
		 * NOTE(review): the success path returns here without the
		 * zcmd_free_nvlists()/nvlist_free(props) done below --
		 * apparent leak, confirm.
		 */
		return (0);
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}

/*
 * Scan the pool: start a scrub (POOL_SCAN_SCRUB), cancel one
 * (POOL_SCAN_NONE), or other pool_scan_func_t values.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	/* Cancelling when no scan is running (ENOENT) is only an error. */
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	/*
	 * NOTE(review): the snprintf/dgettext calls below may clobber
	 * errno before it is examined -- confirm errno is preserved.
	 */
	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		/*
		 * EBUSY means a scan is already in progress; distinguish
		 * scrub from resilver via the cached scan stats.
		 */
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string. Users of this are expected to do their own
 * verification of the s# part.
 *
 * NOTE(review): isdigit() on a plain char is implementation-defined for
 * negative values; presumably device names are ASCII here -- confirm.
 */
#define	CTD_CHECK(str)  (str && str[0] == 'c' && isdigit(str[1]))

/*
 * More elaborate version for ones which may start with "/dev/dsk/"
 * and the like.
 */
static int
ctd_check_path(char *str) {
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)
				;
		}
		str = tmp + 1;
	}
	return (CTD_CHECK(str));
}

/*
 * Find a vdev that matches the search criteria specified. We use the
 * the nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	/* The first (and only) pair in 'search' determines the search mode. */
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		/* Search by vdev guid. */
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "s0" or "s0/old". The "s0" part is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
		    ctd_check_path(val)) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			/*
			 * NOTE: when wholedisk is 0 this falls through to
			 * the common strcmp() at the bottom of the case.
			 */
			if (wholedisk) {
				int slen = strlen(srchval);
				int vlen = strlen(val);

				/* user path must be exactly "s0" shorter */
				if (slen != vlen - 2)
					break;

				/*
				 * make_leaf_vdev() should only set
				 * wholedisk for ZPOOL_CONFIG_PATHs which
				 * will include "/dev/dsk/", giving plenty of
				 * room for the indices used next.
				 */
				ASSERT(vlen >= 6);

				/*
				 * strings identical except trailing "s0"
				 */
				if (strcmp(&val[vlen - 2], "s0") == 0 &&
				    strncmp(srchval, val, slen) == 0)
					return (nv);

				/*
				 * strings identical except trailing "s0/old"
				 */
				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
				    strcmp(&srchval[slen - 4], "/old") == 0 &&
				    strncmp(srchval, val, slen - 4) == 0)
					return (nv);

				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			/* strtoull reports range errors only via errno */
			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	/* No match here; recurse through the children, then spares/l2cache. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	/*
	 * NOTE(review): KM_SLEEP is a kernel allocation flag being passed
	 * as the userland nvlist_alloc() flag argument -- presumably
	 * harmless here, but confirm.
	 */
	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}

/*
 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
 */
boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}

/*
 * Look up a vdev by user-supplied identifier: a numeric guid, an
 * interior vdev name ("mirror-4"), a bare device name (prefixed with
 * _PATH_DEV), or a full path.  Outputs as in
 * zpool_find_vdev_by_physpath().
 */
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char buf[MAXPATHLEN];
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	/* A string that parses fully as a nonzero number is a guid. */
	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path);
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}

/*
 * Returns 1 unless the vdev config is marked offline, faulted or
 * removed.
 */
static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}

/*
 * Helper function for zpool_get_physpaths().
 * Appends this vdev's phys_path to 'physpath' (space-separated),
 * advancing *bytes_written.  Returns EZFS_NOSPC on truncation,
 * EZFS_NODEVICE if the vdev has no phys_path.
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	/* separate entries after the first with a single space */
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}

/*
 * Recursively collect the phys_paths of the online disks under 'nv'
 * into 'physpath'.  Only disk, mirror, replacing and spare vdevs are
 * traversed; for spares only the active device is used.
 *
 * NOTE(review): this returns EZFS_POOL_INVALARG even after successfully
 * appending paths; the caller relies on *rsz rather than the return
 * value except for EZFS_NOSPC -- confirm before reusing elsewhere.
 */
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    /* deliberate assignment: children of a spare are spare disks */
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}

/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * root pool can not have EFI labeled disks and can only have
	 * a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
	    pool_uses_efi(vdev_root))
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}

/*
 * Get phys_path for a root pool
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
	    phypath_size));
}

/*
 * If the device has being dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
{
#ifdef sun
	char path[MAXPATHLEN];
	/*
	 * NOTE(review): errbuf is never written before being passed to
	 * zfs_error() below -- it is used uninitialized; confirm and fix.
	 */
	char errbuf[1024];
	int fd, error;
	int (*_efi_use_whole_disk)(int);

	/* resolve efi_use_whole_disk() lazily so libefi is optional */
	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
	    "efi_use_whole_disk")) == NULL)
		return (-1);

	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to open device"), name);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left. If so, we simply
	 * ignore that error and continue on.
	 */
	error = _efi_use_whole_disk(fd);
	(void) close(fd);
	if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to read disk capacity"), name);
		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}
#endif	/* sun */
	return (0);
}

/*
 * Bring the specified vdev online. The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		char *pathname = NULL;
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
		    &pathname) == 0);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			/* strip the "/dev/dsk/" prefix before relabeling */
			pathname += strlen(DISK_ROOT) + 1;
			(void) zpool_relabel_disk(hdl, pathname);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one. Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	/* the kernel returns the resulting vdev state in zc_cookie */
	*newstate = zc.zc_cookie;
	return (0);
}

/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	case EEXIST:
		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Mark the given vdev faulted.
2312 */ 2313int 2314zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2315{ 2316 zfs_cmd_t zc = { 0 }; 2317 char msg[1024]; 2318 libzfs_handle_t *hdl = zhp->zpool_hdl; 2319 2320 (void) snprintf(msg, sizeof (msg), 2321 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2322 2323 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2324 zc.zc_guid = guid; 2325 zc.zc_cookie = VDEV_STATE_FAULTED; 2326 zc.zc_obj = aux; 2327 2328 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2329 return (0); 2330 2331 switch (errno) { 2332 case EBUSY: 2333 2334 /* 2335 * There are no other replicas of this device. 2336 */ 2337 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2338 2339 default: 2340 return (zpool_standard_error(hdl, errno, msg)); 2341 } 2342 2343} 2344 2345/* 2346 * Mark the given vdev degraded. 2347 */ 2348int 2349zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2350{ 2351 zfs_cmd_t zc = { 0 }; 2352 char msg[1024]; 2353 libzfs_handle_t *hdl = zhp->zpool_hdl; 2354 2355 (void) snprintf(msg, sizeof (msg), 2356 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2357 2358 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2359 zc.zc_guid = guid; 2360 zc.zc_cookie = VDEV_STATE_DEGRADED; 2361 zc.zc_obj = aux; 2362 2363 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2364 return (0); 2365 2366 return (zpool_standard_error(hdl, errno, msg)); 2367} 2368 2369/* 2370 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2371 * a hot spare. 
2372 */ 2373static boolean_t 2374is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2375{ 2376 nvlist_t **child; 2377 uint_t c, children; 2378 char *type; 2379 2380 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2381 &children) == 0) { 2382 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2383 &type) == 0); 2384 2385 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2386 children == 2 && child[which] == tgt) 2387 return (B_TRUE); 2388 2389 for (c = 0; c < children; c++) 2390 if (is_replacing_spare(child[c], tgt, which)) 2391 return (B_TRUE); 2392 } 2393 2394 return (B_FALSE); 2395} 2396 2397/* 2398 * Attach new_disk (fully described by nvroot) to old_disk. 2399 * If 'replacing' is specified, the new disk will replace the old one. 2400 */ 2401int 2402zpool_vdev_attach(zpool_handle_t *zhp, 2403 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2404{ 2405 zfs_cmd_t zc = { 0 }; 2406 char msg[1024]; 2407 int ret; 2408 nvlist_t *tgt; 2409 boolean_t avail_spare, l2cache, islog; 2410 uint64_t val; 2411 char *newname; 2412 nvlist_t **child; 2413 uint_t children; 2414 nvlist_t *config_root; 2415 libzfs_handle_t *hdl = zhp->zpool_hdl;
|
2415 boolean_t rootpool = pool_is_bootable(zhp);
|
2416 boolean_t rootpool = zpool_is_bootable(zhp); |
2417 2418 if (replacing) 2419 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2420 "cannot replace %s with %s"), old_disk, new_disk); 2421 else 2422 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2423 "cannot attach %s to %s"), new_disk, old_disk); 2424 2425 /* 2426 * If this is a root pool, make sure that we're not attaching an 2427 * EFI labeled device. 2428 */ 2429 if (rootpool && pool_uses_efi(nvroot)) { 2430 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2431 "EFI labeled devices are not supported on root pools.")); 2432 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 2433 } 2434 2435 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2436 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2437 &islog)) == 0) 2438 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2439 2440 if (avail_spare) 2441 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2442 2443 if (l2cache) 2444 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2445 2446 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2447 zc.zc_cookie = replacing; 2448 2449 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2450 &child, &children) != 0 || children != 1) { 2451 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2452 "new device must be a single disk")); 2453 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2454 } 2455 2456 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2457 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2458 2459 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2460 return (-1); 2461 2462 /* 2463 * If the target is a hot spare that has been swapped in, we can only 2464 * replace it with another hot spare. 
2465 */ 2466 if (replacing && 2467 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2468 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2469 NULL) == NULL || !avail_spare) && 2470 is_replacing_spare(config_root, tgt, 1)) { 2471 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2472 "can only be replaced by another hot spare")); 2473 free(newname); 2474 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2475 } 2476 2477 free(newname); 2478 2479 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2480 return (-1); 2481 2482 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2483 2484 zcmd_free_nvlists(&zc); 2485 2486 if (ret == 0) { 2487 if (rootpool) { 2488 /* 2489 * XXX need a better way to prevent user from 2490 * booting up a half-baked vdev. 2491 */ 2492 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2493 "sure to wait until resilver is done " 2494 "before rebooting.\n")); 2495 (void) fprintf(stderr, "\n"); 2496 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If " 2497 "you boot from pool '%s', you may need to update\n" 2498 "boot code on newly attached disk '%s'.\n\n" 2499 "Assuming you use GPT partitioning and 'da0' is " 2500 "your new boot disk\n" 2501 "you may use the following command:\n\n" 2502 "\tgpart bootcode -b /boot/pmbr -p " 2503 "/boot/gptzfsboot -i 1 da0\n\n"), 2504 zhp->zpool_name, new_disk); 2505 } 2506 return (0); 2507 } 2508 2509 switch (errno) { 2510 case ENOTSUP: 2511 /* 2512 * Can't attach to or replace this type of vdev. 
2513 */ 2514 if (replacing) { 2515 uint64_t version = zpool_get_prop_int(zhp, 2516 ZPOOL_PROP_VERSION, NULL); 2517 2518 if (islog) 2519 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2520 "cannot replace a log with a spare")); 2521 else if (version >= SPA_VERSION_MULTI_REPLACE) 2522 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2523 "already in replacing/spare config; wait " 2524 "for completion or use 'zpool detach'")); 2525 else 2526 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2527 "cannot replace a replacing device")); 2528 } else { 2529 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2530 "can only attach to mirrors and top-level " 2531 "disks")); 2532 } 2533 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2534 break; 2535 2536 case EINVAL: 2537 /* 2538 * The new device must be a single disk. 2539 */ 2540 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2541 "new device must be a single disk")); 2542 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2543 break; 2544 2545 case EBUSY: 2546 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2547 new_disk); 2548 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2549 break; 2550 2551 case EOVERFLOW: 2552 /* 2553 * The new device is too small. 2554 */ 2555 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2556 "device is too small")); 2557 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2558 break; 2559 2560 case EDOM: 2561 /* 2562 * The new device has a different alignment requirement. 2563 */ 2564 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2565 "devices have different sector alignment")); 2566 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2567 break; 2568 2569 case ENAMETOOLONG: 2570 /* 2571 * The resulting top-level vdev spec won't fit in the label. 2572 */ 2573 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2574 break; 2575 2576 default: 2577 (void) zpool_standard_error(hdl, errno, msg); 2578 } 2579 2580 return (-1); 2581} 2582 2583/* 2584 * Detach the specified device. 
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	/* Detaching is only meaningful for a regular vdev, so reject spares
	 * and l2cache devices found by the lookup below. */
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
2649 */ 2650static int 2651find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2652 nvlist_t **schild, uint_t schildren) 2653{ 2654 uint_t mc; 2655 2656 for (mc = 0; mc < mchildren; mc++) { 2657 uint_t sc; 2658 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2659 mchild[mc], B_FALSE); 2660 2661 for (sc = 0; sc < schildren; sc++) { 2662 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2663 schild[sc], B_FALSE); 2664 boolean_t result = (strcmp(mpath, spath) == 0); 2665 2666 free(spath); 2667 if (result) { 2668 free(mpath); 2669 return (mc); 2670 } 2671 } 2672 2673 free(mpath); 2674 } 2675 2676 return (-1); 2677} 2678 2679/* 2680 * Split a mirror pool. If newroot points to null, then a new nvlist 2681 * is generated and it is the responsibility of the caller to free it. 2682 */ 2683int 2684zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2685 nvlist_t *props, splitflags_t flags) 2686{ 2687 zfs_cmd_t zc = { 0 }; 2688 char msg[1024]; 2689 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2690 nvlist_t **varray = NULL, *zc_props = NULL; 2691 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2692 libzfs_handle_t *hdl = zhp->zpool_hdl; 2693 uint64_t vers; 2694 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2695 int retval = 0; 2696 2697 (void) snprintf(msg, sizeof (msg), 2698 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2699 2700 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2701 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2702 2703 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2704 (void) fprintf(stderr, gettext("Internal error: unable to " 2705 "retrieve pool configuration\n")); 2706 return (-1); 2707 } 2708 2709 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2710 == 0); 2711 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2712 2713 if (props) { 2714 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 
2715 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2716 props, vers, flags, msg)) == NULL) 2717 return (-1); 2718 } 2719 2720 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2721 &children) != 0) { 2722 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2723 "Source pool is missing vdev tree")); 2724 if (zc_props) 2725 nvlist_free(zc_props); 2726 return (-1); 2727 } 2728 2729 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2730 vcount = 0; 2731 2732 if (*newroot == NULL || 2733 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2734 &newchild, &newchildren) != 0) 2735 newchildren = 0; 2736 2737 for (c = 0; c < children; c++) { 2738 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2739 char *type; 2740 nvlist_t **mchild, *vdev; 2741 uint_t mchildren; 2742 int entry; 2743 2744 /* 2745 * Unlike cache & spares, slogs are stored in the 2746 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2747 */ 2748 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2749 &is_log); 2750 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2751 &is_hole); 2752 if (is_log || is_hole) { 2753 /* 2754 * Create a hole vdev and put it in the config. 
2755 */ 2756 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2757 goto out; 2758 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2759 VDEV_TYPE_HOLE) != 0) 2760 goto out; 2761 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2762 1) != 0) 2763 goto out; 2764 if (lastlog == 0) 2765 lastlog = vcount; 2766 varray[vcount++] = vdev; 2767 continue; 2768 } 2769 lastlog = 0; 2770 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 2771 == 0); 2772 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 2773 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2774 "Source pool must be composed only of mirrors\n")); 2775 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2776 goto out; 2777 } 2778 2779 verify(nvlist_lookup_nvlist_array(child[c], 2780 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2781 2782 /* find or add an entry for this top-level vdev */ 2783 if (newchildren > 0 && 2784 (entry = find_vdev_entry(zhp, mchild, mchildren, 2785 newchild, newchildren)) >= 0) { 2786 /* We found a disk that the user specified. */ 2787 vdev = mchild[entry]; 2788 ++found; 2789 } else { 2790 /* User didn't specify a disk for this vdev. */ 2791 vdev = mchild[mchildren - 1]; 2792 } 2793 2794 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 2795 goto out; 2796 } 2797 2798 /* did we find every disk the user specified? */ 2799 if (found != newchildren) { 2800 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 2801 "include at most one disk from each mirror")); 2802 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2803 goto out; 2804 } 2805 2806 /* Prepare the nvlist for populating. 
*/ 2807 if (*newroot == NULL) { 2808 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 2809 goto out; 2810 freelist = B_TRUE; 2811 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 2812 VDEV_TYPE_ROOT) != 0) 2813 goto out; 2814 } else { 2815 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 2816 } 2817 2818 /* Add all the children we found */ 2819 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 2820 lastlog == 0 ? vcount : lastlog) != 0) 2821 goto out; 2822 2823 /* 2824 * If we're just doing a dry run, exit now with success. 2825 */ 2826 if (flags.dryrun) { 2827 memory_err = B_FALSE; 2828 freelist = B_FALSE; 2829 goto out; 2830 } 2831 2832 /* now build up the config list & call the ioctl */ 2833 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 2834 goto out; 2835 2836 if (nvlist_add_nvlist(newconfig, 2837 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 2838 nvlist_add_string(newconfig, 2839 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 2840 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 2841 goto out; 2842 2843 /* 2844 * The new pool is automatically part of the namespace unless we 2845 * explicitly export it. 
2846 */ 2847 if (!flags.import) 2848 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 2849 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2850 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 2851 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 2852 goto out; 2853 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 2854 goto out; 2855 2856 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 2857 retval = zpool_standard_error(hdl, errno, msg); 2858 goto out; 2859 } 2860 2861 freelist = B_FALSE; 2862 memory_err = B_FALSE; 2863 2864out: 2865 if (varray != NULL) { 2866 int v; 2867 2868 for (v = 0; v < vcount; v++) 2869 nvlist_free(varray[v]); 2870 free(varray); 2871 } 2872 zcmd_free_nvlists(&zc); 2873 if (zc_props) 2874 nvlist_free(zc_props); 2875 if (newconfig) 2876 nvlist_free(newconfig); 2877 if (freelist) { 2878 nvlist_free(*newroot); 2879 *newroot = NULL; 2880 } 2881 2882 if (retval != 0) 2883 return (retval); 2884 2885 if (memory_err) 2886 return (no_memory(hdl)); 2887 2888 return (0); 2889} 2890 2891/* 2892 * Remove the given device. Currently, this is supported only for hot spares 2893 * and level 2 cache devices. 2894 */ 2895int 2896zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 2897{ 2898 zfs_cmd_t zc = { 0 }; 2899 char msg[1024]; 2900 nvlist_t *tgt; 2901 boolean_t avail_spare, l2cache, islog; 2902 libzfs_handle_t *hdl = zhp->zpool_hdl; 2903 uint64_t version; 2904 2905 (void) snprintf(msg, sizeof (msg), 2906 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 2907 2908 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2909 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2910 &islog)) == 0) 2911 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2912 /* 2913 * XXX - this should just go away. 
2914 */ 2915 if (!avail_spare && !l2cache && !islog) { 2916 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2917 "only inactive hot spares, cache, top-level, " 2918 "or log devices can be removed")); 2919 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2920 } 2921 2922 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 2923 if (islog && version < SPA_VERSION_HOLES) { 2924 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2925 "pool must be upgrade to support log removal")); 2926 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 2927 } 2928 2929 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2930 2931 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 2932 return (0); 2933 2934 return (zpool_standard_error(hdl, errno, msg)); 2935} 2936 2937/* 2938 * Clear the errors for the pool, or the particular device if specified. 2939 */ 2940int 2941zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 2942{ 2943 zfs_cmd_t zc = { 0 }; 2944 char msg[1024]; 2945 nvlist_t *tgt; 2946 zpool_rewind_policy_t policy; 2947 boolean_t avail_spare, l2cache; 2948 libzfs_handle_t *hdl = zhp->zpool_hdl; 2949 nvlist_t *nvi = NULL; 2950 int error; 2951 2952 if (path) 2953 (void) snprintf(msg, sizeof (msg), 2954 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 2955 path); 2956 else 2957 (void) snprintf(msg, sizeof (msg), 2958 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 2959 zhp->zpool_name); 2960 2961 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2962 if (path) { 2963 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 2964 &l2cache, NULL)) == 0) 2965 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2966 2967 /* 2968 * Don't allow error clearing for hot spares. Do allow 2969 * error clearing for l2cache devices. 
2970 */ 2971 if (avail_spare) 2972 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2973 2974 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 2975 &zc.zc_guid) == 0); 2976 } 2977 2978 zpool_get_rewind_policy(rewindnvl, &policy); 2979 zc.zc_cookie = policy.zrp_request; 2980 2981 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 2982 return (-1); 2983 2984 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 2985 return (-1); 2986 2987 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 2988 errno == ENOMEM) { 2989 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 2990 zcmd_free_nvlists(&zc); 2991 return (-1); 2992 } 2993 } 2994 2995 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) && 2996 errno != EPERM && errno != EACCES)) { 2997 if (policy.zrp_request & 2998 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 2999 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3000 zpool_rewind_exclaim(hdl, zc.zc_name, 3001 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), 3002 nvi); 3003 nvlist_free(nvi); 3004 } 3005 zcmd_free_nvlists(&zc); 3006 return (0); 3007 } 3008 3009 zcmd_free_nvlists(&zc); 3010 return (zpool_standard_error(hdl, errno, msg)); 3011} 3012 3013/* 3014 * Similar to zpool_clear(), but takes a GUID (used by fmd). 3015 */ 3016int 3017zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3018{ 3019 zfs_cmd_t zc = { 0 }; 3020 char msg[1024]; 3021 libzfs_handle_t *hdl = zhp->zpool_hdl; 3022 3023 (void) snprintf(msg, sizeof (msg), 3024 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3025 guid); 3026 3027 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3028 zc.zc_guid = guid; 3029 zc.zc_cookie = ZPOOL_NO_REWIND; 3030 3031 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3032 return (0); 3033 3034 return (zpool_standard_error(hdl, errno, msg)); 3035} 3036 3037/* 3038 * Change the GUID for a pool. 
3039 */ 3040int 3041zpool_reguid(zpool_handle_t *zhp) 3042{ 3043 char msg[1024]; 3044 libzfs_handle_t *hdl = zhp->zpool_hdl; 3045 zfs_cmd_t zc = { 0 }; 3046 3047 (void) snprintf(msg, sizeof (msg), 3048 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3049 3050 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3051 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3052 return (0); 3053 3054 return (zpool_standard_error(hdl, errno, msg)); 3055} 3056 3057/*
|
3058 * Reopen the pool. 3059 */ 3060int 3061zpool_reopen(zpool_handle_t *zhp) 3062{ 3063 zfs_cmd_t zc = { 0 }; 3064 char msg[1024]; 3065 libzfs_handle_t *hdl = zhp->zpool_hdl; 3066 3067 (void) snprintf(msg, sizeof (msg), 3068 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), 3069 zhp->zpool_name); 3070 3071 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3072 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0) 3073 return (0); 3074 return (zpool_standard_error(hdl, errno, msg)); 3075} 3076 3077/* |
3078 * Convert from a devid string to a path. 3079 */ 3080static char * 3081devid_to_path(char *devid_str) 3082{ 3083 ddi_devid_t devid; 3084 char *minor; 3085 char *path; 3086 devid_nmlist_t *list = NULL; 3087 int ret; 3088 3089 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3090 return (NULL); 3091 3092 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3093 3094 devid_str_free(minor); 3095 devid_free(devid); 3096 3097 if (ret != 0) 3098 return (NULL); 3099 3100 if ((path = strdup(list[0].devname)) == NULL) 3101 return (NULL); 3102 3103 devid_free_nmlist(list); 3104 3105 return (path); 3106} 3107 3108/* 3109 * Convert from a path to a devid string. 3110 */ 3111static char * 3112path_to_devid(const char *path) 3113{ 3114 int fd; 3115 ddi_devid_t devid; 3116 char *minor, *ret; 3117 3118 if ((fd = open(path, O_RDONLY)) < 0) 3119 return (NULL); 3120 3121 minor = NULL; 3122 ret = NULL; 3123 if (devid_get(fd, &devid) == 0) { 3124 if (devid_get_minor_name(fd, &minor) == 0) 3125 ret = devid_str_encode(devid, minor); 3126 if (minor != NULL) 3127 devid_str_free(minor); 3128 devid_free(devid); 3129 } 3130 (void) close(fd); 3131 3132 return (ret); 3133} 3134 3135/* 3136 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3137 * ignore any failure here, since a common case is for an unprivileged user to 3138 * type 'zpool status', and we'll display the correct information anyway. 3139 */ 3140static void 3141set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3142{ 3143 zfs_cmd_t zc = { 0 }; 3144 3145 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3146 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3147 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3148 &zc.zc_guid) == 0); 3149 3150 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3151} 3152 3153/* 3154 * Given a vdev, return the name to display in iostat. 
 * If the vdev has a path, we use that, stripping off any leading "/dev/dsk/";
 * if not, we use the type.  We also check if this is a whole disk, in which
 * case we strip off the trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    boolean_t verbose)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];
	vdev_stat_t *vs;
	uint_t vsc;
	int have_stats;
	int have_path;

	have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0;
	have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0;

	/*
	 * If the device is not currently present, assume it will not
	 * come back at the same device path.  Display the device by GUID.
	 */
	/* NOTE(review): '&&' binds tighter than '||' here, so the GUID path
	 * is taken either for NOT_PRESENT or for a present-but-unopenable
	 * device — looks intentional, but parentheses would make it clearer. */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (have_path) {

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((have_stats == 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

#ifdef sun
		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			int pathlen = strlen(path);
			char *tmp = zfs_strdup(hdl, path);

			/*
			 * If it starts with c#, and ends with "s0", chop
			 * the "s0" off, or if it ends with "s0/old", remove
			 * the "s0" from the middle.
			 */
			if (CTD_CHECK(tmp)) {
				if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
					tmp[pathlen - 2] = '\0';
				} else if (pathlen > 6 &&
				    strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
					(void) strcpy(&tmp[pathlen - 6],
					    "/old");
				}
			}
			return (tmp);
		}
#else	/* !sun */
		if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
			path += sizeof(_PATH_DEV) - 1;
#endif	/* !sun */
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (verbose) {
			uint64_t id;

			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
			    (u_longlong_t)id);
			path = buf;
		}
	}

	/* 'path' may point at the local 'buf'; zfs_strdup() gives the caller
	 * a heap copy with stable lifetime. */
	return (zfs_strdup(hdl, path));
}

/* Ordering comparator over raw zbookmark_t bytes, for qsort() below. */
static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				/* Kernel told us the real count; retry with
				 * a bigger buffer. */
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriate and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}

/*
 * Build a single space-separated history string from a subcommand and its
 * argv, truncating (at argument granularity) to fit HIS_MAX_RECORD_LEN.
 */
void
zpool_set_history_str(const char *subcommand, int argc, char **argv,
    char *history_str)
{
	int i;

	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
	for (i = 1; i < argc; i++) {
		if (strlen(history_str) + 1 + strlen(argv[i]) >
		    HIS_MAX_RECORD_LEN)
			break;
		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
	}
}

/*
 * Stage command history for logging.
 */
int
zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
{
	if (history_str == NULL)
		return (EINVAL);

	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
		return (EINVAL);

	/* Replace any previously staged string; the handle owns the copy. */
	if (hdl->libzfs_log_str != NULL)
		free(hdl->libzfs_log_str);

	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
		return (no_memory(hdl));

	return (0);
}

/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}

3503/* 3504 * Process the buffer of nvlists, unpacking and storing each nvlist record 3505 * into 'records'. 'leftover' is set to the number of bytes that weren't 3506 * processed as there wasn't a complete record. 3507 */ 3508int 3509zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3510 nvlist_t ***records, uint_t *numrecords) 3511{ 3512 uint64_t reclen; 3513 nvlist_t *nv; 3514 int i; 3515 3516 while (bytes_read > sizeof (reclen)) { 3517 3518 /* get length of packed record (stored as little endian) */ 3519 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3520 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3521 3522 if (bytes_read < sizeof (reclen) + reclen) 3523 break; 3524 3525 /* unpack record */ 3526 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3527 return (ENOMEM); 3528 bytes_read -= sizeof (reclen) + reclen; 3529 buf += sizeof (reclen) + reclen; 3530 3531 /* add record to nvlist array */ 3532 (*numrecords)++; 3533 if (ISP2(*numrecords + 1)) { 3534 *records = realloc(*records, 3535 *numrecords * 2 * sizeof (nvlist_t *)); 3536 } 3537 (*records)[*numrecords - 1] = nv; 3538 } 3539 3540 *leftover = bytes_read; 3541 return (0); 3542} 3543 3544#define HIS_BUF_LEN (128*1024) 3545 3546/* 3547 * Retrieve the command history of a pool. 
3548 */ 3549int 3550zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3551{ 3552 char buf[HIS_BUF_LEN]; 3553 uint64_t off = 0; 3554 nvlist_t **records = NULL; 3555 uint_t numrecords = 0; 3556 int err, i; 3557 3558 do { 3559 uint64_t bytes_read = sizeof (buf); 3560 uint64_t leftover; 3561 3562 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3563 break; 3564 3565 /* if nothing else was read in, we're at EOF, just return */ 3566 if (!bytes_read) 3567 break; 3568 3569 if ((err = zpool_history_unpack(buf, bytes_read, 3570 &leftover, &records, &numrecords)) != 0) 3571 break; 3572 off -= leftover; 3573 3574 /* CONSTCOND */ 3575 } while (1); 3576 3577 if (!err) { 3578 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3579 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3580 records, numrecords) == 0); 3581 } 3582 for (i = 0; i < numrecords; i++) 3583 nvlist_free(records[i]); 3584 free(records); 3585 3586 return (err); 3587} 3588 3589void 3590zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3591 char *pathname, size_t len) 3592{ 3593 zfs_cmd_t zc = { 0 }; 3594 boolean_t mounted = B_FALSE; 3595 char *mntpnt = NULL; 3596 char dsname[MAXNAMELEN]; 3597 3598 if (dsobj == 0) { 3599 /* special case for the MOS */ 3600 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3601 return; 3602 } 3603 3604 /* get the dataset's name */ 3605 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3606 zc.zc_obj = dsobj; 3607 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3608 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3609 /* just write out a path of two object numbers */ 3610 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3611 dsobj, obj); 3612 return; 3613 } 3614 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3615 3616 /* find out if the dataset is mounted */ 3617 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3618 3619 /* get the corrupted object's path */ 3620 (void) strlcpy(zc.zc_name, dsname, sizeof 
(zc.zc_name)); 3621 zc.zc_obj = obj; 3622 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3623 &zc) == 0) { 3624 if (mounted) { 3625 (void) snprintf(pathname, len, "%s%s", mntpnt, 3626 zc.zc_value); 3627 } else { 3628 (void) snprintf(pathname, len, "%s:%s", 3629 dsname, zc.zc_value); 3630 } 3631 } else { 3632 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3633 } 3634 free(mntpnt); 3635} 3636 3637#ifdef sun 3638/* 3639 * Read the EFI label from the config, if a label does not exist then 3640 * pass back the error to the caller. If the caller has passed a non-NULL 3641 * diskaddr argument then we set it to the starting address of the EFI 3642 * partition. 3643 */ 3644static int 3645read_efi_label(nvlist_t *config, diskaddr_t *sb) 3646{ 3647 char *path; 3648 int fd; 3649 char diskname[MAXPATHLEN]; 3650 int err = -1; 3651 3652 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3653 return (err); 3654 3655 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT, 3656 strrchr(path, '/')); 3657 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3658 struct dk_gpt *vtoc; 3659 3660 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3661 if (sb != NULL) 3662 *sb = vtoc->efi_parts[0].p_start; 3663 efi_free(vtoc); 3664 } 3665 (void) close(fd); 3666 } 3667 return (err); 3668} 3669 3670/* 3671 * determine where a partition starts on a disk in the current 3672 * configuration 3673 */ 3674static diskaddr_t 3675find_start_block(nvlist_t *config) 3676{ 3677 nvlist_t **child; 3678 uint_t c, children; 3679 diskaddr_t sb = MAXOFFSET_T; 3680 uint64_t wholedisk; 3681 3682 if (nvlist_lookup_nvlist_array(config, 3683 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3684 if (nvlist_lookup_uint64(config, 3685 ZPOOL_CONFIG_WHOLE_DISK, 3686 &wholedisk) != 0 || !wholedisk) { 3687 return (MAXOFFSET_T); 3688 } 3689 if (read_efi_label(config, &sb) < 0) 3690 sb = MAXOFFSET_T; 3691 return (sb); 3692 } 3693 3694 for (c = 0; c < children; c++) { 3695 sb = 
find_start_block(child[c]); 3696 if (sb != MAXOFFSET_T) { 3697 return (sb); 3698 } 3699 } 3700 return (MAXOFFSET_T); 3701} 3702#endif /* sun */ 3703 3704/* 3705 * Label an individual disk. The name provided is the short name, 3706 * stripped of any leading /dev path. 3707 */ 3708int 3709zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name) 3710{ 3711#ifdef sun 3712 char path[MAXPATHLEN]; 3713 struct dk_gpt *vtoc; 3714 int fd; 3715 size_t resv = EFI_MIN_RESV_SIZE; 3716 uint64_t slice_size; 3717 diskaddr_t start_block; 3718 char errbuf[1024]; 3719 3720 /* prepare an error message just in case */ 3721 (void) snprintf(errbuf, sizeof (errbuf), 3722 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3723 3724 if (zhp) { 3725 nvlist_t *nvroot; 3726
|
3706 if (pool_is_bootable(zhp)) {
|
3727 if (zpool_is_bootable(zhp)) { |
3728 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3729 "EFI labeled devices are not supported on root " 3730 "pools.")); 3731 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3732 } 3733 3734 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3735 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3736 3737 if (zhp->zpool_start_block == 0) 3738 start_block = find_start_block(nvroot); 3739 else 3740 start_block = zhp->zpool_start_block; 3741 zhp->zpool_start_block = start_block; 3742 } else { 3743 /* new pool */ 3744 start_block = NEW_START_BLOCK; 3745 } 3746 3747 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name, 3748 BACKUP_SLICE); 3749 3750 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3751 /* 3752 * This shouldn't happen. We've long since verified that this 3753 * is a valid device. 3754 */ 3755 zfs_error_aux(hdl, 3756 dgettext(TEXT_DOMAIN, "unable to open device")); 3757 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 3758 } 3759 3760 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 3761 /* 3762 * The only way this can fail is if we run out of memory, or we 3763 * were unable to read the disk's capacity 3764 */ 3765 if (errno == ENOMEM) 3766 (void) no_memory(hdl); 3767 3768 (void) close(fd); 3769 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3770 "unable to read disk capacity"), name); 3771 3772 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 3773 } 3774 3775 slice_size = vtoc->efi_last_u_lba + 1; 3776 slice_size -= EFI_MIN_RESV_SIZE; 3777 if (start_block == MAXOFFSET_T) 3778 start_block = NEW_START_BLOCK; 3779 slice_size -= start_block; 3780 3781 vtoc->efi_parts[0].p_start = start_block; 3782 vtoc->efi_parts[0].p_size = slice_size; 3783 3784 /* 3785 * Why we use V_USR: V_BACKUP confuses users, and is considered 3786 * disposable by some EFI utilities (since EFI doesn't have a backup 3787 * slice). V_UNASSIGNED is supposed to be used only for zero size 3788 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 3789 * etc. 
were all pretty specific. V_USR is as close to reality as we 3790 * can get, in the absence of V_OTHER. 3791 */ 3792 vtoc->efi_parts[0].p_tag = V_USR; 3793 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 3794 3795 vtoc->efi_parts[8].p_start = slice_size + start_block; 3796 vtoc->efi_parts[8].p_size = resv; 3797 vtoc->efi_parts[8].p_tag = V_RESERVED; 3798 3799 if (efi_write(fd, vtoc) != 0) { 3800 /* 3801 * Some block drivers (like pcata) may not support EFI 3802 * GPT labels. Print out a helpful error message dir- 3803 * ecting the user to manually label the disk and give 3804 * a specific slice. 3805 */ 3806 (void) close(fd); 3807 efi_free(vtoc); 3808 3809 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3810 "try using fdisk(1M) and then provide a specific slice")); 3811 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 3812 } 3813 3814 (void) close(fd); 3815 efi_free(vtoc); 3816#endif /* sun */ 3817 return (0); 3818} 3819 3820static boolean_t 3821supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 3822{ 3823 char *type; 3824 nvlist_t **child; 3825 uint_t children, c; 3826 3827 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 3828 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 || 3829 strcmp(type, VDEV_TYPE_FILE) == 0 || 3830 strcmp(type, VDEV_TYPE_LOG) == 0 || 3831 strcmp(type, VDEV_TYPE_HOLE) == 0 || 3832 strcmp(type, VDEV_TYPE_MISSING) == 0) { 3833 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3834 "vdev type '%s' is not supported"), type); 3835 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 3836 return (B_FALSE); 3837 } 3838 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 3839 &child, &children) == 0) { 3840 for (c = 0; c < children; c++) { 3841 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 3842 return (B_FALSE); 3843 } 3844 } 3845 return (B_TRUE); 3846} 3847 3848/* 3849 * check if this zvol is allowable for use as a dump device; zero if 3850 * it is, > 0 if it isn't, < 0 if it isn't a zvol 3851 */ 3852int 
3853zvol_check_dump_config(char *arg) 3854{ 3855 zpool_handle_t *zhp = NULL; 3856 nvlist_t *config, *nvroot; 3857 char *p, *volname; 3858 nvlist_t **top; 3859 uint_t toplevels; 3860 libzfs_handle_t *hdl; 3861 char errbuf[1024]; 3862 char poolname[ZPOOL_MAXNAMELEN]; 3863 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 3864 int ret = 1; 3865 3866 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 3867 return (-1); 3868 } 3869 3870 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 3871 "dump is not supported on device '%s'"), arg); 3872 3873 if ((hdl = libzfs_init()) == NULL) 3874 return (1); 3875 libzfs_print_on_error(hdl, B_TRUE); 3876 3877 volname = arg + pathlen; 3878 3879 /* check the configuration of the pool */ 3880 if ((p = strchr(volname, '/')) == NULL) { 3881 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3882 "malformed dataset name")); 3883 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 3884 return (1); 3885 } else if (p - volname >= ZFS_MAXNAMELEN) { 3886 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3887 "dataset name is too long")); 3888 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 3889 return (1); 3890 } else { 3891 (void) strncpy(poolname, volname, p - volname); 3892 poolname[p - volname] = '\0'; 3893 } 3894 3895 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 3896 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3897 "could not open pool '%s'"), poolname); 3898 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 3899 goto out; 3900 } 3901 config = zpool_get_config(zhp, NULL); 3902 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3903 &nvroot) != 0) { 3904 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3905 "could not obtain vdev configuration for '%s'"), poolname); 3906 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 3907 goto out; 3908 } 3909 3910 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3911 &top, &toplevels) == 0); 3912 if (toplevels != 1) { 3913 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3914 "'%s' has multiple top level vdevs"), 
poolname); 3915 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf); 3916 goto out; 3917 } 3918 3919 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 3920 goto out; 3921 } 3922 ret = 0; 3923 3924out: 3925 if (zhp) 3926 zpool_close(zhp); 3927 libzfs_fini(hdl); 3928 return (ret); 3929}
|