libzfs_pool.c revision 259813
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#define	DISK_ROOT	"/dev/dsk"
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				if (srctype != NULL)
					*srctype = src;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_EXPANDSZ:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
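
/*
 * Illustrative usage sketch, not part of this file: reading a pool
 * property with zpool_get_prop().  A consumer would #include <libzfs.h>
 * and link against libzfs; the pool name "tank" is an assumption and
 * error handling is elided.
 */
#if 0
static void
example_print_capacity(void)
{
	libzfs_handle_t *hdl = libzfs_init();
	zpool_handle_t *zhp = zpool_open(hdl, "tank");
	char buf[ZPOOL_MAXPROPLEN];
	zprop_source_t src;

	/* e.g. "42%" ends up in buf; src says where the value came from */
	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf,
	    sizeof (buf), &src) == 0)
		(void) printf("capacity: %s\n", buf);

	zpool_close(zhp);
	libzfs_fini(hdl);
}
#endif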

/*
 * Check that the bootfs name is within the pool it is being set on;
 * bootfs is assumed to be a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
#ifdef sun
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
#endif	/* sun */
	return (B_FALSE);
}

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}


/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value must be a dataset name,
			 * and that dataset must reside in the pool the
			 * property is being set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#ifdef sun
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif	/* sun */
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
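
/*
 * Illustrative usage sketch, not part of this file: setting a pool
 * property.  zpool_set_prop() validates the pair through
 * zpool_valid_proplist() before issuing the ioctl.  The hdl/zhp
 * variables would come from libzfs_init()/zpool_open(); the property
 * value is an assumption.
 */
#if 0
	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
#endif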

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (int i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
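
/*
 * Illustrative usage sketch, not part of this file: querying a feature
 * flag's state.  The feature name is an assumption; buf receives
 * "disabled", "enabled" or "active" for supported features.
 */
#if 0
	char state[64];

	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
	    sizeof (state)) == 0)
		(void) printf("async_destroy is %s\n", state);
#endif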

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256

/*
 * Validate the given pool name, optionally recording an extended error
 * message in the library handle.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;

			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
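
/*
 * Illustrative sketch, not part of this file: names such as "tank"
 * pass zpool_name_valid(), while reserved vdev-type prefixes and
 * disk-like names fail when isopen is B_FALSE.
 */
#if 0
	assert(zpool_name_valid(hdl, B_FALSE, "tank") == B_TRUE);
	assert(zpool_name_valid(hdl, B_FALSE, "mirror0") == B_FALSE);
	assert(zpool_name_valid(hdl, B_FALSE, "c0t0d0") == B_FALSE);
#endif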

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
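
/*
 * Illustrative sketch, not part of this file: zpool_open() refuses
 * FAULTED pools, while zpool_open_canfail() still returns a handle so
 * read-only queries such as zpool_get_prop() can run.  The pool name
 * is an assumption.
 */
#if 0
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, "tank")) != NULL) {
		if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL)
			(void) printf("pool is faulted\n");
		zpool_close(zhp);
	}
#endif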

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
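
/*
 * Illustrative sketch, not part of this file: building a minimal
 * single-disk vdev tree and creating a pool.  Real callers (zpool(1M))
 * build nvroot through make_root_vdev(), which also labels the disks;
 * the device path here is an assumption and error handling is elided.
 */
#if 0
	nvlist_t *nvroot, *disk;
	nvlist_t *children[1];

	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
	    "/dev/dsk/c0t1d0s0") == 0);

	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	children[0] = disk;
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    children, 1) == 0);

	if (zpool_create(hdl, "tank", nvroot, NULL, NULL) != 0)
		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));

	nvlist_free(nvroot);
	nvlist_free(disk);
#endif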

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
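
/*
 * Illustrative sketch, not part of this file: exporting a pool.  The
 * log string is recorded in the pool history; the force flags mirror
 * 'zpool export [-f]'.
 */
#if 0
	if (zpool_export(zhp, B_FALSE, "example export") != 0)
		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
#endif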

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded", loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem.  "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly.  "), (loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly.  "), loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain.  "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain.  "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
"clear" : "import", name); 1556 1557 (void) printf(dgettext(TEXT_DOMAIN, 1558 "A scrub of the pool\n" 1559 "\tis strongly recommended after recovery.\n")); 1560 return; 1561 1562no_info: 1563 (void) printf(dgettext(TEXT_DOMAIN, 1564 "Destroy and re-create the pool from\n\ta backup source.\n")); 1565} 1566 1567/* 1568 * zpool_import() is a contracted interface. Should be kept the same 1569 * if possible. 1570 * 1571 * Applications should use zpool_import_props() to import a pool with 1572 * new properties value to be set. 1573 */ 1574int 1575zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1576 char *altroot) 1577{ 1578 nvlist_t *props = NULL; 1579 int ret; 1580 1581 if (altroot != NULL) { 1582 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1583 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1584 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1585 newname)); 1586 } 1587 1588 if (nvlist_add_string(props, 1589 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1590 nvlist_add_string(props, 1591 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1592 nvlist_free(props); 1593 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1594 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1595 newname)); 1596 } 1597 } 1598 1599 ret = zpool_import_props(hdl, config, newname, props, 1600 ZFS_IMPORT_NORMAL); 1601 if (props) 1602 nvlist_free(props); 1603 return (ret); 1604} 1605 1606static void 1607print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1608 int indent) 1609{ 1610 nvlist_t **child; 1611 uint_t c, children; 1612 char *vname; 1613 uint64_t is_log = 0; 1614 1615 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1616 &is_log); 1617 1618 if (name != NULL) 1619 (void) printf("\t%*s%s%s\n", indent, "", name, 1620 is_log ? " [log]" : ""); 1621 1622 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1623 &child, &children) != 0) 1624 return; 1625 1626 for (c = 0; c < children; c++) { 1627 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); 1628 print_vdev_tree(hdl, vname, child[c], indent + 2); 1629 free(vname); 1630 } 1631} 1632 1633void 1634zpool_print_unsup_feat(nvlist_t *config) 1635{ 1636 nvlist_t *nvinfo, *unsup_feat; 1637 1638 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 1639 0); 1640 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT, 1641 &unsup_feat) == 0); 1642 1643 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL; 1644 nvp = nvlist_next_nvpair(unsup_feat, nvp)) { 1645 char *desc; 1646 1647 verify(nvpair_type(nvp) == DATA_TYPE_STRING); 1648 verify(nvpair_value_string(nvp, &desc) == 0); 1649 1650 if (strlen(desc) > 0) 1651 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc); 1652 else 1653 (void) printf("\t%s\n", nvpair_name(nvp)); 1654 } 1655} 1656 1657/* 1658 * Import the given pool using the known configuration and a list of 1659 * properties to be set. The configuration should have come from 1660 * zpool_find_import(). The 'newname' parameters control whether the pool 1661 * is imported with a different name. 

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = { 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
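
/*
 * Illustrative sketch, not part of this file: starting and cancelling
 * a scrub, as 'zpool scrub [-s]' does.
 */
#if 0
	if (zpool_scan(zhp, POOL_SCAN_SCRUB) != 0)
		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));

	/* later: cancel the scrub */
	(void) zpool_scan(zhp, POOL_SCAN_NONE);
#endif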

/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string.  Users of this are expected to do their own
 * verification of the s# part.
 */
#define	CTD_CHECK(str)	(str && str[0] == 'c' && isdigit(str[1]))

/*
 * More elaborate version for ones which may start with "/dev/dsk/"
 * and the like.
 */
static int
ctd_check_path(char *str) {
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)
				;
		}
		str = tmp + 1;
	}
	return (CTD_CHECK(str));
}
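
/*
 * Illustrative sketch, not part of this file: paths accepted and
 * rejected by ctd_check_path().
 */
#if 0
	assert(ctd_check_path("/dev/dsk/c0t0d0s0"));
	assert(ctd_check_path("/dev/dsk/c0t0d0s0/old"));
	assert(!ctd_check_path("/dev/dsk/emcpower0a"));
#endif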

/*
 * Find a vdev that matches the search criteria specified.  We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it is an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value.  Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
		 *   "s0" or "s0/old".  The "s0" part is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
		    ctd_check_path(val)) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (wholedisk) {
				int slen = strlen(srchval);
				int vlen = strlen(val);

				if (slen != vlen - 2)
					break;

				/*
				 * make_leaf_vdev() should only set
				 * wholedisk for ZPOOL_CONFIG_PATHs which
				 * will include "/dev/dsk/", giving plenty of
				 * room for the indices used next.
				 */
				ASSERT(vlen >= 6);

				/*
				 * strings identical except trailing "s0"
				 */
				if (strcmp(&val[vlen - 2], "s0") == 0 &&
				    strncmp(srchval, val, slen) == 0)
					return (nv);

				/*
				 * strings identical except trailing "s0/old"
				 */
				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
				    strcmp(&srchval[slen - 4], "/old") == 0 &&
				    strncmp(srchval, val, slen - 4) == 0)
					return (nv);

				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs.  So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}

/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2155 */ 2156boolean_t 2157zpool_vdev_is_interior(const char *name) 2158{ 2159 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2160 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2161 return (B_TRUE); 2162 return (B_FALSE); 2163} 2164 2165nvlist_t * 2166zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2167 boolean_t *l2cache, boolean_t *log) 2168{ 2169 char buf[MAXPATHLEN]; 2170 char *end; 2171 nvlist_t *nvroot, *search, *ret; 2172 uint64_t guid; 2173 2174 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2175 2176 guid = strtoull(path, &end, 10); 2177 if (guid != 0 && *end == '\0') { 2178 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2179 } else if (zpool_vdev_is_interior(path)) { 2180 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2181 } else if (path[0] != '/') { 2182 (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path); 2183 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 2184 } else { 2185 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2186 } 2187 2188 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2189 &nvroot) == 0); 2190 2191 *avail_spare = B_FALSE; 2192 *l2cache = B_FALSE; 2193 if (log != NULL) 2194 *log = B_FALSE; 2195 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2196 nvlist_free(search); 2197 2198 return (ret); 2199} 2200 2201static int 2202vdev_online(nvlist_t *nv) 2203{ 2204 uint64_t ival; 2205 2206 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2207 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2208 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2209 return (0); 2210 2211 return (1); 2212} 2213 2214/* 2215 * Helper function for zpool_get_physpaths(). 2216 */ 2217static int 2218vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2219 size_t *bytes_written) 2220{ 2221 size_t bytes_left, pos, rsz; 2222 char *tmppath; 2223 const char *format; 2224 2225 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2226 &tmppath) != 0) 2227 return (EZFS_NODEVICE); 2228 2229 pos = *bytes_written; 2230 bytes_left = physpath_size - pos; 2231 format = (pos == 0) ? "%s" : " %s"; 2232 2233 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2234 *bytes_written += rsz; 2235 2236 if (rsz >= bytes_left) { 2237 /* if physpath was not copied properly, clear it */ 2238 if (bytes_left != 0) { 2239 physpath[pos] = 0; 2240 } 2241 return (EZFS_NOSPC); 2242 } 2243 return (0); 2244} 2245 2246static int 2247vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2248 size_t *rsz, boolean_t is_spare) 2249{ 2250 char *type; 2251 int ret; 2252 2253 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2254 return (EZFS_INVALCONFIG); 2255 2256 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2257 /* 2258 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2259 * For a spare vdev, we only want to boot from the active 2260 * spare device. 
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}

/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * A root pool cannot have EFI labeled disks and can only have
	 * a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
	    pool_uses_efi(vdev_root))
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}

/*
 * Get phys_path for a root pool.
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
	    phypath_size));
}

/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
{
#ifdef sun
	char path[MAXPATHLEN];
	char errbuf[1024];
	int fd, error;
	int (*_efi_use_whole_disk)(int);

	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
	    "efi_use_whole_disk")) == NULL)
		return (-1);

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot relabel '%s'"), name);

	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left. If so, we simply
	 * ignore that error and continue on.
2379 */ 2380 error = _efi_use_whole_disk(fd); 2381 (void) close(fd); 2382 if (error && error != VT_ENOSPC) { 2383 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2384 "relabel '%s': unable to read disk capacity"), name); 2385 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2386 } 2387#endif /* sun */ 2388 return (0); 2389} 2390 2391/* 2392 * Bring the specified vdev online. The 'flags' parameter is a set of the 2393 * ZFS_ONLINE_* flags. 2394 */ 2395int 2396zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2397 vdev_state_t *newstate) 2398{ 2399 zfs_cmd_t zc = { 0 }; 2400 char msg[1024]; 2401 nvlist_t *tgt; 2402 boolean_t avail_spare, l2cache, islog; 2403 libzfs_handle_t *hdl = zhp->zpool_hdl; 2404 2405 if (flags & ZFS_ONLINE_EXPAND) { 2406 (void) snprintf(msg, sizeof (msg), 2407 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2408 } else { 2409 (void) snprintf(msg, sizeof (msg), 2410 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2411 } 2412 2413 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2414 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2415 &islog)) == NULL) 2416 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2417 2418 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2419 2420 if (avail_spare) 2421 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2422 2423 if (flags & ZFS_ONLINE_EXPAND || 2424 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) { 2425 char *pathname = NULL; 2426 uint64_t wholedisk = 0; 2427 2428 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2429 &wholedisk); 2430 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, 2431 &pathname) == 0); 2432 2433 /* 2434 * XXX - L2ARC 1.0 devices can't support expansion. 2435 */ 2436 if (l2cache) { 2437 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2438 "cannot expand cache devices")); 2439 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2440 } 2441 2442 if (wholedisk) { 2443 pathname += strlen(DISK_ROOT) + 1; 2444 (void) zpool_relabel_disk(hdl, pathname); 2445 } 2446 } 2447 2448 zc.zc_cookie = VDEV_STATE_ONLINE; 2449 zc.zc_obj = flags; 2450 2451 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2452 if (errno == EINVAL) { 2453 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2454 "from this pool into a new one. Use '%s' " 2455 "instead"), "zpool detach"); 2456 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2457 } 2458 return (zpool_standard_error(hdl, errno, msg)); 2459 } 2460 2461 *newstate = zc.zc_cookie; 2462 return (0); 2463} 2464 2465/* 2466 * Take the specified vdev offline 2467 */ 2468int 2469zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2470{ 2471 zfs_cmd_t zc = { 0 }; 2472 char msg[1024]; 2473 nvlist_t *tgt; 2474 boolean_t avail_spare, l2cache; 2475 libzfs_handle_t *hdl = zhp->zpool_hdl; 2476 2477 (void) snprintf(msg, sizeof (msg), 2478 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2479 2480 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2481 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2482 NULL)) == NULL) 2483 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2484 2485 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2486 2487 if (avail_spare) 2488 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2489 2490 zc.zc_cookie = VDEV_STATE_OFFLINE; 2491 zc.zc_obj = istmp ? 
ZFS_OFFLINE_TEMPORARY : 0; 2492 2493 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2494 return (0); 2495 2496 switch (errno) { 2497 case EBUSY: 2498 2499 /* 2500 * There are no other replicas of this device. 2501 */ 2502 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2503 2504 case EEXIST: 2505 /* 2506 * The log device has unplayed logs 2507 */ 2508 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2509 2510 default: 2511 return (zpool_standard_error(hdl, errno, msg)); 2512 } 2513} 2514 2515/* 2516 * Mark the given vdev faulted. 2517 */ 2518int 2519zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2520{ 2521 zfs_cmd_t zc = { 0 }; 2522 char msg[1024]; 2523 libzfs_handle_t *hdl = zhp->zpool_hdl; 2524 2525 (void) snprintf(msg, sizeof (msg), 2526 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2527 2528 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2529 zc.zc_guid = guid; 2530 zc.zc_cookie = VDEV_STATE_FAULTED; 2531 zc.zc_obj = aux; 2532 2533 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2534 return (0); 2535 2536 switch (errno) { 2537 case EBUSY: 2538 2539 /* 2540 * There are no other replicas of this device. 2541 */ 2542 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2543 2544 default: 2545 return (zpool_standard_error(hdl, errno, msg)); 2546 } 2547 2548} 2549 2550/* 2551 * Mark the given vdev degraded. 2552 */ 2553int 2554zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2555{ 2556 zfs_cmd_t zc = { 0 }; 2557 char msg[1024]; 2558 libzfs_handle_t *hdl = zhp->zpool_hdl; 2559 2560 (void) snprintf(msg, sizeof (msg), 2561 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2562 2563 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2564 zc.zc_guid = guid; 2565 zc.zc_cookie = VDEV_STATE_DEGRADED; 2566 zc.zc_obj = aux; 2567 2568 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2569 return (0); 2570 2571 return (zpool_standard_error(hdl, errno, msg)); 2572} 2573 2574/* 2575 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2576 * a hot spare. 2577 */ 2578static boolean_t 2579is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2580{ 2581 nvlist_t **child; 2582 uint_t c, children; 2583 char *type; 2584 2585 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2586 &children) == 0) { 2587 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2588 &type) == 0); 2589 2590 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2591 children == 2 && child[which] == tgt) 2592 return (B_TRUE); 2593 2594 for (c = 0; c < children; c++) 2595 if (is_replacing_spare(child[c], tgt, which)) 2596 return (B_TRUE); 2597 } 2598 2599 return (B_FALSE); 2600} 2601 2602/* 2603 * Attach new_disk (fully described by nvroot) to old_disk. 2604 * If 'replacing' is specified, the new disk will replace the old one. 
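 *
 * A minimal calling sketch (names are hypothetical; 'nvroot' is a
 * single-disk vdev tree such as the one zpool(1M) builds from the
 * command line):
 *
 *	if (zpool_vdev_attach(zhp, "c0t1d0", "c0t2d0", nvroot, 1) != 0)
 *		return (1);	(error text already set on the handle)
 *
 * Passing 1 for 'replacing' gives 'zpool replace' semantics; passing 0
 * gives 'zpool attach' semantics.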
2605 */ 2606int 2607zpool_vdev_attach(zpool_handle_t *zhp, 2608 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2609{ 2610 zfs_cmd_t zc = { 0 }; 2611 char msg[1024]; 2612 int ret; 2613 nvlist_t *tgt; 2614 boolean_t avail_spare, l2cache, islog; 2615 uint64_t val; 2616 char *newname; 2617 nvlist_t **child; 2618 uint_t children; 2619 nvlist_t *config_root; 2620 libzfs_handle_t *hdl = zhp->zpool_hdl; 2621 boolean_t rootpool = zpool_is_bootable(zhp); 2622 2623 if (replacing) 2624 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2625 "cannot replace %s with %s"), old_disk, new_disk); 2626 else 2627 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2628 "cannot attach %s to %s"), new_disk, old_disk); 2629 2630 /* 2631 * If this is a root pool, make sure that we're not attaching an 2632 * EFI labeled device. 2633 */ 2634 if (rootpool && pool_uses_efi(nvroot)) { 2635 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2636 "EFI labeled devices are not supported on root pools.")); 2637 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 2638 } 2639 2640 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2641 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2642 &islog)) == 0) 2643 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2644 2645 if (avail_spare) 2646 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2647 2648 if (l2cache) 2649 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2650 2651 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2652 zc.zc_cookie = replacing; 2653 2654 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2655 &child, &children) != 0 || children != 1) { 2656 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2657 "new device must be a single disk")); 2658 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2659 } 2660 2661 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2662 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2663 2664 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2665 return (-1); 2666 2667 /* 2668 * If the target is a hot spare that has been swapped in, we can only 2669 * replace it with another hot spare. 2670 */ 2671 if (replacing && 2672 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2673 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2674 NULL) == NULL || !avail_spare) && 2675 is_replacing_spare(config_root, tgt, 1)) { 2676 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2677 "can only be replaced by another hot spare")); 2678 free(newname); 2679 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2680 } 2681 2682 free(newname); 2683 2684 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2685 return (-1); 2686 2687 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2688 2689 zcmd_free_nvlists(&zc); 2690 2691 if (ret == 0) { 2692 if (rootpool) { 2693 /* 2694 * XXX need a better way to prevent user from 2695 * booting up a half-baked vdev. 
2696 */ 2697 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2698 "sure to wait until resilver is done " 2699 "before rebooting.\n")); 2700 (void) fprintf(stderr, "\n"); 2701 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If " 2702 "you boot from pool '%s', you may need to update\n" 2703 "boot code on newly attached disk '%s'.\n\n" 2704 "Assuming you use GPT partitioning and 'da0' is " 2705 "your new boot disk\n" 2706 "you may use the following command:\n\n" 2707 "\tgpart bootcode -b /boot/pmbr -p " 2708 "/boot/gptzfsboot -i 1 da0\n\n"), 2709 zhp->zpool_name, new_disk); 2710 } 2711 return (0); 2712 } 2713 2714 switch (errno) { 2715 case ENOTSUP: 2716 /* 2717 * Can't attach to or replace this type of vdev. 2718 */ 2719 if (replacing) { 2720 uint64_t version = zpool_get_prop_int(zhp, 2721 ZPOOL_PROP_VERSION, NULL); 2722 2723 if (islog) 2724 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2725 "cannot replace a log with a spare")); 2726 else if (version >= SPA_VERSION_MULTI_REPLACE) 2727 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2728 "already in replacing/spare config; wait " 2729 "for completion or use 'zpool detach'")); 2730 else 2731 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2732 "cannot replace a replacing device")); 2733 } else { 2734 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2735 "can only attach to mirrors and top-level " 2736 "disks")); 2737 } 2738 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2739 break; 2740 2741 case EINVAL: 2742 /* 2743 * The new device must be a single disk. 2744 */ 2745 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2746 "new device must be a single disk")); 2747 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2748 break; 2749 2750 case EBUSY: 2751 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2752 new_disk); 2753 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2754 break; 2755 2756 case EOVERFLOW: 2757 /* 2758 * The new device is too small. 2759 */ 2760 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2761 "device is too small")); 2762 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2763 break; 2764 2765 case EDOM: 2766 /* 2767 * The new device has a different alignment requirement. 2768 */ 2769 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2770 "devices have different sector alignment")); 2771 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2772 break; 2773 2774 case ENAMETOOLONG: 2775 /* 2776 * The resulting top-level vdev spec won't fit in the label. 2777 */ 2778 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2779 break; 2780 2781 default: 2782 (void) zpool_standard_error(hdl, errno, msg); 2783 } 2784 2785 return (-1); 2786} 2787 2788/* 2789 * Detach the specified device. 
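 *
 * Sketch (assumes an open handle and a mirrored or replacing vdev):
 *
 *	if (zpool_vdev_detach(zhp, "c0t1d0") != 0)
 *		return (1);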
2790 */ 2791int 2792zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 2793{ 2794 zfs_cmd_t zc = { 0 }; 2795 char msg[1024]; 2796 nvlist_t *tgt; 2797 boolean_t avail_spare, l2cache; 2798 libzfs_handle_t *hdl = zhp->zpool_hdl; 2799 2800 (void) snprintf(msg, sizeof (msg), 2801 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 2802 2803 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2804 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2805 NULL)) == 0) 2806 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2807 2808 if (avail_spare) 2809 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2810 2811 if (l2cache) 2812 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2813 2814 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2815 2816 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 2817 return (0); 2818 2819 switch (errno) { 2820 2821 case ENOTSUP: 2822 /* 2823 * Can't detach from this type of vdev. 2824 */ 2825 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 2826 "applicable to mirror and replacing vdevs")); 2827 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2828 break; 2829 2830 case EBUSY: 2831 /* 2832 * There are no other replicas of this device. 2833 */ 2834 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 2835 break; 2836 2837 default: 2838 (void) zpool_standard_error(hdl, errno, msg); 2839 } 2840 2841 return (-1); 2842} 2843 2844/* 2845 * Find a mirror vdev in the source nvlist. 2846 * 2847 * The mchild array contains a list of disks in one of the top-level mirrors 2848 * of the source pool. The schild array contains a list of disks that the 2849 * user specified on the command line. We loop over the mchild array to 2850 * see if any entry in the schild array matches. 2851 * 2852 * If a disk in the mchild array is found in the schild array, we return 2853 * the index of that entry. Otherwise we return -1. 2854 */ 2855static int 2856find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2857 nvlist_t **schild, uint_t schildren) 2858{ 2859 uint_t mc; 2860 2861 for (mc = 0; mc < mchildren; mc++) { 2862 uint_t sc; 2863 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2864 mchild[mc], B_FALSE); 2865 2866 for (sc = 0; sc < schildren; sc++) { 2867 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2868 schild[sc], B_FALSE); 2869 boolean_t result = (strcmp(mpath, spath) == 0); 2870 2871 free(spath); 2872 if (result) { 2873 free(mpath); 2874 return (mc); 2875 } 2876 } 2877 2878 free(mpath); 2879 } 2880 2881 return (-1); 2882} 2883 2884/* 2885 * Split a mirror pool. If newroot points to null, then a new nvlist 2886 * is generated and it is the responsibility of the caller to free it. 
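 *
 * A minimal sketch of the 'zpool split' call path (hypothetical names;
 * NULL props and zeroed flags request the default behavior):
 *
 *	splitflags_t flags = { 0 };
 *	nvlist_t *newroot = NULL;
 *
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0)
 *		nvlist_free(newroot);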
2887 */ 2888int 2889zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2890 nvlist_t *props, splitflags_t flags) 2891{ 2892 zfs_cmd_t zc = { 0 }; 2893 char msg[1024]; 2894 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2895 nvlist_t **varray = NULL, *zc_props = NULL; 2896 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2897 libzfs_handle_t *hdl = zhp->zpool_hdl; 2898 uint64_t vers; 2899 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2900 int retval = 0; 2901 2902 (void) snprintf(msg, sizeof (msg), 2903 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2904 2905 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2906 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2907 2908 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2909 (void) fprintf(stderr, gettext("Internal error: unable to " 2910 "retrieve pool configuration\n")); 2911 return (-1); 2912 } 2913 2914 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2915 == 0); 2916 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2917 2918 if (props) { 2919 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 2920 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2921 props, vers, flags, msg)) == NULL) 2922 return (-1); 2923 } 2924 2925 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2926 &children) != 0) { 2927 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2928 "Source pool is missing vdev tree")); 2929 if (zc_props) 2930 nvlist_free(zc_props); 2931 return (-1); 2932 } 2933 2934 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2935 vcount = 0; 2936 2937 if (*newroot == NULL || 2938 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2939 &newchild, &newchildren) != 0) 2940 newchildren = 0; 2941 2942 for (c = 0; c < children; c++) { 2943 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2944 char *type; 2945 nvlist_t **mchild, *vdev; 2946 uint_t mchildren; 2947 int entry; 2948 2949 /* 2950 * Unlike cache & spares, slogs are stored in the 2951 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2952 */ 2953 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2954 &is_log); 2955 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2956 &is_hole); 2957 if (is_log || is_hole) { 2958 /* 2959 * Create a hole vdev and put it in the config. 2960 */ 2961 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2962 goto out; 2963 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2964 VDEV_TYPE_HOLE) != 0) 2965 goto out; 2966 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2967 1) != 0) 2968 goto out; 2969 if (lastlog == 0) 2970 lastlog = vcount; 2971 varray[vcount++] = vdev; 2972 continue; 2973 } 2974 lastlog = 0; 2975 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 2976 == 0); 2977 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 2978 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2979 "Source pool must be composed only of mirrors\n")); 2980 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2981 goto out; 2982 } 2983 2984 verify(nvlist_lookup_nvlist_array(child[c], 2985 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2986 2987 /* find or add an entry for this top-level vdev */ 2988 if (newchildren > 0 && 2989 (entry = find_vdev_entry(zhp, mchild, mchildren, 2990 newchild, newchildren)) >= 0) { 2991 /* We found a disk that the user specified. */ 2992 vdev = mchild[entry]; 2993 ++found; 2994 } else { 2995 /* User didn't specify a disk for this vdev. 
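			 * Default to the last disk in the mirror: the
			 * split pool then takes the final submirror and
			 * the original pool keeps the rest.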
*/ 2996 vdev = mchild[mchildren - 1]; 2997 } 2998 2999 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 3000 goto out; 3001 } 3002 3003 /* did we find every disk the user specified? */ 3004 if (found != newchildren) { 3005 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 3006 "include at most one disk from each mirror")); 3007 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3008 goto out; 3009 } 3010 3011 /* Prepare the nvlist for populating. */ 3012 if (*newroot == NULL) { 3013 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 3014 goto out; 3015 freelist = B_TRUE; 3016 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 3017 VDEV_TYPE_ROOT) != 0) 3018 goto out; 3019 } else { 3020 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 3021 } 3022 3023 /* Add all the children we found */ 3024 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 3025 lastlog == 0 ? vcount : lastlog) != 0) 3026 goto out; 3027 3028 /* 3029 * If we're just doing a dry run, exit now with success. 3030 */ 3031 if (flags.dryrun) { 3032 memory_err = B_FALSE; 3033 freelist = B_FALSE; 3034 goto out; 3035 } 3036 3037 /* now build up the config list & call the ioctl */ 3038 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3039 goto out; 3040 3041 if (nvlist_add_nvlist(newconfig, 3042 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3043 nvlist_add_string(newconfig, 3044 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3045 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3046 goto out; 3047 3048 /* 3049 * The new pool is automatically part of the namespace unless we 3050 * explicitly export it. 3051 */ 3052 if (!flags.import) 3053 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 3054 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3055 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 3056 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 3057 goto out; 3058 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 3059 goto out; 3060 3061 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 3062 retval = zpool_standard_error(hdl, errno, msg); 3063 goto out; 3064 } 3065 3066 freelist = B_FALSE; 3067 memory_err = B_FALSE; 3068 3069out: 3070 if (varray != NULL) { 3071 int v; 3072 3073 for (v = 0; v < vcount; v++) 3074 nvlist_free(varray[v]); 3075 free(varray); 3076 } 3077 zcmd_free_nvlists(&zc); 3078 if (zc_props) 3079 nvlist_free(zc_props); 3080 if (newconfig) 3081 nvlist_free(newconfig); 3082 if (freelist) { 3083 nvlist_free(*newroot); 3084 *newroot = NULL; 3085 } 3086 3087 if (retval != 0) 3088 return (retval); 3089 3090 if (memory_err) 3091 return (no_memory(hdl)); 3092 3093 return (0); 3094} 3095 3096/* 3097 * Remove the given device. Currently, this is supported only for hot spares 3098 * and level 2 cache devices. 3099 */ 3100int 3101zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3102{ 3103 zfs_cmd_t zc = { 0 }; 3104 char msg[1024]; 3105 nvlist_t *tgt; 3106 boolean_t avail_spare, l2cache, islog; 3107 libzfs_handle_t *hdl = zhp->zpool_hdl; 3108 uint64_t version; 3109 3110 (void) snprintf(msg, sizeof (msg), 3111 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3112 3113 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3114 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3115 &islog)) == 0) 3116 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3117 /* 3118 * XXX - this should just go away. 
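	 * Removal of an ordinary top-level data vdev is not implemented
	 * in this version, so anything other than an inactive hot spare,
	 * l2cache, or log device is rejected below.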
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, top-level, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == NULL)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares. Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
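 *
 * Sketch (the guid would typically come from a fault event payload):
 *
 *	(void) zpool_vdev_clear(zhp, guid);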
3220 */ 3221int 3222zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3223{ 3224 zfs_cmd_t zc = { 0 }; 3225 char msg[1024]; 3226 libzfs_handle_t *hdl = zhp->zpool_hdl; 3227 3228 (void) snprintf(msg, sizeof (msg), 3229 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3230 guid); 3231 3232 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3233 zc.zc_guid = guid; 3234 zc.zc_cookie = ZPOOL_NO_REWIND; 3235 3236 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3237 return (0); 3238 3239 return (zpool_standard_error(hdl, errno, msg)); 3240} 3241 3242/* 3243 * Change the GUID for a pool. 3244 */ 3245int 3246zpool_reguid(zpool_handle_t *zhp) 3247{ 3248 char msg[1024]; 3249 libzfs_handle_t *hdl = zhp->zpool_hdl; 3250 zfs_cmd_t zc = { 0 }; 3251 3252 (void) snprintf(msg, sizeof (msg), 3253 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3254 3255 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3256 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3257 return (0); 3258 3259 return (zpool_standard_error(hdl, errno, msg)); 3260} 3261 3262/* 3263 * Reopen the pool. 3264 */ 3265int 3266zpool_reopen(zpool_handle_t *zhp) 3267{ 3268 zfs_cmd_t zc = { 0 }; 3269 char msg[1024]; 3270 libzfs_handle_t *hdl = zhp->zpool_hdl; 3271 3272 (void) snprintf(msg, sizeof (msg), 3273 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), 3274 zhp->zpool_name); 3275 3276 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3277 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0) 3278 return (0); 3279 return (zpool_standard_error(hdl, errno, msg)); 3280} 3281 3282/* 3283 * Convert from a devid string to a path. 3284 */ 3285static char * 3286devid_to_path(char *devid_str) 3287{ 3288 ddi_devid_t devid; 3289 char *minor; 3290 char *path; 3291 devid_nmlist_t *list = NULL; 3292 int ret; 3293 3294 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3295 return (NULL); 3296 3297 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3298 3299 devid_str_free(minor); 3300 devid_free(devid); 3301 3302 if (ret != 0) 3303 return (NULL); 3304 3305 if ((path = strdup(list[0].devname)) == NULL) 3306 return (NULL); 3307 3308 devid_free_nmlist(list); 3309 3310 return (path); 3311} 3312 3313/* 3314 * Convert from a path to a devid string. 3315 */ 3316static char * 3317path_to_devid(const char *path) 3318{ 3319 int fd; 3320 ddi_devid_t devid; 3321 char *minor, *ret; 3322 3323 if ((fd = open(path, O_RDONLY)) < 0) 3324 return (NULL); 3325 3326 minor = NULL; 3327 ret = NULL; 3328 if (devid_get(fd, &devid) == 0) { 3329 if (devid_get_minor_name(fd, &minor) == 0) 3330 ret = devid_str_encode(devid, minor); 3331 if (minor != NULL) 3332 devid_str_free(minor); 3333 devid_free(devid); 3334 } 3335 (void) close(fd); 3336 3337 return (ret); 3338} 3339 3340/* 3341 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3342 * ignore any failure here, since a common case is for an unprivileged user to 3343 * type 'zpool status', and we'll display the correct information anyway. 3344 */ 3345static void 3346set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3347{ 3348 zfs_cmd_t zc = { 0 }; 3349 3350 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3351 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3352 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3353 &zc.zc_guid) == 0); 3354 3355 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3356} 3357 3358/* 3359 * Given a vdev, return the name to display in iostat. 
If the vdev has a path, 3360 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3361 * We also check if this is a whole disk, in which case we strip off the 3362 * trailing 's0' slice name. 3363 * 3364 * This routine is also responsible for identifying when disks have been 3365 * reconfigured in a new location. The kernel will have opened the device by 3366 * devid, but the path will still refer to the old location. To catch this, we 3367 * first do a path -> devid translation (which is fast for the common case). If 3368 * the devid matches, we're done. If not, we do a reverse devid -> path 3369 * translation and issue the appropriate ioctl() to update the path of the vdev. 3370 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3371 * of these checks. 3372 */ 3373char * 3374zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3375 boolean_t verbose) 3376{ 3377 char *path, *devid; 3378 uint64_t value; 3379 char buf[64]; 3380 vdev_stat_t *vs; 3381 uint_t vsc; 3382 int have_stats; 3383 int have_path; 3384 3385 have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3386 (uint64_t **)&vs, &vsc) == 0; 3387 have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0; 3388 3389 /* 3390 * If the device is not currently present, assume it will not 3391 * come back at the same device path. Display the device by GUID. 3392 */ 3393 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 || 3394 have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN) { 3395 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3396 &value) == 0); 3397 (void) snprintf(buf, sizeof (buf), "%llu", 3398 (u_longlong_t)value); 3399 path = buf; 3400 } else if (have_path) { 3401 3402 /* 3403 * If the device is dead (faulted, offline, etc) then don't 3404 * bother opening it. Otherwise we may be forcing the user to 3405 * open a misbehaving device, which can have undesirable 3406 * effects. 3407 */ 3408 if ((have_stats == 0 || 3409 vs->vs_state >= VDEV_STATE_DEGRADED) && 3410 zhp != NULL && 3411 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3412 /* 3413 * Determine if the current path is correct. 3414 */ 3415 char *newdevid = path_to_devid(path); 3416 3417 if (newdevid == NULL || 3418 strcmp(devid, newdevid) != 0) { 3419 char *newpath; 3420 3421 if ((newpath = devid_to_path(devid)) != NULL) { 3422 /* 3423 * Update the path appropriately. 3424 */ 3425 set_path(zhp, nv, newpath); 3426 if (nvlist_add_string(nv, 3427 ZPOOL_CONFIG_PATH, newpath) == 0) 3428 verify(nvlist_lookup_string(nv, 3429 ZPOOL_CONFIG_PATH, 3430 &path) == 0); 3431 free(newpath); 3432 } 3433 } 3434 3435 if (newdevid) 3436 devid_str_free(newdevid); 3437 } 3438 3439#ifdef sun 3440 if (strncmp(path, "/dev/dsk/", 9) == 0) 3441 path += 9; 3442 3443 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3444 &value) == 0 && value) { 3445 int pathlen = strlen(path); 3446 char *tmp = zfs_strdup(hdl, path); 3447 3448 /* 3449 * If it starts with c#, and ends with "s0", chop 3450 * the "s0" off, or if it ends with "s0/old", remove 3451 * the "s0" from the middle. 
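			 * For example, "c0t0d0s0" becomes "c0t0d0" and
			 * "c0t0d0s0/old" becomes "c0t0d0/old".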
			 */
			if (CTD_CHECK(tmp)) {
				if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
					tmp[pathlen - 2] = '\0';
				} else if (pathlen > 6 &&
				    strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
					(void) strcpy(&tmp[pathlen - 6],
					    "/old");
				}
			}
			return (tmp);
		}
#else	/* !sun */
		if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
			path += sizeof(_PATH_DEV) - 1;
#endif	/* !sun */
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (verbose) {
			uint64_t id;

			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
			    (u_longlong_t)id);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}

static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel. If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks. This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process. So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlists of dataset and object numbers.
3567 */ 3568 for (i = 0; i < count; i++) { 3569 nvlist_t *nv; 3570 3571 /* ignoring zb_blkid and zb_level for now */ 3572 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 3573 zb[i-1].zb_object == zb[i].zb_object) 3574 continue; 3575 3576 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 3577 goto nomem; 3578 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 3579 zb[i].zb_objset) != 0) { 3580 nvlist_free(nv); 3581 goto nomem; 3582 } 3583 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 3584 zb[i].zb_object) != 0) { 3585 nvlist_free(nv); 3586 goto nomem; 3587 } 3588 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 3589 nvlist_free(nv); 3590 goto nomem; 3591 } 3592 nvlist_free(nv); 3593 } 3594 3595 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3596 return (0); 3597 3598nomem: 3599 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3600 return (no_memory(zhp->zpool_hdl)); 3601} 3602 3603/* 3604 * Upgrade a ZFS pool to the latest on-disk version. 3605 */ 3606int 3607zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 3608{ 3609 zfs_cmd_t zc = { 0 }; 3610 libzfs_handle_t *hdl = zhp->zpool_hdl; 3611 3612 (void) strcpy(zc.zc_name, zhp->zpool_name); 3613 zc.zc_cookie = new_version; 3614 3615 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 3616 return (zpool_standard_error_fmt(hdl, errno, 3617 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 3618 zhp->zpool_name)); 3619 return (0); 3620} 3621 3622void 3623zfs_save_arguments(int argc, char **argv, char *string, int len) 3624{ 3625 (void) strlcpy(string, basename(argv[0]), len); 3626 for (int i = 1; i < argc; i++) { 3627 (void) strlcat(string, " ", len); 3628 (void) strlcat(string, argv[i], len); 3629 } 3630} 3631 3632int 3633zpool_log_history(libzfs_handle_t *hdl, const char *message) 3634{ 3635 zfs_cmd_t zc = { 0 }; 3636 nvlist_t *args; 3637 int err; 3638 3639 args = fnvlist_alloc(); 3640 fnvlist_add_string(args, "message", message); 3641 err = zcmd_write_src_nvlist(hdl, &zc, args); 3642 if (err == 0) 3643 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc); 3644 nvlist_free(args); 3645 zcmd_free_nvlists(&zc); 3646 return (err); 3647} 3648 3649/* 3650 * Perform ioctl to get some command history of a pool. 3651 * 3652 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 3653 * logical offset of the history buffer to start reading from. 3654 * 3655 * Upon return, 'off' is the next logical offset to read from and 3656 * 'len' is the actual amount of bytes read into 'buf'. 
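 *
 * A call that returns with '*len' set to 0 means the entire history
 * has been consumed; see the read loop in zpool_get_history() below.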
3657 */ 3658static int 3659get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3660{ 3661 zfs_cmd_t zc = { 0 }; 3662 libzfs_handle_t *hdl = zhp->zpool_hdl; 3663 3664 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3665 3666 zc.zc_history = (uint64_t)(uintptr_t)buf; 3667 zc.zc_history_len = *len; 3668 zc.zc_history_offset = *off; 3669 3670 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3671 switch (errno) { 3672 case EPERM: 3673 return (zfs_error_fmt(hdl, EZFS_PERM, 3674 dgettext(TEXT_DOMAIN, 3675 "cannot show history for pool '%s'"), 3676 zhp->zpool_name)); 3677 case ENOENT: 3678 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3679 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3680 "'%s'"), zhp->zpool_name)); 3681 case ENOTSUP: 3682 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3683 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3684 "'%s', pool must be upgraded"), zhp->zpool_name)); 3685 default: 3686 return (zpool_standard_error_fmt(hdl, errno, 3687 dgettext(TEXT_DOMAIN, 3688 "cannot get history for '%s'"), zhp->zpool_name)); 3689 } 3690 } 3691 3692 *len = zc.zc_history_len; 3693 *off = zc.zc_history_offset; 3694 3695 return (0); 3696} 3697 3698/* 3699 * Process the buffer of nvlists, unpacking and storing each nvlist record 3700 * into 'records'. 'leftover' is set to the number of bytes that weren't 3701 * processed as there wasn't a complete record. 3702 */ 3703int 3704zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3705 nvlist_t ***records, uint_t *numrecords) 3706{ 3707 uint64_t reclen; 3708 nvlist_t *nv; 3709 int i; 3710 3711 while (bytes_read > sizeof (reclen)) { 3712 3713 /* get length of packed record (stored as little endian) */ 3714 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3715 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3716 3717 if (bytes_read < sizeof (reclen) + reclen) 3718 break; 3719 3720 /* unpack record */ 3721 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3722 return (ENOMEM); 3723 bytes_read -= sizeof (reclen) + reclen; 3724 buf += sizeof (reclen) + reclen; 3725 3726 /* add record to nvlist array */ 3727 (*numrecords)++; 3728 if (ISP2(*numrecords + 1)) { 3729 *records = realloc(*records, 3730 *numrecords * 2 * sizeof (nvlist_t *)); 3731 } 3732 (*records)[*numrecords - 1] = nv; 3733 } 3734 3735 *leftover = bytes_read; 3736 return (0); 3737} 3738 3739#define HIS_BUF_LEN (128*1024) 3740 3741/* 3742 * Retrieve the command history of a pool. 
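 *
 * Sketch of typical consumption (assumes an open handle 'zhp'):
 *
 *	nvlist_t *nvhis;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		(walk the ZPOOL_HIST_RECORD array)
 *		nvlist_free(nvhis);
 *	}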
3743 */ 3744int 3745zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3746{ 3747 char buf[HIS_BUF_LEN]; 3748 uint64_t off = 0; 3749 nvlist_t **records = NULL; 3750 uint_t numrecords = 0; 3751 int err, i; 3752 3753 do { 3754 uint64_t bytes_read = sizeof (buf); 3755 uint64_t leftover; 3756 3757 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3758 break; 3759 3760 /* if nothing else was read in, we're at EOF, just return */ 3761 if (!bytes_read) 3762 break; 3763 3764 if ((err = zpool_history_unpack(buf, bytes_read, 3765 &leftover, &records, &numrecords)) != 0) 3766 break; 3767 off -= leftover; 3768 3769 /* CONSTCOND */ 3770 } while (1); 3771 3772 if (!err) { 3773 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3774 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3775 records, numrecords) == 0); 3776 } 3777 for (i = 0; i < numrecords; i++) 3778 nvlist_free(records[i]); 3779 free(records); 3780 3781 return (err); 3782} 3783 3784void 3785zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3786 char *pathname, size_t len) 3787{ 3788 zfs_cmd_t zc = { 0 }; 3789 boolean_t mounted = B_FALSE; 3790 char *mntpnt = NULL; 3791 char dsname[MAXNAMELEN]; 3792 3793 if (dsobj == 0) { 3794 /* special case for the MOS */ 3795 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3796 return; 3797 } 3798 3799 /* get the dataset's name */ 3800 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3801 zc.zc_obj = dsobj; 3802 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3803 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3804 /* just write out a path of two object numbers */ 3805 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3806 dsobj, obj); 3807 return; 3808 } 3809 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3810 3811 /* find out if the dataset is mounted */ 3812 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3813 3814 /* get the corrupted object's path */ 3815 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 3816 zc.zc_obj = obj; 3817 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3818 &zc) == 0) { 3819 if (mounted) { 3820 (void) snprintf(pathname, len, "%s%s", mntpnt, 3821 zc.zc_value); 3822 } else { 3823 (void) snprintf(pathname, len, "%s:%s", 3824 dsname, zc.zc_value); 3825 } 3826 } else { 3827 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3828 } 3829 free(mntpnt); 3830} 3831 3832#ifdef sun 3833/* 3834 * Read the EFI label from the config, if a label does not exist then 3835 * pass back the error to the caller. If the caller has passed a non-NULL 3836 * diskaddr argument then we set it to the starting address of the EFI 3837 * partition. 
3838 */ 3839static int 3840read_efi_label(nvlist_t *config, diskaddr_t *sb) 3841{ 3842 char *path; 3843 int fd; 3844 char diskname[MAXPATHLEN]; 3845 int err = -1; 3846 3847 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3848 return (err); 3849 3850 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT, 3851 strrchr(path, '/')); 3852 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3853 struct dk_gpt *vtoc; 3854 3855 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3856 if (sb != NULL) 3857 *sb = vtoc->efi_parts[0].p_start; 3858 efi_free(vtoc); 3859 } 3860 (void) close(fd); 3861 } 3862 return (err); 3863} 3864 3865/* 3866 * determine where a partition starts on a disk in the current 3867 * configuration 3868 */ 3869static diskaddr_t 3870find_start_block(nvlist_t *config) 3871{ 3872 nvlist_t **child; 3873 uint_t c, children; 3874 diskaddr_t sb = MAXOFFSET_T; 3875 uint64_t wholedisk; 3876 3877 if (nvlist_lookup_nvlist_array(config, 3878 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3879 if (nvlist_lookup_uint64(config, 3880 ZPOOL_CONFIG_WHOLE_DISK, 3881 &wholedisk) != 0 || !wholedisk) { 3882 return (MAXOFFSET_T); 3883 } 3884 if (read_efi_label(config, &sb) < 0) 3885 sb = MAXOFFSET_T; 3886 return (sb); 3887 } 3888 3889 for (c = 0; c < children; c++) { 3890 sb = find_start_block(child[c]); 3891 if (sb != MAXOFFSET_T) { 3892 return (sb); 3893 } 3894 } 3895 return (MAXOFFSET_T); 3896} 3897#endif /* sun */ 3898 3899/* 3900 * Label an individual disk. The name provided is the short name, 3901 * stripped of any leading /dev path. 3902 */ 3903int 3904zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name) 3905{ 3906#ifdef sun 3907 char path[MAXPATHLEN]; 3908 struct dk_gpt *vtoc; 3909 int fd; 3910 size_t resv = EFI_MIN_RESV_SIZE; 3911 uint64_t slice_size; 3912 diskaddr_t start_block; 3913 char errbuf[1024]; 3914 3915 /* prepare an error message just in case */ 3916 (void) snprintf(errbuf, sizeof (errbuf), 3917 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3918 3919 if (zhp) { 3920 nvlist_t *nvroot; 3921 3922 if (zpool_is_bootable(zhp)) { 3923 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3924 "EFI labeled devices are not supported on root " 3925 "pools.")); 3926 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3927 } 3928 3929 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3930 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3931 3932 if (zhp->zpool_start_block == 0) 3933 start_block = find_start_block(nvroot); 3934 else 3935 start_block = zhp->zpool_start_block; 3936 zhp->zpool_start_block = start_block; 3937 } else { 3938 /* new pool */ 3939 start_block = NEW_START_BLOCK; 3940 } 3941 3942 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name, 3943 BACKUP_SLICE); 3944 3945 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3946 /* 3947 * This shouldn't happen. We've long since verified that this 3948 * is a valid device. 
3949 */ 3950 zfs_error_aux(hdl, 3951 dgettext(TEXT_DOMAIN, "unable to open device")); 3952 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 3953 } 3954 3955 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 3956 /* 3957 * The only way this can fail is if we run out of memory, or we 3958 * were unable to read the disk's capacity 3959 */ 3960 if (errno == ENOMEM) 3961 (void) no_memory(hdl); 3962 3963 (void) close(fd); 3964 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3965 "unable to read disk capacity"), name); 3966 3967 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 3968 } 3969 3970 slice_size = vtoc->efi_last_u_lba + 1; 3971 slice_size -= EFI_MIN_RESV_SIZE; 3972 if (start_block == MAXOFFSET_T) 3973 start_block = NEW_START_BLOCK; 3974 slice_size -= start_block; 3975 3976 vtoc->efi_parts[0].p_start = start_block; 3977 vtoc->efi_parts[0].p_size = slice_size; 3978 3979 /* 3980 * Why we use V_USR: V_BACKUP confuses users, and is considered 3981 * disposable by some EFI utilities (since EFI doesn't have a backup 3982 * slice). V_UNASSIGNED is supposed to be used only for zero size 3983 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 3984 * etc. were all pretty specific. V_USR is as close to reality as we 3985 * can get, in the absence of V_OTHER. 3986 */ 3987 vtoc->efi_parts[0].p_tag = V_USR; 3988 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 3989 3990 vtoc->efi_parts[8].p_start = slice_size + start_block; 3991 vtoc->efi_parts[8].p_size = resv; 3992 vtoc->efi_parts[8].p_tag = V_RESERVED; 3993 3994 if (efi_write(fd, vtoc) != 0) { 3995 /* 3996 * Some block drivers (like pcata) may not support EFI 3997 * GPT labels. Print out a helpful error message dir- 3998 * ecting the user to manually label the disk and give 3999 * a specific slice. 4000 */ 4001 (void) close(fd); 4002 efi_free(vtoc); 4003 4004 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4005 "try using fdisk(1M) and then provide a specific slice")); 4006 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 4007 } 4008 4009 (void) close(fd); 4010 efi_free(vtoc); 4011#endif /* sun */ 4012 return (0); 4013} 4014 4015static boolean_t 4016supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 4017{ 4018 char *type; 4019 nvlist_t **child; 4020 uint_t children, c; 4021 4022 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 4023 if (strcmp(type, VDEV_TYPE_FILE) == 0 || 4024 strcmp(type, VDEV_TYPE_HOLE) == 0 || 4025 strcmp(type, VDEV_TYPE_MISSING) == 0) { 4026 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4027 "vdev type '%s' is not supported"), type); 4028 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 4029 return (B_FALSE); 4030 } 4031 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 4032 &child, &children) == 0) { 4033 for (c = 0; c < children; c++) { 4034 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 4035 return (B_FALSE); 4036 } 4037 } 4038 return (B_TRUE); 4039} 4040 4041/* 4042 * Check if this zvol is allowable for use as a dump device; zero if 4043 * it is, > 0 if it isn't, < 0 if it isn't a zvol. 4044 * 4045 * Allowable storage configurations include mirrors, all raidz variants, and 4046 * pools with log, cache, and spare devices. Pools which are backed by files or 4047 * have missing/hole vdevs are not suitable. 
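 *
 * Sketch (a hypothetical zvol path; the prefix must match
 * ZVOL_FULL_DEV_DIR):
 *
 *	if (zvol_check_dump_config("/dev/zvol/dsk/rpool/dump") == 0)
 *		(the zvol is usable as a dump device)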
4048 */ 4049int 4050zvol_check_dump_config(char *arg) 4051{ 4052 zpool_handle_t *zhp = NULL; 4053 nvlist_t *config, *nvroot; 4054 char *p, *volname; 4055 nvlist_t **top; 4056 uint_t toplevels; 4057 libzfs_handle_t *hdl; 4058 char errbuf[1024]; 4059 char poolname[ZPOOL_MAXNAMELEN]; 4060 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4061 int ret = 1; 4062 4063 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4064 return (-1); 4065 } 4066 4067 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4068 "dump is not supported on device '%s'"), arg); 4069 4070 if ((hdl = libzfs_init()) == NULL) 4071 return (1); 4072 libzfs_print_on_error(hdl, B_TRUE); 4073 4074 volname = arg + pathlen; 4075 4076 /* check the configuration of the pool */ 4077 if ((p = strchr(volname, '/')) == NULL) { 4078 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4079 "malformed dataset name")); 4080 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4081 return (1); 4082 } else if (p - volname >= ZFS_MAXNAMELEN) { 4083 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4084 "dataset name is too long")); 4085 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4086 return (1); 4087 } else { 4088 (void) strncpy(poolname, volname, p - volname); 4089 poolname[p - volname] = '\0'; 4090 } 4091 4092 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4093 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4094 "could not open pool '%s'"), poolname); 4095 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4096 goto out; 4097 } 4098 config = zpool_get_config(zhp, NULL); 4099 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4100 &nvroot) != 0) { 4101 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4102 "could not obtain vdev configuration for '%s'"), poolname); 4103 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4104 goto out; 4105 } 4106 4107 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4108 &top, &toplevels) == 0); 4109 4110 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4111 goto out; 4112 } 4113 ret = 0; 4114 4115out: 4116 if (zhp) 4117 zpool_close(zhp); 4118 libzfs_fini(hdl); 4119 return (ret); 4120} 4121