libzfs_pool.c revision 268650
1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22/* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 25 * Copyright (c) 2013 by Delphix. All rights reserved. 26 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 27 */ 28 29#include <sys/types.h> 30#include <sys/stat.h> 31#include <ctype.h> 32#include <errno.h> 33#include <devid.h> 34#include <fcntl.h> 35#include <libintl.h> 36#include <stdio.h> 37#include <stdlib.h> 38#include <strings.h> 39#include <unistd.h> 40#include <libgen.h> 41#include <sys/zfs_ioctl.h> 42#include <dlfcn.h> 43 44#include "zfs_namecheck.h" 45#include "zfs_prop.h" 46#include "libzfs_impl.h" 47#include "zfs_comutil.h" 48#include "zfeature_common.h" 49 50static int read_efi_label(nvlist_t *config, diskaddr_t *sb); 51 52#define DISK_ROOT "/dev/dsk" 53#define RDISK_ROOT "/dev/rdsk" 54#define BACKUP_SLICE "s2" 55 56typedef struct prop_flags { 57 int create:1; /* Validate property on creation */ 58 int import:1; /* Validate property on import */ 59} prop_flags_t; 60 61/* 62 * ==================================================================== 63 * zpool property functions 64 * ==================================================================== 65 */ 66 67static int 68zpool_get_all_props(zpool_handle_t *zhp) 69{ 70 zfs_cmd_t zc = { 0 }; 71 libzfs_handle_t *hdl = zhp->zpool_hdl; 72 73 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 74 75 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 76 return (-1); 77 78 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 79 if (errno == ENOMEM) { 80 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 81 zcmd_free_nvlists(&zc); 82 return (-1); 83 } 84 } else { 85 zcmd_free_nvlists(&zc); 86 return (-1); 87 } 88 } 89 90 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 91 zcmd_free_nvlists(&zc); 92 return (-1); 93 } 94 95 zcmd_free_nvlists(&zc); 96 97 return (0); 98} 99 100static int 101zpool_props_refresh(zpool_handle_t *zhp) 102{ 103 nvlist_t *old_props; 104 105 old_props = zhp->zpool_props; 106 107 if (zpool_get_all_props(zhp) != 0) 108 return (-1); 109 110 nvlist_free(old_props); 111 return (0); 112} 113 114static char * 115zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 116 zprop_source_t *src) 117{ 118 nvlist_t *nv, *nvl; 119 uint64_t ival; 120 char *value; 121 zprop_source_t source; 122 123 nvl = zhp->zpool_props; 124 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 125 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 126 source = ival; 127 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 128 } else { 129 source = ZPROP_SRC_DEFAULT; 130 if ((value = (char 
*)zpool_prop_default_string(prop)) == NULL) 131 value = "-"; 132 } 133 134 if (src) 135 *src = source; 136 137 return (value); 138} 139 140uint64_t 141zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 142{ 143 nvlist_t *nv, *nvl; 144 uint64_t value; 145 zprop_source_t source; 146 147 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 148 /* 149 * zpool_get_all_props() has most likely failed because 150 * the pool is faulted, but if all we need is the top level 151 * vdev's guid then get it from the zhp config nvlist. 152 */ 153 if ((prop == ZPOOL_PROP_GUID) && 154 (nvlist_lookup_nvlist(zhp->zpool_config, 155 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 156 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 157 == 0)) { 158 return (value); 159 } 160 return (zpool_prop_default_numeric(prop)); 161 } 162 163 nvl = zhp->zpool_props; 164 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 165 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 166 source = value; 167 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 168 } else { 169 source = ZPROP_SRC_DEFAULT; 170 value = zpool_prop_default_numeric(prop); 171 } 172 173 if (src) 174 *src = source; 175 176 return (value); 177} 178 179/* 180 * Map VDEV STATE to printed strings. 181 */ 182const char * 183zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 184{ 185 switch (state) { 186 case VDEV_STATE_CLOSED: 187 case VDEV_STATE_OFFLINE: 188 return (gettext("OFFLINE")); 189 case VDEV_STATE_REMOVED: 190 return (gettext("REMOVED")); 191 case VDEV_STATE_CANT_OPEN: 192 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 193 return (gettext("FAULTED")); 194 else if (aux == VDEV_AUX_SPLIT_POOL) 195 return (gettext("SPLIT")); 196 else 197 return (gettext("UNAVAIL")); 198 case VDEV_STATE_FAULTED: 199 return (gettext("FAULTED")); 200 case VDEV_STATE_DEGRADED: 201 return (gettext("DEGRADED")); 202 case VDEV_STATE_HEALTHY: 203 return (gettext("ONLINE")); 204 } 205 206 return (gettext("UNKNOWN")); 207} 208 209/* 210 * Map POOL STATE to printed strings. 211 */ 212const char * 213zpool_pool_state_to_name(pool_state_t state) 214{ 215 switch (state) { 216 case POOL_STATE_ACTIVE: 217 return (gettext("ACTIVE")); 218 case POOL_STATE_EXPORTED: 219 return (gettext("EXPORTED")); 220 case POOL_STATE_DESTROYED: 221 return (gettext("DESTROYED")); 222 case POOL_STATE_SPARE: 223 return (gettext("SPARE")); 224 case POOL_STATE_L2CACHE: 225 return (gettext("L2CACHE")); 226 case POOL_STATE_UNINITIALIZED: 227 return (gettext("UNINITIALIZED")); 228 case POOL_STATE_UNAVAIL: 229 return (gettext("UNAVAIL")); 230 case POOL_STATE_POTENTIALLY_ACTIVE: 231 return (gettext("POTENTIALLY_ACTIVE")); 232 } 233 234 return (gettext("UNKNOWN")); 235} 236 237/* 238 * Get a zpool property value for 'prop' and return the value in 239 * a pre-allocated buffer. 
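 *
 * Illustrative usage sketch (not part of the original file): a libzfs
 * consumer holding a zpool_handle_t could fetch a couple of properties
 * roughly as follows; buffer sizes and error handling are the caller's
 * choice:
 *
 *	char health[64], cap[32];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health,
 *	    sizeof (health), &src, B_FALSE) == 0)
 *		(void) printf("health: %s\n", health);
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, cap,
 *	    sizeof (cap), NULL, B_TRUE) == 0)
 *		(void) printf("capacity: %s%%\n", cap);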
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_EXPANDSZ:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}

/*
 * Check that the bootfs name is within the pool it is being set on, i.e.
 * that its leading component matches the pool name.  'bootfs' is assumed
 * to be a valid dataset name.
374 */ 375static boolean_t 376bootfs_name_valid(const char *pool, char *bootfs) 377{ 378 int len = strlen(pool); 379 380 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 381 return (B_FALSE); 382 383 if (strncmp(pool, bootfs, len) == 0 && 384 (bootfs[len] == '/' || bootfs[len] == '\0')) 385 return (B_TRUE); 386 387 return (B_FALSE); 388} 389 390/* 391 * Inspect the configuration to determine if any of the devices contain 392 * an EFI label. 393 */ 394static boolean_t 395pool_uses_efi(nvlist_t *config) 396{ 397#ifdef sun 398 nvlist_t **child; 399 uint_t c, children; 400 401 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 402 &child, &children) != 0) 403 return (read_efi_label(config, NULL) >= 0); 404 405 for (c = 0; c < children; c++) { 406 if (pool_uses_efi(child[c])) 407 return (B_TRUE); 408 } 409#endif /* sun */ 410 return (B_FALSE); 411} 412 413boolean_t 414zpool_is_bootable(zpool_handle_t *zhp) 415{ 416 char bootfs[ZPOOL_MAXNAMELEN]; 417 418 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 419 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-", 420 sizeof (bootfs)) != 0); 421} 422 423 424/* 425 * Given an nvlist of zpool properties to be set, validate that they are 426 * correct, and parse any numeric properties (index, boolean, etc) if they are 427 * specified as strings. 428 */ 429static nvlist_t * 430zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 431 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 432{ 433 nvpair_t *elem; 434 nvlist_t *retprops; 435 zpool_prop_t prop; 436 char *strval; 437 uint64_t intval; 438 char *slash, *check; 439 struct stat64 statbuf; 440 zpool_handle_t *zhp; 441 nvlist_t *nvroot; 442 443 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 444 (void) no_memory(hdl); 445 return (NULL); 446 } 447 448 elem = NULL; 449 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 450 const char *propname = nvpair_name(elem); 451 452 prop = zpool_name_to_prop(propname); 453 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) { 454 int err; 455 char *fname = strchr(propname, '@') + 1; 456 457 err = zfeature_lookup_name(fname, NULL); 458 if (err != 0) { 459 ASSERT3U(err, ==, ENOENT); 460 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 461 "invalid feature '%s'"), fname); 462 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 463 goto error; 464 } 465 466 if (nvpair_type(elem) != DATA_TYPE_STRING) { 467 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 468 "'%s' must be a string"), propname); 469 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 470 goto error; 471 } 472 473 (void) nvpair_value_string(elem, &strval); 474 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) { 475 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 476 "property '%s' can only be set to " 477 "'enabled'"), propname); 478 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 479 goto error; 480 } 481 482 if (nvlist_add_uint64(retprops, propname, 0) != 0) { 483 (void) no_memory(hdl); 484 goto error; 485 } 486 continue; 487 } 488 489 /* 490 * Make sure this property is valid and applies to this type. 
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value must be a dataset name,
			 * and that dataset must reside in the pool the
			 * property is being set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#ifdef sun
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
567 */ 568 if (pool_uses_efi(nvroot)) { 569 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 570 "property '%s' not supported on " 571 "EFI labeled devices"), propname); 572 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf); 573 zpool_close(zhp); 574 goto error; 575 } 576#endif /* sun */ 577 zpool_close(zhp); 578 break; 579 580 case ZPOOL_PROP_ALTROOT: 581 if (!flags.create && !flags.import) { 582 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 583 "property '%s' can only be set during pool " 584 "creation or import"), propname); 585 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 586 goto error; 587 } 588 589 if (strval[0] != '/') { 590 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 591 "bad alternate root '%s'"), strval); 592 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 593 goto error; 594 } 595 break; 596 597 case ZPOOL_PROP_CACHEFILE: 598 if (strval[0] == '\0') 599 break; 600 601 if (strcmp(strval, "none") == 0) 602 break; 603 604 if (strval[0] != '/') { 605 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 606 "property '%s' must be empty, an " 607 "absolute path, or 'none'"), propname); 608 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 609 goto error; 610 } 611 612 slash = strrchr(strval, '/'); 613 614 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 615 strcmp(slash, "/..") == 0) { 616 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 617 "'%s' is not a valid file"), strval); 618 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 619 goto error; 620 } 621 622 *slash = '\0'; 623 624 if (strval[0] != '\0' && 625 (stat64(strval, &statbuf) != 0 || 626 !S_ISDIR(statbuf.st_mode))) { 627 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 628 "'%s' is not a valid directory"), 629 strval); 630 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 631 goto error; 632 } 633 634 *slash = '/'; 635 break; 636 637 case ZPOOL_PROP_COMMENT: 638 for (check = strval; *check != '\0'; check++) { 639 if (!isprint(*check)) { 640 zfs_error_aux(hdl, 641 dgettext(TEXT_DOMAIN, 642 "comment may only have printable " 643 "characters")); 644 (void) zfs_error(hdl, EZFS_BADPROP, 645 errbuf); 646 goto error; 647 } 648 } 649 if (strlen(strval) > ZPROP_MAX_COMMENT) { 650 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 651 "comment must not exceed %d characters"), 652 ZPROP_MAX_COMMENT); 653 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 654 goto error; 655 } 656 break; 657 case ZPOOL_PROP_READONLY: 658 if (!flags.import) { 659 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 660 "property '%s' can only be set at " 661 "import time"), propname); 662 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 663 goto error; 664 } 665 break; 666 } 667 } 668 669 return (retprops); 670error: 671 nvlist_free(retprops); 672 return (NULL); 673} 674 675/* 676 * Set zpool property : propname=propval. 
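 *
 * Illustrative usage sketch (not part of the original file), assuming a
 * handle obtained from zpool_open():
 *
 *	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *		(void) fprintf(stderr, "failed to set comment\n");
 *
 * Property names and values are validated through zpool_valid_proplist()
 * above, so the same strings accepted by 'zpool set' are accepted here.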
677 */ 678int 679zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 680{ 681 zfs_cmd_t zc = { 0 }; 682 int ret = -1; 683 char errbuf[1024]; 684 nvlist_t *nvl = NULL; 685 nvlist_t *realprops; 686 uint64_t version; 687 prop_flags_t flags = { 0 }; 688 689 (void) snprintf(errbuf, sizeof (errbuf), 690 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 691 zhp->zpool_name); 692 693 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 694 return (no_memory(zhp->zpool_hdl)); 695 696 if (nvlist_add_string(nvl, propname, propval) != 0) { 697 nvlist_free(nvl); 698 return (no_memory(zhp->zpool_hdl)); 699 } 700 701 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 702 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 703 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 704 nvlist_free(nvl); 705 return (-1); 706 } 707 708 nvlist_free(nvl); 709 nvl = realprops; 710 711 /* 712 * Execute the corresponding ioctl() to set this property. 713 */ 714 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 715 716 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 717 nvlist_free(nvl); 718 return (-1); 719 } 720 721 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 722 723 zcmd_free_nvlists(&zc); 724 nvlist_free(nvl); 725 726 if (ret) 727 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 728 else 729 (void) zpool_props_refresh(zhp); 730 731 return (ret); 732} 733 734int 735zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) 736{ 737 libzfs_handle_t *hdl = zhp->zpool_hdl; 738 zprop_list_t *entry; 739 char buf[ZFS_MAXPROPLEN]; 740 nvlist_t *features = NULL; 741 zprop_list_t **last; 742 boolean_t firstexpand = (NULL == *plp); 743 744 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 745 return (-1); 746 747 last = plp; 748 while (*last != NULL) 749 last = &(*last)->pl_next; 750 751 if ((*plp)->pl_all) 752 features = zpool_get_features(zhp); 753 754 if ((*plp)->pl_all && firstexpand) { 755 for (int i = 0; i < SPA_FEATURES; i++) { 756 zprop_list_t *entry = zfs_alloc(hdl, 757 sizeof (zprop_list_t)); 758 entry->pl_prop = ZPROP_INVAL; 759 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s", 760 spa_feature_table[i].fi_uname); 761 entry->pl_width = strlen(entry->pl_user_prop); 762 entry->pl_all = B_TRUE; 763 764 *last = entry; 765 last = &entry->pl_next; 766 } 767 } 768 769 /* add any unsupported features */ 770 for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL); 771 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) { 772 char *propname; 773 boolean_t found; 774 zprop_list_t *entry; 775 776 if (zfeature_is_supported(nvpair_name(nvp))) 777 continue; 778 779 propname = zfs_asprintf(hdl, "unsupported@%s", 780 nvpair_name(nvp)); 781 782 /* 783 * Before adding the property to the list make sure that no 784 * other pool already added the same property. 
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256

/*
 * Validate the given pool name, optionally setting an extended error
 * message on 'hdl' if it is not NULL.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
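	 *
	 * For example (illustrative only), names such as "mirror", "raidz1"
	 * or "spare0", and the exact name "log", are rejected by the check
	 * below on create or import because they collide with vdev type
	 * names, while a pre-existing pool that already carries such a name
	 * can still be opened (isopen == B_TRUE skips this check).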
912 */ 913 if (ret == 0 && !isopen && 914 (strncmp(pool, "mirror", 6) == 0 || 915 strncmp(pool, "raidz", 5) == 0 || 916 strncmp(pool, "spare", 5) == 0 || 917 strcmp(pool, "log") == 0)) { 918 if (hdl != NULL) 919 zfs_error_aux(hdl, 920 dgettext(TEXT_DOMAIN, "name is reserved")); 921 return (B_FALSE); 922 } 923 924 925 if (ret != 0) { 926 if (hdl != NULL) { 927 switch (why) { 928 case NAME_ERR_TOOLONG: 929 zfs_error_aux(hdl, 930 dgettext(TEXT_DOMAIN, "name is too long")); 931 break; 932 933 case NAME_ERR_INVALCHAR: 934 zfs_error_aux(hdl, 935 dgettext(TEXT_DOMAIN, "invalid character " 936 "'%c' in pool name"), what); 937 break; 938 939 case NAME_ERR_NOLETTER: 940 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 941 "name must begin with a letter")); 942 break; 943 944 case NAME_ERR_RESERVED: 945 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 946 "name is reserved")); 947 break; 948 949 case NAME_ERR_DISKLIKE: 950 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 951 "pool name is reserved")); 952 break; 953 954 case NAME_ERR_LEADING_SLASH: 955 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 956 "leading slash in name")); 957 break; 958 959 case NAME_ERR_EMPTY_COMPONENT: 960 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 961 "empty component in name")); 962 break; 963 964 case NAME_ERR_TRAILING_SLASH: 965 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 966 "trailing slash in name")); 967 break; 968 969 case NAME_ERR_MULTIPLE_AT: 970 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 971 "multiple '@' delimiters in name")); 972 break; 973 974 } 975 } 976 return (B_FALSE); 977 } 978 979 return (B_TRUE); 980} 981 982/* 983 * Open a handle to the given pool, even if the pool is currently in the FAULTED 984 * state. 985 */ 986zpool_handle_t * 987zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 988{ 989 zpool_handle_t *zhp; 990 boolean_t missing; 991 992 /* 993 * Make sure the pool name is valid. 994 */ 995 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 996 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 997 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 998 pool); 999 return (NULL); 1000 } 1001 1002 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1003 return (NULL); 1004 1005 zhp->zpool_hdl = hdl; 1006 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1007 1008 if (zpool_refresh_stats(zhp, &missing) != 0) { 1009 zpool_close(zhp); 1010 return (NULL); 1011 } 1012 1013 if (missing) { 1014 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 1015 (void) zfs_error_fmt(hdl, EZFS_NOENT, 1016 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 1017 zpool_close(zhp); 1018 return (NULL); 1019 } 1020 1021 return (zhp); 1022} 1023 1024/* 1025 * Like the above, but silent on error. Used when iterating over pools (because 1026 * the configuration cache may be out of date). 1027 */ 1028int 1029zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 1030{ 1031 zpool_handle_t *zhp; 1032 boolean_t missing; 1033 1034 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1035 return (-1); 1036 1037 zhp->zpool_hdl = hdl; 1038 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1039 1040 if (zpool_refresh_stats(zhp, &missing) != 0) { 1041 zpool_close(zhp); 1042 return (-1); 1043 } 1044 1045 if (missing) { 1046 zpool_close(zhp); 1047 *ret = NULL; 1048 return (0); 1049 } 1050 1051 *ret = zhp; 1052 return (0); 1053} 1054 1055/* 1056 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 1057 * state. 
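 *
 * Illustrative usage sketch (not part of the original file), assuming
 * libzfs_init()/libzfs_fini() from libzfs.h:
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	zpool_handle_t *zhp;
 *
 *	if (hdl != NULL && (zhp = zpool_open(hdl, "tank")) != NULL) {
 *		(void) printf("opened pool %s\n", zpool_get_name(zhp));
 *		zpool_close(zhp);
 *	}
 *	if (hdl != NULL)
 *		libzfs_fini(hdl);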
1058 */ 1059zpool_handle_t * 1060zpool_open(libzfs_handle_t *hdl, const char *pool) 1061{ 1062 zpool_handle_t *zhp; 1063 1064 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 1065 return (NULL); 1066 1067 if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 1068 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 1069 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 1070 zpool_close(zhp); 1071 return (NULL); 1072 } 1073 1074 return (zhp); 1075} 1076 1077/* 1078 * Close the handle. Simply frees the memory associated with the handle. 1079 */ 1080void 1081zpool_close(zpool_handle_t *zhp) 1082{ 1083 if (zhp->zpool_config) 1084 nvlist_free(zhp->zpool_config); 1085 if (zhp->zpool_old_config) 1086 nvlist_free(zhp->zpool_old_config); 1087 if (zhp->zpool_props) 1088 nvlist_free(zhp->zpool_props); 1089 free(zhp); 1090} 1091 1092/* 1093 * Return the name of the pool. 1094 */ 1095const char * 1096zpool_get_name(zpool_handle_t *zhp) 1097{ 1098 return (zhp->zpool_name); 1099} 1100 1101 1102/* 1103 * Return the state of the pool (ACTIVE or UNAVAILABLE) 1104 */ 1105int 1106zpool_get_state(zpool_handle_t *zhp) 1107{ 1108 return (zhp->zpool_state); 1109} 1110 1111/* 1112 * Create the named pool, using the provided vdev list. It is assumed 1113 * that the consumer has already validated the contents of the nvlist, so we 1114 * don't have to worry about error semantics. 1115 */ 1116int 1117zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 1118 nvlist_t *props, nvlist_t *fsprops) 1119{ 1120 zfs_cmd_t zc = { 0 }; 1121 nvlist_t *zc_fsprops = NULL; 1122 nvlist_t *zc_props = NULL; 1123 char msg[1024]; 1124 int ret = -1; 1125 1126 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1127 "cannot create '%s'"), pool); 1128 1129 if (!zpool_name_valid(hdl, B_FALSE, pool)) 1130 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 1131 1132 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1133 return (-1); 1134 1135 if (props) { 1136 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 1137 1138 if ((zc_props = zpool_valid_proplist(hdl, pool, props, 1139 SPA_VERSION_1, flags, msg)) == NULL) { 1140 goto create_failed; 1141 } 1142 } 1143 1144 if (fsprops) { 1145 uint64_t zoned; 1146 char *zonestr; 1147 1148 zoned = ((nvlist_lookup_string(fsprops, 1149 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 1150 strcmp(zonestr, "on") == 0); 1151 1152 if ((zc_fsprops = zfs_valid_proplist(hdl, 1153 ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) { 1154 goto create_failed; 1155 } 1156 if (!zc_props && 1157 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 1158 goto create_failed; 1159 } 1160 if (nvlist_add_nvlist(zc_props, 1161 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 1162 goto create_failed; 1163 } 1164 } 1165 1166 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 1167 goto create_failed; 1168 1169 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 1170 1171 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 1172 1173 zcmd_free_nvlists(&zc); 1174 nvlist_free(zc_props); 1175 nvlist_free(zc_fsprops); 1176 1177 switch (errno) { 1178 case EBUSY: 1179 /* 1180 * This can happen if the user has specified the same 1181 * device multiple times. We can't reliably detect this 1182 * until we try to add it and see we already have a 1183 * label. 
1184 */ 1185 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1186 "one or more vdevs refer to the same device")); 1187 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1188 1189 case EOVERFLOW: 1190 /* 1191 * This occurs when one of the devices is below 1192 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1193 * device was the problem device since there's no 1194 * reliable way to determine device size from userland. 1195 */ 1196 { 1197 char buf[64]; 1198 1199 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1200 1201 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1202 "one or more devices is less than the " 1203 "minimum size (%s)"), buf); 1204 } 1205 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1206 1207 case ENOSPC: 1208 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1209 "one or more devices is out of space")); 1210 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1211 1212 case ENOTBLK: 1213 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1214 "cache device must be a disk or disk slice")); 1215 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1216 1217 default: 1218 return (zpool_standard_error(hdl, errno, msg)); 1219 } 1220 } 1221 1222create_failed: 1223 zcmd_free_nvlists(&zc); 1224 nvlist_free(zc_props); 1225 nvlist_free(zc_fsprops); 1226 return (ret); 1227} 1228 1229/* 1230 * Destroy the given pool. It is up to the caller to ensure that there are no 1231 * datasets left in the pool. 1232 */ 1233int 1234zpool_destroy(zpool_handle_t *zhp, const char *log_str) 1235{ 1236 zfs_cmd_t zc = { 0 }; 1237 zfs_handle_t *zfp = NULL; 1238 libzfs_handle_t *hdl = zhp->zpool_hdl; 1239 char msg[1024]; 1240 1241 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1242 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1243 return (-1); 1244 1245 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1246 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1247 1248 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1249 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1250 "cannot destroy '%s'"), zhp->zpool_name); 1251 1252 if (errno == EROFS) { 1253 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1254 "one or more devices is read only")); 1255 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1256 } else { 1257 (void) zpool_standard_error(hdl, errno, msg); 1258 } 1259 1260 if (zfp) 1261 zfs_close(zfp); 1262 return (-1); 1263 } 1264 1265 if (zfp) { 1266 remove_mountpoint(zfp); 1267 zfs_close(zfp); 1268 } 1269 1270 return (0); 1271} 1272 1273/* 1274 * Add the given vdevs to the pool. The caller must have already performed the 1275 * necessary verification to ensure that the vdev specification is well-formed. 
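 *
 * As an illustrative note (not part of the original file): 'nvroot' is
 * expected to be the same shape of tree the zpool(1M) command builds for
 * create/add -- a root nvlist whose ZPOOL_CONFIG_CHILDREN array holds the
 * new top-level vdevs, plus optional ZPOOL_CONFIG_SPARES and
 * ZPOOL_CONFIG_L2CACHE arrays, which is why those two keys are inspected
 * below before the ZFS_IOC_VDEV_ADD ioctl is issued.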
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
1351 */ 1352 { 1353 char buf[64]; 1354 1355 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1356 1357 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1358 "device is less than the minimum " 1359 "size (%s)"), buf); 1360 } 1361 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1362 break; 1363 1364 case ENOTSUP: 1365 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1366 "pool must be upgraded to add these vdevs")); 1367 (void) zfs_error(hdl, EZFS_BADVERSION, msg); 1368 break; 1369 1370 case EDOM: 1371 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1372 "root pool can not have multiple vdevs" 1373 " or separate logs")); 1374 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); 1375 break; 1376 1377 case ENOTBLK: 1378 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1379 "cache device must be a disk or disk slice")); 1380 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1381 break; 1382 1383 default: 1384 (void) zpool_standard_error(hdl, errno, msg); 1385 } 1386 1387 ret = -1; 1388 } else { 1389 ret = 0; 1390 } 1391 1392 zcmd_free_nvlists(&zc); 1393 1394 return (ret); 1395} 1396 1397/* 1398 * Exports the pool from the system. The caller must ensure that there are no 1399 * mounted datasets in the pool. 1400 */ 1401static int 1402zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce, 1403 const char *log_str) 1404{ 1405 zfs_cmd_t zc = { 0 }; 1406 char msg[1024]; 1407 1408 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1409 "cannot export '%s'"), zhp->zpool_name); 1410 1411 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1412 zc.zc_cookie = force; 1413 zc.zc_guid = hardforce; 1414 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1415 1416 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { 1417 switch (errno) { 1418 case EXDEV: 1419 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 1420 "use '-f' to override the following errors:\n" 1421 "'%s' has an active shared spare which could be" 1422 " used by other pools once '%s' is exported."), 1423 zhp->zpool_name, zhp->zpool_name); 1424 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, 1425 msg)); 1426 default: 1427 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, 1428 msg)); 1429 } 1430 } 1431 1432 return (0); 1433} 1434 1435int 1436zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str) 1437{ 1438 return (zpool_export_common(zhp, force, B_FALSE, log_str)); 1439} 1440 1441int 1442zpool_export_force(zpool_handle_t *zhp, const char *log_str) 1443{ 1444 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str)); 1445} 1446 1447static void 1448zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun, 1449 nvlist_t *config) 1450{ 1451 nvlist_t *nv = NULL; 1452 uint64_t rewindto; 1453 int64_t loss = -1; 1454 struct tm t; 1455 char timestr[128]; 1456 1457 if (!hdl->libzfs_printerr || config == NULL) 1458 return; 1459 1460 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1461 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) { 1462 return; 1463 } 1464 1465 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1466 return; 1467 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1468 1469 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1470 strftime(timestr, 128, 0, &t) != 0) { 1471 if (dryrun) { 1472 (void) printf(dgettext(TEXT_DOMAIN, 1473 "Would be able to return %s " 1474 "to its state as of %s.\n"), 1475 name, timestr); 1476 } else { 1477 (void) printf(dgettext(TEXT_DOMAIN, 1478 "Pool %s returned to its state as of %s.\n"), 1479 
name, timestr); 1480 } 1481 if (loss > 120) { 1482 (void) printf(dgettext(TEXT_DOMAIN, 1483 "%s approximately %lld "), 1484 dryrun ? "Would discard" : "Discarded", 1485 (loss + 30) / 60); 1486 (void) printf(dgettext(TEXT_DOMAIN, 1487 "minutes of transactions.\n")); 1488 } else if (loss > 0) { 1489 (void) printf(dgettext(TEXT_DOMAIN, 1490 "%s approximately %lld "), 1491 dryrun ? "Would discard" : "Discarded", loss); 1492 (void) printf(dgettext(TEXT_DOMAIN, 1493 "seconds of transactions.\n")); 1494 } 1495 } 1496} 1497 1498void 1499zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1500 nvlist_t *config) 1501{ 1502 nvlist_t *nv = NULL; 1503 int64_t loss = -1; 1504 uint64_t edata = UINT64_MAX; 1505 uint64_t rewindto; 1506 struct tm t; 1507 char timestr[128]; 1508 1509 if (!hdl->libzfs_printerr) 1510 return; 1511 1512 if (reason >= 0) 1513 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1514 else 1515 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1516 1517 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1518 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1519 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 || 1520 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1521 goto no_info; 1522 1523 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1524 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1525 &edata); 1526 1527 (void) printf(dgettext(TEXT_DOMAIN, 1528 "Recovery is possible, but will result in some data loss.\n")); 1529 1530 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1531 strftime(timestr, 128, 0, &t) != 0) { 1532 (void) printf(dgettext(TEXT_DOMAIN, 1533 "\tReturning the pool to its state as of %s\n" 1534 "\tshould correct the problem. "), 1535 timestr); 1536 } else { 1537 (void) printf(dgettext(TEXT_DOMAIN, 1538 "\tReverting the pool to an earlier state " 1539 "should correct the problem.\n\t")); 1540 } 1541 1542 if (loss > 120) { 1543 (void) printf(dgettext(TEXT_DOMAIN, 1544 "Approximately %lld minutes of data\n" 1545 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1546 } else if (loss > 0) { 1547 (void) printf(dgettext(TEXT_DOMAIN, 1548 "Approximately %lld seconds of data\n" 1549 "\tmust be discarded, irreversibly. "), loss); 1550 } 1551 if (edata != 0 && edata != UINT64_MAX) { 1552 if (edata == 1) { 1553 (void) printf(dgettext(TEXT_DOMAIN, 1554 "After rewind, at least\n" 1555 "\tone persistent user-data error will remain. ")); 1556 } else { 1557 (void) printf(dgettext(TEXT_DOMAIN, 1558 "After rewind, several\n" 1559 "\tpersistent user-data errors will remain. ")); 1560 } 1561 } 1562 (void) printf(dgettext(TEXT_DOMAIN, 1563 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1564 reason >= 0 ? "clear" : "import", name); 1565 1566 (void) printf(dgettext(TEXT_DOMAIN, 1567 "A scrub of the pool\n" 1568 "\tis strongly recommended after recovery.\n")); 1569 return; 1570 1571no_info: 1572 (void) printf(dgettext(TEXT_DOMAIN, 1573 "Destroy and re-create the pool from\n\ta backup source.\n")); 1574} 1575 1576/* 1577 * zpool_import() is a contracted interface. Should be kept the same 1578 * if possible. 1579 * 1580 * Applications should use zpool_import_props() to import a pool with 1581 * new properties value to be set. 
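 *
 * Illustrative usage sketch (not part of the original file): 'config' is
 * one of the candidate configs discovered by zpool_find_import(), and
 * 'altroot' may be NULL:
 *
 *	char altroot[] = "/a";
 *
 *	if (zpool_import(hdl, config, NULL, altroot) != 0)
 *		(void) fprintf(stderr, "import failed\n");
 *
 * As the body below shows, this is equivalent to zpool_import_props() with
 * an altroot property, a cachefile of "none", and ZFS_IMPORT_NORMAL flags.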
1582 */ 1583int 1584zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1585 char *altroot) 1586{ 1587 nvlist_t *props = NULL; 1588 int ret; 1589 1590 if (altroot != NULL) { 1591 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1592 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1593 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1594 newname)); 1595 } 1596 1597 if (nvlist_add_string(props, 1598 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1599 nvlist_add_string(props, 1600 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1601 nvlist_free(props); 1602 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1603 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1604 newname)); 1605 } 1606 } 1607 1608 ret = zpool_import_props(hdl, config, newname, props, 1609 ZFS_IMPORT_NORMAL); 1610 if (props) 1611 nvlist_free(props); 1612 return (ret); 1613} 1614 1615static void 1616print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1617 int indent) 1618{ 1619 nvlist_t **child; 1620 uint_t c, children; 1621 char *vname; 1622 uint64_t is_log = 0; 1623 1624 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1625 &is_log); 1626 1627 if (name != NULL) 1628 (void) printf("\t%*s%s%s\n", indent, "", name, 1629 is_log ? " [log]" : ""); 1630 1631 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1632 &child, &children) != 0) 1633 return; 1634 1635 for (c = 0; c < children; c++) { 1636 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); 1637 print_vdev_tree(hdl, vname, child[c], indent + 2); 1638 free(vname); 1639 } 1640} 1641 1642void 1643zpool_print_unsup_feat(nvlist_t *config) 1644{ 1645 nvlist_t *nvinfo, *unsup_feat; 1646 1647 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 1648 0); 1649 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT, 1650 &unsup_feat) == 0); 1651 1652 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL; 1653 nvp = nvlist_next_nvpair(unsup_feat, nvp)) { 1654 char *desc; 1655 1656 verify(nvpair_type(nvp) == DATA_TYPE_STRING); 1657 verify(nvpair_value_string(nvp, &desc) == 0); 1658 1659 if (strlen(desc) > 0) 1660 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc); 1661 else 1662 (void) printf("\t%s\n", nvpair_name(nvp)); 1663 } 1664} 1665 1666/* 1667 * Import the given pool using the known configuration and a list of 1668 * properties to be set. The configuration should have come from 1669 * zpool_find_import(). The 'newname' parameters control whether the pool 1670 * is imported with a different name. 
1671 */ 1672int 1673zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1674 nvlist_t *props, int flags) 1675{ 1676 zfs_cmd_t zc = { 0 }; 1677 zpool_rewind_policy_t policy; 1678 nvlist_t *nv = NULL; 1679 nvlist_t *nvinfo = NULL; 1680 nvlist_t *missing = NULL; 1681 char *thename; 1682 char *origname; 1683 int ret; 1684 int error = 0; 1685 char errbuf[1024]; 1686 1687 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1688 &origname) == 0); 1689 1690 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1691 "cannot import pool '%s'"), origname); 1692 1693 if (newname != NULL) { 1694 if (!zpool_name_valid(hdl, B_FALSE, newname)) 1695 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1696 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1697 newname)); 1698 thename = (char *)newname; 1699 } else { 1700 thename = origname; 1701 } 1702 1703 if (props) { 1704 uint64_t version; 1705 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1706 1707 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1708 &version) == 0); 1709 1710 if ((props = zpool_valid_proplist(hdl, origname, 1711 props, version, flags, errbuf)) == NULL) { 1712 return (-1); 1713 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1714 nvlist_free(props); 1715 return (-1); 1716 } 1717 } 1718 1719 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1720 1721 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1722 &zc.zc_guid) == 0); 1723 1724 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1725 nvlist_free(props); 1726 return (-1); 1727 } 1728 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1729 nvlist_free(props); 1730 return (-1); 1731 } 1732 1733 zc.zc_cookie = flags; 1734 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 1735 errno == ENOMEM) { 1736 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 1737 zcmd_free_nvlists(&zc); 1738 return (-1); 1739 } 1740 } 1741 if (ret != 0) 1742 error = errno; 1743 1744 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 1745 zpool_get_rewind_policy(config, &policy); 1746 1747 if (error) { 1748 char desc[1024]; 1749 1750 /* 1751 * Dry-run failed, but we print out what success 1752 * looks like if we found a best txg 1753 */ 1754 if (policy.zrp_request & ZPOOL_TRY_REWIND) { 1755 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1756 B_TRUE, nv); 1757 nvlist_free(nv); 1758 return (-1); 1759 } 1760 1761 if (newname == NULL) 1762 (void) snprintf(desc, sizeof (desc), 1763 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1764 thename); 1765 else 1766 (void) snprintf(desc, sizeof (desc), 1767 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1768 origname, thename); 1769 1770 switch (error) { 1771 case ENOTSUP: 1772 if (nv != NULL && nvlist_lookup_nvlist(nv, 1773 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1774 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) { 1775 (void) printf(dgettext(TEXT_DOMAIN, "This " 1776 "pool uses the following feature(s) not " 1777 "supported by this system:\n")); 1778 zpool_print_unsup_feat(nv); 1779 if (nvlist_exists(nvinfo, 1780 ZPOOL_CONFIG_CAN_RDONLY)) { 1781 (void) printf(dgettext(TEXT_DOMAIN, 1782 "All unsupported features are only " 1783 "required for writing to the pool." 1784 "\nThe pool can be imported using " 1785 "'-o readonly=on'.\n")); 1786 } 1787 } 1788 /* 1789 * Unsupported version. 
1790 */ 1791 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1792 break; 1793 1794 case EINVAL: 1795 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1796 break; 1797 1798 case EROFS: 1799 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1800 "one or more devices is read only")); 1801 (void) zfs_error(hdl, EZFS_BADDEV, desc); 1802 break; 1803 1804 case ENXIO: 1805 if (nv && nvlist_lookup_nvlist(nv, 1806 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1807 nvlist_lookup_nvlist(nvinfo, 1808 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 1809 (void) printf(dgettext(TEXT_DOMAIN, 1810 "The devices below are missing, use " 1811 "'-m' to import the pool anyway:\n")); 1812 print_vdev_tree(hdl, NULL, missing, 2); 1813 (void) printf("\n"); 1814 } 1815 (void) zpool_standard_error(hdl, error, desc); 1816 break; 1817 1818 case EEXIST: 1819 (void) zpool_standard_error(hdl, error, desc); 1820 break; 1821 1822 default: 1823 (void) zpool_standard_error(hdl, error, desc); 1824 zpool_explain_recover(hdl, 1825 newname ? origname : thename, -error, nv); 1826 break; 1827 } 1828 1829 nvlist_free(nv); 1830 ret = -1; 1831 } else { 1832 zpool_handle_t *zhp; 1833 1834 /* 1835 * This should never fail, but play it safe anyway. 1836 */ 1837 if (zpool_open_silent(hdl, thename, &zhp) != 0) 1838 ret = -1; 1839 else if (zhp != NULL) 1840 zpool_close(zhp); 1841 if (policy.zrp_request & 1842 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 1843 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1844 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv); 1845 } 1846 nvlist_free(nv); 1847 return (0); 1848 } 1849 1850 zcmd_free_nvlists(&zc); 1851 nvlist_free(props); 1852 1853 return (ret); 1854} 1855 1856/* 1857 * Scan the pool. 1858 */ 1859int 1860zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func) 1861{ 1862 zfs_cmd_t zc = { 0 }; 1863 char msg[1024]; 1864 libzfs_handle_t *hdl = zhp->zpool_hdl; 1865 1866 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1867 zc.zc_cookie = func; 1868 1869 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 || 1870 (errno == ENOENT && func != POOL_SCAN_NONE)) 1871 return (0); 1872 1873 if (func == POOL_SCAN_SCRUB) { 1874 (void) snprintf(msg, sizeof (msg), 1875 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name); 1876 } else if (func == POOL_SCAN_NONE) { 1877 (void) snprintf(msg, sizeof (msg), 1878 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 1879 zc.zc_name); 1880 } else { 1881 assert(!"unexpected result"); 1882 } 1883 1884 if (errno == EBUSY) { 1885 nvlist_t *nvroot; 1886 pool_scan_stat_t *ps = NULL; 1887 uint_t psc; 1888 1889 verify(nvlist_lookup_nvlist(zhp->zpool_config, 1890 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1891 (void) nvlist_lookup_uint64_array(nvroot, 1892 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 1893 if (ps && ps->pss_func == POOL_SCAN_SCRUB) 1894 return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 1895 else 1896 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 1897 } else if (errno == ENOENT) { 1898 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 1899 } else { 1900 return (zpool_standard_error(hdl, errno, msg)); 1901 } 1902} 1903 1904/* 1905 * This provides a very minimal check whether a given string is likely a 1906 * c#t#d# style string. Users of this are expected to do their own 1907 * verification of the s# part. 1908 */ 1909#define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1])) 1910 1911/* 1912 * More elaborate version for ones which may start with "/dev/dsk/" 1913 * and the like. 
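 *
 * Illustrative examples (not part of the original file): "c0t0d0s0",
 * "/dev/dsk/c1t2d3s4" and "/dev/dsk/c1t2d3s4/old" all pass this check,
 * while "/dev/dsk/emcpower0a" does not, because its last component does
 * not begin with 'c' followed by a digit.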
 */
static int
ctd_check_path(char *str) {
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)
				;
		}
		str = tmp + 1;
	}
	return (CTD_CHECK(str));
}

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "s0" or "s0/old". The "s0" part is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
		    ctd_check_path(val)) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (wholedisk) {
				int slen = strlen(srchval);
				int vlen = strlen(val);

				if (slen != vlen - 2)
					break;

				/*
				 * make_leaf_vdev() should only set
				 * wholedisk for ZPOOL_CONFIG_PATHs which
				 * will include "/dev/dsk/", giving plenty of
				 * room for the indices used next.
				 */
				ASSERT(vlen >= 6);

				/*
				 * strings identical except trailing "s0"
				 */
				if (strcmp(&val[vlen - 2], "s0") == 0 &&
				    strncmp(srchval, val, slen) == 0)
					return (nv);

				/*
				 * strings identical except trailing "s0/old"
				 */
				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
				    strcmp(&srchval[slen - 4], "/old") == 0 &&
				    strncmp(srchval, val, slen - 4) == 0)
					return (nv);

				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (e.g. mirror-4).
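			 *
			 * For example (illustrative only), a srchval of
			 * "raidz2-3" is split below into the type "raidz2"
			 * and the vdev id 3, and this vdev matches only if
			 * its ZPOOL_CONFIG_ID is also 3.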
2036 */ 2037 if ((type = strdup(srchval)) == NULL) 2038 return (NULL); 2039 2040 if ((p = strrchr(type, '-')) == NULL) { 2041 free(type); 2042 break; 2043 } 2044 idx = p + 1; 2045 *p = '\0'; 2046 2047 /* 2048 * If the types don't match then keep looking. 2049 */ 2050 if (strncmp(val, type, strlen(val)) != 0) { 2051 free(type); 2052 break; 2053 } 2054 2055 verify(strncmp(type, VDEV_TYPE_RAIDZ, 2056 strlen(VDEV_TYPE_RAIDZ)) == 0 || 2057 strncmp(type, VDEV_TYPE_MIRROR, 2058 strlen(VDEV_TYPE_MIRROR)) == 0); 2059 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 2060 &id) == 0); 2061 2062 errno = 0; 2063 vdev_id = strtoull(idx, &end, 10); 2064 2065 free(type); 2066 if (errno != 0) 2067 return (NULL); 2068 2069 /* 2070 * Now verify that we have the correct vdev id. 2071 */ 2072 if (vdev_id == id) 2073 return (nv); 2074 } 2075 2076 /* 2077 * Common case 2078 */ 2079 if (strcmp(srchval, val) == 0) 2080 return (nv); 2081 break; 2082 } 2083 2084 default: 2085 break; 2086 } 2087 2088 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2089 &child, &children) != 0) 2090 return (NULL); 2091 2092 for (c = 0; c < children; c++) { 2093 if ((ret = vdev_to_nvlist_iter(child[c], search, 2094 avail_spare, l2cache, NULL)) != NULL) { 2095 /* 2096 * The 'is_log' value is only set for the toplevel 2097 * vdev, not the leaf vdevs. So we always lookup the 2098 * log device from the root of the vdev tree (where 2099 * 'log' is non-NULL). 2100 */ 2101 if (log != NULL && 2102 nvlist_lookup_uint64(child[c], 2103 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 2104 is_log) { 2105 *log = B_TRUE; 2106 } 2107 return (ret); 2108 } 2109 } 2110 2111 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2112 &child, &children) == 0) { 2113 for (c = 0; c < children; c++) { 2114 if ((ret = vdev_to_nvlist_iter(child[c], search, 2115 avail_spare, l2cache, NULL)) != NULL) { 2116 *avail_spare = B_TRUE; 2117 return (ret); 2118 } 2119 } 2120 } 2121 2122 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2123 &child, &children) == 0) { 2124 for (c = 0; c < children; c++) { 2125 if ((ret = vdev_to_nvlist_iter(child[c], search, 2126 avail_spare, l2cache, NULL)) != NULL) { 2127 *l2cache = B_TRUE; 2128 return (ret); 2129 } 2130 } 2131 } 2132 2133 return (NULL); 2134} 2135 2136/* 2137 * Given a physical path (minus the "/devices" prefix), find the 2138 * associated vdev. 2139 */ 2140nvlist_t * 2141zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 2142 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 2143{ 2144 nvlist_t *search, *nvroot, *ret; 2145 2146 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2147 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); 2148 2149 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2150 &nvroot) == 0); 2151 2152 *avail_spare = B_FALSE; 2153 *l2cache = B_FALSE; 2154 if (log != NULL) 2155 *log = B_FALSE; 2156 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2157 nvlist_free(search); 2158 2159 return (ret); 2160} 2161 2162/* 2163 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 
2164 */ 2165boolean_t 2166zpool_vdev_is_interior(const char *name) 2167{ 2168 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2169 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2170 return (B_TRUE); 2171 return (B_FALSE); 2172} 2173 2174nvlist_t * 2175zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2176 boolean_t *l2cache, boolean_t *log) 2177{ 2178 char buf[MAXPATHLEN]; 2179 char *end; 2180 nvlist_t *nvroot, *search, *ret; 2181 uint64_t guid; 2182 2183 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2184 2185 guid = strtoull(path, &end, 10); 2186 if (guid != 0 && *end == '\0') { 2187 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2188 } else if (zpool_vdev_is_interior(path)) { 2189 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2190 } else if (path[0] != '/') { 2191 (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path); 2192 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 2193 } else { 2194 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2195 } 2196 2197 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2198 &nvroot) == 0); 2199 2200 *avail_spare = B_FALSE; 2201 *l2cache = B_FALSE; 2202 if (log != NULL) 2203 *log = B_FALSE; 2204 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2205 nvlist_free(search); 2206 2207 return (ret); 2208} 2209 2210static int 2211vdev_online(nvlist_t *nv) 2212{ 2213 uint64_t ival; 2214 2215 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2216 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2217 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2218 return (0); 2219 2220 return (1); 2221} 2222 2223/* 2224 * Helper function for zpool_get_physpaths(). 2225 */ 2226static int 2227vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2228 size_t *bytes_written) 2229{ 2230 size_t bytes_left, pos, rsz; 2231 char *tmppath; 2232 const char *format; 2233 2234 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2235 &tmppath) != 0) 2236 return (EZFS_NODEVICE); 2237 2238 pos = *bytes_written; 2239 bytes_left = physpath_size - pos; 2240 format = (pos == 0) ? "%s" : " %s"; 2241 2242 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2243 *bytes_written += rsz; 2244 2245 if (rsz >= bytes_left) { 2246 /* if physpath was not copied properly, clear it */ 2247 if (bytes_left != 0) { 2248 physpath[pos] = 0; 2249 } 2250 return (EZFS_NOSPC); 2251 } 2252 return (0); 2253} 2254 2255static int 2256vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2257 size_t *rsz, boolean_t is_spare) 2258{ 2259 char *type; 2260 int ret; 2261 2262 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2263 return (EZFS_INVALCONFIG); 2264 2265 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2266 /* 2267 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2268 * For a spare vdev, we only want to boot from the active 2269 * spare device. 
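		 * (A disk child that lacks ZPOOL_CONFIG_IS_SPARE is the
		 * original device being spared, not the active spare, so it
		 * contributes no physpath here.)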
2270 */ 2271 if (is_spare) { 2272 uint64_t spare = 0; 2273 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2274 &spare); 2275 if (!spare) 2276 return (EZFS_INVALCONFIG); 2277 } 2278 2279 if (vdev_online(nv)) { 2280 if ((ret = vdev_get_one_physpath(nv, physpath, 2281 phypath_size, rsz)) != 0) 2282 return (ret); 2283 } 2284 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2285 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2286 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2287 nvlist_t **child; 2288 uint_t count; 2289 int i, ret; 2290 2291 if (nvlist_lookup_nvlist_array(nv, 2292 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2293 return (EZFS_INVALCONFIG); 2294 2295 for (i = 0; i < count; i++) { 2296 ret = vdev_get_physpaths(child[i], physpath, 2297 phypath_size, rsz, is_spare); 2298 if (ret == EZFS_NOSPC) 2299 return (ret); 2300 } 2301 } 2302 2303 return (EZFS_POOL_INVALARG); 2304} 2305 2306/* 2307 * Get phys_path for a root pool config. 2308 * Return 0 on success; non-zero on failure. 2309 */ 2310static int 2311zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2312{ 2313 size_t rsz; 2314 nvlist_t *vdev_root; 2315 nvlist_t **child; 2316 uint_t count; 2317 char *type; 2318 2319 rsz = 0; 2320 2321 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2322 &vdev_root) != 0) 2323 return (EZFS_INVALCONFIG); 2324 2325 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2326 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2327 &child, &count) != 0) 2328 return (EZFS_INVALCONFIG); 2329 2330 /* 2331 * root pool can not have EFI labeled disks and can only have 2332 * a single top-level vdev. 2333 */ 2334 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 || 2335 pool_uses_efi(vdev_root)) 2336 return (EZFS_POOL_INVALARG); 2337 2338 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2339 B_FALSE); 2340 2341 /* No online devices */ 2342 if (rsz == 0) 2343 return (EZFS_NODEVICE); 2344 2345 return (0); 2346} 2347 2348/* 2349 * Get phys_path for a root pool 2350 * Return 0 on success; non-zero on failure. 2351 */ 2352int 2353zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2354{ 2355 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2356 phypath_size)); 2357} 2358 2359/* 2360 * If the device has being dynamically expanded then we need to relabel 2361 * the disk to use the new unallocated space. 2362 */ 2363static int 2364zpool_relabel_disk(libzfs_handle_t *hdl, const char *name) 2365{ 2366#ifdef sun 2367 char path[MAXPATHLEN]; 2368 char errbuf[1024]; 2369 int fd, error; 2370 int (*_efi_use_whole_disk)(int); 2371 2372 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, 2373 "efi_use_whole_disk")) == NULL) 2374 return (-1); 2375 2376 (void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name); 2377 2378 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2379 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2380 "relabel '%s': unable to open device"), name); 2381 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2382 } 2383 2384 /* 2385 * It's possible that we might encounter an error if the device 2386 * does not have any unallocated space left. If so, we simply 2387 * ignore that error and continue on. 
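	 * (efi_use_whole_disk() signals that case by returning VT_ENOSPC,
	 * which is the one failure return filtered out below.)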
2388 */ 2389 error = _efi_use_whole_disk(fd); 2390 (void) close(fd); 2391 if (error && error != VT_ENOSPC) { 2392 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2393 "relabel '%s': unable to read disk capacity"), name); 2394 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2395 } 2396#endif /* sun */ 2397 return (0); 2398} 2399 2400/* 2401 * Bring the specified vdev online. The 'flags' parameter is a set of the 2402 * ZFS_ONLINE_* flags. 2403 */ 2404int 2405zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2406 vdev_state_t *newstate) 2407{ 2408 zfs_cmd_t zc = { 0 }; 2409 char msg[1024]; 2410 nvlist_t *tgt; 2411 boolean_t avail_spare, l2cache, islog; 2412 libzfs_handle_t *hdl = zhp->zpool_hdl; 2413 2414 if (flags & ZFS_ONLINE_EXPAND) { 2415 (void) snprintf(msg, sizeof (msg), 2416 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2417 } else { 2418 (void) snprintf(msg, sizeof (msg), 2419 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2420 } 2421 2422 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2423 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2424 &islog)) == NULL) 2425 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2426 2427 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2428 2429 if (avail_spare) 2430 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2431 2432 if (flags & ZFS_ONLINE_EXPAND || 2433 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) { 2434 char *pathname = NULL; 2435 uint64_t wholedisk = 0; 2436 2437 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2438 &wholedisk); 2439 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, 2440 &pathname) == 0); 2441 2442 /* 2443 * XXX - L2ARC 1.0 devices can't support expansion. 2444 */ 2445 if (l2cache) { 2446 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2447 "cannot expand cache devices")); 2448 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2449 } 2450 2451 if (wholedisk) { 2452 pathname += strlen(DISK_ROOT) + 1; 2453 (void) zpool_relabel_disk(hdl, pathname); 2454 } 2455 } 2456 2457 zc.zc_cookie = VDEV_STATE_ONLINE; 2458 zc.zc_obj = flags; 2459 2460 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2461 if (errno == EINVAL) { 2462 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2463 "from this pool into a new one. Use '%s' " 2464 "instead"), "zpool detach"); 2465 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2466 } 2467 return (zpool_standard_error(hdl, errno, msg)); 2468 } 2469 2470 *newstate = zc.zc_cookie; 2471 return (0); 2472} 2473 2474/* 2475 * Take the specified vdev offline 2476 */ 2477int 2478zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2479{ 2480 zfs_cmd_t zc = { 0 }; 2481 char msg[1024]; 2482 nvlist_t *tgt; 2483 boolean_t avail_spare, l2cache; 2484 libzfs_handle_t *hdl = zhp->zpool_hdl; 2485 2486 (void) snprintf(msg, sizeof (msg), 2487 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2488 2489 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2490 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2491 NULL)) == NULL) 2492 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2493 2494 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2495 2496 if (avail_spare) 2497 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2498 2499 zc.zc_cookie = VDEV_STATE_OFFLINE; 2500 zc.zc_obj = istmp ? 
ZFS_OFFLINE_TEMPORARY : 0; 2501 2502 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2503 return (0); 2504 2505 switch (errno) { 2506 case EBUSY: 2507 2508 /* 2509 * There are no other replicas of this device. 2510 */ 2511 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2512 2513 case EEXIST: 2514 /* 2515 * The log device has unplayed logs 2516 */ 2517 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2518 2519 default: 2520 return (zpool_standard_error(hdl, errno, msg)); 2521 } 2522} 2523 2524/* 2525 * Mark the given vdev faulted. 2526 */ 2527int 2528zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2529{ 2530 zfs_cmd_t zc = { 0 }; 2531 char msg[1024]; 2532 libzfs_handle_t *hdl = zhp->zpool_hdl; 2533 2534 (void) snprintf(msg, sizeof (msg), 2535 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2536 2537 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2538 zc.zc_guid = guid; 2539 zc.zc_cookie = VDEV_STATE_FAULTED; 2540 zc.zc_obj = aux; 2541 2542 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2543 return (0); 2544 2545 switch (errno) { 2546 case EBUSY: 2547 2548 /* 2549 * There are no other replicas of this device. 2550 */ 2551 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2552 2553 default: 2554 return (zpool_standard_error(hdl, errno, msg)); 2555 } 2556 2557} 2558 2559/* 2560 * Mark the given vdev degraded. 2561 */ 2562int 2563zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2564{ 2565 zfs_cmd_t zc = { 0 }; 2566 char msg[1024]; 2567 libzfs_handle_t *hdl = zhp->zpool_hdl; 2568 2569 (void) snprintf(msg, sizeof (msg), 2570 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2571 2572 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2573 zc.zc_guid = guid; 2574 zc.zc_cookie = VDEV_STATE_DEGRADED; 2575 zc.zc_obj = aux; 2576 2577 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2578 return (0); 2579 2580 return (zpool_standard_error(hdl, errno, msg)); 2581} 2582 2583/* 2584 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2585 * a hot spare. 2586 */ 2587static boolean_t 2588is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2589{ 2590 nvlist_t **child; 2591 uint_t c, children; 2592 char *type; 2593 2594 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2595 &children) == 0) { 2596 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2597 &type) == 0); 2598 2599 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2600 children == 2 && child[which] == tgt) 2601 return (B_TRUE); 2602 2603 for (c = 0; c < children; c++) 2604 if (is_replacing_spare(child[c], tgt, which)) 2605 return (B_TRUE); 2606 } 2607 2608 return (B_FALSE); 2609} 2610 2611/* 2612 * Attach new_disk (fully described by nvroot) to old_disk. 2613 * If 'replacing' is specified, the new disk will replace the old one. 
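 *
 * A minimal caller sketch (illustrative only; the device names are made up
 * and the nvroot describing the single new disk is normally built by the
 * zpool(1M) command from its command line):
 *
 *	nvlist_t *nvroot;	root nvlist holding one leaf disk vdev
 *	...
 *	if (zpool_vdev_attach(zhp, "c0t1d0", "c0t2d0", nvroot, 1) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 *
 * With 'replacing' set to 0, the new disk is attached alongside old_disk,
 * turning the target into (or widening) a mirror.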
2614 */ 2615int 2616zpool_vdev_attach(zpool_handle_t *zhp, 2617 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2618{ 2619 zfs_cmd_t zc = { 0 }; 2620 char msg[1024]; 2621 int ret; 2622 nvlist_t *tgt; 2623 boolean_t avail_spare, l2cache, islog; 2624 uint64_t val; 2625 char *newname; 2626 nvlist_t **child; 2627 uint_t children; 2628 nvlist_t *config_root; 2629 libzfs_handle_t *hdl = zhp->zpool_hdl; 2630 boolean_t rootpool = zpool_is_bootable(zhp); 2631 2632 if (replacing) 2633 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2634 "cannot replace %s with %s"), old_disk, new_disk); 2635 else 2636 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2637 "cannot attach %s to %s"), new_disk, old_disk); 2638 2639 /* 2640 * If this is a root pool, make sure that we're not attaching an 2641 * EFI labeled device. 2642 */ 2643 if (rootpool && pool_uses_efi(nvroot)) { 2644 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2645 "EFI labeled devices are not supported on root pools.")); 2646 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 2647 } 2648 2649 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2650 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2651 &islog)) == 0) 2652 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2653 2654 if (avail_spare) 2655 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2656 2657 if (l2cache) 2658 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2659 2660 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2661 zc.zc_cookie = replacing; 2662 2663 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2664 &child, &children) != 0 || children != 1) { 2665 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2666 "new device must be a single disk")); 2667 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2668 } 2669 2670 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2671 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2672 2673 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2674 return (-1); 2675 2676 /* 2677 * If the target is a hot spare that has been swapped in, we can only 2678 * replace it with another hot spare. 2679 */ 2680 if (replacing && 2681 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2682 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2683 NULL) == NULL || !avail_spare) && 2684 is_replacing_spare(config_root, tgt, 1)) { 2685 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2686 "can only be replaced by another hot spare")); 2687 free(newname); 2688 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2689 } 2690 2691 free(newname); 2692 2693 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2694 return (-1); 2695 2696 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2697 2698 zcmd_free_nvlists(&zc); 2699 2700 if (ret == 0) { 2701 if (rootpool) { 2702 /* 2703 * XXX need a better way to prevent user from 2704 * booting up a half-baked vdev. 
2705 */ 2706 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2707 "sure to wait until resilver is done " 2708 "before rebooting.\n")); 2709 (void) fprintf(stderr, "\n"); 2710 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If " 2711 "you boot from pool '%s', you may need to update\n" 2712 "boot code on newly attached disk '%s'.\n\n" 2713 "Assuming you use GPT partitioning and 'da0' is " 2714 "your new boot disk\n" 2715 "you may use the following command:\n\n" 2716 "\tgpart bootcode -b /boot/pmbr -p " 2717 "/boot/gptzfsboot -i 1 da0\n\n"), 2718 zhp->zpool_name, new_disk); 2719 } 2720 return (0); 2721 } 2722 2723 switch (errno) { 2724 case ENOTSUP: 2725 /* 2726 * Can't attach to or replace this type of vdev. 2727 */ 2728 if (replacing) { 2729 uint64_t version = zpool_get_prop_int(zhp, 2730 ZPOOL_PROP_VERSION, NULL); 2731 2732 if (islog) 2733 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2734 "cannot replace a log with a spare")); 2735 else if (version >= SPA_VERSION_MULTI_REPLACE) 2736 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2737 "already in replacing/spare config; wait " 2738 "for completion or use 'zpool detach'")); 2739 else 2740 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2741 "cannot replace a replacing device")); 2742 } else { 2743 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2744 "can only attach to mirrors and top-level " 2745 "disks")); 2746 } 2747 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2748 break; 2749 2750 case EINVAL: 2751 /* 2752 * The new device must be a single disk. 2753 */ 2754 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2755 "new device must be a single disk")); 2756 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2757 break; 2758 2759 case EBUSY: 2760 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2761 new_disk); 2762 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2763 break; 2764 2765 case EOVERFLOW: 2766 /* 2767 * The new device is too small. 2768 */ 2769 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2770 "device is too small")); 2771 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2772 break; 2773 2774 case EDOM: 2775 /* 2776 * The new device has a different alignment requirement. 2777 */ 2778 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2779 "devices have different sector alignment")); 2780 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2781 break; 2782 2783 case ENAMETOOLONG: 2784 /* 2785 * The resulting top-level vdev spec won't fit in the label. 2786 */ 2787 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2788 break; 2789 2790 default: 2791 (void) zpool_standard_error(hdl, errno, msg); 2792 } 2793 2794 return (-1); 2795} 2796 2797/* 2798 * Detach the specified device. 
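 *
 * For example (illustrative device name), one half of a two-way mirror can
 * be detached with:
 *
 *	if (zpool_vdev_detach(zhp, "c0t1d0") != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 *
 * Detaching from anything other than a mirror or replacing vdev fails with
 * ENOTSUP, as handled below.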
2799 */ 2800int 2801zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 2802{ 2803 zfs_cmd_t zc = { 0 }; 2804 char msg[1024]; 2805 nvlist_t *tgt; 2806 boolean_t avail_spare, l2cache; 2807 libzfs_handle_t *hdl = zhp->zpool_hdl; 2808 2809 (void) snprintf(msg, sizeof (msg), 2810 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 2811 2812 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2813 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2814 NULL)) == 0) 2815 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2816 2817 if (avail_spare) 2818 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2819 2820 if (l2cache) 2821 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2822 2823 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2824 2825 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 2826 return (0); 2827 2828 switch (errno) { 2829 2830 case ENOTSUP: 2831 /* 2832 * Can't detach from this type of vdev. 2833 */ 2834 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 2835 "applicable to mirror and replacing vdevs")); 2836 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2837 break; 2838 2839 case EBUSY: 2840 /* 2841 * There are no other replicas of this device. 2842 */ 2843 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 2844 break; 2845 2846 default: 2847 (void) zpool_standard_error(hdl, errno, msg); 2848 } 2849 2850 return (-1); 2851} 2852 2853/* 2854 * Find a mirror vdev in the source nvlist. 2855 * 2856 * The mchild array contains a list of disks in one of the top-level mirrors 2857 * of the source pool. The schild array contains a list of disks that the 2858 * user specified on the command line. We loop over the mchild array to 2859 * see if any entry in the schild array matches. 2860 * 2861 * If a disk in the mchild array is found in the schild array, we return 2862 * the index of that entry. Otherwise we return -1. 2863 */ 2864static int 2865find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2866 nvlist_t **schild, uint_t schildren) 2867{ 2868 uint_t mc; 2869 2870 for (mc = 0; mc < mchildren; mc++) { 2871 uint_t sc; 2872 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2873 mchild[mc], B_FALSE); 2874 2875 for (sc = 0; sc < schildren; sc++) { 2876 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2877 schild[sc], B_FALSE); 2878 boolean_t result = (strcmp(mpath, spath) == 0); 2879 2880 free(spath); 2881 if (result) { 2882 free(mpath); 2883 return (mc); 2884 } 2885 } 2886 2887 free(mpath); 2888 } 2889 2890 return (-1); 2891} 2892 2893/* 2894 * Split a mirror pool. If newroot points to null, then a new nvlist 2895 * is generated and it is the responsibility of the caller to free it. 
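 *
 * A minimal caller sketch, assuming 'zhp' is an open handle to a pool made
 * entirely of mirrors (the new pool name is illustrative):
 *
 *	nvlist_t *newroot = NULL;
 *	splitflags_t flags = { 0 };
 *
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0)
 *		nvlist_free(newroot);
 *
 * With flags.dryrun set, the new vdev tree is built and handed back but no
 * ioctl is issued; with flags.import set, the new pool stays imported
 * instead of being exported after the split.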
2896 */ 2897int 2898zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2899 nvlist_t *props, splitflags_t flags) 2900{ 2901 zfs_cmd_t zc = { 0 }; 2902 char msg[1024]; 2903 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2904 nvlist_t **varray = NULL, *zc_props = NULL; 2905 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2906 libzfs_handle_t *hdl = zhp->zpool_hdl; 2907 uint64_t vers; 2908 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2909 int retval = 0; 2910 2911 (void) snprintf(msg, sizeof (msg), 2912 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2913 2914 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2915 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2916 2917 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2918 (void) fprintf(stderr, gettext("Internal error: unable to " 2919 "retrieve pool configuration\n")); 2920 return (-1); 2921 } 2922 2923 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2924 == 0); 2925 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2926 2927 if (props) { 2928 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 2929 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2930 props, vers, flags, msg)) == NULL) 2931 return (-1); 2932 } 2933 2934 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2935 &children) != 0) { 2936 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2937 "Source pool is missing vdev tree")); 2938 if (zc_props) 2939 nvlist_free(zc_props); 2940 return (-1); 2941 } 2942 2943 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2944 vcount = 0; 2945 2946 if (*newroot == NULL || 2947 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2948 &newchild, &newchildren) != 0) 2949 newchildren = 0; 2950 2951 for (c = 0; c < children; c++) { 2952 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2953 char *type; 2954 nvlist_t **mchild, *vdev; 2955 uint_t mchildren; 2956 int entry; 2957 2958 /* 2959 * Unlike cache & spares, slogs are stored in the 2960 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2961 */ 2962 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2963 &is_log); 2964 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2965 &is_hole); 2966 if (is_log || is_hole) { 2967 /* 2968 * Create a hole vdev and put it in the config. 2969 */ 2970 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2971 goto out; 2972 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2973 VDEV_TYPE_HOLE) != 0) 2974 goto out; 2975 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2976 1) != 0) 2977 goto out; 2978 if (lastlog == 0) 2979 lastlog = vcount; 2980 varray[vcount++] = vdev; 2981 continue; 2982 } 2983 lastlog = 0; 2984 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 2985 == 0); 2986 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 2987 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2988 "Source pool must be composed only of mirrors\n")); 2989 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2990 goto out; 2991 } 2992 2993 verify(nvlist_lookup_nvlist_array(child[c], 2994 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2995 2996 /* find or add an entry for this top-level vdev */ 2997 if (newchildren > 0 && 2998 (entry = find_vdev_entry(zhp, mchild, mchildren, 2999 newchild, newchildren)) >= 0) { 3000 /* We found a disk that the user specified. */ 3001 vdev = mchild[entry]; 3002 ++found; 3003 } else { 3004 /* User didn't specify a disk for this vdev. 
*/ 3005 vdev = mchild[mchildren - 1]; 3006 } 3007 3008 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 3009 goto out; 3010 } 3011 3012 /* did we find every disk the user specified? */ 3013 if (found != newchildren) { 3014 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 3015 "include at most one disk from each mirror")); 3016 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3017 goto out; 3018 } 3019 3020 /* Prepare the nvlist for populating. */ 3021 if (*newroot == NULL) { 3022 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 3023 goto out; 3024 freelist = B_TRUE; 3025 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 3026 VDEV_TYPE_ROOT) != 0) 3027 goto out; 3028 } else { 3029 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 3030 } 3031 3032 /* Add all the children we found */ 3033 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 3034 lastlog == 0 ? vcount : lastlog) != 0) 3035 goto out; 3036 3037 /* 3038 * If we're just doing a dry run, exit now with success. 3039 */ 3040 if (flags.dryrun) { 3041 memory_err = B_FALSE; 3042 freelist = B_FALSE; 3043 goto out; 3044 } 3045 3046 /* now build up the config list & call the ioctl */ 3047 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3048 goto out; 3049 3050 if (nvlist_add_nvlist(newconfig, 3051 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3052 nvlist_add_string(newconfig, 3053 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3054 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3055 goto out; 3056 3057 /* 3058 * The new pool is automatically part of the namespace unless we 3059 * explicitly export it. 3060 */ 3061 if (!flags.import) 3062 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 3063 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3064 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 3065 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 3066 goto out; 3067 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 3068 goto out; 3069 3070 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 3071 retval = zpool_standard_error(hdl, errno, msg); 3072 goto out; 3073 } 3074 3075 freelist = B_FALSE; 3076 memory_err = B_FALSE; 3077 3078out: 3079 if (varray != NULL) { 3080 int v; 3081 3082 for (v = 0; v < vcount; v++) 3083 nvlist_free(varray[v]); 3084 free(varray); 3085 } 3086 zcmd_free_nvlists(&zc); 3087 if (zc_props) 3088 nvlist_free(zc_props); 3089 if (newconfig) 3090 nvlist_free(newconfig); 3091 if (freelist) { 3092 nvlist_free(*newroot); 3093 *newroot = NULL; 3094 } 3095 3096 if (retval != 0) 3097 return (retval); 3098 3099 if (memory_err) 3100 return (no_memory(hdl)); 3101 3102 return (0); 3103} 3104 3105/* 3106 * Remove the given device. Currently, this is supported only for hot spares 3107 * and level 2 cache devices. 3108 */ 3109int 3110zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3111{ 3112 zfs_cmd_t zc = { 0 }; 3113 char msg[1024]; 3114 nvlist_t *tgt; 3115 boolean_t avail_spare, l2cache, islog; 3116 libzfs_handle_t *hdl = zhp->zpool_hdl; 3117 uint64_t version; 3118 3119 (void) snprintf(msg, sizeof (msg), 3120 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3121 3122 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3123 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3124 &islog)) == 0) 3125 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3126 /* 3127 * XXX - this should just go away. 
3128 */ 3129 if (!avail_spare && !l2cache && !islog) { 3130 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3131 "only inactive hot spares, cache, top-level, " 3132 "or log devices can be removed")); 3133 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3134 } 3135 3136 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 3137 if (islog && version < SPA_VERSION_HOLES) { 3138 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3139 "pool must be upgrade to support log removal")); 3140 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 3141 } 3142 3143 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3144 3145 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3146 return (0); 3147 3148 return (zpool_standard_error(hdl, errno, msg)); 3149} 3150 3151/* 3152 * Clear the errors for the pool, or the particular device if specified. 3153 */ 3154int 3155zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 3156{ 3157 zfs_cmd_t zc = { 0 }; 3158 char msg[1024]; 3159 nvlist_t *tgt; 3160 zpool_rewind_policy_t policy; 3161 boolean_t avail_spare, l2cache; 3162 libzfs_handle_t *hdl = zhp->zpool_hdl; 3163 nvlist_t *nvi = NULL; 3164 int error; 3165 3166 if (path) 3167 (void) snprintf(msg, sizeof (msg), 3168 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3169 path); 3170 else 3171 (void) snprintf(msg, sizeof (msg), 3172 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3173 zhp->zpool_name); 3174 3175 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3176 if (path) { 3177 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 3178 &l2cache, NULL)) == 0) 3179 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3180 3181 /* 3182 * Don't allow error clearing for hot spares. Do allow 3183 * error clearing for l2cache devices. 3184 */ 3185 if (avail_spare) 3186 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3187 3188 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 3189 &zc.zc_guid) == 0); 3190 } 3191 3192 zpool_get_rewind_policy(rewindnvl, &policy); 3193 zc.zc_cookie = policy.zrp_request; 3194 3195 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 3196 return (-1); 3197 3198 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 3199 return (-1); 3200 3201 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 3202 errno == ENOMEM) { 3203 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 3204 zcmd_free_nvlists(&zc); 3205 return (-1); 3206 } 3207 } 3208 3209 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) && 3210 errno != EPERM && errno != EACCES)) { 3211 if (policy.zrp_request & 3212 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 3213 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3214 zpool_rewind_exclaim(hdl, zc.zc_name, 3215 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), 3216 nvi); 3217 nvlist_free(nvi); 3218 } 3219 zcmd_free_nvlists(&zc); 3220 return (0); 3221 } 3222 3223 zcmd_free_nvlists(&zc); 3224 return (zpool_standard_error(hdl, errno, msg)); 3225} 3226 3227/* 3228 * Similar to zpool_clear(), but takes a GUID (used by fmd). 
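 *
 * For example (illustrative), an agent that has repaired the device with
 * GUID 'vdev_guid' could clear its error counts with:
 *
 *	(void) zpool_vdev_clear(zhp, vdev_guid);
 *
 * Unlike zpool_clear(), no rewind policy nvlist is consulted; the request
 * is always issued with ZPOOL_NO_REWIND.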
3229 */ 3230int 3231zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3232{ 3233 zfs_cmd_t zc = { 0 }; 3234 char msg[1024]; 3235 libzfs_handle_t *hdl = zhp->zpool_hdl; 3236 3237 (void) snprintf(msg, sizeof (msg), 3238 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3239 guid); 3240 3241 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3242 zc.zc_guid = guid; 3243 zc.zc_cookie = ZPOOL_NO_REWIND; 3244 3245 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3246 return (0); 3247 3248 return (zpool_standard_error(hdl, errno, msg)); 3249} 3250 3251/* 3252 * Change the GUID for a pool. 3253 */ 3254int 3255zpool_reguid(zpool_handle_t *zhp) 3256{ 3257 char msg[1024]; 3258 libzfs_handle_t *hdl = zhp->zpool_hdl; 3259 zfs_cmd_t zc = { 0 }; 3260 3261 (void) snprintf(msg, sizeof (msg), 3262 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3263 3264 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3265 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3266 return (0); 3267 3268 return (zpool_standard_error(hdl, errno, msg)); 3269} 3270 3271/* 3272 * Reopen the pool. 3273 */ 3274int 3275zpool_reopen(zpool_handle_t *zhp) 3276{ 3277 zfs_cmd_t zc = { 0 }; 3278 char msg[1024]; 3279 libzfs_handle_t *hdl = zhp->zpool_hdl; 3280 3281 (void) snprintf(msg, sizeof (msg), 3282 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), 3283 zhp->zpool_name); 3284 3285 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3286 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0) 3287 return (0); 3288 return (zpool_standard_error(hdl, errno, msg)); 3289} 3290 3291/* 3292 * Convert from a devid string to a path. 3293 */ 3294static char * 3295devid_to_path(char *devid_str) 3296{ 3297 ddi_devid_t devid; 3298 char *minor; 3299 char *path; 3300 devid_nmlist_t *list = NULL; 3301 int ret; 3302 3303 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3304 return (NULL); 3305 3306 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3307 3308 devid_str_free(minor); 3309 devid_free(devid); 3310 3311 if (ret != 0) 3312 return (NULL); 3313 3314 if ((path = strdup(list[0].devname)) == NULL) 3315 return (NULL); 3316 3317 devid_free_nmlist(list); 3318 3319 return (path); 3320} 3321 3322/* 3323 * Convert from a path to a devid string. 3324 */ 3325static char * 3326path_to_devid(const char *path) 3327{ 3328#ifdef have_devid 3329 int fd; 3330 ddi_devid_t devid; 3331 char *minor, *ret; 3332 3333 if ((fd = open(path, O_RDONLY)) < 0) 3334 return (NULL); 3335 3336 minor = NULL; 3337 ret = NULL; 3338 if (devid_get(fd, &devid) == 0) { 3339 if (devid_get_minor_name(fd, &minor) == 0) 3340 ret = devid_str_encode(devid, minor); 3341 if (minor != NULL) 3342 devid_str_free(minor); 3343 devid_free(devid); 3344 } 3345 (void) close(fd); 3346 3347 return (ret); 3348#else 3349 return (NULL); 3350#endif 3351} 3352 3353/* 3354 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3355 * ignore any failure here, since a common case is for an unprivileged user to 3356 * type 'zpool status', and we'll display the correct information anyway. 
3357 */ 3358static void 3359set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3360{ 3361 zfs_cmd_t zc = { 0 }; 3362 3363 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3364 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3365 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3366 &zc.zc_guid) == 0); 3367 3368 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3369} 3370 3371/* 3372 * Given a vdev, return the name to display in iostat. If the vdev has a path, 3373 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3374 * We also check if this is a whole disk, in which case we strip off the 3375 * trailing 's0' slice name. 3376 * 3377 * This routine is also responsible for identifying when disks have been 3378 * reconfigured in a new location. The kernel will have opened the device by 3379 * devid, but the path will still refer to the old location. To catch this, we 3380 * first do a path -> devid translation (which is fast for the common case). If 3381 * the devid matches, we're done. If not, we do a reverse devid -> path 3382 * translation and issue the appropriate ioctl() to update the path of the vdev. 3383 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3384 * of these checks. 3385 */ 3386char * 3387zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3388 boolean_t verbose) 3389{ 3390 char *path, *devid; 3391 uint64_t value; 3392 char buf[64]; 3393 vdev_stat_t *vs; 3394 uint_t vsc; 3395 int have_stats; 3396 int have_path; 3397 3398 have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3399 (uint64_t **)&vs, &vsc) == 0; 3400 have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0; 3401 3402 /* 3403 * If the device is not currently present, assume it will not 3404 * come back at the same device path. Display the device by GUID. 3405 */ 3406 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 || 3407 have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN) { 3408 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3409 &value) == 0); 3410 (void) snprintf(buf, sizeof (buf), "%llu", 3411 (u_longlong_t)value); 3412 path = buf; 3413 } else if (have_path) { 3414 3415 /* 3416 * If the device is dead (faulted, offline, etc) then don't 3417 * bother opening it. Otherwise we may be forcing the user to 3418 * open a misbehaving device, which can have undesirable 3419 * effects. 3420 */ 3421 if ((have_stats == 0 || 3422 vs->vs_state >= VDEV_STATE_DEGRADED) && 3423 zhp != NULL && 3424 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3425 /* 3426 * Determine if the current path is correct. 3427 */ 3428 char *newdevid = path_to_devid(path); 3429 3430 if (newdevid == NULL || 3431 strcmp(devid, newdevid) != 0) { 3432 char *newpath; 3433 3434 if ((newpath = devid_to_path(devid)) != NULL) { 3435 /* 3436 * Update the path appropriately. 
3437 */ 3438 set_path(zhp, nv, newpath); 3439 if (nvlist_add_string(nv, 3440 ZPOOL_CONFIG_PATH, newpath) == 0) 3441 verify(nvlist_lookup_string(nv, 3442 ZPOOL_CONFIG_PATH, 3443 &path) == 0); 3444 free(newpath); 3445 } 3446 } 3447 3448 if (newdevid) 3449 devid_str_free(newdevid); 3450 } 3451 3452#ifdef sun 3453 if (strncmp(path, "/dev/dsk/", 9) == 0) 3454 path += 9; 3455 3456 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3457 &value) == 0 && value) { 3458 int pathlen = strlen(path); 3459 char *tmp = zfs_strdup(hdl, path); 3460 3461 /* 3462 * If it starts with c#, and ends with "s0", chop 3463 * the "s0" off, or if it ends with "s0/old", remove 3464 * the "s0" from the middle. 3465 */ 3466 if (CTD_CHECK(tmp)) { 3467 if (strcmp(&tmp[pathlen - 2], "s0") == 0) { 3468 tmp[pathlen - 2] = '\0'; 3469 } else if (pathlen > 6 && 3470 strcmp(&tmp[pathlen - 6], "s0/old") == 0) { 3471 (void) strcpy(&tmp[pathlen - 6], 3472 "/old"); 3473 } 3474 } 3475 return (tmp); 3476 } 3477#else /* !sun */ 3478 if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 3479 path += sizeof(_PATH_DEV) - 1; 3480#endif /* !sun */ 3481 } else { 3482 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 3483 3484 /* 3485 * If it's a raidz device, we need to stick in the parity level. 3486 */ 3487 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 3488 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 3489 &value) == 0); 3490 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 3491 (u_longlong_t)value); 3492 path = buf; 3493 } 3494 3495 /* 3496 * We identify each top-level vdev by using a <type-id> 3497 * naming convention. 3498 */ 3499 if (verbose) { 3500 uint64_t id; 3501 3502 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 3503 &id) == 0); 3504 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 3505 (u_longlong_t)id); 3506 path = buf; 3507 } 3508 } 3509 3510 return (zfs_strdup(hdl, path)); 3511} 3512 3513static int 3514zbookmark_compare(const void *a, const void *b) 3515{ 3516 return (memcmp(a, b, sizeof (zbookmark_t))); 3517} 3518 3519/* 3520 * Retrieve the persistent error log, uniquify the members, and return to the 3521 * caller. 3522 */ 3523int 3524zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 3525{ 3526 zfs_cmd_t zc = { 0 }; 3527 uint64_t count; 3528 zbookmark_t *zb = NULL; 3529 int i; 3530 3531 /* 3532 * Retrieve the raw error list from the kernel. If the number of errors 3533 * has increased, allocate more space and continue until we get the 3534 * entire list. 3535 */ 3536 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3537 &count) == 0); 3538 if (count == 0) 3539 return (0); 3540 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3541 count * sizeof (zbookmark_t))) == (uintptr_t)NULL) 3542 return (-1); 3543 zc.zc_nvlist_dst_size = count; 3544 (void) strcpy(zc.zc_name, zhp->zpool_name); 3545 for (;;) { 3546 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3547 &zc) != 0) { 3548 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3549 if (errno == ENOMEM) { 3550 count = zc.zc_nvlist_dst_size; 3551 if ((zc.zc_nvlist_dst = (uintptr_t) 3552 zfs_alloc(zhp->zpool_hdl, count * 3553 sizeof (zbookmark_t))) == (uintptr_t)NULL) 3554 return (-1); 3555 } else { 3556 return (-1); 3557 } 3558 } else { 3559 break; 3560 } 3561 } 3562 3563 /* 3564 * Sort the resulting bookmarks. This is a little confusing due to the 3565 * implementation of ZFS_IOC_ERROR_LOG. 
The bookmarks are copied last 3566 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks 3567 * _not_ copied as part of the process. So we point the start of our 3568 * array appropriate and decrement the total number of elements. 3569 */ 3570 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) + 3571 zc.zc_nvlist_dst_size; 3572 count -= zc.zc_nvlist_dst_size; 3573 3574 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare); 3575 3576 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 3577 3578 /* 3579 * Fill in the nverrlistp with nvlist's of dataset and object numbers. 3580 */ 3581 for (i = 0; i < count; i++) { 3582 nvlist_t *nv; 3583 3584 /* ignoring zb_blkid and zb_level for now */ 3585 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 3586 zb[i-1].zb_object == zb[i].zb_object) 3587 continue; 3588 3589 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 3590 goto nomem; 3591 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 3592 zb[i].zb_objset) != 0) { 3593 nvlist_free(nv); 3594 goto nomem; 3595 } 3596 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 3597 zb[i].zb_object) != 0) { 3598 nvlist_free(nv); 3599 goto nomem; 3600 } 3601 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 3602 nvlist_free(nv); 3603 goto nomem; 3604 } 3605 nvlist_free(nv); 3606 } 3607 3608 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3609 return (0); 3610 3611nomem: 3612 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3613 return (no_memory(zhp->zpool_hdl)); 3614} 3615 3616/* 3617 * Upgrade a ZFS pool to the latest on-disk version. 3618 */ 3619int 3620zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 3621{ 3622 zfs_cmd_t zc = { 0 }; 3623 libzfs_handle_t *hdl = zhp->zpool_hdl; 3624 3625 (void) strcpy(zc.zc_name, zhp->zpool_name); 3626 zc.zc_cookie = new_version; 3627 3628 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 3629 return (zpool_standard_error_fmt(hdl, errno, 3630 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 3631 zhp->zpool_name)); 3632 return (0); 3633} 3634 3635void 3636zfs_save_arguments(int argc, char **argv, char *string, int len) 3637{ 3638 (void) strlcpy(string, basename(argv[0]), len); 3639 for (int i = 1; i < argc; i++) { 3640 (void) strlcat(string, " ", len); 3641 (void) strlcat(string, argv[i], len); 3642 } 3643} 3644 3645int 3646zpool_log_history(libzfs_handle_t *hdl, const char *message) 3647{ 3648 zfs_cmd_t zc = { 0 }; 3649 nvlist_t *args; 3650 int err; 3651 3652 args = fnvlist_alloc(); 3653 fnvlist_add_string(args, "message", message); 3654 err = zcmd_write_src_nvlist(hdl, &zc, args); 3655 if (err == 0) 3656 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc); 3657 nvlist_free(args); 3658 zcmd_free_nvlists(&zc); 3659 return (err); 3660} 3661 3662/* 3663 * Perform ioctl to get some command history of a pool. 3664 * 3665 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 3666 * logical offset of the history buffer to start reading from. 3667 * 3668 * Upon return, 'off' is the next logical offset to read from and 3669 * 'len' is the actual amount of bytes read into 'buf'. 
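 *
 * Callers are expected to iterate, much as zpool_get_history() does below
 * (sketch only; 'buf' and 'bufsize' are illustrative):
 *
 *	uint64_t off = 0, len;
 *	do {
 *		len = bufsize;
 *		if (get_history(zhp, buf, &off, &len) != 0 || len == 0)
 *			break;
 *		... process 'len' bytes starting at 'buf' ...
 *	} while (1);
 *
 * Only len == 0 indicates end of history; a short read does not.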
3670 */ 3671static int 3672get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3673{ 3674 zfs_cmd_t zc = { 0 }; 3675 libzfs_handle_t *hdl = zhp->zpool_hdl; 3676 3677 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3678 3679 zc.zc_history = (uint64_t)(uintptr_t)buf; 3680 zc.zc_history_len = *len; 3681 zc.zc_history_offset = *off; 3682 3683 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3684 switch (errno) { 3685 case EPERM: 3686 return (zfs_error_fmt(hdl, EZFS_PERM, 3687 dgettext(TEXT_DOMAIN, 3688 "cannot show history for pool '%s'"), 3689 zhp->zpool_name)); 3690 case ENOENT: 3691 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3692 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3693 "'%s'"), zhp->zpool_name)); 3694 case ENOTSUP: 3695 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3696 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3697 "'%s', pool must be upgraded"), zhp->zpool_name)); 3698 default: 3699 return (zpool_standard_error_fmt(hdl, errno, 3700 dgettext(TEXT_DOMAIN, 3701 "cannot get history for '%s'"), zhp->zpool_name)); 3702 } 3703 } 3704 3705 *len = zc.zc_history_len; 3706 *off = zc.zc_history_offset; 3707 3708 return (0); 3709} 3710 3711/* 3712 * Process the buffer of nvlists, unpacking and storing each nvlist record 3713 * into 'records'. 'leftover' is set to the number of bytes that weren't 3714 * processed as there wasn't a complete record. 3715 */ 3716int 3717zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3718 nvlist_t ***records, uint_t *numrecords) 3719{ 3720 uint64_t reclen; 3721 nvlist_t *nv; 3722 int i; 3723 3724 while (bytes_read > sizeof (reclen)) { 3725 3726 /* get length of packed record (stored as little endian) */ 3727 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3728 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3729 3730 if (bytes_read < sizeof (reclen) + reclen) 3731 break; 3732 3733 /* unpack record */ 3734 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3735 return (ENOMEM); 3736 bytes_read -= sizeof (reclen) + reclen; 3737 buf += sizeof (reclen) + reclen; 3738 3739 /* add record to nvlist array */ 3740 (*numrecords)++; 3741 if (ISP2(*numrecords + 1)) { 3742 *records = realloc(*records, 3743 *numrecords * 2 * sizeof (nvlist_t *)); 3744 } 3745 (*records)[*numrecords - 1] = nv; 3746 } 3747 3748 *leftover = bytes_read; 3749 return (0); 3750} 3751 3752/* from spa_history.c: spa_history_create_obj() */ 3753#define HIS_BUF_LEN_DEF (128 << 10) 3754#define HIS_BUF_LEN_MAX (1 << 30) 3755 3756/* 3757 * Retrieve the command history of a pool. 3758 */ 3759int 3760zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3761{ 3762 char *buf = NULL; 3763 uint64_t bufsize = HIS_BUF_LEN_DEF; 3764 uint64_t off = 0; 3765 nvlist_t **records = NULL; 3766 uint_t numrecords = 0; 3767 int err, i; 3768 3769 if ((buf = malloc(bufsize)) == NULL) 3770 return (ENOMEM); 3771 do { 3772 uint64_t bytes_read = bufsize; 3773 uint64_t leftover; 3774 3775 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3776 break; 3777 3778 /* if nothing else was read in, we're at EOF, just return */ 3779 if (bytes_read == 0) 3780 break; 3781 3782 if ((err = zpool_history_unpack(buf, bytes_read, 3783 &leftover, &records, &numrecords)) != 0) 3784 break; 3785 off -= leftover; 3786 3787 /* 3788 * If the history block is too big, double the buffer 3789 * size and try again. 
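		 * (leftover == bytes_read means not even one complete record
		 * fit in the current buffer; since 'off' was already rewound
		 * by 'leftover' above, the same offset is retried with the
		 * larger buffer, up to HIS_BUF_LEN_MAX.)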
3790 */ 3791 if (leftover == bytes_read) { 3792 free(buf); 3793 buf = NULL; 3794 3795 bufsize <<= 1; 3796 if ((bufsize >= HIS_BUF_LEN_MAX) || 3797 ((buf = malloc(bufsize)) == NULL)) { 3798 err = ENOMEM; 3799 break; 3800 } 3801 } 3802 3803 /* CONSTCOND */ 3804 } while (1); 3805 free(buf); 3806 3807 if (!err) { 3808 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3809 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3810 records, numrecords) == 0); 3811 } 3812 for (i = 0; i < numrecords; i++) 3813 nvlist_free(records[i]); 3814 free(records); 3815 3816 return (err); 3817} 3818 3819void 3820zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3821 char *pathname, size_t len) 3822{ 3823 zfs_cmd_t zc = { 0 }; 3824 boolean_t mounted = B_FALSE; 3825 char *mntpnt = NULL; 3826 char dsname[MAXNAMELEN]; 3827 3828 if (dsobj == 0) { 3829 /* special case for the MOS */ 3830 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3831 return; 3832 } 3833 3834 /* get the dataset's name */ 3835 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3836 zc.zc_obj = dsobj; 3837 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3838 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3839 /* just write out a path of two object numbers */ 3840 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3841 dsobj, obj); 3842 return; 3843 } 3844 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3845 3846 /* find out if the dataset is mounted */ 3847 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3848 3849 /* get the corrupted object's path */ 3850 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 3851 zc.zc_obj = obj; 3852 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3853 &zc) == 0) { 3854 if (mounted) { 3855 (void) snprintf(pathname, len, "%s%s", mntpnt, 3856 zc.zc_value); 3857 } else { 3858 (void) snprintf(pathname, len, "%s:%s", 3859 dsname, zc.zc_value); 3860 } 3861 } else { 3862 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3863 } 3864 free(mntpnt); 3865} 3866 3867#ifdef sun 3868/* 3869 * Read the EFI label from the config, if a label does not exist then 3870 * pass back the error to the caller. If the caller has passed a non-NULL 3871 * diskaddr argument then we set it to the starting address of the EFI 3872 * partition. 
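 * The return value is -1 if the path could not be looked up or the device
 * could not be opened, otherwise whatever efi_alloc_and_read() returned
 * (non-negative on success), so callers such as find_start_block() treat
 * any negative value as failure.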
3873 */ 3874static int 3875read_efi_label(nvlist_t *config, diskaddr_t *sb) 3876{ 3877 char *path; 3878 int fd; 3879 char diskname[MAXPATHLEN]; 3880 int err = -1; 3881 3882 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3883 return (err); 3884 3885 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT, 3886 strrchr(path, '/')); 3887 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3888 struct dk_gpt *vtoc; 3889 3890 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3891 if (sb != NULL) 3892 *sb = vtoc->efi_parts[0].p_start; 3893 efi_free(vtoc); 3894 } 3895 (void) close(fd); 3896 } 3897 return (err); 3898} 3899 3900/* 3901 * determine where a partition starts on a disk in the current 3902 * configuration 3903 */ 3904static diskaddr_t 3905find_start_block(nvlist_t *config) 3906{ 3907 nvlist_t **child; 3908 uint_t c, children; 3909 diskaddr_t sb = MAXOFFSET_T; 3910 uint64_t wholedisk; 3911 3912 if (nvlist_lookup_nvlist_array(config, 3913 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3914 if (nvlist_lookup_uint64(config, 3915 ZPOOL_CONFIG_WHOLE_DISK, 3916 &wholedisk) != 0 || !wholedisk) { 3917 return (MAXOFFSET_T); 3918 } 3919 if (read_efi_label(config, &sb) < 0) 3920 sb = MAXOFFSET_T; 3921 return (sb); 3922 } 3923 3924 for (c = 0; c < children; c++) { 3925 sb = find_start_block(child[c]); 3926 if (sb != MAXOFFSET_T) { 3927 return (sb); 3928 } 3929 } 3930 return (MAXOFFSET_T); 3931} 3932#endif /* sun */ 3933 3934/* 3935 * Label an individual disk. The name provided is the short name, 3936 * stripped of any leading /dev path. 3937 */ 3938int 3939zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name) 3940{ 3941#ifdef sun 3942 char path[MAXPATHLEN]; 3943 struct dk_gpt *vtoc; 3944 int fd; 3945 size_t resv = EFI_MIN_RESV_SIZE; 3946 uint64_t slice_size; 3947 diskaddr_t start_block; 3948 char errbuf[1024]; 3949 3950 /* prepare an error message just in case */ 3951 (void) snprintf(errbuf, sizeof (errbuf), 3952 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3953 3954 if (zhp) { 3955 nvlist_t *nvroot; 3956 3957 if (zpool_is_bootable(zhp)) { 3958 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3959 "EFI labeled devices are not supported on root " 3960 "pools.")); 3961 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3962 } 3963 3964 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3965 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3966 3967 if (zhp->zpool_start_block == 0) 3968 start_block = find_start_block(nvroot); 3969 else 3970 start_block = zhp->zpool_start_block; 3971 zhp->zpool_start_block = start_block; 3972 } else { 3973 /* new pool */ 3974 start_block = NEW_START_BLOCK; 3975 } 3976 3977 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name, 3978 BACKUP_SLICE); 3979 3980 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3981 /* 3982 * This shouldn't happen. We've long since verified that this 3983 * is a valid device. 
3984 */ 3985 zfs_error_aux(hdl, 3986 dgettext(TEXT_DOMAIN, "unable to open device")); 3987 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 3988 } 3989 3990 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 3991 /* 3992 * The only way this can fail is if we run out of memory, or we 3993 * were unable to read the disk's capacity 3994 */ 3995 if (errno == ENOMEM) 3996 (void) no_memory(hdl); 3997 3998 (void) close(fd); 3999 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4000 "unable to read disk capacity"), name); 4001 4002 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 4003 } 4004 4005 slice_size = vtoc->efi_last_u_lba + 1; 4006 slice_size -= EFI_MIN_RESV_SIZE; 4007 if (start_block == MAXOFFSET_T) 4008 start_block = NEW_START_BLOCK; 4009 slice_size -= start_block; 4010 4011 vtoc->efi_parts[0].p_start = start_block; 4012 vtoc->efi_parts[0].p_size = slice_size; 4013 4014 /* 4015 * Why we use V_USR: V_BACKUP confuses users, and is considered 4016 * disposable by some EFI utilities (since EFI doesn't have a backup 4017 * slice). V_UNASSIGNED is supposed to be used only for zero size 4018 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 4019 * etc. were all pretty specific. V_USR is as close to reality as we 4020 * can get, in the absence of V_OTHER. 4021 */ 4022 vtoc->efi_parts[0].p_tag = V_USR; 4023 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 4024 4025 vtoc->efi_parts[8].p_start = slice_size + start_block; 4026 vtoc->efi_parts[8].p_size = resv; 4027 vtoc->efi_parts[8].p_tag = V_RESERVED; 4028 4029 if (efi_write(fd, vtoc) != 0) { 4030 /* 4031 * Some block drivers (like pcata) may not support EFI 4032 * GPT labels. Print out a helpful error message dir- 4033 * ecting the user to manually label the disk and give 4034 * a specific slice. 4035 */ 4036 (void) close(fd); 4037 efi_free(vtoc); 4038 4039 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4040 "try using fdisk(1M) and then provide a specific slice")); 4041 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 4042 } 4043 4044 (void) close(fd); 4045 efi_free(vtoc); 4046#endif /* sun */ 4047 return (0); 4048} 4049 4050static boolean_t 4051supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 4052{ 4053 char *type; 4054 nvlist_t **child; 4055 uint_t children, c; 4056 4057 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 4058 if (strcmp(type, VDEV_TYPE_FILE) == 0 || 4059 strcmp(type, VDEV_TYPE_HOLE) == 0 || 4060 strcmp(type, VDEV_TYPE_MISSING) == 0) { 4061 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4062 "vdev type '%s' is not supported"), type); 4063 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 4064 return (B_FALSE); 4065 } 4066 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 4067 &child, &children) == 0) { 4068 for (c = 0; c < children; c++) { 4069 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 4070 return (B_FALSE); 4071 } 4072 } 4073 return (B_TRUE); 4074} 4075 4076/* 4077 * Check if this zvol is allowable for use as a dump device; zero if 4078 * it is, > 0 if it isn't, < 0 if it isn't a zvol. 4079 * 4080 * Allowable storage configurations include mirrors, all raidz variants, and 4081 * pools with log, cache, and spare devices. Pools which are backed by files or 4082 * have missing/hole vdevs are not suitable. 
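 *
 * For example (illustrative path), dump administration code could vet a
 * candidate zvol with:
 *
 *	if (zvol_check_dump_config("/dev/zvol/dsk/rpool/dump") == 0)
 *		... the zvol is usable as a dump device ...
 *
 * A return of -1 means the argument does not even name a zvol (it is not
 * under ZVOL_FULL_DEV_DIR); a positive return means it is a zvol but the
 * pool layout or name failed validation.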
4083 */ 4084int 4085zvol_check_dump_config(char *arg) 4086{ 4087 zpool_handle_t *zhp = NULL; 4088 nvlist_t *config, *nvroot; 4089 char *p, *volname; 4090 nvlist_t **top; 4091 uint_t toplevels; 4092 libzfs_handle_t *hdl; 4093 char errbuf[1024]; 4094 char poolname[ZPOOL_MAXNAMELEN]; 4095 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4096 int ret = 1; 4097 4098 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4099 return (-1); 4100 } 4101 4102 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4103 "dump is not supported on device '%s'"), arg); 4104 4105 if ((hdl = libzfs_init()) == NULL) 4106 return (1); 4107 libzfs_print_on_error(hdl, B_TRUE); 4108 4109 volname = arg + pathlen; 4110 4111 /* check the configuration of the pool */ 4112 if ((p = strchr(volname, '/')) == NULL) { 4113 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4114 "malformed dataset name")); 4115 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4116 return (1); 4117 } else if (p - volname >= ZFS_MAXNAMELEN) { 4118 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4119 "dataset name is too long")); 4120 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4121 return (1); 4122 } else { 4123 (void) strncpy(poolname, volname, p - volname); 4124 poolname[p - volname] = '\0'; 4125 } 4126 4127 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4128 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4129 "could not open pool '%s'"), poolname); 4130 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4131 goto out; 4132 } 4133 config = zpool_get_config(zhp, NULL); 4134 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4135 &nvroot) != 0) { 4136 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4137 "could not obtain vdev configuration for '%s'"), poolname); 4138 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4139 goto out; 4140 } 4141 4142 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4143 &top, &toplevels) == 0); 4144 4145 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4146 goto out; 4147 } 4148 ret = 0; 4149 4150out: 4151 if (zhp) 4152 zpool_close(zhp); 4153 libzfs_fini(hdl); 4154 return (ret); 4155} 4156