libzfs_pool.c revision 265039
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#define	DISK_ROOT	"/dev/dsk"
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

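/*
 * Illustrative sketch (not part of the original file): a consumer could
 * combine zpool_get_state() with zpool_pool_state_to_name() above to print
 * a pool's state; 'zhp' here is assumed to be a handle obtained from
 * zpool_open_canfail().
 *
 *	(void) printf("%s: %s\n", zpool_get_name(zhp),
 *	    zpool_pool_state_to_name(zpool_get_state(zhp)));
 */
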
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_EXPANDSZ:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}

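/*
 * Illustrative sketch (not part of the original file): reading a pool
 * property into a caller-supplied buffer with zpool_get_prop(); the handle
 * 'zhp' and the printf reporting are assumptions for the example only.
 *
 *	char buf[ZPOOL_MAXPROPLEN];
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf, sizeof (buf),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("capacity: %s\n", buf);
 *
 * Passing B_TRUE for 'literal' would return the raw number rather than the
 * nicenum or percentage form produced above.
 */
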
/*
 * Check if the bootfs name has the same pool name as it is set to.
 * Assuming bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
#ifdef sun
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
#endif	/* sun */
	return (B_FALSE);
}

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}


/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * bootfs property value has to be a dataset name and
			 * the dataset has to be in the same pool as it sets to.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#ifdef sun
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif	/* sun */
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

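/*
 * Illustrative sketch (not part of the original file): the validation above
 * is exercised by zpool_set_prop() below.  A caller setting the pool comment
 * might look like this; 'zhp' is assumed to come from zpool_open().
 *
 *	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *		return (-1);
 *
 * On failure the error text is recorded on the libzfs handle.
 */
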
/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (int i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256

/*
 * Validate the given pool name, optionally putting an extended error message in
 * 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
911 */ 912 if (ret == 0 && !isopen && 913 (strncmp(pool, "mirror", 6) == 0 || 914 strncmp(pool, "raidz", 5) == 0 || 915 strncmp(pool, "spare", 5) == 0 || 916 strcmp(pool, "log") == 0)) { 917 if (hdl != NULL) 918 zfs_error_aux(hdl, 919 dgettext(TEXT_DOMAIN, "name is reserved")); 920 return (B_FALSE); 921 } 922 923 924 if (ret != 0) { 925 if (hdl != NULL) { 926 switch (why) { 927 case NAME_ERR_TOOLONG: 928 zfs_error_aux(hdl, 929 dgettext(TEXT_DOMAIN, "name is too long")); 930 break; 931 932 case NAME_ERR_INVALCHAR: 933 zfs_error_aux(hdl, 934 dgettext(TEXT_DOMAIN, "invalid character " 935 "'%c' in pool name"), what); 936 break; 937 938 case NAME_ERR_NOLETTER: 939 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 940 "name must begin with a letter")); 941 break; 942 943 case NAME_ERR_RESERVED: 944 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 945 "name is reserved")); 946 break; 947 948 case NAME_ERR_DISKLIKE: 949 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 950 "pool name is reserved")); 951 break; 952 953 case NAME_ERR_LEADING_SLASH: 954 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 955 "leading slash in name")); 956 break; 957 958 case NAME_ERR_EMPTY_COMPONENT: 959 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 960 "empty component in name")); 961 break; 962 963 case NAME_ERR_TRAILING_SLASH: 964 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 965 "trailing slash in name")); 966 break; 967 968 case NAME_ERR_MULTIPLE_AT: 969 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 970 "multiple '@' delimiters in name")); 971 break; 972 973 } 974 } 975 return (B_FALSE); 976 } 977 978 return (B_TRUE); 979} 980 981/* 982 * Open a handle to the given pool, even if the pool is currently in the FAULTED 983 * state. 984 */ 985zpool_handle_t * 986zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 987{ 988 zpool_handle_t *zhp; 989 boolean_t missing; 990 991 /* 992 * Make sure the pool name is valid. 993 */ 994 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 995 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 996 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 997 pool); 998 return (NULL); 999 } 1000 1001 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1002 return (NULL); 1003 1004 zhp->zpool_hdl = hdl; 1005 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1006 1007 if (zpool_refresh_stats(zhp, &missing) != 0) { 1008 zpool_close(zhp); 1009 return (NULL); 1010 } 1011 1012 if (missing) { 1013 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 1014 (void) zfs_error_fmt(hdl, EZFS_NOENT, 1015 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 1016 zpool_close(zhp); 1017 return (NULL); 1018 } 1019 1020 return (zhp); 1021} 1022 1023/* 1024 * Like the above, but silent on error. Used when iterating over pools (because 1025 * the configuration cache may be out of date). 1026 */ 1027int 1028zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 1029{ 1030 zpool_handle_t *zhp; 1031 boolean_t missing; 1032 1033 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1034 return (-1); 1035 1036 zhp->zpool_hdl = hdl; 1037 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1038 1039 if (zpool_refresh_stats(zhp, &missing) != 0) { 1040 zpool_close(zhp); 1041 return (-1); 1042 } 1043 1044 if (missing) { 1045 zpool_close(zhp); 1046 *ret = NULL; 1047 return (0); 1048 } 1049 1050 *ret = zhp; 1051 return (0); 1052} 1053 1054/* 1055 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 1056 * state. 
1057 */ 1058zpool_handle_t * 1059zpool_open(libzfs_handle_t *hdl, const char *pool) 1060{ 1061 zpool_handle_t *zhp; 1062 1063 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 1064 return (NULL); 1065 1066 if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 1067 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 1068 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 1069 zpool_close(zhp); 1070 return (NULL); 1071 } 1072 1073 return (zhp); 1074} 1075 1076/* 1077 * Close the handle. Simply frees the memory associated with the handle. 1078 */ 1079void 1080zpool_close(zpool_handle_t *zhp) 1081{ 1082 if (zhp->zpool_config) 1083 nvlist_free(zhp->zpool_config); 1084 if (zhp->zpool_old_config) 1085 nvlist_free(zhp->zpool_old_config); 1086 if (zhp->zpool_props) 1087 nvlist_free(zhp->zpool_props); 1088 free(zhp); 1089} 1090 1091/* 1092 * Return the name of the pool. 1093 */ 1094const char * 1095zpool_get_name(zpool_handle_t *zhp) 1096{ 1097 return (zhp->zpool_name); 1098} 1099 1100 1101/* 1102 * Return the state of the pool (ACTIVE or UNAVAILABLE) 1103 */ 1104int 1105zpool_get_state(zpool_handle_t *zhp) 1106{ 1107 return (zhp->zpool_state); 1108} 1109 1110/* 1111 * Create the named pool, using the provided vdev list. It is assumed 1112 * that the consumer has already validated the contents of the nvlist, so we 1113 * don't have to worry about error semantics. 1114 */ 1115int 1116zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 1117 nvlist_t *props, nvlist_t *fsprops) 1118{ 1119 zfs_cmd_t zc = { 0 }; 1120 nvlist_t *zc_fsprops = NULL; 1121 nvlist_t *zc_props = NULL; 1122 char msg[1024]; 1123 int ret = -1; 1124 1125 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1126 "cannot create '%s'"), pool); 1127 1128 if (!zpool_name_valid(hdl, B_FALSE, pool)) 1129 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 1130 1131 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1132 return (-1); 1133 1134 if (props) { 1135 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 1136 1137 if ((zc_props = zpool_valid_proplist(hdl, pool, props, 1138 SPA_VERSION_1, flags, msg)) == NULL) { 1139 goto create_failed; 1140 } 1141 } 1142 1143 if (fsprops) { 1144 uint64_t zoned; 1145 char *zonestr; 1146 1147 zoned = ((nvlist_lookup_string(fsprops, 1148 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 1149 strcmp(zonestr, "on") == 0); 1150 1151 if ((zc_fsprops = zfs_valid_proplist(hdl, 1152 ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) { 1153 goto create_failed; 1154 } 1155 if (!zc_props && 1156 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 1157 goto create_failed; 1158 } 1159 if (nvlist_add_nvlist(zc_props, 1160 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 1161 goto create_failed; 1162 } 1163 } 1164 1165 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 1166 goto create_failed; 1167 1168 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 1169 1170 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 1171 1172 zcmd_free_nvlists(&zc); 1173 nvlist_free(zc_props); 1174 nvlist_free(zc_fsprops); 1175 1176 switch (errno) { 1177 case EBUSY: 1178 /* 1179 * This can happen if the user has specified the same 1180 * device multiple times. We can't reliably detect this 1181 * until we try to add it and see we already have a 1182 * label. 
1183 */ 1184 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1185 "one or more vdevs refer to the same device")); 1186 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1187 1188 case EOVERFLOW: 1189 /* 1190 * This occurs when one of the devices is below 1191 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1192 * device was the problem device since there's no 1193 * reliable way to determine device size from userland. 1194 */ 1195 { 1196 char buf[64]; 1197 1198 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1199 1200 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1201 "one or more devices is less than the " 1202 "minimum size (%s)"), buf); 1203 } 1204 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1205 1206 case ENOSPC: 1207 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1208 "one or more devices is out of space")); 1209 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1210 1211 case ENOTBLK: 1212 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1213 "cache device must be a disk or disk slice")); 1214 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1215 1216 default: 1217 return (zpool_standard_error(hdl, errno, msg)); 1218 } 1219 } 1220 1221create_failed: 1222 zcmd_free_nvlists(&zc); 1223 nvlist_free(zc_props); 1224 nvlist_free(zc_fsprops); 1225 return (ret); 1226} 1227 1228/* 1229 * Destroy the given pool. It is up to the caller to ensure that there are no 1230 * datasets left in the pool. 1231 */ 1232int 1233zpool_destroy(zpool_handle_t *zhp, const char *log_str) 1234{ 1235 zfs_cmd_t zc = { 0 }; 1236 zfs_handle_t *zfp = NULL; 1237 libzfs_handle_t *hdl = zhp->zpool_hdl; 1238 char msg[1024]; 1239 1240 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1241 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1242 return (-1); 1243 1244 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1245 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1246 1247 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1248 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1249 "cannot destroy '%s'"), zhp->zpool_name); 1250 1251 if (errno == EROFS) { 1252 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1253 "one or more devices is read only")); 1254 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1255 } else { 1256 (void) zpool_standard_error(hdl, errno, msg); 1257 } 1258 1259 if (zfp) 1260 zfs_close(zfp); 1261 return (-1); 1262 } 1263 1264 if (zfp) { 1265 remove_mountpoint(zfp); 1266 zfs_close(zfp); 1267 } 1268 1269 return (0); 1270} 1271 1272/* 1273 * Add the given vdevs to the pool. The caller must have already performed the 1274 * necessary verification to ensure that the vdev specification is well-formed. 
1275 */ 1276int 1277zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) 1278{ 1279 zfs_cmd_t zc = { 0 }; 1280 int ret; 1281 libzfs_handle_t *hdl = zhp->zpool_hdl; 1282 char msg[1024]; 1283 nvlist_t **spares, **l2cache; 1284 uint_t nspares, nl2cache; 1285 1286 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1287 "cannot add to '%s'"), zhp->zpool_name); 1288 1289 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1290 SPA_VERSION_SPARES && 1291 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1292 &spares, &nspares) == 0) { 1293 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1294 "upgraded to add hot spares")); 1295 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1296 } 1297 1298 if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot, 1299 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) { 1300 uint64_t s; 1301 1302 for (s = 0; s < nspares; s++) { 1303 char *path; 1304 1305 if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH, 1306 &path) == 0 && pool_uses_efi(spares[s])) { 1307 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1308 "device '%s' contains an EFI label and " 1309 "cannot be used on root pools."), 1310 zpool_vdev_name(hdl, NULL, spares[s], 1311 B_FALSE)); 1312 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 1313 } 1314 } 1315 } 1316 1317 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1318 SPA_VERSION_L2CACHE && 1319 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1320 &l2cache, &nl2cache) == 0) { 1321 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1322 "upgraded to add cache devices")); 1323 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1324 } 1325 1326 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1327 return (-1); 1328 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1329 1330 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) { 1331 switch (errno) { 1332 case EBUSY: 1333 /* 1334 * This can happen if the user has specified the same 1335 * device multiple times. We can't reliably detect this 1336 * until we try to add it and see we already have a 1337 * label. 1338 */ 1339 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1340 "one or more vdevs refer to the same device")); 1341 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1342 break; 1343 1344 case EOVERFLOW: 1345 /* 1346 * This occurrs when one of the devices is below 1347 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1348 * device was the problem device since there's no 1349 * reliable way to determine device size from userland. 
1350 */ 1351 { 1352 char buf[64]; 1353 1354 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1355 1356 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1357 "device is less than the minimum " 1358 "size (%s)"), buf); 1359 } 1360 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1361 break; 1362 1363 case ENOTSUP: 1364 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1365 "pool must be upgraded to add these vdevs")); 1366 (void) zfs_error(hdl, EZFS_BADVERSION, msg); 1367 break; 1368 1369 case EDOM: 1370 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1371 "root pool can not have multiple vdevs" 1372 " or separate logs")); 1373 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); 1374 break; 1375 1376 case ENOTBLK: 1377 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1378 "cache device must be a disk or disk slice")); 1379 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1380 break; 1381 1382 default: 1383 (void) zpool_standard_error(hdl, errno, msg); 1384 } 1385 1386 ret = -1; 1387 } else { 1388 ret = 0; 1389 } 1390 1391 zcmd_free_nvlists(&zc); 1392 1393 return (ret); 1394} 1395 1396/* 1397 * Exports the pool from the system. The caller must ensure that there are no 1398 * mounted datasets in the pool. 1399 */ 1400static int 1401zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce, 1402 const char *log_str) 1403{ 1404 zfs_cmd_t zc = { 0 }; 1405 char msg[1024]; 1406 1407 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1408 "cannot export '%s'"), zhp->zpool_name); 1409 1410 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1411 zc.zc_cookie = force; 1412 zc.zc_guid = hardforce; 1413 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1414 1415 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { 1416 switch (errno) { 1417 case EXDEV: 1418 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 1419 "use '-f' to override the following errors:\n" 1420 "'%s' has an active shared spare which could be" 1421 " used by other pools once '%s' is exported."), 1422 zhp->zpool_name, zhp->zpool_name); 1423 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, 1424 msg)); 1425 default: 1426 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, 1427 msg)); 1428 } 1429 } 1430 1431 return (0); 1432} 1433 1434int 1435zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str) 1436{ 1437 return (zpool_export_common(zhp, force, B_FALSE, log_str)); 1438} 1439 1440int 1441zpool_export_force(zpool_handle_t *zhp, const char *log_str) 1442{ 1443 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str)); 1444} 1445 1446static void 1447zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun, 1448 nvlist_t *config) 1449{ 1450 nvlist_t *nv = NULL; 1451 uint64_t rewindto; 1452 int64_t loss = -1; 1453 struct tm t; 1454 char timestr[128]; 1455 1456 if (!hdl->libzfs_printerr || config == NULL) 1457 return; 1458 1459 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1460 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) { 1461 return; 1462 } 1463 1464 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1465 return; 1466 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1467 1468 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1469 strftime(timestr, 128, 0, &t) != 0) { 1470 if (dryrun) { 1471 (void) printf(dgettext(TEXT_DOMAIN, 1472 "Would be able to return %s " 1473 "to its state as of %s.\n"), 1474 name, timestr); 1475 } else { 1476 (void) printf(dgettext(TEXT_DOMAIN, 1477 "Pool %s returned to its state as of %s.\n"), 1478 
name, timestr); 1479 } 1480 if (loss > 120) { 1481 (void) printf(dgettext(TEXT_DOMAIN, 1482 "%s approximately %lld "), 1483 dryrun ? "Would discard" : "Discarded", 1484 (loss + 30) / 60); 1485 (void) printf(dgettext(TEXT_DOMAIN, 1486 "minutes of transactions.\n")); 1487 } else if (loss > 0) { 1488 (void) printf(dgettext(TEXT_DOMAIN, 1489 "%s approximately %lld "), 1490 dryrun ? "Would discard" : "Discarded", loss); 1491 (void) printf(dgettext(TEXT_DOMAIN, 1492 "seconds of transactions.\n")); 1493 } 1494 } 1495} 1496 1497void 1498zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1499 nvlist_t *config) 1500{ 1501 nvlist_t *nv = NULL; 1502 int64_t loss = -1; 1503 uint64_t edata = UINT64_MAX; 1504 uint64_t rewindto; 1505 struct tm t; 1506 char timestr[128]; 1507 1508 if (!hdl->libzfs_printerr) 1509 return; 1510 1511 if (reason >= 0) 1512 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1513 else 1514 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1515 1516 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1517 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1518 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 || 1519 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1520 goto no_info; 1521 1522 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1523 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1524 &edata); 1525 1526 (void) printf(dgettext(TEXT_DOMAIN, 1527 "Recovery is possible, but will result in some data loss.\n")); 1528 1529 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1530 strftime(timestr, 128, 0, &t) != 0) { 1531 (void) printf(dgettext(TEXT_DOMAIN, 1532 "\tReturning the pool to its state as of %s\n" 1533 "\tshould correct the problem. "), 1534 timestr); 1535 } else { 1536 (void) printf(dgettext(TEXT_DOMAIN, 1537 "\tReverting the pool to an earlier state " 1538 "should correct the problem.\n\t")); 1539 } 1540 1541 if (loss > 120) { 1542 (void) printf(dgettext(TEXT_DOMAIN, 1543 "Approximately %lld minutes of data\n" 1544 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1545 } else if (loss > 0) { 1546 (void) printf(dgettext(TEXT_DOMAIN, 1547 "Approximately %lld seconds of data\n" 1548 "\tmust be discarded, irreversibly. "), loss); 1549 } 1550 if (edata != 0 && edata != UINT64_MAX) { 1551 if (edata == 1) { 1552 (void) printf(dgettext(TEXT_DOMAIN, 1553 "After rewind, at least\n" 1554 "\tone persistent user-data error will remain. ")); 1555 } else { 1556 (void) printf(dgettext(TEXT_DOMAIN, 1557 "After rewind, several\n" 1558 "\tpersistent user-data errors will remain. ")); 1559 } 1560 } 1561 (void) printf(dgettext(TEXT_DOMAIN, 1562 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1563 reason >= 0 ? "clear" : "import", name); 1564 1565 (void) printf(dgettext(TEXT_DOMAIN, 1566 "A scrub of the pool\n" 1567 "\tis strongly recommended after recovery.\n")); 1568 return; 1569 1570no_info: 1571 (void) printf(dgettext(TEXT_DOMAIN, 1572 "Destroy and re-create the pool from\n\ta backup source.\n")); 1573} 1574 1575/* 1576 * zpool_import() is a contracted interface. Should be kept the same 1577 * if possible. 1578 * 1579 * Applications should use zpool_import_props() to import a pool with 1580 * new properties value to be set. 
1581 */ 1582int 1583zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1584 char *altroot) 1585{ 1586 nvlist_t *props = NULL; 1587 int ret; 1588 1589 if (altroot != NULL) { 1590 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1591 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1592 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1593 newname)); 1594 } 1595 1596 if (nvlist_add_string(props, 1597 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1598 nvlist_add_string(props, 1599 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1600 nvlist_free(props); 1601 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1602 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1603 newname)); 1604 } 1605 } 1606 1607 ret = zpool_import_props(hdl, config, newname, props, 1608 ZFS_IMPORT_NORMAL); 1609 if (props) 1610 nvlist_free(props); 1611 return (ret); 1612} 1613 1614static void 1615print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1616 int indent) 1617{ 1618 nvlist_t **child; 1619 uint_t c, children; 1620 char *vname; 1621 uint64_t is_log = 0; 1622 1623 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1624 &is_log); 1625 1626 if (name != NULL) 1627 (void) printf("\t%*s%s%s\n", indent, "", name, 1628 is_log ? " [log]" : ""); 1629 1630 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1631 &child, &children) != 0) 1632 return; 1633 1634 for (c = 0; c < children; c++) { 1635 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); 1636 print_vdev_tree(hdl, vname, child[c], indent + 2); 1637 free(vname); 1638 } 1639} 1640 1641void 1642zpool_print_unsup_feat(nvlist_t *config) 1643{ 1644 nvlist_t *nvinfo, *unsup_feat; 1645 1646 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 1647 0); 1648 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT, 1649 &unsup_feat) == 0); 1650 1651 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL; 1652 nvp = nvlist_next_nvpair(unsup_feat, nvp)) { 1653 char *desc; 1654 1655 verify(nvpair_type(nvp) == DATA_TYPE_STRING); 1656 verify(nvpair_value_string(nvp, &desc) == 0); 1657 1658 if (strlen(desc) > 0) 1659 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc); 1660 else 1661 (void) printf("\t%s\n", nvpair_name(nvp)); 1662 } 1663} 1664 1665/* 1666 * Import the given pool using the known configuration and a list of 1667 * properties to be set. The configuration should have come from 1668 * zpool_find_import(). The 'newname' parameters control whether the pool 1669 * is imported with a different name. 
1670 */ 1671int 1672zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1673 nvlist_t *props, int flags) 1674{ 1675 zfs_cmd_t zc = { 0 }; 1676 zpool_rewind_policy_t policy; 1677 nvlist_t *nv = NULL; 1678 nvlist_t *nvinfo = NULL; 1679 nvlist_t *missing = NULL; 1680 char *thename; 1681 char *origname; 1682 int ret; 1683 int error = 0; 1684 char errbuf[1024]; 1685 1686 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1687 &origname) == 0); 1688 1689 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1690 "cannot import pool '%s'"), origname); 1691 1692 if (newname != NULL) { 1693 if (!zpool_name_valid(hdl, B_FALSE, newname)) 1694 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1695 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1696 newname)); 1697 thename = (char *)newname; 1698 } else { 1699 thename = origname; 1700 } 1701 1702 if (props) { 1703 uint64_t version; 1704 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1705 1706 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1707 &version) == 0); 1708 1709 if ((props = zpool_valid_proplist(hdl, origname, 1710 props, version, flags, errbuf)) == NULL) { 1711 return (-1); 1712 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1713 nvlist_free(props); 1714 return (-1); 1715 } 1716 } 1717 1718 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1719 1720 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1721 &zc.zc_guid) == 0); 1722 1723 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1724 nvlist_free(props); 1725 return (-1); 1726 } 1727 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1728 nvlist_free(props); 1729 return (-1); 1730 } 1731 1732 zc.zc_cookie = flags; 1733 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 1734 errno == ENOMEM) { 1735 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 1736 zcmd_free_nvlists(&zc); 1737 return (-1); 1738 } 1739 } 1740 if (ret != 0) 1741 error = errno; 1742 1743 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 1744 zpool_get_rewind_policy(config, &policy); 1745 1746 if (error) { 1747 char desc[1024]; 1748 1749 /* 1750 * Dry-run failed, but we print out what success 1751 * looks like if we found a best txg 1752 */ 1753 if (policy.zrp_request & ZPOOL_TRY_REWIND) { 1754 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1755 B_TRUE, nv); 1756 nvlist_free(nv); 1757 return (-1); 1758 } 1759 1760 if (newname == NULL) 1761 (void) snprintf(desc, sizeof (desc), 1762 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1763 thename); 1764 else 1765 (void) snprintf(desc, sizeof (desc), 1766 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1767 origname, thename); 1768 1769 switch (error) { 1770 case ENOTSUP: 1771 if (nv != NULL && nvlist_lookup_nvlist(nv, 1772 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1773 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) { 1774 (void) printf(dgettext(TEXT_DOMAIN, "This " 1775 "pool uses the following feature(s) not " 1776 "supported by this system:\n")); 1777 zpool_print_unsup_feat(nv); 1778 if (nvlist_exists(nvinfo, 1779 ZPOOL_CONFIG_CAN_RDONLY)) { 1780 (void) printf(dgettext(TEXT_DOMAIN, 1781 "All unsupported features are only " 1782 "required for writing to the pool." 1783 "\nThe pool can be imported using " 1784 "'-o readonly=on'.\n")); 1785 } 1786 } 1787 /* 1788 * Unsupported version. 
1789 */ 1790 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1791 break; 1792 1793 case EINVAL: 1794 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1795 break; 1796 1797 case EROFS: 1798 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1799 "one or more devices is read only")); 1800 (void) zfs_error(hdl, EZFS_BADDEV, desc); 1801 break; 1802 1803 case ENXIO: 1804 if (nv && nvlist_lookup_nvlist(nv, 1805 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1806 nvlist_lookup_nvlist(nvinfo, 1807 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 1808 (void) printf(dgettext(TEXT_DOMAIN, 1809 "The devices below are missing, use " 1810 "'-m' to import the pool anyway:\n")); 1811 print_vdev_tree(hdl, NULL, missing, 2); 1812 (void) printf("\n"); 1813 } 1814 (void) zpool_standard_error(hdl, error, desc); 1815 break; 1816 1817 case EEXIST: 1818 (void) zpool_standard_error(hdl, error, desc); 1819 break; 1820 1821 default: 1822 (void) zpool_standard_error(hdl, error, desc); 1823 zpool_explain_recover(hdl, 1824 newname ? origname : thename, -error, nv); 1825 break; 1826 } 1827 1828 nvlist_free(nv); 1829 ret = -1; 1830 } else { 1831 zpool_handle_t *zhp; 1832 1833 /* 1834 * This should never fail, but play it safe anyway. 1835 */ 1836 if (zpool_open_silent(hdl, thename, &zhp) != 0) 1837 ret = -1; 1838 else if (zhp != NULL) 1839 zpool_close(zhp); 1840 if (policy.zrp_request & 1841 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 1842 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1843 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv); 1844 } 1845 nvlist_free(nv); 1846 return (0); 1847 } 1848 1849 zcmd_free_nvlists(&zc); 1850 nvlist_free(props); 1851 1852 return (ret); 1853} 1854 1855/* 1856 * Scan the pool. 1857 */ 1858int 1859zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func) 1860{ 1861 zfs_cmd_t zc = { 0 }; 1862 char msg[1024]; 1863 libzfs_handle_t *hdl = zhp->zpool_hdl; 1864 1865 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1866 zc.zc_cookie = func; 1867 1868 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 || 1869 (errno == ENOENT && func != POOL_SCAN_NONE)) 1870 return (0); 1871 1872 if (func == POOL_SCAN_SCRUB) { 1873 (void) snprintf(msg, sizeof (msg), 1874 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name); 1875 } else if (func == POOL_SCAN_NONE) { 1876 (void) snprintf(msg, sizeof (msg), 1877 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 1878 zc.zc_name); 1879 } else { 1880 assert(!"unexpected result"); 1881 } 1882 1883 if (errno == EBUSY) { 1884 nvlist_t *nvroot; 1885 pool_scan_stat_t *ps = NULL; 1886 uint_t psc; 1887 1888 verify(nvlist_lookup_nvlist(zhp->zpool_config, 1889 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1890 (void) nvlist_lookup_uint64_array(nvroot, 1891 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 1892 if (ps && ps->pss_func == POOL_SCAN_SCRUB) 1893 return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 1894 else 1895 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 1896 } else if (errno == ENOENT) { 1897 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 1898 } else { 1899 return (zpool_standard_error(hdl, errno, msg)); 1900 } 1901} 1902 1903/* 1904 * This provides a very minimal check whether a given string is likely a 1905 * c#t#d# style string. Users of this are expected to do their own 1906 * verification of the s# part. 1907 */ 1908#define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1])) 1909 1910/* 1911 * More elaborate version for ones which may start with "/dev/dsk/" 1912 * and the like. 
1913 */ 1914static int 1915ctd_check_path(char *str) { 1916 /* 1917 * If it starts with a slash, check the last component. 1918 */ 1919 if (str && str[0] == '/') { 1920 char *tmp = strrchr(str, '/'); 1921 1922 /* 1923 * If it ends in "/old", check the second-to-last 1924 * component of the string instead. 1925 */ 1926 if (tmp != str && strcmp(tmp, "/old") == 0) { 1927 for (tmp--; *tmp != '/'; tmp--) 1928 ; 1929 } 1930 str = tmp + 1; 1931 } 1932 return (CTD_CHECK(str)); 1933} 1934 1935/* 1936 * Find a vdev that matches the search criteria specified. We use the 1937 * the nvpair name to determine how we should look for the device. 1938 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL 1939 * spare; but FALSE if its an INUSE spare. 1940 */ 1941static nvlist_t * 1942vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare, 1943 boolean_t *l2cache, boolean_t *log) 1944{ 1945 uint_t c, children; 1946 nvlist_t **child; 1947 nvlist_t *ret; 1948 uint64_t is_log; 1949 char *srchkey; 1950 nvpair_t *pair = nvlist_next_nvpair(search, NULL); 1951 1952 /* Nothing to look for */ 1953 if (search == NULL || pair == NULL) 1954 return (NULL); 1955 1956 /* Obtain the key we will use to search */ 1957 srchkey = nvpair_name(pair); 1958 1959 switch (nvpair_type(pair)) { 1960 case DATA_TYPE_UINT64: 1961 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) { 1962 uint64_t srchval, theguid; 1963 1964 verify(nvpair_value_uint64(pair, &srchval) == 0); 1965 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 1966 &theguid) == 0); 1967 if (theguid == srchval) 1968 return (nv); 1969 } 1970 break; 1971 1972 case DATA_TYPE_STRING: { 1973 char *srchval, *val; 1974 1975 verify(nvpair_value_string(pair, &srchval) == 0); 1976 if (nvlist_lookup_string(nv, srchkey, &val) != 0) 1977 break; 1978 1979 /* 1980 * Search for the requested value. Special cases: 1981 * 1982 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in 1983 * "s0" or "s0/old". The "s0" part is hidden from the user, 1984 * but included in the string, so this matches around it. 1985 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE). 1986 * 1987 * Otherwise, all other searches are simple string compares. 1988 */ 1989 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && 1990 ctd_check_path(val)) { 1991 uint64_t wholedisk = 0; 1992 1993 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 1994 &wholedisk); 1995 if (wholedisk) { 1996 int slen = strlen(srchval); 1997 int vlen = strlen(val); 1998 1999 if (slen != vlen - 2) 2000 break; 2001 2002 /* 2003 * make_leaf_vdev() should only set 2004 * wholedisk for ZPOOL_CONFIG_PATHs which 2005 * will include "/dev/dsk/", giving plenty of 2006 * room for the indices used next. 2007 */ 2008 ASSERT(vlen >= 6); 2009 2010 /* 2011 * strings identical except trailing "s0" 2012 */ 2013 if (strcmp(&val[vlen - 2], "s0") == 0 && 2014 strncmp(srchval, val, slen) == 0) 2015 return (nv); 2016 2017 /* 2018 * strings identical except trailing "s0/old" 2019 */ 2020 if (strcmp(&val[vlen - 6], "s0/old") == 0 && 2021 strcmp(&srchval[slen - 4], "/old") == 0 && 2022 strncmp(srchval, val, slen - 4) == 0) 2023 return (nv); 2024 2025 break; 2026 } 2027 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 2028 char *type, *idx, *end, *p; 2029 uint64_t id, vdev_id; 2030 2031 /* 2032 * Determine our vdev type, keeping in mind 2033 * that the srchval is composed of a type and 2034 * vdev id pair (i.e. mirror-4). 
2035 */ 2036 if ((type = strdup(srchval)) == NULL) 2037 return (NULL); 2038 2039 if ((p = strrchr(type, '-')) == NULL) { 2040 free(type); 2041 break; 2042 } 2043 idx = p + 1; 2044 *p = '\0'; 2045 2046 /* 2047 * If the types don't match then keep looking. 2048 */ 2049 if (strncmp(val, type, strlen(val)) != 0) { 2050 free(type); 2051 break; 2052 } 2053 2054 verify(strncmp(type, VDEV_TYPE_RAIDZ, 2055 strlen(VDEV_TYPE_RAIDZ)) == 0 || 2056 strncmp(type, VDEV_TYPE_MIRROR, 2057 strlen(VDEV_TYPE_MIRROR)) == 0); 2058 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 2059 &id) == 0); 2060 2061 errno = 0; 2062 vdev_id = strtoull(idx, &end, 10); 2063 2064 free(type); 2065 if (errno != 0) 2066 return (NULL); 2067 2068 /* 2069 * Now verify that we have the correct vdev id. 2070 */ 2071 if (vdev_id == id) 2072 return (nv); 2073 } 2074 2075 /* 2076 * Common case 2077 */ 2078 if (strcmp(srchval, val) == 0) 2079 return (nv); 2080 break; 2081 } 2082 2083 default: 2084 break; 2085 } 2086 2087 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2088 &child, &children) != 0) 2089 return (NULL); 2090 2091 for (c = 0; c < children; c++) { 2092 if ((ret = vdev_to_nvlist_iter(child[c], search, 2093 avail_spare, l2cache, NULL)) != NULL) { 2094 /* 2095 * The 'is_log' value is only set for the toplevel 2096 * vdev, not the leaf vdevs. So we always lookup the 2097 * log device from the root of the vdev tree (where 2098 * 'log' is non-NULL). 2099 */ 2100 if (log != NULL && 2101 nvlist_lookup_uint64(child[c], 2102 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 2103 is_log) { 2104 *log = B_TRUE; 2105 } 2106 return (ret); 2107 } 2108 } 2109 2110 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2111 &child, &children) == 0) { 2112 for (c = 0; c < children; c++) { 2113 if ((ret = vdev_to_nvlist_iter(child[c], search, 2114 avail_spare, l2cache, NULL)) != NULL) { 2115 *avail_spare = B_TRUE; 2116 return (ret); 2117 } 2118 } 2119 } 2120 2121 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2122 &child, &children) == 0) { 2123 for (c = 0; c < children; c++) { 2124 if ((ret = vdev_to_nvlist_iter(child[c], search, 2125 avail_spare, l2cache, NULL)) != NULL) { 2126 *l2cache = B_TRUE; 2127 return (ret); 2128 } 2129 } 2130 } 2131 2132 return (NULL); 2133} 2134 2135/* 2136 * Given a physical path (minus the "/devices" prefix), find the 2137 * associated vdev. 2138 */ 2139nvlist_t * 2140zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 2141 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 2142{ 2143 nvlist_t *search, *nvroot, *ret; 2144 2145 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2146 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); 2147 2148 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2149 &nvroot) == 0); 2150 2151 *avail_spare = B_FALSE; 2152 *l2cache = B_FALSE; 2153 if (log != NULL) 2154 *log = B_FALSE; 2155 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2156 nvlist_free(search); 2157 2158 return (ret); 2159} 2160 2161/* 2162 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 
2163 */ 2164boolean_t 2165zpool_vdev_is_interior(const char *name) 2166{ 2167 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2168 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2169 return (B_TRUE); 2170 return (B_FALSE); 2171} 2172 2173nvlist_t * 2174zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2175 boolean_t *l2cache, boolean_t *log) 2176{ 2177 char buf[MAXPATHLEN]; 2178 char *end; 2179 nvlist_t *nvroot, *search, *ret; 2180 uint64_t guid; 2181 2182 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2183 2184 guid = strtoull(path, &end, 10); 2185 if (guid != 0 && *end == '\0') { 2186 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2187 } else if (zpool_vdev_is_interior(path)) { 2188 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2189 } else if (path[0] != '/') { 2190 (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path); 2191 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 2192 } else { 2193 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2194 } 2195 2196 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2197 &nvroot) == 0); 2198 2199 *avail_spare = B_FALSE; 2200 *l2cache = B_FALSE; 2201 if (log != NULL) 2202 *log = B_FALSE; 2203 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2204 nvlist_free(search); 2205 2206 return (ret); 2207} 2208 2209static int 2210vdev_online(nvlist_t *nv) 2211{ 2212 uint64_t ival; 2213 2214 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2215 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2216 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2217 return (0); 2218 2219 return (1); 2220} 2221 2222/* 2223 * Helper function for zpool_get_physpaths(). 2224 */ 2225static int 2226vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2227 size_t *bytes_written) 2228{ 2229 size_t bytes_left, pos, rsz; 2230 char *tmppath; 2231 const char *format; 2232 2233 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2234 &tmppath) != 0) 2235 return (EZFS_NODEVICE); 2236 2237 pos = *bytes_written; 2238 bytes_left = physpath_size - pos; 2239 format = (pos == 0) ? "%s" : " %s"; 2240 2241 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2242 *bytes_written += rsz; 2243 2244 if (rsz >= bytes_left) { 2245 /* if physpath was not copied properly, clear it */ 2246 if (bytes_left != 0) { 2247 physpath[pos] = 0; 2248 } 2249 return (EZFS_NOSPC); 2250 } 2251 return (0); 2252} 2253 2254static int 2255vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2256 size_t *rsz, boolean_t is_spare) 2257{ 2258 char *type; 2259 int ret; 2260 2261 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2262 return (EZFS_INVALCONFIG); 2263 2264 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2265 /* 2266 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2267 * For a spare vdev, we only want to boot from the active 2268 * spare device. 
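		 * Children of a spare vdev that do not have
		 * ZPOOL_CONFIG_IS_SPARE set are skipped, so only the active
		 * spare contributes a physpath.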
2269		 */
2270		if (is_spare) {
2271			uint64_t spare = 0;
2272			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2273			    &spare);
2274			if (!spare)
2275				return (EZFS_INVALCONFIG);
2276		}
2277
2278		if (vdev_online(nv)) {
2279			if ((ret = vdev_get_one_physpath(nv, physpath,
2280			    phypath_size, rsz)) != 0)
2281				return (ret);
2282		}
2283	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2284	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2285	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2286		nvlist_t **child;
2287		uint_t count;
2288		int i, ret;
2289
2290		if (nvlist_lookup_nvlist_array(nv,
2291		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2292			return (EZFS_INVALCONFIG);
2293
2294		for (i = 0; i < count; i++) {
2295			ret = vdev_get_physpaths(child[i], physpath,
2296			    phypath_size, rsz, is_spare);
2297			if (ret == EZFS_NOSPC)
2298				return (ret);
2299		}
2300	}
2301
2302	return (EZFS_POOL_INVALARG);
2303}
2304
2305/*
2306 * Get phys_path for a root pool config.
2307 * Return 0 on success; non-zero on failure.
2308 */
2309static int
2310zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2311{
2312	size_t rsz;
2313	nvlist_t *vdev_root;
2314	nvlist_t **child;
2315	uint_t count;
2316	char *type;
2317
2318	rsz = 0;
2319
2320	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2321	    &vdev_root) != 0)
2322		return (EZFS_INVALCONFIG);
2323
2324	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2325	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2326	    &child, &count) != 0)
2327		return (EZFS_INVALCONFIG);
2328
2329	/*
2330	 * A root pool cannot have EFI labeled disks and can only have
2331	 * a single top-level vdev.
2332	 */
2333	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
2334	    pool_uses_efi(vdev_root))
2335		return (EZFS_POOL_INVALARG);
2336
2337	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2338	    B_FALSE);
2339
2340	/* No online devices */
2341	if (rsz == 0)
2342		return (EZFS_NODEVICE);
2343
2344	return (0);
2345}
2346
2347/*
2348 * Get phys_path for a root pool.
2349 * Return 0 on success; non-zero on failure.
2350 */
2351int
2352zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2353{
2354	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2355	    phypath_size));
2356}
2357
2358/*
2359 * If the device has been dynamically expanded then we need to relabel
2360 * the disk to use the new unallocated space.
2361 */
2362static int
2363zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
2364{
2365#ifdef sun
2366	char path[MAXPATHLEN];
2367	char errbuf[1024];
2368	int fd, error;
2369	int (*_efi_use_whole_disk)(int);
2370
2371	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2372	    "efi_use_whole_disk")) == NULL)
2373		return (-1);
2374
2375	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
2376
2377	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2378		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2379		    "relabel '%s': unable to open device"), name);
2380		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2381	}
2382
2383	/*
2384	 * It's possible that we might encounter an error if the device
2385	 * does not have any unallocated space left. If so, we simply
2386	 * ignore that error and continue on.
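	 * (efi_use_whole_disk() reports that case as VT_ENOSPC, which is
	 * ignored below.)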
2387 */ 2388 error = _efi_use_whole_disk(fd); 2389 (void) close(fd); 2390 if (error && error != VT_ENOSPC) { 2391 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2392 "relabel '%s': unable to read disk capacity"), name); 2393 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2394 } 2395#endif /* sun */ 2396 return (0); 2397} 2398 2399/* 2400 * Bring the specified vdev online. The 'flags' parameter is a set of the 2401 * ZFS_ONLINE_* flags. 2402 */ 2403int 2404zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2405 vdev_state_t *newstate) 2406{ 2407 zfs_cmd_t zc = { 0 }; 2408 char msg[1024]; 2409 nvlist_t *tgt; 2410 boolean_t avail_spare, l2cache, islog; 2411 libzfs_handle_t *hdl = zhp->zpool_hdl; 2412 2413 if (flags & ZFS_ONLINE_EXPAND) { 2414 (void) snprintf(msg, sizeof (msg), 2415 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2416 } else { 2417 (void) snprintf(msg, sizeof (msg), 2418 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2419 } 2420 2421 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2422 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2423 &islog)) == NULL) 2424 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2425 2426 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2427 2428 if (avail_spare) 2429 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2430 2431 if (flags & ZFS_ONLINE_EXPAND || 2432 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) { 2433 char *pathname = NULL; 2434 uint64_t wholedisk = 0; 2435 2436 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2437 &wholedisk); 2438 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, 2439 &pathname) == 0); 2440 2441 /* 2442 * XXX - L2ARC 1.0 devices can't support expansion. 2443 */ 2444 if (l2cache) { 2445 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2446 "cannot expand cache devices")); 2447 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2448 } 2449 2450 if (wholedisk) { 2451 pathname += strlen(DISK_ROOT) + 1; 2452 (void) zpool_relabel_disk(hdl, pathname); 2453 } 2454 } 2455 2456 zc.zc_cookie = VDEV_STATE_ONLINE; 2457 zc.zc_obj = flags; 2458 2459 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2460 if (errno == EINVAL) { 2461 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2462 "from this pool into a new one. Use '%s' " 2463 "instead"), "zpool detach"); 2464 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2465 } 2466 return (zpool_standard_error(hdl, errno, msg)); 2467 } 2468 2469 *newstate = zc.zc_cookie; 2470 return (0); 2471} 2472 2473/* 2474 * Take the specified vdev offline 2475 */ 2476int 2477zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2478{ 2479 zfs_cmd_t zc = { 0 }; 2480 char msg[1024]; 2481 nvlist_t *tgt; 2482 boolean_t avail_spare, l2cache; 2483 libzfs_handle_t *hdl = zhp->zpool_hdl; 2484 2485 (void) snprintf(msg, sizeof (msg), 2486 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2487 2488 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2489 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2490 NULL)) == NULL) 2491 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2492 2493 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2494 2495 if (avail_spare) 2496 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2497 2498 zc.zc_cookie = VDEV_STATE_OFFLINE; 2499 zc.zc_obj = istmp ? 
ZFS_OFFLINE_TEMPORARY : 0; 2500 2501 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2502 return (0); 2503 2504 switch (errno) { 2505 case EBUSY: 2506 2507 /* 2508 * There are no other replicas of this device. 2509 */ 2510 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2511 2512 case EEXIST: 2513 /* 2514 * The log device has unplayed logs 2515 */ 2516 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2517 2518 default: 2519 return (zpool_standard_error(hdl, errno, msg)); 2520 } 2521} 2522 2523/* 2524 * Mark the given vdev faulted. 2525 */ 2526int 2527zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2528{ 2529 zfs_cmd_t zc = { 0 }; 2530 char msg[1024]; 2531 libzfs_handle_t *hdl = zhp->zpool_hdl; 2532 2533 (void) snprintf(msg, sizeof (msg), 2534 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2535 2536 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2537 zc.zc_guid = guid; 2538 zc.zc_cookie = VDEV_STATE_FAULTED; 2539 zc.zc_obj = aux; 2540 2541 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2542 return (0); 2543 2544 switch (errno) { 2545 case EBUSY: 2546 2547 /* 2548 * There are no other replicas of this device. 2549 */ 2550 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2551 2552 default: 2553 return (zpool_standard_error(hdl, errno, msg)); 2554 } 2555 2556} 2557 2558/* 2559 * Mark the given vdev degraded. 2560 */ 2561int 2562zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2563{ 2564 zfs_cmd_t zc = { 0 }; 2565 char msg[1024]; 2566 libzfs_handle_t *hdl = zhp->zpool_hdl; 2567 2568 (void) snprintf(msg, sizeof (msg), 2569 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2570 2571 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2572 zc.zc_guid = guid; 2573 zc.zc_cookie = VDEV_STATE_DEGRADED; 2574 zc.zc_obj = aux; 2575 2576 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2577 return (0); 2578 2579 return (zpool_standard_error(hdl, errno, msg)); 2580} 2581 2582/* 2583 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2584 * a hot spare. 2585 */ 2586static boolean_t 2587is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2588{ 2589 nvlist_t **child; 2590 uint_t c, children; 2591 char *type; 2592 2593 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2594 &children) == 0) { 2595 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2596 &type) == 0); 2597 2598 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2599 children == 2 && child[which] == tgt) 2600 return (B_TRUE); 2601 2602 for (c = 0; c < children; c++) 2603 if (is_replacing_spare(child[c], tgt, which)) 2604 return (B_TRUE); 2605 } 2606 2607 return (B_FALSE); 2608} 2609 2610/* 2611 * Attach new_disk (fully described by nvroot) to old_disk. 2612 * If 'replacing' is specified, the new disk will replace the old one. 
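 * Otherwise the new disk is attached alongside old_disk to form (or extend)
 * a mirror. Either way the request is issued through ZFS_IOC_VDEV_ATTACH,
 * with 'replacing' passed to the kernel in zc_cookie.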
2613 */ 2614int 2615zpool_vdev_attach(zpool_handle_t *zhp, 2616 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2617{ 2618 zfs_cmd_t zc = { 0 }; 2619 char msg[1024]; 2620 int ret; 2621 nvlist_t *tgt; 2622 boolean_t avail_spare, l2cache, islog; 2623 uint64_t val; 2624 char *newname; 2625 nvlist_t **child; 2626 uint_t children; 2627 nvlist_t *config_root; 2628 libzfs_handle_t *hdl = zhp->zpool_hdl; 2629 boolean_t rootpool = zpool_is_bootable(zhp); 2630 2631 if (replacing) 2632 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2633 "cannot replace %s with %s"), old_disk, new_disk); 2634 else 2635 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2636 "cannot attach %s to %s"), new_disk, old_disk); 2637 2638 /* 2639 * If this is a root pool, make sure that we're not attaching an 2640 * EFI labeled device. 2641 */ 2642 if (rootpool && pool_uses_efi(nvroot)) { 2643 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2644 "EFI labeled devices are not supported on root pools.")); 2645 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 2646 } 2647 2648 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2649 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2650 &islog)) == 0) 2651 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2652 2653 if (avail_spare) 2654 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2655 2656 if (l2cache) 2657 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2658 2659 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2660 zc.zc_cookie = replacing; 2661 2662 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2663 &child, &children) != 0 || children != 1) { 2664 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2665 "new device must be a single disk")); 2666 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2667 } 2668 2669 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2670 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2671 2672 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2673 return (-1); 2674 2675 /* 2676 * If the target is a hot spare that has been swapped in, we can only 2677 * replace it with another hot spare. 2678 */ 2679 if (replacing && 2680 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2681 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2682 NULL) == NULL || !avail_spare) && 2683 is_replacing_spare(config_root, tgt, 1)) { 2684 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2685 "can only be replaced by another hot spare")); 2686 free(newname); 2687 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2688 } 2689 2690 free(newname); 2691 2692 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2693 return (-1); 2694 2695 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2696 2697 zcmd_free_nvlists(&zc); 2698 2699 if (ret == 0) { 2700 if (rootpool) { 2701 /* 2702 * XXX need a better way to prevent user from 2703 * booting up a half-baked vdev. 
2704 */ 2705 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2706 "sure to wait until resilver is done " 2707 "before rebooting.\n")); 2708 (void) fprintf(stderr, "\n"); 2709 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If " 2710 "you boot from pool '%s', you may need to update\n" 2711 "boot code on newly attached disk '%s'.\n\n" 2712 "Assuming you use GPT partitioning and 'da0' is " 2713 "your new boot disk\n" 2714 "you may use the following command:\n\n" 2715 "\tgpart bootcode -b /boot/pmbr -p " 2716 "/boot/gptzfsboot -i 1 da0\n\n"), 2717 zhp->zpool_name, new_disk); 2718 } 2719 return (0); 2720 } 2721 2722 switch (errno) { 2723 case ENOTSUP: 2724 /* 2725 * Can't attach to or replace this type of vdev. 2726 */ 2727 if (replacing) { 2728 uint64_t version = zpool_get_prop_int(zhp, 2729 ZPOOL_PROP_VERSION, NULL); 2730 2731 if (islog) 2732 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2733 "cannot replace a log with a spare")); 2734 else if (version >= SPA_VERSION_MULTI_REPLACE) 2735 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2736 "already in replacing/spare config; wait " 2737 "for completion or use 'zpool detach'")); 2738 else 2739 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2740 "cannot replace a replacing device")); 2741 } else { 2742 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2743 "can only attach to mirrors and top-level " 2744 "disks")); 2745 } 2746 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2747 break; 2748 2749 case EINVAL: 2750 /* 2751 * The new device must be a single disk. 2752 */ 2753 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2754 "new device must be a single disk")); 2755 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2756 break; 2757 2758 case EBUSY: 2759 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2760 new_disk); 2761 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2762 break; 2763 2764 case EOVERFLOW: 2765 /* 2766 * The new device is too small. 2767 */ 2768 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2769 "device is too small")); 2770 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2771 break; 2772 2773 case EDOM: 2774 /* 2775 * The new device has a different alignment requirement. 2776 */ 2777 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2778 "devices have different sector alignment")); 2779 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2780 break; 2781 2782 case ENAMETOOLONG: 2783 /* 2784 * The resulting top-level vdev spec won't fit in the label. 2785 */ 2786 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2787 break; 2788 2789 default: 2790 (void) zpool_standard_error(hdl, errno, msg); 2791 } 2792 2793 return (-1); 2794} 2795 2796/* 2797 * Detach the specified device. 
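 * The kernel rejects detach requests for vdev types that do not support it
 * with ENOTSUP, which is mapped to EZFS_BADTARGET below.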
2798 */ 2799int 2800zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 2801{ 2802 zfs_cmd_t zc = { 0 }; 2803 char msg[1024]; 2804 nvlist_t *tgt; 2805 boolean_t avail_spare, l2cache; 2806 libzfs_handle_t *hdl = zhp->zpool_hdl; 2807 2808 (void) snprintf(msg, sizeof (msg), 2809 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 2810 2811 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2812 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2813 NULL)) == 0) 2814 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2815 2816 if (avail_spare) 2817 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2818 2819 if (l2cache) 2820 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2821 2822 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2823 2824 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 2825 return (0); 2826 2827 switch (errno) { 2828 2829 case ENOTSUP: 2830 /* 2831 * Can't detach from this type of vdev. 2832 */ 2833 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 2834 "applicable to mirror and replacing vdevs")); 2835 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2836 break; 2837 2838 case EBUSY: 2839 /* 2840 * There are no other replicas of this device. 2841 */ 2842 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 2843 break; 2844 2845 default: 2846 (void) zpool_standard_error(hdl, errno, msg); 2847 } 2848 2849 return (-1); 2850} 2851 2852/* 2853 * Find a mirror vdev in the source nvlist. 2854 * 2855 * The mchild array contains a list of disks in one of the top-level mirrors 2856 * of the source pool. The schild array contains a list of disks that the 2857 * user specified on the command line. We loop over the mchild array to 2858 * see if any entry in the schild array matches. 2859 * 2860 * If a disk in the mchild array is found in the schild array, we return 2861 * the index of that entry. Otherwise we return -1. 2862 */ 2863static int 2864find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2865 nvlist_t **schild, uint_t schildren) 2866{ 2867 uint_t mc; 2868 2869 for (mc = 0; mc < mchildren; mc++) { 2870 uint_t sc; 2871 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2872 mchild[mc], B_FALSE); 2873 2874 for (sc = 0; sc < schildren; sc++) { 2875 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2876 schild[sc], B_FALSE); 2877 boolean_t result = (strcmp(mpath, spath) == 0); 2878 2879 free(spath); 2880 if (result) { 2881 free(mpath); 2882 return (mc); 2883 } 2884 } 2885 2886 free(mpath); 2887 } 2888 2889 return (-1); 2890} 2891 2892/* 2893 * Split a mirror pool. If newroot points to null, then a new nvlist 2894 * is generated and it is the responsibility of the caller to free it. 
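 * If a non-NULL *newroot is passed in, its ZPOOL_CONFIG_CHILDREN array names
 * the disks the caller wants placed in the new pool; any mirror not covered
 * by that list contributes its last child by default.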
2895 */ 2896int 2897zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2898 nvlist_t *props, splitflags_t flags) 2899{ 2900 zfs_cmd_t zc = { 0 }; 2901 char msg[1024]; 2902 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2903 nvlist_t **varray = NULL, *zc_props = NULL; 2904 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2905 libzfs_handle_t *hdl = zhp->zpool_hdl; 2906 uint64_t vers; 2907 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2908 int retval = 0; 2909 2910 (void) snprintf(msg, sizeof (msg), 2911 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2912 2913 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2914 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2915 2916 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2917 (void) fprintf(stderr, gettext("Internal error: unable to " 2918 "retrieve pool configuration\n")); 2919 return (-1); 2920 } 2921 2922 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2923 == 0); 2924 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2925 2926 if (props) { 2927 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 2928 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2929 props, vers, flags, msg)) == NULL) 2930 return (-1); 2931 } 2932 2933 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2934 &children) != 0) { 2935 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2936 "Source pool is missing vdev tree")); 2937 if (zc_props) 2938 nvlist_free(zc_props); 2939 return (-1); 2940 } 2941 2942 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2943 vcount = 0; 2944 2945 if (*newroot == NULL || 2946 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2947 &newchild, &newchildren) != 0) 2948 newchildren = 0; 2949 2950 for (c = 0; c < children; c++) { 2951 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2952 char *type; 2953 nvlist_t **mchild, *vdev; 2954 uint_t mchildren; 2955 int entry; 2956 2957 /* 2958 * Unlike cache & spares, slogs are stored in the 2959 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2960 */ 2961 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2962 &is_log); 2963 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2964 &is_hole); 2965 if (is_log || is_hole) { 2966 /* 2967 * Create a hole vdev and put it in the config. 2968 */ 2969 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2970 goto out; 2971 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2972 VDEV_TYPE_HOLE) != 0) 2973 goto out; 2974 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2975 1) != 0) 2976 goto out; 2977 if (lastlog == 0) 2978 lastlog = vcount; 2979 varray[vcount++] = vdev; 2980 continue; 2981 } 2982 lastlog = 0; 2983 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 2984 == 0); 2985 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 2986 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2987 "Source pool must be composed only of mirrors\n")); 2988 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2989 goto out; 2990 } 2991 2992 verify(nvlist_lookup_nvlist_array(child[c], 2993 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2994 2995 /* find or add an entry for this top-level vdev */ 2996 if (newchildren > 0 && 2997 (entry = find_vdev_entry(zhp, mchild, mchildren, 2998 newchild, newchildren)) >= 0) { 2999 /* We found a disk that the user specified. */ 3000 vdev = mchild[entry]; 3001 ++found; 3002 } else { 3003 /* User didn't specify a disk for this vdev. 
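			 * Default to the last child of this mirror.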
*/ 3004 vdev = mchild[mchildren - 1]; 3005 } 3006 3007 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 3008 goto out; 3009 } 3010 3011 /* did we find every disk the user specified? */ 3012 if (found != newchildren) { 3013 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 3014 "include at most one disk from each mirror")); 3015 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3016 goto out; 3017 } 3018 3019 /* Prepare the nvlist for populating. */ 3020 if (*newroot == NULL) { 3021 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 3022 goto out; 3023 freelist = B_TRUE; 3024 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 3025 VDEV_TYPE_ROOT) != 0) 3026 goto out; 3027 } else { 3028 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 3029 } 3030 3031 /* Add all the children we found */ 3032 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 3033 lastlog == 0 ? vcount : lastlog) != 0) 3034 goto out; 3035 3036 /* 3037 * If we're just doing a dry run, exit now with success. 3038 */ 3039 if (flags.dryrun) { 3040 memory_err = B_FALSE; 3041 freelist = B_FALSE; 3042 goto out; 3043 } 3044 3045 /* now build up the config list & call the ioctl */ 3046 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3047 goto out; 3048 3049 if (nvlist_add_nvlist(newconfig, 3050 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3051 nvlist_add_string(newconfig, 3052 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3053 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3054 goto out; 3055 3056 /* 3057 * The new pool is automatically part of the namespace unless we 3058 * explicitly export it. 3059 */ 3060 if (!flags.import) 3061 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 3062 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3063 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 3064 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 3065 goto out; 3066 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 3067 goto out; 3068 3069 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 3070 retval = zpool_standard_error(hdl, errno, msg); 3071 goto out; 3072 } 3073 3074 freelist = B_FALSE; 3075 memory_err = B_FALSE; 3076 3077out: 3078 if (varray != NULL) { 3079 int v; 3080 3081 for (v = 0; v < vcount; v++) 3082 nvlist_free(varray[v]); 3083 free(varray); 3084 } 3085 zcmd_free_nvlists(&zc); 3086 if (zc_props) 3087 nvlist_free(zc_props); 3088 if (newconfig) 3089 nvlist_free(newconfig); 3090 if (freelist) { 3091 nvlist_free(*newroot); 3092 *newroot = NULL; 3093 } 3094 3095 if (retval != 0) 3096 return (retval); 3097 3098 if (memory_err) 3099 return (no_memory(hdl)); 3100 3101 return (0); 3102} 3103 3104/* 3105 * Remove the given device. Currently, this is supported only for hot spares 3106 * and level 2 cache devices. 3107 */ 3108int 3109zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3110{ 3111 zfs_cmd_t zc = { 0 }; 3112 char msg[1024]; 3113 nvlist_t *tgt; 3114 boolean_t avail_spare, l2cache, islog; 3115 libzfs_handle_t *hdl = zhp->zpool_hdl; 3116 uint64_t version; 3117 3118 (void) snprintf(msg, sizeof (msg), 3119 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3120 3121 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3122 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3123 &islog)) == 0) 3124 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3125 /* 3126 * XXX - this should just go away. 
3127	 */
3128	if (!avail_spare && !l2cache && !islog) {
3129		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3130		    "only inactive hot spares, cache, top-level, "
3131		    "or log devices can be removed"));
3132		return (zfs_error(hdl, EZFS_NODEVICE, msg));
3133	}
3134
3135	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3136	if (islog && version < SPA_VERSION_HOLES) {
3137		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3138		    "pool must be upgraded to support log removal"));
3139		return (zfs_error(hdl, EZFS_BADVERSION, msg));
3140	}
3141
3142	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3143
3144	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3145		return (0);
3146
3147	return (zpool_standard_error(hdl, errno, msg));
3148}
3149
3150/*
3151 * Clear the errors for the pool, or the particular device if specified.
3152 */
3153int
3154zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3155{
3156	zfs_cmd_t zc = { 0 };
3157	char msg[1024];
3158	nvlist_t *tgt;
3159	zpool_rewind_policy_t policy;
3160	boolean_t avail_spare, l2cache;
3161	libzfs_handle_t *hdl = zhp->zpool_hdl;
3162	nvlist_t *nvi = NULL;
3163	int error;
3164
3165	if (path)
3166		(void) snprintf(msg, sizeof (msg),
3167		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3168		    path);
3169	else
3170		(void) snprintf(msg, sizeof (msg),
3171		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3172		    zhp->zpool_name);
3173
3174	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3175	if (path) {
3176		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3177		    &l2cache, NULL)) == 0)
3178			return (zfs_error(hdl, EZFS_NODEVICE, msg));
3179
3180		/*
3181		 * Don't allow error clearing for hot spares. Do allow
3182		 * error clearing for l2cache devices.
3183		 */
3184		if (avail_spare)
3185			return (zfs_error(hdl, EZFS_ISSPARE, msg));
3186
3187		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3188		    &zc.zc_guid) == 0);
3189	}
3190
3191	zpool_get_rewind_policy(rewindnvl, &policy);
3192	zc.zc_cookie = policy.zrp_request;
3193
3194	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3195		return (-1);
3196
3197	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3198		return (-1);
3199
3200	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3201	    errno == ENOMEM) {
3202		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3203			zcmd_free_nvlists(&zc);
3204			return (-1);
3205		}
3206	}
3207
3208	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3209	    errno != EPERM && errno != EACCES)) {
3210		if (policy.zrp_request &
3211		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3212			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3213			zpool_rewind_exclaim(hdl, zc.zc_name,
3214			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3215			    nvi);
3216			nvlist_free(nvi);
3217		}
3218		zcmd_free_nvlists(&zc);
3219		return (0);
3220	}
3221
3222	zcmd_free_nvlists(&zc);
3223	return (zpool_standard_error(hdl, errno, msg));
3224}
3225
3226/*
3227 * Similar to zpool_clear(), but takes a GUID (used by fmd).
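 * No rewind is attempted here; zc_cookie is set to ZPOOL_NO_REWIND.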
3228 */ 3229int 3230zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3231{ 3232 zfs_cmd_t zc = { 0 }; 3233 char msg[1024]; 3234 libzfs_handle_t *hdl = zhp->zpool_hdl; 3235 3236 (void) snprintf(msg, sizeof (msg), 3237 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3238 guid); 3239 3240 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3241 zc.zc_guid = guid; 3242 zc.zc_cookie = ZPOOL_NO_REWIND; 3243 3244 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3245 return (0); 3246 3247 return (zpool_standard_error(hdl, errno, msg)); 3248} 3249 3250/* 3251 * Change the GUID for a pool. 3252 */ 3253int 3254zpool_reguid(zpool_handle_t *zhp) 3255{ 3256 char msg[1024]; 3257 libzfs_handle_t *hdl = zhp->zpool_hdl; 3258 zfs_cmd_t zc = { 0 }; 3259 3260 (void) snprintf(msg, sizeof (msg), 3261 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3262 3263 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3264 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3265 return (0); 3266 3267 return (zpool_standard_error(hdl, errno, msg)); 3268} 3269 3270/* 3271 * Reopen the pool. 3272 */ 3273int 3274zpool_reopen(zpool_handle_t *zhp) 3275{ 3276 zfs_cmd_t zc = { 0 }; 3277 char msg[1024]; 3278 libzfs_handle_t *hdl = zhp->zpool_hdl; 3279 3280 (void) snprintf(msg, sizeof (msg), 3281 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), 3282 zhp->zpool_name); 3283 3284 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3285 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0) 3286 return (0); 3287 return (zpool_standard_error(hdl, errno, msg)); 3288} 3289 3290/* 3291 * Convert from a devid string to a path. 3292 */ 3293static char * 3294devid_to_path(char *devid_str) 3295{ 3296 ddi_devid_t devid; 3297 char *minor; 3298 char *path; 3299 devid_nmlist_t *list = NULL; 3300 int ret; 3301 3302 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3303 return (NULL); 3304 3305 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3306 3307 devid_str_free(minor); 3308 devid_free(devid); 3309 3310 if (ret != 0) 3311 return (NULL); 3312 3313 if ((path = strdup(list[0].devname)) == NULL) 3314 return (NULL); 3315 3316 devid_free_nmlist(list); 3317 3318 return (path); 3319} 3320 3321/* 3322 * Convert from a path to a devid string. 3323 */ 3324static char * 3325path_to_devid(const char *path) 3326{ 3327 int fd; 3328 ddi_devid_t devid; 3329 char *minor, *ret; 3330 3331 if ((fd = open(path, O_RDONLY)) < 0) 3332 return (NULL); 3333 3334 minor = NULL; 3335 ret = NULL; 3336 if (devid_get(fd, &devid) == 0) { 3337 if (devid_get_minor_name(fd, &minor) == 0) 3338 ret = devid_str_encode(devid, minor); 3339 if (minor != NULL) 3340 devid_str_free(minor); 3341 devid_free(devid); 3342 } 3343 (void) close(fd); 3344 3345 return (ret); 3346} 3347 3348/* 3349 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3350 * ignore any failure here, since a common case is for an unprivileged user to 3351 * type 'zpool status', and we'll display the correct information anyway. 3352 */ 3353static void 3354set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3355{ 3356 zfs_cmd_t zc = { 0 }; 3357 3358 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3359 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3360 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3361 &zc.zc_guid) == 0); 3362 3363 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3364} 3365 3366/* 3367 * Given a vdev, return the name to display in iostat. 
If the vdev has a path, 3368 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3369 * We also check if this is a whole disk, in which case we strip off the 3370 * trailing 's0' slice name. 3371 * 3372 * This routine is also responsible for identifying when disks have been 3373 * reconfigured in a new location. The kernel will have opened the device by 3374 * devid, but the path will still refer to the old location. To catch this, we 3375 * first do a path -> devid translation (which is fast for the common case). If 3376 * the devid matches, we're done. If not, we do a reverse devid -> path 3377 * translation and issue the appropriate ioctl() to update the path of the vdev. 3378 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3379 * of these checks. 3380 */ 3381char * 3382zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3383 boolean_t verbose) 3384{ 3385 char *path, *devid; 3386 uint64_t value; 3387 char buf[64]; 3388 vdev_stat_t *vs; 3389 uint_t vsc; 3390 int have_stats; 3391 int have_path; 3392 3393 have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3394 (uint64_t **)&vs, &vsc) == 0; 3395 have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0; 3396 3397 /* 3398 * If the device is not currently present, assume it will not 3399 * come back at the same device path. Display the device by GUID. 3400 */ 3401 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 || 3402 have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN) { 3403 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3404 &value) == 0); 3405 (void) snprintf(buf, sizeof (buf), "%llu", 3406 (u_longlong_t)value); 3407 path = buf; 3408 } else if (have_path) { 3409 3410 /* 3411 * If the device is dead (faulted, offline, etc) then don't 3412 * bother opening it. Otherwise we may be forcing the user to 3413 * open a misbehaving device, which can have undesirable 3414 * effects. 3415 */ 3416 if ((have_stats == 0 || 3417 vs->vs_state >= VDEV_STATE_DEGRADED) && 3418 zhp != NULL && 3419 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3420 /* 3421 * Determine if the current path is correct. 3422 */ 3423 char *newdevid = path_to_devid(path); 3424 3425 if (newdevid == NULL || 3426 strcmp(devid, newdevid) != 0) { 3427 char *newpath; 3428 3429 if ((newpath = devid_to_path(devid)) != NULL) { 3430 /* 3431 * Update the path appropriately. 3432 */ 3433 set_path(zhp, nv, newpath); 3434 if (nvlist_add_string(nv, 3435 ZPOOL_CONFIG_PATH, newpath) == 0) 3436 verify(nvlist_lookup_string(nv, 3437 ZPOOL_CONFIG_PATH, 3438 &path) == 0); 3439 free(newpath); 3440 } 3441 } 3442 3443 if (newdevid) 3444 devid_str_free(newdevid); 3445 } 3446 3447#ifdef sun 3448 if (strncmp(path, "/dev/dsk/", 9) == 0) 3449 path += 9; 3450 3451 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3452 &value) == 0 && value) { 3453 int pathlen = strlen(path); 3454 char *tmp = zfs_strdup(hdl, path); 3455 3456 /* 3457 * If it starts with c#, and ends with "s0", chop 3458 * the "s0" off, or if it ends with "s0/old", remove 3459 * the "s0" from the middle. 
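			 * For example, "c0t0d0s0" is displayed as "c0t0d0"
			 * and "c0t0d0s0/old" as "c0t0d0/old".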
3460			 */
3461			if (CTD_CHECK(tmp)) {
3462				if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
3463					tmp[pathlen - 2] = '\0';
3464				} else if (pathlen > 6 &&
3465				    strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
3466					(void) strcpy(&tmp[pathlen - 6],
3467					    "/old");
3468				}
3469			}
3470			return (tmp);
3471		}
3472#else	/* !sun */
3473		if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
3474			path += sizeof(_PATH_DEV) - 1;
3475#endif	/* !sun */
3476	} else {
3477		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3478
3479		/*
3480		 * If it's a raidz device, we need to stick in the parity level.
3481		 */
3482		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3483			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3484			    &value) == 0);
3485			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
3486			    (u_longlong_t)value);
3487			path = buf;
3488		}
3489
3490		/*
3491		 * We identify each top-level vdev by using a <type-id>
3492		 * naming convention.
3493		 */
3494		if (verbose) {
3495			uint64_t id;
3496
3497			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3498			    &id) == 0);
3499			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3500			    (u_longlong_t)id);
3501			path = buf;
3502		}
3503	}
3504
3505	return (zfs_strdup(hdl, path));
3506}
3507
3508static int
3509zbookmark_compare(const void *a, const void *b)
3510{
3511	return (memcmp(a, b, sizeof (zbookmark_t)));
3512}
3513
3514/*
3515 * Retrieve the persistent error log, uniquify the members, and return to the
3516 * caller.
3517 */
3518int
3519zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3520{
3521	zfs_cmd_t zc = { 0 };
3522	uint64_t count;
3523	zbookmark_t *zb = NULL;
3524	int i;
3525
3526	/*
3527	 * Retrieve the raw error list from the kernel. If the number of errors
3528	 * has increased, allocate more space and continue until we get the
3529	 * entire list.
3530	 */
3531	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3532	    &count) == 0);
3533	if (count == 0)
3534		return (0);
3535	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3536	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3537		return (-1);
3538	zc.zc_nvlist_dst_size = count;
3539	(void) strcpy(zc.zc_name, zhp->zpool_name);
3540	for (;;) {
3541		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3542		    &zc) != 0) {
3543			free((void *)(uintptr_t)zc.zc_nvlist_dst);
3544			if (errno == ENOMEM) {
3545				count = zc.zc_nvlist_dst_size;
3546				if ((zc.zc_nvlist_dst = (uintptr_t)
3547				    zfs_alloc(zhp->zpool_hdl, count *
3548				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
3549					return (-1);
3550			} else {
3551				return (-1);
3552			}
3553		} else {
3554			break;
3555		}
3556	}
3557
3558	/*
3559	 * Sort the resulting bookmarks. This is a little confusing due to the
3560	 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3561	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3562	 * _not_ copied as part of the process. So we point the start of our
3563	 * array appropriately and decrement the total number of elements.
3564	 */
3565	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3566	    zc.zc_nvlist_dst_size;
3567	count -= zc.zc_nvlist_dst_size;
3568
3569	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3570
3571	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3572
3573	/*
3574	 * Fill in the nverrlistp with nvlists of dataset and object numbers.
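	 * Consecutive bookmarks that refer to the same dataset and object are
	 * collapsed into a single entry.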
3575 */ 3576 for (i = 0; i < count; i++) { 3577 nvlist_t *nv; 3578 3579 /* ignoring zb_blkid and zb_level for now */ 3580 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 3581 zb[i-1].zb_object == zb[i].zb_object) 3582 continue; 3583 3584 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 3585 goto nomem; 3586 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 3587 zb[i].zb_objset) != 0) { 3588 nvlist_free(nv); 3589 goto nomem; 3590 } 3591 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 3592 zb[i].zb_object) != 0) { 3593 nvlist_free(nv); 3594 goto nomem; 3595 } 3596 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 3597 nvlist_free(nv); 3598 goto nomem; 3599 } 3600 nvlist_free(nv); 3601 } 3602 3603 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3604 return (0); 3605 3606nomem: 3607 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3608 return (no_memory(zhp->zpool_hdl)); 3609} 3610 3611/* 3612 * Upgrade a ZFS pool to the latest on-disk version. 3613 */ 3614int 3615zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 3616{ 3617 zfs_cmd_t zc = { 0 }; 3618 libzfs_handle_t *hdl = zhp->zpool_hdl; 3619 3620 (void) strcpy(zc.zc_name, zhp->zpool_name); 3621 zc.zc_cookie = new_version; 3622 3623 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 3624 return (zpool_standard_error_fmt(hdl, errno, 3625 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 3626 zhp->zpool_name)); 3627 return (0); 3628} 3629 3630void 3631zfs_save_arguments(int argc, char **argv, char *string, int len) 3632{ 3633 (void) strlcpy(string, basename(argv[0]), len); 3634 for (int i = 1; i < argc; i++) { 3635 (void) strlcat(string, " ", len); 3636 (void) strlcat(string, argv[i], len); 3637 } 3638} 3639 3640int 3641zpool_log_history(libzfs_handle_t *hdl, const char *message) 3642{ 3643 zfs_cmd_t zc = { 0 }; 3644 nvlist_t *args; 3645 int err; 3646 3647 args = fnvlist_alloc(); 3648 fnvlist_add_string(args, "message", message); 3649 err = zcmd_write_src_nvlist(hdl, &zc, args); 3650 if (err == 0) 3651 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc); 3652 nvlist_free(args); 3653 zcmd_free_nvlists(&zc); 3654 return (err); 3655} 3656 3657/* 3658 * Perform ioctl to get some command history of a pool. 3659 * 3660 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 3661 * logical offset of the history buffer to start reading from. 3662 * 3663 * Upon return, 'off' is the next logical offset to read from and 3664 * 'len' is the actual amount of bytes read into 'buf'. 
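 * A *len of zero on return indicates the end of the history log.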
3665 */ 3666static int 3667get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3668{ 3669 zfs_cmd_t zc = { 0 }; 3670 libzfs_handle_t *hdl = zhp->zpool_hdl; 3671 3672 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3673 3674 zc.zc_history = (uint64_t)(uintptr_t)buf; 3675 zc.zc_history_len = *len; 3676 zc.zc_history_offset = *off; 3677 3678 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3679 switch (errno) { 3680 case EPERM: 3681 return (zfs_error_fmt(hdl, EZFS_PERM, 3682 dgettext(TEXT_DOMAIN, 3683 "cannot show history for pool '%s'"), 3684 zhp->zpool_name)); 3685 case ENOENT: 3686 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3687 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3688 "'%s'"), zhp->zpool_name)); 3689 case ENOTSUP: 3690 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3691 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3692 "'%s', pool must be upgraded"), zhp->zpool_name)); 3693 default: 3694 return (zpool_standard_error_fmt(hdl, errno, 3695 dgettext(TEXT_DOMAIN, 3696 "cannot get history for '%s'"), zhp->zpool_name)); 3697 } 3698 } 3699 3700 *len = zc.zc_history_len; 3701 *off = zc.zc_history_offset; 3702 3703 return (0); 3704} 3705 3706/* 3707 * Process the buffer of nvlists, unpacking and storing each nvlist record 3708 * into 'records'. 'leftover' is set to the number of bytes that weren't 3709 * processed as there wasn't a complete record. 3710 */ 3711int 3712zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3713 nvlist_t ***records, uint_t *numrecords) 3714{ 3715 uint64_t reclen; 3716 nvlist_t *nv; 3717 int i; 3718 3719 while (bytes_read > sizeof (reclen)) { 3720 3721 /* get length of packed record (stored as little endian) */ 3722 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3723 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3724 3725 if (bytes_read < sizeof (reclen) + reclen) 3726 break; 3727 3728 /* unpack record */ 3729 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3730 return (ENOMEM); 3731 bytes_read -= sizeof (reclen) + reclen; 3732 buf += sizeof (reclen) + reclen; 3733 3734 /* add record to nvlist array */ 3735 (*numrecords)++; 3736 if (ISP2(*numrecords + 1)) { 3737 *records = realloc(*records, 3738 *numrecords * 2 * sizeof (nvlist_t *)); 3739 } 3740 (*records)[*numrecords - 1] = nv; 3741 } 3742 3743 *leftover = bytes_read; 3744 return (0); 3745} 3746 3747/* from spa_history.c: spa_history_create_obj() */ 3748#define HIS_BUF_LEN_DEF (128 << 10) 3749#define HIS_BUF_LEN_MAX (1 << 30) 3750 3751/* 3752 * Retrieve the command history of a pool. 3753 */ 3754int 3755zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3756{ 3757 char *buf = NULL; 3758 uint64_t bufsize = HIS_BUF_LEN_DEF; 3759 uint64_t off = 0; 3760 nvlist_t **records = NULL; 3761 uint_t numrecords = 0; 3762 int err, i; 3763 3764 if ((buf = malloc(bufsize)) == NULL) 3765 return (ENOMEM); 3766 do { 3767 uint64_t bytes_read = bufsize; 3768 uint64_t leftover; 3769 3770 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3771 break; 3772 3773 /* if nothing else was read in, we're at EOF, just return */ 3774 if (bytes_read == 0) 3775 break; 3776 3777 if ((err = zpool_history_unpack(buf, bytes_read, 3778 &leftover, &records, &numrecords)) != 0) 3779 break; 3780 off -= leftover; 3781 3782 /* 3783 * If the history block is too big, double the buffer 3784 * size and try again. 
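		 * Growth is capped at HIS_BUF_LEN_MAX; if even that is not
		 * enough (or the allocation fails), ENOMEM is returned.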
3785 */ 3786 if (leftover == bytes_read) { 3787 free(buf); 3788 buf = NULL; 3789 3790 bufsize <<= 1; 3791 if ((bufsize >= HIS_BUF_LEN_MAX) || 3792 ((buf = malloc(bufsize)) == NULL)) { 3793 err = ENOMEM; 3794 break; 3795 } 3796 } 3797 3798 /* CONSTCOND */ 3799 } while (1); 3800 free(buf); 3801 3802 if (!err) { 3803 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3804 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3805 records, numrecords) == 0); 3806 } 3807 for (i = 0; i < numrecords; i++) 3808 nvlist_free(records[i]); 3809 free(records); 3810 3811 return (err); 3812} 3813 3814void 3815zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3816 char *pathname, size_t len) 3817{ 3818 zfs_cmd_t zc = { 0 }; 3819 boolean_t mounted = B_FALSE; 3820 char *mntpnt = NULL; 3821 char dsname[MAXNAMELEN]; 3822 3823 if (dsobj == 0) { 3824 /* special case for the MOS */ 3825 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3826 return; 3827 } 3828 3829 /* get the dataset's name */ 3830 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3831 zc.zc_obj = dsobj; 3832 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3833 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3834 /* just write out a path of two object numbers */ 3835 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3836 dsobj, obj); 3837 return; 3838 } 3839 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3840 3841 /* find out if the dataset is mounted */ 3842 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3843 3844 /* get the corrupted object's path */ 3845 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 3846 zc.zc_obj = obj; 3847 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3848 &zc) == 0) { 3849 if (mounted) { 3850 (void) snprintf(pathname, len, "%s%s", mntpnt, 3851 zc.zc_value); 3852 } else { 3853 (void) snprintf(pathname, len, "%s:%s", 3854 dsname, zc.zc_value); 3855 } 3856 } else { 3857 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3858 } 3859 free(mntpnt); 3860} 3861 3862#ifdef sun 3863/* 3864 * Read the EFI label from the config, if a label does not exist then 3865 * pass back the error to the caller. If the caller has passed a non-NULL 3866 * diskaddr argument then we set it to the starting address of the EFI 3867 * partition. 
3868 */ 3869static int 3870read_efi_label(nvlist_t *config, diskaddr_t *sb) 3871{ 3872 char *path; 3873 int fd; 3874 char diskname[MAXPATHLEN]; 3875 int err = -1; 3876 3877 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3878 return (err); 3879 3880 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT, 3881 strrchr(path, '/')); 3882 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3883 struct dk_gpt *vtoc; 3884 3885 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3886 if (sb != NULL) 3887 *sb = vtoc->efi_parts[0].p_start; 3888 efi_free(vtoc); 3889 } 3890 (void) close(fd); 3891 } 3892 return (err); 3893} 3894 3895/* 3896 * determine where a partition starts on a disk in the current 3897 * configuration 3898 */ 3899static diskaddr_t 3900find_start_block(nvlist_t *config) 3901{ 3902 nvlist_t **child; 3903 uint_t c, children; 3904 diskaddr_t sb = MAXOFFSET_T; 3905 uint64_t wholedisk; 3906 3907 if (nvlist_lookup_nvlist_array(config, 3908 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3909 if (nvlist_lookup_uint64(config, 3910 ZPOOL_CONFIG_WHOLE_DISK, 3911 &wholedisk) != 0 || !wholedisk) { 3912 return (MAXOFFSET_T); 3913 } 3914 if (read_efi_label(config, &sb) < 0) 3915 sb = MAXOFFSET_T; 3916 return (sb); 3917 } 3918 3919 for (c = 0; c < children; c++) { 3920 sb = find_start_block(child[c]); 3921 if (sb != MAXOFFSET_T) { 3922 return (sb); 3923 } 3924 } 3925 return (MAXOFFSET_T); 3926} 3927#endif /* sun */ 3928 3929/* 3930 * Label an individual disk. The name provided is the short name, 3931 * stripped of any leading /dev path. 3932 */ 3933int 3934zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name) 3935{ 3936#ifdef sun 3937 char path[MAXPATHLEN]; 3938 struct dk_gpt *vtoc; 3939 int fd; 3940 size_t resv = EFI_MIN_RESV_SIZE; 3941 uint64_t slice_size; 3942 diskaddr_t start_block; 3943 char errbuf[1024]; 3944 3945 /* prepare an error message just in case */ 3946 (void) snprintf(errbuf, sizeof (errbuf), 3947 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3948 3949 if (zhp) { 3950 nvlist_t *nvroot; 3951 3952 if (zpool_is_bootable(zhp)) { 3953 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3954 "EFI labeled devices are not supported on root " 3955 "pools.")); 3956 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3957 } 3958 3959 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3960 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3961 3962 if (zhp->zpool_start_block == 0) 3963 start_block = find_start_block(nvroot); 3964 else 3965 start_block = zhp->zpool_start_block; 3966 zhp->zpool_start_block = start_block; 3967 } else { 3968 /* new pool */ 3969 start_block = NEW_START_BLOCK; 3970 } 3971 3972 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name, 3973 BACKUP_SLICE); 3974 3975 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3976 /* 3977 * This shouldn't happen. We've long since verified that this 3978 * is a valid device. 
3979 */ 3980 zfs_error_aux(hdl, 3981 dgettext(TEXT_DOMAIN, "unable to open device")); 3982 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 3983 } 3984 3985 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 3986 /* 3987 * The only way this can fail is if we run out of memory, or we 3988 * were unable to read the disk's capacity 3989 */ 3990 if (errno == ENOMEM) 3991 (void) no_memory(hdl); 3992 3993 (void) close(fd); 3994 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3995 "unable to read disk capacity"), name); 3996 3997 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 3998 } 3999 4000 slice_size = vtoc->efi_last_u_lba + 1; 4001 slice_size -= EFI_MIN_RESV_SIZE; 4002 if (start_block == MAXOFFSET_T) 4003 start_block = NEW_START_BLOCK; 4004 slice_size -= start_block; 4005 4006 vtoc->efi_parts[0].p_start = start_block; 4007 vtoc->efi_parts[0].p_size = slice_size; 4008 4009 /* 4010 * Why we use V_USR: V_BACKUP confuses users, and is considered 4011 * disposable by some EFI utilities (since EFI doesn't have a backup 4012 * slice). V_UNASSIGNED is supposed to be used only for zero size 4013 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 4014 * etc. were all pretty specific. V_USR is as close to reality as we 4015 * can get, in the absence of V_OTHER. 4016 */ 4017 vtoc->efi_parts[0].p_tag = V_USR; 4018 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 4019 4020 vtoc->efi_parts[8].p_start = slice_size + start_block; 4021 vtoc->efi_parts[8].p_size = resv; 4022 vtoc->efi_parts[8].p_tag = V_RESERVED; 4023 4024 if (efi_write(fd, vtoc) != 0) { 4025 /* 4026 * Some block drivers (like pcata) may not support EFI 4027 * GPT labels. Print out a helpful error message dir- 4028 * ecting the user to manually label the disk and give 4029 * a specific slice. 4030 */ 4031 (void) close(fd); 4032 efi_free(vtoc); 4033 4034 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4035 "try using fdisk(1M) and then provide a specific slice")); 4036 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 4037 } 4038 4039 (void) close(fd); 4040 efi_free(vtoc); 4041#endif /* sun */ 4042 return (0); 4043} 4044 4045static boolean_t 4046supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 4047{ 4048 char *type; 4049 nvlist_t **child; 4050 uint_t children, c; 4051 4052 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 4053 if (strcmp(type, VDEV_TYPE_FILE) == 0 || 4054 strcmp(type, VDEV_TYPE_HOLE) == 0 || 4055 strcmp(type, VDEV_TYPE_MISSING) == 0) { 4056 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4057 "vdev type '%s' is not supported"), type); 4058 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 4059 return (B_FALSE); 4060 } 4061 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 4062 &child, &children) == 0) { 4063 for (c = 0; c < children; c++) { 4064 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 4065 return (B_FALSE); 4066 } 4067 } 4068 return (B_TRUE); 4069} 4070 4071/* 4072 * Check if this zvol is allowable for use as a dump device; zero if 4073 * it is, > 0 if it isn't, < 0 if it isn't a zvol. 4074 * 4075 * Allowable storage configurations include mirrors, all raidz variants, and 4076 * pools with log, cache, and spare devices. Pools which are backed by files or 4077 * have missing/hole vdevs are not suitable. 
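 * The pool name is parsed from the portion of 'arg' that follows
 * ZVOL_FULL_DEV_DIR, and only the first top-level vdev is checked.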
4078 */ 4079int 4080zvol_check_dump_config(char *arg) 4081{ 4082 zpool_handle_t *zhp = NULL; 4083 nvlist_t *config, *nvroot; 4084 char *p, *volname; 4085 nvlist_t **top; 4086 uint_t toplevels; 4087 libzfs_handle_t *hdl; 4088 char errbuf[1024]; 4089 char poolname[ZPOOL_MAXNAMELEN]; 4090 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4091 int ret = 1; 4092 4093 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4094 return (-1); 4095 } 4096 4097 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4098 "dump is not supported on device '%s'"), arg); 4099 4100 if ((hdl = libzfs_init()) == NULL) 4101 return (1); 4102 libzfs_print_on_error(hdl, B_TRUE); 4103 4104 volname = arg + pathlen; 4105 4106 /* check the configuration of the pool */ 4107 if ((p = strchr(volname, '/')) == NULL) { 4108 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4109 "malformed dataset name")); 4110 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4111 return (1); 4112 } else if (p - volname >= ZFS_MAXNAMELEN) { 4113 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4114 "dataset name is too long")); 4115 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4116 return (1); 4117 } else { 4118 (void) strncpy(poolname, volname, p - volname); 4119 poolname[p - volname] = '\0'; 4120 } 4121 4122 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4123 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4124 "could not open pool '%s'"), poolname); 4125 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4126 goto out; 4127 } 4128 config = zpool_get_config(zhp, NULL); 4129 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4130 &nvroot) != 0) { 4131 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4132 "could not obtain vdev configuration for '%s'"), poolname); 4133 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4134 goto out; 4135 } 4136 4137 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4138 &top, &toplevels) == 0); 4139 4140 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4141 goto out; 4142 } 4143 ret = 0; 4144 4145out: 4146 if (zhp) 4147 zpool_close(zhp); 4148 libzfs_fini(hdl); 4149 return (ret); 4150} 4151