libzfs_pool.c revision 269118
1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22/* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 25 * Copyright (c) 2012, 2014 by Delphix. All rights reserved. 26 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 27 */ 28 29#include <sys/types.h> 30#include <sys/stat.h> 31#include <ctype.h> 32#include <errno.h> 33#include <devid.h> 34#include <fcntl.h> 35#include <libintl.h> 36#include <stdio.h> 37#include <stdlib.h> 38#include <strings.h> 39#include <unistd.h> 40#include <libgen.h> 41#include <sys/zfs_ioctl.h> 42#include <dlfcn.h> 43 44#include "zfs_namecheck.h" 45#include "zfs_prop.h" 46#include "libzfs_impl.h" 47#include "zfs_comutil.h" 48#include "zfeature_common.h" 49 50static int read_efi_label(nvlist_t *config, diskaddr_t *sb); 51 52#define DISK_ROOT "/dev/dsk" 53#define RDISK_ROOT "/dev/rdsk" 54#define BACKUP_SLICE "s2" 55 56typedef struct prop_flags { 57 int create:1; /* Validate property on creation */ 58 int import:1; /* Validate property on import */ 59} prop_flags_t; 60 61/* 62 * ==================================================================== 63 * zpool property functions 64 * ==================================================================== 65 */ 66 67static int 68zpool_get_all_props(zpool_handle_t *zhp) 69{ 70 zfs_cmd_t zc = { 0 }; 71 libzfs_handle_t *hdl = zhp->zpool_hdl; 72 73 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 74 75 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 76 return (-1); 77 78 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 79 if (errno == ENOMEM) { 80 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 81 zcmd_free_nvlists(&zc); 82 return (-1); 83 } 84 } else { 85 zcmd_free_nvlists(&zc); 86 return (-1); 87 } 88 } 89 90 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 91 zcmd_free_nvlists(&zc); 92 return (-1); 93 } 94 95 zcmd_free_nvlists(&zc); 96 97 return (0); 98} 99 100static int 101zpool_props_refresh(zpool_handle_t *zhp) 102{ 103 nvlist_t *old_props; 104 105 old_props = zhp->zpool_props; 106 107 if (zpool_get_all_props(zhp) != 0) 108 return (-1); 109 110 nvlist_free(old_props); 111 return (0); 112} 113 114static char * 115zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 116 zprop_source_t *src) 117{ 118 nvlist_t *nv, *nvl; 119 uint64_t ival; 120 char *value; 121 zprop_source_t source; 122 123 nvl = zhp->zpool_props; 124 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 125 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 126 source = ival; 127 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 128 } else { 129 source = ZPROP_SRC_DEFAULT; 130 if ((value = 
(char *)zpool_prop_default_string(prop)) == NULL) 131 value = "-"; 132 } 133 134 if (src) 135 *src = source; 136 137 return (value); 138} 139 140uint64_t 141zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 142{ 143 nvlist_t *nv, *nvl; 144 uint64_t value; 145 zprop_source_t source; 146 147 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 148 /* 149 * zpool_get_all_props() has most likely failed because 150 * the pool is faulted, but if all we need is the top level 151 * vdev's guid then get it from the zhp config nvlist. 152 */ 153 if ((prop == ZPOOL_PROP_GUID) && 154 (nvlist_lookup_nvlist(zhp->zpool_config, 155 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 156 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 157 == 0)) { 158 return (value); 159 } 160 return (zpool_prop_default_numeric(prop)); 161 } 162 163 nvl = zhp->zpool_props; 164 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 165 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 166 source = value; 167 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 168 } else { 169 source = ZPROP_SRC_DEFAULT; 170 value = zpool_prop_default_numeric(prop); 171 } 172 173 if (src) 174 *src = source; 175 176 return (value); 177} 178 179/* 180 * Map VDEV STATE to printed strings. 181 */ 182const char * 183zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 184{ 185 switch (state) { 186 case VDEV_STATE_CLOSED: 187 case VDEV_STATE_OFFLINE: 188 return (gettext("OFFLINE")); 189 case VDEV_STATE_REMOVED: 190 return (gettext("REMOVED")); 191 case VDEV_STATE_CANT_OPEN: 192 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 193 return (gettext("FAULTED")); 194 else if (aux == VDEV_AUX_SPLIT_POOL) 195 return (gettext("SPLIT")); 196 else 197 return (gettext("UNAVAIL")); 198 case VDEV_STATE_FAULTED: 199 return (gettext("FAULTED")); 200 case VDEV_STATE_DEGRADED: 201 return (gettext("DEGRADED")); 202 case VDEV_STATE_HEALTHY: 203 return (gettext("ONLINE")); 204 } 205 206 return (gettext("UNKNOWN")); 207} 208 209/* 210 * Map POOL STATE to printed strings. 211 */ 212const char * 213zpool_pool_state_to_name(pool_state_t state) 214{ 215 switch (state) { 216 case POOL_STATE_ACTIVE: 217 return (gettext("ACTIVE")); 218 case POOL_STATE_EXPORTED: 219 return (gettext("EXPORTED")); 220 case POOL_STATE_DESTROYED: 221 return (gettext("DESTROYED")); 222 case POOL_STATE_SPARE: 223 return (gettext("SPARE")); 224 case POOL_STATE_L2CACHE: 225 return (gettext("L2CACHE")); 226 case POOL_STATE_UNINITIALIZED: 227 return (gettext("UNINITIALIZED")); 228 case POOL_STATE_UNAVAIL: 229 return (gettext("UNAVAIL")); 230 case POOL_STATE_POTENTIALLY_ACTIVE: 231 return (gettext("POTENTIALLY_ACTIVE")); 232 } 233 234 return (gettext("UNKNOWN")); 235} 236 237/* 238 * Get a zpool property value for 'prop' and return the value in 239 * a pre-allocated buffer. 
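 *
 * Illustrative call sequence (a sketch only, assuming 'zhp' is an open
 * pool handle):
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf, sizeof (buf),
 *	    &src, B_FALSE) == 0)
 *		(void) printf("capacity: %s\n", buf);
 *
 * With 'literal' B_FALSE, sizes such as SIZE, FREE and ALLOCATED are
 * humanized through zfs_nicenum(); with B_TRUE the raw integer is returned.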
240 */ 241int 242zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, 243 zprop_source_t *srctype, boolean_t literal) 244{ 245 uint64_t intval; 246 const char *strval; 247 zprop_source_t src = ZPROP_SRC_NONE; 248 nvlist_t *nvroot; 249 vdev_stat_t *vs; 250 uint_t vsc; 251 252 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 253 switch (prop) { 254 case ZPOOL_PROP_NAME: 255 (void) strlcpy(buf, zpool_get_name(zhp), len); 256 break; 257 258 case ZPOOL_PROP_HEALTH: 259 (void) strlcpy(buf, "FAULTED", len); 260 break; 261 262 case ZPOOL_PROP_GUID: 263 intval = zpool_get_prop_int(zhp, prop, &src); 264 (void) snprintf(buf, len, "%llu", intval); 265 break; 266 267 case ZPOOL_PROP_ALTROOT: 268 case ZPOOL_PROP_CACHEFILE: 269 case ZPOOL_PROP_COMMENT: 270 if (zhp->zpool_props != NULL || 271 zpool_get_all_props(zhp) == 0) { 272 (void) strlcpy(buf, 273 zpool_get_prop_string(zhp, prop, &src), 274 len); 275 break; 276 } 277 /* FALLTHROUGH */ 278 default: 279 (void) strlcpy(buf, "-", len); 280 break; 281 } 282 283 if (srctype != NULL) 284 *srctype = src; 285 return (0); 286 } 287 288 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && 289 prop != ZPOOL_PROP_NAME) 290 return (-1); 291 292 switch (zpool_prop_get_type(prop)) { 293 case PROP_TYPE_STRING: 294 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), 295 len); 296 break; 297 298 case PROP_TYPE_NUMBER: 299 intval = zpool_get_prop_int(zhp, prop, &src); 300 301 switch (prop) { 302 case ZPOOL_PROP_SIZE: 303 case ZPOOL_PROP_ALLOCATED: 304 case ZPOOL_PROP_FREE: 305 case ZPOOL_PROP_FREEING: 306 case ZPOOL_PROP_LEAKED: 307 case ZPOOL_PROP_EXPANDSZ: 308 if (literal) { 309 (void) snprintf(buf, len, "%llu", 310 (u_longlong_t)intval); 311 } else { 312 (void) zfs_nicenum(intval, buf, len); 313 } 314 break; 315 316 case ZPOOL_PROP_CAPACITY: 317 if (literal) { 318 (void) snprintf(buf, len, "%llu", 319 (u_longlong_t)intval); 320 } else { 321 (void) snprintf(buf, len, "%llu%%", 322 (u_longlong_t)intval); 323 } 324 break; 325 case ZPOOL_PROP_FRAGMENTATION: 326 if (intval == UINT64_MAX) { 327 (void) strlcpy(buf, "-", len); 328 } else { 329 (void) snprintf(buf, len, "%llu%%", 330 (u_longlong_t)intval); 331 } 332 break; 333 334 case ZPOOL_PROP_DEDUPRATIO: 335 (void) snprintf(buf, len, "%llu.%02llux", 336 (u_longlong_t)(intval / 100), 337 (u_longlong_t)(intval % 100)); 338 break; 339 340 case ZPOOL_PROP_HEALTH: 341 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 342 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 343 verify(nvlist_lookup_uint64_array(nvroot, 344 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 345 == 0); 346 347 (void) strlcpy(buf, zpool_state_to_name(intval, 348 vs->vs_aux), len); 349 break; 350 case ZPOOL_PROP_VERSION: 351 if (intval >= SPA_VERSION_FEATURES) { 352 (void) snprintf(buf, len, "-"); 353 break; 354 } 355 /* FALLTHROUGH */ 356 default: 357 (void) snprintf(buf, len, "%llu", intval); 358 } 359 break; 360 361 case PROP_TYPE_INDEX: 362 intval = zpool_get_prop_int(zhp, prop, &src); 363 if (zpool_prop_index_to_string(prop, intval, &strval) 364 != 0) 365 return (-1); 366 (void) strlcpy(buf, strval, len); 367 break; 368 369 default: 370 abort(); 371 } 372 373 if (srctype) 374 *srctype = src; 375 376 return (0); 377} 378 379/* 380 * Check if the bootfs name has the same pool name as it is set to. 381 * Assuming bootfs is a valid dataset name. 
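 *
 * For example, with pool "rpool" (illustrative names): "rpool" and
 * "rpool/ROOT/be1" are accepted, while "rpoolx/ROOT" and "other/fs" are
 * rejected.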
382 */ 383static boolean_t 384bootfs_name_valid(const char *pool, char *bootfs) 385{ 386 int len = strlen(pool); 387 388 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 389 return (B_FALSE); 390 391 if (strncmp(pool, bootfs, len) == 0 && 392 (bootfs[len] == '/' || bootfs[len] == '\0')) 393 return (B_TRUE); 394 395 return (B_FALSE); 396} 397 398/* 399 * Inspect the configuration to determine if any of the devices contain 400 * an EFI label. 401 */ 402static boolean_t 403pool_uses_efi(nvlist_t *config) 404{ 405#ifdef sun 406 nvlist_t **child; 407 uint_t c, children; 408 409 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 410 &child, &children) != 0) 411 return (read_efi_label(config, NULL) >= 0); 412 413 for (c = 0; c < children; c++) { 414 if (pool_uses_efi(child[c])) 415 return (B_TRUE); 416 } 417#endif /* sun */ 418 return (B_FALSE); 419} 420 421boolean_t 422zpool_is_bootable(zpool_handle_t *zhp) 423{ 424 char bootfs[ZPOOL_MAXNAMELEN]; 425 426 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 427 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-", 428 sizeof (bootfs)) != 0); 429} 430 431 432/* 433 * Given an nvlist of zpool properties to be set, validate that they are 434 * correct, and parse any numeric properties (index, boolean, etc) if they are 435 * specified as strings. 436 */ 437static nvlist_t * 438zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 439 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 440{ 441 nvpair_t *elem; 442 nvlist_t *retprops; 443 zpool_prop_t prop; 444 char *strval; 445 uint64_t intval; 446 char *slash, *check; 447 struct stat64 statbuf; 448 zpool_handle_t *zhp; 449 nvlist_t *nvroot; 450 451 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 452 (void) no_memory(hdl); 453 return (NULL); 454 } 455 456 elem = NULL; 457 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 458 const char *propname = nvpair_name(elem); 459 460 prop = zpool_name_to_prop(propname); 461 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) { 462 int err; 463 char *fname = strchr(propname, '@') + 1; 464 465 err = zfeature_lookup_name(fname, NULL); 466 if (err != 0) { 467 ASSERT3U(err, ==, ENOENT); 468 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 469 "invalid feature '%s'"), fname); 470 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 471 goto error; 472 } 473 474 if (nvpair_type(elem) != DATA_TYPE_STRING) { 475 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 476 "'%s' must be a string"), propname); 477 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 478 goto error; 479 } 480 481 (void) nvpair_value_string(elem, &strval); 482 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) { 483 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 484 "property '%s' can only be set to " 485 "'enabled'"), propname); 486 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 487 goto error; 488 } 489 490 if (nvlist_add_uint64(retprops, propname, 0) != 0) { 491 (void) no_memory(hdl); 492 goto error; 493 } 494 continue; 495 } 496 497 /* 498 * Make sure this property is valid and applies to this type. 
499 */ 500 if (prop == ZPROP_INVAL) { 501 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 502 "invalid property '%s'"), propname); 503 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 504 goto error; 505 } 506 507 if (zpool_prop_readonly(prop)) { 508 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 509 "is readonly"), propname); 510 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); 511 goto error; 512 } 513 514 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, 515 &strval, &intval, errbuf) != 0) 516 goto error; 517 518 /* 519 * Perform additional checking for specific properties. 520 */ 521 switch (prop) { 522 case ZPOOL_PROP_VERSION: 523 if (intval < version || 524 !SPA_VERSION_IS_SUPPORTED(intval)) { 525 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 526 "property '%s' number %d is invalid."), 527 propname, intval); 528 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 529 goto error; 530 } 531 break; 532 533 case ZPOOL_PROP_BOOTFS: 534 if (flags.create || flags.import) { 535 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 536 "property '%s' cannot be set at creation " 537 "or import time"), propname); 538 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 539 goto error; 540 } 541 542 if (version < SPA_VERSION_BOOTFS) { 543 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 544 "pool must be upgraded to support " 545 "'%s' property"), propname); 546 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 547 goto error; 548 } 549 550 /* 551 * bootfs property value has to be a dataset name and 552 * the dataset has to be in the same pool as it sets to. 553 */ 554 if (strval[0] != '\0' && !bootfs_name_valid(poolname, 555 strval)) { 556 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 557 "is an invalid name"), strval); 558 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 559 goto error; 560 } 561 562 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { 563 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 564 "could not open pool '%s'"), poolname); 565 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 566 goto error; 567 } 568 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 569 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 570 571#ifdef sun 572 /* 573 * bootfs property cannot be set on a disk which has 574 * been EFI labeled. 
575 */ 576 if (pool_uses_efi(nvroot)) { 577 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 578 "property '%s' not supported on " 579 "EFI labeled devices"), propname); 580 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf); 581 zpool_close(zhp); 582 goto error; 583 } 584#endif /* sun */ 585 zpool_close(zhp); 586 break; 587 588 case ZPOOL_PROP_ALTROOT: 589 if (!flags.create && !flags.import) { 590 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 591 "property '%s' can only be set during pool " 592 "creation or import"), propname); 593 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 594 goto error; 595 } 596 597 if (strval[0] != '/') { 598 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 599 "bad alternate root '%s'"), strval); 600 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 601 goto error; 602 } 603 break; 604 605 case ZPOOL_PROP_CACHEFILE: 606 if (strval[0] == '\0') 607 break; 608 609 if (strcmp(strval, "none") == 0) 610 break; 611 612 if (strval[0] != '/') { 613 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 614 "property '%s' must be empty, an " 615 "absolute path, or 'none'"), propname); 616 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 617 goto error; 618 } 619 620 slash = strrchr(strval, '/'); 621 622 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 623 strcmp(slash, "/..") == 0) { 624 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 625 "'%s' is not a valid file"), strval); 626 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 627 goto error; 628 } 629 630 *slash = '\0'; 631 632 if (strval[0] != '\0' && 633 (stat64(strval, &statbuf) != 0 || 634 !S_ISDIR(statbuf.st_mode))) { 635 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 636 "'%s' is not a valid directory"), 637 strval); 638 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 639 goto error; 640 } 641 642 *slash = '/'; 643 break; 644 645 case ZPOOL_PROP_COMMENT: 646 for (check = strval; *check != '\0'; check++) { 647 if (!isprint(*check)) { 648 zfs_error_aux(hdl, 649 dgettext(TEXT_DOMAIN, 650 "comment may only have printable " 651 "characters")); 652 (void) zfs_error(hdl, EZFS_BADPROP, 653 errbuf); 654 goto error; 655 } 656 } 657 if (strlen(strval) > ZPROP_MAX_COMMENT) { 658 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 659 "comment must not exceed %d characters"), 660 ZPROP_MAX_COMMENT); 661 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 662 goto error; 663 } 664 break; 665 case ZPOOL_PROP_READONLY: 666 if (!flags.import) { 667 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 668 "property '%s' can only be set at " 669 "import time"), propname); 670 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 671 goto error; 672 } 673 break; 674 } 675 } 676 677 return (retprops); 678error: 679 nvlist_free(retprops); 680 return (NULL); 681} 682 683/* 684 * Set zpool property : propname=propval. 
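 *
 * A minimal usage sketch (illustrative; "tank" and the property value are
 * placeholders, and error handling is abbreviated):
 *
 *	libzfs_handle_t *g_zfs = libzfs_init();
 *	zpool_handle_t *zhp = zpool_open(g_zfs, "tank");
 *
 *	if (zhp != NULL) {
 *		if (zpool_set_prop(zhp, "autoexpand", "on") != 0)
 *			(void) fprintf(stderr, "%s\n",
 *			    libzfs_error_description(g_zfs));
 *		zpool_close(zhp);
 *	}
 *	libzfs_fini(g_zfs);
 *
 * On success the handle's cached property list is refreshed via
 * zpool_props_refresh().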
685 */ 686int 687zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 688{ 689 zfs_cmd_t zc = { 0 }; 690 int ret = -1; 691 char errbuf[1024]; 692 nvlist_t *nvl = NULL; 693 nvlist_t *realprops; 694 uint64_t version; 695 prop_flags_t flags = { 0 }; 696 697 (void) snprintf(errbuf, sizeof (errbuf), 698 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 699 zhp->zpool_name); 700 701 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 702 return (no_memory(zhp->zpool_hdl)); 703 704 if (nvlist_add_string(nvl, propname, propval) != 0) { 705 nvlist_free(nvl); 706 return (no_memory(zhp->zpool_hdl)); 707 } 708 709 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 710 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 711 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 712 nvlist_free(nvl); 713 return (-1); 714 } 715 716 nvlist_free(nvl); 717 nvl = realprops; 718 719 /* 720 * Execute the corresponding ioctl() to set this property. 721 */ 722 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 723 724 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 725 nvlist_free(nvl); 726 return (-1); 727 } 728 729 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 730 731 zcmd_free_nvlists(&zc); 732 nvlist_free(nvl); 733 734 if (ret) 735 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 736 else 737 (void) zpool_props_refresh(zhp); 738 739 return (ret); 740} 741 742int 743zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) 744{ 745 libzfs_handle_t *hdl = zhp->zpool_hdl; 746 zprop_list_t *entry; 747 char buf[ZFS_MAXPROPLEN]; 748 nvlist_t *features = NULL; 749 zprop_list_t **last; 750 boolean_t firstexpand = (NULL == *plp); 751 752 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 753 return (-1); 754 755 last = plp; 756 while (*last != NULL) 757 last = &(*last)->pl_next; 758 759 if ((*plp)->pl_all) 760 features = zpool_get_features(zhp); 761 762 if ((*plp)->pl_all && firstexpand) { 763 for (int i = 0; i < SPA_FEATURES; i++) { 764 zprop_list_t *entry = zfs_alloc(hdl, 765 sizeof (zprop_list_t)); 766 entry->pl_prop = ZPROP_INVAL; 767 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s", 768 spa_feature_table[i].fi_uname); 769 entry->pl_width = strlen(entry->pl_user_prop); 770 entry->pl_all = B_TRUE; 771 772 *last = entry; 773 last = &entry->pl_next; 774 } 775 } 776 777 /* add any unsupported features */ 778 for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL); 779 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) { 780 char *propname; 781 boolean_t found; 782 zprop_list_t *entry; 783 784 if (zfeature_is_supported(nvpair_name(nvp))) 785 continue; 786 787 propname = zfs_asprintf(hdl, "unsupported@%s", 788 nvpair_name(nvp)); 789 790 /* 791 * Before adding the property to the list make sure that no 792 * other pool already added the same property. 
793 */ 794 found = B_FALSE; 795 entry = *plp; 796 while (entry != NULL) { 797 if (entry->pl_user_prop != NULL && 798 strcmp(propname, entry->pl_user_prop) == 0) { 799 found = B_TRUE; 800 break; 801 } 802 entry = entry->pl_next; 803 } 804 if (found) { 805 free(propname); 806 continue; 807 } 808 809 entry = zfs_alloc(hdl, sizeof (zprop_list_t)); 810 entry->pl_prop = ZPROP_INVAL; 811 entry->pl_user_prop = propname; 812 entry->pl_width = strlen(entry->pl_user_prop); 813 entry->pl_all = B_TRUE; 814 815 *last = entry; 816 last = &entry->pl_next; 817 } 818 819 for (entry = *plp; entry != NULL; entry = entry->pl_next) { 820 821 if (entry->pl_fixed) 822 continue; 823 824 if (entry->pl_prop != ZPROP_INVAL && 825 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), 826 NULL, B_FALSE) == 0) { 827 if (strlen(buf) > entry->pl_width) 828 entry->pl_width = strlen(buf); 829 } 830 } 831 832 return (0); 833} 834 835/* 836 * Get the state for the given feature on the given ZFS pool. 837 */ 838int 839zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf, 840 size_t len) 841{ 842 uint64_t refcount; 843 boolean_t found = B_FALSE; 844 nvlist_t *features = zpool_get_features(zhp); 845 boolean_t supported; 846 const char *feature = strchr(propname, '@') + 1; 847 848 supported = zpool_prop_feature(propname); 849 ASSERT(supported || zpool_prop_unsupported(propname)); 850 851 /* 852 * Convert from feature name to feature guid. This conversion is 853 * unecessary for unsupported@... properties because they already 854 * use guids. 855 */ 856 if (supported) { 857 int ret; 858 spa_feature_t fid; 859 860 ret = zfeature_lookup_name(feature, &fid); 861 if (ret != 0) { 862 (void) strlcpy(buf, "-", len); 863 return (ENOTSUP); 864 } 865 feature = spa_feature_table[fid].fi_guid; 866 } 867 868 if (nvlist_lookup_uint64(features, feature, &refcount) == 0) 869 found = B_TRUE; 870 871 if (supported) { 872 if (!found) { 873 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len); 874 } else { 875 if (refcount == 0) 876 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len); 877 else 878 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len); 879 } 880 } else { 881 if (found) { 882 if (refcount == 0) { 883 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE); 884 } else { 885 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY); 886 } 887 } else { 888 (void) strlcpy(buf, "-", len); 889 return (ENOTSUP); 890 } 891 } 892 893 return (0); 894} 895 896/* 897 * Don't start the slice at the default block of 34; many storage 898 * devices will use a stripe width of 128k, so start there instead. 899 */ 900#define NEW_START_BLOCK 256 901 902/* 903 * Validate the given pool name, optionally putting an extended error message in 904 * 'buf'. 905 */ 906boolean_t 907zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 908{ 909 namecheck_err_t why; 910 char what; 911 int ret; 912 913 ret = pool_namecheck(pool, &why, &what); 914 915 /* 916 * The rules for reserved pool names were extended at a later point. 917 * But we need to support users with existing pools that may now be 918 * invalid. So we only check for this expanded set of names during a 919 * create (or import), and only in userland. 
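 *
 * For example (illustrative names): "mirror", "raidz3data" and "sparepool"
 * are rejected here at create/import time, while an already-existing pool
 * with such a name can still be opened.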
920 */ 921 if (ret == 0 && !isopen && 922 (strncmp(pool, "mirror", 6) == 0 || 923 strncmp(pool, "raidz", 5) == 0 || 924 strncmp(pool, "spare", 5) == 0 || 925 strcmp(pool, "log") == 0)) { 926 if (hdl != NULL) 927 zfs_error_aux(hdl, 928 dgettext(TEXT_DOMAIN, "name is reserved")); 929 return (B_FALSE); 930 } 931 932 933 if (ret != 0) { 934 if (hdl != NULL) { 935 switch (why) { 936 case NAME_ERR_TOOLONG: 937 zfs_error_aux(hdl, 938 dgettext(TEXT_DOMAIN, "name is too long")); 939 break; 940 941 case NAME_ERR_INVALCHAR: 942 zfs_error_aux(hdl, 943 dgettext(TEXT_DOMAIN, "invalid character " 944 "'%c' in pool name"), what); 945 break; 946 947 case NAME_ERR_NOLETTER: 948 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 949 "name must begin with a letter")); 950 break; 951 952 case NAME_ERR_RESERVED: 953 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 954 "name is reserved")); 955 break; 956 957 case NAME_ERR_DISKLIKE: 958 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 959 "pool name is reserved")); 960 break; 961 962 case NAME_ERR_LEADING_SLASH: 963 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 964 "leading slash in name")); 965 break; 966 967 case NAME_ERR_EMPTY_COMPONENT: 968 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 969 "empty component in name")); 970 break; 971 972 case NAME_ERR_TRAILING_SLASH: 973 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 974 "trailing slash in name")); 975 break; 976 977 case NAME_ERR_MULTIPLE_AT: 978 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 979 "multiple '@' delimiters in name")); 980 break; 981 982 } 983 } 984 return (B_FALSE); 985 } 986 987 return (B_TRUE); 988} 989 990/* 991 * Open a handle to the given pool, even if the pool is currently in the FAULTED 992 * state. 993 */ 994zpool_handle_t * 995zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 996{ 997 zpool_handle_t *zhp; 998 boolean_t missing; 999 1000 /* 1001 * Make sure the pool name is valid. 1002 */ 1003 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 1004 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1005 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 1006 pool); 1007 return (NULL); 1008 } 1009 1010 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1011 return (NULL); 1012 1013 zhp->zpool_hdl = hdl; 1014 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1015 1016 if (zpool_refresh_stats(zhp, &missing) != 0) { 1017 zpool_close(zhp); 1018 return (NULL); 1019 } 1020 1021 if (missing) { 1022 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 1023 (void) zfs_error_fmt(hdl, EZFS_NOENT, 1024 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 1025 zpool_close(zhp); 1026 return (NULL); 1027 } 1028 1029 return (zhp); 1030} 1031 1032/* 1033 * Like the above, but silent on error. Used when iterating over pools (because 1034 * the configuration cache may be out of date). 1035 */ 1036int 1037zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 1038{ 1039 zpool_handle_t *zhp; 1040 boolean_t missing; 1041 1042 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1043 return (-1); 1044 1045 zhp->zpool_hdl = hdl; 1046 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1047 1048 if (zpool_refresh_stats(zhp, &missing) != 0) { 1049 zpool_close(zhp); 1050 return (-1); 1051 } 1052 1053 if (missing) { 1054 zpool_close(zhp); 1055 *ret = NULL; 1056 return (0); 1057 } 1058 1059 *ret = zhp; 1060 return (0); 1061} 1062 1063/* 1064 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 1065 * state. 
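 *
 * Usage sketch (illustrative; "tank" is a placeholder and 'g_zfs' an
 * initialized libzfs handle):
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(g_zfs, "tank")) == NULL)
 *		return (1);
 *	(void) printf("%s: state %d\n", zpool_get_name(zhp),
 *	    zpool_get_state(zhp));
 *	zpool_close(zhp);
 *
 * zpool_open_canfail() is the variant to use when a handle to a FAULTED
 * pool is still useful, for example to read its GUID.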
1066 */ 1067zpool_handle_t * 1068zpool_open(libzfs_handle_t *hdl, const char *pool) 1069{ 1070 zpool_handle_t *zhp; 1071 1072 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 1073 return (NULL); 1074 1075 if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 1076 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 1077 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 1078 zpool_close(zhp); 1079 return (NULL); 1080 } 1081 1082 return (zhp); 1083} 1084 1085/* 1086 * Close the handle. Simply frees the memory associated with the handle. 1087 */ 1088void 1089zpool_close(zpool_handle_t *zhp) 1090{ 1091 if (zhp->zpool_config) 1092 nvlist_free(zhp->zpool_config); 1093 if (zhp->zpool_old_config) 1094 nvlist_free(zhp->zpool_old_config); 1095 if (zhp->zpool_props) 1096 nvlist_free(zhp->zpool_props); 1097 free(zhp); 1098} 1099 1100/* 1101 * Return the name of the pool. 1102 */ 1103const char * 1104zpool_get_name(zpool_handle_t *zhp) 1105{ 1106 return (zhp->zpool_name); 1107} 1108 1109 1110/* 1111 * Return the state of the pool (ACTIVE or UNAVAILABLE) 1112 */ 1113int 1114zpool_get_state(zpool_handle_t *zhp) 1115{ 1116 return (zhp->zpool_state); 1117} 1118 1119/* 1120 * Create the named pool, using the provided vdev list. It is assumed 1121 * that the consumer has already validated the contents of the nvlist, so we 1122 * don't have to worry about error semantics. 1123 */ 1124int 1125zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 1126 nvlist_t *props, nvlist_t *fsprops) 1127{ 1128 zfs_cmd_t zc = { 0 }; 1129 nvlist_t *zc_fsprops = NULL; 1130 nvlist_t *zc_props = NULL; 1131 char msg[1024]; 1132 int ret = -1; 1133 1134 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1135 "cannot create '%s'"), pool); 1136 1137 if (!zpool_name_valid(hdl, B_FALSE, pool)) 1138 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 1139 1140 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1141 return (-1); 1142 1143 if (props) { 1144 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 1145 1146 if ((zc_props = zpool_valid_proplist(hdl, pool, props, 1147 SPA_VERSION_1, flags, msg)) == NULL) { 1148 goto create_failed; 1149 } 1150 } 1151 1152 if (fsprops) { 1153 uint64_t zoned; 1154 char *zonestr; 1155 1156 zoned = ((nvlist_lookup_string(fsprops, 1157 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 1158 strcmp(zonestr, "on") == 0); 1159 1160 if ((zc_fsprops = zfs_valid_proplist(hdl, 1161 ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) { 1162 goto create_failed; 1163 } 1164 if (!zc_props && 1165 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 1166 goto create_failed; 1167 } 1168 if (nvlist_add_nvlist(zc_props, 1169 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 1170 goto create_failed; 1171 } 1172 } 1173 1174 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 1175 goto create_failed; 1176 1177 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 1178 1179 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 1180 1181 zcmd_free_nvlists(&zc); 1182 nvlist_free(zc_props); 1183 nvlist_free(zc_fsprops); 1184 1185 switch (errno) { 1186 case EBUSY: 1187 /* 1188 * This can happen if the user has specified the same 1189 * device multiple times. We can't reliably detect this 1190 * until we try to add it and see we already have a 1191 * label. 
1192 */ 1193 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1194 "one or more vdevs refer to the same device")); 1195 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1196 1197 case EOVERFLOW: 1198 /* 1199 * This occurs when one of the devices is below 1200 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1201 * device was the problem device since there's no 1202 * reliable way to determine device size from userland. 1203 */ 1204 { 1205 char buf[64]; 1206 1207 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1208 1209 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1210 "one or more devices is less than the " 1211 "minimum size (%s)"), buf); 1212 } 1213 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1214 1215 case ENOSPC: 1216 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1217 "one or more devices is out of space")); 1218 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1219 1220 case ENOTBLK: 1221 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1222 "cache device must be a disk or disk slice")); 1223 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1224 1225 default: 1226 return (zpool_standard_error(hdl, errno, msg)); 1227 } 1228 } 1229 1230create_failed: 1231 zcmd_free_nvlists(&zc); 1232 nvlist_free(zc_props); 1233 nvlist_free(zc_fsprops); 1234 return (ret); 1235} 1236 1237/* 1238 * Destroy the given pool. It is up to the caller to ensure that there are no 1239 * datasets left in the pool. 1240 */ 1241int 1242zpool_destroy(zpool_handle_t *zhp, const char *log_str) 1243{ 1244 zfs_cmd_t zc = { 0 }; 1245 zfs_handle_t *zfp = NULL; 1246 libzfs_handle_t *hdl = zhp->zpool_hdl; 1247 char msg[1024]; 1248 1249 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1250 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1251 return (-1); 1252 1253 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1254 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1255 1256 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1257 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1258 "cannot destroy '%s'"), zhp->zpool_name); 1259 1260 if (errno == EROFS) { 1261 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1262 "one or more devices is read only")); 1263 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1264 } else { 1265 (void) zpool_standard_error(hdl, errno, msg); 1266 } 1267 1268 if (zfp) 1269 zfs_close(zfp); 1270 return (-1); 1271 } 1272 1273 if (zfp) { 1274 remove_mountpoint(zfp); 1275 zfs_close(zfp); 1276 } 1277 1278 return (0); 1279} 1280 1281/* 1282 * Add the given vdevs to the pool. The caller must have already performed the 1283 * necessary verification to ensure that the vdev specification is well-formed. 
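 *
 * Callers normally construct 'nvroot' with the same vdev-parsing helpers
 * used by the zpool(1M) command; the hand-rolled sketch below (with a
 * placeholder device path) only illustrates the expected shape of the
 * tree, a root vdev whose children are the new leaf vdevs:
 *
 *	nvlist_t *disk, *nvroot;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c0t1d0s0") == 0);
 *
 *	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	ret = zpool_add(zhp, nvroot);
 *	nvlist_free(disk);
 *	nvlist_free(nvroot);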
1284 */ 1285int 1286zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) 1287{ 1288 zfs_cmd_t zc = { 0 }; 1289 int ret; 1290 libzfs_handle_t *hdl = zhp->zpool_hdl; 1291 char msg[1024]; 1292 nvlist_t **spares, **l2cache; 1293 uint_t nspares, nl2cache; 1294 1295 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1296 "cannot add to '%s'"), zhp->zpool_name); 1297 1298 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1299 SPA_VERSION_SPARES && 1300 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1301 &spares, &nspares) == 0) { 1302 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1303 "upgraded to add hot spares")); 1304 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1305 } 1306 1307 if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot, 1308 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) { 1309 uint64_t s; 1310 1311 for (s = 0; s < nspares; s++) { 1312 char *path; 1313 1314 if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH, 1315 &path) == 0 && pool_uses_efi(spares[s])) { 1316 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1317 "device '%s' contains an EFI label and " 1318 "cannot be used on root pools."), 1319 zpool_vdev_name(hdl, NULL, spares[s], 1320 B_FALSE)); 1321 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 1322 } 1323 } 1324 } 1325 1326 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1327 SPA_VERSION_L2CACHE && 1328 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1329 &l2cache, &nl2cache) == 0) { 1330 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1331 "upgraded to add cache devices")); 1332 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1333 } 1334 1335 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1336 return (-1); 1337 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1338 1339 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) { 1340 switch (errno) { 1341 case EBUSY: 1342 /* 1343 * This can happen if the user has specified the same 1344 * device multiple times. We can't reliably detect this 1345 * until we try to add it and see we already have a 1346 * label. 1347 */ 1348 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1349 "one or more vdevs refer to the same device")); 1350 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1351 break; 1352 1353 case EOVERFLOW: 1354 /* 1355 * This occurrs when one of the devices is below 1356 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1357 * device was the problem device since there's no 1358 * reliable way to determine device size from userland. 
1359 */ 1360 { 1361 char buf[64]; 1362 1363 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1364 1365 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1366 "device is less than the minimum " 1367 "size (%s)"), buf); 1368 } 1369 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1370 break; 1371 1372 case ENOTSUP: 1373 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1374 "pool must be upgraded to add these vdevs")); 1375 (void) zfs_error(hdl, EZFS_BADVERSION, msg); 1376 break; 1377 1378 case EDOM: 1379 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1380 "root pool can not have multiple vdevs" 1381 " or separate logs")); 1382 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); 1383 break; 1384 1385 case ENOTBLK: 1386 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1387 "cache device must be a disk or disk slice")); 1388 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1389 break; 1390 1391 default: 1392 (void) zpool_standard_error(hdl, errno, msg); 1393 } 1394 1395 ret = -1; 1396 } else { 1397 ret = 0; 1398 } 1399 1400 zcmd_free_nvlists(&zc); 1401 1402 return (ret); 1403} 1404 1405/* 1406 * Exports the pool from the system. The caller must ensure that there are no 1407 * mounted datasets in the pool. 1408 */ 1409static int 1410zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce, 1411 const char *log_str) 1412{ 1413 zfs_cmd_t zc = { 0 }; 1414 char msg[1024]; 1415 1416 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1417 "cannot export '%s'"), zhp->zpool_name); 1418 1419 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1420 zc.zc_cookie = force; 1421 zc.zc_guid = hardforce; 1422 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1423 1424 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { 1425 switch (errno) { 1426 case EXDEV: 1427 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 1428 "use '-f' to override the following errors:\n" 1429 "'%s' has an active shared spare which could be" 1430 " used by other pools once '%s' is exported."), 1431 zhp->zpool_name, zhp->zpool_name); 1432 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, 1433 msg)); 1434 default: 1435 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, 1436 msg)); 1437 } 1438 } 1439 1440 return (0); 1441} 1442 1443int 1444zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str) 1445{ 1446 return (zpool_export_common(zhp, force, B_FALSE, log_str)); 1447} 1448 1449int 1450zpool_export_force(zpool_handle_t *zhp, const char *log_str) 1451{ 1452 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str)); 1453} 1454 1455static void 1456zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun, 1457 nvlist_t *config) 1458{ 1459 nvlist_t *nv = NULL; 1460 uint64_t rewindto; 1461 int64_t loss = -1; 1462 struct tm t; 1463 char timestr[128]; 1464 1465 if (!hdl->libzfs_printerr || config == NULL) 1466 return; 1467 1468 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1469 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) { 1470 return; 1471 } 1472 1473 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1474 return; 1475 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1476 1477 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1478 strftime(timestr, 128, 0, &t) != 0) { 1479 if (dryrun) { 1480 (void) printf(dgettext(TEXT_DOMAIN, 1481 "Would be able to return %s " 1482 "to its state as of %s.\n"), 1483 name, timestr); 1484 } else { 1485 (void) printf(dgettext(TEXT_DOMAIN, 1486 "Pool %s returned to its state as of %s.\n"), 1487 
name, timestr); 1488 } 1489 if (loss > 120) { 1490 (void) printf(dgettext(TEXT_DOMAIN, 1491 "%s approximately %lld "), 1492 dryrun ? "Would discard" : "Discarded", 1493 (loss + 30) / 60); 1494 (void) printf(dgettext(TEXT_DOMAIN, 1495 "minutes of transactions.\n")); 1496 } else if (loss > 0) { 1497 (void) printf(dgettext(TEXT_DOMAIN, 1498 "%s approximately %lld "), 1499 dryrun ? "Would discard" : "Discarded", loss); 1500 (void) printf(dgettext(TEXT_DOMAIN, 1501 "seconds of transactions.\n")); 1502 } 1503 } 1504} 1505 1506void 1507zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1508 nvlist_t *config) 1509{ 1510 nvlist_t *nv = NULL; 1511 int64_t loss = -1; 1512 uint64_t edata = UINT64_MAX; 1513 uint64_t rewindto; 1514 struct tm t; 1515 char timestr[128]; 1516 1517 if (!hdl->libzfs_printerr) 1518 return; 1519 1520 if (reason >= 0) 1521 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1522 else 1523 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1524 1525 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1526 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1527 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 || 1528 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1529 goto no_info; 1530 1531 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1532 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1533 &edata); 1534 1535 (void) printf(dgettext(TEXT_DOMAIN, 1536 "Recovery is possible, but will result in some data loss.\n")); 1537 1538 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1539 strftime(timestr, 128, 0, &t) != 0) { 1540 (void) printf(dgettext(TEXT_DOMAIN, 1541 "\tReturning the pool to its state as of %s\n" 1542 "\tshould correct the problem. "), 1543 timestr); 1544 } else { 1545 (void) printf(dgettext(TEXT_DOMAIN, 1546 "\tReverting the pool to an earlier state " 1547 "should correct the problem.\n\t")); 1548 } 1549 1550 if (loss > 120) { 1551 (void) printf(dgettext(TEXT_DOMAIN, 1552 "Approximately %lld minutes of data\n" 1553 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1554 } else if (loss > 0) { 1555 (void) printf(dgettext(TEXT_DOMAIN, 1556 "Approximately %lld seconds of data\n" 1557 "\tmust be discarded, irreversibly. "), loss); 1558 } 1559 if (edata != 0 && edata != UINT64_MAX) { 1560 if (edata == 1) { 1561 (void) printf(dgettext(TEXT_DOMAIN, 1562 "After rewind, at least\n" 1563 "\tone persistent user-data error will remain. ")); 1564 } else { 1565 (void) printf(dgettext(TEXT_DOMAIN, 1566 "After rewind, several\n" 1567 "\tpersistent user-data errors will remain. ")); 1568 } 1569 } 1570 (void) printf(dgettext(TEXT_DOMAIN, 1571 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1572 reason >= 0 ? "clear" : "import", name); 1573 1574 (void) printf(dgettext(TEXT_DOMAIN, 1575 "A scrub of the pool\n" 1576 "\tis strongly recommended after recovery.\n")); 1577 return; 1578 1579no_info: 1580 (void) printf(dgettext(TEXT_DOMAIN, 1581 "Destroy and re-create the pool from\n\ta backup source.\n")); 1582} 1583 1584/* 1585 * zpool_import() is a contracted interface. Should be kept the same 1586 * if possible. 1587 * 1588 * Applications should use zpool_import_props() to import a pool with 1589 * new properties value to be set. 
1590 */ 1591int 1592zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1593 char *altroot) 1594{ 1595 nvlist_t *props = NULL; 1596 int ret; 1597 1598 if (altroot != NULL) { 1599 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1600 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1601 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1602 newname)); 1603 } 1604 1605 if (nvlist_add_string(props, 1606 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1607 nvlist_add_string(props, 1608 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1609 nvlist_free(props); 1610 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1611 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1612 newname)); 1613 } 1614 } 1615 1616 ret = zpool_import_props(hdl, config, newname, props, 1617 ZFS_IMPORT_NORMAL); 1618 if (props) 1619 nvlist_free(props); 1620 return (ret); 1621} 1622 1623static void 1624print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1625 int indent) 1626{ 1627 nvlist_t **child; 1628 uint_t c, children; 1629 char *vname; 1630 uint64_t is_log = 0; 1631 1632 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1633 &is_log); 1634 1635 if (name != NULL) 1636 (void) printf("\t%*s%s%s\n", indent, "", name, 1637 is_log ? " [log]" : ""); 1638 1639 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1640 &child, &children) != 0) 1641 return; 1642 1643 for (c = 0; c < children; c++) { 1644 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); 1645 print_vdev_tree(hdl, vname, child[c], indent + 2); 1646 free(vname); 1647 } 1648} 1649 1650void 1651zpool_print_unsup_feat(nvlist_t *config) 1652{ 1653 nvlist_t *nvinfo, *unsup_feat; 1654 1655 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 1656 0); 1657 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT, 1658 &unsup_feat) == 0); 1659 1660 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL; 1661 nvp = nvlist_next_nvpair(unsup_feat, nvp)) { 1662 char *desc; 1663 1664 verify(nvpair_type(nvp) == DATA_TYPE_STRING); 1665 verify(nvpair_value_string(nvp, &desc) == 0); 1666 1667 if (strlen(desc) > 0) 1668 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc); 1669 else 1670 (void) printf("\t%s\n", nvpair_name(nvp)); 1671 } 1672} 1673 1674/* 1675 * Import the given pool using the known configuration and a list of 1676 * properties to be set. The configuration should have come from 1677 * zpool_find_import(). The 'newname' parameters control whether the pool 1678 * is imported with a different name. 
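 *
 * Illustrative use (the 'config' nvlist is assumed to come from
 * zpool_find_import(); "newpool" and "/mnt" are placeholders):
 *
 *	nvlist_t *props = NULL;
 *
 *	verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), "/mnt") == 0);
 *
 *	ret = zpool_import_props(g_zfs, config, "newpool", props,
 *	    ZFS_IMPORT_NORMAL);
 *	nvlist_free(props);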
1679 */ 1680int 1681zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1682 nvlist_t *props, int flags) 1683{ 1684 zfs_cmd_t zc = { 0 }; 1685 zpool_rewind_policy_t policy; 1686 nvlist_t *nv = NULL; 1687 nvlist_t *nvinfo = NULL; 1688 nvlist_t *missing = NULL; 1689 char *thename; 1690 char *origname; 1691 int ret; 1692 int error = 0; 1693 char errbuf[1024]; 1694 1695 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1696 &origname) == 0); 1697 1698 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1699 "cannot import pool '%s'"), origname); 1700 1701 if (newname != NULL) { 1702 if (!zpool_name_valid(hdl, B_FALSE, newname)) 1703 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1704 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1705 newname)); 1706 thename = (char *)newname; 1707 } else { 1708 thename = origname; 1709 } 1710 1711 if (props) { 1712 uint64_t version; 1713 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1714 1715 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1716 &version) == 0); 1717 1718 if ((props = zpool_valid_proplist(hdl, origname, 1719 props, version, flags, errbuf)) == NULL) { 1720 return (-1); 1721 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1722 nvlist_free(props); 1723 return (-1); 1724 } 1725 } 1726 1727 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1728 1729 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1730 &zc.zc_guid) == 0); 1731 1732 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1733 nvlist_free(props); 1734 return (-1); 1735 } 1736 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1737 nvlist_free(props); 1738 return (-1); 1739 } 1740 1741 zc.zc_cookie = flags; 1742 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 1743 errno == ENOMEM) { 1744 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 1745 zcmd_free_nvlists(&zc); 1746 return (-1); 1747 } 1748 } 1749 if (ret != 0) 1750 error = errno; 1751 1752 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 1753 zpool_get_rewind_policy(config, &policy); 1754 1755 if (error) { 1756 char desc[1024]; 1757 1758 /* 1759 * Dry-run failed, but we print out what success 1760 * looks like if we found a best txg 1761 */ 1762 if (policy.zrp_request & ZPOOL_TRY_REWIND) { 1763 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1764 B_TRUE, nv); 1765 nvlist_free(nv); 1766 return (-1); 1767 } 1768 1769 if (newname == NULL) 1770 (void) snprintf(desc, sizeof (desc), 1771 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1772 thename); 1773 else 1774 (void) snprintf(desc, sizeof (desc), 1775 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1776 origname, thename); 1777 1778 switch (error) { 1779 case ENOTSUP: 1780 if (nv != NULL && nvlist_lookup_nvlist(nv, 1781 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1782 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) { 1783 (void) printf(dgettext(TEXT_DOMAIN, "This " 1784 "pool uses the following feature(s) not " 1785 "supported by this system:\n")); 1786 zpool_print_unsup_feat(nv); 1787 if (nvlist_exists(nvinfo, 1788 ZPOOL_CONFIG_CAN_RDONLY)) { 1789 (void) printf(dgettext(TEXT_DOMAIN, 1790 "All unsupported features are only " 1791 "required for writing to the pool." 1792 "\nThe pool can be imported using " 1793 "'-o readonly=on'.\n")); 1794 } 1795 } 1796 /* 1797 * Unsupported version. 
1798 */ 1799 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1800 break; 1801 1802 case EINVAL: 1803 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1804 break; 1805 1806 case EROFS: 1807 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1808 "one or more devices is read only")); 1809 (void) zfs_error(hdl, EZFS_BADDEV, desc); 1810 break; 1811 1812 case ENXIO: 1813 if (nv && nvlist_lookup_nvlist(nv, 1814 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1815 nvlist_lookup_nvlist(nvinfo, 1816 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 1817 (void) printf(dgettext(TEXT_DOMAIN, 1818 "The devices below are missing, use " 1819 "'-m' to import the pool anyway:\n")); 1820 print_vdev_tree(hdl, NULL, missing, 2); 1821 (void) printf("\n"); 1822 } 1823 (void) zpool_standard_error(hdl, error, desc); 1824 break; 1825 1826 case EEXIST: 1827 (void) zpool_standard_error(hdl, error, desc); 1828 break; 1829 1830 default: 1831 (void) zpool_standard_error(hdl, error, desc); 1832 zpool_explain_recover(hdl, 1833 newname ? origname : thename, -error, nv); 1834 break; 1835 } 1836 1837 nvlist_free(nv); 1838 ret = -1; 1839 } else { 1840 zpool_handle_t *zhp; 1841 1842 /* 1843 * This should never fail, but play it safe anyway. 1844 */ 1845 if (zpool_open_silent(hdl, thename, &zhp) != 0) 1846 ret = -1; 1847 else if (zhp != NULL) 1848 zpool_close(zhp); 1849 if (policy.zrp_request & 1850 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 1851 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1852 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv); 1853 } 1854 nvlist_free(nv); 1855 return (0); 1856 } 1857 1858 zcmd_free_nvlists(&zc); 1859 nvlist_free(props); 1860 1861 return (ret); 1862} 1863 1864/* 1865 * Scan the pool. 1866 */ 1867int 1868zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func) 1869{ 1870 zfs_cmd_t zc = { 0 }; 1871 char msg[1024]; 1872 libzfs_handle_t *hdl = zhp->zpool_hdl; 1873 1874 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1875 zc.zc_cookie = func; 1876 1877 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 || 1878 (errno == ENOENT && func != POOL_SCAN_NONE)) 1879 return (0); 1880 1881 if (func == POOL_SCAN_SCRUB) { 1882 (void) snprintf(msg, sizeof (msg), 1883 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name); 1884 } else if (func == POOL_SCAN_NONE) { 1885 (void) snprintf(msg, sizeof (msg), 1886 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 1887 zc.zc_name); 1888 } else { 1889 assert(!"unexpected result"); 1890 } 1891 1892 if (errno == EBUSY) { 1893 nvlist_t *nvroot; 1894 pool_scan_stat_t *ps = NULL; 1895 uint_t psc; 1896 1897 verify(nvlist_lookup_nvlist(zhp->zpool_config, 1898 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1899 (void) nvlist_lookup_uint64_array(nvroot, 1900 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 1901 if (ps && ps->pss_func == POOL_SCAN_SCRUB) 1902 return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 1903 else 1904 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 1905 } else if (errno == ENOENT) { 1906 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 1907 } else { 1908 return (zpool_standard_error(hdl, errno, msg)); 1909 } 1910} 1911 1912/* 1913 * This provides a very minimal check whether a given string is likely a 1914 * c#t#d# style string. Users of this are expected to do their own 1915 * verification of the s# part. 1916 */ 1917#define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1])) 1918 1919/* 1920 * More elaborate version for ones which may start with "/dev/dsk/" 1921 * and the like. 
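 *
 * For example (illustrative paths): "/dev/dsk/c0t0d0s0" and
 * "/dev/dsk/c1d0s2/old" pass this check, while "/dev/dsk/emcpower0a"
 * does not.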
1922 */ 1923static int 1924ctd_check_path(char *str) { 1925 /* 1926 * If it starts with a slash, check the last component. 1927 */ 1928 if (str && str[0] == '/') { 1929 char *tmp = strrchr(str, '/'); 1930 1931 /* 1932 * If it ends in "/old", check the second-to-last 1933 * component of the string instead. 1934 */ 1935 if (tmp != str && strcmp(tmp, "/old") == 0) { 1936 for (tmp--; *tmp != '/'; tmp--) 1937 ; 1938 } 1939 str = tmp + 1; 1940 } 1941 return (CTD_CHECK(str)); 1942} 1943 1944/* 1945 * Find a vdev that matches the search criteria specified. We use the 1946 * the nvpair name to determine how we should look for the device. 1947 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL 1948 * spare; but FALSE if its an INUSE spare. 1949 */ 1950static nvlist_t * 1951vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare, 1952 boolean_t *l2cache, boolean_t *log) 1953{ 1954 uint_t c, children; 1955 nvlist_t **child; 1956 nvlist_t *ret; 1957 uint64_t is_log; 1958 char *srchkey; 1959 nvpair_t *pair = nvlist_next_nvpair(search, NULL); 1960 1961 /* Nothing to look for */ 1962 if (search == NULL || pair == NULL) 1963 return (NULL); 1964 1965 /* Obtain the key we will use to search */ 1966 srchkey = nvpair_name(pair); 1967 1968 switch (nvpair_type(pair)) { 1969 case DATA_TYPE_UINT64: 1970 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) { 1971 uint64_t srchval, theguid; 1972 1973 verify(nvpair_value_uint64(pair, &srchval) == 0); 1974 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 1975 &theguid) == 0); 1976 if (theguid == srchval) 1977 return (nv); 1978 } 1979 break; 1980 1981 case DATA_TYPE_STRING: { 1982 char *srchval, *val; 1983 1984 verify(nvpair_value_string(pair, &srchval) == 0); 1985 if (nvlist_lookup_string(nv, srchkey, &val) != 0) 1986 break; 1987 1988 /* 1989 * Search for the requested value. Special cases: 1990 * 1991 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in 1992 * "s0" or "s0/old". The "s0" part is hidden from the user, 1993 * but included in the string, so this matches around it. 1994 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE). 1995 * 1996 * Otherwise, all other searches are simple string compares. 1997 */ 1998 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && 1999 ctd_check_path(val)) { 2000 uint64_t wholedisk = 0; 2001 2002 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 2003 &wholedisk); 2004 if (wholedisk) { 2005 int slen = strlen(srchval); 2006 int vlen = strlen(val); 2007 2008 if (slen != vlen - 2) 2009 break; 2010 2011 /* 2012 * make_leaf_vdev() should only set 2013 * wholedisk for ZPOOL_CONFIG_PATHs which 2014 * will include "/dev/dsk/", giving plenty of 2015 * room for the indices used next. 2016 */ 2017 ASSERT(vlen >= 6); 2018 2019 /* 2020 * strings identical except trailing "s0" 2021 */ 2022 if (strcmp(&val[vlen - 2], "s0") == 0 && 2023 strncmp(srchval, val, slen) == 0) 2024 return (nv); 2025 2026 /* 2027 * strings identical except trailing "s0/old" 2028 */ 2029 if (strcmp(&val[vlen - 6], "s0/old") == 0 && 2030 strcmp(&srchval[slen - 4], "/old") == 0 && 2031 strncmp(srchval, val, slen - 4) == 0) 2032 return (nv); 2033 2034 break; 2035 } 2036 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 2037 char *type, *idx, *end, *p; 2038 uint64_t id, vdev_id; 2039 2040 /* 2041 * Determine our vdev type, keeping in mind 2042 * that the srchval is composed of a type and 2043 * vdev id pair (i.e. mirror-4). 
2044 */ 2045 if ((type = strdup(srchval)) == NULL) 2046 return (NULL); 2047 2048 if ((p = strrchr(type, '-')) == NULL) { 2049 free(type); 2050 break; 2051 } 2052 idx = p + 1; 2053 *p = '\0'; 2054 2055 /* 2056 * If the types don't match then keep looking. 2057 */ 2058 if (strncmp(val, type, strlen(val)) != 0) { 2059 free(type); 2060 break; 2061 } 2062 2063 verify(strncmp(type, VDEV_TYPE_RAIDZ, 2064 strlen(VDEV_TYPE_RAIDZ)) == 0 || 2065 strncmp(type, VDEV_TYPE_MIRROR, 2066 strlen(VDEV_TYPE_MIRROR)) == 0); 2067 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 2068 &id) == 0); 2069 2070 errno = 0; 2071 vdev_id = strtoull(idx, &end, 10); 2072 2073 free(type); 2074 if (errno != 0) 2075 return (NULL); 2076 2077 /* 2078 * Now verify that we have the correct vdev id. 2079 */ 2080 if (vdev_id == id) 2081 return (nv); 2082 } 2083 2084 /* 2085 * Common case 2086 */ 2087 if (strcmp(srchval, val) == 0) 2088 return (nv); 2089 break; 2090 } 2091 2092 default: 2093 break; 2094 } 2095 2096 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2097 &child, &children) != 0) 2098 return (NULL); 2099 2100 for (c = 0; c < children; c++) { 2101 if ((ret = vdev_to_nvlist_iter(child[c], search, 2102 avail_spare, l2cache, NULL)) != NULL) { 2103 /* 2104 * The 'is_log' value is only set for the toplevel 2105 * vdev, not the leaf vdevs. So we always lookup the 2106 * log device from the root of the vdev tree (where 2107 * 'log' is non-NULL). 2108 */ 2109 if (log != NULL && 2110 nvlist_lookup_uint64(child[c], 2111 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 2112 is_log) { 2113 *log = B_TRUE; 2114 } 2115 return (ret); 2116 } 2117 } 2118 2119 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2120 &child, &children) == 0) { 2121 for (c = 0; c < children; c++) { 2122 if ((ret = vdev_to_nvlist_iter(child[c], search, 2123 avail_spare, l2cache, NULL)) != NULL) { 2124 *avail_spare = B_TRUE; 2125 return (ret); 2126 } 2127 } 2128 } 2129 2130 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2131 &child, &children) == 0) { 2132 for (c = 0; c < children; c++) { 2133 if ((ret = vdev_to_nvlist_iter(child[c], search, 2134 avail_spare, l2cache, NULL)) != NULL) { 2135 *l2cache = B_TRUE; 2136 return (ret); 2137 } 2138 } 2139 } 2140 2141 return (NULL); 2142} 2143 2144/* 2145 * Given a physical path (minus the "/devices" prefix), find the 2146 * associated vdev. 2147 */ 2148nvlist_t * 2149zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 2150 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 2151{ 2152 nvlist_t *search, *nvroot, *ret; 2153 2154 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2155 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); 2156 2157 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2158 &nvroot) == 0); 2159 2160 *avail_spare = B_FALSE; 2161 *l2cache = B_FALSE; 2162 if (log != NULL) 2163 *log = B_FALSE; 2164 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2165 nvlist_free(search); 2166 2167 return (ret); 2168} 2169 2170/* 2171 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 
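 *
 * For example (illustrative names): "mirror-0" and "raidz2-1" are interior
 * vdev names, while a leaf device name such as "c0t0d0s0" is not.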
2172 */ 2173boolean_t 2174zpool_vdev_is_interior(const char *name) 2175{ 2176 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2177 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2178 return (B_TRUE); 2179 return (B_FALSE); 2180} 2181 2182nvlist_t * 2183zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2184 boolean_t *l2cache, boolean_t *log) 2185{ 2186 char buf[MAXPATHLEN]; 2187 char *end; 2188 nvlist_t *nvroot, *search, *ret; 2189 uint64_t guid; 2190 2191 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2192 2193 guid = strtoull(path, &end, 10); 2194 if (guid != 0 && *end == '\0') { 2195 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2196 } else if (zpool_vdev_is_interior(path)) { 2197 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2198 } else if (path[0] != '/') { 2199 (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path); 2200 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 2201 } else { 2202 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2203 } 2204 2205 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2206 &nvroot) == 0); 2207 2208 *avail_spare = B_FALSE; 2209 *l2cache = B_FALSE; 2210 if (log != NULL) 2211 *log = B_FALSE; 2212 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2213 nvlist_free(search); 2214 2215 return (ret); 2216} 2217 2218static int 2219vdev_online(nvlist_t *nv) 2220{ 2221 uint64_t ival; 2222 2223 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2224 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2225 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2226 return (0); 2227 2228 return (1); 2229} 2230 2231/* 2232 * Helper function for zpool_get_physpaths(). 2233 */ 2234static int 2235vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2236 size_t *bytes_written) 2237{ 2238 size_t bytes_left, pos, rsz; 2239 char *tmppath; 2240 const char *format; 2241 2242 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2243 &tmppath) != 0) 2244 return (EZFS_NODEVICE); 2245 2246 pos = *bytes_written; 2247 bytes_left = physpath_size - pos; 2248 format = (pos == 0) ? "%s" : " %s"; 2249 2250 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2251 *bytes_written += rsz; 2252 2253 if (rsz >= bytes_left) { 2254 /* if physpath was not copied properly, clear it */ 2255 if (bytes_left != 0) { 2256 physpath[pos] = 0; 2257 } 2258 return (EZFS_NOSPC); 2259 } 2260 return (0); 2261} 2262 2263static int 2264vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2265 size_t *rsz, boolean_t is_spare) 2266{ 2267 char *type; 2268 int ret; 2269 2270 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2271 return (EZFS_INVALCONFIG); 2272 2273 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2274 /* 2275 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2276 * For a spare vdev, we only want to boot from the active 2277 * spare device. 
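		 * If the property is missing or zero, the device is treated
		 * as an inactive spare and rejected with EZFS_INVALCONFIG.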
2278 */ 2279 if (is_spare) { 2280 uint64_t spare = 0; 2281 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2282 &spare); 2283 if (!spare) 2284 return (EZFS_INVALCONFIG); 2285 } 2286 2287 if (vdev_online(nv)) { 2288 if ((ret = vdev_get_one_physpath(nv, physpath, 2289 phypath_size, rsz)) != 0) 2290 return (ret); 2291 } 2292 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2293 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2294 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2295 nvlist_t **child; 2296 uint_t count; 2297 int i, ret; 2298 2299 if (nvlist_lookup_nvlist_array(nv, 2300 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2301 return (EZFS_INVALCONFIG); 2302 2303 for (i = 0; i < count; i++) { 2304 ret = vdev_get_physpaths(child[i], physpath, 2305 phypath_size, rsz, is_spare); 2306 if (ret == EZFS_NOSPC) 2307 return (ret); 2308 } 2309 } 2310 2311 return (EZFS_POOL_INVALARG); 2312} 2313 2314/* 2315 * Get phys_path for a root pool config. 2316 * Return 0 on success; non-zero on failure. 2317 */ 2318static int 2319zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2320{ 2321 size_t rsz; 2322 nvlist_t *vdev_root; 2323 nvlist_t **child; 2324 uint_t count; 2325 char *type; 2326 2327 rsz = 0; 2328 2329 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2330 &vdev_root) != 0) 2331 return (EZFS_INVALCONFIG); 2332 2333 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2334 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2335 &child, &count) != 0) 2336 return (EZFS_INVALCONFIG); 2337 2338 /* 2339 * root pool can not have EFI labeled disks and can only have 2340 * a single top-level vdev. 2341 */ 2342 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 || 2343 pool_uses_efi(vdev_root)) 2344 return (EZFS_POOL_INVALARG); 2345 2346 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2347 B_FALSE); 2348 2349 /* No online devices */ 2350 if (rsz == 0) 2351 return (EZFS_NODEVICE); 2352 2353 return (0); 2354} 2355 2356/* 2357 * Get phys_path for a root pool 2358 * Return 0 on success; non-zero on failure. 2359 */ 2360int 2361zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2362{ 2363 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2364 phypath_size)); 2365} 2366 2367/* 2368 * If the device has being dynamically expanded then we need to relabel 2369 * the disk to use the new unallocated space. 2370 */ 2371static int 2372zpool_relabel_disk(libzfs_handle_t *hdl, const char *name) 2373{ 2374#ifdef sun 2375 char path[MAXPATHLEN]; 2376 char errbuf[1024]; 2377 int fd, error; 2378 int (*_efi_use_whole_disk)(int); 2379 2380 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, 2381 "efi_use_whole_disk")) == NULL) 2382 return (-1); 2383 2384 (void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name); 2385 2386 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2387 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2388 "relabel '%s': unable to open device"), name); 2389 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2390 } 2391 2392 /* 2393 * It's possible that we might encounter an error if the device 2394 * does not have any unallocated space left. If so, we simply 2395 * ignore that error and continue on. 
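	 * efi_use_whole_disk() reports that case as VT_ENOSPC, which is the
	 * only failure tolerated below; anything else is reported as an
	 * inability to read the disk capacity.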
2396 */ 2397 error = _efi_use_whole_disk(fd); 2398 (void) close(fd); 2399 if (error && error != VT_ENOSPC) { 2400 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2401 "relabel '%s': unable to read disk capacity"), name); 2402 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2403 } 2404#endif /* sun */ 2405 return (0); 2406} 2407 2408/* 2409 * Bring the specified vdev online. The 'flags' parameter is a set of the 2410 * ZFS_ONLINE_* flags. 2411 */ 2412int 2413zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2414 vdev_state_t *newstate) 2415{ 2416 zfs_cmd_t zc = { 0 }; 2417 char msg[1024]; 2418 nvlist_t *tgt; 2419 boolean_t avail_spare, l2cache, islog; 2420 libzfs_handle_t *hdl = zhp->zpool_hdl; 2421 2422 if (flags & ZFS_ONLINE_EXPAND) { 2423 (void) snprintf(msg, sizeof (msg), 2424 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2425 } else { 2426 (void) snprintf(msg, sizeof (msg), 2427 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2428 } 2429 2430 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2431 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2432 &islog)) == NULL) 2433 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2434 2435 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2436 2437 if (avail_spare) 2438 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2439 2440 if (flags & ZFS_ONLINE_EXPAND || 2441 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) { 2442 char *pathname = NULL; 2443 uint64_t wholedisk = 0; 2444 2445 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2446 &wholedisk); 2447 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, 2448 &pathname) == 0); 2449 2450 /* 2451 * XXX - L2ARC 1.0 devices can't support expansion. 2452 */ 2453 if (l2cache) { 2454 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2455 "cannot expand cache devices")); 2456 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2457 } 2458 2459 if (wholedisk) { 2460 pathname += strlen(DISK_ROOT) + 1; 2461 (void) zpool_relabel_disk(hdl, pathname); 2462 } 2463 } 2464 2465 zc.zc_cookie = VDEV_STATE_ONLINE; 2466 zc.zc_obj = flags; 2467 2468 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2469 if (errno == EINVAL) { 2470 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2471 "from this pool into a new one. Use '%s' " 2472 "instead"), "zpool detach"); 2473 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2474 } 2475 return (zpool_standard_error(hdl, errno, msg)); 2476 } 2477 2478 *newstate = zc.zc_cookie; 2479 return (0); 2480} 2481 2482/* 2483 * Take the specified vdev offline 2484 */ 2485int 2486zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2487{ 2488 zfs_cmd_t zc = { 0 }; 2489 char msg[1024]; 2490 nvlist_t *tgt; 2491 boolean_t avail_spare, l2cache; 2492 libzfs_handle_t *hdl = zhp->zpool_hdl; 2493 2494 (void) snprintf(msg, sizeof (msg), 2495 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2496 2497 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2498 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2499 NULL)) == NULL) 2500 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2501 2502 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2503 2504 if (avail_spare) 2505 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2506 2507 zc.zc_cookie = VDEV_STATE_OFFLINE; 2508 zc.zc_obj = istmp ? 
ZFS_OFFLINE_TEMPORARY : 0; 2509 2510 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2511 return (0); 2512 2513 switch (errno) { 2514 case EBUSY: 2515 2516 /* 2517 * There are no other replicas of this device. 2518 */ 2519 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2520 2521 case EEXIST: 2522 /* 2523 * The log device has unplayed logs 2524 */ 2525 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2526 2527 default: 2528 return (zpool_standard_error(hdl, errno, msg)); 2529 } 2530} 2531 2532/* 2533 * Mark the given vdev faulted. 2534 */ 2535int 2536zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2537{ 2538 zfs_cmd_t zc = { 0 }; 2539 char msg[1024]; 2540 libzfs_handle_t *hdl = zhp->zpool_hdl; 2541 2542 (void) snprintf(msg, sizeof (msg), 2543 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2544 2545 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2546 zc.zc_guid = guid; 2547 zc.zc_cookie = VDEV_STATE_FAULTED; 2548 zc.zc_obj = aux; 2549 2550 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2551 return (0); 2552 2553 switch (errno) { 2554 case EBUSY: 2555 2556 /* 2557 * There are no other replicas of this device. 2558 */ 2559 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2560 2561 default: 2562 return (zpool_standard_error(hdl, errno, msg)); 2563 } 2564 2565} 2566 2567/* 2568 * Mark the given vdev degraded. 2569 */ 2570int 2571zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2572{ 2573 zfs_cmd_t zc = { 0 }; 2574 char msg[1024]; 2575 libzfs_handle_t *hdl = zhp->zpool_hdl; 2576 2577 (void) snprintf(msg, sizeof (msg), 2578 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2579 2580 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2581 zc.zc_guid = guid; 2582 zc.zc_cookie = VDEV_STATE_DEGRADED; 2583 zc.zc_obj = aux; 2584 2585 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2586 return (0); 2587 2588 return (zpool_standard_error(hdl, errno, msg)); 2589} 2590 2591/* 2592 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2593 * a hot spare. 2594 */ 2595static boolean_t 2596is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2597{ 2598 nvlist_t **child; 2599 uint_t c, children; 2600 char *type; 2601 2602 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2603 &children) == 0) { 2604 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2605 &type) == 0); 2606 2607 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2608 children == 2 && child[which] == tgt) 2609 return (B_TRUE); 2610 2611 for (c = 0; c < children; c++) 2612 if (is_replacing_spare(child[c], tgt, which)) 2613 return (B_TRUE); 2614 } 2615 2616 return (B_FALSE); 2617} 2618 2619/* 2620 * Attach new_disk (fully described by nvroot) to old_disk. 2621 * If 'replacing' is specified, the new disk will replace the old one. 
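 * The nvroot passed in must describe exactly one new leaf vdev; the
 * 'replacing' flag is handed to the kernel through zc_cookie.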
2622 */ 2623int 2624zpool_vdev_attach(zpool_handle_t *zhp, 2625 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2626{ 2627 zfs_cmd_t zc = { 0 }; 2628 char msg[1024]; 2629 int ret; 2630 nvlist_t *tgt; 2631 boolean_t avail_spare, l2cache, islog; 2632 uint64_t val; 2633 char *newname; 2634 nvlist_t **child; 2635 uint_t children; 2636 nvlist_t *config_root; 2637 libzfs_handle_t *hdl = zhp->zpool_hdl; 2638 boolean_t rootpool = zpool_is_bootable(zhp); 2639 2640 if (replacing) 2641 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2642 "cannot replace %s with %s"), old_disk, new_disk); 2643 else 2644 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2645 "cannot attach %s to %s"), new_disk, old_disk); 2646 2647 /* 2648 * If this is a root pool, make sure that we're not attaching an 2649 * EFI labeled device. 2650 */ 2651 if (rootpool && pool_uses_efi(nvroot)) { 2652 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2653 "EFI labeled devices are not supported on root pools.")); 2654 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 2655 } 2656 2657 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2658 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2659 &islog)) == 0) 2660 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2661 2662 if (avail_spare) 2663 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2664 2665 if (l2cache) 2666 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2667 2668 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2669 zc.zc_cookie = replacing; 2670 2671 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2672 &child, &children) != 0 || children != 1) { 2673 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2674 "new device must be a single disk")); 2675 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2676 } 2677 2678 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2679 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2680 2681 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2682 return (-1); 2683 2684 /* 2685 * If the target is a hot spare that has been swapped in, we can only 2686 * replace it with another hot spare. 2687 */ 2688 if (replacing && 2689 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2690 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2691 NULL) == NULL || !avail_spare) && 2692 is_replacing_spare(config_root, tgt, 1)) { 2693 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2694 "can only be replaced by another hot spare")); 2695 free(newname); 2696 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2697 } 2698 2699 free(newname); 2700 2701 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2702 return (-1); 2703 2704 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2705 2706 zcmd_free_nvlists(&zc); 2707 2708 if (ret == 0) { 2709 if (rootpool) { 2710 /* 2711 * XXX need a better way to prevent user from 2712 * booting up a half-baked vdev. 
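			 * For now we only print a warning telling the user
			 * to wait for the resilver to finish and to update
			 * the boot code on the newly attached disk before
			 * rebooting.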
2713 */ 2714 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2715 "sure to wait until resilver is done " 2716 "before rebooting.\n")); 2717 (void) fprintf(stderr, "\n"); 2718 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If " 2719 "you boot from pool '%s', you may need to update\n" 2720 "boot code on newly attached disk '%s'.\n\n" 2721 "Assuming you use GPT partitioning and 'da0' is " 2722 "your new boot disk\n" 2723 "you may use the following command:\n\n" 2724 "\tgpart bootcode -b /boot/pmbr -p " 2725 "/boot/gptzfsboot -i 1 da0\n\n"), 2726 zhp->zpool_name, new_disk); 2727 } 2728 return (0); 2729 } 2730 2731 switch (errno) { 2732 case ENOTSUP: 2733 /* 2734 * Can't attach to or replace this type of vdev. 2735 */ 2736 if (replacing) { 2737 uint64_t version = zpool_get_prop_int(zhp, 2738 ZPOOL_PROP_VERSION, NULL); 2739 2740 if (islog) 2741 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2742 "cannot replace a log with a spare")); 2743 else if (version >= SPA_VERSION_MULTI_REPLACE) 2744 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2745 "already in replacing/spare config; wait " 2746 "for completion or use 'zpool detach'")); 2747 else 2748 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2749 "cannot replace a replacing device")); 2750 } else { 2751 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2752 "can only attach to mirrors and top-level " 2753 "disks")); 2754 } 2755 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2756 break; 2757 2758 case EINVAL: 2759 /* 2760 * The new device must be a single disk. 2761 */ 2762 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2763 "new device must be a single disk")); 2764 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2765 break; 2766 2767 case EBUSY: 2768 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2769 new_disk); 2770 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2771 break; 2772 2773 case EOVERFLOW: 2774 /* 2775 * The new device is too small. 2776 */ 2777 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2778 "device is too small")); 2779 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2780 break; 2781 2782 case EDOM: 2783 /* 2784 * The new device has a different alignment requirement. 2785 */ 2786 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2787 "devices have different sector alignment")); 2788 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2789 break; 2790 2791 case ENAMETOOLONG: 2792 /* 2793 * The resulting top-level vdev spec won't fit in the label. 2794 */ 2795 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2796 break; 2797 2798 default: 2799 (void) zpool_standard_error(hdl, errno, msg); 2800 } 2801 2802 return (-1); 2803} 2804 2805/* 2806 * Detach the specified device. 
2807 */ 2808int 2809zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 2810{ 2811 zfs_cmd_t zc = { 0 }; 2812 char msg[1024]; 2813 nvlist_t *tgt; 2814 boolean_t avail_spare, l2cache; 2815 libzfs_handle_t *hdl = zhp->zpool_hdl; 2816 2817 (void) snprintf(msg, sizeof (msg), 2818 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 2819 2820 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2821 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2822 NULL)) == 0) 2823 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2824 2825 if (avail_spare) 2826 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2827 2828 if (l2cache) 2829 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2830 2831 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2832 2833 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 2834 return (0); 2835 2836 switch (errno) { 2837 2838 case ENOTSUP: 2839 /* 2840 * Can't detach from this type of vdev. 2841 */ 2842 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 2843 "applicable to mirror and replacing vdevs")); 2844 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2845 break; 2846 2847 case EBUSY: 2848 /* 2849 * There are no other replicas of this device. 2850 */ 2851 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 2852 break; 2853 2854 default: 2855 (void) zpool_standard_error(hdl, errno, msg); 2856 } 2857 2858 return (-1); 2859} 2860 2861/* 2862 * Find a mirror vdev in the source nvlist. 2863 * 2864 * The mchild array contains a list of disks in one of the top-level mirrors 2865 * of the source pool. The schild array contains a list of disks that the 2866 * user specified on the command line. We loop over the mchild array to 2867 * see if any entry in the schild array matches. 2868 * 2869 * If a disk in the mchild array is found in the schild array, we return 2870 * the index of that entry. Otherwise we return -1. 2871 */ 2872static int 2873find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2874 nvlist_t **schild, uint_t schildren) 2875{ 2876 uint_t mc; 2877 2878 for (mc = 0; mc < mchildren; mc++) { 2879 uint_t sc; 2880 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2881 mchild[mc], B_FALSE); 2882 2883 for (sc = 0; sc < schildren; sc++) { 2884 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2885 schild[sc], B_FALSE); 2886 boolean_t result = (strcmp(mpath, spath) == 0); 2887 2888 free(spath); 2889 if (result) { 2890 free(mpath); 2891 return (mc); 2892 } 2893 } 2894 2895 free(mpath); 2896 } 2897 2898 return (-1); 2899} 2900 2901/* 2902 * Split a mirror pool. If newroot points to null, then a new nvlist 2903 * is generated and it is the responsibility of the caller to free it. 
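 * Apart from slog and hole slots (which are carried over as hole vdevs),
 * every top-level vdev in the source pool must be a mirror; one disk from
 * each mirror (the one the caller named or, by default, the mirror's last
 * child) goes into the new pool.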
2904 */ 2905int 2906zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2907 nvlist_t *props, splitflags_t flags) 2908{ 2909 zfs_cmd_t zc = { 0 }; 2910 char msg[1024]; 2911 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2912 nvlist_t **varray = NULL, *zc_props = NULL; 2913 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2914 libzfs_handle_t *hdl = zhp->zpool_hdl; 2915 uint64_t vers; 2916 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2917 int retval = 0; 2918 2919 (void) snprintf(msg, sizeof (msg), 2920 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2921 2922 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2923 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2924 2925 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2926 (void) fprintf(stderr, gettext("Internal error: unable to " 2927 "retrieve pool configuration\n")); 2928 return (-1); 2929 } 2930 2931 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2932 == 0); 2933 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2934 2935 if (props) { 2936 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 2937 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2938 props, vers, flags, msg)) == NULL) 2939 return (-1); 2940 } 2941 2942 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2943 &children) != 0) { 2944 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2945 "Source pool is missing vdev tree")); 2946 if (zc_props) 2947 nvlist_free(zc_props); 2948 return (-1); 2949 } 2950 2951 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2952 vcount = 0; 2953 2954 if (*newroot == NULL || 2955 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2956 &newchild, &newchildren) != 0) 2957 newchildren = 0; 2958 2959 for (c = 0; c < children; c++) { 2960 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2961 char *type; 2962 nvlist_t **mchild, *vdev; 2963 uint_t mchildren; 2964 int entry; 2965 2966 /* 2967 * Unlike cache & spares, slogs are stored in the 2968 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2969 */ 2970 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2971 &is_log); 2972 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2973 &is_hole); 2974 if (is_log || is_hole) { 2975 /* 2976 * Create a hole vdev and put it in the config. 2977 */ 2978 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2979 goto out; 2980 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2981 VDEV_TYPE_HOLE) != 0) 2982 goto out; 2983 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2984 1) != 0) 2985 goto out; 2986 if (lastlog == 0) 2987 lastlog = vcount; 2988 varray[vcount++] = vdev; 2989 continue; 2990 } 2991 lastlog = 0; 2992 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 2993 == 0); 2994 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 2995 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2996 "Source pool must be composed only of mirrors\n")); 2997 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2998 goto out; 2999 } 3000 3001 verify(nvlist_lookup_nvlist_array(child[c], 3002 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 3003 3004 /* find or add an entry for this top-level vdev */ 3005 if (newchildren > 0 && 3006 (entry = find_vdev_entry(zhp, mchild, mchildren, 3007 newchild, newchildren)) >= 0) { 3008 /* We found a disk that the user specified. */ 3009 vdev = mchild[entry]; 3010 ++found; 3011 } else { 3012 /* User didn't specify a disk for this vdev. 
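			 * Default to the mirror's last child.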
*/ 3013 vdev = mchild[mchildren - 1]; 3014 } 3015 3016 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 3017 goto out; 3018 } 3019 3020 /* did we find every disk the user specified? */ 3021 if (found != newchildren) { 3022 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 3023 "include at most one disk from each mirror")); 3024 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3025 goto out; 3026 } 3027 3028 /* Prepare the nvlist for populating. */ 3029 if (*newroot == NULL) { 3030 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 3031 goto out; 3032 freelist = B_TRUE; 3033 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 3034 VDEV_TYPE_ROOT) != 0) 3035 goto out; 3036 } else { 3037 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 3038 } 3039 3040 /* Add all the children we found */ 3041 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 3042 lastlog == 0 ? vcount : lastlog) != 0) 3043 goto out; 3044 3045 /* 3046 * If we're just doing a dry run, exit now with success. 3047 */ 3048 if (flags.dryrun) { 3049 memory_err = B_FALSE; 3050 freelist = B_FALSE; 3051 goto out; 3052 } 3053 3054 /* now build up the config list & call the ioctl */ 3055 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3056 goto out; 3057 3058 if (nvlist_add_nvlist(newconfig, 3059 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3060 nvlist_add_string(newconfig, 3061 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3062 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3063 goto out; 3064 3065 /* 3066 * The new pool is automatically part of the namespace unless we 3067 * explicitly export it. 3068 */ 3069 if (!flags.import) 3070 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 3071 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3072 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 3073 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 3074 goto out; 3075 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 3076 goto out; 3077 3078 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 3079 retval = zpool_standard_error(hdl, errno, msg); 3080 goto out; 3081 } 3082 3083 freelist = B_FALSE; 3084 memory_err = B_FALSE; 3085 3086out: 3087 if (varray != NULL) { 3088 int v; 3089 3090 for (v = 0; v < vcount; v++) 3091 nvlist_free(varray[v]); 3092 free(varray); 3093 } 3094 zcmd_free_nvlists(&zc); 3095 if (zc_props) 3096 nvlist_free(zc_props); 3097 if (newconfig) 3098 nvlist_free(newconfig); 3099 if (freelist) { 3100 nvlist_free(*newroot); 3101 *newroot = NULL; 3102 } 3103 3104 if (retval != 0) 3105 return (retval); 3106 3107 if (memory_err) 3108 return (no_memory(hdl)); 3109 3110 return (0); 3111} 3112 3113/* 3114 * Remove the given device. Currently, this is supported only for hot spares 3115 * and level 2 cache devices. 3116 */ 3117int 3118zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3119{ 3120 zfs_cmd_t zc = { 0 }; 3121 char msg[1024]; 3122 nvlist_t *tgt; 3123 boolean_t avail_spare, l2cache, islog; 3124 libzfs_handle_t *hdl = zhp->zpool_hdl; 3125 uint64_t version; 3126 3127 (void) snprintf(msg, sizeof (msg), 3128 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3129 3130 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3131 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3132 &islog)) == 0) 3133 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3134 /* 3135 * XXX - this should just go away. 
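	 * Until that happens, anything other than an inactive hot spare,
	 * cache device, or log device is rejected below.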
3136 */ 3137 if (!avail_spare && !l2cache && !islog) { 3138 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3139 "only inactive hot spares, cache, top-level, " 3140 "or log devices can be removed")); 3141 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3142 } 3143 3144 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 3145 if (islog && version < SPA_VERSION_HOLES) { 3146 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3147 "pool must be upgrade to support log removal")); 3148 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 3149 } 3150 3151 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3152 3153 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3154 return (0); 3155 3156 return (zpool_standard_error(hdl, errno, msg)); 3157} 3158 3159/* 3160 * Clear the errors for the pool, or the particular device if specified. 3161 */ 3162int 3163zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 3164{ 3165 zfs_cmd_t zc = { 0 }; 3166 char msg[1024]; 3167 nvlist_t *tgt; 3168 zpool_rewind_policy_t policy; 3169 boolean_t avail_spare, l2cache; 3170 libzfs_handle_t *hdl = zhp->zpool_hdl; 3171 nvlist_t *nvi = NULL; 3172 int error; 3173 3174 if (path) 3175 (void) snprintf(msg, sizeof (msg), 3176 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3177 path); 3178 else 3179 (void) snprintf(msg, sizeof (msg), 3180 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3181 zhp->zpool_name); 3182 3183 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3184 if (path) { 3185 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 3186 &l2cache, NULL)) == 0) 3187 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3188 3189 /* 3190 * Don't allow error clearing for hot spares. Do allow 3191 * error clearing for l2cache devices. 3192 */ 3193 if (avail_spare) 3194 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3195 3196 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 3197 &zc.zc_guid) == 0); 3198 } 3199 3200 zpool_get_rewind_policy(rewindnvl, &policy); 3201 zc.zc_cookie = policy.zrp_request; 3202 3203 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 3204 return (-1); 3205 3206 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 3207 return (-1); 3208 3209 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 3210 errno == ENOMEM) { 3211 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 3212 zcmd_free_nvlists(&zc); 3213 return (-1); 3214 } 3215 } 3216 3217 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) && 3218 errno != EPERM && errno != EACCES)) { 3219 if (policy.zrp_request & 3220 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 3221 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3222 zpool_rewind_exclaim(hdl, zc.zc_name, 3223 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), 3224 nvi); 3225 nvlist_free(nvi); 3226 } 3227 zcmd_free_nvlists(&zc); 3228 return (0); 3229 } 3230 3231 zcmd_free_nvlists(&zc); 3232 return (zpool_standard_error(hdl, errno, msg)); 3233} 3234 3235/* 3236 * Similar to zpool_clear(), but takes a GUID (used by fmd). 
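 * No rewind policy applies here; the request is issued with
 * ZPOOL_NO_REWIND.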
3237 */ 3238int 3239zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3240{ 3241 zfs_cmd_t zc = { 0 }; 3242 char msg[1024]; 3243 libzfs_handle_t *hdl = zhp->zpool_hdl; 3244 3245 (void) snprintf(msg, sizeof (msg), 3246 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3247 guid); 3248 3249 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3250 zc.zc_guid = guid; 3251 zc.zc_cookie = ZPOOL_NO_REWIND; 3252 3253 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3254 return (0); 3255 3256 return (zpool_standard_error(hdl, errno, msg)); 3257} 3258 3259/* 3260 * Change the GUID for a pool. 3261 */ 3262int 3263zpool_reguid(zpool_handle_t *zhp) 3264{ 3265 char msg[1024]; 3266 libzfs_handle_t *hdl = zhp->zpool_hdl; 3267 zfs_cmd_t zc = { 0 }; 3268 3269 (void) snprintf(msg, sizeof (msg), 3270 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3271 3272 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3273 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3274 return (0); 3275 3276 return (zpool_standard_error(hdl, errno, msg)); 3277} 3278 3279/* 3280 * Reopen the pool. 3281 */ 3282int 3283zpool_reopen(zpool_handle_t *zhp) 3284{ 3285 zfs_cmd_t zc = { 0 }; 3286 char msg[1024]; 3287 libzfs_handle_t *hdl = zhp->zpool_hdl; 3288 3289 (void) snprintf(msg, sizeof (msg), 3290 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), 3291 zhp->zpool_name); 3292 3293 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3294 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0) 3295 return (0); 3296 return (zpool_standard_error(hdl, errno, msg)); 3297} 3298 3299/* 3300 * Convert from a devid string to a path. 3301 */ 3302static char * 3303devid_to_path(char *devid_str) 3304{ 3305 ddi_devid_t devid; 3306 char *minor; 3307 char *path; 3308 devid_nmlist_t *list = NULL; 3309 int ret; 3310 3311 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3312 return (NULL); 3313 3314 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3315 3316 devid_str_free(minor); 3317 devid_free(devid); 3318 3319 if (ret != 0) 3320 return (NULL); 3321 3322 if ((path = strdup(list[0].devname)) == NULL) 3323 return (NULL); 3324 3325 devid_free_nmlist(list); 3326 3327 return (path); 3328} 3329 3330/* 3331 * Convert from a path to a devid string. 3332 */ 3333static char * 3334path_to_devid(const char *path) 3335{ 3336#ifdef have_devid 3337 int fd; 3338 ddi_devid_t devid; 3339 char *minor, *ret; 3340 3341 if ((fd = open(path, O_RDONLY)) < 0) 3342 return (NULL); 3343 3344 minor = NULL; 3345 ret = NULL; 3346 if (devid_get(fd, &devid) == 0) { 3347 if (devid_get_minor_name(fd, &minor) == 0) 3348 ret = devid_str_encode(devid, minor); 3349 if (minor != NULL) 3350 devid_str_free(minor); 3351 devid_free(devid); 3352 } 3353 (void) close(fd); 3354 3355 return (ret); 3356#else 3357 return (NULL); 3358#endif 3359} 3360 3361/* 3362 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3363 * ignore any failure here, since a common case is for an unprivileged user to 3364 * type 'zpool status', and we'll display the correct information anyway. 
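 * The return value of the ZFS_IOC_VDEV_SETPATH ioctl is deliberately
 * discarded for the same reason.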
3365 */ 3366static void 3367set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3368{ 3369 zfs_cmd_t zc = { 0 }; 3370 3371 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3372 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3373 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3374 &zc.zc_guid) == 0); 3375 3376 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3377} 3378 3379/* 3380 * Given a vdev, return the name to display in iostat. If the vdev has a path, 3381 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3382 * We also check if this is a whole disk, in which case we strip off the 3383 * trailing 's0' slice name. 3384 * 3385 * This routine is also responsible for identifying when disks have been 3386 * reconfigured in a new location. The kernel will have opened the device by 3387 * devid, but the path will still refer to the old location. To catch this, we 3388 * first do a path -> devid translation (which is fast for the common case). If 3389 * the devid matches, we're done. If not, we do a reverse devid -> path 3390 * translation and issue the appropriate ioctl() to update the path of the vdev. 3391 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3392 * of these checks. 3393 */ 3394char * 3395zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3396 boolean_t verbose) 3397{ 3398 char *path, *devid; 3399 uint64_t value; 3400 char buf[64]; 3401 vdev_stat_t *vs; 3402 uint_t vsc; 3403 int have_stats; 3404 int have_path; 3405 3406 have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3407 (uint64_t **)&vs, &vsc) == 0; 3408 have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0; 3409 3410 /* 3411 * If the device is not currently present, assume it will not 3412 * come back at the same device path. Display the device by GUID. 3413 */ 3414 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 || 3415 have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN) { 3416 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3417 &value) == 0); 3418 (void) snprintf(buf, sizeof (buf), "%llu", 3419 (u_longlong_t)value); 3420 path = buf; 3421 } else if (have_path) { 3422 3423 /* 3424 * If the device is dead (faulted, offline, etc) then don't 3425 * bother opening it. Otherwise we may be forcing the user to 3426 * open a misbehaving device, which can have undesirable 3427 * effects. 3428 */ 3429 if ((have_stats == 0 || 3430 vs->vs_state >= VDEV_STATE_DEGRADED) && 3431 zhp != NULL && 3432 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3433 /* 3434 * Determine if the current path is correct. 3435 */ 3436 char *newdevid = path_to_devid(path); 3437 3438 if (newdevid == NULL || 3439 strcmp(devid, newdevid) != 0) { 3440 char *newpath; 3441 3442 if ((newpath = devid_to_path(devid)) != NULL) { 3443 /* 3444 * Update the path appropriately. 
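					 * Besides informing the kernel via
					 * set_path(), also update the
					 * in-memory nvlist so that the name
					 * returned below reflects the new
					 * location.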
3445 */ 3446 set_path(zhp, nv, newpath); 3447 if (nvlist_add_string(nv, 3448 ZPOOL_CONFIG_PATH, newpath) == 0) 3449 verify(nvlist_lookup_string(nv, 3450 ZPOOL_CONFIG_PATH, 3451 &path) == 0); 3452 free(newpath); 3453 } 3454 } 3455 3456 if (newdevid) 3457 devid_str_free(newdevid); 3458 } 3459 3460#ifdef sun 3461 if (strncmp(path, "/dev/dsk/", 9) == 0) 3462 path += 9; 3463 3464 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3465 &value) == 0 && value) { 3466 int pathlen = strlen(path); 3467 char *tmp = zfs_strdup(hdl, path); 3468 3469 /* 3470 * If it starts with c#, and ends with "s0", chop 3471 * the "s0" off, or if it ends with "s0/old", remove 3472 * the "s0" from the middle. 3473 */ 3474 if (CTD_CHECK(tmp)) { 3475 if (strcmp(&tmp[pathlen - 2], "s0") == 0) { 3476 tmp[pathlen - 2] = '\0'; 3477 } else if (pathlen > 6 && 3478 strcmp(&tmp[pathlen - 6], "s0/old") == 0) { 3479 (void) strcpy(&tmp[pathlen - 6], 3480 "/old"); 3481 } 3482 } 3483 return (tmp); 3484 } 3485#else /* !sun */ 3486 if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 3487 path += sizeof(_PATH_DEV) - 1; 3488#endif /* !sun */ 3489 } else { 3490 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 3491 3492 /* 3493 * If it's a raidz device, we need to stick in the parity level. 3494 */ 3495 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 3496 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 3497 &value) == 0); 3498 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 3499 (u_longlong_t)value); 3500 path = buf; 3501 } 3502 3503 /* 3504 * We identify each top-level vdev by using a <type-id> 3505 * naming convention. 3506 */ 3507 if (verbose) { 3508 uint64_t id; 3509 3510 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 3511 &id) == 0); 3512 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 3513 (u_longlong_t)id); 3514 path = buf; 3515 } 3516 } 3517 3518 return (zfs_strdup(hdl, path)); 3519} 3520 3521static int 3522zbookmark_compare(const void *a, const void *b) 3523{ 3524 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 3525} 3526 3527/* 3528 * Retrieve the persistent error log, uniquify the members, and return to the 3529 * caller. 3530 */ 3531int 3532zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 3533{ 3534 zfs_cmd_t zc = { 0 }; 3535 uint64_t count; 3536 zbookmark_phys_t *zb = NULL; 3537 int i; 3538 3539 /* 3540 * Retrieve the raw error list from the kernel. If the number of errors 3541 * has increased, allocate more space and continue until we get the 3542 * entire list. 3543 */ 3544 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3545 &count) == 0); 3546 if (count == 0) 3547 return (0); 3548 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3549 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL) 3550 return (-1); 3551 zc.zc_nvlist_dst_size = count; 3552 (void) strcpy(zc.zc_name, zhp->zpool_name); 3553 for (;;) { 3554 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3555 &zc) != 0) { 3556 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3557 if (errno == ENOMEM) { 3558 void *dst; 3559 3560 count = zc.zc_nvlist_dst_size; 3561 dst = zfs_alloc(zhp->zpool_hdl, count * 3562 sizeof (zbookmark_phys_t)); 3563 if (dst == NULL) 3564 return (-1); 3565 zc.zc_nvlist_dst = (uintptr_t)dst; 3566 } else { 3567 return (-1); 3568 } 3569 } else { 3570 break; 3571 } 3572 } 3573 3574 /* 3575 * Sort the resulting bookmarks. This is a little confusing due to the 3576 * implementation of ZFS_IOC_ERROR_LOG. 
The bookmarks are copied last 3577	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks 3578	 * _not_ copied as part of the process.  So we point the start of our 3579	 * array appropriately and decrement the total number of elements. 3580	 */ 3581	zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) + 3582	    zc.zc_nvlist_dst_size; 3583	count -= zc.zc_nvlist_dst_size; 3584 3585	qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_compare); 3586 3587	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 3588 3589	/* 3590	 * Fill in the nverrlistp with nvlists of dataset and object numbers. 3591	 */ 3592	for (i = 0; i < count; i++) { 3593		nvlist_t *nv; 3594 3595		/* ignoring zb_blkid and zb_level for now */ 3596		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 3597		    zb[i-1].zb_object == zb[i].zb_object) 3598			continue; 3599 3600		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 3601			goto nomem; 3602		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 3603		    zb[i].zb_objset) != 0) { 3604			nvlist_free(nv); 3605			goto nomem; 3606		} 3607		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 3608		    zb[i].zb_object) != 0) { 3609			nvlist_free(nv); 3610			goto nomem; 3611		} 3612		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 3613			nvlist_free(nv); 3614			goto nomem; 3615		} 3616		nvlist_free(nv); 3617	} 3618 3619	free((void *)(uintptr_t)zc.zc_nvlist_dst); 3620	return (0); 3621 3622nomem: 3623	free((void *)(uintptr_t)zc.zc_nvlist_dst); 3624	return (no_memory(zhp->zpool_hdl)); 3625} 3626 3627/* 3628 * Upgrade a ZFS pool to the latest on-disk version. 3629 */ 3630int 3631zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 3632{ 3633	zfs_cmd_t zc = { 0 }; 3634	libzfs_handle_t *hdl = zhp->zpool_hdl; 3635 3636	(void) strcpy(zc.zc_name, zhp->zpool_name); 3637	zc.zc_cookie = new_version; 3638 3639	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 3640		return (zpool_standard_error_fmt(hdl, errno, 3641		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 3642		    zhp->zpool_name)); 3643	return (0); 3644} 3645 3646void 3647zfs_save_arguments(int argc, char **argv, char *string, int len) 3648{ 3649	(void) strlcpy(string, basename(argv[0]), len); 3650	for (int i = 1; i < argc; i++) { 3651		(void) strlcat(string, " ", len); 3652		(void) strlcat(string, argv[i], len); 3653	} 3654} 3655 3656int 3657zpool_log_history(libzfs_handle_t *hdl, const char *message) 3658{ 3659	zfs_cmd_t zc = { 0 }; 3660	nvlist_t *args; 3661	int err; 3662 3663	args = fnvlist_alloc(); 3664	fnvlist_add_string(args, "message", message); 3665	err = zcmd_write_src_nvlist(hdl, &zc, args); 3666	if (err == 0) 3667		err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc); 3668	nvlist_free(args); 3669	zcmd_free_nvlists(&zc); 3670	return (err); 3671} 3672 3673/* 3674 * Perform ioctl to get some command history of a pool. 3675 * 3676 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the 3677 * logical offset of the history buffer to start reading from. 3678 * 3679 * Upon return, 'off' is the next logical offset to read from and 3680 * 'len' is the actual amount of bytes read into 'buf'. 
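 * A read that returns zero bytes in 'len' is treated by the caller as the
 * end of the history log.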
3681 */ 3682static int 3683get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3684{ 3685 zfs_cmd_t zc = { 0 }; 3686 libzfs_handle_t *hdl = zhp->zpool_hdl; 3687 3688 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3689 3690 zc.zc_history = (uint64_t)(uintptr_t)buf; 3691 zc.zc_history_len = *len; 3692 zc.zc_history_offset = *off; 3693 3694 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3695 switch (errno) { 3696 case EPERM: 3697 return (zfs_error_fmt(hdl, EZFS_PERM, 3698 dgettext(TEXT_DOMAIN, 3699 "cannot show history for pool '%s'"), 3700 zhp->zpool_name)); 3701 case ENOENT: 3702 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3703 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3704 "'%s'"), zhp->zpool_name)); 3705 case ENOTSUP: 3706 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3707 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3708 "'%s', pool must be upgraded"), zhp->zpool_name)); 3709 default: 3710 return (zpool_standard_error_fmt(hdl, errno, 3711 dgettext(TEXT_DOMAIN, 3712 "cannot get history for '%s'"), zhp->zpool_name)); 3713 } 3714 } 3715 3716 *len = zc.zc_history_len; 3717 *off = zc.zc_history_offset; 3718 3719 return (0); 3720} 3721 3722/* 3723 * Process the buffer of nvlists, unpacking and storing each nvlist record 3724 * into 'records'. 'leftover' is set to the number of bytes that weren't 3725 * processed as there wasn't a complete record. 3726 */ 3727int 3728zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3729 nvlist_t ***records, uint_t *numrecords) 3730{ 3731 uint64_t reclen; 3732 nvlist_t *nv; 3733 int i; 3734 3735 while (bytes_read > sizeof (reclen)) { 3736 3737 /* get length of packed record (stored as little endian) */ 3738 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3739 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3740 3741 if (bytes_read < sizeof (reclen) + reclen) 3742 break; 3743 3744 /* unpack record */ 3745 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3746 return (ENOMEM); 3747 bytes_read -= sizeof (reclen) + reclen; 3748 buf += sizeof (reclen) + reclen; 3749 3750 /* add record to nvlist array */ 3751 (*numrecords)++; 3752 if (ISP2(*numrecords + 1)) { 3753 *records = realloc(*records, 3754 *numrecords * 2 * sizeof (nvlist_t *)); 3755 } 3756 (*records)[*numrecords - 1] = nv; 3757 } 3758 3759 *leftover = bytes_read; 3760 return (0); 3761} 3762 3763/* from spa_history.c: spa_history_create_obj() */ 3764#define HIS_BUF_LEN_DEF (128 << 10) 3765#define HIS_BUF_LEN_MAX (1 << 30) 3766 3767/* 3768 * Retrieve the command history of a pool. 3769 */ 3770int 3771zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3772{ 3773 char *buf = NULL; 3774 uint64_t bufsize = HIS_BUF_LEN_DEF; 3775 uint64_t off = 0; 3776 nvlist_t **records = NULL; 3777 uint_t numrecords = 0; 3778 int err, i; 3779 3780 if ((buf = malloc(bufsize)) == NULL) 3781 return (ENOMEM); 3782 do { 3783 uint64_t bytes_read = bufsize; 3784 uint64_t leftover; 3785 3786 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3787 break; 3788 3789 /* if nothing else was read in, we're at EOF, just return */ 3790 if (bytes_read == 0) 3791 break; 3792 3793 if ((err = zpool_history_unpack(buf, bytes_read, 3794 &leftover, &records, &numrecords)) != 0) 3795 break; 3796 off -= leftover; 3797 3798 /* 3799 * If the history block is too big, double the buffer 3800 * size and try again. 
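		 * 'leftover == bytes_read' means not even a single record
		 * fit in the current buffer.  Growth is capped at
		 * HIS_BUF_LEN_MAX, beyond which we give up with ENOMEM.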
3801 */ 3802 if (leftover == bytes_read) { 3803 free(buf); 3804 buf = NULL; 3805 3806 bufsize <<= 1; 3807 if ((bufsize >= HIS_BUF_LEN_MAX) || 3808 ((buf = malloc(bufsize)) == NULL)) { 3809 err = ENOMEM; 3810 break; 3811 } 3812 } 3813 3814 /* CONSTCOND */ 3815 } while (1); 3816 free(buf); 3817 3818 if (!err) { 3819 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3820 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3821 records, numrecords) == 0); 3822 } 3823 for (i = 0; i < numrecords; i++) 3824 nvlist_free(records[i]); 3825 free(records); 3826 3827 return (err); 3828} 3829 3830void 3831zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3832 char *pathname, size_t len) 3833{ 3834 zfs_cmd_t zc = { 0 }; 3835 boolean_t mounted = B_FALSE; 3836 char *mntpnt = NULL; 3837 char dsname[MAXNAMELEN]; 3838 3839 if (dsobj == 0) { 3840 /* special case for the MOS */ 3841 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3842 return; 3843 } 3844 3845 /* get the dataset's name */ 3846 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3847 zc.zc_obj = dsobj; 3848 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3849 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3850 /* just write out a path of two object numbers */ 3851 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3852 dsobj, obj); 3853 return; 3854 } 3855 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3856 3857 /* find out if the dataset is mounted */ 3858 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3859 3860 /* get the corrupted object's path */ 3861 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 3862 zc.zc_obj = obj; 3863 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3864 &zc) == 0) { 3865 if (mounted) { 3866 (void) snprintf(pathname, len, "%s%s", mntpnt, 3867 zc.zc_value); 3868 } else { 3869 (void) snprintf(pathname, len, "%s:%s", 3870 dsname, zc.zc_value); 3871 } 3872 } else { 3873 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3874 } 3875 free(mntpnt); 3876} 3877 3878#ifdef sun 3879/* 3880 * Read the EFI label from the config, if a label does not exist then 3881 * pass back the error to the caller. If the caller has passed a non-NULL 3882 * diskaddr argument then we set it to the starting address of the EFI 3883 * partition. 
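 * The address reported is the start of slice 0 of the EFI label, read from
 * the corresponding raw device under RDISK_ROOT.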
3884 */ 3885static int 3886read_efi_label(nvlist_t *config, diskaddr_t *sb) 3887{ 3888 char *path; 3889 int fd; 3890 char diskname[MAXPATHLEN]; 3891 int err = -1; 3892 3893 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3894 return (err); 3895 3896 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT, 3897 strrchr(path, '/')); 3898 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3899 struct dk_gpt *vtoc; 3900 3901 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3902 if (sb != NULL) 3903 *sb = vtoc->efi_parts[0].p_start; 3904 efi_free(vtoc); 3905 } 3906 (void) close(fd); 3907 } 3908 return (err); 3909} 3910 3911/* 3912 * determine where a partition starts on a disk in the current 3913 * configuration 3914 */ 3915static diskaddr_t 3916find_start_block(nvlist_t *config) 3917{ 3918 nvlist_t **child; 3919 uint_t c, children; 3920 diskaddr_t sb = MAXOFFSET_T; 3921 uint64_t wholedisk; 3922 3923 if (nvlist_lookup_nvlist_array(config, 3924 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3925 if (nvlist_lookup_uint64(config, 3926 ZPOOL_CONFIG_WHOLE_DISK, 3927 &wholedisk) != 0 || !wholedisk) { 3928 return (MAXOFFSET_T); 3929 } 3930 if (read_efi_label(config, &sb) < 0) 3931 sb = MAXOFFSET_T; 3932 return (sb); 3933 } 3934 3935 for (c = 0; c < children; c++) { 3936 sb = find_start_block(child[c]); 3937 if (sb != MAXOFFSET_T) { 3938 return (sb); 3939 } 3940 } 3941 return (MAXOFFSET_T); 3942} 3943#endif /* sun */ 3944 3945/* 3946 * Label an individual disk. The name provided is the short name, 3947 * stripped of any leading /dev path. 3948 */ 3949int 3950zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name) 3951{ 3952#ifdef sun 3953 char path[MAXPATHLEN]; 3954 struct dk_gpt *vtoc; 3955 int fd; 3956 size_t resv = EFI_MIN_RESV_SIZE; 3957 uint64_t slice_size; 3958 diskaddr_t start_block; 3959 char errbuf[1024]; 3960 3961 /* prepare an error message just in case */ 3962 (void) snprintf(errbuf, sizeof (errbuf), 3963 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3964 3965 if (zhp) { 3966 nvlist_t *nvroot; 3967 3968 if (zpool_is_bootable(zhp)) { 3969 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3970 "EFI labeled devices are not supported on root " 3971 "pools.")); 3972 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3973 } 3974 3975 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3976 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3977 3978 if (zhp->zpool_start_block == 0) 3979 start_block = find_start_block(nvroot); 3980 else 3981 start_block = zhp->zpool_start_block; 3982 zhp->zpool_start_block = start_block; 3983 } else { 3984 /* new pool */ 3985 start_block = NEW_START_BLOCK; 3986 } 3987 3988 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name, 3989 BACKUP_SLICE); 3990 3991 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3992 /* 3993 * This shouldn't happen. We've long since verified that this 3994 * is a valid device. 
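		 * If it does happen, fail gracefully with EZFS_OPENFAILED
		 * rather than asserting.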
3995 */ 3996 zfs_error_aux(hdl, 3997 dgettext(TEXT_DOMAIN, "unable to open device")); 3998 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 3999 } 4000 4001 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 4002 /* 4003 * The only way this can fail is if we run out of memory, or we 4004 * were unable to read the disk's capacity 4005 */ 4006 if (errno == ENOMEM) 4007 (void) no_memory(hdl); 4008 4009 (void) close(fd); 4010 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4011 "unable to read disk capacity"), name); 4012 4013 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 4014 } 4015 4016 slice_size = vtoc->efi_last_u_lba + 1; 4017 slice_size -= EFI_MIN_RESV_SIZE; 4018 if (start_block == MAXOFFSET_T) 4019 start_block = NEW_START_BLOCK; 4020 slice_size -= start_block; 4021 4022 vtoc->efi_parts[0].p_start = start_block; 4023 vtoc->efi_parts[0].p_size = slice_size; 4024 4025 /* 4026 * Why we use V_USR: V_BACKUP confuses users, and is considered 4027 * disposable by some EFI utilities (since EFI doesn't have a backup 4028 * slice). V_UNASSIGNED is supposed to be used only for zero size 4029 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 4030 * etc. were all pretty specific. V_USR is as close to reality as we 4031 * can get, in the absence of V_OTHER. 4032 */ 4033 vtoc->efi_parts[0].p_tag = V_USR; 4034 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 4035 4036 vtoc->efi_parts[8].p_start = slice_size + start_block; 4037 vtoc->efi_parts[8].p_size = resv; 4038 vtoc->efi_parts[8].p_tag = V_RESERVED; 4039 4040 if (efi_write(fd, vtoc) != 0) { 4041 /* 4042 * Some block drivers (like pcata) may not support EFI 4043 * GPT labels. Print out a helpful error message dir- 4044 * ecting the user to manually label the disk and give 4045 * a specific slice. 4046 */ 4047 (void) close(fd); 4048 efi_free(vtoc); 4049 4050 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4051 "try using fdisk(1M) and then provide a specific slice")); 4052 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 4053 } 4054 4055 (void) close(fd); 4056 efi_free(vtoc); 4057#endif /* sun */ 4058 return (0); 4059} 4060 4061static boolean_t 4062supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 4063{ 4064 char *type; 4065 nvlist_t **child; 4066 uint_t children, c; 4067 4068 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 4069 if (strcmp(type, VDEV_TYPE_FILE) == 0 || 4070 strcmp(type, VDEV_TYPE_HOLE) == 0 || 4071 strcmp(type, VDEV_TYPE_MISSING) == 0) { 4072 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4073 "vdev type '%s' is not supported"), type); 4074 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 4075 return (B_FALSE); 4076 } 4077 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 4078 &child, &children) == 0) { 4079 for (c = 0; c < children; c++) { 4080 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 4081 return (B_FALSE); 4082 } 4083 } 4084 return (B_TRUE); 4085} 4086 4087/* 4088 * Check if this zvol is allowable for use as a dump device; zero if 4089 * it is, > 0 if it isn't, < 0 if it isn't a zvol. 4090 * 4091 * Allowable storage configurations include mirrors, all raidz variants, and 4092 * pools with log, cache, and spare devices. Pools which are backed by files or 4093 * have missing/hole vdevs are not suitable. 
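 * The argument must be a full zvol device path (under ZVOL_FULL_DEV_DIR);
 * the pool name is parsed out of it, and only the pool's first top-level
 * vdev is inspected by supported_dump_vdev_type().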
4094 */ 4095int 4096zvol_check_dump_config(char *arg) 4097{ 4098 zpool_handle_t *zhp = NULL; 4099 nvlist_t *config, *nvroot; 4100 char *p, *volname; 4101 nvlist_t **top; 4102 uint_t toplevels; 4103 libzfs_handle_t *hdl; 4104 char errbuf[1024]; 4105 char poolname[ZPOOL_MAXNAMELEN]; 4106 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4107 int ret = 1; 4108 4109 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4110 return (-1); 4111 } 4112 4113 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4114 "dump is not supported on device '%s'"), arg); 4115 4116 if ((hdl = libzfs_init()) == NULL) 4117 return (1); 4118 libzfs_print_on_error(hdl, B_TRUE); 4119 4120 volname = arg + pathlen; 4121 4122 /* check the configuration of the pool */ 4123 if ((p = strchr(volname, '/')) == NULL) { 4124 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4125 "malformed dataset name")); 4126 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4127 return (1); 4128 } else if (p - volname >= ZFS_MAXNAMELEN) { 4129 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4130 "dataset name is too long")); 4131 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4132 return (1); 4133 } else { 4134 (void) strncpy(poolname, volname, p - volname); 4135 poolname[p - volname] = '\0'; 4136 } 4137 4138 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4139 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4140 "could not open pool '%s'"), poolname); 4141 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4142 goto out; 4143 } 4144 config = zpool_get_config(zhp, NULL); 4145 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4146 &nvroot) != 0) { 4147 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4148 "could not obtain vdev configuration for '%s'"), poolname); 4149 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4150 goto out; 4151 } 4152 4153 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4154 &top, &toplevels) == 0); 4155 4156 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4157 goto out; 4158 } 4159 ret = 0; 4160 4161out: 4162 if (zhp) 4163 zpool_close(zhp); 4164 libzfs_fini(hdl); 4165 return (ret); 4166} 4167
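As an illustrative sketch (not part of libzfs_pool.c), a minimal consumer of zpool_get_history() as defined above might look like the following. The pool name "tank", the use of ZPOOL_HIST_CMD from sys/fs/zfs.h, and the abbreviated error handling are assumptions for the example; such a program would typically be linked against libzfs and libnvpair.

#include <stdio.h>
#include <libnvpair.h>
#include <libzfs.h>

int
main(void)
{
	libzfs_handle_t *hdl;
	zpool_handle_t *zhp;
	nvlist_t *nvhis, **records;
	uint_t nrec, i;

	if ((hdl = libzfs_init()) == NULL)
		return (1);
	/* "tank" is a placeholder pool name for this sketch. */
	if ((zhp = zpool_open(hdl, "tank")) == NULL) {
		libzfs_fini(hdl);
		return (1);
	}
	if (zpool_get_history(zhp, &nvhis) == 0) {
		if (nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
		    &records, &nrec) == 0) {
			for (i = 0; i < nrec; i++) {
				char *cmd;

				/* Only command records carry this key. */
				if (nvlist_lookup_string(records[i],
				    ZPOOL_HIST_CMD, &cmd) == 0)
					(void) printf("%s\n", cmd);
			}
		}
		nvlist_free(nvhis);
	}
	zpool_close(zhp);
	libzfs_fini(hdl);
	return (0);
}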