libzfs_pool.c revision 332547
1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22/* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2017 by Delphix. All rights reserved. 25 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 26 * Copyright 2016 Nexenta Systems, Inc. 27 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com> 28 * Copyright (c) 2017 Datto Inc. 29 */ 30 31#include <sys/types.h> 32#include <sys/stat.h> 33#include <ctype.h> 34#include <errno.h> 35#include <devid.h> 36#include <fcntl.h> 37#include <libintl.h> 38#include <stdio.h> 39#include <stdlib.h> 40#include <strings.h> 41#include <unistd.h> 42#include <libgen.h> 43#include <sys/zfs_ioctl.h> 44#include <dlfcn.h> 45 46#include "zfs_namecheck.h" 47#include "zfs_prop.h" 48#include "libzfs_impl.h" 49#include "zfs_comutil.h" 50#include "zfeature_common.h" 51 52static int read_efi_label(nvlist_t *, diskaddr_t *, boolean_t *); 53static boolean_t zpool_vdev_is_interior(const char *name); 54 55#define BACKUP_SLICE "s2" 56 57typedef struct prop_flags { 58 int create:1; /* Validate property on creation */ 59 int import:1; /* Validate property on import */ 60} prop_flags_t; 61 62/* 63 * ==================================================================== 64 * zpool property functions 65 * ==================================================================== 66 */ 67 68static int 69zpool_get_all_props(zpool_handle_t *zhp) 70{ 71 zfs_cmd_t zc = { 0 }; 72 libzfs_handle_t *hdl = zhp->zpool_hdl; 73 74 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 75 76 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 77 return (-1); 78 79 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 80 if (errno == ENOMEM) { 81 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 82 zcmd_free_nvlists(&zc); 83 return (-1); 84 } 85 } else { 86 zcmd_free_nvlists(&zc); 87 return (-1); 88 } 89 } 90 91 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 92 zcmd_free_nvlists(&zc); 93 return (-1); 94 } 95 96 zcmd_free_nvlists(&zc); 97 98 return (0); 99} 100 101static int 102zpool_props_refresh(zpool_handle_t *zhp) 103{ 104 nvlist_t *old_props; 105 106 old_props = zhp->zpool_props; 107 108 if (zpool_get_all_props(zhp) != 0) 109 return (-1); 110 111 nvlist_free(old_props); 112 return (0); 113} 114 115static char * 116zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 117 zprop_source_t *src) 118{ 119 nvlist_t *nv, *nvl; 120 uint64_t ival; 121 char *value; 122 zprop_source_t source; 123 124 nvl = zhp->zpool_props; 125 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 126 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 127 source = ival; 128 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 129 } else { 130 source = ZPROP_SRC_DEFAULT; 131 if ((value = (char *)zpool_prop_default_string(prop)) == NULL) 132 value = "-"; 133 } 134 135 if (src) 136 *src = source; 137 138 return (value); 139} 140 141uint64_t 142zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 143{ 144 nvlist_t *nv, *nvl; 145 uint64_t value; 146 zprop_source_t source; 147 148 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 149 /* 150 * zpool_get_all_props() has most likely failed because 151 * the pool is faulted, but if all we need is the top level 152 * vdev's guid then get it from the zhp config nvlist. 153 */ 154 if ((prop == ZPOOL_PROP_GUID) && 155 (nvlist_lookup_nvlist(zhp->zpool_config, 156 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 157 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 158 == 0)) { 159 return (value); 160 } 161 return (zpool_prop_default_numeric(prop)); 162 } 163 164 nvl = zhp->zpool_props; 165 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 166 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 167 source = value; 168 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 169 } else { 170 source = ZPROP_SRC_DEFAULT; 171 value = zpool_prop_default_numeric(prop); 172 } 173 174 if (src) 175 *src = source; 176 177 return (value); 178} 179 180/* 181 * Map VDEV STATE to printed strings. 182 */ 183const char * 184zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 185{ 186 switch (state) { 187 case VDEV_STATE_CLOSED: 188 case VDEV_STATE_OFFLINE: 189 return (gettext("OFFLINE")); 190 case VDEV_STATE_REMOVED: 191 return (gettext("REMOVED")); 192 case VDEV_STATE_CANT_OPEN: 193 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 194 return (gettext("FAULTED")); 195 else if (aux == VDEV_AUX_SPLIT_POOL) 196 return (gettext("SPLIT")); 197 else 198 return (gettext("UNAVAIL")); 199 case VDEV_STATE_FAULTED: 200 return (gettext("FAULTED")); 201 case VDEV_STATE_DEGRADED: 202 return (gettext("DEGRADED")); 203 case VDEV_STATE_HEALTHY: 204 return (gettext("ONLINE")); 205 206 default: 207 break; 208 } 209 210 return (gettext("UNKNOWN")); 211} 212 213/* 214 * Map POOL STATE to printed strings. 215 */ 216const char * 217zpool_pool_state_to_name(pool_state_t state) 218{ 219 switch (state) { 220 case POOL_STATE_ACTIVE: 221 return (gettext("ACTIVE")); 222 case POOL_STATE_EXPORTED: 223 return (gettext("EXPORTED")); 224 case POOL_STATE_DESTROYED: 225 return (gettext("DESTROYED")); 226 case POOL_STATE_SPARE: 227 return (gettext("SPARE")); 228 case POOL_STATE_L2CACHE: 229 return (gettext("L2CACHE")); 230 case POOL_STATE_UNINITIALIZED: 231 return (gettext("UNINITIALIZED")); 232 case POOL_STATE_UNAVAIL: 233 return (gettext("UNAVAIL")); 234 case POOL_STATE_POTENTIALLY_ACTIVE: 235 return (gettext("POTENTIALLY_ACTIVE")); 236 } 237 238 return (gettext("UNKNOWN")); 239} 240 241/* 242 * Get a zpool property value for 'prop' and return the value in 243 * a pre-allocated buffer. 244 */ 245int 246zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, 247 zprop_source_t *srctype, boolean_t literal) 248{ 249 uint64_t intval; 250 const char *strval; 251 zprop_source_t src = ZPROP_SRC_NONE; 252 nvlist_t *nvroot; 253 vdev_stat_t *vs; 254 uint_t vsc; 255 256 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 257 switch (prop) { 258 case ZPOOL_PROP_NAME: 259 (void) strlcpy(buf, zpool_get_name(zhp), len); 260 break; 261 262 case ZPOOL_PROP_HEALTH: 263 (void) strlcpy(buf, 264 zpool_pool_state_to_name(POOL_STATE_UNAVAIL), len); 265 break; 266 267 case ZPOOL_PROP_GUID: 268 intval = zpool_get_prop_int(zhp, prop, &src); 269 (void) snprintf(buf, len, "%llu", intval); 270 break; 271 272 case ZPOOL_PROP_ALTROOT: 273 case ZPOOL_PROP_CACHEFILE: 274 case ZPOOL_PROP_COMMENT: 275 if (zhp->zpool_props != NULL || 276 zpool_get_all_props(zhp) == 0) { 277 (void) strlcpy(buf, 278 zpool_get_prop_string(zhp, prop, &src), 279 len); 280 break; 281 } 282 /* FALLTHROUGH */ 283 default: 284 (void) strlcpy(buf, "-", len); 285 break; 286 } 287 288 if (srctype != NULL) 289 *srctype = src; 290 return (0); 291 } 292 293 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && 294 prop != ZPOOL_PROP_NAME) 295 return (-1); 296 297 switch (zpool_prop_get_type(prop)) { 298 case PROP_TYPE_STRING: 299 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), 300 len); 301 break; 302 303 case PROP_TYPE_NUMBER: 304 intval = zpool_get_prop_int(zhp, prop, &src); 305 306 switch (prop) { 307 case ZPOOL_PROP_SIZE: 308 case ZPOOL_PROP_ALLOCATED: 309 case ZPOOL_PROP_FREE: 310 case ZPOOL_PROP_FREEING: 311 case ZPOOL_PROP_LEAKED: 312 if (literal) { 313 (void) snprintf(buf, len, "%llu", 314 (u_longlong_t)intval); 315 } else { 316 (void) zfs_nicenum(intval, buf, len); 317 } 318 break; 319 case ZPOOL_PROP_BOOTSIZE: 320 case ZPOOL_PROP_EXPANDSZ: 321 case ZPOOL_PROP_CHECKPOINT: 322 if (intval == 0) { 323 (void) strlcpy(buf, "-", len); 324 } else if (literal) { 325 (void) snprintf(buf, len, "%llu", 326 (u_longlong_t)intval); 327 } else { 328 (void) zfs_nicenum(intval, buf, len); 329 } 330 break; 331 case ZPOOL_PROP_CAPACITY: 332 if (literal) { 333 (void) snprintf(buf, len, "%llu", 334 (u_longlong_t)intval); 335 } else { 336 (void) snprintf(buf, len, "%llu%%", 337 (u_longlong_t)intval); 338 } 339 break; 340 case ZPOOL_PROP_FRAGMENTATION: 341 if (intval == UINT64_MAX) { 342 (void) strlcpy(buf, "-", len); 343 } else { 344 (void) snprintf(buf, len, "%llu%%", 345 (u_longlong_t)intval); 346 } 347 break; 348 case ZPOOL_PROP_DEDUPRATIO: 349 (void) snprintf(buf, len, "%llu.%02llux", 350 (u_longlong_t)(intval / 100), 351 (u_longlong_t)(intval % 100)); 352 break; 353 case ZPOOL_PROP_HEALTH: 354 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 355 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 356 verify(nvlist_lookup_uint64_array(nvroot, 357 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 358 == 0); 359 360 (void) strlcpy(buf, zpool_state_to_name(intval, 361 vs->vs_aux), len); 362 break; 363 case ZPOOL_PROP_VERSION: 364 if (intval >= SPA_VERSION_FEATURES) { 365 (void) snprintf(buf, len, "-"); 366 break; 367 } 368 /* FALLTHROUGH */ 369 default: 370 (void) snprintf(buf, len, "%llu", intval); 371 } 372 break; 373 374 case PROP_TYPE_INDEX: 375 intval = zpool_get_prop_int(zhp, prop, &src); 376 if (zpool_prop_index_to_string(prop, intval, &strval) 377 != 0) 378 return (-1); 379 (void) strlcpy(buf, strval, len); 380 break; 381 382 default: 383 abort(); 384 } 385 386 if (srctype) 387 *srctype = src; 388 389 return (0); 390} 391 392/* 393 * Check if the bootfs name has the same pool name as it is set to. 394 * Assuming bootfs is a valid dataset name. 395 */ 396static boolean_t 397bootfs_name_valid(const char *pool, char *bootfs) 398{ 399 int len = strlen(pool); 400 401 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 402 return (B_FALSE); 403 404 if (strncmp(pool, bootfs, len) == 0 && 405 (bootfs[len] == '/' || bootfs[len] == '\0')) 406 return (B_TRUE); 407 408 return (B_FALSE); 409} 410 411boolean_t 412zpool_is_bootable(zpool_handle_t *zhp) 413{ 414 char bootfs[ZFS_MAX_DATASET_NAME_LEN]; 415 416 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 417 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-", 418 sizeof (bootfs)) != 0); 419} 420 421 422/* 423 * Given an nvlist of zpool properties to be set, validate that they are 424 * correct, and parse any numeric properties (index, boolean, etc) if they are 425 * specified as strings. 426 */ 427static nvlist_t * 428zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 429 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 430{ 431 nvpair_t *elem; 432 nvlist_t *retprops; 433 zpool_prop_t prop; 434 char *strval; 435 uint64_t intval; 436 char *slash, *check; 437 struct stat64 statbuf; 438 zpool_handle_t *zhp; 439 440 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 441 (void) no_memory(hdl); 442 return (NULL); 443 } 444 445 elem = NULL; 446 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 447 const char *propname = nvpair_name(elem); 448 449 prop = zpool_name_to_prop(propname); 450 if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) { 451 int err; 452 char *fname = strchr(propname, '@') + 1; 453 454 err = zfeature_lookup_name(fname, NULL); 455 if (err != 0) { 456 ASSERT3U(err, ==, ENOENT); 457 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 458 "invalid feature '%s'"), fname); 459 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 460 goto error; 461 } 462 463 if (nvpair_type(elem) != DATA_TYPE_STRING) { 464 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 465 "'%s' must be a string"), propname); 466 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 467 goto error; 468 } 469 470 (void) nvpair_value_string(elem, &strval); 471 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) { 472 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 473 "property '%s' can only be set to " 474 "'enabled'"), propname); 475 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 476 goto error; 477 } 478 479 if (nvlist_add_uint64(retprops, propname, 0) != 0) { 480 (void) no_memory(hdl); 481 goto error; 482 } 483 continue; 484 } 485 486 /* 487 * Make sure this property is valid and applies to this type. 488 */ 489 if (prop == ZPOOL_PROP_INVAL) { 490 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 491 "invalid property '%s'"), propname); 492 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 493 goto error; 494 } 495 496 if (zpool_prop_readonly(prop)) { 497 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 498 "is readonly"), propname); 499 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); 500 goto error; 501 } 502 503 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, 504 &strval, &intval, errbuf) != 0) 505 goto error; 506 507 /* 508 * Perform additional checking for specific properties. 509 */ 510 switch (prop) { 511 case ZPOOL_PROP_VERSION: 512 if (intval < version || 513 !SPA_VERSION_IS_SUPPORTED(intval)) { 514 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 515 "property '%s' number %d is invalid."), 516 propname, intval); 517 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 518 goto error; 519 } 520 break; 521 522 case ZPOOL_PROP_BOOTSIZE: 523 if (!flags.create) { 524 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 525 "property '%s' can only be set during pool " 526 "creation"), propname); 527 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 528 goto error; 529 } 530 break; 531 532 case ZPOOL_PROP_BOOTFS: 533 if (flags.create || flags.import) { 534 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 535 "property '%s' cannot be set at creation " 536 "or import time"), propname); 537 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 538 goto error; 539 } 540 541 if (version < SPA_VERSION_BOOTFS) { 542 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 543 "pool must be upgraded to support " 544 "'%s' property"), propname); 545 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 546 goto error; 547 } 548 549 /* 550 * bootfs property value has to be a dataset name and 551 * the dataset has to be in the same pool as it sets to. 552 */ 553 if (strval[0] != '\0' && !bootfs_name_valid(poolname, 554 strval)) { 555 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 556 "is an invalid name"), strval); 557 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 558 goto error; 559 } 560 561 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { 562 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 563 "could not open pool '%s'"), poolname); 564 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 565 goto error; 566 } 567 zpool_close(zhp); 568 break; 569 570 case ZPOOL_PROP_ALTROOT: 571 if (!flags.create && !flags.import) { 572 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 573 "property '%s' can only be set during pool " 574 "creation or import"), propname); 575 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 576 goto error; 577 } 578 579 if (strval[0] != '/') { 580 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 581 "bad alternate root '%s'"), strval); 582 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 583 goto error; 584 } 585 break; 586 587 case ZPOOL_PROP_CACHEFILE: 588 if (strval[0] == '\0') 589 break; 590 591 if (strcmp(strval, "none") == 0) 592 break; 593 594 if (strval[0] != '/') { 595 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 596 "property '%s' must be empty, an " 597 "absolute path, or 'none'"), propname); 598 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 599 goto error; 600 } 601 602 slash = strrchr(strval, '/'); 603 604 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 605 strcmp(slash, "/..") == 0) { 606 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 607 "'%s' is not a valid file"), strval); 608 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 609 goto error; 610 } 611 612 *slash = '\0'; 613 614 if (strval[0] != '\0' && 615 (stat64(strval, &statbuf) != 0 || 616 !S_ISDIR(statbuf.st_mode))) { 617 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 618 "'%s' is not a valid directory"), 619 strval); 620 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 621 goto error; 622 } 623 624 *slash = '/'; 625 break; 626 627 case ZPOOL_PROP_COMMENT: 628 for (check = strval; *check != '\0'; check++) { 629 if (!isprint(*check)) { 630 zfs_error_aux(hdl, 631 dgettext(TEXT_DOMAIN, 632 "comment may only have printable " 633 "characters")); 634 (void) zfs_error(hdl, EZFS_BADPROP, 635 errbuf); 636 goto error; 637 } 638 } 639 if (strlen(strval) > ZPROP_MAX_COMMENT) { 640 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 641 "comment must not exceed %d characters"), 642 ZPROP_MAX_COMMENT); 643 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 644 goto error; 645 } 646 break; 647 case ZPOOL_PROP_READONLY: 648 if (!flags.import) { 649 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 650 "property '%s' can only be set at " 651 "import time"), propname); 652 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 653 goto error; 654 } 655 break; 656 657 default: 658 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 659 "property '%s'(%d) not defined"), propname, prop); 660 break; 661 } 662 } 663 664 return (retprops); 665error: 666 nvlist_free(retprops); 667 return (NULL); 668} 669 670/* 671 * Set zpool property : propname=propval. 672 */ 673int 674zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 675{ 676 zfs_cmd_t zc = { 0 }; 677 int ret = -1; 678 char errbuf[1024]; 679 nvlist_t *nvl = NULL; 680 nvlist_t *realprops; 681 uint64_t version; 682 prop_flags_t flags = { 0 }; 683 684 (void) snprintf(errbuf, sizeof (errbuf), 685 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 686 zhp->zpool_name); 687 688 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 689 return (no_memory(zhp->zpool_hdl)); 690 691 if (nvlist_add_string(nvl, propname, propval) != 0) { 692 nvlist_free(nvl); 693 return (no_memory(zhp->zpool_hdl)); 694 } 695 696 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 697 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 698 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 699 nvlist_free(nvl); 700 return (-1); 701 } 702 703 nvlist_free(nvl); 704 nvl = realprops; 705 706 /* 707 * Execute the corresponding ioctl() to set this property. 708 */ 709 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 710 711 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 712 nvlist_free(nvl); 713 return (-1); 714 } 715 716 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 717 718 zcmd_free_nvlists(&zc); 719 nvlist_free(nvl); 720 721 if (ret) 722 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 723 else 724 (void) zpool_props_refresh(zhp); 725 726 return (ret); 727} 728 729int 730zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) 731{ 732 libzfs_handle_t *hdl = zhp->zpool_hdl; 733 zprop_list_t *entry; 734 char buf[ZFS_MAXPROPLEN]; 735 nvlist_t *features = NULL; 736 zprop_list_t **last; 737 boolean_t firstexpand = (NULL == *plp); 738 739 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 740 return (-1); 741 742 last = plp; 743 while (*last != NULL) 744 last = &(*last)->pl_next; 745 746 if ((*plp)->pl_all) 747 features = zpool_get_features(zhp); 748 749 if ((*plp)->pl_all && firstexpand) { 750 for (int i = 0; i < SPA_FEATURES; i++) { 751 zprop_list_t *entry = zfs_alloc(hdl, 752 sizeof (zprop_list_t)); 753 entry->pl_prop = ZPROP_INVAL; 754 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s", 755 spa_feature_table[i].fi_uname); 756 entry->pl_width = strlen(entry->pl_user_prop); 757 entry->pl_all = B_TRUE; 758 759 *last = entry; 760 last = &entry->pl_next; 761 } 762 } 763 764 /* add any unsupported features */ 765 for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL); 766 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) { 767 char *propname; 768 boolean_t found; 769 zprop_list_t *entry; 770 771 if (zfeature_is_supported(nvpair_name(nvp))) 772 continue; 773 774 propname = zfs_asprintf(hdl, "unsupported@%s", 775 nvpair_name(nvp)); 776 777 /* 778 * Before adding the property to the list make sure that no 779 * other pool already added the same property. 780 */ 781 found = B_FALSE; 782 entry = *plp; 783 while (entry != NULL) { 784 if (entry->pl_user_prop != NULL && 785 strcmp(propname, entry->pl_user_prop) == 0) { 786 found = B_TRUE; 787 break; 788 } 789 entry = entry->pl_next; 790 } 791 if (found) { 792 free(propname); 793 continue; 794 } 795 796 entry = zfs_alloc(hdl, sizeof (zprop_list_t)); 797 entry->pl_prop = ZPROP_INVAL; 798 entry->pl_user_prop = propname; 799 entry->pl_width = strlen(entry->pl_user_prop); 800 entry->pl_all = B_TRUE; 801 802 *last = entry; 803 last = &entry->pl_next; 804 } 805 806 for (entry = *plp; entry != NULL; entry = entry->pl_next) { 807 808 if (entry->pl_fixed) 809 continue; 810 811 if (entry->pl_prop != ZPROP_INVAL && 812 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), 813 NULL, B_FALSE) == 0) { 814 if (strlen(buf) > entry->pl_width) 815 entry->pl_width = strlen(buf); 816 } 817 } 818 819 return (0); 820} 821 822/* 823 * Get the state for the given feature on the given ZFS pool. 824 */ 825int 826zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf, 827 size_t len) 828{ 829 uint64_t refcount; 830 boolean_t found = B_FALSE; 831 nvlist_t *features = zpool_get_features(zhp); 832 boolean_t supported; 833 const char *feature = strchr(propname, '@') + 1; 834 835 supported = zpool_prop_feature(propname); 836 ASSERT(supported || zpool_prop_unsupported(propname)); 837 838 /* 839 * Convert from feature name to feature guid. This conversion is 840 * unecessary for unsupported@... properties because they already 841 * use guids. 842 */ 843 if (supported) { 844 int ret; 845 spa_feature_t fid; 846 847 ret = zfeature_lookup_name(feature, &fid); 848 if (ret != 0) { 849 (void) strlcpy(buf, "-", len); 850 return (ENOTSUP); 851 } 852 feature = spa_feature_table[fid].fi_guid; 853 } 854 855 if (nvlist_lookup_uint64(features, feature, &refcount) == 0) 856 found = B_TRUE; 857 858 if (supported) { 859 if (!found) { 860 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len); 861 } else { 862 if (refcount == 0) 863 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len); 864 else 865 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len); 866 } 867 } else { 868 if (found) { 869 if (refcount == 0) { 870 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE); 871 } else { 872 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY); 873 } 874 } else { 875 (void) strlcpy(buf, "-", len); 876 return (ENOTSUP); 877 } 878 } 879 880 return (0); 881} 882 883/* 884 * Don't start the slice at the default block of 34; many storage 885 * devices will use a stripe width of 128k, so start there instead. 886 */ 887#define NEW_START_BLOCK 256 888 889/* 890 * Validate the given pool name, optionally putting an extended error message in 891 * 'buf'. 892 */ 893boolean_t 894zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 895{ 896 namecheck_err_t why; 897 char what; 898 int ret; 899 900 ret = pool_namecheck(pool, &why, &what); 901 902 /* 903 * The rules for reserved pool names were extended at a later point. 904 * But we need to support users with existing pools that may now be 905 * invalid. So we only check for this expanded set of names during a 906 * create (or import), and only in userland. 907 */ 908 if (ret == 0 && !isopen && 909 (strncmp(pool, "mirror", 6) == 0 || 910 strncmp(pool, "raidz", 5) == 0 || 911 strncmp(pool, "spare", 5) == 0 || 912 strcmp(pool, "log") == 0)) { 913 if (hdl != NULL) 914 zfs_error_aux(hdl, 915 dgettext(TEXT_DOMAIN, "name is reserved")); 916 return (B_FALSE); 917 } 918 919 920 if (ret != 0) { 921 if (hdl != NULL) { 922 switch (why) { 923 case NAME_ERR_TOOLONG: 924 zfs_error_aux(hdl, 925 dgettext(TEXT_DOMAIN, "name is too long")); 926 break; 927 928 case NAME_ERR_INVALCHAR: 929 zfs_error_aux(hdl, 930 dgettext(TEXT_DOMAIN, "invalid character " 931 "'%c' in pool name"), what); 932 break; 933 934 case NAME_ERR_NOLETTER: 935 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 936 "name must begin with a letter")); 937 break; 938 939 case NAME_ERR_RESERVED: 940 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 941 "name is reserved")); 942 break; 943 944 case NAME_ERR_DISKLIKE: 945 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 946 "pool name is reserved")); 947 break; 948 949 case NAME_ERR_LEADING_SLASH: 950 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 951 "leading slash in name")); 952 break; 953 954 case NAME_ERR_EMPTY_COMPONENT: 955 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 956 "empty component in name")); 957 break; 958 959 case NAME_ERR_TRAILING_SLASH: 960 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 961 "trailing slash in name")); 962 break; 963 964 case NAME_ERR_MULTIPLE_DELIMITERS: 965 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 966 "multiple '@' and/or '#' delimiters in " 967 "name")); 968 break; 969 970 default: 971 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 972 "(%d) not defined"), why); 973 break; 974 } 975 } 976 return (B_FALSE); 977 } 978 979 return (B_TRUE); 980} 981 982/* 983 * Open a handle to the given pool, even if the pool is currently in the FAULTED 984 * state. 985 */ 986zpool_handle_t * 987zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 988{ 989 zpool_handle_t *zhp; 990 boolean_t missing; 991 992 /* 993 * Make sure the pool name is valid. 994 */ 995 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 996 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 997 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 998 pool); 999 return (NULL); 1000 } 1001 1002 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1003 return (NULL); 1004 1005 zhp->zpool_hdl = hdl; 1006 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1007 1008 if (zpool_refresh_stats(zhp, &missing) != 0) { 1009 zpool_close(zhp); 1010 return (NULL); 1011 } 1012 1013 if (missing) { 1014 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 1015 (void) zfs_error_fmt(hdl, EZFS_NOENT, 1016 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 1017 zpool_close(zhp); 1018 return (NULL); 1019 } 1020 1021 return (zhp); 1022} 1023 1024/* 1025 * Like the above, but silent on error. Used when iterating over pools (because 1026 * the configuration cache may be out of date). 1027 */ 1028int 1029zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 1030{ 1031 zpool_handle_t *zhp; 1032 boolean_t missing; 1033 1034 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1035 return (-1); 1036 1037 zhp->zpool_hdl = hdl; 1038 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1039 1040 if (zpool_refresh_stats(zhp, &missing) != 0) { 1041 zpool_close(zhp); 1042 return (-1); 1043 } 1044 1045 if (missing) { 1046 zpool_close(zhp); 1047 *ret = NULL; 1048 return (0); 1049 } 1050 1051 *ret = zhp; 1052 return (0); 1053} 1054 1055/* 1056 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 1057 * state. 1058 */ 1059zpool_handle_t * 1060zpool_open(libzfs_handle_t *hdl, const char *pool) 1061{ 1062 zpool_handle_t *zhp; 1063 1064 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 1065 return (NULL); 1066 1067 if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 1068 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 1069 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 1070 zpool_close(zhp); 1071 return (NULL); 1072 } 1073 1074 return (zhp); 1075} 1076 1077/* 1078 * Close the handle. Simply frees the memory associated with the handle. 1079 */ 1080void 1081zpool_close(zpool_handle_t *zhp) 1082{ 1083 nvlist_free(zhp->zpool_config); 1084 nvlist_free(zhp->zpool_old_config); 1085 nvlist_free(zhp->zpool_props); 1086 free(zhp); 1087} 1088 1089/* 1090 * Return the name of the pool. 1091 */ 1092const char * 1093zpool_get_name(zpool_handle_t *zhp) 1094{ 1095 return (zhp->zpool_name); 1096} 1097 1098 1099/* 1100 * Return the state of the pool (ACTIVE or UNAVAILABLE) 1101 */ 1102int 1103zpool_get_state(zpool_handle_t *zhp) 1104{ 1105 return (zhp->zpool_state); 1106} 1107 1108/* 1109 * Create the named pool, using the provided vdev list. It is assumed 1110 * that the consumer has already validated the contents of the nvlist, so we 1111 * don't have to worry about error semantics. 1112 */ 1113int 1114zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 1115 nvlist_t *props, nvlist_t *fsprops) 1116{ 1117 zfs_cmd_t zc = { 0 }; 1118 nvlist_t *zc_fsprops = NULL; 1119 nvlist_t *zc_props = NULL; 1120 char msg[1024]; 1121 int ret = -1; 1122 1123 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1124 "cannot create '%s'"), pool); 1125 1126 if (!zpool_name_valid(hdl, B_FALSE, pool)) 1127 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 1128 1129 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1130 return (-1); 1131 1132 if (props) { 1133 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 1134 1135 if ((zc_props = zpool_valid_proplist(hdl, pool, props, 1136 SPA_VERSION_1, flags, msg)) == NULL) { 1137 goto create_failed; 1138 } 1139 } 1140 1141 if (fsprops) { 1142 uint64_t zoned; 1143 char *zonestr; 1144 1145 zoned = ((nvlist_lookup_string(fsprops, 1146 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 1147 strcmp(zonestr, "on") == 0); 1148 1149 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM, 1150 fsprops, zoned, NULL, NULL, msg)) == NULL) { 1151 goto create_failed; 1152 } 1153 if (!zc_props && 1154 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 1155 goto create_failed; 1156 } 1157 if (nvlist_add_nvlist(zc_props, 1158 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 1159 goto create_failed; 1160 } 1161 } 1162 1163 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 1164 goto create_failed; 1165 1166 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 1167 1168 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 1169 1170 zcmd_free_nvlists(&zc); 1171 nvlist_free(zc_props); 1172 nvlist_free(zc_fsprops); 1173 1174 switch (errno) { 1175 case EBUSY: 1176 /* 1177 * This can happen if the user has specified the same 1178 * device multiple times. We can't reliably detect this 1179 * until we try to add it and see we already have a 1180 * label. 1181 */ 1182 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1183 "one or more vdevs refer to the same device")); 1184 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1185 1186 case ERANGE: 1187 /* 1188 * This happens if the record size is smaller or larger 1189 * than the allowed size range, or not a power of 2. 1190 * 1191 * NOTE: although zfs_valid_proplist is called earlier, 1192 * this case may have slipped through since the 1193 * pool does not exist yet and it is therefore 1194 * impossible to read properties e.g. max blocksize 1195 * from the pool. 1196 */ 1197 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1198 "record size invalid")); 1199 return (zfs_error(hdl, EZFS_BADPROP, msg)); 1200 1201 case EOVERFLOW: 1202 /* 1203 * This occurs when one of the devices is below 1204 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1205 * device was the problem device since there's no 1206 * reliable way to determine device size from userland. 1207 */ 1208 { 1209 char buf[64]; 1210 1211 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1212 1213 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1214 "one or more devices is less than the " 1215 "minimum size (%s)"), buf); 1216 } 1217 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1218 1219 case ENOSPC: 1220 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1221 "one or more devices is out of space")); 1222 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1223 1224 case ENOTBLK: 1225 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1226 "cache device must be a disk or disk slice")); 1227 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1228 1229 default: 1230 return (zpool_standard_error(hdl, errno, msg)); 1231 } 1232 } 1233 1234create_failed: 1235 zcmd_free_nvlists(&zc); 1236 nvlist_free(zc_props); 1237 nvlist_free(zc_fsprops); 1238 return (ret); 1239} 1240 1241/* 1242 * Destroy the given pool. It is up to the caller to ensure that there are no 1243 * datasets left in the pool. 1244 */ 1245int 1246zpool_destroy(zpool_handle_t *zhp, const char *log_str) 1247{ 1248 zfs_cmd_t zc = { 0 }; 1249 zfs_handle_t *zfp = NULL; 1250 libzfs_handle_t *hdl = zhp->zpool_hdl; 1251 char msg[1024]; 1252 1253 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1254 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1255 return (-1); 1256 1257 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1258 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1259 1260 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1261 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1262 "cannot destroy '%s'"), zhp->zpool_name); 1263 1264 if (errno == EROFS) { 1265 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1266 "one or more devices is read only")); 1267 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1268 } else { 1269 (void) zpool_standard_error(hdl, errno, msg); 1270 } 1271 1272 if (zfp) 1273 zfs_close(zfp); 1274 return (-1); 1275 } 1276 1277 if (zfp) { 1278 remove_mountpoint(zfp); 1279 zfs_close(zfp); 1280 } 1281 1282 return (0); 1283} 1284 1285/* 1286 * Create a checkpoint in the given pool. 1287 */ 1288int 1289zpool_checkpoint(zpool_handle_t *zhp) 1290{ 1291 libzfs_handle_t *hdl = zhp->zpool_hdl; 1292 char msg[1024]; 1293 int error; 1294 1295 error = lzc_pool_checkpoint(zhp->zpool_name); 1296 if (error != 0) { 1297 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1298 "cannot checkpoint '%s'"), zhp->zpool_name); 1299 (void) zpool_standard_error(hdl, error, msg); 1300 return (-1); 1301 } 1302 1303 return (0); 1304} 1305 1306/* 1307 * Discard the checkpoint from the given pool. 1308 */ 1309int 1310zpool_discard_checkpoint(zpool_handle_t *zhp) 1311{ 1312 libzfs_handle_t *hdl = zhp->zpool_hdl; 1313 char msg[1024]; 1314 int error; 1315 1316 error = lzc_pool_checkpoint_discard(zhp->zpool_name); 1317 if (error != 0) { 1318 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1319 "cannot discard checkpoint in '%s'"), zhp->zpool_name); 1320 (void) zpool_standard_error(hdl, error, msg); 1321 return (-1); 1322 } 1323 1324 return (0); 1325} 1326 1327/* 1328 * Add the given vdevs to the pool. The caller must have already performed the 1329 * necessary verification to ensure that the vdev specification is well-formed. 1330 */ 1331int 1332zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) 1333{ 1334 zfs_cmd_t zc = { 0 }; 1335 int ret; 1336 libzfs_handle_t *hdl = zhp->zpool_hdl; 1337 char msg[1024]; 1338 nvlist_t **spares, **l2cache; 1339 uint_t nspares, nl2cache; 1340 1341 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1342 "cannot add to '%s'"), zhp->zpool_name); 1343 1344 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1345 SPA_VERSION_SPARES && 1346 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1347 &spares, &nspares) == 0) { 1348 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1349 "upgraded to add hot spares")); 1350 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1351 } 1352 1353 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1354 SPA_VERSION_L2CACHE && 1355 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1356 &l2cache, &nl2cache) == 0) { 1357 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1358 "upgraded to add cache devices")); 1359 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1360 } 1361 1362 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1363 return (-1); 1364 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1365 1366 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) { 1367 switch (errno) { 1368 case EBUSY: 1369 /* 1370 * This can happen if the user has specified the same 1371 * device multiple times. We can't reliably detect this 1372 * until we try to add it and see we already have a 1373 * label. 1374 */ 1375 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1376 "one or more vdevs refer to the same device")); 1377 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1378 break; 1379 1380 case EINVAL: 1381 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1382 "invalid config; a pool with removing/removed " 1383 "vdevs does not support adding raidz vdevs")); 1384 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1385 break; 1386 1387 case EOVERFLOW: 1388 /* 1389 * This occurrs when one of the devices is below 1390 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1391 * device was the problem device since there's no 1392 * reliable way to determine device size from userland. 1393 */ 1394 { 1395 char buf[64]; 1396 1397 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1398 1399 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1400 "device is less than the minimum " 1401 "size (%s)"), buf); 1402 } 1403 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1404 break; 1405 1406 case ENOTSUP: 1407 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1408 "pool must be upgraded to add these vdevs")); 1409 (void) zfs_error(hdl, EZFS_BADVERSION, msg); 1410 break; 1411 1412 case EDOM: 1413 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1414 "root pool can not have multiple vdevs" 1415 " or separate logs")); 1416 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); 1417 break; 1418 1419 case ENOTBLK: 1420 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1421 "cache device must be a disk or disk slice")); 1422 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1423 break; 1424 1425 default: 1426 (void) zpool_standard_error(hdl, errno, msg); 1427 } 1428 1429 ret = -1; 1430 } else { 1431 ret = 0; 1432 } 1433 1434 zcmd_free_nvlists(&zc); 1435 1436 return (ret); 1437} 1438 1439/* 1440 * Exports the pool from the system. The caller must ensure that there are no 1441 * mounted datasets in the pool. 1442 */ 1443static int 1444zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce, 1445 const char *log_str) 1446{ 1447 zfs_cmd_t zc = { 0 }; 1448 char msg[1024]; 1449 1450 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1451 "cannot export '%s'"), zhp->zpool_name); 1452 1453 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1454 zc.zc_cookie = force; 1455 zc.zc_guid = hardforce; 1456 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1457 1458 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { 1459 switch (errno) { 1460 case EXDEV: 1461 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 1462 "use '-f' to override the following errors:\n" 1463 "'%s' has an active shared spare which could be" 1464 " used by other pools once '%s' is exported."), 1465 zhp->zpool_name, zhp->zpool_name); 1466 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, 1467 msg)); 1468 default: 1469 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, 1470 msg)); 1471 } 1472 } 1473 1474 return (0); 1475} 1476 1477int 1478zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str) 1479{ 1480 return (zpool_export_common(zhp, force, B_FALSE, log_str)); 1481} 1482 1483int 1484zpool_export_force(zpool_handle_t *zhp, const char *log_str) 1485{ 1486 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str)); 1487} 1488 1489static void 1490zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun, 1491 nvlist_t *config) 1492{ 1493 nvlist_t *nv = NULL; 1494 uint64_t rewindto; 1495 int64_t loss = -1; 1496 struct tm t; 1497 char timestr[128]; 1498 1499 if (!hdl->libzfs_printerr || config == NULL) 1500 return; 1501 1502 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1503 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) { 1504 return; 1505 } 1506 1507 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1508 return; 1509 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1510 1511 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1512 strftime(timestr, 128, 0, &t) != 0) { 1513 if (dryrun) { 1514 (void) printf(dgettext(TEXT_DOMAIN, 1515 "Would be able to return %s " 1516 "to its state as of %s.\n"), 1517 name, timestr); 1518 } else { 1519 (void) printf(dgettext(TEXT_DOMAIN, 1520 "Pool %s returned to its state as of %s.\n"), 1521 name, timestr); 1522 } 1523 if (loss > 120) { 1524 (void) printf(dgettext(TEXT_DOMAIN, 1525 "%s approximately %lld "), 1526 dryrun ? "Would discard" : "Discarded", 1527 (loss + 30) / 60); 1528 (void) printf(dgettext(TEXT_DOMAIN, 1529 "minutes of transactions.\n")); 1530 } else if (loss > 0) { 1531 (void) printf(dgettext(TEXT_DOMAIN, 1532 "%s approximately %lld "), 1533 dryrun ? "Would discard" : "Discarded", loss); 1534 (void) printf(dgettext(TEXT_DOMAIN, 1535 "seconds of transactions.\n")); 1536 } 1537 } 1538} 1539 1540void 1541zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1542 nvlist_t *config) 1543{ 1544 nvlist_t *nv = NULL; 1545 int64_t loss = -1; 1546 uint64_t edata = UINT64_MAX; 1547 uint64_t rewindto; 1548 struct tm t; 1549 char timestr[128]; 1550 1551 if (!hdl->libzfs_printerr) 1552 return; 1553 1554 if (reason >= 0) 1555 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1556 else 1557 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1558 1559 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1560 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1561 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 || 1562 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1563 goto no_info; 1564 1565 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1566 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1567 &edata); 1568 1569 (void) printf(dgettext(TEXT_DOMAIN, 1570 "Recovery is possible, but will result in some data loss.\n")); 1571 1572 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1573 strftime(timestr, 128, 0, &t) != 0) { 1574 (void) printf(dgettext(TEXT_DOMAIN, 1575 "\tReturning the pool to its state as of %s\n" 1576 "\tshould correct the problem. "), 1577 timestr); 1578 } else { 1579 (void) printf(dgettext(TEXT_DOMAIN, 1580 "\tReverting the pool to an earlier state " 1581 "should correct the problem.\n\t")); 1582 } 1583 1584 if (loss > 120) { 1585 (void) printf(dgettext(TEXT_DOMAIN, 1586 "Approximately %lld minutes of data\n" 1587 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1588 } else if (loss > 0) { 1589 (void) printf(dgettext(TEXT_DOMAIN, 1590 "Approximately %lld seconds of data\n" 1591 "\tmust be discarded, irreversibly. "), loss); 1592 } 1593 if (edata != 0 && edata != UINT64_MAX) { 1594 if (edata == 1) { 1595 (void) printf(dgettext(TEXT_DOMAIN, 1596 "After rewind, at least\n" 1597 "\tone persistent user-data error will remain. ")); 1598 } else { 1599 (void) printf(dgettext(TEXT_DOMAIN, 1600 "After rewind, several\n" 1601 "\tpersistent user-data errors will remain. ")); 1602 } 1603 } 1604 (void) printf(dgettext(TEXT_DOMAIN, 1605 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1606 reason >= 0 ? "clear" : "import", name); 1607 1608 (void) printf(dgettext(TEXT_DOMAIN, 1609 "A scrub of the pool\n" 1610 "\tis strongly recommended after recovery.\n")); 1611 return; 1612 1613no_info: 1614 (void) printf(dgettext(TEXT_DOMAIN, 1615 "Destroy and re-create the pool from\n\ta backup source.\n")); 1616} 1617 1618/* 1619 * zpool_import() is a contracted interface. Should be kept the same 1620 * if possible. 1621 * 1622 * Applications should use zpool_import_props() to import a pool with 1623 * new properties value to be set. 1624 */ 1625int 1626zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1627 char *altroot) 1628{ 1629 nvlist_t *props = NULL; 1630 int ret; 1631 1632 if (altroot != NULL) { 1633 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1634 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1635 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1636 newname)); 1637 } 1638 1639 if (nvlist_add_string(props, 1640 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1641 nvlist_add_string(props, 1642 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1643 nvlist_free(props); 1644 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1645 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1646 newname)); 1647 } 1648 } 1649 1650 ret = zpool_import_props(hdl, config, newname, props, 1651 ZFS_IMPORT_NORMAL); 1652 nvlist_free(props); 1653 return (ret); 1654} 1655 1656static void 1657print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1658 int indent) 1659{ 1660 nvlist_t **child; 1661 uint_t c, children; 1662 char *vname; 1663 uint64_t is_log = 0; 1664 1665 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1666 &is_log); 1667 1668 if (name != NULL) 1669 (void) printf("\t%*s%s%s\n", indent, "", name, 1670 is_log ? " [log]" : ""); 1671 1672 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1673 &child, &children) != 0) 1674 return; 1675 1676 for (c = 0; c < children; c++) { 1677 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); 1678 print_vdev_tree(hdl, vname, child[c], indent + 2); 1679 free(vname); 1680 } 1681} 1682 1683void 1684zpool_print_unsup_feat(nvlist_t *config) 1685{ 1686 nvlist_t *nvinfo, *unsup_feat; 1687 1688 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 1689 0); 1690 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT, 1691 &unsup_feat) == 0); 1692 1693 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL; 1694 nvp = nvlist_next_nvpair(unsup_feat, nvp)) { 1695 char *desc; 1696 1697 verify(nvpair_type(nvp) == DATA_TYPE_STRING); 1698 verify(nvpair_value_string(nvp, &desc) == 0); 1699 1700 if (strlen(desc) > 0) 1701 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc); 1702 else 1703 (void) printf("\t%s\n", nvpair_name(nvp)); 1704 } 1705} 1706 1707/* 1708 * Import the given pool using the known configuration and a list of 1709 * properties to be set. The configuration should have come from 1710 * zpool_find_import(). The 'newname' parameters control whether the pool 1711 * is imported with a different name. 1712 */ 1713int 1714zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1715 nvlist_t *props, int flags) 1716{ 1717 zfs_cmd_t zc = { 0 }; 1718 zpool_rewind_policy_t policy; 1719 nvlist_t *nv = NULL; 1720 nvlist_t *nvinfo = NULL; 1721 nvlist_t *missing = NULL; 1722 char *thename; 1723 char *origname; 1724 int ret; 1725 int error = 0; 1726 char errbuf[1024]; 1727 1728 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1729 &origname) == 0); 1730 1731 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1732 "cannot import pool '%s'"), origname); 1733 1734 if (newname != NULL) { 1735 if (!zpool_name_valid(hdl, B_FALSE, newname)) 1736 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1737 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1738 newname)); 1739 thename = (char *)newname; 1740 } else { 1741 thename = origname; 1742 } 1743 1744 if (props != NULL) { 1745 uint64_t version; 1746 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1747 1748 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1749 &version) == 0); 1750 1751 if ((props = zpool_valid_proplist(hdl, origname, 1752 props, version, flags, errbuf)) == NULL) 1753 return (-1); 1754 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1755 nvlist_free(props); 1756 return (-1); 1757 } 1758 nvlist_free(props); 1759 } 1760 1761 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1762 1763 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1764 &zc.zc_guid) == 0); 1765 1766 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1767 zcmd_free_nvlists(&zc); 1768 return (-1); 1769 } 1770 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1771 zcmd_free_nvlists(&zc); 1772 return (-1); 1773 } 1774 1775 zc.zc_cookie = flags; 1776 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 1777 errno == ENOMEM) { 1778 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 1779 zcmd_free_nvlists(&zc); 1780 return (-1); 1781 } 1782 } 1783 if (ret != 0) 1784 error = errno; 1785 1786 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 1787 1788 zcmd_free_nvlists(&zc); 1789 1790 zpool_get_rewind_policy(config, &policy); 1791 1792 if (error) { 1793 char desc[1024]; 1794 1795 /* 1796 * Dry-run failed, but we print out what success 1797 * looks like if we found a best txg 1798 */ 1799 if (policy.zrp_request & ZPOOL_TRY_REWIND) { 1800 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1801 B_TRUE, nv); 1802 nvlist_free(nv); 1803 return (-1); 1804 } 1805 1806 if (newname == NULL) 1807 (void) snprintf(desc, sizeof (desc), 1808 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1809 thename); 1810 else 1811 (void) snprintf(desc, sizeof (desc), 1812 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1813 origname, thename); 1814 1815 switch (error) { 1816 case ENOTSUP: 1817 if (nv != NULL && nvlist_lookup_nvlist(nv, 1818 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1819 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) { 1820 (void) printf(dgettext(TEXT_DOMAIN, "This " 1821 "pool uses the following feature(s) not " 1822 "supported by this system:\n")); 1823 zpool_print_unsup_feat(nv); 1824 if (nvlist_exists(nvinfo, 1825 ZPOOL_CONFIG_CAN_RDONLY)) { 1826 (void) printf(dgettext(TEXT_DOMAIN, 1827 "All unsupported features are only " 1828 "required for writing to the pool." 1829 "\nThe pool can be imported using " 1830 "'-o readonly=on'.\n")); 1831 } 1832 } 1833 /* 1834 * Unsupported version. 1835 */ 1836 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1837 break; 1838 1839 case EINVAL: 1840 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1841 break; 1842 1843 case EROFS: 1844 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1845 "one or more devices is read only")); 1846 (void) zfs_error(hdl, EZFS_BADDEV, desc); 1847 break; 1848 1849 case ENXIO: 1850 if (nv && nvlist_lookup_nvlist(nv, 1851 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1852 nvlist_lookup_nvlist(nvinfo, 1853 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 1854 (void) printf(dgettext(TEXT_DOMAIN, 1855 "The devices below are missing or " 1856 "corrupted, use '-m' to import the pool " 1857 "anyway:\n")); 1858 print_vdev_tree(hdl, NULL, missing, 2); 1859 (void) printf("\n"); 1860 } 1861 (void) zpool_standard_error(hdl, error, desc); 1862 break; 1863 1864 case EEXIST: 1865 (void) zpool_standard_error(hdl, error, desc); 1866 break; 1867 case ENAMETOOLONG: 1868 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1869 "new name of at least one dataset is longer than " 1870 "the maximum allowable length")); 1871 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc); 1872 break; 1873 default: 1874 (void) zpool_standard_error(hdl, error, desc); 1875 zpool_explain_recover(hdl, 1876 newname ? origname : thename, -error, nv); 1877 break; 1878 } 1879 1880 nvlist_free(nv); 1881 ret = -1; 1882 } else { 1883 zpool_handle_t *zhp; 1884 1885 /* 1886 * This should never fail, but play it safe anyway. 1887 */ 1888 if (zpool_open_silent(hdl, thename, &zhp) != 0) 1889 ret = -1; 1890 else if (zhp != NULL) 1891 zpool_close(zhp); 1892 if (policy.zrp_request & 1893 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 1894 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1895 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv); 1896 } 1897 nvlist_free(nv); 1898 return (0); 1899 } 1900 1901 return (ret); 1902} 1903 1904/* 1905 * Scan the pool. 1906 */ 1907int 1908zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd) 1909{ 1910 zfs_cmd_t zc = { 0 }; 1911 char msg[1024]; 1912 int err; 1913 libzfs_handle_t *hdl = zhp->zpool_hdl; 1914 1915 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1916 zc.zc_cookie = func; 1917 zc.zc_flags = cmd; 1918 1919 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0) 1920 return (0); 1921 1922 err = errno; 1923 1924 /* ECANCELED on a scrub means we resumed a paused scrub */ 1925 if (err == ECANCELED && func == POOL_SCAN_SCRUB && 1926 cmd == POOL_SCRUB_NORMAL) 1927 return (0); 1928 1929 if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL) 1930 return (0); 1931 1932 if (func == POOL_SCAN_SCRUB) { 1933 if (cmd == POOL_SCRUB_PAUSE) { 1934 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1935 "cannot pause scrubbing %s"), zc.zc_name); 1936 } else { 1937 assert(cmd == POOL_SCRUB_NORMAL); 1938 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1939 "cannot scrub %s"), zc.zc_name); 1940 } 1941 } else if (func == POOL_SCAN_NONE) { 1942 (void) snprintf(msg, sizeof (msg), 1943 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 1944 zc.zc_name); 1945 } else { 1946 assert(!"unexpected result"); 1947 } 1948 1949 if (err == EBUSY) { 1950 nvlist_t *nvroot; 1951 pool_scan_stat_t *ps = NULL; 1952 uint_t psc; 1953 1954 verify(nvlist_lookup_nvlist(zhp->zpool_config, 1955 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1956 (void) nvlist_lookup_uint64_array(nvroot, 1957 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 1958 if (ps && ps->pss_func == POOL_SCAN_SCRUB) { 1959 if (cmd == POOL_SCRUB_PAUSE) 1960 return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg)); 1961 else 1962 return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 1963 } else { 1964 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 1965 } 1966 } else if (err == ENOENT) { 1967 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 1968 } else { 1969 return (zpool_standard_error(hdl, err, msg)); 1970 } 1971} 1972 1973#ifdef illumos 1974/* 1975 * This provides a very minimal check whether a given string is likely a 1976 * c#t#d# style string. Users of this are expected to do their own 1977 * verification of the s# part. 1978 */ 1979#define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1])) 1980 1981/* 1982 * More elaborate version for ones which may start with "/dev/dsk/" 1983 * and the like. 1984 */ 1985static int 1986ctd_check_path(char *str) 1987{ 1988 /* 1989 * If it starts with a slash, check the last component. 1990 */ 1991 if (str && str[0] == '/') { 1992 char *tmp = strrchr(str, '/'); 1993 1994 /* 1995 * If it ends in "/old", check the second-to-last 1996 * component of the string instead. 1997 */ 1998 if (tmp != str && strcmp(tmp, "/old") == 0) { 1999 for (tmp--; *tmp != '/'; tmp--) 2000 ; 2001 } 2002 str = tmp + 1; 2003 } 2004 return (CTD_CHECK(str)); 2005} 2006#endif 2007 2008/* 2009 * Find a vdev that matches the search criteria specified. We use the 2010 * the nvpair name to determine how we should look for the device. 2011 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL 2012 * spare; but FALSE if its an INUSE spare. 2013 */ 2014static nvlist_t * 2015vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare, 2016 boolean_t *l2cache, boolean_t *log) 2017{ 2018 uint_t c, children; 2019 nvlist_t **child; 2020 nvlist_t *ret; 2021 uint64_t is_log; 2022 char *srchkey; 2023 nvpair_t *pair = nvlist_next_nvpair(search, NULL); 2024 2025 /* Nothing to look for */ 2026 if (search == NULL || pair == NULL) 2027 return (NULL); 2028 2029 /* Obtain the key we will use to search */ 2030 srchkey = nvpair_name(pair); 2031 2032 switch (nvpair_type(pair)) { 2033 case DATA_TYPE_UINT64: 2034 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) { 2035 uint64_t srchval, theguid; 2036 2037 verify(nvpair_value_uint64(pair, &srchval) == 0); 2038 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 2039 &theguid) == 0); 2040 if (theguid == srchval) 2041 return (nv); 2042 } 2043 break; 2044 2045 case DATA_TYPE_STRING: { 2046 char *srchval, *val; 2047 2048 verify(nvpair_value_string(pair, &srchval) == 0); 2049 if (nvlist_lookup_string(nv, srchkey, &val) != 0) 2050 break; 2051 2052 /* 2053 * Search for the requested value. Special cases: 2054 * 2055 * - ZPOOL_CONFIG_PATH for whole disk entries. To support 2056 * UEFI boot, these end in "s0" or "s0/old" or "s1" or 2057 * "s1/old". The "s0" or "s1" part is hidden from the user, 2058 * but included in the string, so this matches around it. 2059 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE). 2060 * 2061 * Otherwise, all other searches are simple string compares. 2062 */ 2063#ifdef illumos 2064 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && 2065 ctd_check_path(val)) { 2066 uint64_t wholedisk = 0; 2067 2068 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 2069 &wholedisk); 2070 if (wholedisk) { 2071 int slen = strlen(srchval); 2072 int vlen = strlen(val); 2073 2074 if (slen != vlen - 2) 2075 break; 2076 2077 /* 2078 * make_leaf_vdev() should only set 2079 * wholedisk for ZPOOL_CONFIG_PATHs which 2080 * will include "/dev/dsk/", giving plenty of 2081 * room for the indices used next. 2082 */ 2083 ASSERT(vlen >= 6); 2084 2085 /* 2086 * strings identical except trailing "s0" 2087 */ 2088 if ((strcmp(&val[vlen - 2], "s0") == 0 || 2089 strcmp(&val[vlen - 2], "s1") == 0) && 2090 strncmp(srchval, val, slen) == 0) 2091 return (nv); 2092 2093 /* 2094 * strings identical except trailing "s0/old" 2095 */ 2096 if ((strcmp(&val[vlen - 6], "s0/old") == 0 || 2097 strcmp(&val[vlen - 6], "s1/old") == 0) && 2098 strcmp(&srchval[slen - 4], "/old") == 0 && 2099 strncmp(srchval, val, slen - 4) == 0) 2100 return (nv); 2101 2102 break; 2103 } 2104 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 2105#else 2106 if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 2107#endif 2108 char *type, *idx, *end, *p; 2109 uint64_t id, vdev_id; 2110 2111 /* 2112 * Determine our vdev type, keeping in mind 2113 * that the srchval is composed of a type and 2114 * vdev id pair (i.e. mirror-4). 2115 */ 2116 if ((type = strdup(srchval)) == NULL) 2117 return (NULL); 2118 2119 if ((p = strrchr(type, '-')) == NULL) { 2120 free(type); 2121 break; 2122 } 2123 idx = p + 1; 2124 *p = '\0'; 2125 2126 /* 2127 * If the types don't match then keep looking. 2128 */ 2129 if (strncmp(val, type, strlen(val)) != 0) { 2130 free(type); 2131 break; 2132 } 2133 2134 verify(zpool_vdev_is_interior(type)); 2135 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 2136 &id) == 0); 2137 2138 errno = 0; 2139 vdev_id = strtoull(idx, &end, 10); 2140 2141 free(type); 2142 if (errno != 0) 2143 return (NULL); 2144 2145 /* 2146 * Now verify that we have the correct vdev id. 2147 */ 2148 if (vdev_id == id) 2149 return (nv); 2150 } 2151 2152 /* 2153 * Common case 2154 */ 2155 if (strcmp(srchval, val) == 0) 2156 return (nv); 2157 break; 2158 } 2159 2160 default: 2161 break; 2162 } 2163 2164 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2165 &child, &children) != 0) 2166 return (NULL); 2167 2168 for (c = 0; c < children; c++) { 2169 if ((ret = vdev_to_nvlist_iter(child[c], search, 2170 avail_spare, l2cache, NULL)) != NULL) { 2171 /* 2172 * The 'is_log' value is only set for the toplevel 2173 * vdev, not the leaf vdevs. So we always lookup the 2174 * log device from the root of the vdev tree (where 2175 * 'log' is non-NULL). 2176 */ 2177 if (log != NULL && 2178 nvlist_lookup_uint64(child[c], 2179 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 2180 is_log) { 2181 *log = B_TRUE; 2182 } 2183 return (ret); 2184 } 2185 } 2186 2187 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2188 &child, &children) == 0) { 2189 for (c = 0; c < children; c++) { 2190 if ((ret = vdev_to_nvlist_iter(child[c], search, 2191 avail_spare, l2cache, NULL)) != NULL) { 2192 *avail_spare = B_TRUE; 2193 return (ret); 2194 } 2195 } 2196 } 2197 2198 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2199 &child, &children) == 0) { 2200 for (c = 0; c < children; c++) { 2201 if ((ret = vdev_to_nvlist_iter(child[c], search, 2202 avail_spare, l2cache, NULL)) != NULL) { 2203 *l2cache = B_TRUE; 2204 return (ret); 2205 } 2206 } 2207 } 2208 2209 return (NULL); 2210} 2211 2212/* 2213 * Given a physical path (minus the "/devices" prefix), find the 2214 * associated vdev. 2215 */ 2216nvlist_t * 2217zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 2218 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 2219{ 2220 nvlist_t *search, *nvroot, *ret; 2221 2222 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2223 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); 2224 2225 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2226 &nvroot) == 0); 2227 2228 *avail_spare = B_FALSE; 2229 *l2cache = B_FALSE; 2230 if (log != NULL) 2231 *log = B_FALSE; 2232 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2233 nvlist_free(search); 2234 2235 return (ret); 2236} 2237 2238/* 2239 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 2240 */ 2241static boolean_t 2242zpool_vdev_is_interior(const char *name) 2243{ 2244 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2245 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 || 2246 strncmp(name, 2247 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 || 2248 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2249 return (B_TRUE); 2250 return (B_FALSE); 2251} 2252 2253nvlist_t * 2254zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2255 boolean_t *l2cache, boolean_t *log) 2256{ 2257 char buf[MAXPATHLEN]; 2258 char *end; 2259 nvlist_t *nvroot, *search, *ret; 2260 uint64_t guid; 2261 2262 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2263 2264 guid = strtoull(path, &end, 10); 2265 if (guid != 0 && *end == '\0') { 2266 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2267 } else if (zpool_vdev_is_interior(path)) { 2268 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2269 } else if (path[0] != '/') { 2270 (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path); 2271 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 2272 } else { 2273 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2274 } 2275 2276 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2277 &nvroot) == 0); 2278 2279 *avail_spare = B_FALSE; 2280 *l2cache = B_FALSE; 2281 if (log != NULL) 2282 *log = B_FALSE; 2283 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2284 nvlist_free(search); 2285 2286 return (ret); 2287} 2288 2289static int 2290vdev_online(nvlist_t *nv) 2291{ 2292 uint64_t ival; 2293 2294 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2295 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2296 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2297 return (0); 2298 2299 return (1); 2300} 2301 2302/* 2303 * Helper function for zpool_get_physpaths(). 2304 */ 2305static int 2306vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2307 size_t *bytes_written) 2308{ 2309 size_t bytes_left, pos, rsz; 2310 char *tmppath; 2311 const char *format; 2312 2313 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2314 &tmppath) != 0) 2315 return (EZFS_NODEVICE); 2316 2317 pos = *bytes_written; 2318 bytes_left = physpath_size - pos; 2319 format = (pos == 0) ? "%s" : " %s"; 2320 2321 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2322 *bytes_written += rsz; 2323 2324 if (rsz >= bytes_left) { 2325 /* if physpath was not copied properly, clear it */ 2326 if (bytes_left != 0) { 2327 physpath[pos] = 0; 2328 } 2329 return (EZFS_NOSPC); 2330 } 2331 return (0); 2332} 2333 2334static int 2335vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2336 size_t *rsz, boolean_t is_spare) 2337{ 2338 char *type; 2339 int ret; 2340 2341 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2342 return (EZFS_INVALCONFIG); 2343 2344 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2345 /* 2346 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2347 * For a spare vdev, we only want to boot from the active 2348 * spare device. 2349 */ 2350 if (is_spare) { 2351 uint64_t spare = 0; 2352 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2353 &spare); 2354 if (!spare) 2355 return (EZFS_INVALCONFIG); 2356 } 2357 2358 if (vdev_online(nv)) { 2359 if ((ret = vdev_get_one_physpath(nv, physpath, 2360 phypath_size, rsz)) != 0) 2361 return (ret); 2362 } 2363 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2364 strcmp(type, VDEV_TYPE_RAIDZ) == 0 || 2365 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2366 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2367 nvlist_t **child; 2368 uint_t count; 2369 int i, ret; 2370 2371 if (nvlist_lookup_nvlist_array(nv, 2372 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2373 return (EZFS_INVALCONFIG); 2374 2375 for (i = 0; i < count; i++) { 2376 ret = vdev_get_physpaths(child[i], physpath, 2377 phypath_size, rsz, is_spare); 2378 if (ret == EZFS_NOSPC) 2379 return (ret); 2380 } 2381 } 2382 2383 return (EZFS_POOL_INVALARG); 2384} 2385 2386/* 2387 * Get phys_path for a root pool config. 2388 * Return 0 on success; non-zero on failure. 2389 */ 2390static int 2391zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2392{ 2393 size_t rsz; 2394 nvlist_t *vdev_root; 2395 nvlist_t **child; 2396 uint_t count; 2397 char *type; 2398 2399 rsz = 0; 2400 2401 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2402 &vdev_root) != 0) 2403 return (EZFS_INVALCONFIG); 2404 2405 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2406 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2407 &child, &count) != 0) 2408 return (EZFS_INVALCONFIG); 2409 2410 /* 2411 * root pool can only have a single top-level vdev. 2412 */ 2413 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1) 2414 return (EZFS_POOL_INVALARG); 2415 2416 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2417 B_FALSE); 2418 2419 /* No online devices */ 2420 if (rsz == 0) 2421 return (EZFS_NODEVICE); 2422 2423 return (0); 2424} 2425 2426/* 2427 * Get phys_path for a root pool 2428 * Return 0 on success; non-zero on failure. 2429 */ 2430int 2431zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2432{ 2433 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2434 phypath_size)); 2435} 2436 2437/* 2438 * If the device has being dynamically expanded then we need to relabel 2439 * the disk to use the new unallocated space. 2440 */ 2441static int 2442zpool_relabel_disk(libzfs_handle_t *hdl, const char *name) 2443{ 2444#ifdef illumos 2445 char path[MAXPATHLEN]; 2446 char errbuf[1024]; 2447 int fd, error; 2448 int (*_efi_use_whole_disk)(int); 2449 2450 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, 2451 "efi_use_whole_disk")) == NULL) 2452 return (-1); 2453 2454 (void) snprintf(path, sizeof (path), "%s/%s", ZFS_RDISK_ROOT, name); 2455 2456 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2457 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2458 "relabel '%s': unable to open device"), name); 2459 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2460 } 2461 2462 /* 2463 * It's possible that we might encounter an error if the device 2464 * does not have any unallocated space left. If so, we simply 2465 * ignore that error and continue on. 2466 */ 2467 error = _efi_use_whole_disk(fd); 2468 (void) close(fd); 2469 if (error && error != VT_ENOSPC) { 2470 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2471 "relabel '%s': unable to read disk capacity"), name); 2472 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2473 } 2474#endif /* illumos */ 2475 return (0); 2476} 2477 2478/* 2479 * Bring the specified vdev online. The 'flags' parameter is a set of the 2480 * ZFS_ONLINE_* flags. 2481 */ 2482int 2483zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2484 vdev_state_t *newstate) 2485{ 2486 zfs_cmd_t zc = { 0 }; 2487 char msg[1024]; 2488 char *pathname; 2489 nvlist_t *tgt; 2490 boolean_t avail_spare, l2cache, islog; 2491 libzfs_handle_t *hdl = zhp->zpool_hdl; 2492 2493 if (flags & ZFS_ONLINE_EXPAND) { 2494 (void) snprintf(msg, sizeof (msg), 2495 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2496 } else { 2497 (void) snprintf(msg, sizeof (msg), 2498 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2499 } 2500 2501 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2502 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2503 &islog)) == NULL) 2504 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2505 2506 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2507 2508 if (avail_spare) 2509 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2510 2511 if ((flags & ZFS_ONLINE_EXPAND || 2512 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) && 2513 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) { 2514 uint64_t wholedisk = 0; 2515 2516 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2517 &wholedisk); 2518 2519 /* 2520 * XXX - L2ARC 1.0 devices can't support expansion. 2521 */ 2522 if (l2cache) { 2523 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2524 "cannot expand cache devices")); 2525 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2526 } 2527 2528 if (wholedisk) { 2529 pathname += strlen(ZFS_DISK_ROOT) + 1; 2530 (void) zpool_relabel_disk(hdl, pathname); 2531 } 2532 } 2533 2534 zc.zc_cookie = VDEV_STATE_ONLINE; 2535 zc.zc_obj = flags; 2536 2537 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2538 if (errno == EINVAL) { 2539 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2540 "from this pool into a new one. Use '%s' " 2541 "instead"), "zpool detach"); 2542 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2543 } 2544 return (zpool_standard_error(hdl, errno, msg)); 2545 } 2546 2547 *newstate = zc.zc_cookie; 2548 return (0); 2549} 2550 2551/* 2552 * Take the specified vdev offline 2553 */ 2554int 2555zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2556{ 2557 zfs_cmd_t zc = { 0 }; 2558 char msg[1024]; 2559 nvlist_t *tgt; 2560 boolean_t avail_spare, l2cache; 2561 libzfs_handle_t *hdl = zhp->zpool_hdl; 2562 2563 (void) snprintf(msg, sizeof (msg), 2564 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2565 2566 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2567 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2568 NULL)) == NULL) 2569 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2570 2571 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2572 2573 if (avail_spare) 2574 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2575 2576 zc.zc_cookie = VDEV_STATE_OFFLINE; 2577 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0; 2578 2579 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2580 return (0); 2581 2582 switch (errno) { 2583 case EBUSY: 2584 2585 /* 2586 * There are no other replicas of this device. 2587 */ 2588 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2589 2590 case EEXIST: 2591 /* 2592 * The log device has unplayed logs 2593 */ 2594 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2595 2596 default: 2597 return (zpool_standard_error(hdl, errno, msg)); 2598 } 2599} 2600 2601/* 2602 * Mark the given vdev faulted. 2603 */ 2604int 2605zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2606{ 2607 zfs_cmd_t zc = { 0 }; 2608 char msg[1024]; 2609 libzfs_handle_t *hdl = zhp->zpool_hdl; 2610 2611 (void) snprintf(msg, sizeof (msg), 2612 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2613 2614 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2615 zc.zc_guid = guid; 2616 zc.zc_cookie = VDEV_STATE_FAULTED; 2617 zc.zc_obj = aux; 2618 2619 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2620 return (0); 2621 2622 switch (errno) { 2623 case EBUSY: 2624 2625 /* 2626 * There are no other replicas of this device. 2627 */ 2628 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2629 2630 default: 2631 return (zpool_standard_error(hdl, errno, msg)); 2632 } 2633 2634} 2635 2636/* 2637 * Mark the given vdev degraded. 2638 */ 2639int 2640zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2641{ 2642 zfs_cmd_t zc = { 0 }; 2643 char msg[1024]; 2644 libzfs_handle_t *hdl = zhp->zpool_hdl; 2645 2646 (void) snprintf(msg, sizeof (msg), 2647 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2648 2649 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2650 zc.zc_guid = guid; 2651 zc.zc_cookie = VDEV_STATE_DEGRADED; 2652 zc.zc_obj = aux; 2653 2654 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2655 return (0); 2656 2657 return (zpool_standard_error(hdl, errno, msg)); 2658} 2659 2660/* 2661 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2662 * a hot spare. 2663 */ 2664static boolean_t 2665is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2666{ 2667 nvlist_t **child; 2668 uint_t c, children; 2669 char *type; 2670 2671 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2672 &children) == 0) { 2673 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2674 &type) == 0); 2675 2676 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2677 children == 2 && child[which] == tgt) 2678 return (B_TRUE); 2679 2680 for (c = 0; c < children; c++) 2681 if (is_replacing_spare(child[c], tgt, which)) 2682 return (B_TRUE); 2683 } 2684 2685 return (B_FALSE); 2686} 2687 2688/* 2689 * Attach new_disk (fully described by nvroot) to old_disk. 2690 * If 'replacing' is specified, the new disk will replace the old one. 2691 */ 2692int 2693zpool_vdev_attach(zpool_handle_t *zhp, 2694 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2695{ 2696 zfs_cmd_t zc = { 0 }; 2697 char msg[1024]; 2698 int ret; 2699 nvlist_t *tgt; 2700 boolean_t avail_spare, l2cache, islog; 2701 uint64_t val; 2702 char *newname; 2703 nvlist_t **child; 2704 uint_t children; 2705 nvlist_t *config_root; 2706 libzfs_handle_t *hdl = zhp->zpool_hdl; 2707 boolean_t rootpool = zpool_is_bootable(zhp); 2708 2709 if (replacing) 2710 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2711 "cannot replace %s with %s"), old_disk, new_disk); 2712 else 2713 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2714 "cannot attach %s to %s"), new_disk, old_disk); 2715 2716 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2717 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2718 &islog)) == NULL) 2719 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2720 2721 if (avail_spare) 2722 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2723 2724 if (l2cache) 2725 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2726 2727 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2728 zc.zc_cookie = replacing; 2729 2730 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2731 &child, &children) != 0 || children != 1) { 2732 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2733 "new device must be a single disk")); 2734 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2735 } 2736 2737 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2738 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2739 2740 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2741 return (-1); 2742 2743 /* 2744 * If the target is a hot spare that has been swapped in, we can only 2745 * replace it with another hot spare. 2746 */ 2747 if (replacing && 2748 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2749 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2750 NULL) == NULL || !avail_spare) && 2751 is_replacing_spare(config_root, tgt, 1)) { 2752 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2753 "can only be replaced by another hot spare")); 2754 free(newname); 2755 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2756 } 2757 2758 free(newname); 2759 2760 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2761 return (-1); 2762 2763 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2764 2765 zcmd_free_nvlists(&zc); 2766 2767 if (ret == 0) { 2768 if (rootpool) { 2769 /* 2770 * XXX need a better way to prevent user from 2771 * booting up a half-baked vdev. 2772 */ 2773 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2774 "sure to wait until resilver is done " 2775 "before rebooting.\n")); 2776 (void) fprintf(stderr, "\n"); 2777 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If " 2778 "you boot from pool '%s', you may need to update\n" 2779 "boot code on newly attached disk '%s'.\n\n" 2780 "Assuming you use GPT partitioning and 'da0' is " 2781 "your new boot disk\n" 2782 "you may use the following command:\n\n" 2783 "\tgpart bootcode -b /boot/pmbr -p " 2784 "/boot/gptzfsboot -i 1 da0\n\n"), 2785 zhp->zpool_name, new_disk); 2786 } 2787 return (0); 2788 } 2789 2790 switch (errno) { 2791 case ENOTSUP: 2792 /* 2793 * Can't attach to or replace this type of vdev. 2794 */ 2795 if (replacing) { 2796 uint64_t version = zpool_get_prop_int(zhp, 2797 ZPOOL_PROP_VERSION, NULL); 2798 2799 if (islog) 2800 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2801 "cannot replace a log with a spare")); 2802 else if (version >= SPA_VERSION_MULTI_REPLACE) 2803 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2804 "already in replacing/spare config; wait " 2805 "for completion or use 'zpool detach'")); 2806 else 2807 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2808 "cannot replace a replacing device")); 2809 } else { 2810 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2811 "can only attach to mirrors and top-level " 2812 "disks")); 2813 } 2814 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2815 break; 2816 2817 case EINVAL: 2818 /* 2819 * The new device must be a single disk. 2820 */ 2821 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2822 "new device must be a single disk")); 2823 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2824 break; 2825 2826 case EBUSY: 2827 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, " 2828 "or pool has removing/removed vdevs"), 2829 new_disk); 2830 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2831 break; 2832 2833 case EOVERFLOW: 2834 /* 2835 * The new device is too small. 2836 */ 2837 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2838 "device is too small")); 2839 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2840 break; 2841 2842 case EDOM: 2843 /* 2844 * The new device has a different alignment requirement. 2845 */ 2846 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2847 "devices have different sector alignment")); 2848 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2849 break; 2850 2851 case ENAMETOOLONG: 2852 /* 2853 * The resulting top-level vdev spec won't fit in the label. 2854 */ 2855 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2856 break; 2857 2858 default: 2859 (void) zpool_standard_error(hdl, errno, msg); 2860 } 2861 2862 return (-1); 2863} 2864 2865/* 2866 * Detach the specified device. 2867 */ 2868int 2869zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 2870{ 2871 zfs_cmd_t zc = { 0 }; 2872 char msg[1024]; 2873 nvlist_t *tgt; 2874 boolean_t avail_spare, l2cache; 2875 libzfs_handle_t *hdl = zhp->zpool_hdl; 2876 2877 (void) snprintf(msg, sizeof (msg), 2878 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 2879 2880 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2881 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2882 NULL)) == NULL) 2883 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2884 2885 if (avail_spare) 2886 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2887 2888 if (l2cache) 2889 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2890 2891 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2892 2893 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 2894 return (0); 2895 2896 switch (errno) { 2897 2898 case ENOTSUP: 2899 /* 2900 * Can't detach from this type of vdev. 2901 */ 2902 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 2903 "applicable to mirror and replacing vdevs")); 2904 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2905 break; 2906 2907 case EBUSY: 2908 /* 2909 * There are no other replicas of this device. 2910 */ 2911 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 2912 break; 2913 2914 default: 2915 (void) zpool_standard_error(hdl, errno, msg); 2916 } 2917 2918 return (-1); 2919} 2920 2921/* 2922 * Find a mirror vdev in the source nvlist. 2923 * 2924 * The mchild array contains a list of disks in one of the top-level mirrors 2925 * of the source pool. The schild array contains a list of disks that the 2926 * user specified on the command line. We loop over the mchild array to 2927 * see if any entry in the schild array matches. 2928 * 2929 * If a disk in the mchild array is found in the schild array, we return 2930 * the index of that entry. Otherwise we return -1. 2931 */ 2932static int 2933find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2934 nvlist_t **schild, uint_t schildren) 2935{ 2936 uint_t mc; 2937 2938 for (mc = 0; mc < mchildren; mc++) { 2939 uint_t sc; 2940 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2941 mchild[mc], B_FALSE); 2942 2943 for (sc = 0; sc < schildren; sc++) { 2944 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2945 schild[sc], B_FALSE); 2946 boolean_t result = (strcmp(mpath, spath) == 0); 2947 2948 free(spath); 2949 if (result) { 2950 free(mpath); 2951 return (mc); 2952 } 2953 } 2954 2955 free(mpath); 2956 } 2957 2958 return (-1); 2959} 2960 2961/* 2962 * Split a mirror pool. If newroot points to null, then a new nvlist 2963 * is generated and it is the responsibility of the caller to free it. 2964 */ 2965int 2966zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2967 nvlist_t *props, splitflags_t flags) 2968{ 2969 zfs_cmd_t zc = { 0 }; 2970 char msg[1024]; 2971 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2972 nvlist_t **varray = NULL, *zc_props = NULL; 2973 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2974 libzfs_handle_t *hdl = zhp->zpool_hdl; 2975 uint64_t vers; 2976 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2977 int retval = 0; 2978 2979 (void) snprintf(msg, sizeof (msg), 2980 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2981 2982 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2983 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2984 2985 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2986 (void) fprintf(stderr, gettext("Internal error: unable to " 2987 "retrieve pool configuration\n")); 2988 return (-1); 2989 } 2990 2991 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2992 == 0); 2993 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2994 2995 if (props) { 2996 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 2997 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2998 props, vers, flags, msg)) == NULL) 2999 return (-1); 3000 } 3001 3002 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 3003 &children) != 0) { 3004 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3005 "Source pool is missing vdev tree")); 3006 nvlist_free(zc_props); 3007 return (-1); 3008 } 3009 3010 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 3011 vcount = 0; 3012 3013 if (*newroot == NULL || 3014 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 3015 &newchild, &newchildren) != 0) 3016 newchildren = 0; 3017 3018 for (c = 0; c < children; c++) { 3019 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 3020 char *type; 3021 nvlist_t **mchild, *vdev; 3022 uint_t mchildren; 3023 int entry; 3024 3025 /* 3026 * Unlike cache & spares, slogs are stored in the 3027 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 3028 */ 3029 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 3030 &is_log); 3031 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 3032 &is_hole); 3033 if (is_log || is_hole) { 3034 /* 3035 * Create a hole vdev and put it in the config. 3036 */ 3037 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 3038 goto out; 3039 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 3040 VDEV_TYPE_HOLE) != 0) 3041 goto out; 3042 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 3043 1) != 0) 3044 goto out; 3045 if (lastlog == 0) 3046 lastlog = vcount; 3047 varray[vcount++] = vdev; 3048 continue; 3049 } 3050 lastlog = 0; 3051 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 3052 == 0); 3053 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 3054 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3055 "Source pool must be composed only of mirrors\n")); 3056 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3057 goto out; 3058 } 3059 3060 verify(nvlist_lookup_nvlist_array(child[c], 3061 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 3062 3063 /* find or add an entry for this top-level vdev */ 3064 if (newchildren > 0 && 3065 (entry = find_vdev_entry(zhp, mchild, mchildren, 3066 newchild, newchildren)) >= 0) { 3067 /* We found a disk that the user specified. */ 3068 vdev = mchild[entry]; 3069 ++found; 3070 } else { 3071 /* User didn't specify a disk for this vdev. */ 3072 vdev = mchild[mchildren - 1]; 3073 } 3074 3075 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 3076 goto out; 3077 } 3078 3079 /* did we find every disk the user specified? */ 3080 if (found != newchildren) { 3081 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 3082 "include at most one disk from each mirror")); 3083 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3084 goto out; 3085 } 3086 3087 /* Prepare the nvlist for populating. */ 3088 if (*newroot == NULL) { 3089 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 3090 goto out; 3091 freelist = B_TRUE; 3092 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 3093 VDEV_TYPE_ROOT) != 0) 3094 goto out; 3095 } else { 3096 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 3097 } 3098 3099 /* Add all the children we found */ 3100 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 3101 lastlog == 0 ? vcount : lastlog) != 0) 3102 goto out; 3103 3104 /* 3105 * If we're just doing a dry run, exit now with success. 3106 */ 3107 if (flags.dryrun) { 3108 memory_err = B_FALSE; 3109 freelist = B_FALSE; 3110 goto out; 3111 } 3112 3113 /* now build up the config list & call the ioctl */ 3114 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3115 goto out; 3116 3117 if (nvlist_add_nvlist(newconfig, 3118 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3119 nvlist_add_string(newconfig, 3120 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3121 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3122 goto out; 3123 3124 /* 3125 * The new pool is automatically part of the namespace unless we 3126 * explicitly export it. 3127 */ 3128 if (!flags.import) 3129 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 3130 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3131 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 3132 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 3133 goto out; 3134 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 3135 goto out; 3136 3137 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 3138 retval = zpool_standard_error(hdl, errno, msg); 3139 goto out; 3140 } 3141 3142 freelist = B_FALSE; 3143 memory_err = B_FALSE; 3144 3145out: 3146 if (varray != NULL) { 3147 int v; 3148 3149 for (v = 0; v < vcount; v++) 3150 nvlist_free(varray[v]); 3151 free(varray); 3152 } 3153 zcmd_free_nvlists(&zc); 3154 nvlist_free(zc_props); 3155 nvlist_free(newconfig); 3156 if (freelist) { 3157 nvlist_free(*newroot); 3158 *newroot = NULL; 3159 } 3160 3161 if (retval != 0) 3162 return (retval); 3163 3164 if (memory_err) 3165 return (no_memory(hdl)); 3166 3167 return (0); 3168} 3169 3170/* 3171 * Remove the given device. 3172 */ 3173int 3174zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3175{ 3176 zfs_cmd_t zc = { 0 }; 3177 char msg[1024]; 3178 nvlist_t *tgt; 3179 boolean_t avail_spare, l2cache, islog; 3180 libzfs_handle_t *hdl = zhp->zpool_hdl; 3181 uint64_t version; 3182 3183 (void) snprintf(msg, sizeof (msg), 3184 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3185 3186 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3187 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3188 &islog)) == NULL) 3189 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3190 3191 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 3192 if (islog && version < SPA_VERSION_HOLES) { 3193 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3194 "pool must be upgraded to support log removal")); 3195 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 3196 } 3197 3198 if (!islog && !avail_spare && !l2cache && zpool_is_bootable(zhp)) { 3199 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3200 "root pool can not have removed devices, " 3201 "because GRUB does not understand them")); 3202 return (zfs_error(hdl, EINVAL, msg)); 3203 } 3204 3205 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID); 3206 3207 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3208 return (0); 3209 3210 switch (errno) { 3211 3212 case EINVAL: 3213 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3214 "invalid config; all top-level vdevs must " 3215 "have the same sector size and not be raidz.")); 3216 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 3217 break; 3218 3219 case EBUSY: 3220 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3221 "Pool busy; removal may already be in progress")); 3222 (void) zfs_error(hdl, EZFS_BUSY, msg); 3223 break; 3224 3225 default: 3226 (void) zpool_standard_error(hdl, errno, msg); 3227 } 3228 return (-1); 3229} 3230 3231int 3232zpool_vdev_remove_cancel(zpool_handle_t *zhp) 3233{ 3234 zfs_cmd_t zc = { 0 }; 3235 char msg[1024]; 3236 libzfs_handle_t *hdl = zhp->zpool_hdl; 3237 3238 (void) snprintf(msg, sizeof (msg), 3239 dgettext(TEXT_DOMAIN, "cannot cancel removal")); 3240 3241 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3242 zc.zc_cookie = 1; 3243 3244 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3245 return (0); 3246 3247 return (zpool_standard_error(hdl, errno, msg)); 3248} 3249 3250int 3251zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path, 3252 uint64_t *sizep) 3253{ 3254 char msg[1024]; 3255 nvlist_t *tgt; 3256 boolean_t avail_spare, l2cache, islog; 3257 libzfs_handle_t *hdl = zhp->zpool_hdl; 3258 3259 (void) snprintf(msg, sizeof (msg), 3260 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"), 3261 path); 3262 3263 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3264 &islog)) == NULL) 3265 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3266 3267 if (avail_spare || l2cache || islog) { 3268 *sizep = 0; 3269 return (0); 3270 } 3271 3272 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) { 3273 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3274 "indirect size not available")); 3275 return (zfs_error(hdl, EINVAL, msg)); 3276 } 3277 return (0); 3278} 3279 3280/* 3281 * Clear the errors for the pool, or the particular device if specified. 3282 */ 3283int 3284zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 3285{ 3286 zfs_cmd_t zc = { 0 }; 3287 char msg[1024]; 3288 nvlist_t *tgt; 3289 zpool_rewind_policy_t policy; 3290 boolean_t avail_spare, l2cache; 3291 libzfs_handle_t *hdl = zhp->zpool_hdl; 3292 nvlist_t *nvi = NULL; 3293 int error; 3294 3295 if (path) 3296 (void) snprintf(msg, sizeof (msg), 3297 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3298 path); 3299 else 3300 (void) snprintf(msg, sizeof (msg), 3301 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3302 zhp->zpool_name); 3303 3304 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3305 if (path) { 3306 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 3307 &l2cache, NULL)) == NULL) 3308 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3309 3310 /* 3311 * Don't allow error clearing for hot spares. Do allow 3312 * error clearing for l2cache devices. 3313 */ 3314 if (avail_spare) 3315 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3316 3317 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 3318 &zc.zc_guid) == 0); 3319 } 3320 3321 zpool_get_rewind_policy(rewindnvl, &policy); 3322 zc.zc_cookie = policy.zrp_request; 3323 3324 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 3325 return (-1); 3326 3327 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 3328 return (-1); 3329 3330 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 3331 errno == ENOMEM) { 3332 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 3333 zcmd_free_nvlists(&zc); 3334 return (-1); 3335 } 3336 } 3337 3338 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) && 3339 errno != EPERM && errno != EACCES)) { 3340 if (policy.zrp_request & 3341 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 3342 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3343 zpool_rewind_exclaim(hdl, zc.zc_name, 3344 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), 3345 nvi); 3346 nvlist_free(nvi); 3347 } 3348 zcmd_free_nvlists(&zc); 3349 return (0); 3350 } 3351 3352 zcmd_free_nvlists(&zc); 3353 return (zpool_standard_error(hdl, errno, msg)); 3354} 3355 3356/* 3357 * Similar to zpool_clear(), but takes a GUID (used by fmd). 3358 */ 3359int 3360zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3361{ 3362 zfs_cmd_t zc = { 0 }; 3363 char msg[1024]; 3364 libzfs_handle_t *hdl = zhp->zpool_hdl; 3365 3366 (void) snprintf(msg, sizeof (msg), 3367 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3368 guid); 3369 3370 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3371 zc.zc_guid = guid; 3372 zc.zc_cookie = ZPOOL_NO_REWIND; 3373 3374 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3375 return (0); 3376 3377 return (zpool_standard_error(hdl, errno, msg)); 3378} 3379 3380/* 3381 * Change the GUID for a pool. 3382 */ 3383int 3384zpool_reguid(zpool_handle_t *zhp) 3385{ 3386 char msg[1024]; 3387 libzfs_handle_t *hdl = zhp->zpool_hdl; 3388 zfs_cmd_t zc = { 0 }; 3389 3390 (void) snprintf(msg, sizeof (msg), 3391 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3392 3393 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3394 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3395 return (0); 3396 3397 return (zpool_standard_error(hdl, errno, msg)); 3398} 3399 3400/* 3401 * Reopen the pool. 3402 */ 3403int 3404zpool_reopen(zpool_handle_t *zhp) 3405{ 3406 zfs_cmd_t zc = { 0 }; 3407 char msg[1024]; 3408 libzfs_handle_t *hdl = zhp->zpool_hdl; 3409 3410 (void) snprintf(msg, sizeof (msg), 3411 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), 3412 zhp->zpool_name); 3413 3414 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3415 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0) 3416 return (0); 3417 return (zpool_standard_error(hdl, errno, msg)); 3418} 3419 3420/* 3421 * Convert from a devid string to a path. 3422 */ 3423static char * 3424devid_to_path(char *devid_str) 3425{ 3426 ddi_devid_t devid; 3427 char *minor; 3428 char *path; 3429 devid_nmlist_t *list = NULL; 3430 int ret; 3431 3432 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3433 return (NULL); 3434 3435 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3436 3437 devid_str_free(minor); 3438 devid_free(devid); 3439 3440 if (ret != 0) 3441 return (NULL); 3442 3443 /* 3444 * In a case the strdup() fails, we will just return NULL below. 3445 */ 3446 path = strdup(list[0].devname); 3447 3448 devid_free_nmlist(list); 3449 3450 return (path); 3451} 3452 3453/* 3454 * Convert from a path to a devid string. 3455 */ 3456static char * 3457path_to_devid(const char *path) 3458{ 3459#ifdef have_devid 3460 int fd; 3461 ddi_devid_t devid; 3462 char *minor, *ret; 3463 3464 if ((fd = open(path, O_RDONLY)) < 0) 3465 return (NULL); 3466 3467 minor = NULL; 3468 ret = NULL; 3469 if (devid_get(fd, &devid) == 0) { 3470 if (devid_get_minor_name(fd, &minor) == 0) 3471 ret = devid_str_encode(devid, minor); 3472 if (minor != NULL) 3473 devid_str_free(minor); 3474 devid_free(devid); 3475 } 3476 (void) close(fd); 3477 3478 return (ret); 3479#else 3480 return (NULL); 3481#endif 3482} 3483 3484/* 3485 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3486 * ignore any failure here, since a common case is for an unprivileged user to 3487 * type 'zpool status', and we'll display the correct information anyway. 3488 */ 3489static void 3490set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3491{ 3492 zfs_cmd_t zc = { 0 }; 3493 3494 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3495 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3496 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3497 &zc.zc_guid) == 0); 3498 3499 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3500} 3501 3502/* 3503 * Given a vdev, return the name to display in iostat. If the vdev has a path, 3504 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3505 * We also check if this is a whole disk, in which case we strip off the 3506 * trailing 's0' slice name. 3507 * 3508 * This routine is also responsible for identifying when disks have been 3509 * reconfigured in a new location. The kernel will have opened the device by 3510 * devid, but the path will still refer to the old location. To catch this, we 3511 * first do a path -> devid translation (which is fast for the common case). If 3512 * the devid matches, we're done. If not, we do a reverse devid -> path 3513 * translation and issue the appropriate ioctl() to update the path of the vdev. 3514 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3515 * of these checks. 3516 */ 3517char * 3518zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3519 boolean_t verbose) 3520{ 3521 char *path, *devid; 3522 uint64_t value; 3523 char buf[64]; 3524 vdev_stat_t *vs; 3525 uint_t vsc; 3526 int have_stats; 3527 int have_path; 3528 3529 have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3530 (uint64_t **)&vs, &vsc) == 0; 3531 have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0; 3532 3533 /* 3534 * If the device is not currently present, assume it will not 3535 * come back at the same device path. Display the device by GUID. 3536 */ 3537 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 || 3538 have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN) { 3539 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3540 &value) == 0); 3541 (void) snprintf(buf, sizeof (buf), "%llu", 3542 (u_longlong_t)value); 3543 path = buf; 3544 } else if (have_path) { 3545 3546 /* 3547 * If the device is dead (faulted, offline, etc) then don't 3548 * bother opening it. Otherwise we may be forcing the user to 3549 * open a misbehaving device, which can have undesirable 3550 * effects. 3551 */ 3552 if ((have_stats == 0 || 3553 vs->vs_state >= VDEV_STATE_DEGRADED) && 3554 zhp != NULL && 3555 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3556 /* 3557 * Determine if the current path is correct. 3558 */ 3559 char *newdevid = path_to_devid(path); 3560 3561 if (newdevid == NULL || 3562 strcmp(devid, newdevid) != 0) { 3563 char *newpath; 3564 3565 if ((newpath = devid_to_path(devid)) != NULL) { 3566 /* 3567 * Update the path appropriately. 3568 */ 3569 set_path(zhp, nv, newpath); 3570 if (nvlist_add_string(nv, 3571 ZPOOL_CONFIG_PATH, newpath) == 0) 3572 verify(nvlist_lookup_string(nv, 3573 ZPOOL_CONFIG_PATH, 3574 &path) == 0); 3575 free(newpath); 3576 } 3577 } 3578 3579 if (newdevid) 3580 devid_str_free(newdevid); 3581 } 3582 3583#ifdef illumos 3584 if (strncmp(path, ZFS_DISK_ROOTD, strlen(ZFS_DISK_ROOTD)) == 0) 3585 path += strlen(ZFS_DISK_ROOTD); 3586 3587 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3588 &value) == 0 && value) { 3589 int pathlen = strlen(path); 3590 char *tmp = zfs_strdup(hdl, path); 3591 3592 /* 3593 * If it starts with c#, and ends with "s0" or "s1", 3594 * chop the slice off, or if it ends with "s0/old" or 3595 * "s1/old", remove the slice from the middle. 3596 */ 3597 if (CTD_CHECK(tmp)) { 3598 if (strcmp(&tmp[pathlen - 2], "s0") == 0 || 3599 strcmp(&tmp[pathlen - 2], "s1") == 0) { 3600 tmp[pathlen - 2] = '\0'; 3601 } else if (pathlen > 6 && 3602 (strcmp(&tmp[pathlen - 6], "s0/old") == 0 || 3603 strcmp(&tmp[pathlen - 6], "s1/old") == 0)) { 3604 (void) strcpy(&tmp[pathlen - 6], 3605 "/old"); 3606 } 3607 } 3608 return (tmp); 3609 } 3610#else /* !illumos */ 3611 if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 3612 path += sizeof(_PATH_DEV) - 1; 3613#endif /* illumos */ 3614 } else { 3615 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 3616 3617 /* 3618 * If it's a raidz device, we need to stick in the parity level. 3619 */ 3620 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 3621 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 3622 &value) == 0); 3623 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 3624 (u_longlong_t)value); 3625 path = buf; 3626 } 3627 3628 /* 3629 * We identify each top-level vdev by using a <type-id> 3630 * naming convention. 3631 */ 3632 if (verbose) { 3633 uint64_t id; 3634 3635 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 3636 &id) == 0); 3637 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 3638 (u_longlong_t)id); 3639 path = buf; 3640 } 3641 } 3642 3643 return (zfs_strdup(hdl, path)); 3644} 3645 3646static int 3647zbookmark_mem_compare(const void *a, const void *b) 3648{ 3649 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 3650} 3651 3652/* 3653 * Retrieve the persistent error log, uniquify the members, and return to the 3654 * caller. 3655 */ 3656int 3657zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 3658{ 3659 zfs_cmd_t zc = { 0 }; 3660 uint64_t count; 3661 zbookmark_phys_t *zb = NULL; 3662 int i; 3663 3664 /* 3665 * Retrieve the raw error list from the kernel. If the number of errors 3666 * has increased, allocate more space and continue until we get the 3667 * entire list. 3668 */ 3669 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3670 &count) == 0); 3671 if (count == 0) 3672 return (0); 3673 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3674 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL) 3675 return (-1); 3676 zc.zc_nvlist_dst_size = count; 3677 (void) strcpy(zc.zc_name, zhp->zpool_name); 3678 for (;;) { 3679 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3680 &zc) != 0) { 3681 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3682 if (errno == ENOMEM) { 3683 void *dst; 3684 3685 count = zc.zc_nvlist_dst_size; 3686 dst = zfs_alloc(zhp->zpool_hdl, count * 3687 sizeof (zbookmark_phys_t)); 3688 if (dst == NULL) 3689 return (-1); 3690 zc.zc_nvlist_dst = (uintptr_t)dst; 3691 } else { 3692 return (-1); 3693 } 3694 } else { 3695 break; 3696 } 3697 } 3698 3699 /* 3700 * Sort the resulting bookmarks. This is a little confusing due to the 3701 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last 3702 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks 3703 * _not_ copied as part of the process. So we point the start of our 3704 * array appropriate and decrement the total number of elements. 3705 */ 3706 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) + 3707 zc.zc_nvlist_dst_size; 3708 count -= zc.zc_nvlist_dst_size; 3709 3710 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare); 3711 3712 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 3713 3714 /* 3715 * Fill in the nverrlistp with nvlist's of dataset and object numbers. 3716 */ 3717 for (i = 0; i < count; i++) { 3718 nvlist_t *nv; 3719 3720 /* ignoring zb_blkid and zb_level for now */ 3721 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 3722 zb[i-1].zb_object == zb[i].zb_object) 3723 continue; 3724 3725 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 3726 goto nomem; 3727 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 3728 zb[i].zb_objset) != 0) { 3729 nvlist_free(nv); 3730 goto nomem; 3731 } 3732 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 3733 zb[i].zb_object) != 0) { 3734 nvlist_free(nv); 3735 goto nomem; 3736 } 3737 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 3738 nvlist_free(nv); 3739 goto nomem; 3740 } 3741 nvlist_free(nv); 3742 } 3743 3744 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3745 return (0); 3746 3747nomem: 3748 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3749 return (no_memory(zhp->zpool_hdl)); 3750} 3751 3752/* 3753 * Upgrade a ZFS pool to the latest on-disk version. 3754 */ 3755int 3756zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 3757{ 3758 zfs_cmd_t zc = { 0 }; 3759 libzfs_handle_t *hdl = zhp->zpool_hdl; 3760 3761 (void) strcpy(zc.zc_name, zhp->zpool_name); 3762 zc.zc_cookie = new_version; 3763 3764 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 3765 return (zpool_standard_error_fmt(hdl, errno, 3766 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 3767 zhp->zpool_name)); 3768 return (0); 3769} 3770 3771void 3772zfs_save_arguments(int argc, char **argv, char *string, int len) 3773{ 3774 (void) strlcpy(string, basename(argv[0]), len); 3775 for (int i = 1; i < argc; i++) { 3776 (void) strlcat(string, " ", len); 3777 (void) strlcat(string, argv[i], len); 3778 } 3779} 3780 3781int 3782zpool_log_history(libzfs_handle_t *hdl, const char *message) 3783{ 3784 zfs_cmd_t zc = { 0 }; 3785 nvlist_t *args; 3786 int err; 3787 3788 args = fnvlist_alloc(); 3789 fnvlist_add_string(args, "message", message); 3790 err = zcmd_write_src_nvlist(hdl, &zc, args); 3791 if (err == 0) 3792 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc); 3793 nvlist_free(args); 3794 zcmd_free_nvlists(&zc); 3795 return (err); 3796} 3797 3798/* 3799 * Perform ioctl to get some command history of a pool. 3800 * 3801 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 3802 * logical offset of the history buffer to start reading from. 3803 * 3804 * Upon return, 'off' is the next logical offset to read from and 3805 * 'len' is the actual amount of bytes read into 'buf'. 3806 */ 3807static int 3808get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3809{ 3810 zfs_cmd_t zc = { 0 }; 3811 libzfs_handle_t *hdl = zhp->zpool_hdl; 3812 3813 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3814 3815 zc.zc_history = (uint64_t)(uintptr_t)buf; 3816 zc.zc_history_len = *len; 3817 zc.zc_history_offset = *off; 3818 3819 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3820 switch (errno) { 3821 case EPERM: 3822 return (zfs_error_fmt(hdl, EZFS_PERM, 3823 dgettext(TEXT_DOMAIN, 3824 "cannot show history for pool '%s'"), 3825 zhp->zpool_name)); 3826 case ENOENT: 3827 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3828 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3829 "'%s'"), zhp->zpool_name)); 3830 case ENOTSUP: 3831 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3832 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3833 "'%s', pool must be upgraded"), zhp->zpool_name)); 3834 default: 3835 return (zpool_standard_error_fmt(hdl, errno, 3836 dgettext(TEXT_DOMAIN, 3837 "cannot get history for '%s'"), zhp->zpool_name)); 3838 } 3839 } 3840 3841 *len = zc.zc_history_len; 3842 *off = zc.zc_history_offset; 3843 3844 return (0); 3845} 3846 3847/* 3848 * Process the buffer of nvlists, unpacking and storing each nvlist record 3849 * into 'records'. 'leftover' is set to the number of bytes that weren't 3850 * processed as there wasn't a complete record. 3851 */ 3852int 3853zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3854 nvlist_t ***records, uint_t *numrecords) 3855{ 3856 uint64_t reclen; 3857 nvlist_t *nv; 3858 int i; 3859 3860 while (bytes_read > sizeof (reclen)) { 3861 3862 /* get length of packed record (stored as little endian) */ 3863 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3864 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3865 3866 if (bytes_read < sizeof (reclen) + reclen) 3867 break; 3868 3869 /* unpack record */ 3870 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3871 return (ENOMEM); 3872 bytes_read -= sizeof (reclen) + reclen; 3873 buf += sizeof (reclen) + reclen; 3874 3875 /* add record to nvlist array */ 3876 (*numrecords)++; 3877 if (ISP2(*numrecords + 1)) { 3878 *records = realloc(*records, 3879 *numrecords * 2 * sizeof (nvlist_t *)); 3880 } 3881 (*records)[*numrecords - 1] = nv; 3882 } 3883 3884 *leftover = bytes_read; 3885 return (0); 3886} 3887 3888/* from spa_history.c: spa_history_create_obj() */ 3889#define HIS_BUF_LEN_DEF (128 << 10) 3890#define HIS_BUF_LEN_MAX (1 << 30) 3891 3892/* 3893 * Retrieve the command history of a pool. 3894 */ 3895int 3896zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3897{ 3898 char *buf; 3899 uint64_t buflen = HIS_BUF_LEN_DEF; 3900 uint64_t off = 0; 3901 nvlist_t **records = NULL; 3902 uint_t numrecords = 0; 3903 int err, i; 3904 3905 buf = malloc(buflen); 3906 if (buf == NULL) 3907 return (ENOMEM); 3908 do { 3909 uint64_t bytes_read = buflen; 3910 uint64_t leftover; 3911 3912 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3913 break; 3914 3915 /* if nothing else was read in, we're at EOF, just return */ 3916 if (bytes_read == 0) 3917 break; 3918 3919 if ((err = zpool_history_unpack(buf, bytes_read, 3920 &leftover, &records, &numrecords)) != 0) 3921 break; 3922 off -= leftover; 3923 if (leftover == bytes_read) { 3924 /* 3925 * no progress made, because buffer is not big enough 3926 * to hold this record; resize and retry. 3927 */ 3928 buflen *= 2; 3929 free(buf); 3930 buf = NULL; 3931 if ((buflen >= HIS_BUF_LEN_MAX) || 3932 ((buf = malloc(buflen)) == NULL)) { 3933 err = ENOMEM; 3934 break; 3935 } 3936 } 3937 3938 /* CONSTCOND */ 3939 } while (1); 3940 3941 free(buf); 3942 3943 if (!err) { 3944 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3945 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3946 records, numrecords) == 0); 3947 } 3948 for (i = 0; i < numrecords; i++) 3949 nvlist_free(records[i]); 3950 free(records); 3951 3952 return (err); 3953} 3954 3955void 3956zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3957 char *pathname, size_t len) 3958{ 3959 zfs_cmd_t zc = { 0 }; 3960 boolean_t mounted = B_FALSE; 3961 char *mntpnt = NULL; 3962 char dsname[ZFS_MAX_DATASET_NAME_LEN]; 3963 3964 if (dsobj == 0) { 3965 /* special case for the MOS */ 3966 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3967 return; 3968 } 3969 3970 /* get the dataset's name */ 3971 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3972 zc.zc_obj = dsobj; 3973 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3974 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3975 /* just write out a path of two object numbers */ 3976 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3977 dsobj, obj); 3978 return; 3979 } 3980 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3981 3982 /* find out if the dataset is mounted */ 3983 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3984 3985 /* get the corrupted object's path */ 3986 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 3987 zc.zc_obj = obj; 3988 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3989 &zc) == 0) { 3990 if (mounted) { 3991 (void) snprintf(pathname, len, "%s%s", mntpnt, 3992 zc.zc_value); 3993 } else { 3994 (void) snprintf(pathname, len, "%s:%s", 3995 dsname, zc.zc_value); 3996 } 3997 } else { 3998 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3999 } 4000 free(mntpnt); 4001} 4002 4003#ifdef illumos 4004/* 4005 * Read the EFI label from the config, if a label does not exist then 4006 * pass back the error to the caller. If the caller has passed a non-NULL 4007 * diskaddr argument then we set it to the starting address of the EFI 4008 * partition. If the caller has passed a non-NULL boolean argument, then 4009 * we set it to indicate if the disk does have efi system partition. 4010 */ 4011static int 4012read_efi_label(nvlist_t *config, diskaddr_t *sb, boolean_t *system) 4013{ 4014 char *path; 4015 int fd; 4016 char diskname[MAXPATHLEN]; 4017 boolean_t boot = B_FALSE; 4018 int err = -1; 4019 int slice; 4020 4021 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 4022 return (err); 4023 4024 (void) snprintf(diskname, sizeof (diskname), "%s%s", ZFS_RDISK_ROOT, 4025 strrchr(path, '/')); 4026 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 4027 struct dk_gpt *vtoc; 4028 4029 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 4030 for (slice = 0; slice < vtoc->efi_nparts; slice++) { 4031 if (vtoc->efi_parts[slice].p_tag == V_SYSTEM) 4032 boot = B_TRUE; 4033 if (vtoc->efi_parts[slice].p_tag == V_USR) 4034 break; 4035 } 4036 if (sb != NULL && vtoc->efi_parts[slice].p_tag == V_USR) 4037 *sb = vtoc->efi_parts[slice].p_start; 4038 if (system != NULL) 4039 *system = boot; 4040 efi_free(vtoc); 4041 } 4042 (void) close(fd); 4043 } 4044 return (err); 4045} 4046 4047/* 4048 * determine where a partition starts on a disk in the current 4049 * configuration 4050 */ 4051static diskaddr_t 4052find_start_block(nvlist_t *config) 4053{ 4054 nvlist_t **child; 4055 uint_t c, children; 4056 diskaddr_t sb = MAXOFFSET_T; 4057 uint64_t wholedisk; 4058 4059 if (nvlist_lookup_nvlist_array(config, 4060 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 4061 if (nvlist_lookup_uint64(config, 4062 ZPOOL_CONFIG_WHOLE_DISK, 4063 &wholedisk) != 0 || !wholedisk) { 4064 return (MAXOFFSET_T); 4065 } 4066 if (read_efi_label(config, &sb, NULL) < 0) 4067 sb = MAXOFFSET_T; 4068 return (sb); 4069 } 4070 4071 for (c = 0; c < children; c++) { 4072 sb = find_start_block(child[c]); 4073 if (sb != MAXOFFSET_T) { 4074 return (sb); 4075 } 4076 } 4077 return (MAXOFFSET_T); 4078} 4079#endif /* illumos */ 4080 4081/* 4082 * Label an individual disk. The name provided is the short name, 4083 * stripped of any leading /dev path. 4084 */ 4085int 4086zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name, 4087 zpool_boot_label_t boot_type, uint64_t boot_size, int *slice) 4088{ 4089#ifdef illumos 4090 char path[MAXPATHLEN]; 4091 struct dk_gpt *vtoc; 4092 int fd; 4093 size_t resv = EFI_MIN_RESV_SIZE; 4094 uint64_t slice_size; 4095 diskaddr_t start_block; 4096 char errbuf[1024]; 4097 4098 /* prepare an error message just in case */ 4099 (void) snprintf(errbuf, sizeof (errbuf), 4100 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 4101 4102 if (zhp) { 4103 nvlist_t *nvroot; 4104 4105 verify(nvlist_lookup_nvlist(zhp->zpool_config, 4106 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 4107 4108 if (zhp->zpool_start_block == 0) 4109 start_block = find_start_block(nvroot); 4110 else 4111 start_block = zhp->zpool_start_block; 4112 zhp->zpool_start_block = start_block; 4113 } else { 4114 /* new pool */ 4115 start_block = NEW_START_BLOCK; 4116 } 4117 4118 (void) snprintf(path, sizeof (path), "%s/%s%s", ZFS_RDISK_ROOT, name, 4119 BACKUP_SLICE); 4120 4121 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 4122 /* 4123 * This shouldn't happen. We've long since verified that this 4124 * is a valid device. 4125 */ 4126 zfs_error_aux(hdl, 4127 dgettext(TEXT_DOMAIN, "unable to open device")); 4128 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 4129 } 4130 4131 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 4132 /* 4133 * The only way this can fail is if we run out of memory, or we 4134 * were unable to read the disk's capacity 4135 */ 4136 if (errno == ENOMEM) 4137 (void) no_memory(hdl); 4138 4139 (void) close(fd); 4140 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4141 "unable to read disk capacity"), name); 4142 4143 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 4144 } 4145 4146 /* 4147 * Why we use V_USR: V_BACKUP confuses users, and is considered 4148 * disposable by some EFI utilities (since EFI doesn't have a backup 4149 * slice). V_UNASSIGNED is supposed to be used only for zero size 4150 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 4151 * etc. were all pretty specific. V_USR is as close to reality as we 4152 * can get, in the absence of V_OTHER. 4153 */ 4154 /* first fix the partition start block */ 4155 if (start_block == MAXOFFSET_T) 4156 start_block = NEW_START_BLOCK; 4157 4158 /* 4159 * EFI System partition is using slice 0. 4160 * ZFS is on slice 1 and slice 8 is reserved. 4161 * We assume the GPT partition table without system 4162 * partition has zfs p_start == NEW_START_BLOCK. 4163 * If start_block != NEW_START_BLOCK, it means we have 4164 * system partition. Correct solution would be to query/cache vtoc 4165 * from existing vdev member. 4166 */ 4167 if (boot_type == ZPOOL_CREATE_BOOT_LABEL) { 4168 if (boot_size % vtoc->efi_lbasize != 0) { 4169 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4170 "boot partition size must be a multiple of %d"), 4171 vtoc->efi_lbasize); 4172 (void) close(fd); 4173 efi_free(vtoc); 4174 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 4175 } 4176 /* 4177 * System partition size checks. 4178 * Note the 1MB is quite arbitrary value, since we 4179 * are creating dedicated pool, it should be enough 4180 * to hold fat + efi bootloader. May need to be 4181 * adjusted if the bootloader size will grow. 4182 */ 4183 if (boot_size < 1024 * 1024) { 4184 char buf[64]; 4185 zfs_nicenum(boot_size, buf, sizeof (buf)); 4186 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4187 "Specified size %s for EFI System partition is too " 4188 "small, the minimum size is 1MB."), buf); 4189 (void) close(fd); 4190 efi_free(vtoc); 4191 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 4192 } 4193 /* 33MB is tested with mkfs -F pcfs */ 4194 if (hdl->libzfs_printerr && 4195 ((vtoc->efi_lbasize == 512 && 4196 boot_size < 33 * 1024 * 1024) || 4197 (vtoc->efi_lbasize == 4096 && 4198 boot_size < 256 * 1024 * 1024))) { 4199 char buf[64]; 4200 zfs_nicenum(boot_size, buf, sizeof (buf)); 4201 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, 4202 "Warning: EFI System partition size %s is " 4203 "not allowing to create FAT32 file\nsystem, which " 4204 "may result in unbootable system.\n"), buf); 4205 } 4206 /* Adjust zfs partition start by size of system partition. */ 4207 start_block += boot_size / vtoc->efi_lbasize; 4208 } 4209 4210 if (start_block == NEW_START_BLOCK) { 4211 /* 4212 * Use default layout. 4213 * ZFS is on slice 0 and slice 8 is reserved. 4214 */ 4215 slice_size = vtoc->efi_last_u_lba + 1; 4216 slice_size -= EFI_MIN_RESV_SIZE; 4217 slice_size -= start_block; 4218 if (slice != NULL) 4219 *slice = 0; 4220 4221 vtoc->efi_parts[0].p_start = start_block; 4222 vtoc->efi_parts[0].p_size = slice_size; 4223 4224 vtoc->efi_parts[0].p_tag = V_USR; 4225 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 4226 4227 vtoc->efi_parts[8].p_start = slice_size + start_block; 4228 vtoc->efi_parts[8].p_size = resv; 4229 vtoc->efi_parts[8].p_tag = V_RESERVED; 4230 } else { 4231 slice_size = start_block - NEW_START_BLOCK; 4232 vtoc->efi_parts[0].p_start = NEW_START_BLOCK; 4233 vtoc->efi_parts[0].p_size = slice_size; 4234 vtoc->efi_parts[0].p_tag = V_SYSTEM; 4235 (void) strcpy(vtoc->efi_parts[0].p_name, "loader"); 4236 if (slice != NULL) 4237 *slice = 1; 4238 /* prepare slice 1 */ 4239 slice_size = vtoc->efi_last_u_lba + 1 - slice_size; 4240 slice_size -= resv; 4241 slice_size -= NEW_START_BLOCK; 4242 vtoc->efi_parts[1].p_start = start_block; 4243 vtoc->efi_parts[1].p_size = slice_size; 4244 vtoc->efi_parts[1].p_tag = V_USR; 4245 (void) strcpy(vtoc->efi_parts[1].p_name, "zfs"); 4246 4247 vtoc->efi_parts[8].p_start = slice_size + start_block; 4248 vtoc->efi_parts[8].p_size = resv; 4249 vtoc->efi_parts[8].p_tag = V_RESERVED; 4250 } 4251 4252 if (efi_write(fd, vtoc) != 0) { 4253 /* 4254 * Some block drivers (like pcata) may not support EFI 4255 * GPT labels. Print out a helpful error message dir- 4256 * ecting the user to manually label the disk and give 4257 * a specific slice. 4258 */ 4259 (void) close(fd); 4260 efi_free(vtoc); 4261 4262 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4263 "try using fdisk(1M) and then provide a specific slice")); 4264 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 4265 } 4266 4267 (void) close(fd); 4268 efi_free(vtoc); 4269#endif /* illumos */ 4270 return (0); 4271} 4272 4273static boolean_t 4274supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 4275{ 4276 char *type; 4277 nvlist_t **child; 4278 uint_t children, c; 4279 4280 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 4281 if (strcmp(type, VDEV_TYPE_FILE) == 0 || 4282 strcmp(type, VDEV_TYPE_HOLE) == 0 || 4283 strcmp(type, VDEV_TYPE_MISSING) == 0) { 4284 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4285 "vdev type '%s' is not supported"), type); 4286 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 4287 return (B_FALSE); 4288 } 4289 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 4290 &child, &children) == 0) { 4291 for (c = 0; c < children; c++) { 4292 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 4293 return (B_FALSE); 4294 } 4295 } 4296 return (B_TRUE); 4297} 4298 4299/* 4300 * Check if this zvol is allowable for use as a dump device; zero if 4301 * it is, > 0 if it isn't, < 0 if it isn't a zvol. 4302 * 4303 * Allowable storage configurations include mirrors, all raidz variants, and 4304 * pools with log, cache, and spare devices. Pools which are backed by files or 4305 * have missing/hole vdevs are not suitable. 4306 */ 4307int 4308zvol_check_dump_config(char *arg) 4309{ 4310 zpool_handle_t *zhp = NULL; 4311 nvlist_t *config, *nvroot; 4312 char *p, *volname; 4313 nvlist_t **top; 4314 uint_t toplevels; 4315 libzfs_handle_t *hdl; 4316 char errbuf[1024]; 4317 char poolname[ZFS_MAX_DATASET_NAME_LEN]; 4318 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4319 int ret = 1; 4320 4321 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4322 return (-1); 4323 } 4324 4325 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4326 "dump is not supported on device '%s'"), arg); 4327 4328 if ((hdl = libzfs_init()) == NULL) 4329 return (1); 4330 libzfs_print_on_error(hdl, B_TRUE); 4331 4332 volname = arg + pathlen; 4333 4334 /* check the configuration of the pool */ 4335 if ((p = strchr(volname, '/')) == NULL) { 4336 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4337 "malformed dataset name")); 4338 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4339 return (1); 4340 } else if (p - volname >= ZFS_MAX_DATASET_NAME_LEN) { 4341 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4342 "dataset name is too long")); 4343 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4344 return (1); 4345 } else { 4346 (void) strncpy(poolname, volname, p - volname); 4347 poolname[p - volname] = '\0'; 4348 } 4349 4350 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4351 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4352 "could not open pool '%s'"), poolname); 4353 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4354 goto out; 4355 } 4356 config = zpool_get_config(zhp, NULL); 4357 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4358 &nvroot) != 0) { 4359 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4360 "could not obtain vdev configuration for '%s'"), poolname); 4361 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4362 goto out; 4363 } 4364 4365 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4366 &top, &toplevels) == 0); 4367 4368 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4369 goto out; 4370 } 4371 ret = 0; 4372 4373out: 4374 if (zhp) 4375 zpool_close(zhp); 4376 libzfs_fini(hdl); 4377 return (ret); 4378} 4379 4380int 4381zpool_nextboot(libzfs_handle_t *hdl, uint64_t pool_guid, uint64_t dev_guid, 4382 const char *command) 4383{ 4384 zfs_cmd_t zc = { 0 }; 4385 nvlist_t *args; 4386 char *packed; 4387 size_t size; 4388 int error; 4389 4390 args = fnvlist_alloc(); 4391 fnvlist_add_uint64(args, ZPOOL_CONFIG_POOL_GUID, pool_guid); 4392 fnvlist_add_uint64(args, ZPOOL_CONFIG_GUID, dev_guid); 4393 fnvlist_add_string(args, "command", command); 4394 error = zcmd_write_src_nvlist(hdl, &zc, args); 4395 if (error == 0) 4396 error = ioctl(hdl->libzfs_fd, ZFS_IOC_NEXTBOOT, &zc); 4397 zcmd_free_nvlists(&zc); 4398 nvlist_free(args); 4399 return (error); 4400} 4401