/* libzfs_pool.c -- revision 209962 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
25168404Spjd */ 26168404Spjd 27168404Spjd#include <sys/types.h> 28168404Spjd#include <sys/stat.h> 29168404Spjd#include <assert.h> 30168404Spjd#include <ctype.h> 31168404Spjd#include <errno.h> 32168404Spjd#include <devid.h> 33168404Spjd#include <dirent.h> 34168404Spjd#include <fcntl.h> 35168404Spjd#include <libintl.h> 36168404Spjd#include <stdio.h> 37168404Spjd#include <stdlib.h> 38168404Spjd#include <strings.h> 39168404Spjd#include <unistd.h> 40185029Spjd#include <zone.h> 41168404Spjd#include <sys/zfs_ioctl.h> 42168404Spjd#include <sys/zio.h> 43168404Spjd#include <strings.h> 44168404Spjd#include <umem.h> 45168404Spjd 46168404Spjd#include "zfs_namecheck.h" 47168404Spjd#include "zfs_prop.h" 48168404Spjd#include "libzfs_impl.h" 49168404Spjd 50185029Spjdstatic int read_efi_label(nvlist_t *config, diskaddr_t *sb); 51185029Spjd 52209962Smm#if defined(__i386) || defined(__amd64) 53209962Smm#define BOOTCMD "installgrub(1M)" 54209962Smm#else 55209962Smm#define BOOTCMD "installboot(1M)" 56209962Smm#endif 57209962Smm 58168404Spjd/* 59185029Spjd * ==================================================================== 60185029Spjd * zpool property functions 61185029Spjd * ==================================================================== 62185029Spjd */ 63185029Spjd 64185029Spjdstatic int 65185029Spjdzpool_get_all_props(zpool_handle_t *zhp) 66185029Spjd{ 67185029Spjd zfs_cmd_t zc = { 0 }; 68185029Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 69185029Spjd 70185029Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 71185029Spjd 72185029Spjd if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 73185029Spjd return (-1); 74185029Spjd 75185029Spjd while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 76185029Spjd if (errno == ENOMEM) { 77185029Spjd if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 78185029Spjd zcmd_free_nvlists(&zc); 79185029Spjd return (-1); 80185029Spjd } 81185029Spjd } else { 82185029Spjd zcmd_free_nvlists(&zc); 83185029Spjd return (-1); 
84185029Spjd } 85185029Spjd } 86185029Spjd 87185029Spjd if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 88185029Spjd zcmd_free_nvlists(&zc); 89185029Spjd return (-1); 90185029Spjd } 91185029Spjd 92185029Spjd zcmd_free_nvlists(&zc); 93185029Spjd 94185029Spjd return (0); 95185029Spjd} 96185029Spjd 97185029Spjdstatic int 98185029Spjdzpool_props_refresh(zpool_handle_t *zhp) 99185029Spjd{ 100185029Spjd nvlist_t *old_props; 101185029Spjd 102185029Spjd old_props = zhp->zpool_props; 103185029Spjd 104185029Spjd if (zpool_get_all_props(zhp) != 0) 105185029Spjd return (-1); 106185029Spjd 107185029Spjd nvlist_free(old_props); 108185029Spjd return (0); 109185029Spjd} 110185029Spjd 111185029Spjdstatic char * 112185029Spjdzpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 113185029Spjd zprop_source_t *src) 114185029Spjd{ 115185029Spjd nvlist_t *nv, *nvl; 116185029Spjd uint64_t ival; 117185029Spjd char *value; 118185029Spjd zprop_source_t source; 119185029Spjd 120185029Spjd nvl = zhp->zpool_props; 121185029Spjd if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 122185029Spjd verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 123185029Spjd source = ival; 124185029Spjd verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 125185029Spjd } else { 126185029Spjd source = ZPROP_SRC_DEFAULT; 127185029Spjd if ((value = (char *)zpool_prop_default_string(prop)) == NULL) 128185029Spjd value = "-"; 129185029Spjd } 130185029Spjd 131185029Spjd if (src) 132185029Spjd *src = source; 133185029Spjd 134185029Spjd return (value); 135185029Spjd} 136185029Spjd 137185029Spjduint64_t 138185029Spjdzpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 139185029Spjd{ 140185029Spjd nvlist_t *nv, *nvl; 141185029Spjd uint64_t value; 142185029Spjd zprop_source_t source; 143185029Spjd 144185029Spjd if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 145185029Spjd /* 146185029Spjd * zpool_get_all_props() has most likely 
failed because 147185029Spjd * the pool is faulted, but if all we need is the top level 148185029Spjd * vdev's guid then get it from the zhp config nvlist. 149185029Spjd */ 150185029Spjd if ((prop == ZPOOL_PROP_GUID) && 151185029Spjd (nvlist_lookup_nvlist(zhp->zpool_config, 152185029Spjd ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 153185029Spjd (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 154185029Spjd == 0)) { 155185029Spjd return (value); 156185029Spjd } 157185029Spjd return (zpool_prop_default_numeric(prop)); 158185029Spjd } 159185029Spjd 160185029Spjd nvl = zhp->zpool_props; 161185029Spjd if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 162185029Spjd verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 163185029Spjd source = value; 164185029Spjd verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 165185029Spjd } else { 166185029Spjd source = ZPROP_SRC_DEFAULT; 167185029Spjd value = zpool_prop_default_numeric(prop); 168185029Spjd } 169185029Spjd 170185029Spjd if (src) 171185029Spjd *src = source; 172185029Spjd 173185029Spjd return (value); 174185029Spjd} 175185029Spjd 176185029Spjd/* 177185029Spjd * Map VDEV STATE to printed strings. 
178185029Spjd */ 179185029Spjdchar * 180185029Spjdzpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 181185029Spjd{ 182185029Spjd switch (state) { 183185029Spjd case VDEV_STATE_CLOSED: 184185029Spjd case VDEV_STATE_OFFLINE: 185185029Spjd return (gettext("OFFLINE")); 186185029Spjd case VDEV_STATE_REMOVED: 187185029Spjd return (gettext("REMOVED")); 188185029Spjd case VDEV_STATE_CANT_OPEN: 189185029Spjd if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 190185029Spjd return (gettext("FAULTED")); 191185029Spjd else 192185029Spjd return (gettext("UNAVAIL")); 193185029Spjd case VDEV_STATE_FAULTED: 194185029Spjd return (gettext("FAULTED")); 195185029Spjd case VDEV_STATE_DEGRADED: 196185029Spjd return (gettext("DEGRADED")); 197185029Spjd case VDEV_STATE_HEALTHY: 198185029Spjd return (gettext("ONLINE")); 199185029Spjd } 200185029Spjd 201185029Spjd return (gettext("UNKNOWN")); 202185029Spjd} 203185029Spjd 204185029Spjd/* 205185029Spjd * Get a zpool property value for 'prop' and return the value in 206185029Spjd * a pre-allocated buffer. 
207185029Spjd */ 208185029Spjdint 209185029Spjdzpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, 210185029Spjd zprop_source_t *srctype) 211185029Spjd{ 212185029Spjd uint64_t intval; 213185029Spjd const char *strval; 214185029Spjd zprop_source_t src = ZPROP_SRC_NONE; 215185029Spjd nvlist_t *nvroot; 216185029Spjd vdev_stat_t *vs; 217185029Spjd uint_t vsc; 218185029Spjd 219185029Spjd if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 220209962Smm switch (prop) { 221209962Smm case ZPOOL_PROP_NAME: 222185029Spjd (void) strlcpy(buf, zpool_get_name(zhp), len); 223209962Smm break; 224209962Smm 225209962Smm case ZPOOL_PROP_HEALTH: 226185029Spjd (void) strlcpy(buf, "FAULTED", len); 227209962Smm break; 228209962Smm 229209962Smm case ZPOOL_PROP_GUID: 230209962Smm intval = zpool_get_prop_int(zhp, prop, &src); 231209962Smm (void) snprintf(buf, len, "%llu", intval); 232209962Smm break; 233209962Smm 234209962Smm case ZPOOL_PROP_ALTROOT: 235209962Smm case ZPOOL_PROP_CACHEFILE: 236209962Smm if (zhp->zpool_props != NULL || 237209962Smm zpool_get_all_props(zhp) == 0) { 238209962Smm (void) strlcpy(buf, 239209962Smm zpool_get_prop_string(zhp, prop, &src), 240209962Smm len); 241209962Smm if (srctype != NULL) 242209962Smm *srctype = src; 243209962Smm return (0); 244209962Smm } 245209962Smm /* FALLTHROUGH */ 246209962Smm default: 247185029Spjd (void) strlcpy(buf, "-", len); 248209962Smm break; 249209962Smm } 250209962Smm 251209962Smm if (srctype != NULL) 252209962Smm *srctype = src; 253185029Spjd return (0); 254185029Spjd } 255185029Spjd 256185029Spjd if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && 257185029Spjd prop != ZPOOL_PROP_NAME) 258185029Spjd return (-1); 259185029Spjd 260185029Spjd switch (zpool_prop_get_type(prop)) { 261185029Spjd case PROP_TYPE_STRING: 262185029Spjd (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), 263185029Spjd len); 264185029Spjd break; 265185029Spjd 266185029Spjd case PROP_TYPE_NUMBER: 267185029Spjd intval = 
zpool_get_prop_int(zhp, prop, &src); 268185029Spjd 269185029Spjd switch (prop) { 270185029Spjd case ZPOOL_PROP_SIZE: 271185029Spjd case ZPOOL_PROP_USED: 272185029Spjd case ZPOOL_PROP_AVAILABLE: 273185029Spjd (void) zfs_nicenum(intval, buf, len); 274185029Spjd break; 275185029Spjd 276185029Spjd case ZPOOL_PROP_CAPACITY: 277185029Spjd (void) snprintf(buf, len, "%llu%%", 278185029Spjd (u_longlong_t)intval); 279185029Spjd break; 280185029Spjd 281185029Spjd case ZPOOL_PROP_HEALTH: 282185029Spjd verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 283185029Spjd ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 284185029Spjd verify(nvlist_lookup_uint64_array(nvroot, 285185029Spjd ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0); 286185029Spjd 287185029Spjd (void) strlcpy(buf, zpool_state_to_name(intval, 288185029Spjd vs->vs_aux), len); 289185029Spjd break; 290185029Spjd default: 291185029Spjd (void) snprintf(buf, len, "%llu", intval); 292185029Spjd } 293185029Spjd break; 294185029Spjd 295185029Spjd case PROP_TYPE_INDEX: 296185029Spjd intval = zpool_get_prop_int(zhp, prop, &src); 297185029Spjd if (zpool_prop_index_to_string(prop, intval, &strval) 298185029Spjd != 0) 299185029Spjd return (-1); 300185029Spjd (void) strlcpy(buf, strval, len); 301185029Spjd break; 302185029Spjd 303185029Spjd default: 304185029Spjd abort(); 305185029Spjd } 306185029Spjd 307185029Spjd if (srctype) 308185029Spjd *srctype = src; 309185029Spjd 310185029Spjd return (0); 311185029Spjd} 312185029Spjd 313209962Smmstatic boolean_t 314209962Smmpool_is_bootable(zpool_handle_t *zhp) 315209962Smm{ 316209962Smm char bootfs[ZPOOL_MAXNAMELEN]; 317209962Smm 318209962Smm return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 319209962Smm sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-", 320209962Smm sizeof (bootfs)) != 0); 321209962Smm} 322209962Smm 323209962Smm 324185029Spjd/* 325185029Spjd * Check if the bootfs name has the same pool name as it is set to. 326185029Spjd * Assuming bootfs is a valid dataset name. 
327185029Spjd */ 328185029Spjdstatic boolean_t 329185029Spjdbootfs_name_valid(const char *pool, char *bootfs) 330185029Spjd{ 331185029Spjd int len = strlen(pool); 332185029Spjd 333185029Spjd if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 334185029Spjd return (B_FALSE); 335185029Spjd 336185029Spjd if (strncmp(pool, bootfs, len) == 0 && 337185029Spjd (bootfs[len] == '/' || bootfs[len] == '\0')) 338185029Spjd return (B_TRUE); 339185029Spjd 340185029Spjd return (B_FALSE); 341185029Spjd} 342185029Spjd 343185029Spjd/* 344185029Spjd * Inspect the configuration to determine if any of the devices contain 345185029Spjd * an EFI label. 346185029Spjd */ 347185029Spjdstatic boolean_t 348185029Spjdpool_uses_efi(nvlist_t *config) 349185029Spjd{ 350209962Smm#ifdef sun 351185029Spjd nvlist_t **child; 352185029Spjd uint_t c, children; 353185029Spjd 354185029Spjd if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 355185029Spjd &child, &children) != 0) 356185029Spjd return (read_efi_label(config, NULL) >= 0); 357185029Spjd 358185029Spjd for (c = 0; c < children; c++) { 359185029Spjd if (pool_uses_efi(child[c])) 360185029Spjd return (B_TRUE); 361185029Spjd } 362209962Smm#endif /* sun */ 363185029Spjd return (B_FALSE); 364185029Spjd} 365185029Spjd 366185029Spjd/* 367185029Spjd * Given an nvlist of zpool properties to be set, validate that they are 368185029Spjd * correct, and parse any numeric properties (index, boolean, etc) if they are 369185029Spjd * specified as strings. 
370185029Spjd */ 371185029Spjdstatic nvlist_t * 372185029Spjdzpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 373185029Spjd nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf) 374185029Spjd{ 375185029Spjd nvpair_t *elem; 376185029Spjd nvlist_t *retprops; 377185029Spjd zpool_prop_t prop; 378185029Spjd char *strval; 379185029Spjd uint64_t intval; 380185029Spjd char *slash; 381185029Spjd struct stat64 statbuf; 382185029Spjd zpool_handle_t *zhp; 383185029Spjd nvlist_t *nvroot; 384185029Spjd 385185029Spjd if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 386185029Spjd (void) no_memory(hdl); 387185029Spjd return (NULL); 388185029Spjd } 389185029Spjd 390185029Spjd elem = NULL; 391185029Spjd while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 392185029Spjd const char *propname = nvpair_name(elem); 393185029Spjd 394185029Spjd /* 395185029Spjd * Make sure this property is valid and applies to this type. 396185029Spjd */ 397185029Spjd if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) { 398185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 399185029Spjd "invalid property '%s'"), propname); 400185029Spjd (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 401185029Spjd goto error; 402185029Spjd } 403185029Spjd 404185029Spjd if (zpool_prop_readonly(prop)) { 405185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 406185029Spjd "is readonly"), propname); 407185029Spjd (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); 408185029Spjd goto error; 409185029Spjd } 410185029Spjd 411185029Spjd if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, 412185029Spjd &strval, &intval, errbuf) != 0) 413185029Spjd goto error; 414185029Spjd 415185029Spjd /* 416185029Spjd * Perform additional checking for specific properties. 
417185029Spjd */ 418185029Spjd switch (prop) { 419185029Spjd case ZPOOL_PROP_VERSION: 420185029Spjd if (intval < version || intval > SPA_VERSION) { 421185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 422185029Spjd "property '%s' number %d is invalid."), 423185029Spjd propname, intval); 424185029Spjd (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 425185029Spjd goto error; 426185029Spjd } 427185029Spjd break; 428185029Spjd 429185029Spjd case ZPOOL_PROP_BOOTFS: 430185029Spjd if (create_or_import) { 431185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 432185029Spjd "property '%s' cannot be set at creation " 433185029Spjd "or import time"), propname); 434185029Spjd (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 435185029Spjd goto error; 436185029Spjd } 437185029Spjd 438185029Spjd if (version < SPA_VERSION_BOOTFS) { 439185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 440185029Spjd "pool must be upgraded to support " 441185029Spjd "'%s' property"), propname); 442185029Spjd (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 443185029Spjd goto error; 444185029Spjd } 445185029Spjd 446185029Spjd /* 447185029Spjd * bootfs property value has to be a dataset name and 448185029Spjd * the dataset has to be in the same pool as it sets to. 
449185029Spjd */ 450185029Spjd if (strval[0] != '\0' && !bootfs_name_valid(poolname, 451185029Spjd strval)) { 452185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 453185029Spjd "is an invalid name"), strval); 454185029Spjd (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 455185029Spjd goto error; 456185029Spjd } 457185029Spjd 458185029Spjd if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { 459185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 460185029Spjd "could not open pool '%s'"), poolname); 461185029Spjd (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 462185029Spjd goto error; 463185029Spjd } 464185029Spjd verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 465185029Spjd ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 466185029Spjd 467185029Spjd#if defined(sun) 468185029Spjd /* 469185029Spjd * bootfs property cannot be set on a disk which has 470185029Spjd * been EFI labeled. 471185029Spjd */ 472185029Spjd if (pool_uses_efi(nvroot)) { 473185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 474185029Spjd "property '%s' not supported on " 475185029Spjd "EFI labeled devices"), propname); 476185029Spjd (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf); 477185029Spjd zpool_close(zhp); 478185029Spjd goto error; 479185029Spjd } 480185029Spjd#endif 481185029Spjd zpool_close(zhp); 482185029Spjd break; 483185029Spjd 484185029Spjd case ZPOOL_PROP_ALTROOT: 485185029Spjd if (!create_or_import) { 486185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 487185029Spjd "property '%s' can only be set during pool " 488185029Spjd "creation or import"), propname); 489185029Spjd (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 490185029Spjd goto error; 491185029Spjd } 492185029Spjd 493185029Spjd if (strval[0] != '/') { 494185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 495185029Spjd "bad alternate root '%s'"), strval); 496185029Spjd (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 497185029Spjd goto error; 498185029Spjd } 499185029Spjd break; 500185029Spjd 501185029Spjd case 
ZPOOL_PROP_CACHEFILE: 502185029Spjd if (strval[0] == '\0') 503185029Spjd break; 504185029Spjd 505185029Spjd if (strcmp(strval, "none") == 0) 506185029Spjd break; 507185029Spjd 508185029Spjd if (strval[0] != '/') { 509185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 510185029Spjd "property '%s' must be empty, an " 511185029Spjd "absolute path, or 'none'"), propname); 512185029Spjd (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 513185029Spjd goto error; 514185029Spjd } 515185029Spjd 516185029Spjd slash = strrchr(strval, '/'); 517185029Spjd 518185029Spjd if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 519185029Spjd strcmp(slash, "/..") == 0) { 520185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 521185029Spjd "'%s' is not a valid file"), strval); 522185029Spjd (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 523185029Spjd goto error; 524185029Spjd } 525185029Spjd 526185029Spjd *slash = '\0'; 527185029Spjd 528185029Spjd if (strval[0] != '\0' && 529185029Spjd (stat64(strval, &statbuf) != 0 || 530185029Spjd !S_ISDIR(statbuf.st_mode))) { 531185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 532185029Spjd "'%s' is not a valid directory"), 533185029Spjd strval); 534185029Spjd (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 535185029Spjd goto error; 536185029Spjd } 537185029Spjd 538185029Spjd *slash = '/'; 539185029Spjd break; 540185029Spjd } 541185029Spjd } 542185029Spjd 543185029Spjd return (retprops); 544185029Spjderror: 545185029Spjd nvlist_free(retprops); 546185029Spjd return (NULL); 547185029Spjd} 548185029Spjd 549185029Spjd/* 550185029Spjd * Set zpool property : propname=propval. 
551185029Spjd */ 552185029Spjdint 553185029Spjdzpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 554185029Spjd{ 555185029Spjd zfs_cmd_t zc = { 0 }; 556185029Spjd int ret = -1; 557185029Spjd char errbuf[1024]; 558185029Spjd nvlist_t *nvl = NULL; 559185029Spjd nvlist_t *realprops; 560185029Spjd uint64_t version; 561185029Spjd 562185029Spjd (void) snprintf(errbuf, sizeof (errbuf), 563185029Spjd dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 564185029Spjd zhp->zpool_name); 565185029Spjd 566185029Spjd if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 567185029Spjd return (no_memory(zhp->zpool_hdl)); 568185029Spjd 569185029Spjd if (nvlist_add_string(nvl, propname, propval) != 0) { 570185029Spjd nvlist_free(nvl); 571185029Spjd return (no_memory(zhp->zpool_hdl)); 572185029Spjd } 573185029Spjd 574185029Spjd version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 575185029Spjd if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 576185029Spjd zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) { 577185029Spjd nvlist_free(nvl); 578185029Spjd return (-1); 579185029Spjd } 580185029Spjd 581185029Spjd nvlist_free(nvl); 582185029Spjd nvl = realprops; 583185029Spjd 584185029Spjd /* 585185029Spjd * Execute the corresponding ioctl() to set this property. 
586185029Spjd */ 587185029Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 588185029Spjd 589185029Spjd if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 590185029Spjd nvlist_free(nvl); 591185029Spjd return (-1); 592185029Spjd } 593185029Spjd 594185029Spjd ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 595185029Spjd 596185029Spjd zcmd_free_nvlists(&zc); 597185029Spjd nvlist_free(nvl); 598185029Spjd 599185029Spjd if (ret) 600185029Spjd (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 601185029Spjd else 602185029Spjd (void) zpool_props_refresh(zhp); 603185029Spjd 604185029Spjd return (ret); 605185029Spjd} 606185029Spjd 607185029Spjdint 608185029Spjdzpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) 609185029Spjd{ 610185029Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 611185029Spjd zprop_list_t *entry; 612185029Spjd char buf[ZFS_MAXPROPLEN]; 613185029Spjd 614185029Spjd if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 615185029Spjd return (-1); 616185029Spjd 617185029Spjd for (entry = *plp; entry != NULL; entry = entry->pl_next) { 618185029Spjd 619185029Spjd if (entry->pl_fixed) 620185029Spjd continue; 621185029Spjd 622185029Spjd if (entry->pl_prop != ZPROP_INVAL && 623185029Spjd zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), 624185029Spjd NULL) == 0) { 625185029Spjd if (strlen(buf) > entry->pl_width) 626185029Spjd entry->pl_width = strlen(buf); 627185029Spjd } 628185029Spjd } 629185029Spjd 630185029Spjd return (0); 631185029Spjd} 632185029Spjd 633185029Spjd 634185029Spjd/* 635168404Spjd * Validate the given pool name, optionally putting an extended error message in 636168404Spjd * 'buf'. 
637168404Spjd */ 638185029Spjdboolean_t 639168404Spjdzpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 640168404Spjd{ 641168404Spjd namecheck_err_t why; 642168404Spjd char what; 643168404Spjd int ret; 644168404Spjd 645168404Spjd ret = pool_namecheck(pool, &why, &what); 646168404Spjd 647168404Spjd /* 648168404Spjd * The rules for reserved pool names were extended at a later point. 649168404Spjd * But we need to support users with existing pools that may now be 650168404Spjd * invalid. So we only check for this expanded set of names during a 651168404Spjd * create (or import), and only in userland. 652168404Spjd */ 653168404Spjd if (ret == 0 && !isopen && 654168404Spjd (strncmp(pool, "mirror", 6) == 0 || 655168404Spjd strncmp(pool, "raidz", 5) == 0 || 656185029Spjd strncmp(pool, "spare", 5) == 0 || 657185029Spjd strcmp(pool, "log") == 0)) { 658185029Spjd if (hdl != NULL) 659185029Spjd zfs_error_aux(hdl, 660185029Spjd dgettext(TEXT_DOMAIN, "name is reserved")); 661168404Spjd return (B_FALSE); 662168404Spjd } 663168404Spjd 664168404Spjd 665168404Spjd if (ret != 0) { 666168404Spjd if (hdl != NULL) { 667168404Spjd switch (why) { 668168404Spjd case NAME_ERR_TOOLONG: 669168404Spjd zfs_error_aux(hdl, 670168404Spjd dgettext(TEXT_DOMAIN, "name is too long")); 671168404Spjd break; 672168404Spjd 673168404Spjd case NAME_ERR_INVALCHAR: 674168404Spjd zfs_error_aux(hdl, 675168404Spjd dgettext(TEXT_DOMAIN, "invalid character " 676168404Spjd "'%c' in pool name"), what); 677168404Spjd break; 678168404Spjd 679168404Spjd case NAME_ERR_NOLETTER: 680168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 681168404Spjd "name must begin with a letter")); 682168404Spjd break; 683168404Spjd 684168404Spjd case NAME_ERR_RESERVED: 685168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 686168404Spjd "name is reserved")); 687168404Spjd break; 688168404Spjd 689168404Spjd case NAME_ERR_DISKLIKE: 690168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 691168404Spjd "pool name is 
reserved")); 692168404Spjd break; 693168404Spjd 694168404Spjd case NAME_ERR_LEADING_SLASH: 695168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 696168404Spjd "leading slash in name")); 697168404Spjd break; 698168404Spjd 699168404Spjd case NAME_ERR_EMPTY_COMPONENT: 700168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 701168404Spjd "empty component in name")); 702168404Spjd break; 703168404Spjd 704168404Spjd case NAME_ERR_TRAILING_SLASH: 705168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 706168404Spjd "trailing slash in name")); 707168404Spjd break; 708168404Spjd 709168404Spjd case NAME_ERR_MULTIPLE_AT: 710168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 711168404Spjd "multiple '@' delimiters in name")); 712168404Spjd break; 713168404Spjd 714168404Spjd } 715168404Spjd } 716168404Spjd return (B_FALSE); 717168404Spjd } 718168404Spjd 719168404Spjd return (B_TRUE); 720168404Spjd} 721168404Spjd 722168404Spjd/* 723168404Spjd * Open a handle to the given pool, even if the pool is currently in the FAULTED 724168404Spjd * state. 725168404Spjd */ 726168404Spjdzpool_handle_t * 727168404Spjdzpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 728168404Spjd{ 729168404Spjd zpool_handle_t *zhp; 730168404Spjd boolean_t missing; 731168404Spjd 732168404Spjd /* 733168404Spjd * Make sure the pool name is valid. 
734168404Spjd */ 735168404Spjd if (!zpool_name_valid(hdl, B_TRUE, pool)) { 736168404Spjd (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 737168404Spjd dgettext(TEXT_DOMAIN, "cannot open '%s'"), 738168404Spjd pool); 739168404Spjd return (NULL); 740168404Spjd } 741168404Spjd 742168404Spjd if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 743168404Spjd return (NULL); 744168404Spjd 745168404Spjd zhp->zpool_hdl = hdl; 746168404Spjd (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 747168404Spjd 748168404Spjd if (zpool_refresh_stats(zhp, &missing) != 0) { 749168404Spjd zpool_close(zhp); 750168404Spjd return (NULL); 751168404Spjd } 752168404Spjd 753168404Spjd if (missing) { 754185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 755168404Spjd (void) zfs_error_fmt(hdl, EZFS_NOENT, 756185029Spjd dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 757168404Spjd zpool_close(zhp); 758168404Spjd return (NULL); 759168404Spjd } 760168404Spjd 761168404Spjd return (zhp); 762168404Spjd} 763168404Spjd 764168404Spjd/* 765168404Spjd * Like the above, but silent on error. Used when iterating over pools (because 766168404Spjd * the configuration cache may be out of date). 
767168404Spjd */ 768168404Spjdint 769168404Spjdzpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 770168404Spjd{ 771168404Spjd zpool_handle_t *zhp; 772168404Spjd boolean_t missing; 773168404Spjd 774168404Spjd if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 775168404Spjd return (-1); 776168404Spjd 777168404Spjd zhp->zpool_hdl = hdl; 778168404Spjd (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 779168404Spjd 780168404Spjd if (zpool_refresh_stats(zhp, &missing) != 0) { 781168404Spjd zpool_close(zhp); 782168404Spjd return (-1); 783168404Spjd } 784168404Spjd 785168404Spjd if (missing) { 786168404Spjd zpool_close(zhp); 787168404Spjd *ret = NULL; 788168404Spjd return (0); 789168404Spjd } 790168404Spjd 791168404Spjd *ret = zhp; 792168404Spjd return (0); 793168404Spjd} 794168404Spjd 795168404Spjd/* 796168404Spjd * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 797168404Spjd * state. 798168404Spjd */ 799168404Spjdzpool_handle_t * 800168404Spjdzpool_open(libzfs_handle_t *hdl, const char *pool) 801168404Spjd{ 802168404Spjd zpool_handle_t *zhp; 803168404Spjd 804168404Spjd if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 805168404Spjd return (NULL); 806168404Spjd 807168404Spjd if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 808168404Spjd (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 809168404Spjd dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 810168404Spjd zpool_close(zhp); 811168404Spjd return (NULL); 812168404Spjd } 813168404Spjd 814168404Spjd return (zhp); 815168404Spjd} 816168404Spjd 817168404Spjd/* 818168404Spjd * Close the handle. Simply frees the memory associated with the handle. 
819168404Spjd */ 820168404Spjdvoid 821168404Spjdzpool_close(zpool_handle_t *zhp) 822168404Spjd{ 823168404Spjd if (zhp->zpool_config) 824168404Spjd nvlist_free(zhp->zpool_config); 825168404Spjd if (zhp->zpool_old_config) 826168404Spjd nvlist_free(zhp->zpool_old_config); 827168404Spjd if (zhp->zpool_props) 828168404Spjd nvlist_free(zhp->zpool_props); 829168404Spjd free(zhp); 830168404Spjd} 831168404Spjd 832168404Spjd/* 833168404Spjd * Return the name of the pool. 834168404Spjd */ 835168404Spjdconst char * 836168404Spjdzpool_get_name(zpool_handle_t *zhp) 837168404Spjd{ 838168404Spjd return (zhp->zpool_name); 839168404Spjd} 840168404Spjd 841168404Spjd 842168404Spjd/* 843168404Spjd * Return the state of the pool (ACTIVE or UNAVAILABLE) 844168404Spjd */ 845168404Spjdint 846168404Spjdzpool_get_state(zpool_handle_t *zhp) 847168404Spjd{ 848168404Spjd return (zhp->zpool_state); 849168404Spjd} 850168404Spjd 851168404Spjd/* 852168404Spjd * Create the named pool, using the provided vdev list. It is assumed 853168404Spjd * that the consumer has already validated the contents of the nvlist, so we 854168404Spjd * don't have to worry about error semantics. 
855168404Spjd */ 856168404Spjdint 857168404Spjdzpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 858185029Spjd nvlist_t *props, nvlist_t *fsprops) 859168404Spjd{ 860168404Spjd zfs_cmd_t zc = { 0 }; 861185029Spjd nvlist_t *zc_fsprops = NULL; 862185029Spjd nvlist_t *zc_props = NULL; 863168404Spjd char msg[1024]; 864185029Spjd char *altroot; 865185029Spjd int ret = -1; 866168404Spjd 867168404Spjd (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 868168404Spjd "cannot create '%s'"), pool); 869168404Spjd 870168404Spjd if (!zpool_name_valid(hdl, B_FALSE, pool)) 871168404Spjd return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 872168404Spjd 873185029Spjd if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 874168404Spjd return (-1); 875168404Spjd 876185029Spjd if (props) { 877185029Spjd if ((zc_props = zpool_valid_proplist(hdl, pool, props, 878185029Spjd SPA_VERSION_1, B_TRUE, msg)) == NULL) { 879185029Spjd goto create_failed; 880185029Spjd } 881185029Spjd } 882185029Spjd 883185029Spjd if (fsprops) { 884185029Spjd uint64_t zoned; 885185029Spjd char *zonestr; 886185029Spjd 887185029Spjd zoned = ((nvlist_lookup_string(fsprops, 888185029Spjd zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 889185029Spjd strcmp(zonestr, "on") == 0); 890185029Spjd 891185029Spjd if ((zc_fsprops = zfs_valid_proplist(hdl, 892185029Spjd ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) { 893185029Spjd goto create_failed; 894185029Spjd } 895185029Spjd if (!zc_props && 896185029Spjd (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 897185029Spjd goto create_failed; 898185029Spjd } 899185029Spjd if (nvlist_add_nvlist(zc_props, 900185029Spjd ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 901185029Spjd goto create_failed; 902185029Spjd } 903185029Spjd } 904185029Spjd 905185029Spjd if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 906185029Spjd goto create_failed; 907185029Spjd 908168404Spjd (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 909168404Spjd 
910185029Spjd if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 911168404Spjd 912168404Spjd zcmd_free_nvlists(&zc); 913185029Spjd nvlist_free(zc_props); 914185029Spjd nvlist_free(zc_fsprops); 915168404Spjd 916168404Spjd switch (errno) { 917168404Spjd case EBUSY: 918168404Spjd /* 919168404Spjd * This can happen if the user has specified the same 920168404Spjd * device multiple times. We can't reliably detect this 921168404Spjd * until we try to add it and see we already have a 922168404Spjd * label. 923168404Spjd */ 924168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 925168404Spjd "one or more vdevs refer to the same device")); 926168404Spjd return (zfs_error(hdl, EZFS_BADDEV, msg)); 927168404Spjd 928168404Spjd case EOVERFLOW: 929168404Spjd /* 930168404Spjd * This occurs when one of the devices is below 931168404Spjd * SPA_MINDEVSIZE. Unfortunately, we can't detect which 932168404Spjd * device was the problem device since there's no 933168404Spjd * reliable way to determine device size from userland. 
934168404Spjd */ 935168404Spjd { 936168404Spjd char buf[64]; 937168404Spjd 938168404Spjd zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 939168404Spjd 940168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 941168404Spjd "one or more devices is less than the " 942168404Spjd "minimum size (%s)"), buf); 943168404Spjd } 944168404Spjd return (zfs_error(hdl, EZFS_BADDEV, msg)); 945168404Spjd 946168404Spjd case ENOSPC: 947168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 948168404Spjd "one or more devices is out of space")); 949168404Spjd return (zfs_error(hdl, EZFS_BADDEV, msg)); 950168404Spjd 951185029Spjd case ENOTBLK: 952185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 953185029Spjd "cache device must be a disk or disk slice")); 954185029Spjd return (zfs_error(hdl, EZFS_BADDEV, msg)); 955185029Spjd 956168404Spjd default: 957168404Spjd return (zpool_standard_error(hdl, errno, msg)); 958168404Spjd } 959168404Spjd } 960168404Spjd 961168404Spjd /* 962168404Spjd * If this is an alternate root pool, then we automatically set the 963168404Spjd * mountpoint of the root dataset to be '/'. 964168404Spjd */ 965185029Spjd if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT), 966185029Spjd &altroot) == 0) { 967168404Spjd zfs_handle_t *zhp; 968168404Spjd 969185029Spjd verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL); 970168404Spjd verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 971168404Spjd "/") == 0); 972168404Spjd 973168404Spjd zfs_close(zhp); 974168404Spjd } 975168404Spjd 976185029Spjdcreate_failed: 977185029Spjd zcmd_free_nvlists(&zc); 978185029Spjd nvlist_free(zc_props); 979185029Spjd nvlist_free(zc_fsprops); 980185029Spjd return (ret); 981168404Spjd} 982168404Spjd 983168404Spjd/* 984168404Spjd * Destroy the given pool. It is up to the caller to ensure that there are no 985168404Spjd * datasets left in the pool. 
986168404Spjd */ 987168404Spjdint 988168404Spjdzpool_destroy(zpool_handle_t *zhp) 989168404Spjd{ 990168404Spjd zfs_cmd_t zc = { 0 }; 991168404Spjd zfs_handle_t *zfp = NULL; 992168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 993168404Spjd char msg[1024]; 994168404Spjd 995168404Spjd if (zhp->zpool_state == POOL_STATE_ACTIVE && 996168404Spjd (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name, 997168404Spjd ZFS_TYPE_FILESYSTEM)) == NULL) 998168404Spjd return (-1); 999168404Spjd 1000168404Spjd if (zpool_remove_zvol_links(zhp) != 0) 1001168404Spjd return (-1); 1002168404Spjd 1003168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1004168404Spjd 1005185029Spjd if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1006168404Spjd (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1007168404Spjd "cannot destroy '%s'"), zhp->zpool_name); 1008168404Spjd 1009168404Spjd if (errno == EROFS) { 1010168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1011168404Spjd "one or more devices is read only")); 1012168404Spjd (void) zfs_error(hdl, EZFS_BADDEV, msg); 1013168404Spjd } else { 1014168404Spjd (void) zpool_standard_error(hdl, errno, msg); 1015168404Spjd } 1016168404Spjd 1017168404Spjd if (zfp) 1018168404Spjd zfs_close(zfp); 1019168404Spjd return (-1); 1020168404Spjd } 1021168404Spjd 1022168404Spjd if (zfp) { 1023168404Spjd remove_mountpoint(zfp); 1024168404Spjd zfs_close(zfp); 1025168404Spjd } 1026168404Spjd 1027168404Spjd return (0); 1028168404Spjd} 1029168404Spjd 1030168404Spjd/* 1031168404Spjd * Add the given vdevs to the pool. The caller must have already performed the 1032168404Spjd * necessary verification to ensure that the vdev specification is well-formed. 
1033168404Spjd */ 1034168404Spjdint 1035168404Spjdzpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) 1036168404Spjd{ 1037168404Spjd zfs_cmd_t zc = { 0 }; 1038168404Spjd int ret; 1039168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 1040168404Spjd char msg[1024]; 1041185029Spjd nvlist_t **spares, **l2cache; 1042185029Spjd uint_t nspares, nl2cache; 1043168404Spjd 1044168404Spjd (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1045168404Spjd "cannot add to '%s'"), zhp->zpool_name); 1046168404Spjd 1047185029Spjd if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1048185029Spjd SPA_VERSION_SPARES && 1049168404Spjd nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1050168404Spjd &spares, &nspares) == 0) { 1051168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1052168404Spjd "upgraded to add hot spares")); 1053168404Spjd return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1054168404Spjd } 1055168404Spjd 1056209962Smm if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot, 1057209962Smm ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) { 1058209962Smm uint64_t s; 1059209962Smm 1060209962Smm for (s = 0; s < nspares; s++) { 1061209962Smm char *path; 1062209962Smm 1063209962Smm if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH, 1064209962Smm &path) == 0 && pool_uses_efi(spares[s])) { 1065209962Smm zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1066209962Smm "device '%s' contains an EFI label and " 1067209962Smm "cannot be used on root pools."), 1068209962Smm zpool_vdev_name(hdl, NULL, spares[s])); 1069209962Smm return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 1070209962Smm } 1071209962Smm } 1072209962Smm } 1073209962Smm 1074185029Spjd if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1075185029Spjd SPA_VERSION_L2CACHE && 1076185029Spjd nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1077185029Spjd &l2cache, &nl2cache) == 0) { 1078185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1079185029Spjd "upgraded to add 
cache devices")); 1080185029Spjd return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1081185029Spjd } 1082185029Spjd 1083185029Spjd if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1084168404Spjd return (-1); 1085168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1086168404Spjd 1087185029Spjd if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) { 1088168404Spjd switch (errno) { 1089168404Spjd case EBUSY: 1090168404Spjd /* 1091168404Spjd * This can happen if the user has specified the same 1092168404Spjd * device multiple times. We can't reliably detect this 1093168404Spjd * until we try to add it and see we already have a 1094168404Spjd * label. 1095168404Spjd */ 1096168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1097168404Spjd "one or more vdevs refer to the same device")); 1098168404Spjd (void) zfs_error(hdl, EZFS_BADDEV, msg); 1099168404Spjd break; 1100168404Spjd 1101168404Spjd case EOVERFLOW: 1102168404Spjd /* 1103168404Spjd * This occurrs when one of the devices is below 1104168404Spjd * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1105168404Spjd * device was the problem device since there's no 1106168404Spjd * reliable way to determine device size from userland. 
1107168404Spjd */ 1108168404Spjd { 1109168404Spjd char buf[64]; 1110168404Spjd 1111168404Spjd zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1112168404Spjd 1113168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1114168404Spjd "device is less than the minimum " 1115168404Spjd "size (%s)"), buf); 1116168404Spjd } 1117168404Spjd (void) zfs_error(hdl, EZFS_BADDEV, msg); 1118168404Spjd break; 1119168404Spjd 1120168404Spjd case ENOTSUP: 1121168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1122185029Spjd "pool must be upgraded to add these vdevs")); 1123168404Spjd (void) zfs_error(hdl, EZFS_BADVERSION, msg); 1124168404Spjd break; 1125168404Spjd 1126168404Spjd case EDOM: 1127168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1128185029Spjd "root pool can not have multiple vdevs" 1129185029Spjd " or separate logs")); 1130168404Spjd (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); 1131168404Spjd break; 1132168404Spjd 1133185029Spjd case ENOTBLK: 1134185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1135185029Spjd "cache device must be a disk or disk slice")); 1136185029Spjd (void) zfs_error(hdl, EZFS_BADDEV, msg); 1137185029Spjd break; 1138185029Spjd 1139168404Spjd default: 1140168404Spjd (void) zpool_standard_error(hdl, errno, msg); 1141168404Spjd } 1142168404Spjd 1143168404Spjd ret = -1; 1144168404Spjd } else { 1145168404Spjd ret = 0; 1146168404Spjd } 1147168404Spjd 1148168404Spjd zcmd_free_nvlists(&zc); 1149168404Spjd 1150168404Spjd return (ret); 1151168404Spjd} 1152168404Spjd 1153168404Spjd/* 1154168404Spjd * Exports the pool from the system. The caller must ensure that there are no 1155168404Spjd * mounted datasets in the pool. 
1156168404Spjd */ 1157168404Spjdint 1158207670Smmzpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce) 1159168404Spjd{ 1160168404Spjd zfs_cmd_t zc = { 0 }; 1161185029Spjd char msg[1024]; 1162168404Spjd 1163168404Spjd if (zpool_remove_zvol_links(zhp) != 0) 1164168404Spjd return (-1); 1165168404Spjd 1166185029Spjd (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1167185029Spjd "cannot export '%s'"), zhp->zpool_name); 1168185029Spjd 1169168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1170185029Spjd zc.zc_cookie = force; 1171207670Smm zc.zc_guid = hardforce; 1172168404Spjd 1173185029Spjd if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { 1174185029Spjd switch (errno) { 1175185029Spjd case EXDEV: 1176185029Spjd zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 1177185029Spjd "use '-f' to override the following errors:\n" 1178185029Spjd "'%s' has an active shared spare which could be" 1179185029Spjd " used by other pools once '%s' is exported."), 1180185029Spjd zhp->zpool_name, zhp->zpool_name); 1181185029Spjd return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, 1182185029Spjd msg)); 1183185029Spjd default: 1184185029Spjd return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, 1185185029Spjd msg)); 1186185029Spjd } 1187185029Spjd } 1188185029Spjd 1189168404Spjd return (0); 1190168404Spjd} 1191168404Spjd 1192207670Smmint 1193207670Smmzpool_export(zpool_handle_t *zhp, boolean_t force) 1194207670Smm{ 1195207670Smm return (zpool_export_common(zhp, force, B_FALSE)); 1196207670Smm} 1197207670Smm 1198207670Smmint 1199207670Smmzpool_export_force(zpool_handle_t *zhp) 1200207670Smm{ 1201207670Smm return (zpool_export_common(zhp, B_TRUE, B_TRUE)); 1202207670Smm} 1203207670Smm 1204168404Spjd/* 1205185029Spjd * zpool_import() is a contracted interface. Should be kept the same 1206185029Spjd * if possible. 
1207185029Spjd * 1208185029Spjd * Applications should use zpool_import_props() to import a pool with 1209185029Spjd * new properties value to be set. 1210168404Spjd */ 1211168404Spjdint 1212168404Spjdzpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1213185029Spjd char *altroot) 1214168404Spjd{ 1215185029Spjd nvlist_t *props = NULL; 1216185029Spjd int ret; 1217185029Spjd 1218185029Spjd if (altroot != NULL) { 1219185029Spjd if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1220185029Spjd return (zfs_error_fmt(hdl, EZFS_NOMEM, 1221185029Spjd dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1222185029Spjd newname)); 1223185029Spjd } 1224185029Spjd 1225185029Spjd if (nvlist_add_string(props, 1226209962Smm zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1227209962Smm nvlist_add_string(props, 1228209962Smm zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1229185029Spjd nvlist_free(props); 1230185029Spjd return (zfs_error_fmt(hdl, EZFS_NOMEM, 1231185029Spjd dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1232185029Spjd newname)); 1233185029Spjd } 1234185029Spjd } 1235185029Spjd 1236185029Spjd ret = zpool_import_props(hdl, config, newname, props, B_FALSE); 1237185029Spjd if (props) 1238185029Spjd nvlist_free(props); 1239185029Spjd return (ret); 1240185029Spjd} 1241185029Spjd 1242185029Spjd/* 1243185029Spjd * Import the given pool using the known configuration and a list of 1244185029Spjd * properties to be set. The configuration should have come from 1245185029Spjd * zpool_find_import(). The 'newname' parameters control whether the pool 1246185029Spjd * is imported with a different name. 
1247185029Spjd */ 1248185029Spjdint 1249185029Spjdzpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1250185029Spjd nvlist_t *props, boolean_t importfaulted) 1251185029Spjd{ 1252168404Spjd zfs_cmd_t zc = { 0 }; 1253168404Spjd char *thename; 1254168404Spjd char *origname; 1255168404Spjd int ret; 1256185029Spjd char errbuf[1024]; 1257168404Spjd 1258168404Spjd verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1259168404Spjd &origname) == 0); 1260168404Spjd 1261185029Spjd (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1262185029Spjd "cannot import pool '%s'"), origname); 1263185029Spjd 1264168404Spjd if (newname != NULL) { 1265168404Spjd if (!zpool_name_valid(hdl, B_FALSE, newname)) 1266168404Spjd return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1267168404Spjd dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1268168404Spjd newname)); 1269168404Spjd thename = (char *)newname; 1270168404Spjd } else { 1271168404Spjd thename = origname; 1272168404Spjd } 1273168404Spjd 1274185029Spjd if (props) { 1275185029Spjd uint64_t version; 1276168404Spjd 1277185029Spjd verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1278185029Spjd &version) == 0); 1279185029Spjd 1280185029Spjd if ((props = zpool_valid_proplist(hdl, origname, 1281185029Spjd props, version, B_TRUE, errbuf)) == NULL) { 1282185029Spjd return (-1); 1283185029Spjd } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1284185029Spjd nvlist_free(props); 1285185029Spjd return (-1); 1286185029Spjd } 1287185029Spjd } 1288185029Spjd 1289168404Spjd (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1290168404Spjd 1291168404Spjd verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1292168404Spjd &zc.zc_guid) == 0); 1293168404Spjd 1294185029Spjd if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1295185029Spjd nvlist_free(props); 1296168404Spjd return (-1); 1297185029Spjd } 1298168404Spjd 1299185029Spjd zc.zc_cookie = (uint64_t)importfaulted; 1300168404Spjd 
ret = 0; 1301185029Spjd if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) { 1302168404Spjd char desc[1024]; 1303168404Spjd if (newname == NULL) 1304168404Spjd (void) snprintf(desc, sizeof (desc), 1305168404Spjd dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1306168404Spjd thename); 1307168404Spjd else 1308168404Spjd (void) snprintf(desc, sizeof (desc), 1309168404Spjd dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1310168404Spjd origname, thename); 1311168404Spjd 1312168404Spjd switch (errno) { 1313168404Spjd case ENOTSUP: 1314168404Spjd /* 1315168404Spjd * Unsupported version. 1316168404Spjd */ 1317168404Spjd (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1318168404Spjd break; 1319168404Spjd 1320168404Spjd case EINVAL: 1321168404Spjd (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1322168404Spjd break; 1323168404Spjd 1324168404Spjd default: 1325168404Spjd (void) zpool_standard_error(hdl, errno, desc); 1326168404Spjd } 1327168404Spjd 1328168404Spjd ret = -1; 1329168404Spjd } else { 1330168404Spjd zpool_handle_t *zhp; 1331185029Spjd 1332168404Spjd /* 1333168404Spjd * This should never fail, but play it safe anyway. 1334168404Spjd */ 1335168404Spjd if (zpool_open_silent(hdl, thename, &zhp) != 0) { 1336168404Spjd ret = -1; 1337168404Spjd } else if (zhp != NULL) { 1338168404Spjd ret = zpool_create_zvol_links(zhp); 1339168404Spjd zpool_close(zhp); 1340168404Spjd } 1341185029Spjd 1342168404Spjd } 1343168404Spjd 1344168404Spjd zcmd_free_nvlists(&zc); 1345185029Spjd nvlist_free(props); 1346185029Spjd 1347168404Spjd return (ret); 1348168404Spjd} 1349168404Spjd 1350168404Spjd/* 1351168404Spjd * Scrub the pool. 
1352168404Spjd */ 1353168404Spjdint 1354168404Spjdzpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type) 1355168404Spjd{ 1356168404Spjd zfs_cmd_t zc = { 0 }; 1357168404Spjd char msg[1024]; 1358168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 1359168404Spjd 1360168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1361168404Spjd zc.zc_cookie = type; 1362168404Spjd 1363185029Spjd if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0) 1364168404Spjd return (0); 1365168404Spjd 1366168404Spjd (void) snprintf(msg, sizeof (msg), 1367168404Spjd dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name); 1368168404Spjd 1369168404Spjd if (errno == EBUSY) 1370168404Spjd return (zfs_error(hdl, EZFS_RESILVERING, msg)); 1371168404Spjd else 1372168404Spjd return (zpool_standard_error(hdl, errno, msg)); 1373168404Spjd} 1374168404Spjd 1375168404Spjd/* 1376168404Spjd * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL 1377168404Spjd * spare; but FALSE if its an INUSE spare. 1378168404Spjd */ 1379168404Spjdstatic nvlist_t * 1380168404Spjdvdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid, 1381185029Spjd boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 1382168404Spjd{ 1383168404Spjd uint_t c, children; 1384168404Spjd nvlist_t **child; 1385168404Spjd uint64_t theguid, present; 1386168404Spjd char *path; 1387168404Spjd uint64_t wholedisk = 0; 1388168404Spjd nvlist_t *ret; 1389185029Spjd uint64_t is_log; 1390168404Spjd 1391168404Spjd verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0); 1392168404Spjd 1393168404Spjd if (search == NULL && 1394168404Spjd nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) { 1395168404Spjd /* 1396168404Spjd * If the device has never been present since import, the only 1397168404Spjd * reliable way to match the vdev is by GUID. 
1398168404Spjd */ 1399168404Spjd if (theguid == guid) 1400168404Spjd return (nv); 1401168404Spjd } else if (search != NULL && 1402168404Spjd nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 1403168404Spjd (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 1404168404Spjd &wholedisk); 1405168404Spjd if (wholedisk) { 1406168404Spjd /* 1407168404Spjd * For whole disks, the internal path has 's0', but the 1408168404Spjd * path passed in by the user doesn't. 1409168404Spjd */ 1410168404Spjd if (strlen(search) == strlen(path) - 2 && 1411168404Spjd strncmp(search, path, strlen(search)) == 0) 1412168404Spjd return (nv); 1413168404Spjd } else if (strcmp(search, path) == 0) { 1414168404Spjd return (nv); 1415168404Spjd } 1416168404Spjd } 1417168404Spjd 1418168404Spjd if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1419168404Spjd &child, &children) != 0) 1420168404Spjd return (NULL); 1421168404Spjd 1422185029Spjd for (c = 0; c < children; c++) { 1423168404Spjd if ((ret = vdev_to_nvlist_iter(child[c], search, guid, 1424185029Spjd avail_spare, l2cache, NULL)) != NULL) { 1425185029Spjd /* 1426185029Spjd * The 'is_log' value is only set for the toplevel 1427185029Spjd * vdev, not the leaf vdevs. So we always lookup the 1428185029Spjd * log device from the root of the vdev tree (where 1429185029Spjd * 'log' is non-NULL). 
1430185029Spjd */ 1431185029Spjd if (log != NULL && 1432185029Spjd nvlist_lookup_uint64(child[c], 1433185029Spjd ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 1434185029Spjd is_log) { 1435185029Spjd *log = B_TRUE; 1436185029Spjd } 1437168404Spjd return (ret); 1438185029Spjd } 1439185029Spjd } 1440168404Spjd 1441168404Spjd if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 1442168404Spjd &child, &children) == 0) { 1443168404Spjd for (c = 0; c < children; c++) { 1444168404Spjd if ((ret = vdev_to_nvlist_iter(child[c], search, guid, 1445185029Spjd avail_spare, l2cache, NULL)) != NULL) { 1446168404Spjd *avail_spare = B_TRUE; 1447168404Spjd return (ret); 1448168404Spjd } 1449168404Spjd } 1450168404Spjd } 1451168404Spjd 1452185029Spjd if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 1453185029Spjd &child, &children) == 0) { 1454185029Spjd for (c = 0; c < children; c++) { 1455185029Spjd if ((ret = vdev_to_nvlist_iter(child[c], search, guid, 1456185029Spjd avail_spare, l2cache, NULL)) != NULL) { 1457185029Spjd *l2cache = B_TRUE; 1458185029Spjd return (ret); 1459185029Spjd } 1460185029Spjd } 1461185029Spjd } 1462185029Spjd 1463168404Spjd return (NULL); 1464168404Spjd} 1465168404Spjd 1466168404Spjdnvlist_t * 1467185029Spjdzpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 1468185029Spjd boolean_t *l2cache, boolean_t *log) 1469168404Spjd{ 1470168404Spjd char buf[MAXPATHLEN]; 1471168404Spjd const char *search; 1472168404Spjd char *end; 1473168404Spjd nvlist_t *nvroot; 1474168404Spjd uint64_t guid; 1475168404Spjd 1476168404Spjd guid = strtoull(path, &end, 10); 1477168404Spjd if (guid != 0 && *end == '\0') { 1478168404Spjd search = NULL; 1479168404Spjd } else if (path[0] != '/') { 1480168404Spjd (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path); 1481168404Spjd search = buf; 1482168404Spjd } else { 1483168404Spjd search = path; 1484168404Spjd } 1485168404Spjd 1486168404Spjd verify(nvlist_lookup_nvlist(zhp->zpool_config, 
ZPOOL_CONFIG_VDEV_TREE, 1487168404Spjd &nvroot) == 0); 1488168404Spjd 1489168404Spjd *avail_spare = B_FALSE; 1490185029Spjd *l2cache = B_FALSE; 1491185029Spjd if (log != NULL) 1492185029Spjd *log = B_FALSE; 1493185029Spjd return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare, 1494185029Spjd l2cache, log)); 1495168404Spjd} 1496168404Spjd 1497185029Spjdstatic int 1498185029Spjdvdev_online(nvlist_t *nv) 1499185029Spjd{ 1500185029Spjd uint64_t ival; 1501185029Spjd 1502185029Spjd if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 1503185029Spjd nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 1504185029Spjd nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 1505185029Spjd return (0); 1506185029Spjd 1507185029Spjd return (1); 1508185029Spjd} 1509185029Spjd 1510168404Spjd/* 1511185029Spjd * Get phys_path for a root pool 1512185029Spjd * Return 0 on success; non-zeron on failure. 1513168404Spjd */ 1514185029Spjdint 1515185029Spjdzpool_get_physpath(zpool_handle_t *zhp, char *physpath) 1516185029Spjd{ 1517185029Spjd nvlist_t *vdev_root; 1518185029Spjd nvlist_t **child; 1519185029Spjd uint_t count; 1520185029Spjd int i; 1521185029Spjd 1522185029Spjd /* 1523185029Spjd * Make sure this is a root pool, as phys_path doesn't mean 1524185029Spjd * anything to a non-root pool. 
1525185029Spjd */ 1526209962Smm if (!pool_is_bootable(zhp)) 1527185029Spjd return (-1); 1528185029Spjd 1529185029Spjd verify(nvlist_lookup_nvlist(zhp->zpool_config, 1530185029Spjd ZPOOL_CONFIG_VDEV_TREE, &vdev_root) == 0); 1531185029Spjd 1532185029Spjd if (nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 1533185029Spjd &child, &count) != 0) 1534185029Spjd return (-2); 1535185029Spjd 1536185029Spjd for (i = 0; i < count; i++) { 1537185029Spjd nvlist_t **child2; 1538185029Spjd uint_t count2; 1539185029Spjd char *type; 1540185029Spjd char *tmppath; 1541185029Spjd int j; 1542185029Spjd 1543185029Spjd if (nvlist_lookup_string(child[i], ZPOOL_CONFIG_TYPE, &type) 1544185029Spjd != 0) 1545185029Spjd return (-3); 1546185029Spjd 1547185029Spjd if (strcmp(type, VDEV_TYPE_DISK) == 0) { 1548185029Spjd if (!vdev_online(child[i])) 1549185029Spjd return (-8); 1550185029Spjd verify(nvlist_lookup_string(child[i], 1551185029Spjd ZPOOL_CONFIG_PHYS_PATH, &tmppath) == 0); 1552185029Spjd (void) strncpy(physpath, tmppath, strlen(tmppath)); 1553185029Spjd } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0) { 1554185029Spjd if (nvlist_lookup_nvlist_array(child[i], 1555185029Spjd ZPOOL_CONFIG_CHILDREN, &child2, &count2) != 0) 1556185029Spjd return (-4); 1557185029Spjd 1558185029Spjd for (j = 0; j < count2; j++) { 1559185029Spjd if (!vdev_online(child2[j])) 1560185029Spjd return (-8); 1561185029Spjd if (nvlist_lookup_string(child2[j], 1562185029Spjd ZPOOL_CONFIG_PHYS_PATH, &tmppath) != 0) 1563185029Spjd return (-5); 1564185029Spjd 1565185029Spjd if ((strlen(physpath) + strlen(tmppath)) > 1566185029Spjd MAXNAMELEN) 1567185029Spjd return (-6); 1568185029Spjd 1569185029Spjd if (strlen(physpath) == 0) { 1570185029Spjd (void) strncpy(physpath, tmppath, 1571185029Spjd strlen(tmppath)); 1572185029Spjd } else { 1573185029Spjd (void) strcat(physpath, " "); 1574185029Spjd (void) strcat(physpath, tmppath); 1575185029Spjd } 1576185029Spjd } 1577185029Spjd } else { 1578185029Spjd return (-7); 
1579185029Spjd } 1580185029Spjd } 1581185029Spjd 1582185029Spjd return (0); 1583185029Spjd} 1584185029Spjd 1585185029Spjd/* 1586185029Spjd * Returns TRUE if the given guid corresponds to the given type. 1587185029Spjd * This is used to check for hot spares (INUSE or not), and level 2 cache 1588185029Spjd * devices. 1589185029Spjd */ 1590168404Spjdstatic boolean_t 1591185029Spjdis_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type) 1592168404Spjd{ 1593185029Spjd uint64_t target_guid; 1594168404Spjd nvlist_t *nvroot; 1595185029Spjd nvlist_t **list; 1596185029Spjd uint_t count; 1597168404Spjd int i; 1598168404Spjd 1599168404Spjd verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 1600168404Spjd &nvroot) == 0); 1601185029Spjd if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) { 1602185029Spjd for (i = 0; i < count; i++) { 1603185029Spjd verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID, 1604185029Spjd &target_guid) == 0); 1605185029Spjd if (guid == target_guid) 1606168404Spjd return (B_TRUE); 1607168404Spjd } 1608168404Spjd } 1609168404Spjd 1610168404Spjd return (B_FALSE); 1611168404Spjd} 1612168404Spjd 1613168404Spjd/* 1614185029Spjd * Bring the specified vdev online. The 'flags' parameter is a set of the 1615185029Spjd * ZFS_ONLINE_* flags. 
1616168404Spjd */ 1617168404Spjdint 1618185029Spjdzpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 1619185029Spjd vdev_state_t *newstate) 1620168404Spjd{ 1621168404Spjd zfs_cmd_t zc = { 0 }; 1622168404Spjd char msg[1024]; 1623168404Spjd nvlist_t *tgt; 1624185029Spjd boolean_t avail_spare, l2cache; 1625168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 1626168404Spjd 1627168404Spjd (void) snprintf(msg, sizeof (msg), 1628168404Spjd dgettext(TEXT_DOMAIN, "cannot online %s"), path); 1629168404Spjd 1630168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1631185029Spjd if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 1632185029Spjd NULL)) == NULL) 1633168404Spjd return (zfs_error(hdl, EZFS_NODEVICE, msg)); 1634168404Spjd 1635168404Spjd verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 1636168404Spjd 1637185029Spjd if (avail_spare || 1638185029Spjd is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE) 1639168404Spjd return (zfs_error(hdl, EZFS_ISSPARE, msg)); 1640168404Spjd 1641185029Spjd zc.zc_cookie = VDEV_STATE_ONLINE; 1642185029Spjd zc.zc_obj = flags; 1643168404Spjd 1644185029Spjd if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) 1645185029Spjd return (zpool_standard_error(hdl, errno, msg)); 1646185029Spjd 1647185029Spjd *newstate = zc.zc_cookie; 1648185029Spjd return (0); 1649168404Spjd} 1650168404Spjd 1651168404Spjd/* 1652168404Spjd * Take the specified vdev offline 1653168404Spjd */ 1654168404Spjdint 1655185029Spjdzpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 1656168404Spjd{ 1657168404Spjd zfs_cmd_t zc = { 0 }; 1658168404Spjd char msg[1024]; 1659168404Spjd nvlist_t *tgt; 1660185029Spjd boolean_t avail_spare, l2cache; 1661168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 1662168404Spjd 1663168404Spjd (void) snprintf(msg, sizeof (msg), 1664168404Spjd dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 1665168404Spjd 1666168404Spjd (void) 
strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1667185029Spjd if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 1668185029Spjd NULL)) == NULL) 1669168404Spjd return (zfs_error(hdl, EZFS_NODEVICE, msg)); 1670168404Spjd 1671168404Spjd verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 1672168404Spjd 1673185029Spjd if (avail_spare || 1674185029Spjd is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE) 1675168404Spjd return (zfs_error(hdl, EZFS_ISSPARE, msg)); 1676168404Spjd 1677185029Spjd zc.zc_cookie = VDEV_STATE_OFFLINE; 1678185029Spjd zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0; 1679168404Spjd 1680185029Spjd if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 1681168404Spjd return (0); 1682168404Spjd 1683168404Spjd switch (errno) { 1684168404Spjd case EBUSY: 1685168404Spjd 1686168404Spjd /* 1687168404Spjd * There are no other replicas of this device. 1688168404Spjd */ 1689168404Spjd return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 1690168404Spjd 1691168404Spjd default: 1692168404Spjd return (zpool_standard_error(hdl, errno, msg)); 1693168404Spjd } 1694168404Spjd} 1695168404Spjd 1696168404Spjd/* 1697185029Spjd * Mark the given vdev faulted. 
1698185029Spjd */ 1699185029Spjdint 1700185029Spjdzpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid) 1701185029Spjd{ 1702185029Spjd zfs_cmd_t zc = { 0 }; 1703185029Spjd char msg[1024]; 1704185029Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 1705185029Spjd 1706185029Spjd (void) snprintf(msg, sizeof (msg), 1707185029Spjd dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 1708185029Spjd 1709185029Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1710185029Spjd zc.zc_guid = guid; 1711185029Spjd zc.zc_cookie = VDEV_STATE_FAULTED; 1712185029Spjd 1713185029Spjd if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 1714185029Spjd return (0); 1715185029Spjd 1716185029Spjd switch (errno) { 1717185029Spjd case EBUSY: 1718185029Spjd 1719185029Spjd /* 1720185029Spjd * There are no other replicas of this device. 1721185029Spjd */ 1722185029Spjd return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 1723185029Spjd 1724185029Spjd default: 1725185029Spjd return (zpool_standard_error(hdl, errno, msg)); 1726185029Spjd } 1727185029Spjd 1728185029Spjd} 1729185029Spjd 1730185029Spjd/* 1731185029Spjd * Mark the given vdev degraded. 
1732185029Spjd */ 1733185029Spjdint 1734185029Spjdzpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid) 1735185029Spjd{ 1736185029Spjd zfs_cmd_t zc = { 0 }; 1737185029Spjd char msg[1024]; 1738185029Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 1739185029Spjd 1740185029Spjd (void) snprintf(msg, sizeof (msg), 1741185029Spjd dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 1742185029Spjd 1743185029Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1744185029Spjd zc.zc_guid = guid; 1745185029Spjd zc.zc_cookie = VDEV_STATE_DEGRADED; 1746185029Spjd 1747185029Spjd if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 1748185029Spjd return (0); 1749185029Spjd 1750185029Spjd return (zpool_standard_error(hdl, errno, msg)); 1751185029Spjd} 1752185029Spjd 1753185029Spjd/* 1754168404Spjd * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 1755168404Spjd * a hot spare. 1756168404Spjd */ 1757168404Spjdstatic boolean_t 1758168404Spjdis_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 1759168404Spjd{ 1760168404Spjd nvlist_t **child; 1761168404Spjd uint_t c, children; 1762168404Spjd char *type; 1763168404Spjd 1764168404Spjd if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 1765168404Spjd &children) == 0) { 1766168404Spjd verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 1767168404Spjd &type) == 0); 1768168404Spjd 1769168404Spjd if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 1770168404Spjd children == 2 && child[which] == tgt) 1771168404Spjd return (B_TRUE); 1772168404Spjd 1773168404Spjd for (c = 0; c < children; c++) 1774168404Spjd if (is_replacing_spare(child[c], tgt, which)) 1775168404Spjd return (B_TRUE); 1776168404Spjd } 1777168404Spjd 1778168404Spjd return (B_FALSE); 1779168404Spjd} 1780168404Spjd 1781168404Spjd/* 1782168404Spjd * Attach new_disk (fully described by nvroot) to old_disk. 1783185029Spjd * If 'replacing' is specified, the new disk will replace the old one. 
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *path, *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = pool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}

	/* Locate the device being attached to; it may not be a spare/cache. */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	/* The caller-supplied vdev tree must describe exactly one disk. */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	/* Caller owns 'newname'; freed on every exit path below. */
	if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	/*
	 * If we are attempting to replace a spare, it cannot be applied to an
	 * already spared device.
	 */
	if (replacing &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    zpool_find_vdev(zhp, newname, &avail_spare,
	    &l2cache, NULL) != NULL && avail_spare &&
	    is_replacing_spare(config_root, tgt, 0)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device has already been replaced with a spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX - This should be removed once we can
			 * automatically install the bootblocks on the
			 * newly attached disk.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
			    "be sure to invoke %s to make '%s' bootable.\n"),
			    BOOTCMD, new_disk);
		}
		return (0);
	}

	/* Map kernel errno values onto friendlier libzfs diagnostics. */
	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	/* Spares and cache devices are removed, not detached. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	/* Only spares and cache devices may be removed from a live pool. */
	if (!avail_spare && !l2cache) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares or cache devices "
		    "can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Clear the errors for the pool, or the particular device if specified.
 * A NULL 'path' clears error counts for the entire pool.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		/* zc_guid == 0 (unset) means "whole pool" to the kernel. */
		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
 * hierarchy.  'cb' is invoked once per leaf (non-directory) entry with the
 * pool-relative path; a non-zero return from 'cb' aborts the walk and is
 * propagated to the caller.  Returns 0 on success (or if the hierarchy does
 * not exist), -1 on internal error.
 */
int
zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
    void *data)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char (*paths)[MAXPATHLEN];
	char path[MAXPATHLEN];
	size_t size = 4;
	int curr, fd, base, ret = 0;
	DIR *dirp;
	struct dirent *dp;
	struct stat st;

	/* Existence probe only; 'base' is not otherwise read below. */
	if ((base = open(ZVOL_FULL_DEV_DIR, O_RDONLY)) < 0)
		return (errno == ENOENT ? 0 : -1);

	snprintf(path, sizeof(path), "%s/%s", ZVOL_FULL_DEV_DIR,
	    zhp->zpool_name);
	if (stat(path, &st) != 0) {
		int err = errno;
		(void) close(base);
		return (err == ENOENT ? 0 : -1);
	}

	/*
	 * Oddly this wasn't a directory -- ignore that failure since we
	 * know there are no links lower in the (non-existent) hierarchy.
	 */
	if (!S_ISDIR(st.st_mode)) {
		(void) close(base);
		return (0);
	}

	/* 'paths' is an explicit DFS stack of pool-relative path strings. */
	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
		(void) close(base);
		return (-1);
	}

	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
	curr = 0;

	while (curr >= 0) {
		snprintf(path, sizeof(path), "%s/%s", ZVOL_FULL_DEV_DIR,
		    paths[curr]);
		if (lstat(path, &st) != 0)
			goto err;

		if (S_ISDIR(st.st_mode)) {
			if ((dirp = opendir(path)) == NULL) {
				goto err;
			}

			while ((dp = readdir(dirp)) != NULL) {
				if (dp->d_name[0] == '.')
					continue;

				/* Grow the stack geometrically as needed. */
				if (curr + 1 == size) {
					paths = zfs_realloc(hdl, paths,
					    size * sizeof (paths[0]),
					    size * 2 * sizeof (paths[0]));
					if (paths == NULL) {
						(void) closedir(dirp);
						goto err;
					}

					size *= 2;
				}

				/*
				 * Duplicate the current entry, then extend the
				 * in-place copy with this child's name.  The
				 * duplicate at curr+1 preserves the parent so
				 * remaining siblings can be expanded later.
				 */
				(void) strlcpy(paths[curr + 1], paths[curr],
				    sizeof (paths[curr + 1]));
				(void) strlcat(paths[curr], "/",
				    sizeof (paths[curr]));
				(void) strlcat(paths[curr], dp->d_name,
				    sizeof (paths[curr]));
				curr++;
			}

			(void) closedir(dirp);

		} else {
			if ((ret = cb(paths[curr], data)) != 0)
				break;
		}

		curr--;
	}

	free(paths);
	(void) close(base);

	return (ret);

err:
	free(paths);
	(void) close(base);
	return (-1);
}

/* Callback state for zvol link iteration. */
typedef struct zvol_cb {
	zpool_handle_t *zcb_pool;	/* pool being walked */
	boolean_t zcb_create;		/* B_TRUE == create links */
} zvol_cb_t;

/*ARGSUSED*/
static int
do_zvol_create(zfs_handle_t *zhp, void *data)
{
	int ret = 0;

	/* Create the /dev link for volumes, then recurse into snapshots. */
	if (ZFS_IS_VOLUME(zhp)) {
		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
		ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
	}

	if (ret == 0)
		ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);

	zfs_close(zhp);

	return (ret);
}

/*
 * Iterate over all zvols in the pool and make any necessary minor nodes.
 */
int
zpool_create_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
	    zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);

	zfs_close(zfp);
	return (ret);
}

/* zpool_iter_zvol() callback: remove the /dev link for one dataset. */
static int
do_zvol_remove(const char *dataset, void *data)
{
	zpool_handle_t *zhp = data;

	return (zvol_remove_link(zhp->zpool_hdl, dataset));
}

/*
 * Iterate over all zvols in the pool and remove any minor nodes.
We iterate 2283168404Spjd * by examining the /dev links so that a corrupted pool doesn't impede this 2284168404Spjd * operation. 2285168404Spjd */ 2286168404Spjdint 2287168404Spjdzpool_remove_zvol_links(zpool_handle_t *zhp) 2288168404Spjd{ 2289168404Spjd return (zpool_iter_zvol(zhp, do_zvol_remove, zhp)); 2290168404Spjd} 2291168404Spjd 2292168404Spjd/* 2293168404Spjd * Convert from a devid string to a path. 2294168404Spjd */ 2295168404Spjdstatic char * 2296168404Spjddevid_to_path(char *devid_str) 2297168404Spjd{ 2298168404Spjd ddi_devid_t devid; 2299168404Spjd char *minor; 2300168404Spjd char *path; 2301168404Spjd devid_nmlist_t *list = NULL; 2302168404Spjd int ret; 2303168404Spjd 2304168404Spjd if (devid_str_decode(devid_str, &devid, &minor) != 0) 2305168404Spjd return (NULL); 2306168404Spjd 2307168404Spjd ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 2308168404Spjd 2309168404Spjd devid_str_free(minor); 2310168404Spjd devid_free(devid); 2311168404Spjd 2312168404Spjd if (ret != 0) 2313168404Spjd return (NULL); 2314168404Spjd 2315168404Spjd if ((path = strdup(list[0].devname)) == NULL) 2316168404Spjd return (NULL); 2317168404Spjd 2318168404Spjd devid_free_nmlist(list); 2319168404Spjd 2320168404Spjd return (path); 2321168404Spjd} 2322168404Spjd 2323168404Spjd/* 2324168404Spjd * Convert from a path to a devid string. 
2325168404Spjd */ 2326168404Spjdstatic char * 2327168404Spjdpath_to_devid(const char *path) 2328168404Spjd{ 2329168404Spjd int fd; 2330168404Spjd ddi_devid_t devid; 2331168404Spjd char *minor, *ret; 2332168404Spjd 2333168404Spjd if ((fd = open(path, O_RDONLY)) < 0) 2334168404Spjd return (NULL); 2335168404Spjd 2336168404Spjd minor = NULL; 2337168404Spjd ret = NULL; 2338168404Spjd if (devid_get(fd, &devid) == 0) { 2339168404Spjd if (devid_get_minor_name(fd, &minor) == 0) 2340168404Spjd ret = devid_str_encode(devid, minor); 2341168404Spjd if (minor != NULL) 2342168404Spjd devid_str_free(minor); 2343168404Spjd devid_free(devid); 2344168404Spjd } 2345168404Spjd (void) close(fd); 2346168404Spjd 2347168404Spjd return (ret); 2348168404Spjd} 2349168404Spjd 2350168404Spjd/* 2351168404Spjd * Issue the necessary ioctl() to update the stored path value for the vdev. We 2352168404Spjd * ignore any failure here, since a common case is for an unprivileged user to 2353168404Spjd * type 'zpool status', and we'll display the correct information anyway. 2354168404Spjd */ 2355168404Spjdstatic void 2356168404Spjdset_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 2357168404Spjd{ 2358168404Spjd zfs_cmd_t zc = { 0 }; 2359168404Spjd 2360168404Spjd (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2361168404Spjd (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 2362168404Spjd verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 2363168404Spjd &zc.zc_guid) == 0); 2364168404Spjd 2365168404Spjd (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 2366168404Spjd} 2367168404Spjd 2368168404Spjd/* 2369168404Spjd * Given a vdev, return the name to display in iostat. If the vdev has a path, 2370168404Spjd * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 2371168404Spjd * We also check if this is a whole disk, in which case we strip off the 2372168404Spjd * trailing 's0' slice name. 
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 *
 * Returns a string allocated via zfs_strdup(); the caller must free it.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];
	vdev_stat_t *vs;
	uint_t vsc;

	/* A missing device is identified by its GUID rather than a path. */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		/* Strip the leading device-directory prefix for display. */
		if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
			path += sizeof(_PATH_DEV) - 1;

		/*
		 * For whole disks, drop the trailing two characters
		 * (the "s0" slice suffix) before returning.
		 */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			char *tmp = zfs_strdup(hdl, path);
			if (tmp == NULL)
				return (NULL);
			tmp[strlen(path) - 2] = '\0';
			return (tmp);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}

/* qsort() comparator: raw byte ordering of zbookmark_t structures. */
static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.  On success, *nverrlistp is a newly allocated nvlist of
 * (dataset, object) pairs which the caller must free.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				/* Kernel told us the larger size required. */
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}

/*
 * Upgrade a ZFS pool to the latest on-disk version.
2573168404Spjd */ 2574168404Spjdint 2575185029Spjdzpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 2576168404Spjd{ 2577168404Spjd zfs_cmd_t zc = { 0 }; 2578168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 2579168404Spjd 2580168404Spjd (void) strcpy(zc.zc_name, zhp->zpool_name); 2581185029Spjd zc.zc_cookie = new_version; 2582185029Spjd 2583185029Spjd if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 2584168404Spjd return (zpool_standard_error_fmt(hdl, errno, 2585168404Spjd dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 2586168404Spjd zhp->zpool_name)); 2587168404Spjd return (0); 2588168404Spjd} 2589168404Spjd 2590168404Spjdvoid 2591185029Spjdzpool_set_history_str(const char *subcommand, int argc, char **argv, 2592185029Spjd char *history_str) 2593168404Spjd{ 2594168404Spjd int i; 2595168404Spjd 2596185029Spjd (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN); 2597185029Spjd for (i = 1; i < argc; i++) { 2598185029Spjd if (strlen(history_str) + 1 + strlen(argv[i]) > 2599185029Spjd HIS_MAX_RECORD_LEN) 2600168404Spjd break; 2601185029Spjd (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN); 2602185029Spjd (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN); 2603168404Spjd } 2604185029Spjd} 2605168404Spjd 2606185029Spjd/* 2607185029Spjd * Stage command history for logging. 
2608185029Spjd */ 2609185029Spjdint 2610185029Spjdzpool_stage_history(libzfs_handle_t *hdl, const char *history_str) 2611185029Spjd{ 2612185029Spjd if (history_str == NULL) 2613185029Spjd return (EINVAL); 2614168404Spjd 2615185029Spjd if (strlen(history_str) > HIS_MAX_RECORD_LEN) 2616185029Spjd return (EINVAL); 2617168404Spjd 2618185029Spjd if (hdl->libzfs_log_str != NULL) 2619185029Spjd free(hdl->libzfs_log_str); 2620168404Spjd 2621185029Spjd if ((hdl->libzfs_log_str = strdup(history_str)) == NULL) 2622185029Spjd return (no_memory(hdl)); 2623185029Spjd 2624185029Spjd return (0); 2625168404Spjd} 2626168404Spjd 2627168404Spjd/* 2628168404Spjd * Perform ioctl to get some command history of a pool. 2629168404Spjd * 2630168404Spjd * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 2631168404Spjd * logical offset of the history buffer to start reading from. 2632168404Spjd * 2633168404Spjd * Upon return, 'off' is the next logical offset to read from and 2634168404Spjd * 'len' is the actual amount of bytes read into 'buf'. 
2635168404Spjd */ 2636168404Spjdstatic int 2637168404Spjdget_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 2638168404Spjd{ 2639168404Spjd zfs_cmd_t zc = { 0 }; 2640168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 2641168404Spjd 2642168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2643168404Spjd 2644168404Spjd zc.zc_history = (uint64_t)(uintptr_t)buf; 2645168404Spjd zc.zc_history_len = *len; 2646168404Spjd zc.zc_history_offset = *off; 2647168404Spjd 2648168404Spjd if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 2649168404Spjd switch (errno) { 2650168404Spjd case EPERM: 2651168404Spjd return (zfs_error_fmt(hdl, EZFS_PERM, 2652168404Spjd dgettext(TEXT_DOMAIN, 2653168404Spjd "cannot show history for pool '%s'"), 2654168404Spjd zhp->zpool_name)); 2655168404Spjd case ENOENT: 2656168404Spjd return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 2657168404Spjd dgettext(TEXT_DOMAIN, "cannot get history for pool " 2658168404Spjd "'%s'"), zhp->zpool_name)); 2659168404Spjd case ENOTSUP: 2660168404Spjd return (zfs_error_fmt(hdl, EZFS_BADVERSION, 2661168404Spjd dgettext(TEXT_DOMAIN, "cannot get history for pool " 2662168404Spjd "'%s', pool must be upgraded"), zhp->zpool_name)); 2663168404Spjd default: 2664168404Spjd return (zpool_standard_error_fmt(hdl, errno, 2665168404Spjd dgettext(TEXT_DOMAIN, 2666168404Spjd "cannot get history for '%s'"), zhp->zpool_name)); 2667168404Spjd } 2668168404Spjd } 2669168404Spjd 2670168404Spjd *len = zc.zc_history_len; 2671168404Spjd *off = zc.zc_history_offset; 2672168404Spjd 2673168404Spjd return (0); 2674168404Spjd} 2675168404Spjd 2676168404Spjd/* 2677168404Spjd * Process the buffer of nvlists, unpacking and storing each nvlist record 2678168404Spjd * into 'records'. 'leftover' is set to the number of bytes that weren't 2679168404Spjd * processed as there wasn't a complete record. 
2680168404Spjd */ 2681168404Spjdstatic int 2682168404Spjdzpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 2683168404Spjd nvlist_t ***records, uint_t *numrecords) 2684168404Spjd{ 2685168404Spjd uint64_t reclen; 2686168404Spjd nvlist_t *nv; 2687168404Spjd int i; 2688168404Spjd 2689168404Spjd while (bytes_read > sizeof (reclen)) { 2690168404Spjd 2691168404Spjd /* get length of packed record (stored as little endian) */ 2692168404Spjd for (i = 0, reclen = 0; i < sizeof (reclen); i++) 2693168404Spjd reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 2694168404Spjd 2695168404Spjd if (bytes_read < sizeof (reclen) + reclen) 2696168404Spjd break; 2697168404Spjd 2698168404Spjd /* unpack record */ 2699168404Spjd if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 2700168404Spjd return (ENOMEM); 2701168404Spjd bytes_read -= sizeof (reclen) + reclen; 2702168404Spjd buf += sizeof (reclen) + reclen; 2703168404Spjd 2704168404Spjd /* add record to nvlist array */ 2705168404Spjd (*numrecords)++; 2706168404Spjd if (ISP2(*numrecords + 1)) { 2707168404Spjd *records = realloc(*records, 2708168404Spjd *numrecords * 2 * sizeof (nvlist_t *)); 2709168404Spjd } 2710168404Spjd (*records)[*numrecords - 1] = nv; 2711168404Spjd } 2712168404Spjd 2713168404Spjd *leftover = bytes_read; 2714168404Spjd return (0); 2715168404Spjd} 2716168404Spjd 2717168404Spjd#define HIS_BUF_LEN (128*1024) 2718168404Spjd 2719168404Spjd/* 2720168404Spjd * Retrieve the command history of a pool. 
2721168404Spjd */ 2722168404Spjdint 2723168404Spjdzpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 2724168404Spjd{ 2725168404Spjd char buf[HIS_BUF_LEN]; 2726168404Spjd uint64_t off = 0; 2727168404Spjd nvlist_t **records = NULL; 2728168404Spjd uint_t numrecords = 0; 2729168404Spjd int err, i; 2730168404Spjd 2731168404Spjd do { 2732168404Spjd uint64_t bytes_read = sizeof (buf); 2733168404Spjd uint64_t leftover; 2734168404Spjd 2735168404Spjd if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 2736168404Spjd break; 2737168404Spjd 2738168404Spjd /* if nothing else was read in, we're at EOF, just return */ 2739168404Spjd if (!bytes_read) 2740168404Spjd break; 2741168404Spjd 2742168404Spjd if ((err = zpool_history_unpack(buf, bytes_read, 2743168404Spjd &leftover, &records, &numrecords)) != 0) 2744168404Spjd break; 2745168404Spjd off -= leftover; 2746168404Spjd 2747168404Spjd /* CONSTCOND */ 2748168404Spjd } while (1); 2749168404Spjd 2750168404Spjd if (!err) { 2751168404Spjd verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 2752168404Spjd verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 2753168404Spjd records, numrecords) == 0); 2754168404Spjd } 2755168404Spjd for (i = 0; i < numrecords; i++) 2756168404Spjd nvlist_free(records[i]); 2757168404Spjd free(records); 2758168404Spjd 2759168404Spjd return (err); 2760168404Spjd} 2761168404Spjd 2762168404Spjdvoid 2763168404Spjdzpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 2764168404Spjd char *pathname, size_t len) 2765168404Spjd{ 2766168404Spjd zfs_cmd_t zc = { 0 }; 2767168404Spjd boolean_t mounted = B_FALSE; 2768168404Spjd char *mntpnt = NULL; 2769168404Spjd char dsname[MAXNAMELEN]; 2770168404Spjd 2771168404Spjd if (dsobj == 0) { 2772168404Spjd /* special case for the MOS */ 2773168404Spjd (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 2774168404Spjd return; 2775168404Spjd } 2776168404Spjd 2777168404Spjd /* get the dataset's name */ 2778168404Spjd (void) 
strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2779168404Spjd zc.zc_obj = dsobj; 2780168404Spjd if (ioctl(zhp->zpool_hdl->libzfs_fd, 2781168404Spjd ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 2782168404Spjd /* just write out a path of two object numbers */ 2783168404Spjd (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 2784168404Spjd dsobj, obj); 2785168404Spjd return; 2786168404Spjd } 2787168404Spjd (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 2788168404Spjd 2789168404Spjd /* find out if the dataset is mounted */ 2790168404Spjd mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 2791168404Spjd 2792168404Spjd /* get the corrupted object's path */ 2793168404Spjd (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 2794168404Spjd zc.zc_obj = obj; 2795168404Spjd if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 2796168404Spjd &zc) == 0) { 2797168404Spjd if (mounted) { 2798168404Spjd (void) snprintf(pathname, len, "%s%s", mntpnt, 2799168404Spjd zc.zc_value); 2800168404Spjd } else { 2801168404Spjd (void) snprintf(pathname, len, "%s:%s", 2802168404Spjd dsname, zc.zc_value); 2803168404Spjd } 2804168404Spjd } else { 2805168404Spjd (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 2806168404Spjd } 2807168404Spjd free(mntpnt); 2808168404Spjd} 2809168404Spjd 2810185029Spjd#define RDISK_ROOT "/dev/rdsk" 2811185029Spjd#define BACKUP_SLICE "s2" 2812185029Spjd/* 2813185029Spjd * Don't start the slice at the default block of 34; many storage 2814185029Spjd * devices will use a stripe width of 128k, so start there instead. 2815185029Spjd */ 2816185029Spjd#define NEW_START_BLOCK 256 2817185029Spjd 2818185029Spjd#if defined(sun) 2819185029Spjd/* 2820185029Spjd * Read the EFI label from the config, if a label does not exist then 2821185029Spjd * pass back the error to the caller. If the caller has passed a non-NULL 2822185029Spjd * diskaddr argument then we set it to the starting address of the EFI 2823185029Spjd * partition. 
2824185029Spjd */ 2825185029Spjdstatic int 2826185029Spjdread_efi_label(nvlist_t *config, diskaddr_t *sb) 2827168404Spjd{ 2828185029Spjd char *path; 2829185029Spjd int fd; 2830185029Spjd char diskname[MAXPATHLEN]; 2831185029Spjd int err = -1; 2832168404Spjd 2833185029Spjd if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 2834185029Spjd return (err); 2835168404Spjd 2836185029Spjd (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT, 2837185029Spjd strrchr(path, '/')); 2838185029Spjd if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 2839185029Spjd struct dk_gpt *vtoc; 2840185029Spjd 2841185029Spjd if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 2842185029Spjd if (sb != NULL) 2843185029Spjd *sb = vtoc->efi_parts[0].p_start; 2844185029Spjd efi_free(vtoc); 2845185029Spjd } 2846185029Spjd (void) close(fd); 2847168404Spjd } 2848185029Spjd return (err); 2849185029Spjd} 2850168404Spjd 2851185029Spjd/* 2852185029Spjd * determine where a partition starts on a disk in the current 2853185029Spjd * configuration 2854185029Spjd */ 2855185029Spjdstatic diskaddr_t 2856185029Spjdfind_start_block(nvlist_t *config) 2857185029Spjd{ 2858185029Spjd nvlist_t **child; 2859185029Spjd uint_t c, children; 2860185029Spjd diskaddr_t sb = MAXOFFSET_T; 2861185029Spjd uint64_t wholedisk; 2862168404Spjd 2863185029Spjd if (nvlist_lookup_nvlist_array(config, 2864185029Spjd ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 2865185029Spjd if (nvlist_lookup_uint64(config, 2866185029Spjd ZPOOL_CONFIG_WHOLE_DISK, 2867185029Spjd &wholedisk) != 0 || !wholedisk) { 2868185029Spjd return (MAXOFFSET_T); 2869185029Spjd } 2870185029Spjd if (read_efi_label(config, &sb) < 0) 2871185029Spjd sb = MAXOFFSET_T; 2872185029Spjd return (sb); 2873168404Spjd } 2874168404Spjd 2875185029Spjd for (c = 0; c < children; c++) { 2876185029Spjd sb = find_start_block(child[c]); 2877185029Spjd if (sb != MAXOFFSET_T) { 2878185029Spjd return (sb); 2879185029Spjd } 2880168404Spjd } 2881185029Spjd return 
(MAXOFFSET_T); 2882185029Spjd} 2883185029Spjd#endif /* sun */ 2884168404Spjd 2885185029Spjd/* 2886185029Spjd * Label an individual disk. The name provided is the short name, 2887185029Spjd * stripped of any leading /dev path. 2888185029Spjd */ 2889185029Spjdint 2890185029Spjdzpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name) 2891185029Spjd{ 2892185029Spjd#if defined(sun) 2893185029Spjd char path[MAXPATHLEN]; 2894185029Spjd struct dk_gpt *vtoc; 2895185029Spjd int fd; 2896185029Spjd size_t resv = EFI_MIN_RESV_SIZE; 2897185029Spjd uint64_t slice_size; 2898185029Spjd diskaddr_t start_block; 2899185029Spjd char errbuf[1024]; 2900168404Spjd 2901185029Spjd /* prepare an error message just in case */ 2902185029Spjd (void) snprintf(errbuf, sizeof (errbuf), 2903185029Spjd dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 2904168404Spjd 2905185029Spjd if (zhp) { 2906185029Spjd nvlist_t *nvroot; 2907168404Spjd 2908209962Smm if (pool_is_bootable(zhp)) { 2909209962Smm zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2910209962Smm "EFI labeled devices are not supported on root " 2911209962Smm "pools.")); 2912209962Smm return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 2913209962Smm } 2914209962Smm 2915185029Spjd verify(nvlist_lookup_nvlist(zhp->zpool_config, 2916185029Spjd ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 2917168404Spjd 2918185029Spjd if (zhp->zpool_start_block == 0) 2919185029Spjd start_block = find_start_block(nvroot); 2920185029Spjd else 2921185029Spjd start_block = zhp->zpool_start_block; 2922185029Spjd zhp->zpool_start_block = start_block; 2923185029Spjd } else { 2924185029Spjd /* new pool */ 2925185029Spjd start_block = NEW_START_BLOCK; 2926185029Spjd } 2927168404Spjd 2928185029Spjd (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name, 2929185029Spjd BACKUP_SLICE); 2930168404Spjd 2931185029Spjd if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2932185029Spjd /* 2933185029Spjd * This shouldn't happen. 
We've long since verified that this 2934185029Spjd * is a valid device. 2935185029Spjd */ 2936185029Spjd zfs_error_aux(hdl, 2937185029Spjd dgettext(TEXT_DOMAIN, "unable to open device")); 2938185029Spjd return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2939185029Spjd } 2940168404Spjd 2941185029Spjd if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 2942185029Spjd /* 2943185029Spjd * The only way this can fail is if we run out of memory, or we 2944185029Spjd * were unable to read the disk's capacity 2945185029Spjd */ 2946185029Spjd if (errno == ENOMEM) 2947185029Spjd (void) no_memory(hdl); 2948168404Spjd 2949185029Spjd (void) close(fd); 2950185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2951185029Spjd "unable to read disk capacity"), name); 2952185029Spjd 2953185029Spjd return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2954168404Spjd } 2955168404Spjd 2956185029Spjd slice_size = vtoc->efi_last_u_lba + 1; 2957185029Spjd slice_size -= EFI_MIN_RESV_SIZE; 2958185029Spjd if (start_block == MAXOFFSET_T) 2959185029Spjd start_block = NEW_START_BLOCK; 2960185029Spjd slice_size -= start_block; 2961168404Spjd 2962185029Spjd vtoc->efi_parts[0].p_start = start_block; 2963185029Spjd vtoc->efi_parts[0].p_size = slice_size; 2964185029Spjd 2965168404Spjd /* 2966185029Spjd * Why we use V_USR: V_BACKUP confuses users, and is considered 2967185029Spjd * disposable by some EFI utilities (since EFI doesn't have a backup 2968185029Spjd * slice). V_UNASSIGNED is supposed to be used only for zero size 2969185029Spjd * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 2970185029Spjd * etc. were all pretty specific. V_USR is as close to reality as we 2971185029Spjd * can get, in the absence of V_OTHER. 
2972168404Spjd */ 2973185029Spjd vtoc->efi_parts[0].p_tag = V_USR; 2974185029Spjd (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 2975168404Spjd 2976185029Spjd vtoc->efi_parts[8].p_start = slice_size + start_block; 2977185029Spjd vtoc->efi_parts[8].p_size = resv; 2978185029Spjd vtoc->efi_parts[8].p_tag = V_RESERVED; 2979168404Spjd 2980185029Spjd if (efi_write(fd, vtoc) != 0) { 2981185029Spjd /* 2982185029Spjd * Some block drivers (like pcata) may not support EFI 2983185029Spjd * GPT labels. Print out a helpful error message dir- 2984185029Spjd * ecting the user to manually label the disk and give 2985185029Spjd * a specific slice. 2986185029Spjd */ 2987185029Spjd (void) close(fd); 2988185029Spjd efi_free(vtoc); 2989168404Spjd 2990185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2991185029Spjd "try using fdisk(1M) and then provide a specific slice")); 2992185029Spjd return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 2993168404Spjd } 2994185029Spjd 2995185029Spjd (void) close(fd); 2996185029Spjd efi_free(vtoc); 2997185029Spjd#endif /* sun */ 2998168404Spjd return (0); 2999168404Spjd} 3000168404Spjd 3001185029Spjdstatic boolean_t 3002185029Spjdsupported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 3003168404Spjd{ 3004185029Spjd char *type; 3005185029Spjd nvlist_t **child; 3006185029Spjd uint_t children, c; 3007185029Spjd 3008185029Spjd verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 3009185029Spjd if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 || 3010185029Spjd strcmp(type, VDEV_TYPE_FILE) == 0 || 3011185029Spjd strcmp(type, VDEV_TYPE_LOG) == 0 || 3012185029Spjd strcmp(type, VDEV_TYPE_MISSING) == 0) { 3013185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3014185029Spjd "vdev type '%s' is not supported"), type); 3015185029Spjd (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 3016185029Spjd return (B_FALSE); 3017185029Spjd } 3018185029Spjd if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 3019185029Spjd &child, 
&children) == 0) { 3020185029Spjd for (c = 0; c < children; c++) { 3021185029Spjd if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 3022185029Spjd return (B_FALSE); 3023185029Spjd } 3024185029Spjd } 3025185029Spjd return (B_TRUE); 3026168404Spjd} 3027168404Spjd 3028185029Spjd/* 3029185029Spjd * check if this zvol is allowable for use as a dump device; zero if 3030185029Spjd * it is, > 0 if it isn't, < 0 if it isn't a zvol 3031185029Spjd */ 3032168404Spjdint 3033185029Spjdzvol_check_dump_config(char *arg) 3034168404Spjd{ 3035185029Spjd zpool_handle_t *zhp = NULL; 3036185029Spjd nvlist_t *config, *nvroot; 3037185029Spjd char *p, *volname; 3038185029Spjd nvlist_t **top; 3039185029Spjd uint_t toplevels; 3040185029Spjd libzfs_handle_t *hdl; 3041185029Spjd char errbuf[1024]; 3042185029Spjd char poolname[ZPOOL_MAXNAMELEN]; 3043185029Spjd int pathlen = strlen(ZVOL_FULL_DEV_DIR); 3044185029Spjd int ret = 1; 3045168404Spjd 3046185029Spjd if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 3047168404Spjd return (-1); 3048185029Spjd } 3049168404Spjd 3050185029Spjd (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 3051185029Spjd "dump is not supported on device '%s'"), arg); 3052168404Spjd 3053185029Spjd if ((hdl = libzfs_init()) == NULL) 3054185029Spjd return (1); 3055185029Spjd libzfs_print_on_error(hdl, B_TRUE); 3056168404Spjd 3057185029Spjd volname = arg + pathlen; 3058185029Spjd 3059185029Spjd /* check the configuration of the pool */ 3060185029Spjd if ((p = strchr(volname, '/')) == NULL) { 3061185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3062185029Spjd "malformed dataset name")); 3063185029Spjd (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 3064185029Spjd return (1); 3065185029Spjd } else if (p - volname >= ZFS_MAXNAMELEN) { 3066185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3067185029Spjd "dataset name is too long")); 3068185029Spjd (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 3069185029Spjd return (1); 3070185029Spjd } else { 
3071185029Spjd (void) strncpy(poolname, volname, p - volname); 3072185029Spjd poolname[p - volname] = '\0'; 3073168404Spjd } 3074168404Spjd 3075185029Spjd if ((zhp = zpool_open(hdl, poolname)) == NULL) { 3076185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3077185029Spjd "could not open pool '%s'"), poolname); 3078185029Spjd (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 3079185029Spjd goto out; 3080185029Spjd } 3081185029Spjd config = zpool_get_config(zhp, NULL); 3082185029Spjd if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3083185029Spjd &nvroot) != 0) { 3084185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3085185029Spjd "could not obtain vdev configuration for '%s'"), poolname); 3086185029Spjd (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 3087185029Spjd goto out; 3088185029Spjd } 3089185029Spjd 3090185029Spjd verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3091185029Spjd &top, &toplevels) == 0); 3092185029Spjd if (toplevels != 1) { 3093185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3094185029Spjd "'%s' has multiple top level vdevs"), poolname); 3095185029Spjd (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf); 3096185029Spjd goto out; 3097185029Spjd } 3098185029Spjd 3099185029Spjd if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 3100185029Spjd goto out; 3101185029Spjd } 3102185029Spjd ret = 0; 3103185029Spjd 3104185029Spjdout: 3105185029Spjd if (zhp) 3106185029Spjd zpool_close(zhp); 3107185029Spjd libzfs_fini(hdl); 3108185029Spjd return (ret); 3109168404Spjd} 3110