libzfs_pool.c revision 307100
1168404Spjd/* 2168404Spjd * CDDL HEADER START 3168404Spjd * 4168404Spjd * The contents of this file are subject to the terms of the 5168404Spjd * Common Development and Distribution License (the "License"). 6168404Spjd * You may not use this file except in compliance with the License. 7168404Spjd * 8168404Spjd * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9168404Spjd * or http://www.opensolaris.org/os/licensing. 10168404Spjd * See the License for the specific language governing permissions 11168404Spjd * and limitations under the License. 12168404Spjd * 13168404Spjd * When distributing Covered Code, include this CDDL HEADER in each 14168404Spjd * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15168404Spjd * If applicable, add the following below this CDDL HEADER, with the 16168404Spjd * fields enclosed by brackets "[]" replaced with your own identifying 17168404Spjd * information: Portions Copyright [yyyy] [name of copyright owner] 18168404Spjd * 19168404Spjd * CDDL HEADER END 20168404Spjd */ 21168404Spjd 22168404Spjd/* 23219089Spjd * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24289562Smav * Copyright (c) 2011, 2015 by Delphix. All rights reserved. 25255750Sdelphij * Copyright (c) 2013, Joyent, Inc. All rights reserved. 26297763Smav * Copyright 2016 Nexenta Systems, Inc. 
27307050Smav * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com> 28168404Spjd */ 29168404Spjd 30168404Spjd#include <sys/types.h> 31168404Spjd#include <sys/stat.h> 32168404Spjd#include <ctype.h> 33168404Spjd#include <errno.h> 34168404Spjd#include <devid.h> 35168404Spjd#include <fcntl.h> 36168404Spjd#include <libintl.h> 37168404Spjd#include <stdio.h> 38168404Spjd#include <stdlib.h> 39168404Spjd#include <strings.h> 40168404Spjd#include <unistd.h> 41248571Smm#include <libgen.h> 42168404Spjd#include <sys/zfs_ioctl.h> 43219089Spjd#include <dlfcn.h> 44168404Spjd 45168404Spjd#include "zfs_namecheck.h" 46168404Spjd#include "zfs_prop.h" 47168404Spjd#include "libzfs_impl.h" 48219089Spjd#include "zfs_comutil.h" 49236884Smm#include "zfeature_common.h" 50168404Spjd 51185029Spjdstatic int read_efi_label(nvlist_t *config, diskaddr_t *sb); 52185029Spjd 53219089Spjd#define BACKUP_SLICE "s2" 54209962Smm 55219089Spjdtypedef struct prop_flags { 56219089Spjd int create:1; /* Validate property on creation */ 57219089Spjd int import:1; /* Validate property on import */ 58219089Spjd} prop_flags_t; 59219089Spjd 60168404Spjd/* 61185029Spjd * ==================================================================== 62185029Spjd * zpool property functions 63185029Spjd * ==================================================================== 64185029Spjd */ 65185029Spjd 66185029Spjdstatic int 67185029Spjdzpool_get_all_props(zpool_handle_t *zhp) 68185029Spjd{ 69185029Spjd zfs_cmd_t zc = { 0 }; 70185029Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 71185029Spjd 72185029Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 73185029Spjd 74185029Spjd if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 75185029Spjd return (-1); 76185029Spjd 77185029Spjd while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 78185029Spjd if (errno == ENOMEM) { 79185029Spjd if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 80185029Spjd zcmd_free_nvlists(&zc); 81185029Spjd return (-1); 82185029Spjd } 
83185029Spjd } else { 84185029Spjd zcmd_free_nvlists(&zc); 85185029Spjd return (-1); 86185029Spjd } 87185029Spjd } 88185029Spjd 89185029Spjd if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 90185029Spjd zcmd_free_nvlists(&zc); 91185029Spjd return (-1); 92185029Spjd } 93185029Spjd 94185029Spjd zcmd_free_nvlists(&zc); 95185029Spjd 96185029Spjd return (0); 97185029Spjd} 98185029Spjd 99185029Spjdstatic int 100185029Spjdzpool_props_refresh(zpool_handle_t *zhp) 101185029Spjd{ 102185029Spjd nvlist_t *old_props; 103185029Spjd 104185029Spjd old_props = zhp->zpool_props; 105185029Spjd 106185029Spjd if (zpool_get_all_props(zhp) != 0) 107185029Spjd return (-1); 108185029Spjd 109185029Spjd nvlist_free(old_props); 110185029Spjd return (0); 111185029Spjd} 112185029Spjd 113185029Spjdstatic char * 114185029Spjdzpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 115185029Spjd zprop_source_t *src) 116185029Spjd{ 117185029Spjd nvlist_t *nv, *nvl; 118185029Spjd uint64_t ival; 119185029Spjd char *value; 120185029Spjd zprop_source_t source; 121185029Spjd 122185029Spjd nvl = zhp->zpool_props; 123185029Spjd if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 124185029Spjd verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 125185029Spjd source = ival; 126185029Spjd verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 127185029Spjd } else { 128185029Spjd source = ZPROP_SRC_DEFAULT; 129185029Spjd if ((value = (char *)zpool_prop_default_string(prop)) == NULL) 130185029Spjd value = "-"; 131185029Spjd } 132185029Spjd 133185029Spjd if (src) 134185029Spjd *src = source; 135185029Spjd 136185029Spjd return (value); 137185029Spjd} 138185029Spjd 139185029Spjduint64_t 140185029Spjdzpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 141185029Spjd{ 142185029Spjd nvlist_t *nv, *nvl; 143185029Spjd uint64_t value; 144185029Spjd zprop_source_t source; 145185029Spjd 146185029Spjd if (zhp->zpool_props == NULL && 
zpool_get_all_props(zhp)) { 147185029Spjd /* 148185029Spjd * zpool_get_all_props() has most likely failed because 149185029Spjd * the pool is faulted, but if all we need is the top level 150185029Spjd * vdev's guid then get it from the zhp config nvlist. 151185029Spjd */ 152185029Spjd if ((prop == ZPOOL_PROP_GUID) && 153185029Spjd (nvlist_lookup_nvlist(zhp->zpool_config, 154185029Spjd ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 155185029Spjd (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 156185029Spjd == 0)) { 157185029Spjd return (value); 158185029Spjd } 159185029Spjd return (zpool_prop_default_numeric(prop)); 160185029Spjd } 161185029Spjd 162185029Spjd nvl = zhp->zpool_props; 163185029Spjd if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 164185029Spjd verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 165185029Spjd source = value; 166185029Spjd verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 167185029Spjd } else { 168185029Spjd source = ZPROP_SRC_DEFAULT; 169185029Spjd value = zpool_prop_default_numeric(prop); 170185029Spjd } 171185029Spjd 172185029Spjd if (src) 173185029Spjd *src = source; 174185029Spjd 175185029Spjd return (value); 176185029Spjd} 177185029Spjd 178185029Spjd/* 179185029Spjd * Map VDEV STATE to printed strings. 
180185029Spjd */ 181224169Sgibbsconst char * 182185029Spjdzpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 183185029Spjd{ 184185029Spjd switch (state) { 185185029Spjd case VDEV_STATE_CLOSED: 186185029Spjd case VDEV_STATE_OFFLINE: 187185029Spjd return (gettext("OFFLINE")); 188185029Spjd case VDEV_STATE_REMOVED: 189185029Spjd return (gettext("REMOVED")); 190185029Spjd case VDEV_STATE_CANT_OPEN: 191185029Spjd if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 192185029Spjd return (gettext("FAULTED")); 193219089Spjd else if (aux == VDEV_AUX_SPLIT_POOL) 194219089Spjd return (gettext("SPLIT")); 195185029Spjd else 196185029Spjd return (gettext("UNAVAIL")); 197185029Spjd case VDEV_STATE_FAULTED: 198185029Spjd return (gettext("FAULTED")); 199185029Spjd case VDEV_STATE_DEGRADED: 200185029Spjd return (gettext("DEGRADED")); 201185029Spjd case VDEV_STATE_HEALTHY: 202185029Spjd return (gettext("ONLINE")); 203307050Smav 204307050Smav default: 205307050Smav break; 206185029Spjd } 207185029Spjd 208185029Spjd return (gettext("UNKNOWN")); 209185029Spjd} 210185029Spjd 211185029Spjd/* 212224169Sgibbs * Map POOL STATE to printed strings. 
213224169Sgibbs */ 214224169Sgibbsconst char * 215224169Sgibbszpool_pool_state_to_name(pool_state_t state) 216224169Sgibbs{ 217224169Sgibbs switch (state) { 218224169Sgibbs case POOL_STATE_ACTIVE: 219224169Sgibbs return (gettext("ACTIVE")); 220224169Sgibbs case POOL_STATE_EXPORTED: 221224169Sgibbs return (gettext("EXPORTED")); 222224169Sgibbs case POOL_STATE_DESTROYED: 223224169Sgibbs return (gettext("DESTROYED")); 224224169Sgibbs case POOL_STATE_SPARE: 225224169Sgibbs return (gettext("SPARE")); 226224169Sgibbs case POOL_STATE_L2CACHE: 227224169Sgibbs return (gettext("L2CACHE")); 228224169Sgibbs case POOL_STATE_UNINITIALIZED: 229224169Sgibbs return (gettext("UNINITIALIZED")); 230224169Sgibbs case POOL_STATE_UNAVAIL: 231224169Sgibbs return (gettext("UNAVAIL")); 232224169Sgibbs case POOL_STATE_POTENTIALLY_ACTIVE: 233224169Sgibbs return (gettext("POTENTIALLY_ACTIVE")); 234224169Sgibbs } 235224169Sgibbs 236224169Sgibbs return (gettext("UNKNOWN")); 237224169Sgibbs} 238224169Sgibbs 239224169Sgibbs/* 240185029Spjd * Get a zpool property value for 'prop' and return the value in 241185029Spjd * a pre-allocated buffer. 
242185029Spjd */ 243185029Spjdint 244185029Spjdzpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, 245263889Sdelphij zprop_source_t *srctype, boolean_t literal) 246185029Spjd{ 247185029Spjd uint64_t intval; 248185029Spjd const char *strval; 249185029Spjd zprop_source_t src = ZPROP_SRC_NONE; 250185029Spjd nvlist_t *nvroot; 251185029Spjd vdev_stat_t *vs; 252185029Spjd uint_t vsc; 253185029Spjd 254185029Spjd if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 255209962Smm switch (prop) { 256209962Smm case ZPOOL_PROP_NAME: 257185029Spjd (void) strlcpy(buf, zpool_get_name(zhp), len); 258209962Smm break; 259209962Smm 260209962Smm case ZPOOL_PROP_HEALTH: 261276446Ssmh (void) strlcpy(buf, 262276446Ssmh zpool_pool_state_to_name(POOL_STATE_UNAVAIL), len); 263209962Smm break; 264209962Smm 265209962Smm case ZPOOL_PROP_GUID: 266209962Smm intval = zpool_get_prop_int(zhp, prop, &src); 267209962Smm (void) snprintf(buf, len, "%llu", intval); 268209962Smm break; 269209962Smm 270209962Smm case ZPOOL_PROP_ALTROOT: 271209962Smm case ZPOOL_PROP_CACHEFILE: 272228103Smm case ZPOOL_PROP_COMMENT: 273209962Smm if (zhp->zpool_props != NULL || 274209962Smm zpool_get_all_props(zhp) == 0) { 275209962Smm (void) strlcpy(buf, 276209962Smm zpool_get_prop_string(zhp, prop, &src), 277209962Smm len); 278263889Sdelphij break; 279209962Smm } 280209962Smm /* FALLTHROUGH */ 281209962Smm default: 282185029Spjd (void) strlcpy(buf, "-", len); 283209962Smm break; 284209962Smm } 285209962Smm 286209962Smm if (srctype != NULL) 287209962Smm *srctype = src; 288185029Spjd return (0); 289185029Spjd } 290185029Spjd 291185029Spjd if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && 292185029Spjd prop != ZPOOL_PROP_NAME) 293185029Spjd return (-1); 294185029Spjd 295185029Spjd switch (zpool_prop_get_type(prop)) { 296185029Spjd case PROP_TYPE_STRING: 297185029Spjd (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), 298185029Spjd len); 299185029Spjd break; 300185029Spjd 301185029Spjd 
case PROP_TYPE_NUMBER: 302185029Spjd intval = zpool_get_prop_int(zhp, prop, &src); 303185029Spjd 304185029Spjd switch (prop) { 305185029Spjd case ZPOOL_PROP_SIZE: 306219089Spjd case ZPOOL_PROP_ALLOCATED: 307219089Spjd case ZPOOL_PROP_FREE: 308236884Smm case ZPOOL_PROP_FREEING: 309268079Sdelphij case ZPOOL_PROP_LEAKED: 310263889Sdelphij if (literal) { 311263889Sdelphij (void) snprintf(buf, len, "%llu", 312263889Sdelphij (u_longlong_t)intval); 313263889Sdelphij } else { 314263889Sdelphij (void) zfs_nicenum(intval, buf, len); 315263889Sdelphij } 316185029Spjd break; 317272502Sdelphij case ZPOOL_PROP_EXPANDSZ: 318272502Sdelphij if (intval == 0) { 319272502Sdelphij (void) strlcpy(buf, "-", len); 320272502Sdelphij } else if (literal) { 321272502Sdelphij (void) snprintf(buf, len, "%llu", 322272502Sdelphij (u_longlong_t)intval); 323272502Sdelphij } else { 324272502Sdelphij (void) zfs_nicenum(intval, buf, len); 325272502Sdelphij } 326272502Sdelphij break; 327185029Spjd case ZPOOL_PROP_CAPACITY: 328263889Sdelphij if (literal) { 329263889Sdelphij (void) snprintf(buf, len, "%llu", 330263889Sdelphij (u_longlong_t)intval); 331263889Sdelphij } else { 332263889Sdelphij (void) snprintf(buf, len, "%llu%%", 333263889Sdelphij (u_longlong_t)intval); 334263889Sdelphij } 335185029Spjd break; 336269118Sdelphij case ZPOOL_PROP_FRAGMENTATION: 337269118Sdelphij if (intval == UINT64_MAX) { 338269118Sdelphij (void) strlcpy(buf, "-", len); 339269118Sdelphij } else { 340269118Sdelphij (void) snprintf(buf, len, "%llu%%", 341269118Sdelphij (u_longlong_t)intval); 342269118Sdelphij } 343269118Sdelphij break; 344219089Spjd case ZPOOL_PROP_DEDUPRATIO: 345219089Spjd (void) snprintf(buf, len, "%llu.%02llux", 346219089Spjd (u_longlong_t)(intval / 100), 347219089Spjd (u_longlong_t)(intval % 100)); 348219089Spjd break; 349185029Spjd case ZPOOL_PROP_HEALTH: 350185029Spjd verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 351185029Spjd ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 352185029Spjd 
verify(nvlist_lookup_uint64_array(nvroot, 353219089Spjd ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 354219089Spjd == 0); 355185029Spjd 356185029Spjd (void) strlcpy(buf, zpool_state_to_name(intval, 357185029Spjd vs->vs_aux), len); 358185029Spjd break; 359236884Smm case ZPOOL_PROP_VERSION: 360236884Smm if (intval >= SPA_VERSION_FEATURES) { 361236884Smm (void) snprintf(buf, len, "-"); 362236884Smm break; 363236884Smm } 364236884Smm /* FALLTHROUGH */ 365185029Spjd default: 366185029Spjd (void) snprintf(buf, len, "%llu", intval); 367185029Spjd } 368185029Spjd break; 369185029Spjd 370185029Spjd case PROP_TYPE_INDEX: 371185029Spjd intval = zpool_get_prop_int(zhp, prop, &src); 372185029Spjd if (zpool_prop_index_to_string(prop, intval, &strval) 373185029Spjd != 0) 374185029Spjd return (-1); 375185029Spjd (void) strlcpy(buf, strval, len); 376185029Spjd break; 377185029Spjd 378185029Spjd default: 379185029Spjd abort(); 380185029Spjd } 381185029Spjd 382185029Spjd if (srctype) 383185029Spjd *srctype = src; 384185029Spjd 385185029Spjd return (0); 386185029Spjd} 387185029Spjd 388185029Spjd/* 389185029Spjd * Check if the bootfs name has the same pool name as it is set to. 390185029Spjd * Assuming bootfs is a valid dataset name. 
391185029Spjd */ 392185029Spjdstatic boolean_t 393185029Spjdbootfs_name_valid(const char *pool, char *bootfs) 394185029Spjd{ 395185029Spjd int len = strlen(pool); 396185029Spjd 397185029Spjd if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 398185029Spjd return (B_FALSE); 399185029Spjd 400185029Spjd if (strncmp(pool, bootfs, len) == 0 && 401185029Spjd (bootfs[len] == '/' || bootfs[len] == '\0')) 402185029Spjd return (B_TRUE); 403185029Spjd 404185029Spjd return (B_FALSE); 405185029Spjd} 406185029Spjd 407236155Smmboolean_t 408236155Smmzpool_is_bootable(zpool_handle_t *zhp) 409219089Spjd{ 410219089Spjd char bootfs[ZPOOL_MAXNAMELEN]; 411219089Spjd 412219089Spjd return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 413263889Sdelphij sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-", 414219089Spjd sizeof (bootfs)) != 0); 415219089Spjd} 416219089Spjd 417219089Spjd 418185029Spjd/* 419185029Spjd * Given an nvlist of zpool properties to be set, validate that they are 420185029Spjd * correct, and parse any numeric properties (index, boolean, etc) if they are 421185029Spjd * specified as strings. 
422185029Spjd */ 423185029Spjdstatic nvlist_t * 424185029Spjdzpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 425219089Spjd nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 426185029Spjd{ 427185029Spjd nvpair_t *elem; 428185029Spjd nvlist_t *retprops; 429185029Spjd zpool_prop_t prop; 430185029Spjd char *strval; 431185029Spjd uint64_t intval; 432228103Smm char *slash, *check; 433185029Spjd struct stat64 statbuf; 434185029Spjd zpool_handle_t *zhp; 435185029Spjd 436185029Spjd if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 437185029Spjd (void) no_memory(hdl); 438185029Spjd return (NULL); 439185029Spjd } 440185029Spjd 441185029Spjd elem = NULL; 442185029Spjd while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 443185029Spjd const char *propname = nvpair_name(elem); 444185029Spjd 445236884Smm prop = zpool_name_to_prop(propname); 446236884Smm if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) { 447236884Smm int err; 448236884Smm char *fname = strchr(propname, '@') + 1; 449236884Smm 450259813Sdelphij err = zfeature_lookup_name(fname, NULL); 451236884Smm if (err != 0) { 452236884Smm ASSERT3U(err, ==, ENOENT); 453236884Smm zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 454236884Smm "invalid feature '%s'"), fname); 455236884Smm (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 456236884Smm goto error; 457236884Smm } 458236884Smm 459236884Smm if (nvpair_type(elem) != DATA_TYPE_STRING) { 460236884Smm zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 461236884Smm "'%s' must be a string"), propname); 462236884Smm (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 463236884Smm goto error; 464236884Smm } 465236884Smm 466236884Smm (void) nvpair_value_string(elem, &strval); 467236884Smm if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) { 468236884Smm zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 469236884Smm "property '%s' can only be set to " 470236884Smm "'enabled'"), propname); 471236884Smm (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 472236884Smm goto error; 
473236884Smm } 474236884Smm 475236884Smm if (nvlist_add_uint64(retprops, propname, 0) != 0) { 476236884Smm (void) no_memory(hdl); 477236884Smm goto error; 478236884Smm } 479236884Smm continue; 480236884Smm } 481236884Smm 482185029Spjd /* 483185029Spjd * Make sure this property is valid and applies to this type. 484185029Spjd */ 485236884Smm if (prop == ZPROP_INVAL) { 486185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 487185029Spjd "invalid property '%s'"), propname); 488185029Spjd (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 489185029Spjd goto error; 490185029Spjd } 491185029Spjd 492185029Spjd if (zpool_prop_readonly(prop)) { 493185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 494185029Spjd "is readonly"), propname); 495185029Spjd (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); 496185029Spjd goto error; 497185029Spjd } 498185029Spjd 499185029Spjd if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, 500185029Spjd &strval, &intval, errbuf) != 0) 501185029Spjd goto error; 502185029Spjd 503185029Spjd /* 504185029Spjd * Perform additional checking for specific properties. 
505185029Spjd */ 506185029Spjd switch (prop) { 507185029Spjd case ZPOOL_PROP_VERSION: 508236884Smm if (intval < version || 509236884Smm !SPA_VERSION_IS_SUPPORTED(intval)) { 510185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 511185029Spjd "property '%s' number %d is invalid."), 512185029Spjd propname, intval); 513185029Spjd (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 514185029Spjd goto error; 515185029Spjd } 516185029Spjd break; 517185029Spjd 518185029Spjd case ZPOOL_PROP_BOOTFS: 519219089Spjd if (flags.create || flags.import) { 520185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 521185029Spjd "property '%s' cannot be set at creation " 522185029Spjd "or import time"), propname); 523185029Spjd (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 524185029Spjd goto error; 525185029Spjd } 526185029Spjd 527185029Spjd if (version < SPA_VERSION_BOOTFS) { 528185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 529185029Spjd "pool must be upgraded to support " 530185029Spjd "'%s' property"), propname); 531185029Spjd (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 532185029Spjd goto error; 533185029Spjd } 534185029Spjd 535185029Spjd /* 536185029Spjd * bootfs property value has to be a dataset name and 537185029Spjd * the dataset has to be in the same pool as it sets to. 
538185029Spjd */ 539185029Spjd if (strval[0] != '\0' && !bootfs_name_valid(poolname, 540185029Spjd strval)) { 541185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 542185029Spjd "is an invalid name"), strval); 543185029Spjd (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 544185029Spjd goto error; 545185029Spjd } 546185029Spjd 547185029Spjd if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { 548185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 549185029Spjd "could not open pool '%s'"), poolname); 550185029Spjd (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 551185029Spjd goto error; 552185029Spjd } 553185029Spjd zpool_close(zhp); 554185029Spjd break; 555185029Spjd 556185029Spjd case ZPOOL_PROP_ALTROOT: 557219089Spjd if (!flags.create && !flags.import) { 558185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 559185029Spjd "property '%s' can only be set during pool " 560185029Spjd "creation or import"), propname); 561185029Spjd (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 562185029Spjd goto error; 563185029Spjd } 564185029Spjd 565185029Spjd if (strval[0] != '/') { 566185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 567185029Spjd "bad alternate root '%s'"), strval); 568185029Spjd (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 569185029Spjd goto error; 570185029Spjd } 571185029Spjd break; 572185029Spjd 573185029Spjd case ZPOOL_PROP_CACHEFILE: 574185029Spjd if (strval[0] == '\0') 575185029Spjd break; 576185029Spjd 577185029Spjd if (strcmp(strval, "none") == 0) 578185029Spjd break; 579185029Spjd 580185029Spjd if (strval[0] != '/') { 581185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 582185029Spjd "property '%s' must be empty, an " 583185029Spjd "absolute path, or 'none'"), propname); 584185029Spjd (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 585185029Spjd goto error; 586185029Spjd } 587185029Spjd 588185029Spjd slash = strrchr(strval, '/'); 589185029Spjd 590185029Spjd if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 591185029Spjd strcmp(slash, 
"/..") == 0) { 592185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 593185029Spjd "'%s' is not a valid file"), strval); 594185029Spjd (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 595185029Spjd goto error; 596185029Spjd } 597185029Spjd 598185029Spjd *slash = '\0'; 599185029Spjd 600185029Spjd if (strval[0] != '\0' && 601185029Spjd (stat64(strval, &statbuf) != 0 || 602185029Spjd !S_ISDIR(statbuf.st_mode))) { 603185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 604185029Spjd "'%s' is not a valid directory"), 605185029Spjd strval); 606185029Spjd (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 607185029Spjd goto error; 608185029Spjd } 609185029Spjd 610185029Spjd *slash = '/'; 611185029Spjd break; 612219089Spjd 613228103Smm case ZPOOL_PROP_COMMENT: 614228103Smm for (check = strval; *check != '\0'; check++) { 615228103Smm if (!isprint(*check)) { 616228103Smm zfs_error_aux(hdl, 617228103Smm dgettext(TEXT_DOMAIN, 618228103Smm "comment may only have printable " 619228103Smm "characters")); 620228103Smm (void) zfs_error(hdl, EZFS_BADPROP, 621228103Smm errbuf); 622228103Smm goto error; 623228103Smm } 624228103Smm } 625228103Smm if (strlen(strval) > ZPROP_MAX_COMMENT) { 626228103Smm zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 627228103Smm "comment must not exceed %d characters"), 628228103Smm ZPROP_MAX_COMMENT); 629228103Smm (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 630228103Smm goto error; 631228103Smm } 632228103Smm break; 633219089Spjd case ZPOOL_PROP_READONLY: 634219089Spjd if (!flags.import) { 635219089Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 636219089Spjd "property '%s' can only be set at " 637219089Spjd "import time"), propname); 638219089Spjd (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 639219089Spjd goto error; 640219089Spjd } 641219089Spjd break; 642307050Smav 643307050Smav default: 644307050Smav zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 645307050Smav "property '%s'(%d) not defined"), propname, prop); 646307050Smav break; 647185029Spjd } 648185029Spjd } 
649185029Spjd 650185029Spjd return (retprops); 651185029Spjderror: 652185029Spjd nvlist_free(retprops); 653185029Spjd return (NULL); 654185029Spjd} 655185029Spjd 656185029Spjd/* 657185029Spjd * Set zpool property : propname=propval. 658185029Spjd */ 659185029Spjdint 660185029Spjdzpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 661185029Spjd{ 662185029Spjd zfs_cmd_t zc = { 0 }; 663185029Spjd int ret = -1; 664185029Spjd char errbuf[1024]; 665185029Spjd nvlist_t *nvl = NULL; 666185029Spjd nvlist_t *realprops; 667185029Spjd uint64_t version; 668219089Spjd prop_flags_t flags = { 0 }; 669185029Spjd 670185029Spjd (void) snprintf(errbuf, sizeof (errbuf), 671185029Spjd dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 672185029Spjd zhp->zpool_name); 673185029Spjd 674185029Spjd if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 675185029Spjd return (no_memory(zhp->zpool_hdl)); 676185029Spjd 677185029Spjd if (nvlist_add_string(nvl, propname, propval) != 0) { 678185029Spjd nvlist_free(nvl); 679185029Spjd return (no_memory(zhp->zpool_hdl)); 680185029Spjd } 681185029Spjd 682185029Spjd version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 683185029Spjd if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 684219089Spjd zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 685185029Spjd nvlist_free(nvl); 686185029Spjd return (-1); 687185029Spjd } 688185029Spjd 689185029Spjd nvlist_free(nvl); 690185029Spjd nvl = realprops; 691185029Spjd 692185029Spjd /* 693185029Spjd * Execute the corresponding ioctl() to set this property. 
694185029Spjd */ 695185029Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 696185029Spjd 697185029Spjd if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 698185029Spjd nvlist_free(nvl); 699185029Spjd return (-1); 700185029Spjd } 701185029Spjd 702185029Spjd ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 703185029Spjd 704185029Spjd zcmd_free_nvlists(&zc); 705185029Spjd nvlist_free(nvl); 706185029Spjd 707185029Spjd if (ret) 708185029Spjd (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 709185029Spjd else 710185029Spjd (void) zpool_props_refresh(zhp); 711185029Spjd 712185029Spjd return (ret); 713185029Spjd} 714185029Spjd 715185029Spjdint 716185029Spjdzpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) 717185029Spjd{ 718185029Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 719185029Spjd zprop_list_t *entry; 720185029Spjd char buf[ZFS_MAXPROPLEN]; 721236884Smm nvlist_t *features = NULL; 722236884Smm zprop_list_t **last; 723236884Smm boolean_t firstexpand = (NULL == *plp); 724185029Spjd 725185029Spjd if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 726185029Spjd return (-1); 727185029Spjd 728236884Smm last = plp; 729236884Smm while (*last != NULL) 730236884Smm last = &(*last)->pl_next; 731236884Smm 732236884Smm if ((*plp)->pl_all) 733236884Smm features = zpool_get_features(zhp); 734236884Smm 735236884Smm if ((*plp)->pl_all && firstexpand) { 736236884Smm for (int i = 0; i < SPA_FEATURES; i++) { 737236884Smm zprop_list_t *entry = zfs_alloc(hdl, 738236884Smm sizeof (zprop_list_t)); 739236884Smm entry->pl_prop = ZPROP_INVAL; 740236884Smm entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s", 741236884Smm spa_feature_table[i].fi_uname); 742236884Smm entry->pl_width = strlen(entry->pl_user_prop); 743236884Smm entry->pl_all = B_TRUE; 744236884Smm 745236884Smm *last = entry; 746236884Smm last = &entry->pl_next; 747236884Smm } 748236884Smm } 749236884Smm 750236884Smm /* add any unsupported features */ 751236884Smm 
for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL); 752236884Smm nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) { 753236884Smm char *propname; 754236884Smm boolean_t found; 755236884Smm zprop_list_t *entry; 756236884Smm 757236884Smm if (zfeature_is_supported(nvpair_name(nvp))) 758236884Smm continue; 759236884Smm 760236884Smm propname = zfs_asprintf(hdl, "unsupported@%s", 761236884Smm nvpair_name(nvp)); 762236884Smm 763236884Smm /* 764236884Smm * Before adding the property to the list make sure that no 765236884Smm * other pool already added the same property. 766236884Smm */ 767236884Smm found = B_FALSE; 768236884Smm entry = *plp; 769236884Smm while (entry != NULL) { 770236884Smm if (entry->pl_user_prop != NULL && 771236884Smm strcmp(propname, entry->pl_user_prop) == 0) { 772236884Smm found = B_TRUE; 773236884Smm break; 774236884Smm } 775236884Smm entry = entry->pl_next; 776236884Smm } 777236884Smm if (found) { 778236884Smm free(propname); 779236884Smm continue; 780236884Smm } 781236884Smm 782236884Smm entry = zfs_alloc(hdl, sizeof (zprop_list_t)); 783236884Smm entry->pl_prop = ZPROP_INVAL; 784236884Smm entry->pl_user_prop = propname; 785236884Smm entry->pl_width = strlen(entry->pl_user_prop); 786236884Smm entry->pl_all = B_TRUE; 787236884Smm 788236884Smm *last = entry; 789236884Smm last = &entry->pl_next; 790236884Smm } 791236884Smm 792185029Spjd for (entry = *plp; entry != NULL; entry = entry->pl_next) { 793185029Spjd 794185029Spjd if (entry->pl_fixed) 795185029Spjd continue; 796185029Spjd 797185029Spjd if (entry->pl_prop != ZPROP_INVAL && 798185029Spjd zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), 799263889Sdelphij NULL, B_FALSE) == 0) { 800185029Spjd if (strlen(buf) > entry->pl_width) 801185029Spjd entry->pl_width = strlen(buf); 802185029Spjd } 803185029Spjd } 804185029Spjd 805185029Spjd return (0); 806185029Spjd} 807185029Spjd 808236884Smm/* 809236884Smm * Get the state for the given feature on the given ZFS pool. 
810236884Smm */ 811236884Smmint 812236884Smmzpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf, 813236884Smm size_t len) 814236884Smm{ 815236884Smm uint64_t refcount; 816236884Smm boolean_t found = B_FALSE; 817236884Smm nvlist_t *features = zpool_get_features(zhp); 818236884Smm boolean_t supported; 819236884Smm const char *feature = strchr(propname, '@') + 1; 820185029Spjd 821236884Smm supported = zpool_prop_feature(propname); 822236884Smm ASSERT(supported || zpool_prop_unsupported(propname)); 823236884Smm 824236884Smm /* 825236884Smm * Convert from feature name to feature guid. This conversion is 826236884Smm * unecessary for unsupported@... properties because they already 827236884Smm * use guids. 828236884Smm */ 829236884Smm if (supported) { 830236884Smm int ret; 831259813Sdelphij spa_feature_t fid; 832236884Smm 833259813Sdelphij ret = zfeature_lookup_name(feature, &fid); 834236884Smm if (ret != 0) { 835236884Smm (void) strlcpy(buf, "-", len); 836236884Smm return (ENOTSUP); 837236884Smm } 838259813Sdelphij feature = spa_feature_table[fid].fi_guid; 839236884Smm } 840236884Smm 841236884Smm if (nvlist_lookup_uint64(features, feature, &refcount) == 0) 842236884Smm found = B_TRUE; 843236884Smm 844236884Smm if (supported) { 845236884Smm if (!found) { 846236884Smm (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len); 847236884Smm } else { 848236884Smm if (refcount == 0) 849236884Smm (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len); 850236884Smm else 851236884Smm (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len); 852236884Smm } 853236884Smm } else { 854236884Smm if (found) { 855236884Smm if (refcount == 0) { 856236884Smm (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE); 857236884Smm } else { 858236884Smm (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY); 859236884Smm } 860236884Smm } else { 861236884Smm (void) strlcpy(buf, "-", len); 862236884Smm return (ENOTSUP); 863236884Smm } 864236884Smm } 865236884Smm 866236884Smm return (0); 867236884Smm} 868236884Smm 
869185029Spjd/* 870219089Spjd * Don't start the slice at the default block of 34; many storage 871219089Spjd * devices will use a stripe width of 128k, so start there instead. 872219089Spjd */ 873219089Spjd#define NEW_START_BLOCK 256 874219089Spjd 875219089Spjd/* 876168404Spjd * Validate the given pool name, optionally putting an extended error message in 877168404Spjd * 'buf'. 878168404Spjd */ 879185029Spjdboolean_t 880168404Spjdzpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 881168404Spjd{ 882168404Spjd namecheck_err_t why; 883168404Spjd char what; 884168404Spjd int ret; 885168404Spjd 886168404Spjd ret = pool_namecheck(pool, &why, &what); 887168404Spjd 888168404Spjd /* 889168404Spjd * The rules for reserved pool names were extended at a later point. 890168404Spjd * But we need to support users with existing pools that may now be 891168404Spjd * invalid. So we only check for this expanded set of names during a 892168404Spjd * create (or import), and only in userland. 
893168404Spjd */ 894168404Spjd if (ret == 0 && !isopen && 895168404Spjd (strncmp(pool, "mirror", 6) == 0 || 896168404Spjd strncmp(pool, "raidz", 5) == 0 || 897185029Spjd strncmp(pool, "spare", 5) == 0 || 898185029Spjd strcmp(pool, "log") == 0)) { 899185029Spjd if (hdl != NULL) 900185029Spjd zfs_error_aux(hdl, 901185029Spjd dgettext(TEXT_DOMAIN, "name is reserved")); 902168404Spjd return (B_FALSE); 903168404Spjd } 904168404Spjd 905168404Spjd 906168404Spjd if (ret != 0) { 907168404Spjd if (hdl != NULL) { 908168404Spjd switch (why) { 909168404Spjd case NAME_ERR_TOOLONG: 910168404Spjd zfs_error_aux(hdl, 911168404Spjd dgettext(TEXT_DOMAIN, "name is too long")); 912168404Spjd break; 913168404Spjd 914168404Spjd case NAME_ERR_INVALCHAR: 915168404Spjd zfs_error_aux(hdl, 916168404Spjd dgettext(TEXT_DOMAIN, "invalid character " 917168404Spjd "'%c' in pool name"), what); 918168404Spjd break; 919168404Spjd 920168404Spjd case NAME_ERR_NOLETTER: 921168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 922168404Spjd "name must begin with a letter")); 923168404Spjd break; 924168404Spjd 925168404Spjd case NAME_ERR_RESERVED: 926168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 927168404Spjd "name is reserved")); 928168404Spjd break; 929168404Spjd 930168404Spjd case NAME_ERR_DISKLIKE: 931168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 932168404Spjd "pool name is reserved")); 933168404Spjd break; 934168404Spjd 935168404Spjd case NAME_ERR_LEADING_SLASH: 936168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 937168404Spjd "leading slash in name")); 938168404Spjd break; 939168404Spjd 940168404Spjd case NAME_ERR_EMPTY_COMPONENT: 941168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 942168404Spjd "empty component in name")); 943168404Spjd break; 944168404Spjd 945168404Spjd case NAME_ERR_TRAILING_SLASH: 946168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 947168404Spjd "trailing slash in name")); 948168404Spjd break; 949168404Spjd 950168404Spjd case NAME_ERR_MULTIPLE_AT: 951168404Spjd 
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 952168404Spjd "multiple '@' delimiters in name")); 953168404Spjd break; 954168404Spjd 955307050Smav default: 956307050Smav zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 957307050Smav "(%d) not defined"), why); 958307050Smav break; 959168404Spjd } 960168404Spjd } 961168404Spjd return (B_FALSE); 962168404Spjd } 963168404Spjd 964168404Spjd return (B_TRUE); 965168404Spjd} 966168404Spjd 967168404Spjd/* 968168404Spjd * Open a handle to the given pool, even if the pool is currently in the FAULTED 969168404Spjd * state. 970168404Spjd */ 971168404Spjdzpool_handle_t * 972168404Spjdzpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 973168404Spjd{ 974168404Spjd zpool_handle_t *zhp; 975168404Spjd boolean_t missing; 976168404Spjd 977168404Spjd /* 978168404Spjd * Make sure the pool name is valid. 979168404Spjd */ 980168404Spjd if (!zpool_name_valid(hdl, B_TRUE, pool)) { 981168404Spjd (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 982168404Spjd dgettext(TEXT_DOMAIN, "cannot open '%s'"), 983168404Spjd pool); 984168404Spjd return (NULL); 985168404Spjd } 986168404Spjd 987168404Spjd if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 988168404Spjd return (NULL); 989168404Spjd 990168404Spjd zhp->zpool_hdl = hdl; 991168404Spjd (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 992168404Spjd 993168404Spjd if (zpool_refresh_stats(zhp, &missing) != 0) { 994168404Spjd zpool_close(zhp); 995168404Spjd return (NULL); 996168404Spjd } 997168404Spjd 998168404Spjd if (missing) { 999185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 1000168404Spjd (void) zfs_error_fmt(hdl, EZFS_NOENT, 1001185029Spjd dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 1002168404Spjd zpool_close(zhp); 1003168404Spjd return (NULL); 1004168404Spjd } 1005168404Spjd 1006168404Spjd return (zhp); 1007168404Spjd} 1008168404Spjd 1009168404Spjd/* 1010168404Spjd * Like the above, but silent on error. 
Used when iterating over pools (because 1011168404Spjd * the configuration cache may be out of date). 1012168404Spjd */ 1013168404Spjdint 1014168404Spjdzpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 1015168404Spjd{ 1016168404Spjd zpool_handle_t *zhp; 1017168404Spjd boolean_t missing; 1018168404Spjd 1019168404Spjd if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1020168404Spjd return (-1); 1021168404Spjd 1022168404Spjd zhp->zpool_hdl = hdl; 1023168404Spjd (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1024168404Spjd 1025168404Spjd if (zpool_refresh_stats(zhp, &missing) != 0) { 1026168404Spjd zpool_close(zhp); 1027168404Spjd return (-1); 1028168404Spjd } 1029168404Spjd 1030168404Spjd if (missing) { 1031168404Spjd zpool_close(zhp); 1032168404Spjd *ret = NULL; 1033168404Spjd return (0); 1034168404Spjd } 1035168404Spjd 1036168404Spjd *ret = zhp; 1037168404Spjd return (0); 1038168404Spjd} 1039168404Spjd 1040168404Spjd/* 1041168404Spjd * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 1042168404Spjd * state. 1043168404Spjd */ 1044168404Spjdzpool_handle_t * 1045168404Spjdzpool_open(libzfs_handle_t *hdl, const char *pool) 1046168404Spjd{ 1047168404Spjd zpool_handle_t *zhp; 1048168404Spjd 1049168404Spjd if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 1050168404Spjd return (NULL); 1051168404Spjd 1052168404Spjd if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 1053168404Spjd (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 1054168404Spjd dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 1055168404Spjd zpool_close(zhp); 1056168404Spjd return (NULL); 1057168404Spjd } 1058168404Spjd 1059168404Spjd return (zhp); 1060168404Spjd} 1061168404Spjd 1062168404Spjd/* 1063168404Spjd * Close the handle. Simply frees the memory associated with the handle. 
1064168404Spjd */ 1065168404Spjdvoid 1066168404Spjdzpool_close(zpool_handle_t *zhp) 1067168404Spjd{ 1068296528Smav nvlist_free(zhp->zpool_config); 1069296528Smav nvlist_free(zhp->zpool_old_config); 1070296528Smav nvlist_free(zhp->zpool_props); 1071168404Spjd free(zhp); 1072168404Spjd} 1073168404Spjd 1074168404Spjd/* 1075168404Spjd * Return the name of the pool. 1076168404Spjd */ 1077168404Spjdconst char * 1078168404Spjdzpool_get_name(zpool_handle_t *zhp) 1079168404Spjd{ 1080168404Spjd return (zhp->zpool_name); 1081168404Spjd} 1082168404Spjd 1083168404Spjd 1084168404Spjd/* 1085168404Spjd * Return the state of the pool (ACTIVE or UNAVAILABLE) 1086168404Spjd */ 1087168404Spjdint 1088168404Spjdzpool_get_state(zpool_handle_t *zhp) 1089168404Spjd{ 1090168404Spjd return (zhp->zpool_state); 1091168404Spjd} 1092168404Spjd 1093168404Spjd/* 1094168404Spjd * Create the named pool, using the provided vdev list. It is assumed 1095168404Spjd * that the consumer has already validated the contents of the nvlist, so we 1096168404Spjd * don't have to worry about error semantics. 
1097168404Spjd */ 1098168404Spjdint 1099168404Spjdzpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 1100185029Spjd nvlist_t *props, nvlist_t *fsprops) 1101168404Spjd{ 1102168404Spjd zfs_cmd_t zc = { 0 }; 1103185029Spjd nvlist_t *zc_fsprops = NULL; 1104185029Spjd nvlist_t *zc_props = NULL; 1105168404Spjd char msg[1024]; 1106185029Spjd int ret = -1; 1107168404Spjd 1108168404Spjd (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1109168404Spjd "cannot create '%s'"), pool); 1110168404Spjd 1111168404Spjd if (!zpool_name_valid(hdl, B_FALSE, pool)) 1112168404Spjd return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 1113168404Spjd 1114185029Spjd if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1115168404Spjd return (-1); 1116168404Spjd 1117185029Spjd if (props) { 1118219089Spjd prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 1119219089Spjd 1120185029Spjd if ((zc_props = zpool_valid_proplist(hdl, pool, props, 1121219089Spjd SPA_VERSION_1, flags, msg)) == NULL) { 1122185029Spjd goto create_failed; 1123185029Spjd } 1124185029Spjd } 1125185029Spjd 1126185029Spjd if (fsprops) { 1127185029Spjd uint64_t zoned; 1128185029Spjd char *zonestr; 1129185029Spjd 1130185029Spjd zoned = ((nvlist_lookup_string(fsprops, 1131185029Spjd zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 1132185029Spjd strcmp(zonestr, "on") == 0); 1133185029Spjd 1134289500Smav if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM, 1135289500Smav fsprops, zoned, NULL, NULL, msg)) == NULL) { 1136185029Spjd goto create_failed; 1137185029Spjd } 1138185029Spjd if (!zc_props && 1139185029Spjd (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 1140185029Spjd goto create_failed; 1141185029Spjd } 1142185029Spjd if (nvlist_add_nvlist(zc_props, 1143185029Spjd ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 1144185029Spjd goto create_failed; 1145185029Spjd } 1146185029Spjd } 1147185029Spjd 1148185029Spjd if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 
1149185029Spjd goto create_failed; 1150185029Spjd 1151168404Spjd (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 1152168404Spjd 1153185029Spjd if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 1154168404Spjd 1155168404Spjd zcmd_free_nvlists(&zc); 1156185029Spjd nvlist_free(zc_props); 1157185029Spjd nvlist_free(zc_fsprops); 1158168404Spjd 1159168404Spjd switch (errno) { 1160168404Spjd case EBUSY: 1161168404Spjd /* 1162168404Spjd * This can happen if the user has specified the same 1163168404Spjd * device multiple times. We can't reliably detect this 1164168404Spjd * until we try to add it and see we already have a 1165168404Spjd * label. 1166168404Spjd */ 1167168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1168168404Spjd "one or more vdevs refer to the same device")); 1169168404Spjd return (zfs_error(hdl, EZFS_BADDEV, msg)); 1170168404Spjd 1171289500Smav case ERANGE: 1172289500Smav /* 1173289500Smav * This happens if the record size is smaller or larger 1174289500Smav * than the allowed size range, or not a power of 2. 1175289500Smav * 1176289500Smav * NOTE: although zfs_valid_proplist is called earlier, 1177289500Smav * this case may have slipped through since the 1178289500Smav * pool does not exist yet and it is therefore 1179289500Smav * impossible to read properties e.g. max blocksize 1180289500Smav * from the pool. 1181289500Smav */ 1182289500Smav zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1183289500Smav "record size invalid")); 1184289500Smav return (zfs_error(hdl, EZFS_BADPROP, msg)); 1185289500Smav 1186168404Spjd case EOVERFLOW: 1187168404Spjd /* 1188168404Spjd * This occurs when one of the devices is below 1189168404Spjd * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1190168404Spjd * device was the problem device since there's no 1191168404Spjd * reliable way to determine device size from userland. 
1192168404Spjd */ 1193168404Spjd { 1194168404Spjd char buf[64]; 1195168404Spjd 1196168404Spjd zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1197168404Spjd 1198168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1199168404Spjd "one or more devices is less than the " 1200168404Spjd "minimum size (%s)"), buf); 1201168404Spjd } 1202168404Spjd return (zfs_error(hdl, EZFS_BADDEV, msg)); 1203168404Spjd 1204168404Spjd case ENOSPC: 1205168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1206168404Spjd "one or more devices is out of space")); 1207168404Spjd return (zfs_error(hdl, EZFS_BADDEV, msg)); 1208168404Spjd 1209185029Spjd case ENOTBLK: 1210185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1211185029Spjd "cache device must be a disk or disk slice")); 1212185029Spjd return (zfs_error(hdl, EZFS_BADDEV, msg)); 1213185029Spjd 1214168404Spjd default: 1215168404Spjd return (zpool_standard_error(hdl, errno, msg)); 1216168404Spjd } 1217168404Spjd } 1218168404Spjd 1219185029Spjdcreate_failed: 1220185029Spjd zcmd_free_nvlists(&zc); 1221185029Spjd nvlist_free(zc_props); 1222185029Spjd nvlist_free(zc_fsprops); 1223185029Spjd return (ret); 1224168404Spjd} 1225168404Spjd 1226168404Spjd/* 1227168404Spjd * Destroy the given pool. It is up to the caller to ensure that there are no 1228168404Spjd * datasets left in the pool. 
1229168404Spjd */ 1230168404Spjdint 1231248571Smmzpool_destroy(zpool_handle_t *zhp, const char *log_str) 1232168404Spjd{ 1233168404Spjd zfs_cmd_t zc = { 0 }; 1234168404Spjd zfs_handle_t *zfp = NULL; 1235168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 1236168404Spjd char msg[1024]; 1237168404Spjd 1238168404Spjd if (zhp->zpool_state == POOL_STATE_ACTIVE && 1239219089Spjd (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1240168404Spjd return (-1); 1241168404Spjd 1242168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1243248571Smm zc.zc_history = (uint64_t)(uintptr_t)log_str; 1244168404Spjd 1245219089Spjd if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1246168404Spjd (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1247168404Spjd "cannot destroy '%s'"), zhp->zpool_name); 1248168404Spjd 1249168404Spjd if (errno == EROFS) { 1250168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1251168404Spjd "one or more devices is read only")); 1252168404Spjd (void) zfs_error(hdl, EZFS_BADDEV, msg); 1253168404Spjd } else { 1254168404Spjd (void) zpool_standard_error(hdl, errno, msg); 1255168404Spjd } 1256168404Spjd 1257168404Spjd if (zfp) 1258168404Spjd zfs_close(zfp); 1259168404Spjd return (-1); 1260168404Spjd } 1261168404Spjd 1262168404Spjd if (zfp) { 1263168404Spjd remove_mountpoint(zfp); 1264168404Spjd zfs_close(zfp); 1265168404Spjd } 1266168404Spjd 1267168404Spjd return (0); 1268168404Spjd} 1269168404Spjd 1270168404Spjd/* 1271168404Spjd * Add the given vdevs to the pool. The caller must have already performed the 1272168404Spjd * necessary verification to ensure that the vdev specification is well-formed. 
1273168404Spjd */ 1274168404Spjdint 1275168404Spjdzpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) 1276168404Spjd{ 1277168404Spjd zfs_cmd_t zc = { 0 }; 1278168404Spjd int ret; 1279168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 1280168404Spjd char msg[1024]; 1281185029Spjd nvlist_t **spares, **l2cache; 1282185029Spjd uint_t nspares, nl2cache; 1283168404Spjd 1284168404Spjd (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1285168404Spjd "cannot add to '%s'"), zhp->zpool_name); 1286168404Spjd 1287185029Spjd if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1288185029Spjd SPA_VERSION_SPARES && 1289168404Spjd nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1290168404Spjd &spares, &nspares) == 0) { 1291168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1292168404Spjd "upgraded to add hot spares")); 1293168404Spjd return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1294168404Spjd } 1295168404Spjd 1296185029Spjd if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1297185029Spjd SPA_VERSION_L2CACHE && 1298185029Spjd nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1299185029Spjd &l2cache, &nl2cache) == 0) { 1300185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1301185029Spjd "upgraded to add cache devices")); 1302185029Spjd return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1303185029Spjd } 1304185029Spjd 1305185029Spjd if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1306168404Spjd return (-1); 1307168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1308168404Spjd 1309219089Spjd if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) { 1310168404Spjd switch (errno) { 1311168404Spjd case EBUSY: 1312168404Spjd /* 1313168404Spjd * This can happen if the user has specified the same 1314168404Spjd * device multiple times. We can't reliably detect this 1315168404Spjd * until we try to add it and see we already have a 1316168404Spjd * label. 
1317168404Spjd */ 1318168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1319168404Spjd "one or more vdevs refer to the same device")); 1320168404Spjd (void) zfs_error(hdl, EZFS_BADDEV, msg); 1321168404Spjd break; 1322168404Spjd 1323168404Spjd case EOVERFLOW: 1324168404Spjd /* 1325168404Spjd * This occurrs when one of the devices is below 1326168404Spjd * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1327168404Spjd * device was the problem device since there's no 1328168404Spjd * reliable way to determine device size from userland. 1329168404Spjd */ 1330168404Spjd { 1331168404Spjd char buf[64]; 1332168404Spjd 1333168404Spjd zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1334168404Spjd 1335168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1336168404Spjd "device is less than the minimum " 1337168404Spjd "size (%s)"), buf); 1338168404Spjd } 1339168404Spjd (void) zfs_error(hdl, EZFS_BADDEV, msg); 1340168404Spjd break; 1341168404Spjd 1342168404Spjd case ENOTSUP: 1343168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1344185029Spjd "pool must be upgraded to add these vdevs")); 1345168404Spjd (void) zfs_error(hdl, EZFS_BADVERSION, msg); 1346168404Spjd break; 1347168404Spjd 1348168404Spjd case EDOM: 1349168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1350185029Spjd "root pool can not have multiple vdevs" 1351185029Spjd " or separate logs")); 1352168404Spjd (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); 1353168404Spjd break; 1354168404Spjd 1355185029Spjd case ENOTBLK: 1356185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1357185029Spjd "cache device must be a disk or disk slice")); 1358185029Spjd (void) zfs_error(hdl, EZFS_BADDEV, msg); 1359185029Spjd break; 1360185029Spjd 1361168404Spjd default: 1362168404Spjd (void) zpool_standard_error(hdl, errno, msg); 1363168404Spjd } 1364168404Spjd 1365168404Spjd ret = -1; 1366168404Spjd } else { 1367168404Spjd ret = 0; 1368168404Spjd } 1369168404Spjd 1370168404Spjd zcmd_free_nvlists(&zc); 1371168404Spjd 
1372168404Spjd return (ret); 1373168404Spjd} 1374168404Spjd 1375168404Spjd/* 1376168404Spjd * Exports the pool from the system. The caller must ensure that there are no 1377168404Spjd * mounted datasets in the pool. 1378168404Spjd */ 1379248571Smmstatic int 1380248571Smmzpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce, 1381248571Smm const char *log_str) 1382168404Spjd{ 1383168404Spjd zfs_cmd_t zc = { 0 }; 1384185029Spjd char msg[1024]; 1385168404Spjd 1386185029Spjd (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1387185029Spjd "cannot export '%s'"), zhp->zpool_name); 1388185029Spjd 1389168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1390185029Spjd zc.zc_cookie = force; 1391207670Smm zc.zc_guid = hardforce; 1392248571Smm zc.zc_history = (uint64_t)(uintptr_t)log_str; 1393168404Spjd 1394185029Spjd if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { 1395185029Spjd switch (errno) { 1396185029Spjd case EXDEV: 1397185029Spjd zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 1398185029Spjd "use '-f' to override the following errors:\n" 1399185029Spjd "'%s' has an active shared spare which could be" 1400185029Spjd " used by other pools once '%s' is exported."), 1401185029Spjd zhp->zpool_name, zhp->zpool_name); 1402185029Spjd return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, 1403185029Spjd msg)); 1404185029Spjd default: 1405185029Spjd return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, 1406185029Spjd msg)); 1407185029Spjd } 1408185029Spjd } 1409185029Spjd 1410168404Spjd return (0); 1411168404Spjd} 1412168404Spjd 1413207670Smmint 1414248571Smmzpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str) 1415207670Smm{ 1416248571Smm return (zpool_export_common(zhp, force, B_FALSE, log_str)); 1417207670Smm} 1418207670Smm 1419207670Smmint 1420248571Smmzpool_export_force(zpool_handle_t *zhp, const char *log_str) 1421207670Smm{ 1422248571Smm return (zpool_export_common(zhp, B_TRUE, 
B_TRUE, log_str)); 1423207670Smm} 1424207670Smm 1425219089Spjdstatic void 1426219089Spjdzpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun, 1427219089Spjd nvlist_t *config) 1428219089Spjd{ 1429219089Spjd nvlist_t *nv = NULL; 1430219089Spjd uint64_t rewindto; 1431219089Spjd int64_t loss = -1; 1432219089Spjd struct tm t; 1433219089Spjd char timestr[128]; 1434219089Spjd 1435219089Spjd if (!hdl->libzfs_printerr || config == NULL) 1436219089Spjd return; 1437219089Spjd 1438236884Smm if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1439236884Smm nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) { 1440219089Spjd return; 1441236884Smm } 1442219089Spjd 1443219089Spjd if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1444219089Spjd return; 1445219089Spjd (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1446219089Spjd 1447219089Spjd if (localtime_r((time_t *)&rewindto, &t) != NULL && 1448219089Spjd strftime(timestr, 128, 0, &t) != 0) { 1449219089Spjd if (dryrun) { 1450219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1451219089Spjd "Would be able to return %s " 1452219089Spjd "to its state as of %s.\n"), 1453219089Spjd name, timestr); 1454219089Spjd } else { 1455219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1456219089Spjd "Pool %s returned to its state as of %s.\n"), 1457219089Spjd name, timestr); 1458219089Spjd } 1459219089Spjd if (loss > 120) { 1460219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1461219089Spjd "%s approximately %lld "), 1462219089Spjd dryrun ? "Would discard" : "Discarded", 1463219089Spjd (loss + 30) / 60); 1464219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1465219089Spjd "minutes of transactions.\n")); 1466219089Spjd } else if (loss > 0) { 1467219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1468219089Spjd "%s approximately %lld "), 1469219089Spjd dryrun ? 
"Would discard" : "Discarded", loss); 1470219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1471219089Spjd "seconds of transactions.\n")); 1472219089Spjd } 1473219089Spjd } 1474219089Spjd} 1475219089Spjd 1476219089Spjdvoid 1477219089Spjdzpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1478219089Spjd nvlist_t *config) 1479219089Spjd{ 1480219089Spjd nvlist_t *nv = NULL; 1481219089Spjd int64_t loss = -1; 1482219089Spjd uint64_t edata = UINT64_MAX; 1483219089Spjd uint64_t rewindto; 1484219089Spjd struct tm t; 1485219089Spjd char timestr[128]; 1486219089Spjd 1487219089Spjd if (!hdl->libzfs_printerr) 1488219089Spjd return; 1489219089Spjd 1490219089Spjd if (reason >= 0) 1491219089Spjd (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1492219089Spjd else 1493219089Spjd (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1494219089Spjd 1495219089Spjd /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1496219089Spjd if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1497236884Smm nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 || 1498219089Spjd nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1499219089Spjd goto no_info; 1500219089Spjd 1501219089Spjd (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1502219089Spjd (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1503219089Spjd &edata); 1504219089Spjd 1505219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1506219089Spjd "Recovery is possible, but will result in some data loss.\n")); 1507219089Spjd 1508219089Spjd if (localtime_r((time_t *)&rewindto, &t) != NULL && 1509219089Spjd strftime(timestr, 128, 0, &t) != 0) { 1510219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1511219089Spjd "\tReturning the pool to its state as of %s\n" 1512219089Spjd "\tshould correct the problem. 
"), 1513219089Spjd timestr); 1514219089Spjd } else { 1515219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1516219089Spjd "\tReverting the pool to an earlier state " 1517219089Spjd "should correct the problem.\n\t")); 1518219089Spjd } 1519219089Spjd 1520219089Spjd if (loss > 120) { 1521219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1522219089Spjd "Approximately %lld minutes of data\n" 1523219089Spjd "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1524219089Spjd } else if (loss > 0) { 1525219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1526219089Spjd "Approximately %lld seconds of data\n" 1527219089Spjd "\tmust be discarded, irreversibly. "), loss); 1528219089Spjd } 1529219089Spjd if (edata != 0 && edata != UINT64_MAX) { 1530219089Spjd if (edata == 1) { 1531219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1532219089Spjd "After rewind, at least\n" 1533219089Spjd "\tone persistent user-data error will remain. ")); 1534219089Spjd } else { 1535219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1536219089Spjd "After rewind, several\n" 1537219089Spjd "\tpersistent user-data errors will remain. ")); 1538219089Spjd } 1539219089Spjd } 1540219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1541219089Spjd "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1542219089Spjd reason >= 0 ? "clear" : "import", name); 1543219089Spjd 1544219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1545219089Spjd "A scrub of the pool\n" 1546219089Spjd "\tis strongly recommended after recovery.\n")); 1547219089Spjd return; 1548219089Spjd 1549219089Spjdno_info: 1550219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1551219089Spjd "Destroy and re-create the pool from\n\ta backup source.\n")); 1552219089Spjd} 1553219089Spjd 1554168404Spjd/* 1555185029Spjd * zpool_import() is a contracted interface. Should be kept the same 1556185029Spjd * if possible. 
1557185029Spjd * 1558185029Spjd * Applications should use zpool_import_props() to import a pool with 1559185029Spjd * new properties value to be set. 1560168404Spjd */ 1561168404Spjdint 1562168404Spjdzpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1563185029Spjd char *altroot) 1564168404Spjd{ 1565185029Spjd nvlist_t *props = NULL; 1566185029Spjd int ret; 1567185029Spjd 1568185029Spjd if (altroot != NULL) { 1569185029Spjd if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1570185029Spjd return (zfs_error_fmt(hdl, EZFS_NOMEM, 1571185029Spjd dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1572185029Spjd newname)); 1573185029Spjd } 1574185029Spjd 1575185029Spjd if (nvlist_add_string(props, 1576209962Smm zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1577209962Smm nvlist_add_string(props, 1578209962Smm zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1579185029Spjd nvlist_free(props); 1580185029Spjd return (zfs_error_fmt(hdl, EZFS_NOMEM, 1581185029Spjd dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1582185029Spjd newname)); 1583185029Spjd } 1584185029Spjd } 1585185029Spjd 1586219089Spjd ret = zpool_import_props(hdl, config, newname, props, 1587219089Spjd ZFS_IMPORT_NORMAL); 1588296528Smav nvlist_free(props); 1589185029Spjd return (ret); 1590185029Spjd} 1591185029Spjd 1592219089Spjdstatic void 1593219089Spjdprint_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1594219089Spjd int indent) 1595219089Spjd{ 1596219089Spjd nvlist_t **child; 1597219089Spjd uint_t c, children; 1598219089Spjd char *vname; 1599219089Spjd uint64_t is_log = 0; 1600219089Spjd 1601219089Spjd (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1602219089Spjd &is_log); 1603219089Spjd 1604219089Spjd if (name != NULL) 1605219089Spjd (void) printf("\t%*s%s%s\n", indent, "", name, 1606219089Spjd is_log ? 
" [log]" : ""); 1607219089Spjd 1608219089Spjd if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1609219089Spjd &child, &children) != 0) 1610219089Spjd return; 1611219089Spjd 1612219089Spjd for (c = 0; c < children; c++) { 1613219089Spjd vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); 1614219089Spjd print_vdev_tree(hdl, vname, child[c], indent + 2); 1615219089Spjd free(vname); 1616219089Spjd } 1617219089Spjd} 1618219089Spjd 1619236884Smmvoid 1620236884Smmzpool_print_unsup_feat(nvlist_t *config) 1621236884Smm{ 1622236884Smm nvlist_t *nvinfo, *unsup_feat; 1623236884Smm 1624236884Smm verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 1625236884Smm 0); 1626236884Smm verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT, 1627236884Smm &unsup_feat) == 0); 1628236884Smm 1629236884Smm for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL; 1630236884Smm nvp = nvlist_next_nvpair(unsup_feat, nvp)) { 1631236884Smm char *desc; 1632236884Smm 1633236884Smm verify(nvpair_type(nvp) == DATA_TYPE_STRING); 1634236884Smm verify(nvpair_value_string(nvp, &desc) == 0); 1635236884Smm 1636236884Smm if (strlen(desc) > 0) 1637236884Smm (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc); 1638236884Smm else 1639236884Smm (void) printf("\t%s\n", nvpair_name(nvp)); 1640236884Smm } 1641236884Smm} 1642236884Smm 1643185029Spjd/* 1644185029Spjd * Import the given pool using the known configuration and a list of 1645185029Spjd * properties to be set. The configuration should have come from 1646185029Spjd * zpool_find_import(). The 'newname' parameters control whether the pool 1647185029Spjd * is imported with a different name. 
1648185029Spjd */ 1649185029Spjdint 1650185029Spjdzpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1651219089Spjd nvlist_t *props, int flags) 1652185029Spjd{ 1653168404Spjd zfs_cmd_t zc = { 0 }; 1654219089Spjd zpool_rewind_policy_t policy; 1655219089Spjd nvlist_t *nv = NULL; 1656219089Spjd nvlist_t *nvinfo = NULL; 1657219089Spjd nvlist_t *missing = NULL; 1658168404Spjd char *thename; 1659168404Spjd char *origname; 1660168404Spjd int ret; 1661219089Spjd int error = 0; 1662185029Spjd char errbuf[1024]; 1663168404Spjd 1664168404Spjd verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1665168404Spjd &origname) == 0); 1666168404Spjd 1667185029Spjd (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1668185029Spjd "cannot import pool '%s'"), origname); 1669185029Spjd 1670168404Spjd if (newname != NULL) { 1671168404Spjd if (!zpool_name_valid(hdl, B_FALSE, newname)) 1672168404Spjd return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1673168404Spjd dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1674168404Spjd newname)); 1675168404Spjd thename = (char *)newname; 1676168404Spjd } else { 1677168404Spjd thename = origname; 1678168404Spjd } 1679168404Spjd 1680277433Sdelphij if (props != NULL) { 1681185029Spjd uint64_t version; 1682219089Spjd prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1683168404Spjd 1684185029Spjd verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1685185029Spjd &version) == 0); 1686185029Spjd 1687185029Spjd if ((props = zpool_valid_proplist(hdl, origname, 1688277433Sdelphij props, version, flags, errbuf)) == NULL) 1689185029Spjd return (-1); 1690277433Sdelphij if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1691185029Spjd nvlist_free(props); 1692185029Spjd return (-1); 1693185029Spjd } 1694277433Sdelphij nvlist_free(props); 1695185029Spjd } 1696185029Spjd 1697168404Spjd (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1698168404Spjd 1699168404Spjd verify(nvlist_lookup_uint64(config, 
ZPOOL_CONFIG_POOL_GUID, 1700168404Spjd &zc.zc_guid) == 0); 1701168404Spjd 1702185029Spjd if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1703277433Sdelphij zcmd_free_nvlists(&zc); 1704168404Spjd return (-1); 1705185029Spjd } 1706219089Spjd if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1707277433Sdelphij zcmd_free_nvlists(&zc); 1708219089Spjd return (-1); 1709219089Spjd } 1710168404Spjd 1711219089Spjd zc.zc_cookie = flags; 1712219089Spjd while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 1713219089Spjd errno == ENOMEM) { 1714219089Spjd if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 1715219089Spjd zcmd_free_nvlists(&zc); 1716219089Spjd return (-1); 1717219089Spjd } 1718219089Spjd } 1719219089Spjd if (ret != 0) 1720219089Spjd error = errno; 1721219089Spjd 1722219089Spjd (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 1723277433Sdelphij 1724277433Sdelphij zcmd_free_nvlists(&zc); 1725277433Sdelphij 1726219089Spjd zpool_get_rewind_policy(config, &policy); 1727219089Spjd 1728219089Spjd if (error) { 1729168404Spjd char desc[1024]; 1730219089Spjd 1731219089Spjd /* 1732219089Spjd * Dry-run failed, but we print out what success 1733219089Spjd * looks like if we found a best txg 1734219089Spjd */ 1735219089Spjd if (policy.zrp_request & ZPOOL_TRY_REWIND) { 1736219089Spjd zpool_rewind_exclaim(hdl, newname ? 
origname : thename, 1737219089Spjd B_TRUE, nv); 1738219089Spjd nvlist_free(nv); 1739219089Spjd return (-1); 1740219089Spjd } 1741219089Spjd 1742168404Spjd if (newname == NULL) 1743168404Spjd (void) snprintf(desc, sizeof (desc), 1744168404Spjd dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1745168404Spjd thename); 1746168404Spjd else 1747168404Spjd (void) snprintf(desc, sizeof (desc), 1748168404Spjd dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1749168404Spjd origname, thename); 1750168404Spjd 1751219089Spjd switch (error) { 1752168404Spjd case ENOTSUP: 1753236884Smm if (nv != NULL && nvlist_lookup_nvlist(nv, 1754236884Smm ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1755236884Smm nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) { 1756236884Smm (void) printf(dgettext(TEXT_DOMAIN, "This " 1757236884Smm "pool uses the following feature(s) not " 1758236884Smm "supported by this system:\n")); 1759236884Smm zpool_print_unsup_feat(nv); 1760236884Smm if (nvlist_exists(nvinfo, 1761236884Smm ZPOOL_CONFIG_CAN_RDONLY)) { 1762236884Smm (void) printf(dgettext(TEXT_DOMAIN, 1763236884Smm "All unsupported features are only " 1764236884Smm "required for writing to the pool." 1765236884Smm "\nThe pool can be imported using " 1766236884Smm "'-o readonly=on'.\n")); 1767236884Smm } 1768236884Smm } 1769168404Spjd /* 1770168404Spjd * Unsupported version. 
1771168404Spjd */ 1772168404Spjd (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1773168404Spjd break; 1774168404Spjd 1775168404Spjd case EINVAL: 1776168404Spjd (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1777168404Spjd break; 1778168404Spjd 1779219089Spjd case EROFS: 1780219089Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1781219089Spjd "one or more devices is read only")); 1782219089Spjd (void) zfs_error(hdl, EZFS_BADDEV, desc); 1783219089Spjd break; 1784219089Spjd 1785219089Spjd case ENXIO: 1786219089Spjd if (nv && nvlist_lookup_nvlist(nv, 1787219089Spjd ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1788219089Spjd nvlist_lookup_nvlist(nvinfo, 1789219089Spjd ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 1790219089Spjd (void) printf(dgettext(TEXT_DOMAIN, 1791219089Spjd "The devices below are missing, use " 1792219089Spjd "'-m' to import the pool anyway:\n")); 1793219089Spjd print_vdev_tree(hdl, NULL, missing, 2); 1794219089Spjd (void) printf("\n"); 1795219089Spjd } 1796219089Spjd (void) zpool_standard_error(hdl, error, desc); 1797219089Spjd break; 1798219089Spjd 1799219089Spjd case EEXIST: 1800219089Spjd (void) zpool_standard_error(hdl, error, desc); 1801219089Spjd break; 1802307100Smav case ENAMETOOLONG: 1803307100Smav zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1804307100Smav "new name of at least one dataset is longer than " 1805307100Smav "the maximum allowable length")); 1806307100Smav (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc); 1807307100Smav break; 1808168404Spjd default: 1809219089Spjd (void) zpool_standard_error(hdl, error, desc); 1810219089Spjd zpool_explain_recover(hdl, 1811219089Spjd newname ? origname : thename, -error, nv); 1812219089Spjd break; 1813168404Spjd } 1814168404Spjd 1815219089Spjd nvlist_free(nv); 1816168404Spjd ret = -1; 1817168404Spjd } else { 1818168404Spjd zpool_handle_t *zhp; 1819185029Spjd 1820168404Spjd /* 1821168404Spjd * This should never fail, but play it safe anyway. 
1822168404Spjd */ 1823219089Spjd if (zpool_open_silent(hdl, thename, &zhp) != 0) 1824168404Spjd ret = -1; 1825219089Spjd else if (zhp != NULL) 1826168404Spjd zpool_close(zhp); 1827219089Spjd if (policy.zrp_request & 1828219089Spjd (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 1829219089Spjd zpool_rewind_exclaim(hdl, newname ? origname : thename, 1830219089Spjd ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv); 1831168404Spjd } 1832219089Spjd nvlist_free(nv); 1833219089Spjd return (0); 1834168404Spjd } 1835168404Spjd 1836168404Spjd return (ret); 1837168404Spjd} 1838168404Spjd 1839168404Spjd/* 1840219089Spjd * Scan the pool. 1841168404Spjd */ 1842168404Spjdint 1843219089Spjdzpool_scan(zpool_handle_t *zhp, pool_scan_func_t func) 1844168404Spjd{ 1845168404Spjd zfs_cmd_t zc = { 0 }; 1846168404Spjd char msg[1024]; 1847168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 1848168404Spjd 1849168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1850219089Spjd zc.zc_cookie = func; 1851168404Spjd 1852219089Spjd if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 || 1853219089Spjd (errno == ENOENT && func != POOL_SCAN_NONE)) 1854168404Spjd return (0); 1855168404Spjd 1856219089Spjd if (func == POOL_SCAN_SCRUB) { 1857219089Spjd (void) snprintf(msg, sizeof (msg), 1858219089Spjd dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name); 1859219089Spjd } else if (func == POOL_SCAN_NONE) { 1860219089Spjd (void) snprintf(msg, sizeof (msg), 1861219089Spjd dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 1862219089Spjd zc.zc_name); 1863219089Spjd } else { 1864219089Spjd assert(!"unexpected result"); 1865219089Spjd } 1866168404Spjd 1867219089Spjd if (errno == EBUSY) { 1868219089Spjd nvlist_t *nvroot; 1869219089Spjd pool_scan_stat_t *ps = NULL; 1870219089Spjd uint_t psc; 1871219089Spjd 1872219089Spjd verify(nvlist_lookup_nvlist(zhp->zpool_config, 1873219089Spjd ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1874219089Spjd (void) nvlist_lookup_uint64_array(nvroot, 1875219089Spjd 
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 1876219089Spjd if (ps && ps->pss_func == POOL_SCAN_SCRUB) 1877219089Spjd return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 1878219089Spjd else 1879219089Spjd return (zfs_error(hdl, EZFS_RESILVERING, msg)); 1880219089Spjd } else if (errno == ENOENT) { 1881219089Spjd return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 1882219089Spjd } else { 1883168404Spjd return (zpool_standard_error(hdl, errno, msg)); 1884219089Spjd } 1885168404Spjd} 1886168404Spjd 1887277239Ssmh#ifdef illumos 1888168404Spjd/* 1889219089Spjd * This provides a very minimal check whether a given string is likely a 1890219089Spjd * c#t#d# style string. Users of this are expected to do their own 1891219089Spjd * verification of the s# part. 1892219089Spjd */ 1893219089Spjd#define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1])) 1894219089Spjd 1895219089Spjd/* 1896219089Spjd * More elaborate version for ones which may start with "/dev/dsk/" 1897219089Spjd * and the like. 1898219089Spjd */ 1899219089Spjdstatic int 1900289562Smavctd_check_path(char *str) 1901289562Smav{ 1902219089Spjd /* 1903219089Spjd * If it starts with a slash, check the last component. 1904219089Spjd */ 1905219089Spjd if (str && str[0] == '/') { 1906219089Spjd char *tmp = strrchr(str, '/'); 1907219089Spjd 1908219089Spjd /* 1909219089Spjd * If it ends in "/old", check the second-to-last 1910219089Spjd * component of the string instead. 1911219089Spjd */ 1912219089Spjd if (tmp != str && strcmp(tmp, "/old") == 0) { 1913219089Spjd for (tmp--; *tmp != '/'; tmp--) 1914219089Spjd ; 1915219089Spjd } 1916219089Spjd str = tmp + 1; 1917219089Spjd } 1918219089Spjd return (CTD_CHECK(str)); 1919219089Spjd} 1920277239Ssmh#endif 1921219089Spjd 1922219089Spjd/* 1923219089Spjd * Find a vdev that matches the search criteria specified. We use the 1924219089Spjd * the nvpair name to determine how we should look for the device. 
1925168404Spjd * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL 1926168404Spjd * spare; but FALSE if its an INUSE spare. 1927168404Spjd */ 1928168404Spjdstatic nvlist_t * 1929219089Spjdvdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare, 1930219089Spjd boolean_t *l2cache, boolean_t *log) 1931168404Spjd{ 1932168404Spjd uint_t c, children; 1933168404Spjd nvlist_t **child; 1934168404Spjd nvlist_t *ret; 1935185029Spjd uint64_t is_log; 1936219089Spjd char *srchkey; 1937219089Spjd nvpair_t *pair = nvlist_next_nvpair(search, NULL); 1938168404Spjd 1939219089Spjd /* Nothing to look for */ 1940219089Spjd if (search == NULL || pair == NULL) 1941219089Spjd return (NULL); 1942168404Spjd 1943219089Spjd /* Obtain the key we will use to search */ 1944219089Spjd srchkey = nvpair_name(pair); 1945219089Spjd 1946219089Spjd switch (nvpair_type(pair)) { 1947219089Spjd case DATA_TYPE_UINT64: 1948219089Spjd if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) { 1949219089Spjd uint64_t srchval, theguid; 1950219089Spjd 1951219089Spjd verify(nvpair_value_uint64(pair, &srchval) == 0); 1952219089Spjd verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 1953219089Spjd &theguid) == 0); 1954219089Spjd if (theguid == srchval) 1955219089Spjd return (nv); 1956219089Spjd } 1957219089Spjd break; 1958219089Spjd 1959219089Spjd case DATA_TYPE_STRING: { 1960219089Spjd char *srchval, *val; 1961219089Spjd 1962219089Spjd verify(nvpair_value_string(pair, &srchval) == 0); 1963219089Spjd if (nvlist_lookup_string(nv, srchkey, &val) != 0) 1964219089Spjd break; 1965219089Spjd 1966168404Spjd /* 1967219089Spjd * Search for the requested value. Special cases: 1968219089Spjd * 1969219089Spjd * - ZPOOL_CONFIG_PATH for whole disk entries. These end in 1970219089Spjd * "s0" or "s0/old". The "s0" part is hidden from the user, 1971219089Spjd * but included in the string, so this matches around it. 1972219089Spjd * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE). 
1973219089Spjd * 1974219089Spjd * Otherwise, all other searches are simple string compares. 1975168404Spjd */ 1976277239Ssmh#ifdef illumos 1977219089Spjd if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && 1978219089Spjd ctd_check_path(val)) { 1979219089Spjd uint64_t wholedisk = 0; 1980219089Spjd 1981219089Spjd (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 1982219089Spjd &wholedisk); 1983219089Spjd if (wholedisk) { 1984219089Spjd int slen = strlen(srchval); 1985219089Spjd int vlen = strlen(val); 1986219089Spjd 1987219089Spjd if (slen != vlen - 2) 1988219089Spjd break; 1989219089Spjd 1990219089Spjd /* 1991219089Spjd * make_leaf_vdev() should only set 1992219089Spjd * wholedisk for ZPOOL_CONFIG_PATHs which 1993219089Spjd * will include "/dev/dsk/", giving plenty of 1994219089Spjd * room for the indices used next. 1995219089Spjd */ 1996219089Spjd ASSERT(vlen >= 6); 1997219089Spjd 1998219089Spjd /* 1999219089Spjd * strings identical except trailing "s0" 2000219089Spjd */ 2001219089Spjd if (strcmp(&val[vlen - 2], "s0") == 0 && 2002219089Spjd strncmp(srchval, val, slen) == 0) 2003219089Spjd return (nv); 2004219089Spjd 2005219089Spjd /* 2006219089Spjd * strings identical except trailing "s0/old" 2007219089Spjd */ 2008219089Spjd if (strcmp(&val[vlen - 6], "s0/old") == 0 && 2009219089Spjd strcmp(&srchval[slen - 4], "/old") == 0 && 2010219089Spjd strncmp(srchval, val, slen - 4) == 0) 2011219089Spjd return (nv); 2012219089Spjd 2013219089Spjd break; 2014219089Spjd } 2015219089Spjd } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 2016277239Ssmh#else 2017277239Ssmh if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 2018277239Ssmh#endif 2019219089Spjd char *type, *idx, *end, *p; 2020219089Spjd uint64_t id, vdev_id; 2021219089Spjd 2022168404Spjd /* 2023219089Spjd * Determine our vdev type, keeping in mind 2024219089Spjd * that the srchval is composed of a type and 2025219089Spjd * vdev id pair (i.e. mirror-4). 
2026168404Spjd */ 2027219089Spjd if ((type = strdup(srchval)) == NULL) 2028219089Spjd return (NULL); 2029219089Spjd 2030219089Spjd if ((p = strrchr(type, '-')) == NULL) { 2031219089Spjd free(type); 2032219089Spjd break; 2033219089Spjd } 2034219089Spjd idx = p + 1; 2035219089Spjd *p = '\0'; 2036219089Spjd 2037219089Spjd /* 2038219089Spjd * If the types don't match then keep looking. 2039219089Spjd */ 2040219089Spjd if (strncmp(val, type, strlen(val)) != 0) { 2041219089Spjd free(type); 2042219089Spjd break; 2043219089Spjd } 2044219089Spjd 2045219089Spjd verify(strncmp(type, VDEV_TYPE_RAIDZ, 2046219089Spjd strlen(VDEV_TYPE_RAIDZ)) == 0 || 2047219089Spjd strncmp(type, VDEV_TYPE_MIRROR, 2048219089Spjd strlen(VDEV_TYPE_MIRROR)) == 0); 2049219089Spjd verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 2050219089Spjd &id) == 0); 2051219089Spjd 2052219089Spjd errno = 0; 2053219089Spjd vdev_id = strtoull(idx, &end, 10); 2054219089Spjd 2055219089Spjd free(type); 2056219089Spjd if (errno != 0) 2057219089Spjd return (NULL); 2058219089Spjd 2059219089Spjd /* 2060219089Spjd * Now verify that we have the correct vdev id. 2061219089Spjd */ 2062219089Spjd if (vdev_id == id) 2063168404Spjd return (nv); 2064219089Spjd } 2065219089Spjd 2066219089Spjd /* 2067219089Spjd * Common case 2068219089Spjd */ 2069219089Spjd if (strcmp(srchval, val) == 0) 2070168404Spjd return (nv); 2071219089Spjd break; 2072168404Spjd } 2073168404Spjd 2074219089Spjd default: 2075219089Spjd break; 2076219089Spjd } 2077219089Spjd 2078168404Spjd if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2079168404Spjd &child, &children) != 0) 2080168404Spjd return (NULL); 2081168404Spjd 2082185029Spjd for (c = 0; c < children; c++) { 2083219089Spjd if ((ret = vdev_to_nvlist_iter(child[c], search, 2084185029Spjd avail_spare, l2cache, NULL)) != NULL) { 2085185029Spjd /* 2086185029Spjd * The 'is_log' value is only set for the toplevel 2087185029Spjd * vdev, not the leaf vdevs. 
So we always lookup the 2088185029Spjd * log device from the root of the vdev tree (where 2089185029Spjd * 'log' is non-NULL). 2090185029Spjd */ 2091185029Spjd if (log != NULL && 2092185029Spjd nvlist_lookup_uint64(child[c], 2093185029Spjd ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 2094185029Spjd is_log) { 2095185029Spjd *log = B_TRUE; 2096185029Spjd } 2097168404Spjd return (ret); 2098185029Spjd } 2099185029Spjd } 2100168404Spjd 2101168404Spjd if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2102168404Spjd &child, &children) == 0) { 2103168404Spjd for (c = 0; c < children; c++) { 2104219089Spjd if ((ret = vdev_to_nvlist_iter(child[c], search, 2105185029Spjd avail_spare, l2cache, NULL)) != NULL) { 2106168404Spjd *avail_spare = B_TRUE; 2107168404Spjd return (ret); 2108168404Spjd } 2109168404Spjd } 2110168404Spjd } 2111168404Spjd 2112185029Spjd if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2113185029Spjd &child, &children) == 0) { 2114185029Spjd for (c = 0; c < children; c++) { 2115219089Spjd if ((ret = vdev_to_nvlist_iter(child[c], search, 2116185029Spjd avail_spare, l2cache, NULL)) != NULL) { 2117185029Spjd *l2cache = B_TRUE; 2118185029Spjd return (ret); 2119185029Spjd } 2120185029Spjd } 2121185029Spjd } 2122185029Spjd 2123168404Spjd return (NULL); 2124168404Spjd} 2125168404Spjd 2126219089Spjd/* 2127219089Spjd * Given a physical path (minus the "/devices" prefix), find the 2128219089Spjd * associated vdev. 
2129219089Spjd */ 2130168404Spjdnvlist_t * 2131219089Spjdzpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 2132219089Spjd boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 2133219089Spjd{ 2134219089Spjd nvlist_t *search, *nvroot, *ret; 2135219089Spjd 2136219089Spjd verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2137219089Spjd verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); 2138219089Spjd 2139219089Spjd verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2140219089Spjd &nvroot) == 0); 2141219089Spjd 2142219089Spjd *avail_spare = B_FALSE; 2143219089Spjd *l2cache = B_FALSE; 2144219089Spjd if (log != NULL) 2145219089Spjd *log = B_FALSE; 2146219089Spjd ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2147219089Spjd nvlist_free(search); 2148219089Spjd 2149219089Spjd return (ret); 2150219089Spjd} 2151219089Spjd 2152219089Spjd/* 2153219089Spjd * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 
2154219089Spjd */ 2155219089Spjdboolean_t 2156219089Spjdzpool_vdev_is_interior(const char *name) 2157219089Spjd{ 2158219089Spjd if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2159219089Spjd strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2160219089Spjd return (B_TRUE); 2161219089Spjd return (B_FALSE); 2162219089Spjd} 2163219089Spjd 2164219089Spjdnvlist_t * 2165185029Spjdzpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2166185029Spjd boolean_t *l2cache, boolean_t *log) 2167168404Spjd{ 2168168404Spjd char buf[MAXPATHLEN]; 2169168404Spjd char *end; 2170219089Spjd nvlist_t *nvroot, *search, *ret; 2171168404Spjd uint64_t guid; 2172168404Spjd 2173219089Spjd verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2174219089Spjd 2175168404Spjd guid = strtoull(path, &end, 10); 2176168404Spjd if (guid != 0 && *end == '\0') { 2177219089Spjd verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2178219089Spjd } else if (zpool_vdev_is_interior(path)) { 2179219089Spjd verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2180168404Spjd } else if (path[0] != '/') { 2181168404Spjd (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path); 2182219089Spjd verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 2183168404Spjd } else { 2184219089Spjd verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2185168404Spjd } 2186168404Spjd 2187168404Spjd verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2188168404Spjd &nvroot) == 0); 2189168404Spjd 2190168404Spjd *avail_spare = B_FALSE; 2191185029Spjd *l2cache = B_FALSE; 2192185029Spjd if (log != NULL) 2193185029Spjd *log = B_FALSE; 2194219089Spjd ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2195219089Spjd nvlist_free(search); 2196219089Spjd 2197219089Spjd return (ret); 2198168404Spjd} 2199168404Spjd 2200185029Spjdstatic int 2201185029Spjdvdev_online(nvlist_t *nv) 2202185029Spjd{ 
2203185029Spjd uint64_t ival; 2204185029Spjd 2205185029Spjd if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2206185029Spjd nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2207185029Spjd nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2208185029Spjd return (0); 2209185029Spjd 2210185029Spjd return (1); 2211185029Spjd} 2212185029Spjd 2213168404Spjd/* 2214219089Spjd * Helper function for zpool_get_physpaths(). 2215168404Spjd */ 2216219089Spjdstatic int 2217219089Spjdvdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2218219089Spjd size_t *bytes_written) 2219185029Spjd{ 2220219089Spjd size_t bytes_left, pos, rsz; 2221219089Spjd char *tmppath; 2222219089Spjd const char *format; 2223185029Spjd 2224219089Spjd if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2225219089Spjd &tmppath) != 0) 2226219089Spjd return (EZFS_NODEVICE); 2227185029Spjd 2228219089Spjd pos = *bytes_written; 2229219089Spjd bytes_left = physpath_size - pos; 2230219089Spjd format = (pos == 0) ? 
"%s" : " %s"; 2231185029Spjd 2232219089Spjd rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2233219089Spjd *bytes_written += rsz; 2234185029Spjd 2235219089Spjd if (rsz >= bytes_left) { 2236219089Spjd /* if physpath was not copied properly, clear it */ 2237219089Spjd if (bytes_left != 0) { 2238219089Spjd physpath[pos] = 0; 2239219089Spjd } 2240219089Spjd return (EZFS_NOSPC); 2241219089Spjd } 2242219089Spjd return (0); 2243219089Spjd} 2244185029Spjd 2245219089Spjdstatic int 2246219089Spjdvdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2247219089Spjd size_t *rsz, boolean_t is_spare) 2248219089Spjd{ 2249219089Spjd char *type; 2250219089Spjd int ret; 2251185029Spjd 2252219089Spjd if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2253219089Spjd return (EZFS_INVALCONFIG); 2254185029Spjd 2255219089Spjd if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2256219089Spjd /* 2257219089Spjd * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2258219089Spjd * For a spare vdev, we only want to boot from the active 2259219089Spjd * spare device. 
2260219089Spjd */ 2261219089Spjd if (is_spare) { 2262219089Spjd uint64_t spare = 0; 2263219089Spjd (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2264219089Spjd &spare); 2265219089Spjd if (!spare) 2266219089Spjd return (EZFS_INVALCONFIG); 2267219089Spjd } 2268185029Spjd 2269219089Spjd if (vdev_online(nv)) { 2270219089Spjd if ((ret = vdev_get_one_physpath(nv, physpath, 2271219089Spjd phypath_size, rsz)) != 0) 2272219089Spjd return (ret); 2273219089Spjd } 2274219089Spjd } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2275219089Spjd strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2276219089Spjd (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2277219089Spjd nvlist_t **child; 2278219089Spjd uint_t count; 2279219089Spjd int i, ret; 2280185029Spjd 2281219089Spjd if (nvlist_lookup_nvlist_array(nv, 2282219089Spjd ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2283219089Spjd return (EZFS_INVALCONFIG); 2284219089Spjd 2285219089Spjd for (i = 0; i < count; i++) { 2286219089Spjd ret = vdev_get_physpaths(child[i], physpath, 2287219089Spjd phypath_size, rsz, is_spare); 2288219089Spjd if (ret == EZFS_NOSPC) 2289219089Spjd return (ret); 2290185029Spjd } 2291185029Spjd } 2292185029Spjd 2293219089Spjd return (EZFS_POOL_INVALARG); 2294185029Spjd} 2295185029Spjd 2296185029Spjd/* 2297219089Spjd * Get phys_path for a root pool config. 2298219089Spjd * Return 0 on success; non-zero on failure. 
2299185029Spjd */ 2300219089Spjdstatic int 2301219089Spjdzpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2302168404Spjd{ 2303219089Spjd size_t rsz; 2304219089Spjd nvlist_t *vdev_root; 2305219089Spjd nvlist_t **child; 2306185029Spjd uint_t count; 2307219089Spjd char *type; 2308168404Spjd 2309219089Spjd rsz = 0; 2310219089Spjd 2311219089Spjd if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2312219089Spjd &vdev_root) != 0) 2313219089Spjd return (EZFS_INVALCONFIG); 2314219089Spjd 2315219089Spjd if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2316219089Spjd nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2317219089Spjd &child, &count) != 0) 2318219089Spjd return (EZFS_INVALCONFIG); 2319219089Spjd 2320219089Spjd /* 2321289527Smav * root pool can only have a single top-level vdev. 2322219089Spjd */ 2323289527Smav if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1) 2324219089Spjd return (EZFS_POOL_INVALARG); 2325219089Spjd 2326219089Spjd (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2327219089Spjd B_FALSE); 2328219089Spjd 2329219089Spjd /* No online devices */ 2330219089Spjd if (rsz == 0) 2331219089Spjd return (EZFS_NODEVICE); 2332219089Spjd 2333219089Spjd return (0); 2334219089Spjd} 2335219089Spjd 2336219089Spjd/* 2337219089Spjd * Get phys_path for a root pool 2338219089Spjd * Return 0 on success; non-zero on failure. 2339219089Spjd */ 2340219089Spjdint 2341219089Spjdzpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2342219089Spjd{ 2343219089Spjd return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2344219089Spjd phypath_size)); 2345219089Spjd} 2346219089Spjd 2347219089Spjd/* 2348219089Spjd * If the device has being dynamically expanded then we need to relabel 2349219089Spjd * the disk to use the new unallocated space. 
2350219089Spjd */ 2351219089Spjdstatic int 2352219089Spjdzpool_relabel_disk(libzfs_handle_t *hdl, const char *name) 2353219089Spjd{ 2354277300Ssmh#ifdef illumos 2355219089Spjd char path[MAXPATHLEN]; 2356219089Spjd char errbuf[1024]; 2357219089Spjd int fd, error; 2358219089Spjd int (*_efi_use_whole_disk)(int); 2359219089Spjd 2360219089Spjd if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, 2361219089Spjd "efi_use_whole_disk")) == NULL) 2362219089Spjd return (-1); 2363219089Spjd 2364297763Smav (void) snprintf(path, sizeof (path), "%s/%s", ZFS_RDISK_ROOT, name); 2365219089Spjd 2366219089Spjd if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2367219089Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2368219089Spjd "relabel '%s': unable to open device"), name); 2369219089Spjd return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2370168404Spjd } 2371168404Spjd 2372219089Spjd /* 2373219089Spjd * It's possible that we might encounter an error if the device 2374219089Spjd * does not have any unallocated space left. If so, we simply 2375219089Spjd * ignore that error and continue on. 2376219089Spjd */ 2377219089Spjd error = _efi_use_whole_disk(fd); 2378219089Spjd (void) close(fd); 2379219089Spjd if (error && error != VT_ENOSPC) { 2380219089Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2381219089Spjd "relabel '%s': unable to read disk capacity"), name); 2382219089Spjd return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2383219089Spjd } 2384277300Ssmh#endif /* illumos */ 2385219089Spjd return (0); 2386168404Spjd} 2387168404Spjd 2388168404Spjd/* 2389185029Spjd * Bring the specified vdev online. The 'flags' parameter is a set of the 2390185029Spjd * ZFS_ONLINE_* flags. 
2391168404Spjd */ 2392168404Spjdint 2393185029Spjdzpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2394185029Spjd vdev_state_t *newstate) 2395168404Spjd{ 2396168404Spjd zfs_cmd_t zc = { 0 }; 2397168404Spjd char msg[1024]; 2398168404Spjd nvlist_t *tgt; 2399219089Spjd boolean_t avail_spare, l2cache, islog; 2400168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 2401168404Spjd 2402219089Spjd if (flags & ZFS_ONLINE_EXPAND) { 2403219089Spjd (void) snprintf(msg, sizeof (msg), 2404219089Spjd dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2405219089Spjd } else { 2406219089Spjd (void) snprintf(msg, sizeof (msg), 2407219089Spjd dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2408219089Spjd } 2409168404Spjd 2410168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2411185029Spjd if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2412219089Spjd &islog)) == NULL) 2413168404Spjd return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2414168404Spjd 2415168404Spjd verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2416168404Spjd 2417219089Spjd if (avail_spare) 2418168404Spjd return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2419168404Spjd 2420219089Spjd if (flags & ZFS_ONLINE_EXPAND || 2421219089Spjd zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) { 2422219089Spjd char *pathname = NULL; 2423219089Spjd uint64_t wholedisk = 0; 2424219089Spjd 2425219089Spjd (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2426219089Spjd &wholedisk); 2427219089Spjd verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, 2428219089Spjd &pathname) == 0); 2429219089Spjd 2430219089Spjd /* 2431219089Spjd * XXX - L2ARC 1.0 devices can't support expansion. 
2432219089Spjd */ 2433219089Spjd if (l2cache) { 2434219089Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2435219089Spjd "cannot expand cache devices")); 2436219089Spjd return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2437219089Spjd } 2438219089Spjd 2439219089Spjd if (wholedisk) { 2440297763Smav pathname += strlen(ZFS_DISK_ROOT) + 1; 2441219089Spjd (void) zpool_relabel_disk(hdl, pathname); 2442219089Spjd } 2443219089Spjd } 2444219089Spjd 2445185029Spjd zc.zc_cookie = VDEV_STATE_ONLINE; 2446185029Spjd zc.zc_obj = flags; 2447168404Spjd 2448219089Spjd if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2449219089Spjd if (errno == EINVAL) { 2450219089Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2451219089Spjd "from this pool into a new one. Use '%s' " 2452219089Spjd "instead"), "zpool detach"); 2453219089Spjd return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2454219089Spjd } 2455185029Spjd return (zpool_standard_error(hdl, errno, msg)); 2456219089Spjd } 2457185029Spjd 2458185029Spjd *newstate = zc.zc_cookie; 2459185029Spjd return (0); 2460168404Spjd} 2461168404Spjd 2462168404Spjd/* 2463168404Spjd * Take the specified vdev offline 2464168404Spjd */ 2465168404Spjdint 2466185029Spjdzpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2467168404Spjd{ 2468168404Spjd zfs_cmd_t zc = { 0 }; 2469168404Spjd char msg[1024]; 2470168404Spjd nvlist_t *tgt; 2471185029Spjd boolean_t avail_spare, l2cache; 2472168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 2473168404Spjd 2474168404Spjd (void) snprintf(msg, sizeof (msg), 2475168404Spjd dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2476168404Spjd 2477168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2478185029Spjd if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2479185029Spjd NULL)) == NULL) 2480168404Spjd return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2481168404Spjd 2482168404Spjd verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 
2483168404Spjd 2484219089Spjd if (avail_spare) 2485168404Spjd return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2486168404Spjd 2487185029Spjd zc.zc_cookie = VDEV_STATE_OFFLINE; 2488185029Spjd zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0; 2489168404Spjd 2490219089Spjd if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2491168404Spjd return (0); 2492168404Spjd 2493168404Spjd switch (errno) { 2494168404Spjd case EBUSY: 2495168404Spjd 2496168404Spjd /* 2497168404Spjd * There are no other replicas of this device. 2498168404Spjd */ 2499168404Spjd return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2500168404Spjd 2501219089Spjd case EEXIST: 2502219089Spjd /* 2503219089Spjd * The log device has unplayed logs 2504219089Spjd */ 2505219089Spjd return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2506219089Spjd 2507168404Spjd default: 2508168404Spjd return (zpool_standard_error(hdl, errno, msg)); 2509168404Spjd } 2510168404Spjd} 2511168404Spjd 2512168404Spjd/* 2513185029Spjd * Mark the given vdev faulted. 2514185029Spjd */ 2515185029Spjdint 2516219089Spjdzpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2517185029Spjd{ 2518185029Spjd zfs_cmd_t zc = { 0 }; 2519185029Spjd char msg[1024]; 2520185029Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 2521185029Spjd 2522185029Spjd (void) snprintf(msg, sizeof (msg), 2523185029Spjd dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2524185029Spjd 2525185029Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2526185029Spjd zc.zc_guid = guid; 2527185029Spjd zc.zc_cookie = VDEV_STATE_FAULTED; 2528219089Spjd zc.zc_obj = aux; 2529185029Spjd 2530219089Spjd if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2531185029Spjd return (0); 2532185029Spjd 2533185029Spjd switch (errno) { 2534185029Spjd case EBUSY: 2535185029Spjd 2536185029Spjd /* 2537185029Spjd * There are no other replicas of this device. 
2538185029Spjd */ 2539185029Spjd return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2540185029Spjd 2541185029Spjd default: 2542185029Spjd return (zpool_standard_error(hdl, errno, msg)); 2543185029Spjd } 2544185029Spjd 2545185029Spjd} 2546185029Spjd 2547185029Spjd/* 2548185029Spjd * Mark the given vdev degraded. 2549185029Spjd */ 2550185029Spjdint 2551219089Spjdzpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2552185029Spjd{ 2553185029Spjd zfs_cmd_t zc = { 0 }; 2554185029Spjd char msg[1024]; 2555185029Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 2556185029Spjd 2557185029Spjd (void) snprintf(msg, sizeof (msg), 2558185029Spjd dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2559185029Spjd 2560185029Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2561185029Spjd zc.zc_guid = guid; 2562185029Spjd zc.zc_cookie = VDEV_STATE_DEGRADED; 2563219089Spjd zc.zc_obj = aux; 2564185029Spjd 2565219089Spjd if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2566185029Spjd return (0); 2567185029Spjd 2568185029Spjd return (zpool_standard_error(hdl, errno, msg)); 2569185029Spjd} 2570185029Spjd 2571185029Spjd/* 2572168404Spjd * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2573168404Spjd * a hot spare. 
2574168404Spjd */ 2575168404Spjdstatic boolean_t 2576168404Spjdis_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2577168404Spjd{ 2578168404Spjd nvlist_t **child; 2579168404Spjd uint_t c, children; 2580168404Spjd char *type; 2581168404Spjd 2582168404Spjd if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2583168404Spjd &children) == 0) { 2584168404Spjd verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2585168404Spjd &type) == 0); 2586168404Spjd 2587168404Spjd if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2588168404Spjd children == 2 && child[which] == tgt) 2589168404Spjd return (B_TRUE); 2590168404Spjd 2591168404Spjd for (c = 0; c < children; c++) 2592168404Spjd if (is_replacing_spare(child[c], tgt, which)) 2593168404Spjd return (B_TRUE); 2594168404Spjd } 2595168404Spjd 2596168404Spjd return (B_FALSE); 2597168404Spjd} 2598168404Spjd 2599168404Spjd/* 2600168404Spjd * Attach new_disk (fully described by nvroot) to old_disk. 2601185029Spjd * If 'replacing' is specified, the new disk will replace the old one. 
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	/* Error prefix for all failure paths below. */
	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	/* Hot spares and cache devices cannot be attach/replace targets. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	/* zc_cookie tells the kernel attach (0) vs. replace (non-zero). */
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	/* Display name of the new disk, used for the spare check below. */
	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
			(void) fprintf(stderr, "\n");
			/* FreeBSD-specific hint: boot blocks must be
			 * reinstalled on the newly attached disk. */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If "
			    "you boot from pool '%s', you may need to update\n"
			    "boot code on newly attached disk '%s'.\n\n"
			    "Assuming you use GPT partitioning and 'da0' is "
			    "your new boot disk\n"
			    "you may use the following command:\n\n"
			    "\tgpart bootcode -b /boot/pmbr -p "
			    "/boot/gptzfsboot -i 1 da0\n\n"),
			    zhp->zpool_name, new_disk);
		}
		return (0);
	}

	/* Translate kernel errno into a specific, user-actionable message. */
	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	/* Spares and cache devices are removed, not detached. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc;

	for (mc = 0; mc < mchildren; mc++) {
		uint_t sc;
		/* Compare by display name, so user-supplied paths match. */
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], B_FALSE);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], B_FALSE);
			boolean_t result = (strcmp(mpath, spath) == 0);

			free(spath);
			if (result) {
				free(mpath);
				return (mc);
			}
		}

		free(mpath);
	}

	return (-1);
}

/*
 * Split a mirror pool.
 * If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers;
	/*
	 * freelist: we allocated *newroot ourselves and must free it on
	 * failure.  memory_err: set until proven otherwise, so any 'goto out'
	 * before the ioctl defaults to reporting out-of-memory.
	 */
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		/* Validate properties as if they were given at import time. */
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			/*
			 * lastlog remembers where a trailing run of
			 * log/hole vdevs begins so it can be trimmed off
			 * when building the final child list.
			 */
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);
		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}

/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
3083168404Spjd */ 3084168404Spjdint 3085168404Spjdzpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3086168404Spjd{ 3087168404Spjd zfs_cmd_t zc = { 0 }; 3088168404Spjd char msg[1024]; 3089168404Spjd nvlist_t *tgt; 3090219089Spjd boolean_t avail_spare, l2cache, islog; 3091168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 3092219089Spjd uint64_t version; 3093168404Spjd 3094168404Spjd (void) snprintf(msg, sizeof (msg), 3095168404Spjd dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3096168404Spjd 3097168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3098185029Spjd if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3099219089Spjd &islog)) == 0) 3100168404Spjd return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3101219089Spjd /* 3102219089Spjd * XXX - this should just go away. 3103219089Spjd */ 3104219089Spjd if (!avail_spare && !l2cache && !islog) { 3105168404Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3106219089Spjd "only inactive hot spares, cache, top-level, " 3107219089Spjd "or log devices can be removed")); 3108168404Spjd return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3109168404Spjd } 3110168404Spjd 3111219089Spjd version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 3112219089Spjd if (islog && version < SPA_VERSION_HOLES) { 3113219089Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3114219089Spjd "pool must be upgrade to support log removal")); 3115219089Spjd return (zfs_error(hdl, EZFS_BADVERSION, msg)); 3116219089Spjd } 3117219089Spjd 3118168404Spjd verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3119168404Spjd 3120185029Spjd if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3121168404Spjd return (0); 3122168404Spjd 3123168404Spjd return (zpool_standard_error(hdl, errno, msg)); 3124168404Spjd} 3125168404Spjd 3126168404Spjd/* 3127168404Spjd * Clear the errors for the pool, or the particular device if specified. 
3128168404Spjd */ 3129168404Spjdint 3130219089Spjdzpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 3131168404Spjd{ 3132168404Spjd zfs_cmd_t zc = { 0 }; 3133168404Spjd char msg[1024]; 3134168404Spjd nvlist_t *tgt; 3135219089Spjd zpool_rewind_policy_t policy; 3136185029Spjd boolean_t avail_spare, l2cache; 3137168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 3138219089Spjd nvlist_t *nvi = NULL; 3139219089Spjd int error; 3140168404Spjd 3141168404Spjd if (path) 3142168404Spjd (void) snprintf(msg, sizeof (msg), 3143168404Spjd dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3144168404Spjd path); 3145168404Spjd else 3146168404Spjd (void) snprintf(msg, sizeof (msg), 3147168404Spjd dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3148168404Spjd zhp->zpool_name); 3149168404Spjd 3150168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3151168404Spjd if (path) { 3152185029Spjd if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 3153185029Spjd &l2cache, NULL)) == 0) 3154168404Spjd return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3155168404Spjd 3156185029Spjd /* 3157185029Spjd * Don't allow error clearing for hot spares. Do allow 3158185029Spjd * error clearing for l2cache devices. 
3159185029Spjd */ 3160168404Spjd if (avail_spare) 3161168404Spjd return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3162168404Spjd 3163168404Spjd verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 3164168404Spjd &zc.zc_guid) == 0); 3165168404Spjd } 3166168404Spjd 3167219089Spjd zpool_get_rewind_policy(rewindnvl, &policy); 3168219089Spjd zc.zc_cookie = policy.zrp_request; 3169219089Spjd 3170219089Spjd if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 3171219089Spjd return (-1); 3172219089Spjd 3173219089Spjd if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 3174219089Spjd return (-1); 3175219089Spjd 3176219089Spjd while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 3177219089Spjd errno == ENOMEM) { 3178219089Spjd if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 3179219089Spjd zcmd_free_nvlists(&zc); 3180219089Spjd return (-1); 3181219089Spjd } 3182219089Spjd } 3183219089Spjd 3184219089Spjd if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) && 3185219089Spjd errno != EPERM && errno != EACCES)) { 3186219089Spjd if (policy.zrp_request & 3187219089Spjd (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 3188219089Spjd (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3189219089Spjd zpool_rewind_exclaim(hdl, zc.zc_name, 3190219089Spjd ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), 3191219089Spjd nvi); 3192219089Spjd nvlist_free(nvi); 3193219089Spjd } 3194219089Spjd zcmd_free_nvlists(&zc); 3195185029Spjd return (0); 3196219089Spjd } 3197185029Spjd 3198219089Spjd zcmd_free_nvlists(&zc); 3199185029Spjd return (zpool_standard_error(hdl, errno, msg)); 3200185029Spjd} 3201185029Spjd 3202185029Spjd/* 3203185029Spjd * Similar to zpool_clear(), but takes a GUID (used by fmd). 
3204185029Spjd */ 3205185029Spjdint 3206185029Spjdzpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3207185029Spjd{ 3208185029Spjd zfs_cmd_t zc = { 0 }; 3209185029Spjd char msg[1024]; 3210185029Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 3211185029Spjd 3212185029Spjd (void) snprintf(msg, sizeof (msg), 3213185029Spjd dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3214185029Spjd guid); 3215185029Spjd 3216185029Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3217185029Spjd zc.zc_guid = guid; 3218219089Spjd zc.zc_cookie = ZPOOL_NO_REWIND; 3219185029Spjd 3220168404Spjd if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3221168404Spjd return (0); 3222168404Spjd 3223168404Spjd return (zpool_standard_error(hdl, errno, msg)); 3224168404Spjd} 3225168404Spjd 3226168404Spjd/* 3227228103Smm * Change the GUID for a pool. 3228228103Smm */ 3229228103Smmint 3230228103Smmzpool_reguid(zpool_handle_t *zhp) 3231228103Smm{ 3232228103Smm char msg[1024]; 3233228103Smm libzfs_handle_t *hdl = zhp->zpool_hdl; 3234228103Smm zfs_cmd_t zc = { 0 }; 3235228103Smm 3236228103Smm (void) snprintf(msg, sizeof (msg), 3237228103Smm dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3238228103Smm 3239228103Smm (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3240228103Smm if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3241228103Smm return (0); 3242228103Smm 3243228103Smm return (zpool_standard_error(hdl, errno, msg)); 3244228103Smm} 3245228103Smm 3246228103Smm/* 3247236155Smm * Reopen the pool. 
 */
int
zpool_reopen(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
	    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
		return (0);
	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	/* The decoded devid/minor are no longer needed once we have a list. */
	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	/*
	 * In a case the strdup() fails, we will just return NULL below.
	 */
	path = strdup(list[0].devname);

	devid_free_nmlist(list);

	/* Caller owns the returned string and must free() it. */
	return (path);
}

/*
 * Convert from a path to a devid string.
3301168404Spjd */ 3302168404Spjdstatic char * 3303168404Spjdpath_to_devid(const char *path) 3304168404Spjd{ 3305265821Smav#ifdef have_devid 3306168404Spjd int fd; 3307168404Spjd ddi_devid_t devid; 3308168404Spjd char *minor, *ret; 3309168404Spjd 3310168404Spjd if ((fd = open(path, O_RDONLY)) < 0) 3311168404Spjd return (NULL); 3312168404Spjd 3313168404Spjd minor = NULL; 3314168404Spjd ret = NULL; 3315168404Spjd if (devid_get(fd, &devid) == 0) { 3316168404Spjd if (devid_get_minor_name(fd, &minor) == 0) 3317168404Spjd ret = devid_str_encode(devid, minor); 3318168404Spjd if (minor != NULL) 3319168404Spjd devid_str_free(minor); 3320168404Spjd devid_free(devid); 3321168404Spjd } 3322168404Spjd (void) close(fd); 3323168404Spjd 3324168404Spjd return (ret); 3325265821Smav#else 3326265821Smav return (NULL); 3327265821Smav#endif 3328168404Spjd} 3329168404Spjd 3330168404Spjd/* 3331168404Spjd * Issue the necessary ioctl() to update the stored path value for the vdev. We 3332168404Spjd * ignore any failure here, since a common case is for an unprivileged user to 3333168404Spjd * type 'zpool status', and we'll display the correct information anyway. 3334168404Spjd */ 3335168404Spjdstatic void 3336168404Spjdset_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3337168404Spjd{ 3338168404Spjd zfs_cmd_t zc = { 0 }; 3339168404Spjd 3340168404Spjd (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3341168404Spjd (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3342168404Spjd verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3343168404Spjd &zc.zc_guid) == 0); 3344168404Spjd 3345168404Spjd (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3346168404Spjd} 3347168404Spjd 3348168404Spjd/* 3349168404Spjd * Given a vdev, return the name to display in iostat. If the vdev has a path, 3350168404Spjd * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 *
 * The returned string is allocated with zfs_strdup(); the caller frees it.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    boolean_t verbose)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];
	vdev_stat_t *vs;
	uint_t vsc;
	int have_stats;
	int have_path;

	have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0;
	have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0;

	/*
	 * If the device is not currently present, assume it will not
	 * come back at the same device path.  Display the device by GUID.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (have_path) {

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((have_stats == 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

#ifdef illumos
		/* Hide the /dev/dsk/ prefix for display purposes. */
		if (strncmp(path, ZFS_DISK_ROOTD, strlen(ZFS_DISK_ROOTD)) == 0)
			path += strlen(ZFS_DISK_ROOTD);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			int pathlen = strlen(path);
			char *tmp = zfs_strdup(hdl, path);

			/*
			 * If it starts with c#, and ends with "s0", chop
			 * the "s0" off, or if it ends with "s0/old", remove
			 * the "s0" from the middle.
			 */
			if (CTD_CHECK(tmp)) {
				if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
					tmp[pathlen - 2] = '\0';
				} else if (pathlen > 6 &&
				    strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
					(void) strcpy(&tmp[pathlen - 6],
					    "/old");
				}
			}
			return (tmp);
		}
#else	/* !illumos */
		/* On FreeBSD, just strip a leading "/dev/". */
		if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
			path += sizeof(_PATH_DEV) - 1;
#endif	/* illumos */
	} else {
		/* No path at all: fall back to the vdev type name. */
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (verbose) {
			uint64_t id;

			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
			    (u_longlong_t)id);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}

/* Ordering comparator over raw zbookmark_phys_t bytes, for qsort(). */
static int
zbookmark_mem_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_phys_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
3512168404Spjd */ 3513168404Spjd verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3514168404Spjd &count) == 0); 3515185029Spjd if (count == 0) 3516185029Spjd return (0); 3517168404Spjd if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3518268123Sdelphij count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL) 3519168404Spjd return (-1); 3520168404Spjd zc.zc_nvlist_dst_size = count; 3521168404Spjd (void) strcpy(zc.zc_name, zhp->zpool_name); 3522168404Spjd for (;;) { 3523168404Spjd if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3524168404Spjd &zc) != 0) { 3525168404Spjd free((void *)(uintptr_t)zc.zc_nvlist_dst); 3526168404Spjd if (errno == ENOMEM) { 3527268123Sdelphij void *dst; 3528268123Sdelphij 3529168404Spjd count = zc.zc_nvlist_dst_size; 3530268123Sdelphij dst = zfs_alloc(zhp->zpool_hdl, count * 3531268123Sdelphij sizeof (zbookmark_phys_t)); 3532268123Sdelphij if (dst == NULL) 3533168404Spjd return (-1); 3534268123Sdelphij zc.zc_nvlist_dst = (uintptr_t)dst; 3535168404Spjd } else { 3536168404Spjd return (-1); 3537168404Spjd } 3538168404Spjd } else { 3539168404Spjd break; 3540168404Spjd } 3541168404Spjd } 3542168404Spjd 3543168404Spjd /* 3544168404Spjd * Sort the resulting bookmarks. This is a little confusing due to the 3545168404Spjd * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last 3546168404Spjd * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks 3547168404Spjd * _not_ copied as part of the process. So we point the start of our 3548168404Spjd * array appropriate and decrement the total number of elements. 
3549168404Spjd */ 3550268123Sdelphij zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) + 3551168404Spjd zc.zc_nvlist_dst_size; 3552168404Spjd count -= zc.zc_nvlist_dst_size; 3553168404Spjd 3554286705Smav qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare); 3555168404Spjd 3556168404Spjd verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 3557168404Spjd 3558168404Spjd /* 3559168404Spjd * Fill in the nverrlistp with nvlist's of dataset and object numbers. 3560168404Spjd */ 3561168404Spjd for (i = 0; i < count; i++) { 3562168404Spjd nvlist_t *nv; 3563168404Spjd 3564168404Spjd /* ignoring zb_blkid and zb_level for now */ 3565168404Spjd if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 3566168404Spjd zb[i-1].zb_object == zb[i].zb_object) 3567168404Spjd continue; 3568168404Spjd 3569168404Spjd if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 3570168404Spjd goto nomem; 3571168404Spjd if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 3572168404Spjd zb[i].zb_objset) != 0) { 3573168404Spjd nvlist_free(nv); 3574168404Spjd goto nomem; 3575168404Spjd } 3576168404Spjd if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 3577168404Spjd zb[i].zb_object) != 0) { 3578168404Spjd nvlist_free(nv); 3579168404Spjd goto nomem; 3580168404Spjd } 3581168404Spjd if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 3582168404Spjd nvlist_free(nv); 3583168404Spjd goto nomem; 3584168404Spjd } 3585168404Spjd nvlist_free(nv); 3586168404Spjd } 3587168404Spjd 3588168404Spjd free((void *)(uintptr_t)zc.zc_nvlist_dst); 3589168404Spjd return (0); 3590168404Spjd 3591168404Spjdnomem: 3592168404Spjd free((void *)(uintptr_t)zc.zc_nvlist_dst); 3593168404Spjd return (no_memory(zhp->zpool_hdl)); 3594168404Spjd} 3595168404Spjd 3596168404Spjd/* 3597168404Spjd * Upgrade a ZFS pool to the latest on-disk version. 
3598168404Spjd */ 3599168404Spjdint 3600185029Spjdzpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 3601168404Spjd{ 3602168404Spjd zfs_cmd_t zc = { 0 }; 3603168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 3604168404Spjd 3605168404Spjd (void) strcpy(zc.zc_name, zhp->zpool_name); 3606185029Spjd zc.zc_cookie = new_version; 3607185029Spjd 3608185029Spjd if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 3609168404Spjd return (zpool_standard_error_fmt(hdl, errno, 3610168404Spjd dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 3611168404Spjd zhp->zpool_name)); 3612168404Spjd return (0); 3613168404Spjd} 3614168404Spjd 3615168404Spjdvoid 3616248571Smmzfs_save_arguments(int argc, char **argv, char *string, int len) 3617168404Spjd{ 3618248571Smm (void) strlcpy(string, basename(argv[0]), len); 3619248571Smm for (int i = 1; i < argc; i++) { 3620248571Smm (void) strlcat(string, " ", len); 3621248571Smm (void) strlcat(string, argv[i], len); 3622168404Spjd } 3623185029Spjd} 3624168404Spjd 3625185029Spjdint 3626248571Smmzpool_log_history(libzfs_handle_t *hdl, const char *message) 3627185029Spjd{ 3628248571Smm zfs_cmd_t zc = { 0 }; 3629248571Smm nvlist_t *args; 3630248571Smm int err; 3631168404Spjd 3632248571Smm args = fnvlist_alloc(); 3633248571Smm fnvlist_add_string(args, "message", message); 3634248571Smm err = zcmd_write_src_nvlist(hdl, &zc, args); 3635248571Smm if (err == 0) 3636248571Smm err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc); 3637248571Smm nvlist_free(args); 3638248571Smm zcmd_free_nvlists(&zc); 3639248571Smm return (err); 3640168404Spjd} 3641168404Spjd 3642168404Spjd/* 3643168404Spjd * Perform ioctl to get some command history of a pool. 3644168404Spjd * 3645168404Spjd * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 3646168404Spjd * logical offset of the history buffer to start reading from. 
3647168404Spjd * 3648168404Spjd * Upon return, 'off' is the next logical offset to read from and 3649168404Spjd * 'len' is the actual amount of bytes read into 'buf'. 3650168404Spjd */ 3651168404Spjdstatic int 3652168404Spjdget_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3653168404Spjd{ 3654168404Spjd zfs_cmd_t zc = { 0 }; 3655168404Spjd libzfs_handle_t *hdl = zhp->zpool_hdl; 3656168404Spjd 3657168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3658168404Spjd 3659168404Spjd zc.zc_history = (uint64_t)(uintptr_t)buf; 3660168404Spjd zc.zc_history_len = *len; 3661168404Spjd zc.zc_history_offset = *off; 3662168404Spjd 3663168404Spjd if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3664168404Spjd switch (errno) { 3665168404Spjd case EPERM: 3666168404Spjd return (zfs_error_fmt(hdl, EZFS_PERM, 3667168404Spjd dgettext(TEXT_DOMAIN, 3668168404Spjd "cannot show history for pool '%s'"), 3669168404Spjd zhp->zpool_name)); 3670168404Spjd case ENOENT: 3671168404Spjd return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3672168404Spjd dgettext(TEXT_DOMAIN, "cannot get history for pool " 3673168404Spjd "'%s'"), zhp->zpool_name)); 3674168404Spjd case ENOTSUP: 3675168404Spjd return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3676168404Spjd dgettext(TEXT_DOMAIN, "cannot get history for pool " 3677168404Spjd "'%s', pool must be upgraded"), zhp->zpool_name)); 3678168404Spjd default: 3679168404Spjd return (zpool_standard_error_fmt(hdl, errno, 3680168404Spjd dgettext(TEXT_DOMAIN, 3681168404Spjd "cannot get history for '%s'"), zhp->zpool_name)); 3682168404Spjd } 3683168404Spjd } 3684168404Spjd 3685168404Spjd *len = zc.zc_history_len; 3686168404Spjd *off = zc.zc_history_offset; 3687168404Spjd 3688168404Spjd return (0); 3689168404Spjd} 3690168404Spjd 3691168404Spjd/* 3692168404Spjd * Process the buffer of nvlists, unpacking and storing each nvlist record 3693168404Spjd * into 'records'. 
'leftover' is set to the number of bytes that weren't 3694168404Spjd * processed as there wasn't a complete record. 3695168404Spjd */ 3696219089Spjdint 3697168404Spjdzpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3698168404Spjd nvlist_t ***records, uint_t *numrecords) 3699168404Spjd{ 3700168404Spjd uint64_t reclen; 3701168404Spjd nvlist_t *nv; 3702168404Spjd int i; 3703168404Spjd 3704168404Spjd while (bytes_read > sizeof (reclen)) { 3705168404Spjd 3706168404Spjd /* get length of packed record (stored as little endian) */ 3707168404Spjd for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3708168404Spjd reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3709168404Spjd 3710168404Spjd if (bytes_read < sizeof (reclen) + reclen) 3711168404Spjd break; 3712168404Spjd 3713168404Spjd /* unpack record */ 3714168404Spjd if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3715168404Spjd return (ENOMEM); 3716168404Spjd bytes_read -= sizeof (reclen) + reclen; 3717168404Spjd buf += sizeof (reclen) + reclen; 3718168404Spjd 3719168404Spjd /* add record to nvlist array */ 3720168404Spjd (*numrecords)++; 3721168404Spjd if (ISP2(*numrecords + 1)) { 3722168404Spjd *records = realloc(*records, 3723168404Spjd *numrecords * 2 * sizeof (nvlist_t *)); 3724168404Spjd } 3725168404Spjd (*records)[*numrecords - 1] = nv; 3726168404Spjd } 3727168404Spjd 3728168404Spjd *leftover = bytes_read; 3729168404Spjd return (0); 3730168404Spjd} 3731168404Spjd 3732264467Sdelphij/* from spa_history.c: spa_history_create_obj() */ 3733264467Sdelphij#define HIS_BUF_LEN_DEF (128 << 10) 3734264467Sdelphij#define HIS_BUF_LEN_MAX (1 << 30) 3735168404Spjd 3736168404Spjd/* 3737168404Spjd * Retrieve the command history of a pool. 
3738168404Spjd */ 3739168404Spjdint 3740168404Spjdzpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3741168404Spjd{ 3742289528Smav char *buf; 3743289528Smav uint64_t buflen = HIS_BUF_LEN_DEF; 3744168404Spjd uint64_t off = 0; 3745168404Spjd nvlist_t **records = NULL; 3746168404Spjd uint_t numrecords = 0; 3747168404Spjd int err, i; 3748168404Spjd 3749289528Smav buf = malloc(buflen); 3750289528Smav if (buf == NULL) 3751264467Sdelphij return (ENOMEM); 3752168404Spjd do { 3753289528Smav uint64_t bytes_read = buflen; 3754168404Spjd uint64_t leftover; 3755168404Spjd 3756168404Spjd if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3757168404Spjd break; 3758168404Spjd 3759168404Spjd /* if nothing else was read in, we're at EOF, just return */ 3760264467Sdelphij if (bytes_read == 0) 3761168404Spjd break; 3762168404Spjd 3763168404Spjd if ((err = zpool_history_unpack(buf, bytes_read, 3764168404Spjd &leftover, &records, &numrecords)) != 0) 3765168404Spjd break; 3766168404Spjd off -= leftover; 3767264467Sdelphij if (leftover == bytes_read) { 3768289528Smav /* 3769289528Smav * no progress made, because buffer is not big enough 3770289528Smav * to hold this record; resize and retry. 
3771289528Smav */ 3772289528Smav buflen *= 2; 3773264467Sdelphij free(buf); 3774264467Sdelphij buf = NULL; 3775289528Smav if ((buflen >= HIS_BUF_LEN_MAX) || 3776289528Smav ((buf = malloc(buflen)) == NULL)) { 3777264467Sdelphij err = ENOMEM; 3778264467Sdelphij break; 3779264467Sdelphij } 3780264467Sdelphij } 3781264467Sdelphij 3782168404Spjd /* CONSTCOND */ 3783168404Spjd } while (1); 3784289528Smav 3785264467Sdelphij free(buf); 3786168404Spjd 3787168404Spjd if (!err) { 3788168404Spjd verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3789168404Spjd verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3790168404Spjd records, numrecords) == 0); 3791168404Spjd } 3792168404Spjd for (i = 0; i < numrecords; i++) 3793168404Spjd nvlist_free(records[i]); 3794168404Spjd free(records); 3795168404Spjd 3796168404Spjd return (err); 3797168404Spjd} 3798168404Spjd 3799168404Spjdvoid 3800168404Spjdzpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3801168404Spjd char *pathname, size_t len) 3802168404Spjd{ 3803168404Spjd zfs_cmd_t zc = { 0 }; 3804168404Spjd boolean_t mounted = B_FALSE; 3805168404Spjd char *mntpnt = NULL; 3806168404Spjd char dsname[MAXNAMELEN]; 3807168404Spjd 3808168404Spjd if (dsobj == 0) { 3809168404Spjd /* special case for the MOS */ 3810168404Spjd (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3811168404Spjd return; 3812168404Spjd } 3813168404Spjd 3814168404Spjd /* get the dataset's name */ 3815168404Spjd (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3816168404Spjd zc.zc_obj = dsobj; 3817168404Spjd if (ioctl(zhp->zpool_hdl->libzfs_fd, 3818168404Spjd ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3819168404Spjd /* just write out a path of two object numbers */ 3820168404Spjd (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3821168404Spjd dsobj, obj); 3822168404Spjd return; 3823168404Spjd } 3824168404Spjd (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3825168404Spjd 3826168404Spjd /* find out if the 
dataset is mounted */ 3827168404Spjd mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3828168404Spjd 3829168404Spjd /* get the corrupted object's path */ 3830168404Spjd (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 3831168404Spjd zc.zc_obj = obj; 3832168404Spjd if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3833168404Spjd &zc) == 0) { 3834168404Spjd if (mounted) { 3835168404Spjd (void) snprintf(pathname, len, "%s%s", mntpnt, 3836168404Spjd zc.zc_value); 3837168404Spjd } else { 3838168404Spjd (void) snprintf(pathname, len, "%s:%s", 3839168404Spjd dsname, zc.zc_value); 3840168404Spjd } 3841168404Spjd } else { 3842168404Spjd (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3843168404Spjd } 3844168404Spjd free(mntpnt); 3845168404Spjd} 3846168404Spjd 3847277300Ssmh#ifdef illumos 3848185029Spjd/* 3849185029Spjd * Read the EFI label from the config, if a label does not exist then 3850185029Spjd * pass back the error to the caller. If the caller has passed a non-NULL 3851185029Spjd * diskaddr argument then we set it to the starting address of the EFI 3852185029Spjd * partition. 
3853185029Spjd */ 3854185029Spjdstatic int 3855185029Spjdread_efi_label(nvlist_t *config, diskaddr_t *sb) 3856168404Spjd{ 3857185029Spjd char *path; 3858185029Spjd int fd; 3859185029Spjd char diskname[MAXPATHLEN]; 3860185029Spjd int err = -1; 3861168404Spjd 3862185029Spjd if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3863185029Spjd return (err); 3864168404Spjd 3865297763Smav (void) snprintf(diskname, sizeof (diskname), "%s%s", ZFS_RDISK_ROOT, 3866185029Spjd strrchr(path, '/')); 3867185029Spjd if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3868185029Spjd struct dk_gpt *vtoc; 3869185029Spjd 3870185029Spjd if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3871185029Spjd if (sb != NULL) 3872185029Spjd *sb = vtoc->efi_parts[0].p_start; 3873185029Spjd efi_free(vtoc); 3874185029Spjd } 3875185029Spjd (void) close(fd); 3876168404Spjd } 3877185029Spjd return (err); 3878185029Spjd} 3879168404Spjd 3880185029Spjd/* 3881185029Spjd * determine where a partition starts on a disk in the current 3882185029Spjd * configuration 3883185029Spjd */ 3884185029Spjdstatic diskaddr_t 3885185029Spjdfind_start_block(nvlist_t *config) 3886185029Spjd{ 3887185029Spjd nvlist_t **child; 3888185029Spjd uint_t c, children; 3889185029Spjd diskaddr_t sb = MAXOFFSET_T; 3890185029Spjd uint64_t wholedisk; 3891168404Spjd 3892185029Spjd if (nvlist_lookup_nvlist_array(config, 3893185029Spjd ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3894185029Spjd if (nvlist_lookup_uint64(config, 3895185029Spjd ZPOOL_CONFIG_WHOLE_DISK, 3896185029Spjd &wholedisk) != 0 || !wholedisk) { 3897185029Spjd return (MAXOFFSET_T); 3898185029Spjd } 3899185029Spjd if (read_efi_label(config, &sb) < 0) 3900185029Spjd sb = MAXOFFSET_T; 3901185029Spjd return (sb); 3902168404Spjd } 3903168404Spjd 3904185029Spjd for (c = 0; c < children; c++) { 3905185029Spjd sb = find_start_block(child[c]); 3906185029Spjd if (sb != MAXOFFSET_T) { 3907185029Spjd return (sb); 3908185029Spjd } 3909168404Spjd } 3910185029Spjd 
return (MAXOFFSET_T); 3911185029Spjd} 3912277300Ssmh#endif /* illumos */ 3913168404Spjd 3914185029Spjd/* 3915185029Spjd * Label an individual disk. The name provided is the short name, 3916185029Spjd * stripped of any leading /dev path. 3917185029Spjd */ 3918185029Spjdint 3919224169Sgibbszpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name) 3920185029Spjd{ 3921277300Ssmh#ifdef illumos 3922185029Spjd char path[MAXPATHLEN]; 3923185029Spjd struct dk_gpt *vtoc; 3924185029Spjd int fd; 3925185029Spjd size_t resv = EFI_MIN_RESV_SIZE; 3926185029Spjd uint64_t slice_size; 3927185029Spjd diskaddr_t start_block; 3928185029Spjd char errbuf[1024]; 3929168404Spjd 3930185029Spjd /* prepare an error message just in case */ 3931185029Spjd (void) snprintf(errbuf, sizeof (errbuf), 3932185029Spjd dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3933168404Spjd 3934185029Spjd if (zhp) { 3935185029Spjd nvlist_t *nvroot; 3936168404Spjd 3937185029Spjd verify(nvlist_lookup_nvlist(zhp->zpool_config, 3938185029Spjd ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3939168404Spjd 3940185029Spjd if (zhp->zpool_start_block == 0) 3941185029Spjd start_block = find_start_block(nvroot); 3942185029Spjd else 3943185029Spjd start_block = zhp->zpool_start_block; 3944185029Spjd zhp->zpool_start_block = start_block; 3945185029Spjd } else { 3946185029Spjd /* new pool */ 3947185029Spjd start_block = NEW_START_BLOCK; 3948185029Spjd } 3949168404Spjd 3950297763Smav (void) snprintf(path, sizeof (path), "%s/%s%s", ZFS_RDISK_ROOT, name, 3951185029Spjd BACKUP_SLICE); 3952168404Spjd 3953185029Spjd if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3954185029Spjd /* 3955185029Spjd * This shouldn't happen. We've long since verified that this 3956185029Spjd * is a valid device. 
3957185029Spjd */ 3958185029Spjd zfs_error_aux(hdl, 3959185029Spjd dgettext(TEXT_DOMAIN, "unable to open device")); 3960185029Spjd return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 3961185029Spjd } 3962168404Spjd 3963185029Spjd if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 3964185029Spjd /* 3965185029Spjd * The only way this can fail is if we run out of memory, or we 3966185029Spjd * were unable to read the disk's capacity 3967185029Spjd */ 3968185029Spjd if (errno == ENOMEM) 3969185029Spjd (void) no_memory(hdl); 3970168404Spjd 3971185029Spjd (void) close(fd); 3972185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3973185029Spjd "unable to read disk capacity"), name); 3974185029Spjd 3975185029Spjd return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 3976168404Spjd } 3977168404Spjd 3978185029Spjd slice_size = vtoc->efi_last_u_lba + 1; 3979185029Spjd slice_size -= EFI_MIN_RESV_SIZE; 3980185029Spjd if (start_block == MAXOFFSET_T) 3981185029Spjd start_block = NEW_START_BLOCK; 3982185029Spjd slice_size -= start_block; 3983168404Spjd 3984185029Spjd vtoc->efi_parts[0].p_start = start_block; 3985185029Spjd vtoc->efi_parts[0].p_size = slice_size; 3986185029Spjd 3987168404Spjd /* 3988185029Spjd * Why we use V_USR: V_BACKUP confuses users, and is considered 3989185029Spjd * disposable by some EFI utilities (since EFI doesn't have a backup 3990185029Spjd * slice). V_UNASSIGNED is supposed to be used only for zero size 3991185029Spjd * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 3992185029Spjd * etc. were all pretty specific. V_USR is as close to reality as we 3993185029Spjd * can get, in the absence of V_OTHER. 
3994168404Spjd */ 3995185029Spjd vtoc->efi_parts[0].p_tag = V_USR; 3996185029Spjd (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 3997168404Spjd 3998185029Spjd vtoc->efi_parts[8].p_start = slice_size + start_block; 3999185029Spjd vtoc->efi_parts[8].p_size = resv; 4000185029Spjd vtoc->efi_parts[8].p_tag = V_RESERVED; 4001168404Spjd 4002185029Spjd if (efi_write(fd, vtoc) != 0) { 4003185029Spjd /* 4004185029Spjd * Some block drivers (like pcata) may not support EFI 4005185029Spjd * GPT labels. Print out a helpful error message dir- 4006185029Spjd * ecting the user to manually label the disk and give 4007185029Spjd * a specific slice. 4008185029Spjd */ 4009185029Spjd (void) close(fd); 4010185029Spjd efi_free(vtoc); 4011168404Spjd 4012185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4013185029Spjd "try using fdisk(1M) and then provide a specific slice")); 4014185029Spjd return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 4015168404Spjd } 4016185029Spjd 4017185029Spjd (void) close(fd); 4018185029Spjd efi_free(vtoc); 4019277300Ssmh#endif /* illumos */ 4020168404Spjd return (0); 4021168404Spjd} 4022168404Spjd 4023185029Spjdstatic boolean_t 4024185029Spjdsupported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 4025168404Spjd{ 4026185029Spjd char *type; 4027185029Spjd nvlist_t **child; 4028185029Spjd uint_t children, c; 4029185029Spjd 4030185029Spjd verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 4031255750Sdelphij if (strcmp(type, VDEV_TYPE_FILE) == 0 || 4032219089Spjd strcmp(type, VDEV_TYPE_HOLE) == 0 || 4033185029Spjd strcmp(type, VDEV_TYPE_MISSING) == 0) { 4034185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4035185029Spjd "vdev type '%s' is not supported"), type); 4036185029Spjd (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 4037185029Spjd return (B_FALSE); 4038185029Spjd } 4039185029Spjd if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 4040185029Spjd &child, &children) == 0) { 4041185029Spjd for (c = 0; c < 
children; c++) { 4042185029Spjd if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 4043185029Spjd return (B_FALSE); 4044185029Spjd } 4045185029Spjd } 4046185029Spjd return (B_TRUE); 4047168404Spjd} 4048168404Spjd 4049185029Spjd/* 4050255750Sdelphij * Check if this zvol is allowable for use as a dump device; zero if 4051255750Sdelphij * it is, > 0 if it isn't, < 0 if it isn't a zvol. 4052255750Sdelphij * 4053255750Sdelphij * Allowable storage configurations include mirrors, all raidz variants, and 4054255750Sdelphij * pools with log, cache, and spare devices. Pools which are backed by files or 4055255750Sdelphij * have missing/hole vdevs are not suitable. 4056185029Spjd */ 4057168404Spjdint 4058185029Spjdzvol_check_dump_config(char *arg) 4059168404Spjd{ 4060185029Spjd zpool_handle_t *zhp = NULL; 4061185029Spjd nvlist_t *config, *nvroot; 4062185029Spjd char *p, *volname; 4063185029Spjd nvlist_t **top; 4064185029Spjd uint_t toplevels; 4065185029Spjd libzfs_handle_t *hdl; 4066185029Spjd char errbuf[1024]; 4067185029Spjd char poolname[ZPOOL_MAXNAMELEN]; 4068185029Spjd int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4069185029Spjd int ret = 1; 4070168404Spjd 4071185029Spjd if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4072168404Spjd return (-1); 4073185029Spjd } 4074168404Spjd 4075185029Spjd (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4076185029Spjd "dump is not supported on device '%s'"), arg); 4077168404Spjd 4078185029Spjd if ((hdl = libzfs_init()) == NULL) 4079185029Spjd return (1); 4080185029Spjd libzfs_print_on_error(hdl, B_TRUE); 4081168404Spjd 4082185029Spjd volname = arg + pathlen; 4083185029Spjd 4084185029Spjd /* check the configuration of the pool */ 4085185029Spjd if ((p = strchr(volname, '/')) == NULL) { 4086185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4087185029Spjd "malformed dataset name")); 4088185029Spjd (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4089185029Spjd return (1); 4090185029Spjd } else if (p - volname >= 
ZFS_MAXNAMELEN) { 4091185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4092185029Spjd "dataset name is too long")); 4093185029Spjd (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4094185029Spjd return (1); 4095185029Spjd } else { 4096185029Spjd (void) strncpy(poolname, volname, p - volname); 4097185029Spjd poolname[p - volname] = '\0'; 4098168404Spjd } 4099168404Spjd 4100185029Spjd if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4101185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4102185029Spjd "could not open pool '%s'"), poolname); 4103185029Spjd (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4104185029Spjd goto out; 4105185029Spjd } 4106185029Spjd config = zpool_get_config(zhp, NULL); 4107185029Spjd if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4108185029Spjd &nvroot) != 0) { 4109185029Spjd zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4110185029Spjd "could not obtain vdev configuration for '%s'"), poolname); 4111185029Spjd (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4112185029Spjd goto out; 4113185029Spjd } 4114185029Spjd 4115185029Spjd verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4116185029Spjd &top, &toplevels) == 0); 4117185029Spjd 4118185029Spjd if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4119185029Spjd goto out; 4120185029Spjd } 4121185029Spjd ret = 0; 4122185029Spjd 4123185029Spjdout: 4124185029Spjd if (zhp) 4125185029Spjd zpool_close(zhp); 4126185029Spjd libzfs_fini(hdl); 4127185029Spjd return (ret); 4128168404Spjd} 4129