/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 * Copyright 2016 Nexenta Systems, Inc.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device. If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded. Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev. We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed. Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <thread_pool.h>
#include <libgeom.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
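 *
 * The hierarchy above maps directly onto the structures below: a pool_list_t
 * holds a list of pool_entry_t (one per pool guid), each holding a list of
 * vdev_entry_t (one per toplevel vdev guid), each holding a list of
 * config_entry_t (one per label txg). A separate name_entry_t list records
 * every vdev guid -> device path mapping observed while scanning.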
 */
typedef struct config_entry {
	uint64_t	ce_txg;
	nvlist_t	*ce_config;
	struct config_entry *ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t	ve_guid;
	config_entry_t	*ve_configs;
	struct vdev_entry *ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t	pe_guid;
	vdev_entry_t	*pe_vdevs;
	struct pool_entry *pe_next;
} pool_entry_t;

typedef struct name_entry {
	char		*ne_name;
	uint64_t	ne_guid;
	struct name_entry *ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t	*pools;
	name_entry_t	*names;
} pool_list_t;

static char *
get_devid(const char *path)
{
#ifdef have_devid
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
#else
	return (NULL);
#endif
}

/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev. In either case, go through
	 * the name list and see if we find a matching guid. If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk. If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path. This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
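	 *
	 * For example, against an original path of '/dev/da0p3', a candidate
	 * name '/dev/da2p3' (two matching trailing characters) is preferred
	 * over '/dev/ada1p1' (none), so the partition number is preserved.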
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
			devid_str_free(devid);
			return (-1);
		}
		devid_str_free(devid);
	}

	return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_next = pl->names;
		pl->names = ne;
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label. In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on. If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool. If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev. Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group. If
	 * so, then we do nothing. Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs. The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
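	 * (These mappings are consumed later by fix_paths(), once the
	 * complete vdev tree has been assembled in get_configs().)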
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}

/*
 * Returns true if the named pool matches the given GUID.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}

static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = { 0 };
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
	for (int c = 0; c < holes; c++) {
		/* Top-level is a hole */
		if (hole_array[c] == id)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Convert our list of pools into the definitive set of configurations. We
 * start by picking the best config for each toplevel vdev. Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool. We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
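 *
 * Pools that turn out to be currently active are skipped unless active_ok
 * is set; zdb passes active_ok to report on pools that were imported or
 * created using -R.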
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname = NULL;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t found_one = B_FALSE;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs. Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
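			 * Any ZPOOL_CONFIG_VDEV_CHILDREN and
			 * ZPOOL_CONFIG_HOLE_ARRAY values taken from an older
			 * txg are therefore discarded below before the values
			 * from the newer config are copied in.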
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
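			 * The array is indexed by vdev id and grown on demand
			 * whenever an id beyond its current size is seen.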
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(holey);
					goto nomem;
				}
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs. If this is the case,
		 * create a faked up 'missing' vdev as a placeholder. We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
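		 * A pool is considered active only when one with the same
		 * name and pool guid is already imported on this system;
		 * see pool_active().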
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		found_one = B_TRUE;
		nvlist_free(config);
		config = NULL;
	}

	if (!found_one) {
		nvlist_free(ret);
		ret = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 * Return 0 on success, or -1 on failure.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (-1);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	errno = ENOENT;
	return (-1);
}

typedef struct rdsk_node {
	char *rn_name;
	int rn_dfd;
	libzfs_handle_t *rn_hdl;
	nvlist_t *rn_config;
	avl_tree_t *rn_avl;
	avl_node_t rn_node;
	boolean_t rn_nozpool;
} rdsk_node_t;

static int
slice_cache_compare(const void *arg1, const void *arg2)
{
	const char *nm1 = ((rdsk_node_t *)arg1)->rn_name;
	const char *nm2 = ((rdsk_node_t *)arg2)->rn_name;
	char *nm1slice, *nm2slice;
	int rv;

	/*
	 * slices zero and two are the most likely to provide results,
	 * so put those first
	 */
	nm1slice = strstr(nm1, "s0");
	nm2slice = strstr(nm2, "s0");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}
	nm1slice = strstr(nm1, "s2");
	nm2slice = strstr(nm2, "s2");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}

	rv = strcmp(nm1, nm2);
	if (rv == 0)
		return (0);
	return (rv > 0 ? 1 : -1);
}

#ifdef illumos
static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
	rdsk_node_t tmpnode;
	rdsk_node_t *node;
	char sname[MAXNAMELEN];

	tmpnode.rn_name = &sname[0];
	(void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
	    diskname, partno);
	/*
	 * protect against division by zero for disk labels that
	 * contain a bogus sector size
	 */
	if (blksz == 0)
		blksz = DEV_BSIZE;
	/* too small to contain a zpool? */
	if ((size < (SPA_MINDEVSIZE / blksz)) &&
	    (node = avl_find(r, &tmpnode, NULL)))
		node->rn_nozpool = B_TRUE;
}
#endif	/* illumos */

static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
#ifdef illumos
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	if (((ptr = strrchr(diskname, 's')) == NULL) &&
	    ((ptr = strrchr(diskname, 'p')) == NULL))
		return;
	ptr[0] = 's';
	ptr[1] = '\0';
	for (i = 0; i < NDKMAP; i++)
		check_one_slice(r, diskname, i, 0, 1);
	ptr[0] = 'p';
	for (i = 0; i <= FD_NUMPART; i++)
		check_one_slice(r, diskname, i, 0, 1);
#endif	/* illumos */
}

#ifdef illumos
static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
	struct extvtoc vtoc;
	struct dk_gpt *gpt;
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
		return;
	ptr[1] = '\0';

	if (read_extvtoc(fd, &vtoc) >= 0) {
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    vtoc.v_part[i].p_size, vtoc.v_sectorsz);
	} else if (efi_alloc_and_read(fd, &gpt) >= 0) {
		/*
		 * on x86 we'll still have leftover links that point
		 * to slices s[9-15], so use NDKMAP instead
		 */
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    gpt->efi_parts[i].p_size, gpt->efi_lbasize);
		/* nodes p[1-4] are never used with EFI labels */
		ptr[0] = 'p';
		for (i = 1; i <= FD_NUMPART; i++)
			check_one_slice(r, diskname, i, 0, 1);
		efi_free(gpt);
	}
}
#endif	/* illumos */

static void
zpool_open_func(void *arg)
{
	rdsk_node_t *rn = arg;
	struct stat64 statbuf;
	nvlist_t *config;
	int fd;

	if (rn->rn_nozpool)
		return;
	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}
	/*
	 * Ignore failed stats. We only want regular
	 * files, character devs and block devs.
	 */
	if (fstat64(fd, &statbuf) != 0 ||
	    (!S_ISREG(statbuf.st_mode) &&
	    !S_ISCHR(statbuf.st_mode) &&
	    !S_ISBLK(statbuf.st_mode))) {
		(void) close(fd);
		return;
	}
	/* this file is too small to hold a zpool */
#ifdef illumos
	if (S_ISREG(statbuf.st_mode) &&
	    statbuf.st_size < SPA_MINDEVSIZE) {
		(void) close(fd);
		return;
	} else if (!S_ISREG(statbuf.st_mode)) {
		/*
		 * Try to read the disk label first so we don't have to
		 * open a bunch of minor nodes that can't have a zpool.
		 */
		check_slices(rn->rn_avl, fd, rn->rn_name);
	}
#else	/* !illumos */
	if (statbuf.st_size < SPA_MINDEVSIZE) {
		(void) close(fd);
		return;
	}
#endif	/* illumos */

	if ((zpool_read_label(fd, &config)) != 0 && errno == ENOMEM) {
		(void) close(fd);
		(void) no_memory(rn->rn_hdl);
		return;
	}
	(void) close(fd);

	rn->rn_config = config;
}

/*
 * Given a file descriptor, clear (zero) the label information.
 */
int
zpool_clear_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t size;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pwrite64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t)) {
			free(label);
			return (-1);
		}
	}

	free(label);
	return (0);
}

/*
 * Given a list of directories to search, find all pools stored on disk. This
 * includes partial pools which are not available to import. If no args are
 * given (argc is 0), then the default directory (/dev) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
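 *
 * Label reads are dispatched to a thread pool and performed in parallel
 * by zpool_open_func().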
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	nvlist_t *ret = NULL;
	static char *default_dir = "/dev";
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;
	avl_tree_t slice_cache;
	rdsk_node_t *slice;
	void *cookie;

	if (dirs == 0) {
		dirs = 1;
		dir = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		tpool_t *t;
		char rdsk[MAXPATHLEN];
		int dfd;
		boolean_t config_failed = B_FALSE;
		DIR *dirp;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

#ifdef illumos
		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
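		 * (On FreeBSD there is no separate raw-device namespace, so
		 * the path is used as-is.)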
		 */
		if (strcmp(path, ZFS_DISK_ROOTD) == 0)
			(void) strlcpy(rdsk, ZFS_RDISK_ROOTD, sizeof (rdsk));
		else
#endif
			(void) strlcpy(rdsk, path, sizeof (rdsk));

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			if (dfd >= 0)
				(void) close(dfd);
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		avl_create(&slice_cache, slice_cache_compare,
		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));

		if (strcmp(rdsk, "/dev/") == 0) {
			struct gmesh mesh;
			struct gclass *mp;
			struct ggeom *gp;
			struct gprovider *pp;

			errno = geom_gettree(&mesh);
			if (errno != 0) {
				zfs_error_aux(hdl, strerror(errno));
				(void) zfs_error_fmt(hdl, EZFS_BADPATH,
				    dgettext(TEXT_DOMAIN, "cannot get GEOM tree"));
				goto error;
			}

			LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
				LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
					LIST_FOREACH(pp, &gp->lg_provider, lg_provider) {
						slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
						slice->rn_name = zfs_strdup(hdl, pp->lg_name);
						slice->rn_avl = &slice_cache;
						slice->rn_dfd = dfd;
						slice->rn_hdl = hdl;
						slice->rn_nozpool = B_FALSE;
						avl_add(&slice_cache, slice);
					}
				}
			}

			geom_deletetree(&mesh);
			goto skipdir;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
			slice->rn_name = zfs_strdup(hdl, name);
			slice->rn_avl = &slice_cache;
			slice->rn_dfd = dfd;
			slice->rn_hdl = hdl;
			slice->rn_nozpool = B_FALSE;
			avl_add(&slice_cache, slice);
		}
skipdir:
		/*
		 * create a thread pool to do all of this in parallel;
		 * rn_nozpool is not protected, so this is racy in that
		 * multiple tasks could decide that the same slice can
		 * not hold a zpool, which is benign. Also choose
		 * double the number of processors; we hold a lot of
		 * locks in the kernel, so going beyond this doesn't
		 * buy us much.
		 */
		t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
		    0, NULL);
		for (slice = avl_first(&slice_cache); slice;
		    (slice = avl_walk(&slice_cache, slice,
		    AVL_AFTER)))
			(void) tpool_dispatch(t, zpool_open_func, slice);
		tpool_wait(t);
		tpool_destroy(t);

		cookie = NULL;
		while ((slice = avl_destroy_nodes(&slice_cache,
		    &cookie)) != NULL) {
			if (slice->rn_config != NULL && !config_failed) {
				nvlist_t *config = slice->rn_config;
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
				} else {
					/*
					 * use the non-raw path for the config
					 */
					(void) strlcpy(end, slice->rn_name,
					    pathleft);
					if (add_config(hdl, &pools, path,
					    config) != 0)
						config_failed = B_TRUE;
				}
			}
			free(slice->rn_name);
			free(slice);
		}
		avl_destroy(&slice_cache);

		(void) closedir(dirp);

		if (config_failed)
			goto error;
	}

	ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		free(ne->ne_name);
		free(ne);
	}

	return (ret);
}

nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
	importargs_t iarg = { 0 };

	iarg.paths = argc;
	iarg.path = argv;

	return (zpool_find_import_impl(hdl, &iarg));
}

/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
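 *
 * Each matching config is refreshed through refresh_config() (a
 * ZFS_IOC_POOL_TRYIMPORT ioctl) before being added to the returned list.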
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
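	 * Pools that are already active, or that do not match the requested
	 * poolname/guid, are skipped.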
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		src = fnvpair_value_nvlist(elem);

		name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
		if (guid != 0 && guid != this_guid)
			continue;

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}

static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
	importargs_t *import = data;
	int found = 0;

	if (import->poolname != NULL) {
		char *pool_name;

		verify(nvlist_lookup_string(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
		if (strcmp(pool_name, import->poolname) == 0)
			found = 1;
	} else {
		uint64_t pool_guid;

		verify(nvlist_lookup_uint64(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
		if (pool_guid == import->guid)
			found = 1;
	}

	zpool_close(zhp);
	return (found);
}

nvlist_t *
zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
{
	verify(import->poolname == NULL || import->guid == 0);

	if (import->unique)
		import->exists = zpool_iter(hdl, name_or_guid_exists, import);

	if (import->cachefile != NULL)
		return (zpool_find_import_cached(hdl, import->cachefile,
		    import->poolname, import->guid));

	return (zpool_find_import_impl(hdl, import));
}

boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}

typedef struct aux_cbdata {
	const char	*cb_type;
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} aux_cbdata_t;

static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	uint_t i, count;
	uint64_t guid;
	nvlist_t *nvroot;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Determines if the pool is in use. If so, it returns true and the state of
 * the pool as well as the name of the pool. The name string is allocated and
 * must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0 && errno == ENOMEM) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only. If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
		}

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config. Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare. This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool. Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active. To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use. This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:
		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}