/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <thread_pool.h>
#include <libgeom.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

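/*
 * Illustrative sketch (not part of the original source): the intermediate
 * lists defined below mirror the label hierarchy described above.  For a
 * pool with one top-level vdev whose two labels carry txgs 39 and 40, the
 * gathered state would look roughly like:
 *
 *	pool_entry (pe_guid)
 *	    -> vdev_entry (ve_guid)
 *	        -> config_entry (ce_txg 40) -> config_entry (ce_txg 39)
 *
 * plus one name_entry per scanned device mapping a vdev guid to a path.
 */
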
/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;

static char *
get_devid(const char *path)
{
#ifdef have_devid
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
#else
	return (NULL);
#endif
}

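/*
 * Hypothetical usage sketch for get_devid() (illustration only; the device
 * path is an example):
 *
 *	char *devid = get_devid("/dev/da0");
 *	if (devid != NULL) {
 *		... record it in the vdev config ...
 *		devid_str_free(devid);
 *	}
 *
 * On platforms without libdevid (have_devid undefined) it always returns
 * NULL, and callers such as fix_paths() simply drop the devid from the
 * config.
 */
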
/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk.  If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path.  This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
			return (-1);
		devid_str_free(devid);
	}

	return (0);
}

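/*
 * Example of the suffix-match rule above (illustration only; device names
 * are hypothetical): if the recorded path is "/dev/gpt/disk3" and the name
 * list offers "/dev/da3" and "/dev/gpt/disk3" for the same guid, the latter
 * matches more trailing characters and wins, preserving the preferred name.
 */
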
/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_next = pl->names;
		pl->names = ne;
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev.  Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group.
	 * If so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}

/*
 * Determine whether the named pool is active and, if so, whether its GUID
 * matches the given GUID.  The result is returned in 'isactive'.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}

static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = { 0 };
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
	for (int c = 0; c < holes; c++) {
		/* Top-level is a hole */
		if (hole_array[c] == id)
			return (B_TRUE);
	}
	return (B_FALSE);
}

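/*
 * Illustration (not part of the original source): if top-level vdev 2 of a
 * four-wide pool was removed, the label carries hole_array = { 2 } with
 * holes = 1, so vdev_is_hole(hole_array, 1, 2) returns B_TRUE and id 2 is
 * later rebuilt as a "hole" top-level vdev by get_configs() below.
 */
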
/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t found_one = B_FALSE;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

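			/*
			 * Illustration (not part of the original source): if
			 * this top-level vdev's labels were written at txgs
			 * 37, 40 and 40, the scan above leaves 'tmp' pointing
			 * at a txg-40 config, and the stale txg-37 label is
			 * ignored.
			 */
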
			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed.  This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal.  We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0)
					goto nomem;
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

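		/*
		 * Note (added): refresh_config() above hands our derived
		 * config to the kernel via ZFS_IOC_POOL_TRYIMPORT, which
		 * simulates the import and returns the configuration as it
		 * would actually be imported, without importing the pool.
		 */
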
		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		found_one = B_TRUE;
		nvlist_free(config);
		config = NULL;
	}

	if (!found_one) {
		nvlist_free(ret);
		ret = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

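/*
 * Illustration (not part of the original source): with VDEV_LABELS being 4
 * and sizeof (vdev_label_t) currently 256KB, labels 0 and 1 sit at offsets
 * 0 and 256KB from the start of the device, while labels 2 and 3 sit at
 * size - 512KB and size - 256KB, i.e. two labels at each end.
 */
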
/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	return (0);
}

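/*
 * Hypothetical usage sketch for zpool_read_label() (illustration only; the
 * device path is an example):
 *
 *	nvlist_t *config;
 *	int fd = open("/dev/da0", O_RDONLY);
 *
 *	if (fd >= 0 && zpool_read_label(fd, &config) == 0 &&
 *	    config != NULL) {
 *		... inspect ZPOOL_CONFIG_POOL_NAME, etc. ...
 *		nvlist_free(config);
 *	}
 *
 * A return of 0 with *config == NULL simply means no valid label was found.
 */
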
typedef struct rdsk_node {
	char *rn_name;
	int rn_dfd;
	libzfs_handle_t *rn_hdl;
	nvlist_t *rn_config;
	avl_tree_t *rn_avl;
	avl_node_t rn_node;
	boolean_t rn_nozpool;
} rdsk_node_t;

static int
slice_cache_compare(const void *arg1, const void *arg2)
{
	const char *nm1 = ((rdsk_node_t *)arg1)->rn_name;
	const char *nm2 = ((rdsk_node_t *)arg2)->rn_name;
	char *nm1slice, *nm2slice;
	int rv;

	/*
	 * slices zero and two are the most likely to provide results,
	 * so put those first
	 */
	nm1slice = strstr(nm1, "s0");
	nm2slice = strstr(nm2, "s0");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}
	nm1slice = strstr(nm1, "s2");
	nm2slice = strstr(nm2, "s2");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}

	rv = strcmp(nm1, nm2);
	if (rv == 0)
		return (0);
	return (rv > 0 ? 1 : -1);
}

#ifdef sun
static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
	rdsk_node_t tmpnode;
	rdsk_node_t *node;
	char sname[MAXNAMELEN];

	tmpnode.rn_name = &sname[0];
	(void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
	    diskname, partno);
	/*
	 * protect against division by zero for disk labels that
	 * contain a bogus sector size
	 */
	if (blksz == 0)
		blksz = DEV_BSIZE;
	/* too small to contain a zpool? */
	if ((size < (SPA_MINDEVSIZE / blksz)) &&
	    (node = avl_find(r, &tmpnode, NULL)))
		node->rn_nozpool = B_TRUE;
}
#endif	/* sun */

static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
#ifdef sun
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	if (((ptr = strrchr(diskname, 's')) == NULL) &&
	    ((ptr = strrchr(diskname, 'p')) == NULL))
		return;
	ptr[0] = 's';
	ptr[1] = '\0';
	for (i = 0; i < NDKMAP; i++)
		check_one_slice(r, diskname, i, 0, 1);
	ptr[0] = 'p';
	for (i = 0; i <= FD_NUMPART; i++)
		check_one_slice(r, diskname, i, 0, 1);
#endif	/* sun */
}

#ifdef sun
static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
	struct extvtoc vtoc;
	struct dk_gpt *gpt;
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
		return;
	ptr[1] = '\0';

	if (read_extvtoc(fd, &vtoc) >= 0) {
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    vtoc.v_part[i].p_size, vtoc.v_sectorsz);
	} else if (efi_alloc_and_read(fd, &gpt) >= 0) {
		/*
		 * on x86 we'll still have leftover links that point
		 * to slices s[9-15], so use NDKMAP instead
		 */
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    gpt->efi_parts[i].p_size, gpt->efi_lbasize);
		/* nodes p[1-4] are never used with EFI labels */
		ptr[0] = 'p';
		for (i = 1; i <= FD_NUMPART; i++)
			check_one_slice(r, diskname, i, 0, 1);
		efi_free(gpt);
	}
}
#endif	/* sun */

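/*
 * Illustration (not part of the original source): slice_cache_compare()
 * sorts names containing "s0" first, then "s2", then the rest lexically,
 * so a Solaris-style set { c0t0d0s3, c0t0d0s2, c0t0d0s0 } is probed in the
 * order s0, s2, s3.
 */
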
static void
zpool_open_func(void *arg)
{
	rdsk_node_t *rn = arg;
	struct stat64 statbuf;
	nvlist_t *config;
	int fd;

	if (rn->rn_nozpool)
		return;
	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}
	/*
	 * Ignore failed stats.  We only want regular
	 * files, character devs and block devs.
	 */
	if (fstat64(fd, &statbuf) != 0 ||
	    (!S_ISREG(statbuf.st_mode) &&
	    !S_ISCHR(statbuf.st_mode) &&
	    !S_ISBLK(statbuf.st_mode))) {
		(void) close(fd);
		return;
	}
	/* this file is too small to hold a zpool */
#ifdef sun
	if (S_ISREG(statbuf.st_mode) &&
	    statbuf.st_size < SPA_MINDEVSIZE) {
		(void) close(fd);
		return;
	} else if (!S_ISREG(statbuf.st_mode)) {
		/*
		 * Try to read the disk label first so we don't have to
		 * open a bunch of minor nodes that can't have a zpool.
		 */
		check_slices(rn->rn_avl, fd, rn->rn_name);
	}
#else	/* !sun */
	if (statbuf.st_size < SPA_MINDEVSIZE) {
		(void) close(fd);
		return;
	}
#endif	/* sun */

	if ((zpool_read_label(fd, &config)) != 0) {
		(void) close(fd);
		(void) no_memory(rn->rn_hdl);
		return;
	}
	(void) close(fd);

	rn->rn_config = config;
	if (config != NULL) {
		assert(rn->rn_nozpool == B_FALSE);
	}
}

/*
 * Given a file descriptor, clear (zero) the label information.  This function
 * is used in the appliance stack as part of the ZFS sysevent module and
 * to implement the "zpool labelclear" command.
 */
int
zpool_clear_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t size;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pwrite64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t)) {
			/* don't leak the buffer on a short write */
			free(label);
			return (-1);
		}
	}

	free(label);
	return (0);
}

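/*
 * Hypothetical usage sketch for zpool_clear_label() (illustration only;
 * the device path is an example):
 *
 *	int fd = open("/dev/da0", O_RDWR);
 *
 *	if (fd >= 0 && zpool_clear_label(fd) == 0)
 *		... all four labels have been zeroed ...
 */
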
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	nvlist_t *ret = NULL;
	static char *default_dir = "/dev";
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;
	avl_tree_t slice_cache;
	rdsk_node_t *slice;
	void *cookie;

	if (dirs == 0) {
		dirs = 1;
		dir = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		tpool_t *t;
		char *rdsk;
		int dfd;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/";
		else
			rdsk = path;

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		avl_create(&slice_cache, slice_cache_compare,
		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));

		if (strcmp(rdsk, "/dev/") == 0) {
			struct gmesh mesh;
			struct gclass *mp;
			struct ggeom *gp;
			struct gprovider *pp;

			errno = geom_gettree(&mesh);
			if (errno != 0) {
				zfs_error_aux(hdl, strerror(errno));
				(void) zfs_error_fmt(hdl, EZFS_BADPATH,
				    dgettext(TEXT_DOMAIN, "cannot get GEOM tree"));
				goto error;
			}

			LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
				LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
					LIST_FOREACH(pp, &gp->lg_provider, lg_provider) {
						slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
						slice->rn_name = zfs_strdup(hdl, pp->lg_name);
						slice->rn_avl = &slice_cache;
						slice->rn_dfd = dfd;
						slice->rn_hdl = hdl;
						slice->rn_nozpool = B_FALSE;
						avl_add(&slice_cache, slice);
					}
				}
			}

			geom_deletetree(&mesh);
			goto skipdir;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
			slice->rn_name = zfs_strdup(hdl, name);
			slice->rn_avl = &slice_cache;
			slice->rn_dfd = dfd;
			slice->rn_hdl = hdl;
			slice->rn_nozpool = B_FALSE;
			avl_add(&slice_cache, slice);
		}
skipdir:
		/*
		 * create a thread pool to do all of this in parallel;
		 * rn_nozpool is not protected, so this is racy in that
		 * multiple tasks could decide that the same slice can
		 * not hold a zpool, which is benign.  Also choose
		 * double the number of processors; we hold a lot of
		 * locks in the kernel, so going beyond this doesn't
		 * buy us much.
		 */
		t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
		    0, NULL);
		for (slice = avl_first(&slice_cache); slice;
		    (slice = avl_walk(&slice_cache, slice,
		    AVL_AFTER)))
			(void) tpool_dispatch(t, zpool_open_func, slice);
		tpool_wait(t);
		tpool_destroy(t);

		cookie = NULL;
		while ((slice = avl_destroy_nodes(&slice_cache,
		    &cookie)) != NULL) {
			if (slice->rn_config != NULL) {
				nvlist_t *config = slice->rn_config;
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
					config = NULL;
					continue;
				}
				/* use the non-raw path for the config */
				(void) strlcpy(end, slice->rn_name, pathleft);
				if (add_config(hdl, &pools, path, config) != 0)
					goto error;
			}
			free(slice->rn_name);
			free(slice);
		}
		avl_destroy(&slice_cache);

		(void) closedir(dirp);
		dirp = NULL;
	}

	ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		if (ne->ne_name)
			free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}

nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
	importargs_t iarg = { 0 };

	iarg.paths = argc;
	iarg.path = argv;

	return (zpool_find_import_impl(hdl, &iarg));
}

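/*
 * Hypothetical usage sketch for zpool_find_import() (illustration only;
 * the directory is an example):
 *
 *	char *dirs[] = { "/dev" };
 *	nvlist_t *pools = zpool_find_import(hdl, 1, dirs);
 *
 *	... each nvpair of 'pools' maps a pool name to its derived config ...
 */
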
/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

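	/*
	 * Note (added): at this point 'raw' is the unpacked cachefile, an
	 * nvlist whose pairs map pool name -> last known config; each config
	 * is validated and refreshed below before being returned.
	 */
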
	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		src = fnvpair_value_nvlist(elem);

		name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
		if (guid != 0 && guid != this_guid)
			continue;

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}

static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
	importargs_t *import = data;
	int found = 0;

	if (import->poolname != NULL) {
		char *pool_name;

		verify(nvlist_lookup_string(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
		if (strcmp(pool_name, import->poolname) == 0)
			found = 1;
	} else {
		uint64_t pool_guid;

		verify(nvlist_lookup_uint64(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
		if (pool_guid == import->guid)
			found = 1;
	}

	zpool_close(zhp);
	return (found);
}

nvlist_t *
zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
{
	verify(import->poolname == NULL || import->guid == 0);

	if (import->unique)
		import->exists = zpool_iter(hdl, name_or_guid_exists, import);

	if (import->cachefile != NULL)
		return (zpool_find_import_cached(hdl, import->cachefile,
		    import->poolname, import->guid));

	return (zpool_find_import_impl(hdl, import));
}

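/*
 * Hypothetical usage sketch for zpool_search_import() (illustration only;
 * the pool name is an example):
 *
 *	importargs_t args = { 0 };
 *
 *	args.poolname = "tank";
 *	args.unique = B_TRUE;
 *	nvlist_t *pools = zpool_search_import(hdl, &args);
 *
 * With 'unique' set, 'args.exists' reports whether a pool with that name is
 * already imported; with 'cachefile' set, the cache file is consulted
 * instead of scanning devices.
 */
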
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}

typedef struct aux_cbdata {
	const char	*cb_type;
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} aux_cbdata_t;

static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	uint_t i, count;
	uint64_t guid;
	nvlist_t *nvroot;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Determines if the pool is in use.  If so, it returns true along with the
 * state of the pool and the name of the pool.  The name string is allocated
 * and must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
		}

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = TRUE;
		} else {
			ret = FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = TRUE;
		} else {
			ret = FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}

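/*
 * Hypothetical usage sketch for zpool_in_use() (illustration only):
 *
 *	pool_state_t state;
 *	char *name;
 *	boolean_t inuse;
 *
 *	if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
 *		(void) printf("device belongs to pool %s (state %d)\n",
 *		    name, (int)state);
 *		free(name);
 *	}
 */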