/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
26 */ 27 28#include <sys/types.h> 29#include <sys/stat.h> 30#include <ctype.h> 31#include <errno.h> 32#include <devid.h> 33#include <fcntl.h> 34#include <libintl.h> 35#include <stdio.h> 36#include <stdlib.h> 37#include <strings.h> 38#include <unistd.h> 39#include <sys/zfs_ioctl.h> 40#include <dlfcn.h> 41 42#include "zfs_namecheck.h" 43#include "zfs_prop.h" 44#include "libzfs_impl.h" 45#include "zfs_comutil.h" 46 47static int read_efi_label(nvlist_t *config, diskaddr_t *sb); 48 49#define DISK_ROOT "/dev/dsk" 50#define RDISK_ROOT "/dev/rdsk" 51#define BACKUP_SLICE "s2" 52 53typedef struct prop_flags { 54 int create:1; /* Validate property on creation */ 55 int import:1; /* Validate property on import */ 56} prop_flags_t; 57 58/* 59 * ==================================================================== 60 * zpool property functions 61 * ==================================================================== 62 */ 63 64static int 65zpool_get_all_props(zpool_handle_t *zhp) 66{ 67 zfs_cmd_t zc = { 0 }; 68 libzfs_handle_t *hdl = zhp->zpool_hdl; 69 70 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 71 72 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 73 return (-1); 74 75 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 76 if (errno == ENOMEM) { 77 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 78 zcmd_free_nvlists(&zc); 79 return (-1); 80 } 81 } else { 82 zcmd_free_nvlists(&zc); 83 return (-1); 84 } 85 } 86 87 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 88 zcmd_free_nvlists(&zc); 89 return (-1); 90 } 91 92 zcmd_free_nvlists(&zc); 93 94 return (0); 95} 96 97static int 98zpool_props_refresh(zpool_handle_t *zhp) 99{ 100 nvlist_t *old_props; 101 102 old_props = zhp->zpool_props; 103 104 if (zpool_get_all_props(zhp) != 0) 105 return (-1); 106 107 nvlist_free(old_props); 108 return (0); 109} 110 111static char * 112zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 113 zprop_source_t *src) 114{ 115 nvlist_t *nv, *nvl; 116 
uint64_t ival; 117 char *value; 118 zprop_source_t source; 119 120 nvl = zhp->zpool_props; 121 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 122 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 123 source = ival; 124 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 125 } else { 126 source = ZPROP_SRC_DEFAULT; 127 if ((value = (char *)zpool_prop_default_string(prop)) == NULL) 128 value = "-"; 129 } 130 131 if (src) 132 *src = source; 133 134 return (value); 135} 136 137uint64_t 138zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 139{ 140 nvlist_t *nv, *nvl; 141 uint64_t value; 142 zprop_source_t source; 143 144 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 145 /* 146 * zpool_get_all_props() has most likely failed because 147 * the pool is faulted, but if all we need is the top level 148 * vdev's guid then get it from the zhp config nvlist. 149 */ 150 if ((prop == ZPOOL_PROP_GUID) && 151 (nvlist_lookup_nvlist(zhp->zpool_config, 152 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 153 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 154 == 0)) { 155 return (value); 156 } 157 return (zpool_prop_default_numeric(prop)); 158 } 159 160 nvl = zhp->zpool_props; 161 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 162 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 163 source = value; 164 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 165 } else { 166 source = ZPROP_SRC_DEFAULT; 167 value = zpool_prop_default_numeric(prop); 168 } 169 170 if (src) 171 *src = source; 172 173 return (value); 174} 175 176/* 177 * Map VDEV STATE to printed strings. 
178 */ 179const char * 180zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 181{ 182 switch (state) { 183 case VDEV_STATE_CLOSED: 184 case VDEV_STATE_OFFLINE: 185 return (gettext("OFFLINE")); 186 case VDEV_STATE_REMOVED: 187 return (gettext("REMOVED")); 188 case VDEV_STATE_CANT_OPEN: 189 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 190 return (gettext("FAULTED")); 191 else if (aux == VDEV_AUX_SPLIT_POOL) 192 return (gettext("SPLIT")); 193 else 194 return (gettext("UNAVAIL")); 195 case VDEV_STATE_FAULTED: 196 return (gettext("FAULTED")); 197 case VDEV_STATE_DEGRADED: 198 return (gettext("DEGRADED")); 199 case VDEV_STATE_HEALTHY: 200 return (gettext("ONLINE")); 201 } 202 203 return (gettext("UNKNOWN")); 204} 205 206/* 207 * Map POOL STATE to printed strings. 208 */ 209const char * 210zpool_pool_state_to_name(pool_state_t state) 211{ 212 switch (state) { 213 case POOL_STATE_ACTIVE: 214 return (gettext("ACTIVE")); 215 case POOL_STATE_EXPORTED: 216 return (gettext("EXPORTED")); 217 case POOL_STATE_DESTROYED: 218 return (gettext("DESTROYED")); 219 case POOL_STATE_SPARE: 220 return (gettext("SPARE")); 221 case POOL_STATE_L2CACHE: 222 return (gettext("L2CACHE")); 223 case POOL_STATE_UNINITIALIZED: 224 return (gettext("UNINITIALIZED")); 225 case POOL_STATE_UNAVAIL: 226 return (gettext("UNAVAIL")); 227 case POOL_STATE_POTENTIALLY_ACTIVE: 228 return (gettext("POTENTIALLY_ACTIVE")); 229 } 230 231 return (gettext("UNKNOWN")); 232} 233 234/* 235 * Get a zpool property value for 'prop' and return the value in 236 * a pre-allocated buffer. 
237 */ 238int 239zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, 240 zprop_source_t *srctype) 241{ 242 uint64_t intval; 243 const char *strval; 244 zprop_source_t src = ZPROP_SRC_NONE; 245 nvlist_t *nvroot; 246 vdev_stat_t *vs; 247 uint_t vsc; 248 249 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 250 switch (prop) { 251 case ZPOOL_PROP_NAME: 252 (void) strlcpy(buf, zpool_get_name(zhp), len); 253 break; 254 255 case ZPOOL_PROP_HEALTH: 256 (void) strlcpy(buf, "FAULTED", len); 257 break; 258 259 case ZPOOL_PROP_GUID: 260 intval = zpool_get_prop_int(zhp, prop, &src); 261 (void) snprintf(buf, len, "%llu", intval); 262 break; 263 264 case ZPOOL_PROP_ALTROOT: 265 case ZPOOL_PROP_CACHEFILE: 266 case ZPOOL_PROP_COMMENT: 267 if (zhp->zpool_props != NULL || 268 zpool_get_all_props(zhp) == 0) { 269 (void) strlcpy(buf, 270 zpool_get_prop_string(zhp, prop, &src), 271 len); 272 if (srctype != NULL) 273 *srctype = src; 274 return (0); 275 } 276 /* FALLTHROUGH */ 277 default: 278 (void) strlcpy(buf, "-", len); 279 break; 280 } 281 282 if (srctype != NULL) 283 *srctype = src; 284 return (0); 285 } 286 287 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && 288 prop != ZPOOL_PROP_NAME) 289 return (-1); 290 291 switch (zpool_prop_get_type(prop)) { 292 case PROP_TYPE_STRING: 293 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), 294 len); 295 break; 296 297 case PROP_TYPE_NUMBER: 298 intval = zpool_get_prop_int(zhp, prop, &src); 299 300 switch (prop) { 301 case ZPOOL_PROP_SIZE: 302 case ZPOOL_PROP_ALLOCATED: 303 case ZPOOL_PROP_FREE:
| 26 */ 27 28#include <sys/types.h> 29#include <sys/stat.h> 30#include <ctype.h> 31#include <errno.h> 32#include <devid.h> 33#include <fcntl.h> 34#include <libintl.h> 35#include <stdio.h> 36#include <stdlib.h> 37#include <strings.h> 38#include <unistd.h> 39#include <sys/zfs_ioctl.h> 40#include <dlfcn.h> 41 42#include "zfs_namecheck.h" 43#include "zfs_prop.h" 44#include "libzfs_impl.h" 45#include "zfs_comutil.h" 46 47static int read_efi_label(nvlist_t *config, diskaddr_t *sb); 48 49#define DISK_ROOT "/dev/dsk" 50#define RDISK_ROOT "/dev/rdsk" 51#define BACKUP_SLICE "s2" 52 53typedef struct prop_flags { 54 int create:1; /* Validate property on creation */ 55 int import:1; /* Validate property on import */ 56} prop_flags_t; 57 58/* 59 * ==================================================================== 60 * zpool property functions 61 * ==================================================================== 62 */ 63 64static int 65zpool_get_all_props(zpool_handle_t *zhp) 66{ 67 zfs_cmd_t zc = { 0 }; 68 libzfs_handle_t *hdl = zhp->zpool_hdl; 69 70 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 71 72 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 73 return (-1); 74 75 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 76 if (errno == ENOMEM) { 77 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 78 zcmd_free_nvlists(&zc); 79 return (-1); 80 } 81 } else { 82 zcmd_free_nvlists(&zc); 83 return (-1); 84 } 85 } 86 87 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 88 zcmd_free_nvlists(&zc); 89 return (-1); 90 } 91 92 zcmd_free_nvlists(&zc); 93 94 return (0); 95} 96 97static int 98zpool_props_refresh(zpool_handle_t *zhp) 99{ 100 nvlist_t *old_props; 101 102 old_props = zhp->zpool_props; 103 104 if (zpool_get_all_props(zhp) != 0) 105 return (-1); 106 107 nvlist_free(old_props); 108 return (0); 109} 110 111static char * 112zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 113 zprop_source_t *src) 114{ 115 nvlist_t *nv, *nvl; 
116 uint64_t ival; 117 char *value; 118 zprop_source_t source; 119 120 nvl = zhp->zpool_props; 121 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 122 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 123 source = ival; 124 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 125 } else { 126 source = ZPROP_SRC_DEFAULT; 127 if ((value = (char *)zpool_prop_default_string(prop)) == NULL) 128 value = "-"; 129 } 130 131 if (src) 132 *src = source; 133 134 return (value); 135} 136 137uint64_t 138zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 139{ 140 nvlist_t *nv, *nvl; 141 uint64_t value; 142 zprop_source_t source; 143 144 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 145 /* 146 * zpool_get_all_props() has most likely failed because 147 * the pool is faulted, but if all we need is the top level 148 * vdev's guid then get it from the zhp config nvlist. 149 */ 150 if ((prop == ZPOOL_PROP_GUID) && 151 (nvlist_lookup_nvlist(zhp->zpool_config, 152 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 153 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 154 == 0)) { 155 return (value); 156 } 157 return (zpool_prop_default_numeric(prop)); 158 } 159 160 nvl = zhp->zpool_props; 161 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 162 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 163 source = value; 164 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 165 } else { 166 source = ZPROP_SRC_DEFAULT; 167 value = zpool_prop_default_numeric(prop); 168 } 169 170 if (src) 171 *src = source; 172 173 return (value); 174} 175 176/* 177 * Map VDEV STATE to printed strings. 
178 */ 179const char * 180zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 181{ 182 switch (state) { 183 case VDEV_STATE_CLOSED: 184 case VDEV_STATE_OFFLINE: 185 return (gettext("OFFLINE")); 186 case VDEV_STATE_REMOVED: 187 return (gettext("REMOVED")); 188 case VDEV_STATE_CANT_OPEN: 189 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 190 return (gettext("FAULTED")); 191 else if (aux == VDEV_AUX_SPLIT_POOL) 192 return (gettext("SPLIT")); 193 else 194 return (gettext("UNAVAIL")); 195 case VDEV_STATE_FAULTED: 196 return (gettext("FAULTED")); 197 case VDEV_STATE_DEGRADED: 198 return (gettext("DEGRADED")); 199 case VDEV_STATE_HEALTHY: 200 return (gettext("ONLINE")); 201 } 202 203 return (gettext("UNKNOWN")); 204} 205 206/* 207 * Map POOL STATE to printed strings. 208 */ 209const char * 210zpool_pool_state_to_name(pool_state_t state) 211{ 212 switch (state) { 213 case POOL_STATE_ACTIVE: 214 return (gettext("ACTIVE")); 215 case POOL_STATE_EXPORTED: 216 return (gettext("EXPORTED")); 217 case POOL_STATE_DESTROYED: 218 return (gettext("DESTROYED")); 219 case POOL_STATE_SPARE: 220 return (gettext("SPARE")); 221 case POOL_STATE_L2CACHE: 222 return (gettext("L2CACHE")); 223 case POOL_STATE_UNINITIALIZED: 224 return (gettext("UNINITIALIZED")); 225 case POOL_STATE_UNAVAIL: 226 return (gettext("UNAVAIL")); 227 case POOL_STATE_POTENTIALLY_ACTIVE: 228 return (gettext("POTENTIALLY_ACTIVE")); 229 } 230 231 return (gettext("UNKNOWN")); 232} 233 234/* 235 * Get a zpool property value for 'prop' and return the value in 236 * a pre-allocated buffer. 
237 */ 238int 239zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, 240 zprop_source_t *srctype) 241{ 242 uint64_t intval; 243 const char *strval; 244 zprop_source_t src = ZPROP_SRC_NONE; 245 nvlist_t *nvroot; 246 vdev_stat_t *vs; 247 uint_t vsc; 248 249 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 250 switch (prop) { 251 case ZPOOL_PROP_NAME: 252 (void) strlcpy(buf, zpool_get_name(zhp), len); 253 break; 254 255 case ZPOOL_PROP_HEALTH: 256 (void) strlcpy(buf, "FAULTED", len); 257 break; 258 259 case ZPOOL_PROP_GUID: 260 intval = zpool_get_prop_int(zhp, prop, &src); 261 (void) snprintf(buf, len, "%llu", intval); 262 break; 263 264 case ZPOOL_PROP_ALTROOT: 265 case ZPOOL_PROP_CACHEFILE: 266 case ZPOOL_PROP_COMMENT: 267 if (zhp->zpool_props != NULL || 268 zpool_get_all_props(zhp) == 0) { 269 (void) strlcpy(buf, 270 zpool_get_prop_string(zhp, prop, &src), 271 len); 272 if (srctype != NULL) 273 *srctype = src; 274 return (0); 275 } 276 /* FALLTHROUGH */ 277 default: 278 (void) strlcpy(buf, "-", len); 279 break; 280 } 281 282 if (srctype != NULL) 283 *srctype = src; 284 return (0); 285 } 286 287 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && 288 prop != ZPOOL_PROP_NAME) 289 return (-1); 290 291 switch (zpool_prop_get_type(prop)) { 292 case PROP_TYPE_STRING: 293 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), 294 len); 295 break; 296 297 case PROP_TYPE_NUMBER: 298 intval = zpool_get_prop_int(zhp, prop, &src); 299 300 switch (prop) { 301 case ZPOOL_PROP_SIZE: 302 case ZPOOL_PROP_ALLOCATED: 303 case ZPOOL_PROP_FREE:
|
| 304 case ZPOOL_PROP_EXPANDSZ:
|
304 (void) zfs_nicenum(intval, buf, len); 305 break; 306 307 case ZPOOL_PROP_CAPACITY: 308 (void) snprintf(buf, len, "%llu%%", 309 (u_longlong_t)intval); 310 break; 311 312 case ZPOOL_PROP_DEDUPRATIO: 313 (void) snprintf(buf, len, "%llu.%02llux", 314 (u_longlong_t)(intval / 100), 315 (u_longlong_t)(intval % 100)); 316 break; 317 318 case ZPOOL_PROP_HEALTH: 319 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 320 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 321 verify(nvlist_lookup_uint64_array(nvroot, 322 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 323 == 0); 324 325 (void) strlcpy(buf, zpool_state_to_name(intval, 326 vs->vs_aux), len); 327 break; 328 default: 329 (void) snprintf(buf, len, "%llu", intval); 330 } 331 break; 332 333 case PROP_TYPE_INDEX: 334 intval = zpool_get_prop_int(zhp, prop, &src); 335 if (zpool_prop_index_to_string(prop, intval, &strval) 336 != 0) 337 return (-1); 338 (void) strlcpy(buf, strval, len); 339 break; 340 341 default: 342 abort(); 343 } 344 345 if (srctype) 346 *srctype = src; 347 348 return (0); 349} 350 351/* 352 * Check if the bootfs name has the same pool name as it is set to. 353 * Assuming bootfs is a valid dataset name. 354 */ 355static boolean_t 356bootfs_name_valid(const char *pool, char *bootfs) 357{ 358 int len = strlen(pool); 359 360 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 361 return (B_FALSE); 362 363 if (strncmp(pool, bootfs, len) == 0 && 364 (bootfs[len] == '/' || bootfs[len] == '\0')) 365 return (B_TRUE); 366 367 return (B_FALSE); 368} 369 370/* 371 * Inspect the configuration to determine if any of the devices contain 372 * an EFI label. 
373 */ 374static boolean_t 375pool_uses_efi(nvlist_t *config) 376{ 377#ifdef sun 378 nvlist_t **child; 379 uint_t c, children; 380 381 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 382 &child, &children) != 0) 383 return (read_efi_label(config, NULL) >= 0); 384 385 for (c = 0; c < children; c++) { 386 if (pool_uses_efi(child[c])) 387 return (B_TRUE); 388 } 389#endif /* sun */ 390 return (B_FALSE); 391} 392
| 305 (void) zfs_nicenum(intval, buf, len); 306 break; 307 308 case ZPOOL_PROP_CAPACITY: 309 (void) snprintf(buf, len, "%llu%%", 310 (u_longlong_t)intval); 311 break; 312 313 case ZPOOL_PROP_DEDUPRATIO: 314 (void) snprintf(buf, len, "%llu.%02llux", 315 (u_longlong_t)(intval / 100), 316 (u_longlong_t)(intval % 100)); 317 break; 318 319 case ZPOOL_PROP_HEALTH: 320 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 321 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 322 verify(nvlist_lookup_uint64_array(nvroot, 323 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 324 == 0); 325 326 (void) strlcpy(buf, zpool_state_to_name(intval, 327 vs->vs_aux), len); 328 break; 329 default: 330 (void) snprintf(buf, len, "%llu", intval); 331 } 332 break; 333 334 case PROP_TYPE_INDEX: 335 intval = zpool_get_prop_int(zhp, prop, &src); 336 if (zpool_prop_index_to_string(prop, intval, &strval) 337 != 0) 338 return (-1); 339 (void) strlcpy(buf, strval, len); 340 break; 341 342 default: 343 abort(); 344 } 345 346 if (srctype) 347 *srctype = src; 348 349 return (0); 350} 351 352/* 353 * Check if the bootfs name has the same pool name as it is set to. 354 * Assuming bootfs is a valid dataset name. 355 */ 356static boolean_t 357bootfs_name_valid(const char *pool, char *bootfs) 358{ 359 int len = strlen(pool); 360 361 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 362 return (B_FALSE); 363 364 if (strncmp(pool, bootfs, len) == 0 && 365 (bootfs[len] == '/' || bootfs[len] == '\0')) 366 return (B_TRUE); 367 368 return (B_FALSE); 369} 370 371/* 372 * Inspect the configuration to determine if any of the devices contain 373 * an EFI label. 
374 */ 375static boolean_t 376pool_uses_efi(nvlist_t *config) 377{ 378#ifdef sun 379 nvlist_t **child; 380 uint_t c, children; 381 382 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 383 &child, &children) != 0) 384 return (read_efi_label(config, NULL) >= 0); 385 386 for (c = 0; c < children; c++) { 387 if (pool_uses_efi(child[c])) 388 return (B_TRUE); 389 } 390#endif /* sun */ 391 return (B_FALSE); 392} 393
|
393static boolean_t 394pool_is_bootable(zpool_handle_t *zhp)
| 394boolean_t 395zpool_is_bootable(zpool_handle_t *zhp)
|
395{ 396 char bootfs[ZPOOL_MAXNAMELEN]; 397 398 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 399 sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-", 400 sizeof (bootfs)) != 0); 401} 402 403 404/* 405 * Given an nvlist of zpool properties to be set, validate that they are 406 * correct, and parse any numeric properties (index, boolean, etc) if they are 407 * specified as strings. 408 */ 409static nvlist_t * 410zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 411 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 412{ 413 nvpair_t *elem; 414 nvlist_t *retprops; 415 zpool_prop_t prop; 416 char *strval; 417 uint64_t intval; 418 char *slash, *check; 419 struct stat64 statbuf; 420 zpool_handle_t *zhp; 421 nvlist_t *nvroot; 422 423 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 424 (void) no_memory(hdl); 425 return (NULL); 426 } 427 428 elem = NULL; 429 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 430 const char *propname = nvpair_name(elem); 431 432 /* 433 * Make sure this property is valid and applies to this type. 434 */ 435 if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) { 436 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 437 "invalid property '%s'"), propname); 438 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 439 goto error; 440 } 441 442 if (zpool_prop_readonly(prop)) { 443 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 444 "is readonly"), propname); 445 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); 446 goto error; 447 } 448 449 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, 450 &strval, &intval, errbuf) != 0) 451 goto error; 452 453 /* 454 * Perform additional checking for specific properties. 
455 */ 456 switch (prop) { 457 case ZPOOL_PROP_VERSION: 458 if (intval < version || intval > SPA_VERSION) { 459 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 460 "property '%s' number %d is invalid."), 461 propname, intval); 462 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 463 goto error; 464 } 465 break; 466 467 case ZPOOL_PROP_BOOTFS: 468 if (flags.create || flags.import) { 469 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 470 "property '%s' cannot be set at creation " 471 "or import time"), propname); 472 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 473 goto error; 474 } 475 476 if (version < SPA_VERSION_BOOTFS) { 477 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 478 "pool must be upgraded to support " 479 "'%s' property"), propname); 480 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 481 goto error; 482 } 483 484 /* 485 * bootfs property value has to be a dataset name and 486 * the dataset has to be in the same pool as it sets to. 487 */ 488 if (strval[0] != '\0' && !bootfs_name_valid(poolname, 489 strval)) { 490 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 491 "is an invalid name"), strval); 492 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 493 goto error; 494 } 495 496 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { 497 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 498 "could not open pool '%s'"), poolname); 499 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 500 goto error; 501 } 502 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 503 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 504 505#ifdef sun 506 /* 507 * bootfs property cannot be set on a disk which has 508 * been EFI labeled. 
509 */ 510 if (pool_uses_efi(nvroot)) { 511 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 512 "property '%s' not supported on " 513 "EFI labeled devices"), propname); 514 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf); 515 zpool_close(zhp); 516 goto error; 517 } 518#endif /* sun */ 519 zpool_close(zhp); 520 break; 521 522 case ZPOOL_PROP_ALTROOT: 523 if (!flags.create && !flags.import) { 524 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 525 "property '%s' can only be set during pool " 526 "creation or import"), propname); 527 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 528 goto error; 529 } 530 531 if (strval[0] != '/') { 532 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 533 "bad alternate root '%s'"), strval); 534 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 535 goto error; 536 } 537 break; 538 539 case ZPOOL_PROP_CACHEFILE: 540 if (strval[0] == '\0') 541 break; 542 543 if (strcmp(strval, "none") == 0) 544 break; 545 546 if (strval[0] != '/') { 547 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 548 "property '%s' must be empty, an " 549 "absolute path, or 'none'"), propname); 550 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 551 goto error; 552 } 553 554 slash = strrchr(strval, '/'); 555 556 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 557 strcmp(slash, "/..") == 0) { 558 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 559 "'%s' is not a valid file"), strval); 560 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 561 goto error; 562 } 563 564 *slash = '\0'; 565 566 if (strval[0] != '\0' && 567 (stat64(strval, &statbuf) != 0 || 568 !S_ISDIR(statbuf.st_mode))) { 569 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 570 "'%s' is not a valid directory"), 571 strval); 572 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 573 goto error; 574 } 575 576 *slash = '/'; 577 break; 578 579 case ZPOOL_PROP_COMMENT: 580 for (check = strval; *check != '\0'; check++) { 581 if (!isprint(*check)) { 582 zfs_error_aux(hdl, 583 dgettext(TEXT_DOMAIN, 584 "comment may only have printable " 585 "characters")); 586 (void) 
zfs_error(hdl, EZFS_BADPROP, 587 errbuf); 588 goto error; 589 } 590 } 591 if (strlen(strval) > ZPROP_MAX_COMMENT) { 592 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 593 "comment must not exceed %d characters"), 594 ZPROP_MAX_COMMENT); 595 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 596 goto error; 597 } 598 break; 599 case ZPOOL_PROP_READONLY: 600 if (!flags.import) { 601 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 602 "property '%s' can only be set at " 603 "import time"), propname); 604 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 605 goto error; 606 } 607 break; 608 } 609 } 610 611 return (retprops); 612error: 613 nvlist_free(retprops); 614 return (NULL); 615} 616 617/* 618 * Set zpool property : propname=propval. 619 */ 620int 621zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 622{ 623 zfs_cmd_t zc = { 0 }; 624 int ret = -1; 625 char errbuf[1024]; 626 nvlist_t *nvl = NULL; 627 nvlist_t *realprops; 628 uint64_t version; 629 prop_flags_t flags = { 0 }; 630 631 (void) snprintf(errbuf, sizeof (errbuf), 632 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 633 zhp->zpool_name); 634 635 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 636 return (no_memory(zhp->zpool_hdl)); 637 638 if (nvlist_add_string(nvl, propname, propval) != 0) { 639 nvlist_free(nvl); 640 return (no_memory(zhp->zpool_hdl)); 641 } 642 643 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 644 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 645 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 646 nvlist_free(nvl); 647 return (-1); 648 } 649 650 nvlist_free(nvl); 651 nvl = realprops; 652 653 /* 654 * Execute the corresponding ioctl() to set this property. 
655 */ 656 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 657 658 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 659 nvlist_free(nvl); 660 return (-1); 661 } 662 663 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 664 665 zcmd_free_nvlists(&zc); 666 nvlist_free(nvl); 667 668 if (ret) 669 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 670 else 671 (void) zpool_props_refresh(zhp); 672 673 return (ret); 674} 675 676int 677zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) 678{ 679 libzfs_handle_t *hdl = zhp->zpool_hdl; 680 zprop_list_t *entry; 681 char buf[ZFS_MAXPROPLEN]; 682 683 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 684 return (-1); 685 686 for (entry = *plp; entry != NULL; entry = entry->pl_next) { 687 688 if (entry->pl_fixed) 689 continue; 690 691 if (entry->pl_prop != ZPROP_INVAL && 692 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), 693 NULL) == 0) { 694 if (strlen(buf) > entry->pl_width) 695 entry->pl_width = strlen(buf); 696 } 697 } 698 699 return (0); 700} 701 702 703/* 704 * Don't start the slice at the default block of 34; many storage 705 * devices will use a stripe width of 128k, so start there instead. 706 */ 707#define NEW_START_BLOCK 256 708 709/* 710 * Validate the given pool name, optionally putting an extended error message in 711 * 'buf'. 712 */ 713boolean_t 714zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 715{ 716 namecheck_err_t why; 717 char what; 718 int ret; 719 720 ret = pool_namecheck(pool, &why, &what); 721 722 /* 723 * The rules for reserved pool names were extended at a later point. 724 * But we need to support users with existing pools that may now be 725 * invalid. So we only check for this expanded set of names during a 726 * create (or import), and only in userland. 
727 */ 728 if (ret == 0 && !isopen && 729 (strncmp(pool, "mirror", 6) == 0 || 730 strncmp(pool, "raidz", 5) == 0 || 731 strncmp(pool, "spare", 5) == 0 || 732 strcmp(pool, "log") == 0)) { 733 if (hdl != NULL) 734 zfs_error_aux(hdl, 735 dgettext(TEXT_DOMAIN, "name is reserved")); 736 return (B_FALSE); 737 } 738 739 740 if (ret != 0) { 741 if (hdl != NULL) { 742 switch (why) { 743 case NAME_ERR_TOOLONG: 744 zfs_error_aux(hdl, 745 dgettext(TEXT_DOMAIN, "name is too long")); 746 break; 747 748 case NAME_ERR_INVALCHAR: 749 zfs_error_aux(hdl, 750 dgettext(TEXT_DOMAIN, "invalid character " 751 "'%c' in pool name"), what); 752 break; 753 754 case NAME_ERR_NOLETTER: 755 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 756 "name must begin with a letter")); 757 break; 758 759 case NAME_ERR_RESERVED: 760 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 761 "name is reserved")); 762 break; 763 764 case NAME_ERR_DISKLIKE: 765 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 766 "pool name is reserved")); 767 break; 768 769 case NAME_ERR_LEADING_SLASH: 770 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 771 "leading slash in name")); 772 break; 773 774 case NAME_ERR_EMPTY_COMPONENT: 775 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 776 "empty component in name")); 777 break; 778 779 case NAME_ERR_TRAILING_SLASH: 780 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 781 "trailing slash in name")); 782 break; 783 784 case NAME_ERR_MULTIPLE_AT: 785 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 786 "multiple '@' delimiters in name")); 787 break; 788 789 } 790 } 791 return (B_FALSE); 792 } 793 794 return (B_TRUE); 795} 796 797/* 798 * Open a handle to the given pool, even if the pool is currently in the FAULTED 799 * state. 800 */ 801zpool_handle_t * 802zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 803{ 804 zpool_handle_t *zhp; 805 boolean_t missing; 806 807 /* 808 * Make sure the pool name is valid. 
809 */ 810 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 811 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 812 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 813 pool); 814 return (NULL); 815 } 816 817 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 818 return (NULL); 819 820 zhp->zpool_hdl = hdl; 821 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 822 823 if (zpool_refresh_stats(zhp, &missing) != 0) { 824 zpool_close(zhp); 825 return (NULL); 826 } 827 828 if (missing) { 829 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 830 (void) zfs_error_fmt(hdl, EZFS_NOENT, 831 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 832 zpool_close(zhp); 833 return (NULL); 834 } 835 836 return (zhp); 837} 838 839/* 840 * Like the above, but silent on error. Used when iterating over pools (because 841 * the configuration cache may be out of date). 842 */ 843int 844zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 845{ 846 zpool_handle_t *zhp; 847 boolean_t missing; 848 849 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 850 return (-1); 851 852 zhp->zpool_hdl = hdl; 853 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 854 855 if (zpool_refresh_stats(zhp, &missing) != 0) { 856 zpool_close(zhp); 857 return (-1); 858 } 859 860 if (missing) { 861 zpool_close(zhp); 862 *ret = NULL; 863 return (0); 864 } 865 866 *ret = zhp; 867 return (0); 868} 869 870/* 871 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 872 * state. 
873 */ 874zpool_handle_t * 875zpool_open(libzfs_handle_t *hdl, const char *pool) 876{ 877 zpool_handle_t *zhp; 878 879 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 880 return (NULL); 881 882 if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 883 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 884 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 885 zpool_close(zhp); 886 return (NULL); 887 } 888 889 return (zhp); 890} 891 892/* 893 * Close the handle. Simply frees the memory associated with the handle. 894 */ 895void 896zpool_close(zpool_handle_t *zhp) 897{ 898 if (zhp->zpool_config) 899 nvlist_free(zhp->zpool_config); 900 if (zhp->zpool_old_config) 901 nvlist_free(zhp->zpool_old_config); 902 if (zhp->zpool_props) 903 nvlist_free(zhp->zpool_props); 904 free(zhp); 905} 906 907/* 908 * Return the name of the pool. 909 */ 910const char * 911zpool_get_name(zpool_handle_t *zhp) 912{ 913 return (zhp->zpool_name); 914} 915 916 917/* 918 * Return the state of the pool (ACTIVE or UNAVAILABLE) 919 */ 920int 921zpool_get_state(zpool_handle_t *zhp) 922{ 923 return (zhp->zpool_state); 924} 925 926/* 927 * Create the named pool, using the provided vdev list. It is assumed 928 * that the consumer has already validated the contents of the nvlist, so we 929 * don't have to worry about error semantics. 
930 */ 931int 932zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 933 nvlist_t *props, nvlist_t *fsprops) 934{ 935 zfs_cmd_t zc = { 0 }; 936 nvlist_t *zc_fsprops = NULL; 937 nvlist_t *zc_props = NULL; 938 char msg[1024]; 939 char *altroot; 940 int ret = -1; 941 942 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 943 "cannot create '%s'"), pool); 944 945 if (!zpool_name_valid(hdl, B_FALSE, pool)) 946 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 947 948 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 949 return (-1); 950 951 if (props) { 952 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 953 954 if ((zc_props = zpool_valid_proplist(hdl, pool, props, 955 SPA_VERSION_1, flags, msg)) == NULL) { 956 goto create_failed; 957 } 958 } 959 960 if (fsprops) { 961 uint64_t zoned; 962 char *zonestr; 963 964 zoned = ((nvlist_lookup_string(fsprops, 965 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 966 strcmp(zonestr, "on") == 0); 967 968 if ((zc_fsprops = zfs_valid_proplist(hdl, 969 ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) { 970 goto create_failed; 971 } 972 if (!zc_props && 973 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 974 goto create_failed; 975 } 976 if (nvlist_add_nvlist(zc_props, 977 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 978 goto create_failed; 979 } 980 } 981 982 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 983 goto create_failed; 984 985 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 986 987 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 988 989 zcmd_free_nvlists(&zc); 990 nvlist_free(zc_props); 991 nvlist_free(zc_fsprops); 992 993 switch (errno) { 994 case EBUSY: 995 /* 996 * This can happen if the user has specified the same 997 * device multiple times. We can't reliably detect this 998 * until we try to add it and see we already have a 999 * label. 
1000 */ 1001 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1002 "one or more vdevs refer to the same device")); 1003 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1004 1005 case EOVERFLOW: 1006 /* 1007 * This occurs when one of the devices is below 1008 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1009 * device was the problem device since there's no 1010 * reliable way to determine device size from userland. 1011 */ 1012 { 1013 char buf[64]; 1014 1015 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1016 1017 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1018 "one or more devices is less than the " 1019 "minimum size (%s)"), buf); 1020 } 1021 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1022 1023 case ENOSPC: 1024 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1025 "one or more devices is out of space")); 1026 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1027 1028 case ENOTBLK: 1029 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1030 "cache device must be a disk or disk slice")); 1031 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1032 1033 default: 1034 return (zpool_standard_error(hdl, errno, msg)); 1035 } 1036 } 1037 1038 /* 1039 * If this is an alternate root pool, then we automatically set the 1040 * mountpoint of the root dataset to be '/'. 1041 */ 1042 if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT), 1043 &altroot) == 0) { 1044 zfs_handle_t *zhp; 1045 1046 verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL); 1047 verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1048 "/") == 0); 1049 1050 zfs_close(zhp); 1051 } 1052 1053create_failed: 1054 zcmd_free_nvlists(&zc); 1055 nvlist_free(zc_props); 1056 nvlist_free(zc_fsprops); 1057 return (ret); 1058} 1059 1060/* 1061 * Destroy the given pool. It is up to the caller to ensure that there are no 1062 * datasets left in the pool. 
1063 */ 1064int 1065zpool_destroy(zpool_handle_t *zhp) 1066{ 1067 zfs_cmd_t zc = { 0 }; 1068 zfs_handle_t *zfp = NULL; 1069 libzfs_handle_t *hdl = zhp->zpool_hdl; 1070 char msg[1024]; 1071 1072 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1073 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1074 return (-1); 1075 1076 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1077 1078 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1079 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1080 "cannot destroy '%s'"), zhp->zpool_name); 1081 1082 if (errno == EROFS) { 1083 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1084 "one or more devices is read only")); 1085 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1086 } else { 1087 (void) zpool_standard_error(hdl, errno, msg); 1088 } 1089 1090 if (zfp) 1091 zfs_close(zfp); 1092 return (-1); 1093 } 1094 1095 if (zfp) { 1096 remove_mountpoint(zfp); 1097 zfs_close(zfp); 1098 } 1099 1100 return (0); 1101} 1102 1103/* 1104 * Add the given vdevs to the pool. The caller must have already performed the 1105 * necessary verification to ensure that the vdev specification is well-formed. 1106 */ 1107int 1108zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) 1109{ 1110 zfs_cmd_t zc = { 0 }; 1111 int ret; 1112 libzfs_handle_t *hdl = zhp->zpool_hdl; 1113 char msg[1024]; 1114 nvlist_t **spares, **l2cache; 1115 uint_t nspares, nl2cache; 1116 1117 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1118 "cannot add to '%s'"), zhp->zpool_name); 1119 1120 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1121 SPA_VERSION_SPARES && 1122 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1123 &spares, &nspares) == 0) { 1124 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1125 "upgraded to add hot spares")); 1126 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1127 } 1128
| 396{ 397 char bootfs[ZPOOL_MAXNAMELEN]; 398 399 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 400 sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-", 401 sizeof (bootfs)) != 0); 402} 403 404 405/* 406 * Given an nvlist of zpool properties to be set, validate that they are 407 * correct, and parse any numeric properties (index, boolean, etc) if they are 408 * specified as strings. 409 */ 410static nvlist_t * 411zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 412 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 413{ 414 nvpair_t *elem; 415 nvlist_t *retprops; 416 zpool_prop_t prop; 417 char *strval; 418 uint64_t intval; 419 char *slash, *check; 420 struct stat64 statbuf; 421 zpool_handle_t *zhp; 422 nvlist_t *nvroot; 423 424 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 425 (void) no_memory(hdl); 426 return (NULL); 427 } 428 429 elem = NULL; 430 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 431 const char *propname = nvpair_name(elem); 432 433 /* 434 * Make sure this property is valid and applies to this type. 435 */ 436 if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) { 437 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 438 "invalid property '%s'"), propname); 439 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 440 goto error; 441 } 442 443 if (zpool_prop_readonly(prop)) { 444 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 445 "is readonly"), propname); 446 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); 447 goto error; 448 } 449 450 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, 451 &strval, &intval, errbuf) != 0) 452 goto error; 453 454 /* 455 * Perform additional checking for specific properties. 
456 */ 457 switch (prop) { 458 case ZPOOL_PROP_VERSION: 459 if (intval < version || intval > SPA_VERSION) { 460 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 461 "property '%s' number %d is invalid."), 462 propname, intval); 463 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 464 goto error; 465 } 466 break; 467 468 case ZPOOL_PROP_BOOTFS: 469 if (flags.create || flags.import) { 470 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 471 "property '%s' cannot be set at creation " 472 "or import time"), propname); 473 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 474 goto error; 475 } 476 477 if (version < SPA_VERSION_BOOTFS) { 478 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 479 "pool must be upgraded to support " 480 "'%s' property"), propname); 481 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 482 goto error; 483 } 484 485 /* 486 * bootfs property value has to be a dataset name and 487 * the dataset has to be in the same pool as it sets to. 488 */ 489 if (strval[0] != '\0' && !bootfs_name_valid(poolname, 490 strval)) { 491 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 492 "is an invalid name"), strval); 493 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 494 goto error; 495 } 496 497 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { 498 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 499 "could not open pool '%s'"), poolname); 500 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 501 goto error; 502 } 503 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 504 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 505 506#ifdef sun 507 /* 508 * bootfs property cannot be set on a disk which has 509 * been EFI labeled. 
510 */ 511 if (pool_uses_efi(nvroot)) { 512 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 513 "property '%s' not supported on " 514 "EFI labeled devices"), propname); 515 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf); 516 zpool_close(zhp); 517 goto error; 518 } 519#endif /* sun */ 520 zpool_close(zhp); 521 break; 522 523 case ZPOOL_PROP_ALTROOT: 524 if (!flags.create && !flags.import) { 525 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 526 "property '%s' can only be set during pool " 527 "creation or import"), propname); 528 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 529 goto error; 530 } 531 532 if (strval[0] != '/') { 533 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 534 "bad alternate root '%s'"), strval); 535 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 536 goto error; 537 } 538 break; 539 540 case ZPOOL_PROP_CACHEFILE: 541 if (strval[0] == '\0') 542 break; 543 544 if (strcmp(strval, "none") == 0) 545 break; 546 547 if (strval[0] != '/') { 548 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 549 "property '%s' must be empty, an " 550 "absolute path, or 'none'"), propname); 551 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 552 goto error; 553 } 554 555 slash = strrchr(strval, '/'); 556 557 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 558 strcmp(slash, "/..") == 0) { 559 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 560 "'%s' is not a valid file"), strval); 561 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 562 goto error; 563 } 564 565 *slash = '\0'; 566 567 if (strval[0] != '\0' && 568 (stat64(strval, &statbuf) != 0 || 569 !S_ISDIR(statbuf.st_mode))) { 570 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 571 "'%s' is not a valid directory"), 572 strval); 573 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 574 goto error; 575 } 576 577 *slash = '/'; 578 break; 579 580 case ZPOOL_PROP_COMMENT: 581 for (check = strval; *check != '\0'; check++) { 582 if (!isprint(*check)) { 583 zfs_error_aux(hdl, 584 dgettext(TEXT_DOMAIN, 585 "comment may only have printable " 586 "characters")); 587 (void) 
zfs_error(hdl, EZFS_BADPROP, 588 errbuf); 589 goto error; 590 } 591 } 592 if (strlen(strval) > ZPROP_MAX_COMMENT) { 593 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 594 "comment must not exceed %d characters"), 595 ZPROP_MAX_COMMENT); 596 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 597 goto error; 598 } 599 break; 600 case ZPOOL_PROP_READONLY: 601 if (!flags.import) { 602 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 603 "property '%s' can only be set at " 604 "import time"), propname); 605 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 606 goto error; 607 } 608 break; 609 } 610 } 611 612 return (retprops); 613error: 614 nvlist_free(retprops); 615 return (NULL); 616} 617 618/* 619 * Set zpool property : propname=propval. 620 */ 621int 622zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 623{ 624 zfs_cmd_t zc = { 0 }; 625 int ret = -1; 626 char errbuf[1024]; 627 nvlist_t *nvl = NULL; 628 nvlist_t *realprops; 629 uint64_t version; 630 prop_flags_t flags = { 0 }; 631 632 (void) snprintf(errbuf, sizeof (errbuf), 633 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 634 zhp->zpool_name); 635 636 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 637 return (no_memory(zhp->zpool_hdl)); 638 639 if (nvlist_add_string(nvl, propname, propval) != 0) { 640 nvlist_free(nvl); 641 return (no_memory(zhp->zpool_hdl)); 642 } 643 644 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 645 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 646 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 647 nvlist_free(nvl); 648 return (-1); 649 } 650 651 nvlist_free(nvl); 652 nvl = realprops; 653 654 /* 655 * Execute the corresponding ioctl() to set this property. 
656 */ 657 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 658 659 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 660 nvlist_free(nvl); 661 return (-1); 662 } 663 664 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 665 666 zcmd_free_nvlists(&zc); 667 nvlist_free(nvl); 668 669 if (ret) 670 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 671 else 672 (void) zpool_props_refresh(zhp); 673 674 return (ret); 675} 676 677int 678zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) 679{ 680 libzfs_handle_t *hdl = zhp->zpool_hdl; 681 zprop_list_t *entry; 682 char buf[ZFS_MAXPROPLEN]; 683 684 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 685 return (-1); 686 687 for (entry = *plp; entry != NULL; entry = entry->pl_next) { 688 689 if (entry->pl_fixed) 690 continue; 691 692 if (entry->pl_prop != ZPROP_INVAL && 693 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), 694 NULL) == 0) { 695 if (strlen(buf) > entry->pl_width) 696 entry->pl_width = strlen(buf); 697 } 698 } 699 700 return (0); 701} 702 703 704/* 705 * Don't start the slice at the default block of 34; many storage 706 * devices will use a stripe width of 128k, so start there instead. 707 */ 708#define NEW_START_BLOCK 256 709 710/* 711 * Validate the given pool name, optionally putting an extended error message in 712 * 'buf'. 713 */ 714boolean_t 715zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 716{ 717 namecheck_err_t why; 718 char what; 719 int ret; 720 721 ret = pool_namecheck(pool, &why, &what); 722 723 /* 724 * The rules for reserved pool names were extended at a later point. 725 * But we need to support users with existing pools that may now be 726 * invalid. So we only check for this expanded set of names during a 727 * create (or import), and only in userland. 
728 */ 729 if (ret == 0 && !isopen && 730 (strncmp(pool, "mirror", 6) == 0 || 731 strncmp(pool, "raidz", 5) == 0 || 732 strncmp(pool, "spare", 5) == 0 || 733 strcmp(pool, "log") == 0)) { 734 if (hdl != NULL) 735 zfs_error_aux(hdl, 736 dgettext(TEXT_DOMAIN, "name is reserved")); 737 return (B_FALSE); 738 } 739 740 741 if (ret != 0) { 742 if (hdl != NULL) { 743 switch (why) { 744 case NAME_ERR_TOOLONG: 745 zfs_error_aux(hdl, 746 dgettext(TEXT_DOMAIN, "name is too long")); 747 break; 748 749 case NAME_ERR_INVALCHAR: 750 zfs_error_aux(hdl, 751 dgettext(TEXT_DOMAIN, "invalid character " 752 "'%c' in pool name"), what); 753 break; 754 755 case NAME_ERR_NOLETTER: 756 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 757 "name must begin with a letter")); 758 break; 759 760 case NAME_ERR_RESERVED: 761 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 762 "name is reserved")); 763 break; 764 765 case NAME_ERR_DISKLIKE: 766 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 767 "pool name is reserved")); 768 break; 769 770 case NAME_ERR_LEADING_SLASH: 771 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 772 "leading slash in name")); 773 break; 774 775 case NAME_ERR_EMPTY_COMPONENT: 776 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 777 "empty component in name")); 778 break; 779 780 case NAME_ERR_TRAILING_SLASH: 781 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 782 "trailing slash in name")); 783 break; 784 785 case NAME_ERR_MULTIPLE_AT: 786 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 787 "multiple '@' delimiters in name")); 788 break; 789 790 } 791 } 792 return (B_FALSE); 793 } 794 795 return (B_TRUE); 796} 797 798/* 799 * Open a handle to the given pool, even if the pool is currently in the FAULTED 800 * state. 801 */ 802zpool_handle_t * 803zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 804{ 805 zpool_handle_t *zhp; 806 boolean_t missing; 807 808 /* 809 * Make sure the pool name is valid. 
810 */ 811 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 812 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 813 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 814 pool); 815 return (NULL); 816 } 817 818 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 819 return (NULL); 820 821 zhp->zpool_hdl = hdl; 822 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 823 824 if (zpool_refresh_stats(zhp, &missing) != 0) { 825 zpool_close(zhp); 826 return (NULL); 827 } 828 829 if (missing) { 830 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 831 (void) zfs_error_fmt(hdl, EZFS_NOENT, 832 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 833 zpool_close(zhp); 834 return (NULL); 835 } 836 837 return (zhp); 838} 839 840/* 841 * Like the above, but silent on error. Used when iterating over pools (because 842 * the configuration cache may be out of date). 843 */ 844int 845zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 846{ 847 zpool_handle_t *zhp; 848 boolean_t missing; 849 850 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 851 return (-1); 852 853 zhp->zpool_hdl = hdl; 854 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 855 856 if (zpool_refresh_stats(zhp, &missing) != 0) { 857 zpool_close(zhp); 858 return (-1); 859 } 860 861 if (missing) { 862 zpool_close(zhp); 863 *ret = NULL; 864 return (0); 865 } 866 867 *ret = zhp; 868 return (0); 869} 870 871/* 872 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 873 * state. 
874 */ 875zpool_handle_t * 876zpool_open(libzfs_handle_t *hdl, const char *pool) 877{ 878 zpool_handle_t *zhp; 879 880 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 881 return (NULL); 882 883 if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 884 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 885 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 886 zpool_close(zhp); 887 return (NULL); 888 } 889 890 return (zhp); 891} 892 893/* 894 * Close the handle. Simply frees the memory associated with the handle. 895 */ 896void 897zpool_close(zpool_handle_t *zhp) 898{ 899 if (zhp->zpool_config) 900 nvlist_free(zhp->zpool_config); 901 if (zhp->zpool_old_config) 902 nvlist_free(zhp->zpool_old_config); 903 if (zhp->zpool_props) 904 nvlist_free(zhp->zpool_props); 905 free(zhp); 906} 907 908/* 909 * Return the name of the pool. 910 */ 911const char * 912zpool_get_name(zpool_handle_t *zhp) 913{ 914 return (zhp->zpool_name); 915} 916 917 918/* 919 * Return the state of the pool (ACTIVE or UNAVAILABLE) 920 */ 921int 922zpool_get_state(zpool_handle_t *zhp) 923{ 924 return (zhp->zpool_state); 925} 926 927/* 928 * Create the named pool, using the provided vdev list. It is assumed 929 * that the consumer has already validated the contents of the nvlist, so we 930 * don't have to worry about error semantics. 
931 */ 932int 933zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 934 nvlist_t *props, nvlist_t *fsprops) 935{ 936 zfs_cmd_t zc = { 0 }; 937 nvlist_t *zc_fsprops = NULL; 938 nvlist_t *zc_props = NULL; 939 char msg[1024]; 940 char *altroot; 941 int ret = -1; 942 943 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 944 "cannot create '%s'"), pool); 945 946 if (!zpool_name_valid(hdl, B_FALSE, pool)) 947 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 948 949 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 950 return (-1); 951 952 if (props) { 953 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 954 955 if ((zc_props = zpool_valid_proplist(hdl, pool, props, 956 SPA_VERSION_1, flags, msg)) == NULL) { 957 goto create_failed; 958 } 959 } 960 961 if (fsprops) { 962 uint64_t zoned; 963 char *zonestr; 964 965 zoned = ((nvlist_lookup_string(fsprops, 966 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 967 strcmp(zonestr, "on") == 0); 968 969 if ((zc_fsprops = zfs_valid_proplist(hdl, 970 ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) { 971 goto create_failed; 972 } 973 if (!zc_props && 974 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 975 goto create_failed; 976 } 977 if (nvlist_add_nvlist(zc_props, 978 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 979 goto create_failed; 980 } 981 } 982 983 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 984 goto create_failed; 985 986 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 987 988 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 989 990 zcmd_free_nvlists(&zc); 991 nvlist_free(zc_props); 992 nvlist_free(zc_fsprops); 993 994 switch (errno) { 995 case EBUSY: 996 /* 997 * This can happen if the user has specified the same 998 * device multiple times. We can't reliably detect this 999 * until we try to add it and see we already have a 1000 * label. 
1001 */ 1002 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1003 "one or more vdevs refer to the same device")); 1004 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1005 1006 case EOVERFLOW: 1007 /* 1008 * This occurs when one of the devices is below 1009 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1010 * device was the problem device since there's no 1011 * reliable way to determine device size from userland. 1012 */ 1013 { 1014 char buf[64]; 1015 1016 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1017 1018 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1019 "one or more devices is less than the " 1020 "minimum size (%s)"), buf); 1021 } 1022 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1023 1024 case ENOSPC: 1025 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1026 "one or more devices is out of space")); 1027 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1028 1029 case ENOTBLK: 1030 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1031 "cache device must be a disk or disk slice")); 1032 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1033 1034 default: 1035 return (zpool_standard_error(hdl, errno, msg)); 1036 } 1037 } 1038 1039 /* 1040 * If this is an alternate root pool, then we automatically set the 1041 * mountpoint of the root dataset to be '/'. 1042 */ 1043 if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT), 1044 &altroot) == 0) { 1045 zfs_handle_t *zhp; 1046 1047 verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL); 1048 verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1049 "/") == 0); 1050 1051 zfs_close(zhp); 1052 } 1053 1054create_failed: 1055 zcmd_free_nvlists(&zc); 1056 nvlist_free(zc_props); 1057 nvlist_free(zc_fsprops); 1058 return (ret); 1059} 1060 1061/* 1062 * Destroy the given pool. It is up to the caller to ensure that there are no 1063 * datasets left in the pool. 
1064 */ 1065int 1066zpool_destroy(zpool_handle_t *zhp) 1067{ 1068 zfs_cmd_t zc = { 0 }; 1069 zfs_handle_t *zfp = NULL; 1070 libzfs_handle_t *hdl = zhp->zpool_hdl; 1071 char msg[1024]; 1072 1073 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1074 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1075 return (-1); 1076 1077 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1078 1079 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1080 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1081 "cannot destroy '%s'"), zhp->zpool_name); 1082 1083 if (errno == EROFS) { 1084 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1085 "one or more devices is read only")); 1086 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1087 } else { 1088 (void) zpool_standard_error(hdl, errno, msg); 1089 } 1090 1091 if (zfp) 1092 zfs_close(zfp); 1093 return (-1); 1094 } 1095 1096 if (zfp) { 1097 remove_mountpoint(zfp); 1098 zfs_close(zfp); 1099 } 1100 1101 return (0); 1102} 1103 1104/* 1105 * Add the given vdevs to the pool. The caller must have already performed the 1106 * necessary verification to ensure that the vdev specification is well-formed. 1107 */ 1108int 1109zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) 1110{ 1111 zfs_cmd_t zc = { 0 }; 1112 int ret; 1113 libzfs_handle_t *hdl = zhp->zpool_hdl; 1114 char msg[1024]; 1115 nvlist_t **spares, **l2cache; 1116 uint_t nspares, nl2cache; 1117 1118 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1119 "cannot add to '%s'"), zhp->zpool_name); 1120 1121 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1122 SPA_VERSION_SPARES && 1123 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1124 &spares, &nspares) == 0) { 1125 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1126 "upgraded to add hot spares")); 1127 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1128 } 1129
|
1129 if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
| 1130 if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
|
1130 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) { 1131 uint64_t s; 1132 1133 for (s = 0; s < nspares; s++) { 1134 char *path; 1135 1136 if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH, 1137 &path) == 0 && pool_uses_efi(spares[s])) { 1138 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1139 "device '%s' contains an EFI label and " 1140 "cannot be used on root pools."), 1141 zpool_vdev_name(hdl, NULL, spares[s], 1142 B_FALSE)); 1143 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 1144 } 1145 } 1146 } 1147 1148 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1149 SPA_VERSION_L2CACHE && 1150 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1151 &l2cache, &nl2cache) == 0) { 1152 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1153 "upgraded to add cache devices")); 1154 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1155 } 1156 1157 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1158 return (-1); 1159 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1160 1161 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) { 1162 switch (errno) { 1163 case EBUSY: 1164 /* 1165 * This can happen if the user has specified the same 1166 * device multiple times. We can't reliably detect this 1167 * until we try to add it and see we already have a 1168 * label. 1169 */ 1170 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1171 "one or more vdevs refer to the same device")); 1172 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1173 break; 1174 1175 case EOVERFLOW: 1176 /* 1177 * This occurrs when one of the devices is below 1178 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1179 * device was the problem device since there's no 1180 * reliable way to determine device size from userland. 
1181 */ 1182 { 1183 char buf[64]; 1184 1185 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1186 1187 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1188 "device is less than the minimum " 1189 "size (%s)"), buf); 1190 } 1191 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1192 break; 1193 1194 case ENOTSUP: 1195 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1196 "pool must be upgraded to add these vdevs")); 1197 (void) zfs_error(hdl, EZFS_BADVERSION, msg); 1198 break; 1199 1200 case EDOM: 1201 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1202 "root pool can not have multiple vdevs" 1203 " or separate logs")); 1204 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); 1205 break; 1206 1207 case ENOTBLK: 1208 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1209 "cache device must be a disk or disk slice")); 1210 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1211 break; 1212 1213 default: 1214 (void) zpool_standard_error(hdl, errno, msg); 1215 } 1216 1217 ret = -1; 1218 } else { 1219 ret = 0; 1220 } 1221 1222 zcmd_free_nvlists(&zc); 1223 1224 return (ret); 1225} 1226 1227/* 1228 * Exports the pool from the system. The caller must ensure that there are no 1229 * mounted datasets in the pool. 
1230 */ 1231int 1232zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce) 1233{ 1234 zfs_cmd_t zc = { 0 }; 1235 char msg[1024]; 1236 1237 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1238 "cannot export '%s'"), zhp->zpool_name); 1239 1240 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1241 zc.zc_cookie = force; 1242 zc.zc_guid = hardforce; 1243 1244 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { 1245 switch (errno) { 1246 case EXDEV: 1247 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 1248 "use '-f' to override the following errors:\n" 1249 "'%s' has an active shared spare which could be" 1250 " used by other pools once '%s' is exported."), 1251 zhp->zpool_name, zhp->zpool_name); 1252 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, 1253 msg)); 1254 default: 1255 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, 1256 msg)); 1257 } 1258 } 1259 1260 return (0); 1261} 1262 1263int 1264zpool_export(zpool_handle_t *zhp, boolean_t force) 1265{ 1266 return (zpool_export_common(zhp, force, B_FALSE)); 1267} 1268 1269int 1270zpool_export_force(zpool_handle_t *zhp) 1271{ 1272 return (zpool_export_common(zhp, B_TRUE, B_TRUE)); 1273} 1274 1275static void 1276zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun, 1277 nvlist_t *config) 1278{ 1279 nvlist_t *nv = NULL; 1280 uint64_t rewindto; 1281 int64_t loss = -1; 1282 struct tm t; 1283 char timestr[128]; 1284 1285 if (!hdl->libzfs_printerr || config == NULL) 1286 return; 1287 1288 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0) 1289 return; 1290 1291 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1292 return; 1293 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1294 1295 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1296 strftime(timestr, 128, 0, &t) != 0) { 1297 if (dryrun) { 1298 (void) printf(dgettext(TEXT_DOMAIN, 1299 "Would be able to return %s " 
1300 "to its state as of %s.\n"), 1301 name, timestr); 1302 } else { 1303 (void) printf(dgettext(TEXT_DOMAIN, 1304 "Pool %s returned to its state as of %s.\n"), 1305 name, timestr); 1306 } 1307 if (loss > 120) { 1308 (void) printf(dgettext(TEXT_DOMAIN, 1309 "%s approximately %lld "), 1310 dryrun ? "Would discard" : "Discarded", 1311 (loss + 30) / 60); 1312 (void) printf(dgettext(TEXT_DOMAIN, 1313 "minutes of transactions.\n")); 1314 } else if (loss > 0) { 1315 (void) printf(dgettext(TEXT_DOMAIN, 1316 "%s approximately %lld "), 1317 dryrun ? "Would discard" : "Discarded", loss); 1318 (void) printf(dgettext(TEXT_DOMAIN, 1319 "seconds of transactions.\n")); 1320 } 1321 } 1322} 1323 1324void 1325zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1326 nvlist_t *config) 1327{ 1328 nvlist_t *nv = NULL; 1329 int64_t loss = -1; 1330 uint64_t edata = UINT64_MAX; 1331 uint64_t rewindto; 1332 struct tm t; 1333 char timestr[128]; 1334 1335 if (!hdl->libzfs_printerr) 1336 return; 1337 1338 if (reason >= 0) 1339 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1340 else 1341 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1342 1343 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1344 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1345 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1346 goto no_info; 1347 1348 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1349 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1350 &edata); 1351 1352 (void) printf(dgettext(TEXT_DOMAIN, 1353 "Recovery is possible, but will result in some data loss.\n")); 1354 1355 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1356 strftime(timestr, 128, 0, &t) != 0) { 1357 (void) printf(dgettext(TEXT_DOMAIN, 1358 "\tReturning the pool to its state as of %s\n" 1359 "\tshould correct the problem. 
"), 1360 timestr); 1361 } else { 1362 (void) printf(dgettext(TEXT_DOMAIN, 1363 "\tReverting the pool to an earlier state " 1364 "should correct the problem.\n\t")); 1365 } 1366 1367 if (loss > 120) { 1368 (void) printf(dgettext(TEXT_DOMAIN, 1369 "Approximately %lld minutes of data\n" 1370 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1371 } else if (loss > 0) { 1372 (void) printf(dgettext(TEXT_DOMAIN, 1373 "Approximately %lld seconds of data\n" 1374 "\tmust be discarded, irreversibly. "), loss); 1375 } 1376 if (edata != 0 && edata != UINT64_MAX) { 1377 if (edata == 1) { 1378 (void) printf(dgettext(TEXT_DOMAIN, 1379 "After rewind, at least\n" 1380 "\tone persistent user-data error will remain. ")); 1381 } else { 1382 (void) printf(dgettext(TEXT_DOMAIN, 1383 "After rewind, several\n" 1384 "\tpersistent user-data errors will remain. ")); 1385 } 1386 } 1387 (void) printf(dgettext(TEXT_DOMAIN, 1388 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1389 reason >= 0 ? "clear" : "import", name); 1390 1391 (void) printf(dgettext(TEXT_DOMAIN, 1392 "A scrub of the pool\n" 1393 "\tis strongly recommended after recovery.\n")); 1394 return; 1395 1396no_info: 1397 (void) printf(dgettext(TEXT_DOMAIN, 1398 "Destroy and re-create the pool from\n\ta backup source.\n")); 1399} 1400 1401/* 1402 * zpool_import() is a contracted interface. Should be kept the same 1403 * if possible. 1404 * 1405 * Applications should use zpool_import_props() to import a pool with 1406 * new properties value to be set. 
1407 */ 1408int 1409zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1410 char *altroot) 1411{ 1412 nvlist_t *props = NULL; 1413 int ret; 1414 1415 if (altroot != NULL) { 1416 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1417 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1418 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1419 newname)); 1420 } 1421 1422 if (nvlist_add_string(props, 1423 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1424 nvlist_add_string(props, 1425 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1426 nvlist_free(props); 1427 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1428 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1429 newname)); 1430 } 1431 } 1432 1433 ret = zpool_import_props(hdl, config, newname, props, 1434 ZFS_IMPORT_NORMAL); 1435 if (props) 1436 nvlist_free(props); 1437 return (ret); 1438} 1439 1440static void 1441print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1442 int indent) 1443{ 1444 nvlist_t **child; 1445 uint_t c, children; 1446 char *vname; 1447 uint64_t is_log = 0; 1448 1449 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1450 &is_log); 1451 1452 if (name != NULL) 1453 (void) printf("\t%*s%s%s\n", indent, "", name, 1454 is_log ? " [log]" : ""); 1455 1456 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1457 &child, &children) != 0) 1458 return; 1459 1460 for (c = 0; c < children; c++) { 1461 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); 1462 print_vdev_tree(hdl, vname, child[c], indent + 2); 1463 free(vname); 1464 } 1465} 1466 1467/* 1468 * Import the given pool using the known configuration and a list of 1469 * properties to be set. The configuration should have come from 1470 * zpool_find_import(). The 'newname' parameters control whether the pool 1471 * is imported with a different name. 
1472 */ 1473int 1474zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1475 nvlist_t *props, int flags) 1476{ 1477 zfs_cmd_t zc = { 0 }; 1478 zpool_rewind_policy_t policy; 1479 nvlist_t *nv = NULL; 1480 nvlist_t *nvinfo = NULL; 1481 nvlist_t *missing = NULL; 1482 char *thename; 1483 char *origname; 1484 int ret; 1485 int error = 0; 1486 char errbuf[1024]; 1487 1488 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1489 &origname) == 0); 1490 1491 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1492 "cannot import pool '%s'"), origname); 1493 1494 if (newname != NULL) { 1495 if (!zpool_name_valid(hdl, B_FALSE, newname)) 1496 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1497 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1498 newname)); 1499 thename = (char *)newname; 1500 } else { 1501 thename = origname; 1502 } 1503 1504 if (props) { 1505 uint64_t version; 1506 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1507 1508 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1509 &version) == 0); 1510 1511 if ((props = zpool_valid_proplist(hdl, origname, 1512 props, version, flags, errbuf)) == NULL) { 1513 return (-1); 1514 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1515 nvlist_free(props); 1516 return (-1); 1517 } 1518 } 1519 1520 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1521 1522 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1523 &zc.zc_guid) == 0); 1524 1525 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1526 nvlist_free(props); 1527 return (-1); 1528 } 1529 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1530 nvlist_free(props); 1531 return (-1); 1532 } 1533 1534 zc.zc_cookie = flags; 1535 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 1536 errno == ENOMEM) { 1537 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 1538 zcmd_free_nvlists(&zc); 1539 return (-1); 1540 } 1541 } 1542 if (ret != 0) 1543 error = 
errno; 1544 1545 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 1546 zpool_get_rewind_policy(config, &policy); 1547 1548 if (error) { 1549 char desc[1024]; 1550 1551 /* 1552 * Dry-run failed, but we print out what success 1553 * looks like if we found a best txg 1554 */ 1555 if (policy.zrp_request & ZPOOL_TRY_REWIND) { 1556 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1557 B_TRUE, nv); 1558 nvlist_free(nv); 1559 return (-1); 1560 } 1561 1562 if (newname == NULL) 1563 (void) snprintf(desc, sizeof (desc), 1564 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1565 thename); 1566 else 1567 (void) snprintf(desc, sizeof (desc), 1568 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1569 origname, thename); 1570 1571 switch (error) { 1572 case ENOTSUP: 1573 /* 1574 * Unsupported version. 1575 */ 1576 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1577 break; 1578 1579 case EINVAL: 1580 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1581 break; 1582 1583 case EROFS: 1584 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1585 "one or more devices is read only")); 1586 (void) zfs_error(hdl, EZFS_BADDEV, desc); 1587 break; 1588 1589 case ENXIO: 1590 if (nv && nvlist_lookup_nvlist(nv, 1591 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1592 nvlist_lookup_nvlist(nvinfo, 1593 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 1594 (void) printf(dgettext(TEXT_DOMAIN, 1595 "The devices below are missing, use " 1596 "'-m' to import the pool anyway:\n")); 1597 print_vdev_tree(hdl, NULL, missing, 2); 1598 (void) printf("\n"); 1599 } 1600 (void) zpool_standard_error(hdl, error, desc); 1601 break; 1602 1603 case EEXIST: 1604 (void) zpool_standard_error(hdl, error, desc); 1605 break; 1606 1607 default: 1608 (void) zpool_standard_error(hdl, error, desc); 1609 zpool_explain_recover(hdl, 1610 newname ? origname : thename, -error, nv); 1611 break; 1612 } 1613 1614 nvlist_free(nv); 1615 ret = -1; 1616 } else { 1617 zpool_handle_t *zhp; 1618 1619 /* 1620 * This should never fail, but play it safe anyway. 
1621 */ 1622 if (zpool_open_silent(hdl, thename, &zhp) != 0) 1623 ret = -1; 1624 else if (zhp != NULL) 1625 zpool_close(zhp); 1626 if (policy.zrp_request & 1627 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 1628 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1629 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv); 1630 } 1631 nvlist_free(nv); 1632 return (0); 1633 } 1634 1635 zcmd_free_nvlists(&zc); 1636 nvlist_free(props); 1637 1638 return (ret); 1639} 1640 1641/* 1642 * Scan the pool. 1643 */ 1644int 1645zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func) 1646{ 1647 zfs_cmd_t zc = { 0 }; 1648 char msg[1024]; 1649 libzfs_handle_t *hdl = zhp->zpool_hdl; 1650 1651 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1652 zc.zc_cookie = func; 1653 1654 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 || 1655 (errno == ENOENT && func != POOL_SCAN_NONE)) 1656 return (0); 1657 1658 if (func == POOL_SCAN_SCRUB) { 1659 (void) snprintf(msg, sizeof (msg), 1660 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name); 1661 } else if (func == POOL_SCAN_NONE) { 1662 (void) snprintf(msg, sizeof (msg), 1663 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 1664 zc.zc_name); 1665 } else { 1666 assert(!"unexpected result"); 1667 } 1668 1669 if (errno == EBUSY) { 1670 nvlist_t *nvroot; 1671 pool_scan_stat_t *ps = NULL; 1672 uint_t psc; 1673 1674 verify(nvlist_lookup_nvlist(zhp->zpool_config, 1675 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1676 (void) nvlist_lookup_uint64_array(nvroot, 1677 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 1678 if (ps && ps->pss_func == POOL_SCAN_SCRUB) 1679 return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 1680 else 1681 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 1682 } else if (errno == ENOENT) { 1683 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 1684 } else { 1685 return (zpool_standard_error(hdl, errno, msg)); 1686 } 1687} 1688 1689/* 1690 * This provides a very minimal check whether a given string is likely a 1691 * c#t#d# style 
string. Users of this are expected to do their own 1692 * verification of the s# part. 1693 */ 1694#define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1])) 1695 1696/* 1697 * More elaborate version for ones which may start with "/dev/dsk/" 1698 * and the like. 1699 */ 1700static int 1701ctd_check_path(char *str) { 1702 /* 1703 * If it starts with a slash, check the last component. 1704 */ 1705 if (str && str[0] == '/') { 1706 char *tmp = strrchr(str, '/'); 1707 1708 /* 1709 * If it ends in "/old", check the second-to-last 1710 * component of the string instead. 1711 */ 1712 if (tmp != str && strcmp(tmp, "/old") == 0) { 1713 for (tmp--; *tmp != '/'; tmp--) 1714 ; 1715 } 1716 str = tmp + 1; 1717 } 1718 return (CTD_CHECK(str)); 1719} 1720 1721/* 1722 * Find a vdev that matches the search criteria specified. We use the 1723 * the nvpair name to determine how we should look for the device. 1724 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL 1725 * spare; but FALSE if its an INUSE spare. 
 */
/*
 * Recursively walk the vdev tree rooted at 'nv' looking for the vdev that
 * matches the single search criterion in 'search' (keyed by nvpair name:
 * a GUID, a device path, a vdev type, etc.).  On a match found under the
 * spares or l2cache arrays, *avail_spare / *l2cache are set; *log is set
 * when the match lies under a top-level log vdev (only checked when 'log'
 * is non-NULL, i.e. from the root of the recursion).
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		/* A uint64 criterion is only meaningful as a vdev GUID. */
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "s0" or "s0/old". The "s0" part is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
		    ctd_check_path(val)) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (wholedisk) {
				int slen = strlen(srchval);
				int vlen = strlen(val);

				/* config path is user path + hidden "s0" */
				if (slen != vlen - 2)
					break;

				/*
				 * make_leaf_vdev() should only set
				 * wholedisk for ZPOOL_CONFIG_PATHs which
				 * will include "/dev/dsk/", giving plenty of
				 * room for the indices used next.
				 */
				ASSERT(vlen >= 6);

				/*
				 * strings identical except trailing "s0"
				 */
				if (strcmp(&val[vlen - 2], "s0") == 0 &&
				    strncmp(srchval, val, slen) == 0)
					return (nv);

				/*
				 * strings identical except trailing "s0/old"
				 */
				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
				    strcmp(&srchval[slen - 4], "/old") == 0 &&
				    strncmp(srchval, val, slen - 4) == 0)
					return (nv);

				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			/* Only interior (mirror/raidz) vdevs carry an index. */
			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	/* No match here; recurse into the data children. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	/* Check the spare vdevs. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	/* Check the L2ARC cache vdevs. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
1924 */ 1925nvlist_t * 1926zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 1927 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 1928{ 1929 nvlist_t *search, *nvroot, *ret; 1930 1931 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 1932 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); 1933 1934 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 1935 &nvroot) == 0); 1936 1937 *avail_spare = B_FALSE; 1938 *l2cache = B_FALSE; 1939 if (log != NULL) 1940 *log = B_FALSE; 1941 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 1942 nvlist_free(search); 1943 1944 return (ret); 1945} 1946 1947/* 1948 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 1949 */ 1950boolean_t 1951zpool_vdev_is_interior(const char *name) 1952{ 1953 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 1954 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 1955 return (B_TRUE); 1956 return (B_FALSE); 1957} 1958 1959nvlist_t * 1960zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 1961 boolean_t *l2cache, boolean_t *log) 1962{ 1963 char buf[MAXPATHLEN]; 1964 char *end; 1965 nvlist_t *nvroot, *search, *ret; 1966 uint64_t guid; 1967 1968 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 1969 1970 guid = strtoull(path, &end, 10); 1971 if (guid != 0 && *end == '\0') { 1972 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 1973 } else if (zpool_vdev_is_interior(path)) { 1974 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 1975 } else if (path[0] != '/') { 1976 (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path); 1977 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 1978 } else { 1979 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 1980 } 1981 1982 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 1983 &nvroot) == 0); 
1984 1985 *avail_spare = B_FALSE; 1986 *l2cache = B_FALSE; 1987 if (log != NULL) 1988 *log = B_FALSE; 1989 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 1990 nvlist_free(search); 1991 1992 return (ret); 1993} 1994 1995static int 1996vdev_online(nvlist_t *nv) 1997{ 1998 uint64_t ival; 1999 2000 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2001 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2002 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2003 return (0); 2004 2005 return (1); 2006} 2007 2008/* 2009 * Helper function for zpool_get_physpaths(). 2010 */ 2011static int 2012vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2013 size_t *bytes_written) 2014{ 2015 size_t bytes_left, pos, rsz; 2016 char *tmppath; 2017 const char *format; 2018 2019 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2020 &tmppath) != 0) 2021 return (EZFS_NODEVICE); 2022 2023 pos = *bytes_written; 2024 bytes_left = physpath_size - pos; 2025 format = (pos == 0) ? "%s" : " %s"; 2026 2027 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2028 *bytes_written += rsz; 2029 2030 if (rsz >= bytes_left) { 2031 /* if physpath was not copied properly, clear it */ 2032 if (bytes_left != 0) { 2033 physpath[pos] = 0; 2034 } 2035 return (EZFS_NOSPC); 2036 } 2037 return (0); 2038} 2039 2040static int 2041vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2042 size_t *rsz, boolean_t is_spare) 2043{ 2044 char *type; 2045 int ret; 2046 2047 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2048 return (EZFS_INVALCONFIG); 2049 2050 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2051 /* 2052 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2053 * For a spare vdev, we only want to boot from the active 2054 * spare device. 
2055 */ 2056 if (is_spare) { 2057 uint64_t spare = 0; 2058 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2059 &spare); 2060 if (!spare) 2061 return (EZFS_INVALCONFIG); 2062 } 2063 2064 if (vdev_online(nv)) { 2065 if ((ret = vdev_get_one_physpath(nv, physpath, 2066 phypath_size, rsz)) != 0) 2067 return (ret); 2068 } 2069 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2070 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2071 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2072 nvlist_t **child; 2073 uint_t count; 2074 int i, ret; 2075 2076 if (nvlist_lookup_nvlist_array(nv, 2077 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2078 return (EZFS_INVALCONFIG); 2079 2080 for (i = 0; i < count; i++) { 2081 ret = vdev_get_physpaths(child[i], physpath, 2082 phypath_size, rsz, is_spare); 2083 if (ret == EZFS_NOSPC) 2084 return (ret); 2085 } 2086 } 2087 2088 return (EZFS_POOL_INVALARG); 2089} 2090 2091/* 2092 * Get phys_path for a root pool config. 2093 * Return 0 on success; non-zero on failure. 2094 */ 2095static int 2096zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2097{ 2098 size_t rsz; 2099 nvlist_t *vdev_root; 2100 nvlist_t **child; 2101 uint_t count; 2102 char *type; 2103 2104 rsz = 0; 2105 2106 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2107 &vdev_root) != 0) 2108 return (EZFS_INVALCONFIG); 2109 2110 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2111 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2112 &child, &count) != 0) 2113 return (EZFS_INVALCONFIG); 2114 2115 /* 2116 * root pool can not have EFI labeled disks and can only have 2117 * a single top-level vdev. 
2118 */ 2119 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 || 2120 pool_uses_efi(vdev_root)) 2121 return (EZFS_POOL_INVALARG); 2122 2123 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2124 B_FALSE); 2125 2126 /* No online devices */ 2127 if (rsz == 0) 2128 return (EZFS_NODEVICE); 2129 2130 return (0); 2131} 2132 2133/* 2134 * Get phys_path for a root pool 2135 * Return 0 on success; non-zero on failure. 2136 */ 2137int 2138zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2139{ 2140 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2141 phypath_size)); 2142} 2143 2144/* 2145 * If the device has being dynamically expanded then we need to relabel 2146 * the disk to use the new unallocated space. 2147 */ 2148static int 2149zpool_relabel_disk(libzfs_handle_t *hdl, const char *name) 2150{ 2151#ifdef sun 2152 char path[MAXPATHLEN]; 2153 char errbuf[1024]; 2154 int fd, error; 2155 int (*_efi_use_whole_disk)(int); 2156 2157 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, 2158 "efi_use_whole_disk")) == NULL) 2159 return (-1); 2160 2161 (void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name); 2162 2163 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2164 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2165 "relabel '%s': unable to open device"), name); 2166 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2167 } 2168 2169 /* 2170 * It's possible that we might encounter an error if the device 2171 * does not have any unallocated space left. If so, we simply 2172 * ignore that error and continue on. 2173 */ 2174 error = _efi_use_whole_disk(fd); 2175 (void) close(fd); 2176 if (error && error != VT_ENOSPC) { 2177 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2178 "relabel '%s': unable to read disk capacity"), name); 2179 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2180 } 2181#endif /* sun */ 2182 return (0); 2183} 2184 2185/* 2186 * Bring the specified vdev online. 
The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.  On success, *newstate receives the resulting
 * vdev_state_t as reported by the kernel.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	/* An AVAIL spare cannot be onlined/expanded directly. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		char *pathname = NULL;
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
		    &pathname) == 0);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			/*
			 * Strip the "DISK_ROOT/" prefix before relabeling.
			 * NOTE(review): assumes pathname always begins with
			 * DISK_ROOT — verify against config producers.
			 */
			pathname += strlen(DISK_ROOT) + 1;
			(void) zpool_relabel_disk(hdl, pathname);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		/* EINVAL here means the device now belongs to a split pool. */
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one. Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	/* The kernel echoes the new state back through zc_cookie. */
	*newstate = zc.zc_cookie;
	return (0);
}

/*
 * Take the specified vdev offline; 'istmp' makes the offline state
 * non-persistent across reboot (ZFS_OFFLINE_TEMPORARY).
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	case EEXIST:
		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}

/*
 * Mark the given vdev faulted.
2311 */ 2312int 2313zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2314{ 2315 zfs_cmd_t zc = { 0 }; 2316 char msg[1024]; 2317 libzfs_handle_t *hdl = zhp->zpool_hdl; 2318 2319 (void) snprintf(msg, sizeof (msg), 2320 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2321 2322 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2323 zc.zc_guid = guid; 2324 zc.zc_cookie = VDEV_STATE_FAULTED; 2325 zc.zc_obj = aux; 2326 2327 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2328 return (0); 2329 2330 switch (errno) { 2331 case EBUSY: 2332 2333 /* 2334 * There are no other replicas of this device. 2335 */ 2336 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2337 2338 default: 2339 return (zpool_standard_error(hdl, errno, msg)); 2340 } 2341 2342} 2343 2344/* 2345 * Mark the given vdev degraded. 2346 */ 2347int 2348zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2349{ 2350 zfs_cmd_t zc = { 0 }; 2351 char msg[1024]; 2352 libzfs_handle_t *hdl = zhp->zpool_hdl; 2353 2354 (void) snprintf(msg, sizeof (msg), 2355 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2356 2357 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2358 zc.zc_guid = guid; 2359 zc.zc_cookie = VDEV_STATE_DEGRADED; 2360 zc.zc_obj = aux; 2361 2362 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2363 return (0); 2364 2365 return (zpool_standard_error(hdl, errno, msg)); 2366} 2367 2368/* 2369 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2370 * a hot spare. 
 */
/*
 * Recursively search the tree rooted at 'search' for a spare vdev with
 * exactly two children whose child[which] is the node 'tgt' (pointer
 * identity).  'which' selects the original disk (0) or the swapped-in
 * spare (1).
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
| 1131 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) { 1132 uint64_t s; 1133 1134 for (s = 0; s < nspares; s++) { 1135 char *path; 1136 1137 if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH, 1138 &path) == 0 && pool_uses_efi(spares[s])) { 1139 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1140 "device '%s' contains an EFI label and " 1141 "cannot be used on root pools."), 1142 zpool_vdev_name(hdl, NULL, spares[s], 1143 B_FALSE)); 1144 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 1145 } 1146 } 1147 } 1148 1149 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1150 SPA_VERSION_L2CACHE && 1151 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1152 &l2cache, &nl2cache) == 0) { 1153 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1154 "upgraded to add cache devices")); 1155 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1156 } 1157 1158 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1159 return (-1); 1160 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1161 1162 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) { 1163 switch (errno) { 1164 case EBUSY: 1165 /* 1166 * This can happen if the user has specified the same 1167 * device multiple times. We can't reliably detect this 1168 * until we try to add it and see we already have a 1169 * label. 1170 */ 1171 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1172 "one or more vdevs refer to the same device")); 1173 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1174 break; 1175 1176 case EOVERFLOW: 1177 /* 1178 * This occurrs when one of the devices is below 1179 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1180 * device was the problem device since there's no 1181 * reliable way to determine device size from userland. 
1182 */ 1183 { 1184 char buf[64]; 1185 1186 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1187 1188 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1189 "device is less than the minimum " 1190 "size (%s)"), buf); 1191 } 1192 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1193 break; 1194 1195 case ENOTSUP: 1196 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1197 "pool must be upgraded to add these vdevs")); 1198 (void) zfs_error(hdl, EZFS_BADVERSION, msg); 1199 break; 1200 1201 case EDOM: 1202 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1203 "root pool can not have multiple vdevs" 1204 " or separate logs")); 1205 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); 1206 break; 1207 1208 case ENOTBLK: 1209 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1210 "cache device must be a disk or disk slice")); 1211 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1212 break; 1213 1214 default: 1215 (void) zpool_standard_error(hdl, errno, msg); 1216 } 1217 1218 ret = -1; 1219 } else { 1220 ret = 0; 1221 } 1222 1223 zcmd_free_nvlists(&zc); 1224 1225 return (ret); 1226} 1227 1228/* 1229 * Exports the pool from the system. The caller must ensure that there are no 1230 * mounted datasets in the pool. 
1231 */ 1232int 1233zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce) 1234{ 1235 zfs_cmd_t zc = { 0 }; 1236 char msg[1024]; 1237 1238 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1239 "cannot export '%s'"), zhp->zpool_name); 1240 1241 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1242 zc.zc_cookie = force; 1243 zc.zc_guid = hardforce; 1244 1245 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { 1246 switch (errno) { 1247 case EXDEV: 1248 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 1249 "use '-f' to override the following errors:\n" 1250 "'%s' has an active shared spare which could be" 1251 " used by other pools once '%s' is exported."), 1252 zhp->zpool_name, zhp->zpool_name); 1253 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, 1254 msg)); 1255 default: 1256 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, 1257 msg)); 1258 } 1259 } 1260 1261 return (0); 1262} 1263 1264int 1265zpool_export(zpool_handle_t *zhp, boolean_t force) 1266{ 1267 return (zpool_export_common(zhp, force, B_FALSE)); 1268} 1269 1270int 1271zpool_export_force(zpool_handle_t *zhp) 1272{ 1273 return (zpool_export_common(zhp, B_TRUE, B_TRUE)); 1274} 1275 1276static void 1277zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun, 1278 nvlist_t *config) 1279{ 1280 nvlist_t *nv = NULL; 1281 uint64_t rewindto; 1282 int64_t loss = -1; 1283 struct tm t; 1284 char timestr[128]; 1285 1286 if (!hdl->libzfs_printerr || config == NULL) 1287 return; 1288 1289 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0) 1290 return; 1291 1292 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1293 return; 1294 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1295 1296 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1297 strftime(timestr, 128, 0, &t) != 0) { 1298 if (dryrun) { 1299 (void) printf(dgettext(TEXT_DOMAIN, 1300 "Would be able to return %s " 
1301 "to its state as of %s.\n"), 1302 name, timestr); 1303 } else { 1304 (void) printf(dgettext(TEXT_DOMAIN, 1305 "Pool %s returned to its state as of %s.\n"), 1306 name, timestr); 1307 } 1308 if (loss > 120) { 1309 (void) printf(dgettext(TEXT_DOMAIN, 1310 "%s approximately %lld "), 1311 dryrun ? "Would discard" : "Discarded", 1312 (loss + 30) / 60); 1313 (void) printf(dgettext(TEXT_DOMAIN, 1314 "minutes of transactions.\n")); 1315 } else if (loss > 0) { 1316 (void) printf(dgettext(TEXT_DOMAIN, 1317 "%s approximately %lld "), 1318 dryrun ? "Would discard" : "Discarded", loss); 1319 (void) printf(dgettext(TEXT_DOMAIN, 1320 "seconds of transactions.\n")); 1321 } 1322 } 1323} 1324 1325void 1326zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1327 nvlist_t *config) 1328{ 1329 nvlist_t *nv = NULL; 1330 int64_t loss = -1; 1331 uint64_t edata = UINT64_MAX; 1332 uint64_t rewindto; 1333 struct tm t; 1334 char timestr[128]; 1335 1336 if (!hdl->libzfs_printerr) 1337 return; 1338 1339 if (reason >= 0) 1340 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1341 else 1342 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1343 1344 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1345 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1346 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1347 goto no_info; 1348 1349 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1350 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1351 &edata); 1352 1353 (void) printf(dgettext(TEXT_DOMAIN, 1354 "Recovery is possible, but will result in some data loss.\n")); 1355 1356 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1357 strftime(timestr, 128, 0, &t) != 0) { 1358 (void) printf(dgettext(TEXT_DOMAIN, 1359 "\tReturning the pool to its state as of %s\n" 1360 "\tshould correct the problem. 
"), 1361 timestr); 1362 } else { 1363 (void) printf(dgettext(TEXT_DOMAIN, 1364 "\tReverting the pool to an earlier state " 1365 "should correct the problem.\n\t")); 1366 } 1367 1368 if (loss > 120) { 1369 (void) printf(dgettext(TEXT_DOMAIN, 1370 "Approximately %lld minutes of data\n" 1371 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1372 } else if (loss > 0) { 1373 (void) printf(dgettext(TEXT_DOMAIN, 1374 "Approximately %lld seconds of data\n" 1375 "\tmust be discarded, irreversibly. "), loss); 1376 } 1377 if (edata != 0 && edata != UINT64_MAX) { 1378 if (edata == 1) { 1379 (void) printf(dgettext(TEXT_DOMAIN, 1380 "After rewind, at least\n" 1381 "\tone persistent user-data error will remain. ")); 1382 } else { 1383 (void) printf(dgettext(TEXT_DOMAIN, 1384 "After rewind, several\n" 1385 "\tpersistent user-data errors will remain. ")); 1386 } 1387 } 1388 (void) printf(dgettext(TEXT_DOMAIN, 1389 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1390 reason >= 0 ? "clear" : "import", name); 1391 1392 (void) printf(dgettext(TEXT_DOMAIN, 1393 "A scrub of the pool\n" 1394 "\tis strongly recommended after recovery.\n")); 1395 return; 1396 1397no_info: 1398 (void) printf(dgettext(TEXT_DOMAIN, 1399 "Destroy and re-create the pool from\n\ta backup source.\n")); 1400} 1401 1402/* 1403 * zpool_import() is a contracted interface. Should be kept the same 1404 * if possible. 1405 * 1406 * Applications should use zpool_import_props() to import a pool with 1407 * new properties value to be set. 
1408 */ 1409int 1410zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1411 char *altroot) 1412{ 1413 nvlist_t *props = NULL; 1414 int ret; 1415 1416 if (altroot != NULL) { 1417 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1418 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1419 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1420 newname)); 1421 } 1422 1423 if (nvlist_add_string(props, 1424 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1425 nvlist_add_string(props, 1426 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1427 nvlist_free(props); 1428 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1429 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1430 newname)); 1431 } 1432 } 1433 1434 ret = zpool_import_props(hdl, config, newname, props, 1435 ZFS_IMPORT_NORMAL); 1436 if (props) 1437 nvlist_free(props); 1438 return (ret); 1439} 1440 1441static void 1442print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1443 int indent) 1444{ 1445 nvlist_t **child; 1446 uint_t c, children; 1447 char *vname; 1448 uint64_t is_log = 0; 1449 1450 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1451 &is_log); 1452 1453 if (name != NULL) 1454 (void) printf("\t%*s%s%s\n", indent, "", name, 1455 is_log ? " [log]" : ""); 1456 1457 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1458 &child, &children) != 0) 1459 return; 1460 1461 for (c = 0; c < children; c++) { 1462 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); 1463 print_vdev_tree(hdl, vname, child[c], indent + 2); 1464 free(vname); 1465 } 1466} 1467 1468/* 1469 * Import the given pool using the known configuration and a list of 1470 * properties to be set. The configuration should have come from 1471 * zpool_find_import(). The 'newname' parameters control whether the pool 1472 * is imported with a different name. 
1473 */ 1474int 1475zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1476 nvlist_t *props, int flags) 1477{ 1478 zfs_cmd_t zc = { 0 }; 1479 zpool_rewind_policy_t policy; 1480 nvlist_t *nv = NULL; 1481 nvlist_t *nvinfo = NULL; 1482 nvlist_t *missing = NULL; 1483 char *thename; 1484 char *origname; 1485 int ret; 1486 int error = 0; 1487 char errbuf[1024]; 1488 1489 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1490 &origname) == 0); 1491 1492 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1493 "cannot import pool '%s'"), origname); 1494 1495 if (newname != NULL) { 1496 if (!zpool_name_valid(hdl, B_FALSE, newname)) 1497 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1498 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1499 newname)); 1500 thename = (char *)newname; 1501 } else { 1502 thename = origname; 1503 } 1504 1505 if (props) { 1506 uint64_t version; 1507 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1508 1509 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1510 &version) == 0); 1511 1512 if ((props = zpool_valid_proplist(hdl, origname, 1513 props, version, flags, errbuf)) == NULL) { 1514 return (-1); 1515 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1516 nvlist_free(props); 1517 return (-1); 1518 } 1519 } 1520 1521 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1522 1523 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1524 &zc.zc_guid) == 0); 1525 1526 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1527 nvlist_free(props); 1528 return (-1); 1529 } 1530 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1531 nvlist_free(props); 1532 return (-1); 1533 } 1534 1535 zc.zc_cookie = flags; 1536 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 1537 errno == ENOMEM) { 1538 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 1539 zcmd_free_nvlists(&zc); 1540 return (-1); 1541 } 1542 } 1543 if (ret != 0) 1544 error = 
errno; 1545 1546 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 1547 zpool_get_rewind_policy(config, &policy); 1548 1549 if (error) { 1550 char desc[1024]; 1551 1552 /* 1553 * Dry-run failed, but we print out what success 1554 * looks like if we found a best txg 1555 */ 1556 if (policy.zrp_request & ZPOOL_TRY_REWIND) { 1557 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1558 B_TRUE, nv); 1559 nvlist_free(nv); 1560 return (-1); 1561 } 1562 1563 if (newname == NULL) 1564 (void) snprintf(desc, sizeof (desc), 1565 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1566 thename); 1567 else 1568 (void) snprintf(desc, sizeof (desc), 1569 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1570 origname, thename); 1571 1572 switch (error) { 1573 case ENOTSUP: 1574 /* 1575 * Unsupported version. 1576 */ 1577 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1578 break; 1579 1580 case EINVAL: 1581 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1582 break; 1583 1584 case EROFS: 1585 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1586 "one or more devices is read only")); 1587 (void) zfs_error(hdl, EZFS_BADDEV, desc); 1588 break; 1589 1590 case ENXIO: 1591 if (nv && nvlist_lookup_nvlist(nv, 1592 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1593 nvlist_lookup_nvlist(nvinfo, 1594 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 1595 (void) printf(dgettext(TEXT_DOMAIN, 1596 "The devices below are missing, use " 1597 "'-m' to import the pool anyway:\n")); 1598 print_vdev_tree(hdl, NULL, missing, 2); 1599 (void) printf("\n"); 1600 } 1601 (void) zpool_standard_error(hdl, error, desc); 1602 break; 1603 1604 case EEXIST: 1605 (void) zpool_standard_error(hdl, error, desc); 1606 break; 1607 1608 default: 1609 (void) zpool_standard_error(hdl, error, desc); 1610 zpool_explain_recover(hdl, 1611 newname ? origname : thename, -error, nv); 1612 break; 1613 } 1614 1615 nvlist_free(nv); 1616 ret = -1; 1617 } else { 1618 zpool_handle_t *zhp; 1619 1620 /* 1621 * This should never fail, but play it safe anyway. 
1622 */ 1623 if (zpool_open_silent(hdl, thename, &zhp) != 0) 1624 ret = -1; 1625 else if (zhp != NULL) 1626 zpool_close(zhp); 1627 if (policy.zrp_request & 1628 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 1629 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1630 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv); 1631 } 1632 nvlist_free(nv); 1633 return (0); 1634 } 1635 1636 zcmd_free_nvlists(&zc); 1637 nvlist_free(props); 1638 1639 return (ret); 1640} 1641 1642/* 1643 * Scan the pool. 1644 */ 1645int 1646zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func) 1647{ 1648 zfs_cmd_t zc = { 0 }; 1649 char msg[1024]; 1650 libzfs_handle_t *hdl = zhp->zpool_hdl; 1651 1652 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1653 zc.zc_cookie = func; 1654 1655 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 || 1656 (errno == ENOENT && func != POOL_SCAN_NONE)) 1657 return (0); 1658 1659 if (func == POOL_SCAN_SCRUB) { 1660 (void) snprintf(msg, sizeof (msg), 1661 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name); 1662 } else if (func == POOL_SCAN_NONE) { 1663 (void) snprintf(msg, sizeof (msg), 1664 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 1665 zc.zc_name); 1666 } else { 1667 assert(!"unexpected result"); 1668 } 1669 1670 if (errno == EBUSY) { 1671 nvlist_t *nvroot; 1672 pool_scan_stat_t *ps = NULL; 1673 uint_t psc; 1674 1675 verify(nvlist_lookup_nvlist(zhp->zpool_config, 1676 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1677 (void) nvlist_lookup_uint64_array(nvroot, 1678 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 1679 if (ps && ps->pss_func == POOL_SCAN_SCRUB) 1680 return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 1681 else 1682 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 1683 } else if (errno == ENOENT) { 1684 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 1685 } else { 1686 return (zpool_standard_error(hdl, errno, msg)); 1687 } 1688} 1689 1690/* 1691 * This provides a very minimal check whether a given string is likely a 1692 * c#t#d# style 
string. Users of this are expected to do their own 1693 * verification of the s# part. 1694 */ 1695#define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1])) 1696 1697/* 1698 * More elaborate version for ones which may start with "/dev/dsk/" 1699 * and the like. 1700 */ 1701static int 1702ctd_check_path(char *str) { 1703 /* 1704 * If it starts with a slash, check the last component. 1705 */ 1706 if (str && str[0] == '/') { 1707 char *tmp = strrchr(str, '/'); 1708 1709 /* 1710 * If it ends in "/old", check the second-to-last 1711 * component of the string instead. 1712 */ 1713 if (tmp != str && strcmp(tmp, "/old") == 0) { 1714 for (tmp--; *tmp != '/'; tmp--) 1715 ; 1716 } 1717 str = tmp + 1; 1718 } 1719 return (CTD_CHECK(str)); 1720} 1721 1722/* 1723 * Find a vdev that matches the search criteria specified. We use the 1724 * the nvpair name to determine how we should look for the device. 1725 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL 1726 * spare; but FALSE if its an INUSE spare. 
1727 */ 1728static nvlist_t * 1729vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare, 1730 boolean_t *l2cache, boolean_t *log) 1731{ 1732 uint_t c, children; 1733 nvlist_t **child; 1734 nvlist_t *ret; 1735 uint64_t is_log; 1736 char *srchkey; 1737 nvpair_t *pair = nvlist_next_nvpair(search, NULL); 1738 1739 /* Nothing to look for */ 1740 if (search == NULL || pair == NULL) 1741 return (NULL); 1742 1743 /* Obtain the key we will use to search */ 1744 srchkey = nvpair_name(pair); 1745 1746 switch (nvpair_type(pair)) { 1747 case DATA_TYPE_UINT64: 1748 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) { 1749 uint64_t srchval, theguid; 1750 1751 verify(nvpair_value_uint64(pair, &srchval) == 0); 1752 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 1753 &theguid) == 0); 1754 if (theguid == srchval) 1755 return (nv); 1756 } 1757 break; 1758 1759 case DATA_TYPE_STRING: { 1760 char *srchval, *val; 1761 1762 verify(nvpair_value_string(pair, &srchval) == 0); 1763 if (nvlist_lookup_string(nv, srchkey, &val) != 0) 1764 break; 1765 1766 /* 1767 * Search for the requested value. Special cases: 1768 * 1769 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in 1770 * "s0" or "s0/old". The "s0" part is hidden from the user, 1771 * but included in the string, so this matches around it. 1772 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE). 1773 * 1774 * Otherwise, all other searches are simple string compares. 1775 */ 1776 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && 1777 ctd_check_path(val)) { 1778 uint64_t wholedisk = 0; 1779 1780 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 1781 &wholedisk); 1782 if (wholedisk) { 1783 int slen = strlen(srchval); 1784 int vlen = strlen(val); 1785 1786 if (slen != vlen - 2) 1787 break; 1788 1789 /* 1790 * make_leaf_vdev() should only set 1791 * wholedisk for ZPOOL_CONFIG_PATHs which 1792 * will include "/dev/dsk/", giving plenty of 1793 * room for the indices used next. 
1794 */ 1795 ASSERT(vlen >= 6); 1796 1797 /* 1798 * strings identical except trailing "s0" 1799 */ 1800 if (strcmp(&val[vlen - 2], "s0") == 0 && 1801 strncmp(srchval, val, slen) == 0) 1802 return (nv); 1803 1804 /* 1805 * strings identical except trailing "s0/old" 1806 */ 1807 if (strcmp(&val[vlen - 6], "s0/old") == 0 && 1808 strcmp(&srchval[slen - 4], "/old") == 0 && 1809 strncmp(srchval, val, slen - 4) == 0) 1810 return (nv); 1811 1812 break; 1813 } 1814 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 1815 char *type, *idx, *end, *p; 1816 uint64_t id, vdev_id; 1817 1818 /* 1819 * Determine our vdev type, keeping in mind 1820 * that the srchval is composed of a type and 1821 * vdev id pair (i.e. mirror-4). 1822 */ 1823 if ((type = strdup(srchval)) == NULL) 1824 return (NULL); 1825 1826 if ((p = strrchr(type, '-')) == NULL) { 1827 free(type); 1828 break; 1829 } 1830 idx = p + 1; 1831 *p = '\0'; 1832 1833 /* 1834 * If the types don't match then keep looking. 1835 */ 1836 if (strncmp(val, type, strlen(val)) != 0) { 1837 free(type); 1838 break; 1839 } 1840 1841 verify(strncmp(type, VDEV_TYPE_RAIDZ, 1842 strlen(VDEV_TYPE_RAIDZ)) == 0 || 1843 strncmp(type, VDEV_TYPE_MIRROR, 1844 strlen(VDEV_TYPE_MIRROR)) == 0); 1845 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 1846 &id) == 0); 1847 1848 errno = 0; 1849 vdev_id = strtoull(idx, &end, 10); 1850 1851 free(type); 1852 if (errno != 0) 1853 return (NULL); 1854 1855 /* 1856 * Now verify that we have the correct vdev id. 
1857 */ 1858 if (vdev_id == id) 1859 return (nv); 1860 } 1861 1862 /* 1863 * Common case 1864 */ 1865 if (strcmp(srchval, val) == 0) 1866 return (nv); 1867 break; 1868 } 1869 1870 default: 1871 break; 1872 } 1873 1874 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1875 &child, &children) != 0) 1876 return (NULL); 1877 1878 for (c = 0; c < children; c++) { 1879 if ((ret = vdev_to_nvlist_iter(child[c], search, 1880 avail_spare, l2cache, NULL)) != NULL) { 1881 /* 1882 * The 'is_log' value is only set for the toplevel 1883 * vdev, not the leaf vdevs. So we always lookup the 1884 * log device from the root of the vdev tree (where 1885 * 'log' is non-NULL). 1886 */ 1887 if (log != NULL && 1888 nvlist_lookup_uint64(child[c], 1889 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 1890 is_log) { 1891 *log = B_TRUE; 1892 } 1893 return (ret); 1894 } 1895 } 1896 1897 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 1898 &child, &children) == 0) { 1899 for (c = 0; c < children; c++) { 1900 if ((ret = vdev_to_nvlist_iter(child[c], search, 1901 avail_spare, l2cache, NULL)) != NULL) { 1902 *avail_spare = B_TRUE; 1903 return (ret); 1904 } 1905 } 1906 } 1907 1908 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 1909 &child, &children) == 0) { 1910 for (c = 0; c < children; c++) { 1911 if ((ret = vdev_to_nvlist_iter(child[c], search, 1912 avail_spare, l2cache, NULL)) != NULL) { 1913 *l2cache = B_TRUE; 1914 return (ret); 1915 } 1916 } 1917 } 1918 1919 return (NULL); 1920} 1921 1922/* 1923 * Given a physical path (minus the "/devices" prefix), find the 1924 * associated vdev. 
1925 */ 1926nvlist_t * 1927zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 1928 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 1929{ 1930 nvlist_t *search, *nvroot, *ret; 1931 1932 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 1933 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); 1934 1935 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 1936 &nvroot) == 0); 1937 1938 *avail_spare = B_FALSE; 1939 *l2cache = B_FALSE; 1940 if (log != NULL) 1941 *log = B_FALSE; 1942 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 1943 nvlist_free(search); 1944 1945 return (ret); 1946} 1947 1948/* 1949 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 1950 */ 1951boolean_t 1952zpool_vdev_is_interior(const char *name) 1953{ 1954 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 1955 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 1956 return (B_TRUE); 1957 return (B_FALSE); 1958} 1959 1960nvlist_t * 1961zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 1962 boolean_t *l2cache, boolean_t *log) 1963{ 1964 char buf[MAXPATHLEN]; 1965 char *end; 1966 nvlist_t *nvroot, *search, *ret; 1967 uint64_t guid; 1968 1969 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 1970 1971 guid = strtoull(path, &end, 10); 1972 if (guid != 0 && *end == '\0') { 1973 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 1974 } else if (zpool_vdev_is_interior(path)) { 1975 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 1976 } else if (path[0] != '/') { 1977 (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path); 1978 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 1979 } else { 1980 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 1981 } 1982 1983 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 1984 &nvroot) == 0); 
1985 1986 *avail_spare = B_FALSE; 1987 *l2cache = B_FALSE; 1988 if (log != NULL) 1989 *log = B_FALSE; 1990 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 1991 nvlist_free(search); 1992 1993 return (ret); 1994} 1995 1996static int 1997vdev_online(nvlist_t *nv) 1998{ 1999 uint64_t ival; 2000 2001 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2002 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2003 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2004 return (0); 2005 2006 return (1); 2007} 2008 2009/* 2010 * Helper function for zpool_get_physpaths(). 2011 */ 2012static int 2013vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2014 size_t *bytes_written) 2015{ 2016 size_t bytes_left, pos, rsz; 2017 char *tmppath; 2018 const char *format; 2019 2020 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2021 &tmppath) != 0) 2022 return (EZFS_NODEVICE); 2023 2024 pos = *bytes_written; 2025 bytes_left = physpath_size - pos; 2026 format = (pos == 0) ? "%s" : " %s"; 2027 2028 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2029 *bytes_written += rsz; 2030 2031 if (rsz >= bytes_left) { 2032 /* if physpath was not copied properly, clear it */ 2033 if (bytes_left != 0) { 2034 physpath[pos] = 0; 2035 } 2036 return (EZFS_NOSPC); 2037 } 2038 return (0); 2039} 2040 2041static int 2042vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2043 size_t *rsz, boolean_t is_spare) 2044{ 2045 char *type; 2046 int ret; 2047 2048 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2049 return (EZFS_INVALCONFIG); 2050 2051 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2052 /* 2053 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2054 * For a spare vdev, we only want to boot from the active 2055 * spare device. 
2056 */ 2057 if (is_spare) { 2058 uint64_t spare = 0; 2059 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2060 &spare); 2061 if (!spare) 2062 return (EZFS_INVALCONFIG); 2063 } 2064 2065 if (vdev_online(nv)) { 2066 if ((ret = vdev_get_one_physpath(nv, physpath, 2067 phypath_size, rsz)) != 0) 2068 return (ret); 2069 } 2070 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2071 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2072 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2073 nvlist_t **child; 2074 uint_t count; 2075 int i, ret; 2076 2077 if (nvlist_lookup_nvlist_array(nv, 2078 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2079 return (EZFS_INVALCONFIG); 2080 2081 for (i = 0; i < count; i++) { 2082 ret = vdev_get_physpaths(child[i], physpath, 2083 phypath_size, rsz, is_spare); 2084 if (ret == EZFS_NOSPC) 2085 return (ret); 2086 } 2087 } 2088 2089 return (EZFS_POOL_INVALARG); 2090} 2091 2092/* 2093 * Get phys_path for a root pool config. 2094 * Return 0 on success; non-zero on failure. 2095 */ 2096static int 2097zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2098{ 2099 size_t rsz; 2100 nvlist_t *vdev_root; 2101 nvlist_t **child; 2102 uint_t count; 2103 char *type; 2104 2105 rsz = 0; 2106 2107 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2108 &vdev_root) != 0) 2109 return (EZFS_INVALCONFIG); 2110 2111 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2112 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2113 &child, &count) != 0) 2114 return (EZFS_INVALCONFIG); 2115 2116 /* 2117 * root pool can not have EFI labeled disks and can only have 2118 * a single top-level vdev. 
2119 */ 2120 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 || 2121 pool_uses_efi(vdev_root)) 2122 return (EZFS_POOL_INVALARG); 2123 2124 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2125 B_FALSE); 2126 2127 /* No online devices */ 2128 if (rsz == 0) 2129 return (EZFS_NODEVICE); 2130 2131 return (0); 2132} 2133 2134/* 2135 * Get phys_path for a root pool 2136 * Return 0 on success; non-zero on failure. 2137 */ 2138int 2139zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2140{ 2141 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2142 phypath_size)); 2143} 2144 2145/* 2146 * If the device has being dynamically expanded then we need to relabel 2147 * the disk to use the new unallocated space. 2148 */ 2149static int 2150zpool_relabel_disk(libzfs_handle_t *hdl, const char *name) 2151{ 2152#ifdef sun 2153 char path[MAXPATHLEN]; 2154 char errbuf[1024]; 2155 int fd, error; 2156 int (*_efi_use_whole_disk)(int); 2157 2158 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, 2159 "efi_use_whole_disk")) == NULL) 2160 return (-1); 2161 2162 (void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name); 2163 2164 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2165 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2166 "relabel '%s': unable to open device"), name); 2167 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2168 } 2169 2170 /* 2171 * It's possible that we might encounter an error if the device 2172 * does not have any unallocated space left. If so, we simply 2173 * ignore that error and continue on. 2174 */ 2175 error = _efi_use_whole_disk(fd); 2176 (void) close(fd); 2177 if (error && error != VT_ENOSPC) { 2178 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2179 "relabel '%s': unable to read disk capacity"), name); 2180 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2181 } 2182#endif /* sun */ 2183 return (0); 2184} 2185 2186/* 2187 * Bring the specified vdev online. 
The 'flags' parameter is a set of the 2188 * ZFS_ONLINE_* flags. 2189 */ 2190int 2191zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2192 vdev_state_t *newstate) 2193{ 2194 zfs_cmd_t zc = { 0 }; 2195 char msg[1024]; 2196 nvlist_t *tgt; 2197 boolean_t avail_spare, l2cache, islog; 2198 libzfs_handle_t *hdl = zhp->zpool_hdl; 2199 2200 if (flags & ZFS_ONLINE_EXPAND) { 2201 (void) snprintf(msg, sizeof (msg), 2202 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2203 } else { 2204 (void) snprintf(msg, sizeof (msg), 2205 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2206 } 2207 2208 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2209 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2210 &islog)) == NULL) 2211 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2212 2213 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2214 2215 if (avail_spare) 2216 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2217 2218 if (flags & ZFS_ONLINE_EXPAND || 2219 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) { 2220 char *pathname = NULL; 2221 uint64_t wholedisk = 0; 2222 2223 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2224 &wholedisk); 2225 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, 2226 &pathname) == 0); 2227 2228 /* 2229 * XXX - L2ARC 1.0 devices can't support expansion. 2230 */ 2231 if (l2cache) { 2232 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2233 "cannot expand cache devices")); 2234 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2235 } 2236 2237 if (wholedisk) { 2238 pathname += strlen(DISK_ROOT) + 1; 2239 (void) zpool_relabel_disk(hdl, pathname); 2240 } 2241 } 2242 2243 zc.zc_cookie = VDEV_STATE_ONLINE; 2244 zc.zc_obj = flags; 2245 2246 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2247 if (errno == EINVAL) { 2248 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2249 "from this pool into a new one. 
Use '%s' " 2250 "instead"), "zpool detach"); 2251 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2252 } 2253 return (zpool_standard_error(hdl, errno, msg)); 2254 } 2255 2256 *newstate = zc.zc_cookie; 2257 return (0); 2258} 2259 2260/* 2261 * Take the specified vdev offline 2262 */ 2263int 2264zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2265{ 2266 zfs_cmd_t zc = { 0 }; 2267 char msg[1024]; 2268 nvlist_t *tgt; 2269 boolean_t avail_spare, l2cache; 2270 libzfs_handle_t *hdl = zhp->zpool_hdl; 2271 2272 (void) snprintf(msg, sizeof (msg), 2273 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2274 2275 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2276 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2277 NULL)) == NULL) 2278 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2279 2280 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2281 2282 if (avail_spare) 2283 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2284 2285 zc.zc_cookie = VDEV_STATE_OFFLINE; 2286 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0; 2287 2288 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2289 return (0); 2290 2291 switch (errno) { 2292 case EBUSY: 2293 2294 /* 2295 * There are no other replicas of this device. 2296 */ 2297 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2298 2299 case EEXIST: 2300 /* 2301 * The log device has unplayed logs 2302 */ 2303 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2304 2305 default: 2306 return (zpool_standard_error(hdl, errno, msg)); 2307 } 2308} 2309 2310/* 2311 * Mark the given vdev faulted. 
2312 */ 2313int 2314zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2315{ 2316 zfs_cmd_t zc = { 0 }; 2317 char msg[1024]; 2318 libzfs_handle_t *hdl = zhp->zpool_hdl; 2319 2320 (void) snprintf(msg, sizeof (msg), 2321 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2322 2323 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2324 zc.zc_guid = guid; 2325 zc.zc_cookie = VDEV_STATE_FAULTED; 2326 zc.zc_obj = aux; 2327 2328 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2329 return (0); 2330 2331 switch (errno) { 2332 case EBUSY: 2333 2334 /* 2335 * There are no other replicas of this device. 2336 */ 2337 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2338 2339 default: 2340 return (zpool_standard_error(hdl, errno, msg)); 2341 } 2342 2343} 2344 2345/* 2346 * Mark the given vdev degraded. 2347 */ 2348int 2349zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2350{ 2351 zfs_cmd_t zc = { 0 }; 2352 char msg[1024]; 2353 libzfs_handle_t *hdl = zhp->zpool_hdl; 2354 2355 (void) snprintf(msg, sizeof (msg), 2356 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2357 2358 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2359 zc.zc_guid = guid; 2360 zc.zc_cookie = VDEV_STATE_DEGRADED; 2361 zc.zc_obj = aux; 2362 2363 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2364 return (0); 2365 2366 return (zpool_standard_error(hdl, errno, msg)); 2367} 2368 2369/* 2370 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2371 * a hot spare. 
2372 */ 2373static boolean_t 2374is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2375{ 2376 nvlist_t **child; 2377 uint_t c, children; 2378 char *type; 2379 2380 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2381 &children) == 0) { 2382 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2383 &type) == 0); 2384 2385 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2386 children == 2 && child[which] == tgt) 2387 return (B_TRUE); 2388 2389 for (c = 0; c < children; c++) 2390 if (is_replacing_spare(child[c], tgt, which)) 2391 return (B_TRUE); 2392 } 2393 2394 return (B_FALSE); 2395} 2396 2397/* 2398 * Attach new_disk (fully described by nvroot) to old_disk. 2399 * If 'replacing' is specified, the new disk will replace the old one. 2400 */ 2401int 2402zpool_vdev_attach(zpool_handle_t *zhp, 2403 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2404{ 2405 zfs_cmd_t zc = { 0 }; 2406 char msg[1024]; 2407 int ret; 2408 nvlist_t *tgt; 2409 boolean_t avail_spare, l2cache, islog; 2410 uint64_t val; 2411 char *newname; 2412 nvlist_t **child; 2413 uint_t children; 2414 nvlist_t *config_root; 2415 libzfs_handle_t *hdl = zhp->zpool_hdl;
|
2415 boolean_t rootpool = pool_is_bootable(zhp);
| 2416 boolean_t rootpool = zpool_is_bootable(zhp);
|
2416 2417 if (replacing) 2418 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2419 "cannot replace %s with %s"), old_disk, new_disk); 2420 else 2421 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2422 "cannot attach %s to %s"), new_disk, old_disk); 2423 2424 /* 2425 * If this is a root pool, make sure that we're not attaching an 2426 * EFI labeled device. 2427 */ 2428 if (rootpool && pool_uses_efi(nvroot)) { 2429 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2430 "EFI labeled devices are not supported on root pools.")); 2431 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 2432 } 2433 2434 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2435 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2436 &islog)) == 0) 2437 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2438 2439 if (avail_spare) 2440 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2441 2442 if (l2cache) 2443 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2444 2445 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2446 zc.zc_cookie = replacing; 2447 2448 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2449 &child, &children) != 0 || children != 1) { 2450 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2451 "new device must be a single disk")); 2452 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2453 } 2454 2455 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2456 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2457 2458 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2459 return (-1); 2460 2461 /* 2462 * If the target is a hot spare that has been swapped in, we can only 2463 * replace it with another hot spare. 
2464 */ 2465 if (replacing && 2466 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2467 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2468 NULL) == NULL || !avail_spare) && 2469 is_replacing_spare(config_root, tgt, 1)) { 2470 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2471 "can only be replaced by another hot spare")); 2472 free(newname); 2473 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2474 } 2475 2476 free(newname); 2477 2478 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2479 return (-1); 2480 2481 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2482 2483 zcmd_free_nvlists(&zc); 2484 2485 if (ret == 0) { 2486 if (rootpool) { 2487 /* 2488 * XXX need a better way to prevent user from 2489 * booting up a half-baked vdev. 2490 */ 2491 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2492 "sure to wait until resilver is done " 2493 "before rebooting.\n")); 2494 (void) fprintf(stderr, "\n"); 2495 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If " 2496 "you boot from pool '%s', you may need to update\n" 2497 "boot code on newly attached disk '%s'.\n\n" 2498 "Assuming you use GPT partitioning and 'da0' is " 2499 "your new boot disk\n" 2500 "you may use the following command:\n\n" 2501 "\tgpart bootcode -b /boot/pmbr -p " 2502 "/boot/gptzfsboot -i 1 da0\n\n"), 2503 zhp->zpool_name, new_disk); 2504 } 2505 return (0); 2506 } 2507 2508 switch (errno) { 2509 case ENOTSUP: 2510 /* 2511 * Can't attach to or replace this type of vdev. 
2512 */ 2513 if (replacing) { 2514 uint64_t version = zpool_get_prop_int(zhp, 2515 ZPOOL_PROP_VERSION, NULL); 2516 2517 if (islog) 2518 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2519 "cannot replace a log with a spare")); 2520 else if (version >= SPA_VERSION_MULTI_REPLACE) 2521 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2522 "already in replacing/spare config; wait " 2523 "for completion or use 'zpool detach'")); 2524 else 2525 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2526 "cannot replace a replacing device")); 2527 } else { 2528 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2529 "can only attach to mirrors and top-level " 2530 "disks")); 2531 } 2532 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2533 break; 2534 2535 case EINVAL: 2536 /* 2537 * The new device must be a single disk. 2538 */ 2539 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2540 "new device must be a single disk")); 2541 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2542 break; 2543 2544 case EBUSY: 2545 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2546 new_disk); 2547 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2548 break; 2549 2550 case EOVERFLOW: 2551 /* 2552 * The new device is too small. 2553 */ 2554 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2555 "device is too small")); 2556 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2557 break; 2558 2559 case EDOM: 2560 /* 2561 * The new device has a different alignment requirement. 2562 */ 2563 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2564 "devices have different sector alignment")); 2565 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2566 break; 2567 2568 case ENAMETOOLONG: 2569 /* 2570 * The resulting top-level vdev spec won't fit in the label. 2571 */ 2572 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2573 break; 2574 2575 default: 2576 (void) zpool_standard_error(hdl, errno, msg); 2577 } 2578 2579 return (-1); 2580} 2581 2582/* 2583 * Detach the specified device. 
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	/* Hot spares and l2cache devices are removed, not detached. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}

/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
2648 */ 2649static int 2650find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2651 nvlist_t **schild, uint_t schildren) 2652{ 2653 uint_t mc; 2654 2655 for (mc = 0; mc < mchildren; mc++) { 2656 uint_t sc; 2657 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2658 mchild[mc], B_FALSE); 2659 2660 for (sc = 0; sc < schildren; sc++) { 2661 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2662 schild[sc], B_FALSE); 2663 boolean_t result = (strcmp(mpath, spath) == 0); 2664 2665 free(spath); 2666 if (result) { 2667 free(mpath); 2668 return (mc); 2669 } 2670 } 2671 2672 free(mpath); 2673 } 2674 2675 return (-1); 2676} 2677 2678/* 2679 * Split a mirror pool. If newroot points to null, then a new nvlist 2680 * is generated and it is the responsibility of the caller to free it. 2681 */ 2682int 2683zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2684 nvlist_t *props, splitflags_t flags) 2685{ 2686 zfs_cmd_t zc = { 0 }; 2687 char msg[1024]; 2688 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2689 nvlist_t **varray = NULL, *zc_props = NULL; 2690 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2691 libzfs_handle_t *hdl = zhp->zpool_hdl; 2692 uint64_t vers; 2693 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2694 int retval = 0; 2695 2696 (void) snprintf(msg, sizeof (msg), 2697 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2698 2699 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2700 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2701 2702 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2703 (void) fprintf(stderr, gettext("Internal error: unable to " 2704 "retrieve pool configuration\n")); 2705 return (-1); 2706 } 2707 2708 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2709 == 0); 2710 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2711 2712 if (props) { 2713 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 
2714 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2715 props, vers, flags, msg)) == NULL) 2716 return (-1); 2717 } 2718 2719 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2720 &children) != 0) { 2721 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2722 "Source pool is missing vdev tree")); 2723 if (zc_props) 2724 nvlist_free(zc_props); 2725 return (-1); 2726 } 2727 2728 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2729 vcount = 0; 2730 2731 if (*newroot == NULL || 2732 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2733 &newchild, &newchildren) != 0) 2734 newchildren = 0; 2735 2736 for (c = 0; c < children; c++) { 2737 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2738 char *type; 2739 nvlist_t **mchild, *vdev; 2740 uint_t mchildren; 2741 int entry; 2742 2743 /* 2744 * Unlike cache & spares, slogs are stored in the 2745 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2746 */ 2747 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2748 &is_log); 2749 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2750 &is_hole); 2751 if (is_log || is_hole) { 2752 /* 2753 * Create a hole vdev and put it in the config. 
2754 */ 2755 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2756 goto out; 2757 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2758 VDEV_TYPE_HOLE) != 0) 2759 goto out; 2760 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2761 1) != 0) 2762 goto out; 2763 if (lastlog == 0) 2764 lastlog = vcount; 2765 varray[vcount++] = vdev; 2766 continue; 2767 } 2768 lastlog = 0; 2769 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 2770 == 0); 2771 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 2772 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2773 "Source pool must be composed only of mirrors\n")); 2774 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2775 goto out; 2776 } 2777 2778 verify(nvlist_lookup_nvlist_array(child[c], 2779 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2780 2781 /* find or add an entry for this top-level vdev */ 2782 if (newchildren > 0 && 2783 (entry = find_vdev_entry(zhp, mchild, mchildren, 2784 newchild, newchildren)) >= 0) { 2785 /* We found a disk that the user specified. */ 2786 vdev = mchild[entry]; 2787 ++found; 2788 } else { 2789 /* User didn't specify a disk for this vdev. */ 2790 vdev = mchild[mchildren - 1]; 2791 } 2792 2793 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 2794 goto out; 2795 } 2796 2797 /* did we find every disk the user specified? */ 2798 if (found != newchildren) { 2799 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 2800 "include at most one disk from each mirror")); 2801 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2802 goto out; 2803 } 2804 2805 /* Prepare the nvlist for populating. 
*/ 2806 if (*newroot == NULL) { 2807 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 2808 goto out; 2809 freelist = B_TRUE; 2810 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 2811 VDEV_TYPE_ROOT) != 0) 2812 goto out; 2813 } else { 2814 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 2815 } 2816 2817 /* Add all the children we found */ 2818 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 2819 lastlog == 0 ? vcount : lastlog) != 0) 2820 goto out; 2821 2822 /* 2823 * If we're just doing a dry run, exit now with success. 2824 */ 2825 if (flags.dryrun) { 2826 memory_err = B_FALSE; 2827 freelist = B_FALSE; 2828 goto out; 2829 } 2830 2831 /* now build up the config list & call the ioctl */ 2832 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 2833 goto out; 2834 2835 if (nvlist_add_nvlist(newconfig, 2836 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 2837 nvlist_add_string(newconfig, 2838 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 2839 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 2840 goto out; 2841 2842 /* 2843 * The new pool is automatically part of the namespace unless we 2844 * explicitly export it. 
2845 */ 2846 if (!flags.import) 2847 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 2848 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2849 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 2850 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 2851 goto out; 2852 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 2853 goto out; 2854 2855 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 2856 retval = zpool_standard_error(hdl, errno, msg); 2857 goto out; 2858 } 2859 2860 freelist = B_FALSE; 2861 memory_err = B_FALSE; 2862 2863out: 2864 if (varray != NULL) { 2865 int v; 2866 2867 for (v = 0; v < vcount; v++) 2868 nvlist_free(varray[v]); 2869 free(varray); 2870 } 2871 zcmd_free_nvlists(&zc); 2872 if (zc_props) 2873 nvlist_free(zc_props); 2874 if (newconfig) 2875 nvlist_free(newconfig); 2876 if (freelist) { 2877 nvlist_free(*newroot); 2878 *newroot = NULL; 2879 } 2880 2881 if (retval != 0) 2882 return (retval); 2883 2884 if (memory_err) 2885 return (no_memory(hdl)); 2886 2887 return (0); 2888} 2889 2890/* 2891 * Remove the given device. Currently, this is supported only for hot spares 2892 * and level 2 cache devices. 2893 */ 2894int 2895zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 2896{ 2897 zfs_cmd_t zc = { 0 }; 2898 char msg[1024]; 2899 nvlist_t *tgt; 2900 boolean_t avail_spare, l2cache, islog; 2901 libzfs_handle_t *hdl = zhp->zpool_hdl; 2902 uint64_t version; 2903 2904 (void) snprintf(msg, sizeof (msg), 2905 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 2906 2907 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2908 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2909 &islog)) == 0) 2910 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2911 /* 2912 * XXX - this should just go away. 
2913 */ 2914 if (!avail_spare && !l2cache && !islog) { 2915 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2916 "only inactive hot spares, cache, top-level, " 2917 "or log devices can be removed")); 2918 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2919 } 2920 2921 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 2922 if (islog && version < SPA_VERSION_HOLES) { 2923 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2924 "pool must be upgrade to support log removal")); 2925 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 2926 } 2927 2928 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2929 2930 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 2931 return (0); 2932 2933 return (zpool_standard_error(hdl, errno, msg)); 2934} 2935 2936/* 2937 * Clear the errors for the pool, or the particular device if specified. 2938 */ 2939int 2940zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 2941{ 2942 zfs_cmd_t zc = { 0 }; 2943 char msg[1024]; 2944 nvlist_t *tgt; 2945 zpool_rewind_policy_t policy; 2946 boolean_t avail_spare, l2cache; 2947 libzfs_handle_t *hdl = zhp->zpool_hdl; 2948 nvlist_t *nvi = NULL; 2949 int error; 2950 2951 if (path) 2952 (void) snprintf(msg, sizeof (msg), 2953 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 2954 path); 2955 else 2956 (void) snprintf(msg, sizeof (msg), 2957 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 2958 zhp->zpool_name); 2959 2960 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2961 if (path) { 2962 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 2963 &l2cache, NULL)) == 0) 2964 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2965 2966 /* 2967 * Don't allow error clearing for hot spares. Do allow 2968 * error clearing for l2cache devices. 
2969 */ 2970 if (avail_spare) 2971 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2972 2973 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 2974 &zc.zc_guid) == 0); 2975 } 2976 2977 zpool_get_rewind_policy(rewindnvl, &policy); 2978 zc.zc_cookie = policy.zrp_request; 2979 2980 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 2981 return (-1); 2982 2983 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 2984 return (-1); 2985 2986 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 2987 errno == ENOMEM) { 2988 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 2989 zcmd_free_nvlists(&zc); 2990 return (-1); 2991 } 2992 } 2993 2994 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) && 2995 errno != EPERM && errno != EACCES)) { 2996 if (policy.zrp_request & 2997 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 2998 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 2999 zpool_rewind_exclaim(hdl, zc.zc_name, 3000 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), 3001 nvi); 3002 nvlist_free(nvi); 3003 } 3004 zcmd_free_nvlists(&zc); 3005 return (0); 3006 } 3007 3008 zcmd_free_nvlists(&zc); 3009 return (zpool_standard_error(hdl, errno, msg)); 3010} 3011 3012/* 3013 * Similar to zpool_clear(), but takes a GUID (used by fmd). 3014 */ 3015int 3016zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3017{ 3018 zfs_cmd_t zc = { 0 }; 3019 char msg[1024]; 3020 libzfs_handle_t *hdl = zhp->zpool_hdl; 3021 3022 (void) snprintf(msg, sizeof (msg), 3023 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3024 guid); 3025 3026 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3027 zc.zc_guid = guid; 3028 zc.zc_cookie = ZPOOL_NO_REWIND; 3029 3030 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3031 return (0); 3032 3033 return (zpool_standard_error(hdl, errno, msg)); 3034} 3035 3036/* 3037 * Change the GUID for a pool. 
3038 */ 3039int 3040zpool_reguid(zpool_handle_t *zhp) 3041{ 3042 char msg[1024]; 3043 libzfs_handle_t *hdl = zhp->zpool_hdl; 3044 zfs_cmd_t zc = { 0 }; 3045 3046 (void) snprintf(msg, sizeof (msg), 3047 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3048 3049 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3050 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3051 return (0); 3052 3053 return (zpool_standard_error(hdl, errno, msg)); 3054} 3055 3056/*
| 2417 2418 if (replacing) 2419 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2420 "cannot replace %s with %s"), old_disk, new_disk); 2421 else 2422 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2423 "cannot attach %s to %s"), new_disk, old_disk); 2424 2425 /* 2426 * If this is a root pool, make sure that we're not attaching an 2427 * EFI labeled device. 2428 */ 2429 if (rootpool && pool_uses_efi(nvroot)) { 2430 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2431 "EFI labeled devices are not supported on root pools.")); 2432 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 2433 } 2434 2435 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2436 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2437 &islog)) == 0) 2438 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2439 2440 if (avail_spare) 2441 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2442 2443 if (l2cache) 2444 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2445 2446 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2447 zc.zc_cookie = replacing; 2448 2449 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2450 &child, &children) != 0 || children != 1) { 2451 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2452 "new device must be a single disk")); 2453 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2454 } 2455 2456 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2457 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2458 2459 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2460 return (-1); 2461 2462 /* 2463 * If the target is a hot spare that has been swapped in, we can only 2464 * replace it with another hot spare. 
2465 */ 2466 if (replacing && 2467 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2468 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2469 NULL) == NULL || !avail_spare) && 2470 is_replacing_spare(config_root, tgt, 1)) { 2471 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2472 "can only be replaced by another hot spare")); 2473 free(newname); 2474 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2475 } 2476 2477 free(newname); 2478 2479 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2480 return (-1); 2481 2482 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2483 2484 zcmd_free_nvlists(&zc); 2485 2486 if (ret == 0) { 2487 if (rootpool) { 2488 /* 2489 * XXX need a better way to prevent user from 2490 * booting up a half-baked vdev. 2491 */ 2492 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2493 "sure to wait until resilver is done " 2494 "before rebooting.\n")); 2495 (void) fprintf(stderr, "\n"); 2496 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If " 2497 "you boot from pool '%s', you may need to update\n" 2498 "boot code on newly attached disk '%s'.\n\n" 2499 "Assuming you use GPT partitioning and 'da0' is " 2500 "your new boot disk\n" 2501 "you may use the following command:\n\n" 2502 "\tgpart bootcode -b /boot/pmbr -p " 2503 "/boot/gptzfsboot -i 1 da0\n\n"), 2504 zhp->zpool_name, new_disk); 2505 } 2506 return (0); 2507 } 2508 2509 switch (errno) { 2510 case ENOTSUP: 2511 /* 2512 * Can't attach to or replace this type of vdev. 
2513 */ 2514 if (replacing) { 2515 uint64_t version = zpool_get_prop_int(zhp, 2516 ZPOOL_PROP_VERSION, NULL); 2517 2518 if (islog) 2519 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2520 "cannot replace a log with a spare")); 2521 else if (version >= SPA_VERSION_MULTI_REPLACE) 2522 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2523 "already in replacing/spare config; wait " 2524 "for completion or use 'zpool detach'")); 2525 else 2526 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2527 "cannot replace a replacing device")); 2528 } else { 2529 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2530 "can only attach to mirrors and top-level " 2531 "disks")); 2532 } 2533 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2534 break; 2535 2536 case EINVAL: 2537 /* 2538 * The new device must be a single disk. 2539 */ 2540 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2541 "new device must be a single disk")); 2542 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2543 break; 2544 2545 case EBUSY: 2546 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2547 new_disk); 2548 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2549 break; 2550 2551 case EOVERFLOW: 2552 /* 2553 * The new device is too small. 2554 */ 2555 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2556 "device is too small")); 2557 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2558 break; 2559 2560 case EDOM: 2561 /* 2562 * The new device has a different alignment requirement. 2563 */ 2564 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2565 "devices have different sector alignment")); 2566 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2567 break; 2568 2569 case ENAMETOOLONG: 2570 /* 2571 * The resulting top-level vdev spec won't fit in the label. 2572 */ 2573 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2574 break; 2575 2576 default: 2577 (void) zpool_standard_error(hdl, errno, msg); 2578 } 2579 2580 return (-1); 2581} 2582 2583/* 2584 * Detach the specified device. 
2585 */ 2586int 2587zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 2588{ 2589 zfs_cmd_t zc = { 0 }; 2590 char msg[1024]; 2591 nvlist_t *tgt; 2592 boolean_t avail_spare, l2cache; 2593 libzfs_handle_t *hdl = zhp->zpool_hdl; 2594 2595 (void) snprintf(msg, sizeof (msg), 2596 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 2597 2598 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2599 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2600 NULL)) == 0) 2601 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2602 2603 if (avail_spare) 2604 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2605 2606 if (l2cache) 2607 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2608 2609 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2610 2611 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 2612 return (0); 2613 2614 switch (errno) { 2615 2616 case ENOTSUP: 2617 /* 2618 * Can't detach from this type of vdev. 2619 */ 2620 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 2621 "applicable to mirror and replacing vdevs")); 2622 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2623 break; 2624 2625 case EBUSY: 2626 /* 2627 * There are no other replicas of this device. 2628 */ 2629 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 2630 break; 2631 2632 default: 2633 (void) zpool_standard_error(hdl, errno, msg); 2634 } 2635 2636 return (-1); 2637} 2638 2639/* 2640 * Find a mirror vdev in the source nvlist. 2641 * 2642 * The mchild array contains a list of disks in one of the top-level mirrors 2643 * of the source pool. The schild array contains a list of disks that the 2644 * user specified on the command line. We loop over the mchild array to 2645 * see if any entry in the schild array matches. 2646 * 2647 * If a disk in the mchild array is found in the schild array, we return 2648 * the index of that entry. Otherwise we return -1. 
2649 */ 2650static int 2651find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2652 nvlist_t **schild, uint_t schildren) 2653{ 2654 uint_t mc; 2655 2656 for (mc = 0; mc < mchildren; mc++) { 2657 uint_t sc; 2658 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2659 mchild[mc], B_FALSE); 2660 2661 for (sc = 0; sc < schildren; sc++) { 2662 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2663 schild[sc], B_FALSE); 2664 boolean_t result = (strcmp(mpath, spath) == 0); 2665 2666 free(spath); 2667 if (result) { 2668 free(mpath); 2669 return (mc); 2670 } 2671 } 2672 2673 free(mpath); 2674 } 2675 2676 return (-1); 2677} 2678 2679/* 2680 * Split a mirror pool. If newroot points to null, then a new nvlist 2681 * is generated and it is the responsibility of the caller to free it. 2682 */ 2683int 2684zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2685 nvlist_t *props, splitflags_t flags) 2686{ 2687 zfs_cmd_t zc = { 0 }; 2688 char msg[1024]; 2689 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2690 nvlist_t **varray = NULL, *zc_props = NULL; 2691 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2692 libzfs_handle_t *hdl = zhp->zpool_hdl; 2693 uint64_t vers; 2694 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2695 int retval = 0; 2696 2697 (void) snprintf(msg, sizeof (msg), 2698 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2699 2700 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2701 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2702 2703 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2704 (void) fprintf(stderr, gettext("Internal error: unable to " 2705 "retrieve pool configuration\n")); 2706 return (-1); 2707 } 2708 2709 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2710 == 0); 2711 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2712 2713 if (props) { 2714 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 
2715 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2716 props, vers, flags, msg)) == NULL) 2717 return (-1); 2718 } 2719 2720 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2721 &children) != 0) { 2722 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2723 "Source pool is missing vdev tree")); 2724 if (zc_props) 2725 nvlist_free(zc_props); 2726 return (-1); 2727 } 2728 2729 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2730 vcount = 0; 2731 2732 if (*newroot == NULL || 2733 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2734 &newchild, &newchildren) != 0) 2735 newchildren = 0; 2736 2737 for (c = 0; c < children; c++) { 2738 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2739 char *type; 2740 nvlist_t **mchild, *vdev; 2741 uint_t mchildren; 2742 int entry; 2743 2744 /* 2745 * Unlike cache & spares, slogs are stored in the 2746 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2747 */ 2748 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2749 &is_log); 2750 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2751 &is_hole); 2752 if (is_log || is_hole) { 2753 /* 2754 * Create a hole vdev and put it in the config. 
2755 */ 2756 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2757 goto out; 2758 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2759 VDEV_TYPE_HOLE) != 0) 2760 goto out; 2761 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2762 1) != 0) 2763 goto out; 2764 if (lastlog == 0) 2765 lastlog = vcount; 2766 varray[vcount++] = vdev; 2767 continue; 2768 } 2769 lastlog = 0; 2770 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 2771 == 0); 2772 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 2773 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2774 "Source pool must be composed only of mirrors\n")); 2775 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2776 goto out; 2777 } 2778 2779 verify(nvlist_lookup_nvlist_array(child[c], 2780 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2781 2782 /* find or add an entry for this top-level vdev */ 2783 if (newchildren > 0 && 2784 (entry = find_vdev_entry(zhp, mchild, mchildren, 2785 newchild, newchildren)) >= 0) { 2786 /* We found a disk that the user specified. */ 2787 vdev = mchild[entry]; 2788 ++found; 2789 } else { 2790 /* User didn't specify a disk for this vdev. */ 2791 vdev = mchild[mchildren - 1]; 2792 } 2793 2794 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 2795 goto out; 2796 } 2797 2798 /* did we find every disk the user specified? */ 2799 if (found != newchildren) { 2800 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 2801 "include at most one disk from each mirror")); 2802 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2803 goto out; 2804 } 2805 2806 /* Prepare the nvlist for populating. 
*/ 2807 if (*newroot == NULL) { 2808 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 2809 goto out; 2810 freelist = B_TRUE; 2811 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 2812 VDEV_TYPE_ROOT) != 0) 2813 goto out; 2814 } else { 2815 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 2816 } 2817 2818 /* Add all the children we found */ 2819 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 2820 lastlog == 0 ? vcount : lastlog) != 0) 2821 goto out; 2822 2823 /* 2824 * If we're just doing a dry run, exit now with success. 2825 */ 2826 if (flags.dryrun) { 2827 memory_err = B_FALSE; 2828 freelist = B_FALSE; 2829 goto out; 2830 } 2831 2832 /* now build up the config list & call the ioctl */ 2833 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 2834 goto out; 2835 2836 if (nvlist_add_nvlist(newconfig, 2837 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 2838 nvlist_add_string(newconfig, 2839 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 2840 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 2841 goto out; 2842 2843 /* 2844 * The new pool is automatically part of the namespace unless we 2845 * explicitly export it. 
2846 */ 2847 if (!flags.import) 2848 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 2849 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2850 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 2851 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 2852 goto out; 2853 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 2854 goto out; 2855 2856 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 2857 retval = zpool_standard_error(hdl, errno, msg); 2858 goto out; 2859 } 2860 2861 freelist = B_FALSE; 2862 memory_err = B_FALSE; 2863 2864out: 2865 if (varray != NULL) { 2866 int v; 2867 2868 for (v = 0; v < vcount; v++) 2869 nvlist_free(varray[v]); 2870 free(varray); 2871 } 2872 zcmd_free_nvlists(&zc); 2873 if (zc_props) 2874 nvlist_free(zc_props); 2875 if (newconfig) 2876 nvlist_free(newconfig); 2877 if (freelist) { 2878 nvlist_free(*newroot); 2879 *newroot = NULL; 2880 } 2881 2882 if (retval != 0) 2883 return (retval); 2884 2885 if (memory_err) 2886 return (no_memory(hdl)); 2887 2888 return (0); 2889} 2890 2891/* 2892 * Remove the given device. Currently, this is supported only for hot spares 2893 * and level 2 cache devices. 2894 */ 2895int 2896zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 2897{ 2898 zfs_cmd_t zc = { 0 }; 2899 char msg[1024]; 2900 nvlist_t *tgt; 2901 boolean_t avail_spare, l2cache, islog; 2902 libzfs_handle_t *hdl = zhp->zpool_hdl; 2903 uint64_t version; 2904 2905 (void) snprintf(msg, sizeof (msg), 2906 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 2907 2908 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2909 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2910 &islog)) == 0) 2911 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2912 /* 2913 * XXX - this should just go away. 
2914 */ 2915 if (!avail_spare && !l2cache && !islog) { 2916 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2917 "only inactive hot spares, cache, top-level, " 2918 "or log devices can be removed")); 2919 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2920 } 2921 2922 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 2923 if (islog && version < SPA_VERSION_HOLES) { 2924 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2925 "pool must be upgrade to support log removal")); 2926 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 2927 } 2928 2929 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2930 2931 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 2932 return (0); 2933 2934 return (zpool_standard_error(hdl, errno, msg)); 2935} 2936 2937/* 2938 * Clear the errors for the pool, or the particular device if specified. 2939 */ 2940int 2941zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 2942{ 2943 zfs_cmd_t zc = { 0 }; 2944 char msg[1024]; 2945 nvlist_t *tgt; 2946 zpool_rewind_policy_t policy; 2947 boolean_t avail_spare, l2cache; 2948 libzfs_handle_t *hdl = zhp->zpool_hdl; 2949 nvlist_t *nvi = NULL; 2950 int error; 2951 2952 if (path) 2953 (void) snprintf(msg, sizeof (msg), 2954 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 2955 path); 2956 else 2957 (void) snprintf(msg, sizeof (msg), 2958 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 2959 zhp->zpool_name); 2960 2961 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2962 if (path) { 2963 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 2964 &l2cache, NULL)) == 0) 2965 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2966 2967 /* 2968 * Don't allow error clearing for hot spares. Do allow 2969 * error clearing for l2cache devices. 
2970 */ 2971 if (avail_spare) 2972 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2973 2974 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 2975 &zc.zc_guid) == 0); 2976 } 2977 2978 zpool_get_rewind_policy(rewindnvl, &policy); 2979 zc.zc_cookie = policy.zrp_request; 2980 2981 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 2982 return (-1); 2983 2984 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 2985 return (-1); 2986 2987 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 2988 errno == ENOMEM) { 2989 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 2990 zcmd_free_nvlists(&zc); 2991 return (-1); 2992 } 2993 } 2994 2995 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) && 2996 errno != EPERM && errno != EACCES)) { 2997 if (policy.zrp_request & 2998 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 2999 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3000 zpool_rewind_exclaim(hdl, zc.zc_name, 3001 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), 3002 nvi); 3003 nvlist_free(nvi); 3004 } 3005 zcmd_free_nvlists(&zc); 3006 return (0); 3007 } 3008 3009 zcmd_free_nvlists(&zc); 3010 return (zpool_standard_error(hdl, errno, msg)); 3011} 3012 3013/* 3014 * Similar to zpool_clear(), but takes a GUID (used by fmd). 3015 */ 3016int 3017zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3018{ 3019 zfs_cmd_t zc = { 0 }; 3020 char msg[1024]; 3021 libzfs_handle_t *hdl = zhp->zpool_hdl; 3022 3023 (void) snprintf(msg, sizeof (msg), 3024 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3025 guid); 3026 3027 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3028 zc.zc_guid = guid; 3029 zc.zc_cookie = ZPOOL_NO_REWIND; 3030 3031 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3032 return (0); 3033 3034 return (zpool_standard_error(hdl, errno, msg)); 3035} 3036 3037/* 3038 * Change the GUID for a pool. 
3039 */ 3040int 3041zpool_reguid(zpool_handle_t *zhp) 3042{ 3043 char msg[1024]; 3044 libzfs_handle_t *hdl = zhp->zpool_hdl; 3045 zfs_cmd_t zc = { 0 }; 3046 3047 (void) snprintf(msg, sizeof (msg), 3048 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3049 3050 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3051 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3052 return (0); 3053 3054 return (zpool_standard_error(hdl, errno, msg)); 3055} 3056 3057/*
|
| 3058 * Reopen the pool. 3059 */ 3060int 3061zpool_reopen(zpool_handle_t *zhp) 3062{ 3063 zfs_cmd_t zc = { 0 }; 3064 char msg[1024]; 3065 libzfs_handle_t *hdl = zhp->zpool_hdl; 3066 3067 (void) snprintf(msg, sizeof (msg), 3068 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), 3069 zhp->zpool_name); 3070 3071 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3072 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0) 3073 return (0); 3074 return (zpool_standard_error(hdl, errno, msg)); 3075} 3076 3077/*
|
3057 * Convert from a devid string to a path. 3058 */ 3059static char * 3060devid_to_path(char *devid_str) 3061{ 3062 ddi_devid_t devid; 3063 char *minor; 3064 char *path; 3065 devid_nmlist_t *list = NULL; 3066 int ret; 3067 3068 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3069 return (NULL); 3070 3071 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3072 3073 devid_str_free(minor); 3074 devid_free(devid); 3075 3076 if (ret != 0) 3077 return (NULL); 3078 3079 if ((path = strdup(list[0].devname)) == NULL) 3080 return (NULL); 3081 3082 devid_free_nmlist(list); 3083 3084 return (path); 3085} 3086 3087/* 3088 * Convert from a path to a devid string. 3089 */ 3090static char * 3091path_to_devid(const char *path) 3092{ 3093 int fd; 3094 ddi_devid_t devid; 3095 char *minor, *ret; 3096 3097 if ((fd = open(path, O_RDONLY)) < 0) 3098 return (NULL); 3099 3100 minor = NULL; 3101 ret = NULL; 3102 if (devid_get(fd, &devid) == 0) { 3103 if (devid_get_minor_name(fd, &minor) == 0) 3104 ret = devid_str_encode(devid, minor); 3105 if (minor != NULL) 3106 devid_str_free(minor); 3107 devid_free(devid); 3108 } 3109 (void) close(fd); 3110 3111 return (ret); 3112} 3113 3114/* 3115 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3116 * ignore any failure here, since a common case is for an unprivileged user to 3117 * type 'zpool status', and we'll display the correct information anyway. 3118 */ 3119static void 3120set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3121{ 3122 zfs_cmd_t zc = { 0 }; 3123 3124 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3125 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3126 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3127 &zc.zc_guid) == 0); 3128 3129 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3130} 3131 3132/* 3133 * Given a vdev, return the name to display in iostat. 
If the vdev has a path, 3134 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3135 * We also check if this is a whole disk, in which case we strip off the 3136 * trailing 's0' slice name. 3137 * 3138 * This routine is also responsible for identifying when disks have been 3139 * reconfigured in a new location. The kernel will have opened the device by 3140 * devid, but the path will still refer to the old location. To catch this, we 3141 * first do a path -> devid translation (which is fast for the common case). If 3142 * the devid matches, we're done. If not, we do a reverse devid -> path 3143 * translation and issue the appropriate ioctl() to update the path of the vdev. 3144 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3145 * of these checks. 3146 */ 3147char * 3148zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3149 boolean_t verbose) 3150{ 3151 char *path, *devid; 3152 uint64_t value; 3153 char buf[64]; 3154 vdev_stat_t *vs; 3155 uint_t vsc; 3156 int have_stats; 3157 int have_path; 3158 3159 have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3160 (uint64_t **)&vs, &vsc) == 0; 3161 have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0; 3162 3163 /* 3164 * If the device is not currently present, assume it will not 3165 * come back at the same device path. Display the device by GUID. 3166 */ 3167 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 || 3168 have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN) { 3169 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3170 &value) == 0); 3171 (void) snprintf(buf, sizeof (buf), "%llu", 3172 (u_longlong_t)value); 3173 path = buf; 3174 } else if (have_path) { 3175 3176 /* 3177 * If the device is dead (faulted, offline, etc) then don't 3178 * bother opening it. Otherwise we may be forcing the user to 3179 * open a misbehaving device, which can have undesirable 3180 * effects. 
3181 */ 3182 if ((have_stats == 0 || 3183 vs->vs_state >= VDEV_STATE_DEGRADED) && 3184 zhp != NULL && 3185 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3186 /* 3187 * Determine if the current path is correct. 3188 */ 3189 char *newdevid = path_to_devid(path); 3190 3191 if (newdevid == NULL || 3192 strcmp(devid, newdevid) != 0) { 3193 char *newpath; 3194 3195 if ((newpath = devid_to_path(devid)) != NULL) { 3196 /* 3197 * Update the path appropriately. 3198 */ 3199 set_path(zhp, nv, newpath); 3200 if (nvlist_add_string(nv, 3201 ZPOOL_CONFIG_PATH, newpath) == 0) 3202 verify(nvlist_lookup_string(nv, 3203 ZPOOL_CONFIG_PATH, 3204 &path) == 0); 3205 free(newpath); 3206 } 3207 } 3208 3209 if (newdevid) 3210 devid_str_free(newdevid); 3211 } 3212 3213#ifdef sun 3214 if (strncmp(path, "/dev/dsk/", 9) == 0) 3215 path += 9; 3216 3217 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3218 &value) == 0 && value) { 3219 int pathlen = strlen(path); 3220 char *tmp = zfs_strdup(hdl, path); 3221 3222 /* 3223 * If it starts with c#, and ends with "s0", chop 3224 * the "s0" off, or if it ends with "s0/old", remove 3225 * the "s0" from the middle. 3226 */ 3227 if (CTD_CHECK(tmp)) { 3228 if (strcmp(&tmp[pathlen - 2], "s0") == 0) { 3229 tmp[pathlen - 2] = '\0'; 3230 } else if (pathlen > 6 && 3231 strcmp(&tmp[pathlen - 6], "s0/old") == 0) { 3232 (void) strcpy(&tmp[pathlen - 6], 3233 "/old"); 3234 } 3235 } 3236 return (tmp); 3237 } 3238#else /* !sun */ 3239 if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 3240 path += sizeof(_PATH_DEV) - 1; 3241#endif /* !sun */ 3242 } else { 3243 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 3244 3245 /* 3246 * If it's a raidz device, we need to stick in the parity level. 
3247 */ 3248 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 3249 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 3250 &value) == 0); 3251 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 3252 (u_longlong_t)value); 3253 path = buf; 3254 } 3255 3256 /* 3257 * We identify each top-level vdev by using a <type-id> 3258 * naming convention. 3259 */ 3260 if (verbose) { 3261 uint64_t id; 3262 3263 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 3264 &id) == 0); 3265 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 3266 (u_longlong_t)id); 3267 path = buf; 3268 } 3269 } 3270 3271 return (zfs_strdup(hdl, path)); 3272} 3273 3274static int 3275zbookmark_compare(const void *a, const void *b) 3276{ 3277 return (memcmp(a, b, sizeof (zbookmark_t))); 3278} 3279 3280/* 3281 * Retrieve the persistent error log, uniquify the members, and return to the 3282 * caller. 3283 */ 3284int 3285zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 3286{ 3287 zfs_cmd_t zc = { 0 }; 3288 uint64_t count; 3289 zbookmark_t *zb = NULL; 3290 int i; 3291 3292 /* 3293 * Retrieve the raw error list from the kernel. If the number of errors 3294 * has increased, allocate more space and continue until we get the 3295 * entire list. 
3296 */ 3297 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3298 &count) == 0); 3299 if (count == 0) 3300 return (0); 3301 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3302 count * sizeof (zbookmark_t))) == (uintptr_t)NULL) 3303 return (-1); 3304 zc.zc_nvlist_dst_size = count; 3305 (void) strcpy(zc.zc_name, zhp->zpool_name); 3306 for (;;) { 3307 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3308 &zc) != 0) { 3309 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3310 if (errno == ENOMEM) { 3311 count = zc.zc_nvlist_dst_size; 3312 if ((zc.zc_nvlist_dst = (uintptr_t) 3313 zfs_alloc(zhp->zpool_hdl, count * 3314 sizeof (zbookmark_t))) == (uintptr_t)NULL) 3315 return (-1); 3316 } else { 3317 return (-1); 3318 } 3319 } else { 3320 break; 3321 } 3322 } 3323 3324 /* 3325 * Sort the resulting bookmarks. This is a little confusing due to the 3326 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last 3327 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks 3328 * _not_ copied as part of the process. So we point the start of our 3329 * array appropriate and decrement the total number of elements. 3330 */ 3331 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) + 3332 zc.zc_nvlist_dst_size; 3333 count -= zc.zc_nvlist_dst_size; 3334 3335 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare); 3336 3337 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 3338 3339 /* 3340 * Fill in the nverrlistp with nvlist's of dataset and object numbers. 
3341 */ 3342 for (i = 0; i < count; i++) { 3343 nvlist_t *nv; 3344 3345 /* ignoring zb_blkid and zb_level for now */ 3346 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 3347 zb[i-1].zb_object == zb[i].zb_object) 3348 continue; 3349 3350 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 3351 goto nomem; 3352 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 3353 zb[i].zb_objset) != 0) { 3354 nvlist_free(nv); 3355 goto nomem; 3356 } 3357 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 3358 zb[i].zb_object) != 0) { 3359 nvlist_free(nv); 3360 goto nomem; 3361 } 3362 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 3363 nvlist_free(nv); 3364 goto nomem; 3365 } 3366 nvlist_free(nv); 3367 } 3368 3369 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3370 return (0); 3371 3372nomem: 3373 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3374 return (no_memory(zhp->zpool_hdl)); 3375} 3376 3377/* 3378 * Upgrade a ZFS pool to the latest on-disk version. 3379 */ 3380int 3381zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 3382{ 3383 zfs_cmd_t zc = { 0 }; 3384 libzfs_handle_t *hdl = zhp->zpool_hdl; 3385 3386 (void) strcpy(zc.zc_name, zhp->zpool_name); 3387 zc.zc_cookie = new_version; 3388 3389 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 3390 return (zpool_standard_error_fmt(hdl, errno, 3391 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 3392 zhp->zpool_name)); 3393 return (0); 3394} 3395 3396void 3397zpool_set_history_str(const char *subcommand, int argc, char **argv, 3398 char *history_str) 3399{ 3400 int i; 3401 3402 (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN); 3403 for (i = 1; i < argc; i++) { 3404 if (strlen(history_str) + 1 + strlen(argv[i]) > 3405 HIS_MAX_RECORD_LEN) 3406 break; 3407 (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN); 3408 (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN); 3409 } 3410} 3411 3412/* 3413 * Stage command history for logging. 
3414 */ 3415int 3416zpool_stage_history(libzfs_handle_t *hdl, const char *history_str) 3417{ 3418 if (history_str == NULL) 3419 return (EINVAL); 3420 3421 if (strlen(history_str) > HIS_MAX_RECORD_LEN) 3422 return (EINVAL); 3423 3424 if (hdl->libzfs_log_str != NULL) 3425 free(hdl->libzfs_log_str); 3426 3427 if ((hdl->libzfs_log_str = strdup(history_str)) == NULL) 3428 return (no_memory(hdl)); 3429 3430 return (0); 3431} 3432 3433/* 3434 * Perform ioctl to get some command history of a pool. 3435 * 3436 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 3437 * logical offset of the history buffer to start reading from. 3438 * 3439 * Upon return, 'off' is the next logical offset to read from and 3440 * 'len' is the actual amount of bytes read into 'buf'. 3441 */ 3442static int 3443get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3444{ 3445 zfs_cmd_t zc = { 0 }; 3446 libzfs_handle_t *hdl = zhp->zpool_hdl; 3447 3448 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3449 3450 zc.zc_history = (uint64_t)(uintptr_t)buf; 3451 zc.zc_history_len = *len; 3452 zc.zc_history_offset = *off; 3453 3454 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3455 switch (errno) { 3456 case EPERM: 3457 return (zfs_error_fmt(hdl, EZFS_PERM, 3458 dgettext(TEXT_DOMAIN, 3459 "cannot show history for pool '%s'"), 3460 zhp->zpool_name)); 3461 case ENOENT: 3462 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3463 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3464 "'%s'"), zhp->zpool_name)); 3465 case ENOTSUP: 3466 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3467 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3468 "'%s', pool must be upgraded"), zhp->zpool_name)); 3469 default: 3470 return (zpool_standard_error_fmt(hdl, errno, 3471 dgettext(TEXT_DOMAIN, 3472 "cannot get history for '%s'"), zhp->zpool_name)); 3473 } 3474 } 3475 3476 *len = zc.zc_history_len; 3477 *off = zc.zc_history_offset; 3478 3479 return (0); 3480} 3481 
3482/* 3483 * Process the buffer of nvlists, unpacking and storing each nvlist record 3484 * into 'records'. 'leftover' is set to the number of bytes that weren't 3485 * processed as there wasn't a complete record. 3486 */ 3487int 3488zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3489 nvlist_t ***records, uint_t *numrecords) 3490{ 3491 uint64_t reclen; 3492 nvlist_t *nv; 3493 int i; 3494 3495 while (bytes_read > sizeof (reclen)) { 3496 3497 /* get length of packed record (stored as little endian) */ 3498 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3499 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3500 3501 if (bytes_read < sizeof (reclen) + reclen) 3502 break; 3503 3504 /* unpack record */ 3505 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3506 return (ENOMEM); 3507 bytes_read -= sizeof (reclen) + reclen; 3508 buf += sizeof (reclen) + reclen; 3509 3510 /* add record to nvlist array */ 3511 (*numrecords)++; 3512 if (ISP2(*numrecords + 1)) { 3513 *records = realloc(*records, 3514 *numrecords * 2 * sizeof (nvlist_t *)); 3515 } 3516 (*records)[*numrecords - 1] = nv; 3517 } 3518 3519 *leftover = bytes_read; 3520 return (0); 3521} 3522 3523#define HIS_BUF_LEN (128*1024) 3524 3525/* 3526 * Retrieve the command history of a pool. 
3527 */ 3528int 3529zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3530{ 3531 char buf[HIS_BUF_LEN]; 3532 uint64_t off = 0; 3533 nvlist_t **records = NULL; 3534 uint_t numrecords = 0; 3535 int err, i; 3536 3537 do { 3538 uint64_t bytes_read = sizeof (buf); 3539 uint64_t leftover; 3540 3541 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3542 break; 3543 3544 /* if nothing else was read in, we're at EOF, just return */ 3545 if (!bytes_read) 3546 break; 3547 3548 if ((err = zpool_history_unpack(buf, bytes_read, 3549 &leftover, &records, &numrecords)) != 0) 3550 break; 3551 off -= leftover; 3552 3553 /* CONSTCOND */ 3554 } while (1); 3555 3556 if (!err) { 3557 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3558 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3559 records, numrecords) == 0); 3560 } 3561 for (i = 0; i < numrecords; i++) 3562 nvlist_free(records[i]); 3563 free(records); 3564 3565 return (err); 3566} 3567 3568void 3569zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3570 char *pathname, size_t len) 3571{ 3572 zfs_cmd_t zc = { 0 }; 3573 boolean_t mounted = B_FALSE; 3574 char *mntpnt = NULL; 3575 char dsname[MAXNAMELEN]; 3576 3577 if (dsobj == 0) { 3578 /* special case for the MOS */ 3579 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3580 return; 3581 } 3582 3583 /* get the dataset's name */ 3584 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3585 zc.zc_obj = dsobj; 3586 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3587 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3588 /* just write out a path of two object numbers */ 3589 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3590 dsobj, obj); 3591 return; 3592 } 3593 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3594 3595 /* find out if the dataset is mounted */ 3596 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3597 3598 /* get the corrupted object's path */ 3599 (void) strlcpy(zc.zc_name, dsname, sizeof 
(zc.zc_name)); 3600 zc.zc_obj = obj; 3601 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3602 &zc) == 0) { 3603 if (mounted) { 3604 (void) snprintf(pathname, len, "%s%s", mntpnt, 3605 zc.zc_value); 3606 } else { 3607 (void) snprintf(pathname, len, "%s:%s", 3608 dsname, zc.zc_value); 3609 } 3610 } else { 3611 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3612 } 3613 free(mntpnt); 3614} 3615 3616#ifdef sun 3617/* 3618 * Read the EFI label from the config, if a label does not exist then 3619 * pass back the error to the caller. If the caller has passed a non-NULL 3620 * diskaddr argument then we set it to the starting address of the EFI 3621 * partition. 3622 */ 3623static int 3624read_efi_label(nvlist_t *config, diskaddr_t *sb) 3625{ 3626 char *path; 3627 int fd; 3628 char diskname[MAXPATHLEN]; 3629 int err = -1; 3630 3631 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3632 return (err); 3633 3634 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT, 3635 strrchr(path, '/')); 3636 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3637 struct dk_gpt *vtoc; 3638 3639 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3640 if (sb != NULL) 3641 *sb = vtoc->efi_parts[0].p_start; 3642 efi_free(vtoc); 3643 } 3644 (void) close(fd); 3645 } 3646 return (err); 3647} 3648 3649/* 3650 * determine where a partition starts on a disk in the current 3651 * configuration 3652 */ 3653static diskaddr_t 3654find_start_block(nvlist_t *config) 3655{ 3656 nvlist_t **child; 3657 uint_t c, children; 3658 diskaddr_t sb = MAXOFFSET_T; 3659 uint64_t wholedisk; 3660 3661 if (nvlist_lookup_nvlist_array(config, 3662 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3663 if (nvlist_lookup_uint64(config, 3664 ZPOOL_CONFIG_WHOLE_DISK, 3665 &wholedisk) != 0 || !wholedisk) { 3666 return (MAXOFFSET_T); 3667 } 3668 if (read_efi_label(config, &sb) < 0) 3669 sb = MAXOFFSET_T; 3670 return (sb); 3671 } 3672 3673 for (c = 0; c < children; c++) { 3674 sb = 
find_start_block(child[c]); 3675 if (sb != MAXOFFSET_T) { 3676 return (sb); 3677 } 3678 } 3679 return (MAXOFFSET_T); 3680} 3681#endif /* sun */ 3682 3683/* 3684 * Label an individual disk. The name provided is the short name, 3685 * stripped of any leading /dev path. 3686 */ 3687int 3688zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name) 3689{ 3690#ifdef sun 3691 char path[MAXPATHLEN]; 3692 struct dk_gpt *vtoc; 3693 int fd; 3694 size_t resv = EFI_MIN_RESV_SIZE; 3695 uint64_t slice_size; 3696 diskaddr_t start_block; 3697 char errbuf[1024]; 3698 3699 /* prepare an error message just in case */ 3700 (void) snprintf(errbuf, sizeof (errbuf), 3701 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3702 3703 if (zhp) { 3704 nvlist_t *nvroot; 3705
| 3078 * Convert from a devid string to a path. 3079 */ 3080static char * 3081devid_to_path(char *devid_str) 3082{ 3083 ddi_devid_t devid; 3084 char *minor; 3085 char *path; 3086 devid_nmlist_t *list = NULL; 3087 int ret; 3088 3089 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3090 return (NULL); 3091 3092 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3093 3094 devid_str_free(minor); 3095 devid_free(devid); 3096 3097 if (ret != 0) 3098 return (NULL); 3099 3100 if ((path = strdup(list[0].devname)) == NULL) 3101 return (NULL); 3102 3103 devid_free_nmlist(list); 3104 3105 return (path); 3106} 3107 3108/* 3109 * Convert from a path to a devid string. 3110 */ 3111static char * 3112path_to_devid(const char *path) 3113{ 3114 int fd; 3115 ddi_devid_t devid; 3116 char *minor, *ret; 3117 3118 if ((fd = open(path, O_RDONLY)) < 0) 3119 return (NULL); 3120 3121 minor = NULL; 3122 ret = NULL; 3123 if (devid_get(fd, &devid) == 0) { 3124 if (devid_get_minor_name(fd, &minor) == 0) 3125 ret = devid_str_encode(devid, minor); 3126 if (minor != NULL) 3127 devid_str_free(minor); 3128 devid_free(devid); 3129 } 3130 (void) close(fd); 3131 3132 return (ret); 3133} 3134 3135/* 3136 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3137 * ignore any failure here, since a common case is for an unprivileged user to 3138 * type 'zpool status', and we'll display the correct information anyway. 3139 */ 3140static void 3141set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3142{ 3143 zfs_cmd_t zc = { 0 }; 3144 3145 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3146 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3147 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3148 &zc.zc_guid) == 0); 3149 3150 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3151} 3152 3153/* 3154 * Given a vdev, return the name to display in iostat. 
If the vdev has a path, 3155 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3156 * We also check if this is a whole disk, in which case we strip off the 3157 * trailing 's0' slice name. 3158 * 3159 * This routine is also responsible for identifying when disks have been 3160 * reconfigured in a new location. The kernel will have opened the device by 3161 * devid, but the path will still refer to the old location. To catch this, we 3162 * first do a path -> devid translation (which is fast for the common case). If 3163 * the devid matches, we're done. If not, we do a reverse devid -> path 3164 * translation and issue the appropriate ioctl() to update the path of the vdev. 3165 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3166 * of these checks. 3167 */ 3168char * 3169zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3170 boolean_t verbose) 3171{ 3172 char *path, *devid; 3173 uint64_t value; 3174 char buf[64]; 3175 vdev_stat_t *vs; 3176 uint_t vsc; 3177 int have_stats; 3178 int have_path; 3179 3180 have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3181 (uint64_t **)&vs, &vsc) == 0; 3182 have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0; 3183 3184 /* 3185 * If the device is not currently present, assume it will not 3186 * come back at the same device path. Display the device by GUID. 3187 */ 3188 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 || 3189 have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN) { 3190 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3191 &value) == 0); 3192 (void) snprintf(buf, sizeof (buf), "%llu", 3193 (u_longlong_t)value); 3194 path = buf; 3195 } else if (have_path) { 3196 3197 /* 3198 * If the device is dead (faulted, offline, etc) then don't 3199 * bother opening it. Otherwise we may be forcing the user to 3200 * open a misbehaving device, which can have undesirable 3201 * effects. 
3202 */ 3203 if ((have_stats == 0 || 3204 vs->vs_state >= VDEV_STATE_DEGRADED) && 3205 zhp != NULL && 3206 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3207 /* 3208 * Determine if the current path is correct. 3209 */ 3210 char *newdevid = path_to_devid(path); 3211 3212 if (newdevid == NULL || 3213 strcmp(devid, newdevid) != 0) { 3214 char *newpath; 3215 3216 if ((newpath = devid_to_path(devid)) != NULL) { 3217 /* 3218 * Update the path appropriately. 3219 */ 3220 set_path(zhp, nv, newpath); 3221 if (nvlist_add_string(nv, 3222 ZPOOL_CONFIG_PATH, newpath) == 0) 3223 verify(nvlist_lookup_string(nv, 3224 ZPOOL_CONFIG_PATH, 3225 &path) == 0); 3226 free(newpath); 3227 } 3228 } 3229 3230 if (newdevid) 3231 devid_str_free(newdevid); 3232 } 3233 3234#ifdef sun 3235 if (strncmp(path, "/dev/dsk/", 9) == 0) 3236 path += 9; 3237 3238 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3239 &value) == 0 && value) { 3240 int pathlen = strlen(path); 3241 char *tmp = zfs_strdup(hdl, path); 3242 3243 /* 3244 * If it starts with c#, and ends with "s0", chop 3245 * the "s0" off, or if it ends with "s0/old", remove 3246 * the "s0" from the middle. 3247 */ 3248 if (CTD_CHECK(tmp)) { 3249 if (strcmp(&tmp[pathlen - 2], "s0") == 0) { 3250 tmp[pathlen - 2] = '\0'; 3251 } else if (pathlen > 6 && 3252 strcmp(&tmp[pathlen - 6], "s0/old") == 0) { 3253 (void) strcpy(&tmp[pathlen - 6], 3254 "/old"); 3255 } 3256 } 3257 return (tmp); 3258 } 3259#else /* !sun */ 3260 if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 3261 path += sizeof(_PATH_DEV) - 1; 3262#endif /* !sun */ 3263 } else { 3264 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 3265 3266 /* 3267 * If it's a raidz device, we need to stick in the parity level. 
3268 */ 3269 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 3270 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 3271 &value) == 0); 3272 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 3273 (u_longlong_t)value); 3274 path = buf; 3275 } 3276 3277 /* 3278 * We identify each top-level vdev by using a <type-id> 3279 * naming convention. 3280 */ 3281 if (verbose) { 3282 uint64_t id; 3283 3284 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 3285 &id) == 0); 3286 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 3287 (u_longlong_t)id); 3288 path = buf; 3289 } 3290 } 3291 3292 return (zfs_strdup(hdl, path)); 3293} 3294 3295static int 3296zbookmark_compare(const void *a, const void *b) 3297{ 3298 return (memcmp(a, b, sizeof (zbookmark_t))); 3299} 3300 3301/* 3302 * Retrieve the persistent error log, uniquify the members, and return to the 3303 * caller. 3304 */ 3305int 3306zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 3307{ 3308 zfs_cmd_t zc = { 0 }; 3309 uint64_t count; 3310 zbookmark_t *zb = NULL; 3311 int i; 3312 3313 /* 3314 * Retrieve the raw error list from the kernel. If the number of errors 3315 * has increased, allocate more space and continue until we get the 3316 * entire list. 
3317 */ 3318 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3319 &count) == 0); 3320 if (count == 0) 3321 return (0); 3322 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3323 count * sizeof (zbookmark_t))) == (uintptr_t)NULL) 3324 return (-1); 3325 zc.zc_nvlist_dst_size = count; 3326 (void) strcpy(zc.zc_name, zhp->zpool_name); 3327 for (;;) { 3328 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3329 &zc) != 0) { 3330 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3331 if (errno == ENOMEM) { 3332 count = zc.zc_nvlist_dst_size; 3333 if ((zc.zc_nvlist_dst = (uintptr_t) 3334 zfs_alloc(zhp->zpool_hdl, count * 3335 sizeof (zbookmark_t))) == (uintptr_t)NULL) 3336 return (-1); 3337 } else { 3338 return (-1); 3339 } 3340 } else { 3341 break; 3342 } 3343 } 3344 3345 /* 3346 * Sort the resulting bookmarks. This is a little confusing due to the 3347 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last 3348 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks 3349 * _not_ copied as part of the process. So we point the start of our 3350 * array appropriate and decrement the total number of elements. 3351 */ 3352 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) + 3353 zc.zc_nvlist_dst_size; 3354 count -= zc.zc_nvlist_dst_size; 3355 3356 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare); 3357 3358 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 3359 3360 /* 3361 * Fill in the nverrlistp with nvlist's of dataset and object numbers. 
3362 */ 3363 for (i = 0; i < count; i++) { 3364 nvlist_t *nv; 3365 3366 /* ignoring zb_blkid and zb_level for now */ 3367 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 3368 zb[i-1].zb_object == zb[i].zb_object) 3369 continue; 3370 3371 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 3372 goto nomem; 3373 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 3374 zb[i].zb_objset) != 0) { 3375 nvlist_free(nv); 3376 goto nomem; 3377 } 3378 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 3379 zb[i].zb_object) != 0) { 3380 nvlist_free(nv); 3381 goto nomem; 3382 } 3383 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 3384 nvlist_free(nv); 3385 goto nomem; 3386 } 3387 nvlist_free(nv); 3388 } 3389 3390 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3391 return (0); 3392 3393nomem: 3394 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3395 return (no_memory(zhp->zpool_hdl)); 3396} 3397 3398/* 3399 * Upgrade a ZFS pool to the latest on-disk version. 3400 */ 3401int 3402zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 3403{ 3404 zfs_cmd_t zc = { 0 }; 3405 libzfs_handle_t *hdl = zhp->zpool_hdl; 3406 3407 (void) strcpy(zc.zc_name, zhp->zpool_name); 3408 zc.zc_cookie = new_version; 3409 3410 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 3411 return (zpool_standard_error_fmt(hdl, errno, 3412 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 3413 zhp->zpool_name)); 3414 return (0); 3415} 3416 3417void 3418zpool_set_history_str(const char *subcommand, int argc, char **argv, 3419 char *history_str) 3420{ 3421 int i; 3422 3423 (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN); 3424 for (i = 1; i < argc; i++) { 3425 if (strlen(history_str) + 1 + strlen(argv[i]) > 3426 HIS_MAX_RECORD_LEN) 3427 break; 3428 (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN); 3429 (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN); 3430 } 3431} 3432 3433/* 3434 * Stage command history for logging. 
3435 */ 3436int 3437zpool_stage_history(libzfs_handle_t *hdl, const char *history_str) 3438{ 3439 if (history_str == NULL) 3440 return (EINVAL); 3441 3442 if (strlen(history_str) > HIS_MAX_RECORD_LEN) 3443 return (EINVAL); 3444 3445 if (hdl->libzfs_log_str != NULL) 3446 free(hdl->libzfs_log_str); 3447 3448 if ((hdl->libzfs_log_str = strdup(history_str)) == NULL) 3449 return (no_memory(hdl)); 3450 3451 return (0); 3452} 3453 3454/* 3455 * Perform ioctl to get some command history of a pool. 3456 * 3457 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 3458 * logical offset of the history buffer to start reading from. 3459 * 3460 * Upon return, 'off' is the next logical offset to read from and 3461 * 'len' is the actual amount of bytes read into 'buf'. 3462 */ 3463static int 3464get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3465{ 3466 zfs_cmd_t zc = { 0 }; 3467 libzfs_handle_t *hdl = zhp->zpool_hdl; 3468 3469 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3470 3471 zc.zc_history = (uint64_t)(uintptr_t)buf; 3472 zc.zc_history_len = *len; 3473 zc.zc_history_offset = *off; 3474 3475 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3476 switch (errno) { 3477 case EPERM: 3478 return (zfs_error_fmt(hdl, EZFS_PERM, 3479 dgettext(TEXT_DOMAIN, 3480 "cannot show history for pool '%s'"), 3481 zhp->zpool_name)); 3482 case ENOENT: 3483 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3484 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3485 "'%s'"), zhp->zpool_name)); 3486 case ENOTSUP: 3487 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3488 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3489 "'%s', pool must be upgraded"), zhp->zpool_name)); 3490 default: 3491 return (zpool_standard_error_fmt(hdl, errno, 3492 dgettext(TEXT_DOMAIN, 3493 "cannot get history for '%s'"), zhp->zpool_name)); 3494 } 3495 } 3496 3497 *len = zc.zc_history_len; 3498 *off = zc.zc_history_offset; 3499 3500 return (0); 3501} 3502 
3503/* 3504 * Process the buffer of nvlists, unpacking and storing each nvlist record 3505 * into 'records'. 'leftover' is set to the number of bytes that weren't 3506 * processed as there wasn't a complete record. 3507 */ 3508int 3509zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3510 nvlist_t ***records, uint_t *numrecords) 3511{ 3512 uint64_t reclen; 3513 nvlist_t *nv; 3514 int i; 3515 3516 while (bytes_read > sizeof (reclen)) { 3517 3518 /* get length of packed record (stored as little endian) */ 3519 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3520 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3521 3522 if (bytes_read < sizeof (reclen) + reclen) 3523 break; 3524 3525 /* unpack record */ 3526 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3527 return (ENOMEM); 3528 bytes_read -= sizeof (reclen) + reclen; 3529 buf += sizeof (reclen) + reclen; 3530 3531 /* add record to nvlist array */ 3532 (*numrecords)++; 3533 if (ISP2(*numrecords + 1)) { 3534 *records = realloc(*records, 3535 *numrecords * 2 * sizeof (nvlist_t *)); 3536 } 3537 (*records)[*numrecords - 1] = nv; 3538 } 3539 3540 *leftover = bytes_read; 3541 return (0); 3542} 3543 3544#define HIS_BUF_LEN (128*1024) 3545 3546/* 3547 * Retrieve the command history of a pool. 
3548 */ 3549int 3550zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3551{ 3552 char buf[HIS_BUF_LEN]; 3553 uint64_t off = 0; 3554 nvlist_t **records = NULL; 3555 uint_t numrecords = 0; 3556 int err, i; 3557 3558 do { 3559 uint64_t bytes_read = sizeof (buf); 3560 uint64_t leftover; 3561 3562 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3563 break; 3564 3565 /* if nothing else was read in, we're at EOF, just return */ 3566 if (!bytes_read) 3567 break; 3568 3569 if ((err = zpool_history_unpack(buf, bytes_read, 3570 &leftover, &records, &numrecords)) != 0) 3571 break; 3572 off -= leftover; 3573 3574 /* CONSTCOND */ 3575 } while (1); 3576 3577 if (!err) { 3578 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3579 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3580 records, numrecords) == 0); 3581 } 3582 for (i = 0; i < numrecords; i++) 3583 nvlist_free(records[i]); 3584 free(records); 3585 3586 return (err); 3587} 3588 3589void 3590zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3591 char *pathname, size_t len) 3592{ 3593 zfs_cmd_t zc = { 0 }; 3594 boolean_t mounted = B_FALSE; 3595 char *mntpnt = NULL; 3596 char dsname[MAXNAMELEN]; 3597 3598 if (dsobj == 0) { 3599 /* special case for the MOS */ 3600 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3601 return; 3602 } 3603 3604 /* get the dataset's name */ 3605 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3606 zc.zc_obj = dsobj; 3607 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3608 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3609 /* just write out a path of two object numbers */ 3610 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3611 dsobj, obj); 3612 return; 3613 } 3614 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3615 3616 /* find out if the dataset is mounted */ 3617 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3618 3619 /* get the corrupted object's path */ 3620 (void) strlcpy(zc.zc_name, dsname, sizeof 
(zc.zc_name)); 3621 zc.zc_obj = obj; 3622 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3623 &zc) == 0) { 3624 if (mounted) { 3625 (void) snprintf(pathname, len, "%s%s", mntpnt, 3626 zc.zc_value); 3627 } else { 3628 (void) snprintf(pathname, len, "%s:%s", 3629 dsname, zc.zc_value); 3630 } 3631 } else { 3632 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3633 } 3634 free(mntpnt); 3635} 3636 3637#ifdef sun 3638/* 3639 * Read the EFI label from the config, if a label does not exist then 3640 * pass back the error to the caller. If the caller has passed a non-NULL 3641 * diskaddr argument then we set it to the starting address of the EFI 3642 * partition. 3643 */ 3644static int 3645read_efi_label(nvlist_t *config, diskaddr_t *sb) 3646{ 3647 char *path; 3648 int fd; 3649 char diskname[MAXPATHLEN]; 3650 int err = -1; 3651 3652 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3653 return (err); 3654 3655 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT, 3656 strrchr(path, '/')); 3657 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3658 struct dk_gpt *vtoc; 3659 3660 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3661 if (sb != NULL) 3662 *sb = vtoc->efi_parts[0].p_start; 3663 efi_free(vtoc); 3664 } 3665 (void) close(fd); 3666 } 3667 return (err); 3668} 3669 3670/* 3671 * determine where a partition starts on a disk in the current 3672 * configuration 3673 */ 3674static diskaddr_t 3675find_start_block(nvlist_t *config) 3676{ 3677 nvlist_t **child; 3678 uint_t c, children; 3679 diskaddr_t sb = MAXOFFSET_T; 3680 uint64_t wholedisk; 3681 3682 if (nvlist_lookup_nvlist_array(config, 3683 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3684 if (nvlist_lookup_uint64(config, 3685 ZPOOL_CONFIG_WHOLE_DISK, 3686 &wholedisk) != 0 || !wholedisk) { 3687 return (MAXOFFSET_T); 3688 } 3689 if (read_efi_label(config, &sb) < 0) 3690 sb = MAXOFFSET_T; 3691 return (sb); 3692 } 3693 3694 for (c = 0; c < children; c++) { 3695 sb = 
find_start_block(child[c]); 3696 if (sb != MAXOFFSET_T) { 3697 return (sb); 3698 } 3699 } 3700 return (MAXOFFSET_T); 3701} 3702#endif /* sun */ 3703 3704/* 3705 * Label an individual disk. The name provided is the short name, 3706 * stripped of any leading /dev path. 3707 */ 3708int 3709zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name) 3710{ 3711#ifdef sun 3712 char path[MAXPATHLEN]; 3713 struct dk_gpt *vtoc; 3714 int fd; 3715 size_t resv = EFI_MIN_RESV_SIZE; 3716 uint64_t slice_size; 3717 diskaddr_t start_block; 3718 char errbuf[1024]; 3719 3720 /* prepare an error message just in case */ 3721 (void) snprintf(errbuf, sizeof (errbuf), 3722 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3723 3724 if (zhp) { 3725 nvlist_t *nvroot; 3726
|
3706 if (pool_is_bootable(zhp)) {
| 3727 if (zpool_is_bootable(zhp)) {
|
3707 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3708 "EFI labeled devices are not supported on root " 3709 "pools.")); 3710 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3711 } 3712 3713 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3714 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3715 3716 if (zhp->zpool_start_block == 0) 3717 start_block = find_start_block(nvroot); 3718 else 3719 start_block = zhp->zpool_start_block; 3720 zhp->zpool_start_block = start_block; 3721 } else { 3722 /* new pool */ 3723 start_block = NEW_START_BLOCK; 3724 } 3725 3726 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name, 3727 BACKUP_SLICE); 3728 3729 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3730 /* 3731 * This shouldn't happen. We've long since verified that this 3732 * is a valid device. 3733 */ 3734 zfs_error_aux(hdl, 3735 dgettext(TEXT_DOMAIN, "unable to open device")); 3736 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 3737 } 3738 3739 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 3740 /* 3741 * The only way this can fail is if we run out of memory, or we 3742 * were unable to read the disk's capacity 3743 */ 3744 if (errno == ENOMEM) 3745 (void) no_memory(hdl); 3746 3747 (void) close(fd); 3748 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3749 "unable to read disk capacity"), name); 3750 3751 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 3752 } 3753 3754 slice_size = vtoc->efi_last_u_lba + 1; 3755 slice_size -= EFI_MIN_RESV_SIZE; 3756 if (start_block == MAXOFFSET_T) 3757 start_block = NEW_START_BLOCK; 3758 slice_size -= start_block; 3759 3760 vtoc->efi_parts[0].p_start = start_block; 3761 vtoc->efi_parts[0].p_size = slice_size; 3762 3763 /* 3764 * Why we use V_USR: V_BACKUP confuses users, and is considered 3765 * disposable by some EFI utilities (since EFI doesn't have a backup 3766 * slice). V_UNASSIGNED is supposed to be used only for zero size 3767 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 3768 * etc. 
were all pretty specific. V_USR is as close to reality as we 3769 * can get, in the absence of V_OTHER. 3770 */ 3771 vtoc->efi_parts[0].p_tag = V_USR; 3772 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 3773 3774 vtoc->efi_parts[8].p_start = slice_size + start_block; 3775 vtoc->efi_parts[8].p_size = resv; 3776 vtoc->efi_parts[8].p_tag = V_RESERVED; 3777 3778 if (efi_write(fd, vtoc) != 0) { 3779 /* 3780 * Some block drivers (like pcata) may not support EFI 3781 * GPT labels. Print out a helpful error message dir- 3782 * ecting the user to manually label the disk and give 3783 * a specific slice. 3784 */ 3785 (void) close(fd); 3786 efi_free(vtoc); 3787 3788 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3789 "try using fdisk(1M) and then provide a specific slice")); 3790 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 3791 } 3792 3793 (void) close(fd); 3794 efi_free(vtoc); 3795#endif /* sun */ 3796 return (0); 3797} 3798 3799static boolean_t 3800supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 3801{ 3802 char *type; 3803 nvlist_t **child; 3804 uint_t children, c; 3805 3806 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 3807 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 || 3808 strcmp(type, VDEV_TYPE_FILE) == 0 || 3809 strcmp(type, VDEV_TYPE_LOG) == 0 || 3810 strcmp(type, VDEV_TYPE_HOLE) == 0 || 3811 strcmp(type, VDEV_TYPE_MISSING) == 0) { 3812 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3813 "vdev type '%s' is not supported"), type); 3814 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 3815 return (B_FALSE); 3816 } 3817 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 3818 &child, &children) == 0) { 3819 for (c = 0; c < children; c++) { 3820 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 3821 return (B_FALSE); 3822 } 3823 } 3824 return (B_TRUE); 3825} 3826 3827/* 3828 * check if this zvol is allowable for use as a dump device; zero if 3829 * it is, > 0 if it isn't, < 0 if it isn't a zvol 3830 */ 3831int 
3832zvol_check_dump_config(char *arg) 3833{ 3834 zpool_handle_t *zhp = NULL; 3835 nvlist_t *config, *nvroot; 3836 char *p, *volname; 3837 nvlist_t **top; 3838 uint_t toplevels; 3839 libzfs_handle_t *hdl; 3840 char errbuf[1024]; 3841 char poolname[ZPOOL_MAXNAMELEN]; 3842 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 3843 int ret = 1; 3844 3845 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 3846 return (-1); 3847 } 3848 3849 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 3850 "dump is not supported on device '%s'"), arg); 3851 3852 if ((hdl = libzfs_init()) == NULL) 3853 return (1); 3854 libzfs_print_on_error(hdl, B_TRUE); 3855 3856 volname = arg + pathlen; 3857 3858 /* check the configuration of the pool */ 3859 if ((p = strchr(volname, '/')) == NULL) { 3860 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3861 "malformed dataset name")); 3862 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 3863 return (1); 3864 } else if (p - volname >= ZFS_MAXNAMELEN) { 3865 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3866 "dataset name is too long")); 3867 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 3868 return (1); 3869 } else { 3870 (void) strncpy(poolname, volname, p - volname); 3871 poolname[p - volname] = '\0'; 3872 } 3873 3874 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 3875 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3876 "could not open pool '%s'"), poolname); 3877 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 3878 goto out; 3879 } 3880 config = zpool_get_config(zhp, NULL); 3881 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3882 &nvroot) != 0) { 3883 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3884 "could not obtain vdev configuration for '%s'"), poolname); 3885 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 3886 goto out; 3887 } 3888 3889 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3890 &top, &toplevels) == 0); 3891 if (toplevels != 1) { 3892 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3893 "'%s' has multiple top level vdevs"), 
poolname); 3894 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf); 3895 goto out; 3896 } 3897 3898 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 3899 goto out; 3900 } 3901 ret = 0; 3902 3903out: 3904 if (zhp) 3905 zpool_close(zhp); 3906 libzfs_fini(hdl); 3907 return (ret); 3908}
| 3728 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3729 "EFI labeled devices are not supported on root " 3730 "pools.")); 3731 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3732 } 3733 3734 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3735 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3736 3737 if (zhp->zpool_start_block == 0) 3738 start_block = find_start_block(nvroot); 3739 else 3740 start_block = zhp->zpool_start_block; 3741 zhp->zpool_start_block = start_block; 3742 } else { 3743 /* new pool */ 3744 start_block = NEW_START_BLOCK; 3745 } 3746 3747 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name, 3748 BACKUP_SLICE); 3749 3750 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3751 /* 3752 * This shouldn't happen. We've long since verified that this 3753 * is a valid device. 3754 */ 3755 zfs_error_aux(hdl, 3756 dgettext(TEXT_DOMAIN, "unable to open device")); 3757 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 3758 } 3759 3760 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 3761 /* 3762 * The only way this can fail is if we run out of memory, or we 3763 * were unable to read the disk's capacity 3764 */ 3765 if (errno == ENOMEM) 3766 (void) no_memory(hdl); 3767 3768 (void) close(fd); 3769 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3770 "unable to read disk capacity"), name); 3771 3772 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 3773 } 3774 3775 slice_size = vtoc->efi_last_u_lba + 1; 3776 slice_size -= EFI_MIN_RESV_SIZE; 3777 if (start_block == MAXOFFSET_T) 3778 start_block = NEW_START_BLOCK; 3779 slice_size -= start_block; 3780 3781 vtoc->efi_parts[0].p_start = start_block; 3782 vtoc->efi_parts[0].p_size = slice_size; 3783 3784 /* 3785 * Why we use V_USR: V_BACKUP confuses users, and is considered 3786 * disposable by some EFI utilities (since EFI doesn't have a backup 3787 * slice). V_UNASSIGNED is supposed to be used only for zero size 3788 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 3789 * etc. 
were all pretty specific. V_USR is as close to reality as we 3790 * can get, in the absence of V_OTHER. 3791 */ 3792 vtoc->efi_parts[0].p_tag = V_USR; 3793 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 3794 3795 vtoc->efi_parts[8].p_start = slice_size + start_block; 3796 vtoc->efi_parts[8].p_size = resv; 3797 vtoc->efi_parts[8].p_tag = V_RESERVED; 3798 3799 if (efi_write(fd, vtoc) != 0) { 3800 /* 3801 * Some block drivers (like pcata) may not support EFI 3802 * GPT labels. Print out a helpful error message dir- 3803 * ecting the user to manually label the disk and give 3804 * a specific slice. 3805 */ 3806 (void) close(fd); 3807 efi_free(vtoc); 3808 3809 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3810 "try using fdisk(1M) and then provide a specific slice")); 3811 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 3812 } 3813 3814 (void) close(fd); 3815 efi_free(vtoc); 3816#endif /* sun */ 3817 return (0); 3818} 3819 3820static boolean_t 3821supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 3822{ 3823 char *type; 3824 nvlist_t **child; 3825 uint_t children, c; 3826 3827 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 3828 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 || 3829 strcmp(type, VDEV_TYPE_FILE) == 0 || 3830 strcmp(type, VDEV_TYPE_LOG) == 0 || 3831 strcmp(type, VDEV_TYPE_HOLE) == 0 || 3832 strcmp(type, VDEV_TYPE_MISSING) == 0) { 3833 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3834 "vdev type '%s' is not supported"), type); 3835 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 3836 return (B_FALSE); 3837 } 3838 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 3839 &child, &children) == 0) { 3840 for (c = 0; c < children; c++) { 3841 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 3842 return (B_FALSE); 3843 } 3844 } 3845 return (B_TRUE); 3846} 3847 3848/* 3849 * check if this zvol is allowable for use as a dump device; zero if 3850 * it is, > 0 if it isn't, < 0 if it isn't a zvol 3851 */ 3852int 
3853zvol_check_dump_config(char *arg) 3854{ 3855 zpool_handle_t *zhp = NULL; 3856 nvlist_t *config, *nvroot; 3857 char *p, *volname; 3858 nvlist_t **top; 3859 uint_t toplevels; 3860 libzfs_handle_t *hdl; 3861 char errbuf[1024]; 3862 char poolname[ZPOOL_MAXNAMELEN]; 3863 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 3864 int ret = 1; 3865 3866 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 3867 return (-1); 3868 } 3869 3870 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 3871 "dump is not supported on device '%s'"), arg); 3872 3873 if ((hdl = libzfs_init()) == NULL) 3874 return (1); 3875 libzfs_print_on_error(hdl, B_TRUE); 3876 3877 volname = arg + pathlen; 3878 3879 /* check the configuration of the pool */ 3880 if ((p = strchr(volname, '/')) == NULL) { 3881 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3882 "malformed dataset name")); 3883 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 3884 return (1); 3885 } else if (p - volname >= ZFS_MAXNAMELEN) { 3886 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3887 "dataset name is too long")); 3888 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 3889 return (1); 3890 } else { 3891 (void) strncpy(poolname, volname, p - volname); 3892 poolname[p - volname] = '\0'; 3893 } 3894 3895 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 3896 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3897 "could not open pool '%s'"), poolname); 3898 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 3899 goto out; 3900 } 3901 config = zpool_get_config(zhp, NULL); 3902 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3903 &nvroot) != 0) { 3904 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3905 "could not obtain vdev configuration for '%s'"), poolname); 3906 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 3907 goto out; 3908 } 3909 3910 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3911 &top, &toplevels) == 0); 3912 if (toplevels != 1) { 3913 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3914 "'%s' has multiple top level vdevs"), 
poolname); 3915 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf); 3916 goto out; 3917 } 3918 3919 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 3920 goto out; 3921 } 3922 ret = 0; 3923 3924out: 3925 if (zhp) 3926 zpool_close(zhp); 3927 libzfs_fini(hdl); 3928 return (ret); 3929}
|