1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
25 */
26
27#include <stdio.h>
28#include <unistd.h>
29#include <stdio_ext.h>
30#include <stdlib.h>
31#include <ctype.h>
32#include <sys/zfs_context.h>
33#include <sys/spa.h>
34#include <sys/spa_impl.h>
35#include <sys/dmu.h>
36#include <sys/zap.h>
37#include <sys/fs/zfs.h>
38#include <sys/zfs_znode.h>
39#include <sys/zfs_sa.h>
40#include <sys/sa.h>
41#include <sys/sa_impl.h>
42#include <sys/vdev.h>
43#include <sys/vdev_impl.h>
44#include <sys/metaslab_impl.h>
45#include <sys/dmu_objset.h>
46#include <sys/dsl_dir.h>
47#include <sys/dsl_dataset.h>
48#include <sys/dsl_pool.h>
49#include <sys/dbuf.h>
50#include <sys/zil.h>
51#include <sys/zil_impl.h>
52#include <sys/stat.h>
53#include <sys/resource.h>
54#include <sys/dmu_traverse.h>
55#include <sys/zio_checksum.h>
56#include <sys/zio_compress.h>
57#include <sys/zfs_fuid.h>
58#include <sys/arc.h>
59#include <sys/ddt.h>
60#include <sys/zfeature.h>
61#include <zfs_comutil.h>
62#undef ZFS_MAXNAMELEN
63#undef verify
64#include <libzfs.h>
65
66#define ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ? \
67 zio_compress_table[(idx)].ci_name : "UNKNOWN")
68#define ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ? \
69 zio_checksum_table[(idx)].ci_name : "UNKNOWN")
70#define ZDB_OT_NAME(idx) ((idx) < DMU_OT_NUMTYPES ? \
71 dmu_ot[(idx)].ot_name : DMU_OT_IS_VALID(idx) ? \
72 dmu_ot_byteswap[DMU_OT_BYTESWAP(idx)].ob_name : "UNKNOWN")
73#define ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) : \
74 (((idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA) ? \
75 DMU_OT_ZAP_OTHER : DMU_OT_NUMTYPES))
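/*
 * The lookup macros above defensively map on-disk indices to printable
 * names, falling back to "UNKNOWN" when an index is out of range so a
 * damaged dnode cannot walk past the static tables.  Illustrative sketch
 * (exact table strings may differ):
 *
 *	ZDB_CHECKSUM_NAME(ZIO_CHECKSUM_FLETCHER_4)	likely "fletcher4"
 *	ZDB_CHECKSUM_NAME(250)				"UNKNOWN"
 */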
76
77#ifndef lint
78extern boolean_t zfs_recover;
79extern uint64_t zfs_arc_max, zfs_arc_meta_limit;
80extern int zfs_vdev_async_read_max_active;
81#else
82boolean_t zfs_recover;
83uint64_t zfs_arc_max, zfs_arc_meta_limit;
84int zfs_vdev_async_read_max_active;
85#endif
86
87const char cmdname[] = "zdb";
88uint8_t dump_opt[256];
89
90typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);
91
92extern void dump_intent_log(zilog_t *);
93uint64_t *zopt_object = NULL;
94int zopt_objects = 0;
95libzfs_handle_t *g_zfs;
96uint64_t max_inflight = 1000;
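/*
 * dump_opt[] is indexed by option character and counts how many times that
 * option appeared on the command line, which is how repeating a flag
 * (e.g. -dd or -bb) raises the verbosity of just that dumper.  max_inflight
 * bounds the number of concurrent checksumming I/Os issued during block
 * traversal and can be overridden with -I.
 */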
97
98/*
99 * These libumem hooks provide a reasonable set of defaults for the allocator's
100 * debugging facilities.
101 */
102const char *
103_umem_debug_init()
104{
105 return ("default,verbose"); /* $UMEM_DEBUG setting */
106}
107
108const char *
109_umem_logging_init(void)
110{
111 return ("fail,contents"); /* $UMEM_LOGGING setting */
112}
113
114static void
115usage(void)
116{
117 (void) fprintf(stderr,
118 "Usage: %s [-CumMdibcsDvhLXFPA] [-t txg] [-e [-p path...]] "
119 "[-U config] [-I inflight I/Os] [-x dumpdir] poolname [object...]\n"
120 " %s [-divPA] [-e -p path...] [-U config] dataset "
121 "[object...]\n"
122 " %s -mM [-LXFPA] [-t txg] [-e [-p path...]] [-U config] "
123 "poolname [vdev [metaslab...]]\n"
124 " %s -R [-A] [-e [-p path...]] poolname "
125 "vdev:offset:size[:flags]\n"
126 " %s -S [-PA] [-e [-p path...]] [-U config] poolname\n"
127 " %s -l [-uA] device\n"
128 " %s -C [-A] [-U config]\n\n",
129 cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname);
130
131 (void) fprintf(stderr, " Dataset name must include at least one "
132 "separator character '/' or '@'\n");
133 (void) fprintf(stderr, " If dataset name is specified, only that "
134 "dataset is dumped\n");
135 (void) fprintf(stderr, " If object numbers are specified, only "
136 "those objects are dumped\n\n");
137 (void) fprintf(stderr, " Options to control amount of output:\n");
138 (void) fprintf(stderr, " -u uberblock\n");
139 (void) fprintf(stderr, " -d dataset(s)\n");
140 (void) fprintf(stderr, " -i intent logs\n");
141 (void) fprintf(stderr, " -C config (or cachefile if alone)\n");
142 (void) fprintf(stderr, " -h pool history\n");
143 (void) fprintf(stderr, " -b block statistics\n");
144 (void) fprintf(stderr, " -m metaslabs\n");
145 (void) fprintf(stderr, " -M metaslab groups\n");
146 (void) fprintf(stderr, " -c checksum all metadata (twice for "
147 "all data) blocks\n");
148 (void) fprintf(stderr, " -s report stats on zdb's I/O\n");
149 (void) fprintf(stderr, " -D dedup statistics\n");
150 (void) fprintf(stderr, " -S simulate dedup to measure effect\n");
151 (void) fprintf(stderr, " -v verbose (applies to all others)\n");
152 (void) fprintf(stderr, " -l dump label contents\n");
153 (void) fprintf(stderr, " -L disable leak tracking (do not "
154 "load spacemaps)\n");
155 (void) fprintf(stderr, " -R read and display block from a "
156 "device\n\n");
157 (void) fprintf(stderr, " Below options are intended for use "
158 "with other options:\n");
159 (void) fprintf(stderr, " -A ignore assertions (-A), enable "
160 "panic recovery (-AA) or both (-AAA)\n");
161 (void) fprintf(stderr, " -F attempt automatic rewind within "
162 "safe range of transaction groups\n");
163 (void) fprintf(stderr, " -U <cachefile_path> -- use alternate "
164 "cachefile\n");
165 (void) fprintf(stderr, " -X attempt extreme rewind (does not "
166 "work with dataset)\n");
167 (void) fprintf(stderr, " -e pool is exported/destroyed/"
168 "has altroot/not in a cachefile\n");
169 (void) fprintf(stderr, " -p <path> -- use one or more with "
170 "-e to specify path to vdev dir\n");
171 (void) fprintf(stderr, " -x <dumpdir> -- "
172 "dump all read blocks into specified directory\n");
173 (void) fprintf(stderr, " -P print numbers in parseable form\n");
174 (void) fprintf(stderr, " -t <txg> -- highest txg to use when "
175 "searching for uberblocks\n");
176 (void) fprintf(stderr, " -I <number of inflight I/Os> -- "
177 "specify the maximum number of "
178 "checksumming I/Os [default is 200]\n");
179 (void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
180 "to make only that option verbose\n");
181 (void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
182 exit(1);
183}
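/*
 * A few illustrative invocations assembled from the usage text above (pool,
 * dataset and device names are placeholders, not required values):
 *
 *	zdb mypool			dump everything, non-verbosely
 *	zdb -dd mypool/fs		dump one dataset with extra detail
 *	zdb -l /dev/dsk/c0t0d0s0	dump the label of a single device
 *	zdb -R mypool 0:1000:200	read and display one block given as
 *					vdev:offset:size
 */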
184
185/*
186 * Called for usage errors that are discovered after a call to spa_open(),
187 * dmu_bonus_hold(), or pool_match(). abort() is called for other errors.
188 */
189
190static void
191fatal(const char *fmt, ...)
192{
193 va_list ap;
194
195 va_start(ap, fmt);
196 (void) fprintf(stderr, "%s: ", cmdname);
197 (void) vfprintf(stderr, fmt, ap);
198 va_end(ap);
199 (void) fprintf(stderr, "\n");
200
201 exit(1);
202}
203
204/* ARGSUSED */
205static void
206dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
207{
208 nvlist_t *nv;
209 size_t nvsize = *(uint64_t *)data;
210 char *packed = umem_alloc(nvsize, UMEM_NOFAIL);
211
212 VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));
213
214 VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);
215
216 umem_free(packed, nvsize);
217
218 dump_nvlist(nv, 8);
219
220 nvlist_free(nv);
221}
222
223/* ARGSUSED */
224static void
225dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size)
226{
227 spa_history_phys_t *shp = data;
228
229 if (shp == NULL)
230 return;
231
232 (void) printf("\t\tpool_create_len = %llu\n",
233 (u_longlong_t)shp->sh_pool_create_len);
234 (void) printf("\t\tphys_max_off = %llu\n",
235 (u_longlong_t)shp->sh_phys_max_off);
236 (void) printf("\t\tbof = %llu\n",
237 (u_longlong_t)shp->sh_bof);
238 (void) printf("\t\teof = %llu\n",
239 (u_longlong_t)shp->sh_eof);
240 (void) printf("\t\trecords_lost = %llu\n",
241 (u_longlong_t)shp->sh_records_lost);
242}
243
244static void
245zdb_nicenum(uint64_t num, char *buf)
246{
247 if (dump_opt['P'])
248 (void) sprintf(buf, "%llu", (longlong_t)num);
249 else
250 nicenum(num, buf);
251}
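/*
 * Example of the two styles (assuming nicenum() scales by powers of 1024,
 * as in libcmdutils): zdb_nicenum(1073741824, buf) yields "1G" by default
 * and the exact "1073741824" when -P is given.
 */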
252
253const char histo_stars[] = "****************************************";
254const int histo_width = sizeof (histo_stars) - 1;
255
256static void
257dump_histogram(const uint64_t *histo, int size, int offset)
258{
259 int i;
260 int minidx = size - 1;
261 int maxidx = 0;
262 uint64_t max = 0;
263
264 for (i = 0; i < size; i++) {
265 if (histo[i] > max)
266 max = histo[i];
267 if (histo[i] > 0 && i > maxidx)
268 maxidx = i;
269 if (histo[i] > 0 && i < minidx)
270 minidx = i;
271 }
272
273 if (max < histo_width)
274 max = histo_width;
275
276 for (i = minidx; i <= maxidx; i++) {
277 (void) printf("\t\t\t%3u: %6llu %s\n",
278 i + offset, (u_longlong_t)histo[i],
279 &histo_stars[(max - histo[i]) * histo_width / max]);
280 }
281}
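/*
 * The bar printed above is proportional to histo[i] / max.  Worked example
 * with hypothetical numbers: for histo_width = 40 and max = 80, a bucket
 * holding 40 entries indexes histo_stars at (80 - 40) * 40 / 80 = 20 and so
 * prints the final 20 of the 40 stars, i.e. a half-length bar.
 */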
282
283static void
284dump_zap_stats(objset_t *os, uint64_t object)
285{
286 int error;
287 zap_stats_t zs;
288
289 error = zap_get_stats(os, object, &zs);
290 if (error)
291 return;
292
293 if (zs.zs_ptrtbl_len == 0) {
294 ASSERT(zs.zs_num_blocks == 1);
295 (void) printf("\tmicrozap: %llu bytes, %llu entries\n",
296 (u_longlong_t)zs.zs_blocksize,
297 (u_longlong_t)zs.zs_num_entries);
298 return;
299 }
300
301 (void) printf("\tFat ZAP stats:\n");
302
303 (void) printf("\t\tPointer table:\n");
304 (void) printf("\t\t\t%llu elements\n",
305 (u_longlong_t)zs.zs_ptrtbl_len);
306 (void) printf("\t\t\tzt_blk: %llu\n",
307 (u_longlong_t)zs.zs_ptrtbl_zt_blk);
308 (void) printf("\t\t\tzt_numblks: %llu\n",
309 (u_longlong_t)zs.zs_ptrtbl_zt_numblks);
310 (void) printf("\t\t\tzt_shift: %llu\n",
311 (u_longlong_t)zs.zs_ptrtbl_zt_shift);
312 (void) printf("\t\t\tzt_blks_copied: %llu\n",
313 (u_longlong_t)zs.zs_ptrtbl_blks_copied);
314 (void) printf("\t\t\tzt_nextblk: %llu\n",
315 (u_longlong_t)zs.zs_ptrtbl_nextblk);
316
317 (void) printf("\t\tZAP entries: %llu\n",
318 (u_longlong_t)zs.zs_num_entries);
319 (void) printf("\t\tLeaf blocks: %llu\n",
320 (u_longlong_t)zs.zs_num_leafs);
321 (void) printf("\t\tTotal blocks: %llu\n",
322 (u_longlong_t)zs.zs_num_blocks);
323 (void) printf("\t\tzap_block_type: 0x%llx\n",
324 (u_longlong_t)zs.zs_block_type);
325 (void) printf("\t\tzap_magic: 0x%llx\n",
326 (u_longlong_t)zs.zs_magic);
327 (void) printf("\t\tzap_salt: 0x%llx\n",
328 (u_longlong_t)zs.zs_salt);
329
330 (void) printf("\t\tLeafs with 2^n pointers:\n");
331 dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0);
332
333 (void) printf("\t\tBlocks with n*5 entries:\n");
334 dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0);
335
336 (void) printf("\t\tBlocks n/10 full:\n");
337 dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0);
338
339 (void) printf("\t\tEntries with n chunks:\n");
340 dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0);
341
342 (void) printf("\t\tBuckets with n entries:\n");
343 dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0);
344}
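/*
 * In the output above, a ZAP whose pointer-table length is zero is a
 * microzap (a single block of fixed-size entries), so only its block size
 * and entry count are shown; anything else is a fat ZAP and gets the full
 * pointer-table, leaf-block and histogram breakdown.
 */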
345
346/*ARGSUSED*/
347static void
348dump_none(objset_t *os, uint64_t object, void *data, size_t size)
349{
350}
351
352/*ARGSUSED*/
353static void
354dump_unknown(objset_t *os, uint64_t object, void *data, size_t size)
355{
356 (void) printf("\tUNKNOWN OBJECT TYPE\n");
357}
358
359/*ARGSUSED*/
360void
361dump_uint8(objset_t *os, uint64_t object, void *data, size_t size)
362{
363}
364
365/*ARGSUSED*/
366static void
367dump_uint64(objset_t *os, uint64_t object, void *data, size_t size)
368{
369}
370
371/*ARGSUSED*/
372static void
373dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
374{
375 zap_cursor_t zc;
376 zap_attribute_t attr;
377 void *prop;
378 int i;
379
380 dump_zap_stats(os, object);
381 (void) printf("\n");
382
383 for (zap_cursor_init(&zc, os, object);
384 zap_cursor_retrieve(&zc, &attr) == 0;
385 zap_cursor_advance(&zc)) {
386 (void) printf("\t\t%s = ", attr.za_name);
387 if (attr.za_num_integers == 0) {
388 (void) printf("\n");
389 continue;
390 }
391 prop = umem_zalloc(attr.za_num_integers *
392 attr.za_integer_length, UMEM_NOFAIL);
393 (void) zap_lookup(os, object, attr.za_name,
394 attr.za_integer_length, attr.za_num_integers, prop);
395 if (attr.za_integer_length == 1) {
396 (void) printf("%s", (char *)prop);
397 } else {
398 for (i = 0; i < attr.za_num_integers; i++) {
399 switch (attr.za_integer_length) {
400 case 2:
401 (void) printf("%u ",
402 ((uint16_t *)prop)[i]);
403 break;
404 case 4:
405 (void) printf("%u ",
406 ((uint32_t *)prop)[i]);
407 break;
408 case 8:
409 (void) printf("%lld ",
410 (u_longlong_t)((int64_t *)prop)[i]);
411 break;
412 }
413 }
414 }
415 (void) printf("\n");
416 umem_free(prop, attr.za_num_integers * attr.za_integer_length);
417 }
418 zap_cursor_fini(&zc);
419}
420
421/*ARGSUSED*/
422static void
423dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size)
424{
425 dump_zap_stats(os, object);
426 /* contents are printed elsewhere, properly decoded */
427}
428
429/*ARGSUSED*/
430static void
431dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size)
432{
433 zap_cursor_t zc;
434 zap_attribute_t attr;
435
436 dump_zap_stats(os, object);
437 (void) printf("\n");
438
439 for (zap_cursor_init(&zc, os, object);
440 zap_cursor_retrieve(&zc, &attr) == 0;
441 zap_cursor_advance(&zc)) {
442 (void) printf("\t\t%s = ", attr.za_name);
443 if (attr.za_num_integers == 0) {
444 (void) printf("\n");
445 continue;
446 }
447 (void) printf(" %llx : [%d:%d:%d]\n",
448 (u_longlong_t)attr.za_first_integer,
449 (int)ATTR_LENGTH(attr.za_first_integer),
450 (int)ATTR_BSWAP(attr.za_first_integer),
451 (int)ATTR_NUM(attr.za_first_integer));
452 }
453 zap_cursor_fini(&zc);
454}
455
456/*ARGSUSED*/
457static void
458dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size)
459{
460 zap_cursor_t zc;
461 zap_attribute_t attr;
462 uint16_t *layout_attrs;
463 int i;
464
465 dump_zap_stats(os, object);
466 (void) printf("\n");
467
468 for (zap_cursor_init(&zc, os, object);
469 zap_cursor_retrieve(&zc, &attr) == 0;
470 zap_cursor_advance(&zc)) {
471 (void) printf("\t\t%s = [", attr.za_name);
472 if (attr.za_num_integers == 0) {
473 (void) printf("\n");
474 continue;
475 }
476
477 VERIFY(attr.za_integer_length == 2);
478 layout_attrs = umem_zalloc(attr.za_num_integers *
479 attr.za_integer_length, UMEM_NOFAIL);
480
481 VERIFY(zap_lookup(os, object, attr.za_name,
482 attr.za_integer_length,
483 attr.za_num_integers, layout_attrs) == 0);
484
485 for (i = 0; i != attr.za_num_integers; i++)
486 (void) printf(" %d ", (int)layout_attrs[i]);
487 (void) printf("]\n");
488 umem_free(layout_attrs,
489 attr.za_num_integers * attr.za_integer_length);
490 }
491 zap_cursor_fini(&zc);
492}
493
494/*ARGSUSED*/
495static void
496dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size)
497{
498 zap_cursor_t zc;
499 zap_attribute_t attr;
500 const char *typenames[] = {
501 /* 0 */ "not specified",
502 /* 1 */ "FIFO",
503 /* 2 */ "Character Device",
504 /* 3 */ "3 (invalid)",
505 /* 4 */ "Directory",
506 /* 5 */ "5 (invalid)",
507 /* 6 */ "Block Device",
508 /* 7 */ "7 (invalid)",
509 /* 8 */ "Regular File",
510 /* 9 */ "9 (invalid)",
511 /* 10 */ "Symbolic Link",
512 /* 11 */ "11 (invalid)",
513 /* 12 */ "Socket",
514 /* 13 */ "Door",
515 /* 14 */ "Event Port",
516 /* 15 */ "15 (invalid)",
517 };
518
519 dump_zap_stats(os, object);
520 (void) printf("\n");
521
522 for (zap_cursor_init(&zc, os, object);
523 zap_cursor_retrieve(&zc, &attr) == 0;
524 zap_cursor_advance(&zc)) {
525 (void) printf("\t\t%s = %lld (type: %s)\n",
526 attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer),
527 typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]);
528 }
529 zap_cursor_fini(&zc);
530}
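/*
 * Each ZPL directory ZAP entry above packs the target object number and a
 * file-type code into a single 64-bit value; ZFS_DIRENT_OBJ() extracts the
 * object number and ZFS_DIRENT_TYPE() the type code used to index
 * typenames[].  The exact bit layout is defined in sys/zfs_znode.h rather
 * than here.
 */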
531
532int
533get_dtl_refcount(vdev_t *vd)
534{
535 int refcount = 0;
536
537 if (vd->vdev_ops->vdev_op_leaf) {
538 space_map_t *sm = vd->vdev_dtl_sm;
539
540 if (sm != NULL &&
541 sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
542 return (1);
543 return (0);
544 }
545
546 for (int c = 0; c < vd->vdev_children; c++)
547 refcount += get_dtl_refcount(vd->vdev_child[c]);
548 return (refcount);
549}
550
551int
552get_metaslab_refcount(vdev_t *vd)
553{
554 int refcount = 0;
555
556 if (vd->vdev_top == vd && !vd->vdev_removing) {
557 for (int m = 0; m < vd->vdev_ms_count; m++) {
558 space_map_t *sm = vd->vdev_ms[m]->ms_sm;
559
560 if (sm != NULL &&
561 sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
562 refcount++;
563 }
564 }
565 for (int c = 0; c < vd->vdev_children; c++)
566 refcount += get_metaslab_refcount(vd->vdev_child[c]);
567
568 return (refcount);
569}
570
571static int
572verify_spacemap_refcounts(spa_t *spa)
573{
574 uint64_t expected_refcount = 0;
575 uint64_t actual_refcount;
576
577 (void) feature_get_refcount(spa,
578 &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM],
579 &expected_refcount);
580 actual_refcount = get_dtl_refcount(spa->spa_root_vdev);
581 actual_refcount += get_metaslab_refcount(spa->spa_root_vdev);
582
583 if (expected_refcount != actual_refcount) {
584 (void) printf("space map refcount mismatch: expected %lld != "
585 "actual %lld\n",
586 (longlong_t)expected_refcount,
587 (longlong_t)actual_refcount);
588 return (2);
589 }
590 return (0);
591}
592
593static void
594dump_spacemap(objset_t *os, space_map_t *sm)
595{
596 uint64_t alloc, offset, entry;
597 char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
598 "INVALID", "INVALID", "INVALID", "INVALID" };
599
600 if (sm == NULL)
601 return;
602
603 /*
604 * Print out the freelist entries in both encoded and decoded form.
605 */
606 alloc = 0;
607 for (offset = 0; offset < space_map_length(sm);
608 offset += sizeof (entry)) {
609 uint8_t mapshift = sm->sm_shift;
610
611 VERIFY0(dmu_read(os, space_map_object(sm), offset,
612 sizeof (entry), &entry, DMU_READ_PREFETCH));
613 if (SM_DEBUG_DECODE(entry)) {
614
615 (void) printf("\t [%6llu] %s: txg %llu, pass %llu\n",
616 (u_longlong_t)(offset / sizeof (entry)),
617 ddata[SM_DEBUG_ACTION_DECODE(entry)],
618 (u_longlong_t)SM_DEBUG_TXG_DECODE(entry),
619 (u_longlong_t)SM_DEBUG_SYNCPASS_DECODE(entry));
620 } else {
621 (void) printf("\t [%6llu] %c range:"
622 " %010llx-%010llx size: %06llx\n",
623 (u_longlong_t)(offset / sizeof (entry)),
624 SM_TYPE_DECODE(entry) == SM_ALLOC ? 'A' : 'F',
625 (u_longlong_t)((SM_OFFSET_DECODE(entry) <<
626 mapshift) + sm->sm_start),
627 (u_longlong_t)((SM_OFFSET_DECODE(entry) <<
628 mapshift) + sm->sm_start +
629 (SM_RUN_DECODE(entry) << mapshift)),
630 (u_longlong_t)(SM_RUN_DECODE(entry) << mapshift));
631 if (SM_TYPE_DECODE(entry) == SM_ALLOC)
632 alloc += SM_RUN_DECODE(entry) << mapshift;
633 else
634 alloc -= SM_RUN_DECODE(entry) << mapshift;
635 }
636 }
637 if (alloc != space_map_allocated(sm)) {
638 (void) printf("space_map_object alloc (%llu) INCONSISTENT "
639 "with space map summary (%llu)\n",
640 (u_longlong_t)space_map_allocated(sm), (u_longlong_t)alloc);
641 }
642}
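/*
 * Each space map entry decoded above is one 64-bit word.  Debug entries
 * carry an action (ALLOC/FREE/CONDENSE), a txg and a sync pass; ordinary
 * entries carry an offset and run length expressed in units of 2^sm_shift
 * relative to sm_start, plus an ALLOC/FREE type bit -- hence the
 * (offset << mapshift) + sm_start arithmetic when printing ranges.
 */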
643
644static void
645dump_metaslab_stats(metaslab_t *msp)
646{
647 char maxbuf[32];
648 range_tree_t *rt = msp->ms_tree;
649 avl_tree_t *t = &msp->ms_size_tree;
650 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
651
652 zdb_nicenum(metaslab_block_maxsize(msp), maxbuf);
653
654 (void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n",
655 "segments", avl_numnodes(t), "maxsize", maxbuf,
656 "freepct", free_pct);
657 (void) printf("\tIn-memory histogram:\n");
658 dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
659}
660
661static void
662dump_metaslab(metaslab_t *msp)
663{
664 vdev_t *vd = msp->ms_group->mg_vd;
665 spa_t *spa = vd->vdev_spa;
666 space_map_t *sm = msp->ms_sm;
667 char freebuf[32];
668
669 zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf);
670
671 (void) printf(
672 "\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n",
673 (u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start,
674 (u_longlong_t)space_map_object(sm), freebuf);
675
676 if (dump_opt['m'] > 2 && !dump_opt['L']) {
677 mutex_enter(&msp->ms_lock);
678 metaslab_load_wait(msp);
679 if (!msp->ms_loaded) {
680 VERIFY0(metaslab_load(msp));
681 range_tree_stat_verify(msp->ms_tree);
682 }
683 dump_metaslab_stats(msp);
684 metaslab_unload(msp);
685 mutex_exit(&msp->ms_lock);
686 }
687
688 if (dump_opt['m'] > 1 && sm != NULL &&
689 spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
690 /*
691 * The space map histogram represents free space in chunks
692 * of sm_shift (i.e. bucket 0 refers to 2^sm_shift).
693 */
694 (void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n",
695 (u_longlong_t)msp->ms_fragmentation);
696 dump_histogram(sm->sm_phys->smp_histogram,
697 SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift);
698 }
699
700 if (dump_opt['d'] > 5 || dump_opt['m'] > 3) {
701 ASSERT(msp->ms_size == (1ULL << vd->vdev_ms_shift));
702
703 mutex_enter(&msp->ms_lock);
704 dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
705 mutex_exit(&msp->ms_lock);
706 }
707}
708
709static void
710print_vdev_metaslab_header(vdev_t *vd)
711{
712 (void) printf("\tvdev %10llu\n\t%-10s%5llu %-19s %-15s %-10s\n",
713 (u_longlong_t)vd->vdev_id,
714 "metaslabs", (u_longlong_t)vd->vdev_ms_count,
715 "offset", "spacemap", "free");
716 (void) printf("\t%15s %19s %15s %10s\n",
717 "---------------", "-------------------",
718 "---------------", "-------------");
719}
720
721static void
722dump_metaslab_groups(spa_t *spa)
723{
724 vdev_t *rvd = spa->spa_root_vdev;
725 metaslab_class_t *mc = spa_normal_class(spa);
726 uint64_t fragmentation;
727
728 metaslab_class_histogram_verify(mc);
729
730 for (int c = 0; c < rvd->vdev_children; c++) {
731 vdev_t *tvd = rvd->vdev_child[c];
732 metaslab_group_t *mg = tvd->vdev_mg;
733
734 if (mg->mg_class != mc)
735 continue;
736
737 metaslab_group_histogram_verify(mg);
738 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
739
740 (void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t"
741 "fragmentation",
742 (u_longlong_t)tvd->vdev_id,
743 (u_longlong_t)tvd->vdev_ms_count);
744 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
745 (void) printf("%3s\n", "-");
746 } else {
747 (void) printf("%3llu%%\n",
748 (u_longlong_t)mg->mg_fragmentation);
749 }
750 dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
751 }
752
753 (void) printf("\tpool %s\tfragmentation", spa_name(spa));
754 fragmentation = metaslab_class_fragmentation(mc);
755 if (fragmentation == ZFS_FRAG_INVALID)
756 (void) printf("\t%3s\n", "-");
757 else
758 (void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation);
759 dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
760}
761
762static void
763dump_metaslabs(spa_t *spa)
764{
765 vdev_t *vd, *rvd = spa->spa_root_vdev;
766 uint64_t m, c = 0, children = rvd->vdev_children;
767
768 (void) printf("\nMetaslabs:\n");
769
770 if (!dump_opt['d'] && zopt_objects > 0) {
771 c = zopt_object[0];
772
773 if (c >= children)
774 (void) fatal("bad vdev id: %llu", (u_longlong_t)c);
775
776 if (zopt_objects > 1) {
777 vd = rvd->vdev_child[c];
778 print_vdev_metaslab_header(vd);
779
780 for (m = 1; m < zopt_objects; m++) {
781 if (zopt_object[m] < vd->vdev_ms_count)
782 dump_metaslab(
783 vd->vdev_ms[zopt_object[m]]);
784 else
785 (void) fprintf(stderr, "bad metaslab "
786 "number %llu\n",
787 (u_longlong_t)zopt_object[m]);
788 }
789 (void) printf("\n");
790 return;
791 }
792 children = c + 1;
793 }
794 for (; c < children; c++) {
795 vd = rvd->vdev_child[c];
796 print_vdev_metaslab_header(vd);
797
798 for (m = 0; m < vd->vdev_ms_count; m++)
799 dump_metaslab(vd->vdev_ms[m]);
800 (void) printf("\n");
801 }
802}
803
804static void
805dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index)
806{
807 const ddt_phys_t *ddp = dde->dde_phys;
808 const ddt_key_t *ddk = &dde->dde_key;
809 char *types[4] = { "ditto", "single", "double", "triple" };
810 char blkbuf[BP_SPRINTF_LEN];
811 blkptr_t blk;
812
813 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
814 if (ddp->ddp_phys_birth == 0)
815 continue;
816 ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
817 snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk);
818 (void) printf("index %llx refcnt %llu %s %s\n",
819 (u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt,
820 types[p], blkbuf);
821 }
822}
823
824static void
825dump_dedup_ratio(const ddt_stat_t *dds)
826{
827 double rL, rP, rD, D, dedup, compress, copies;
828
829 if (dds->dds_blocks == 0)
830 return;
831
832 rL = (double)dds->dds_ref_lsize;
833 rP = (double)dds->dds_ref_psize;
834 rD = (double)dds->dds_ref_dsize;
835 D = (double)dds->dds_dsize;
836
837 dedup = rD / D;
838 compress = rL / rP;
839 copies = rD / rP;
840
841 (void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, "
842 "dedup * compress / copies = %.2f\n\n",
843 dedup, compress, copies, dedup * compress / copies);
844}
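/*
 * Worked example with made-up numbers: if rL = 1000, rP = 500, rD = 600 and
 * D = 300, then dedup = 600/300 = 2.00, compress = 1000/500 = 2.00,
 * copies = 600/500 = 1.20 and dedup * compress / copies = 3.33.
 */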
845
846static void
847dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
848{
849 char name[DDT_NAMELEN];
850 ddt_entry_t dde;
851 uint64_t walk = 0;
852 dmu_object_info_t doi;
853 uint64_t count, dspace, mspace;
854 int error;
855
856 error = ddt_object_info(ddt, type, class, &doi);
857
858 if (error == ENOENT)
859 return;
860 ASSERT(error == 0);
861
862 error = ddt_object_count(ddt, type, class, &count);
863 ASSERT(error == 0);
864 if (count == 0)
865 return;
866
867 dspace = doi.doi_physical_blocks_512 << 9;
868 mspace = doi.doi_fill_count * doi.doi_data_block_size;
869
870 ddt_object_name(ddt, type, class, name);
871
872 (void) printf("%s: %llu entries, size %llu on disk, %llu in core\n",
873 name,
874 (u_longlong_t)count,
875 (u_longlong_t)(dspace / count),
876 (u_longlong_t)(mspace / count));
877
878 if (dump_opt['D'] < 3)
879 return;
880
881 zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]);
882
883 if (dump_opt['D'] < 4)
884 return;
885
886 if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE)
887 return;
888
889 (void) printf("%s contents:\n\n", name);
890
891 while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0)
892 dump_dde(ddt, &dde, walk);
893
894 ASSERT(error == ENOENT);
895
896 (void) printf("\n");
897}
898
899static void
900dump_all_ddts(spa_t *spa)
901{
902 ddt_histogram_t ddh_total = { 0 };
903 ddt_stat_t dds_total = { 0 };
904
905 for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
906 ddt_t *ddt = spa->spa_ddt[c];
907 for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
908 for (enum ddt_class class = 0; class < DDT_CLASSES;
909 class++) {
910 dump_ddt(ddt, type, class);
911 }
912 }
913 }
914
915 ddt_get_dedup_stats(spa, &dds_total);
916
917 if (dds_total.dds_blocks == 0) {
918 (void) printf("All DDTs are empty\n");
919 return;
920 }
921
922 (void) printf("\n");
923
924 if (dump_opt['D'] > 1) {
925 (void) printf("DDT histogram (aggregated over all DDTs):\n");
926 ddt_get_dedup_histogram(spa, &ddh_total);
927 zpool_dump_ddt(&dds_total, &ddh_total);
928 }
929
930 dump_dedup_ratio(&dds_total);
931}
932
933static void
934dump_dtl_seg(void *arg, uint64_t start, uint64_t size)
935{
936 char *prefix = arg;
937
938 (void) printf("%s [%llu,%llu) length %llu\n",
939 prefix,
940 (u_longlong_t)start,
941 (u_longlong_t)(start + size),
942 (u_longlong_t)(size));
943}
944
945static void
946dump_dtl(vdev_t *vd, int indent)
947{
948 spa_t *spa = vd->vdev_spa;
949 boolean_t required;
950 char *name[DTL_TYPES] = { "missing", "partial", "scrub", "outage" };
951 char prefix[256];
952
953 spa_vdev_state_enter(spa, SCL_NONE);
954 required = vdev_dtl_required(vd);
955 (void) spa_vdev_state_exit(spa, NULL, 0);
956
957 if (indent == 0)
958 (void) printf("\nDirty time logs:\n\n");
959
960 (void) printf("\t%*s%s [%s]\n", indent, "",
961 vd->vdev_path ? vd->vdev_path :
962 vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa),
963 required ? "DTL-required" : "DTL-expendable");
964
965 for (int t = 0; t < DTL_TYPES; t++) {
966 range_tree_t *rt = vd->vdev_dtl[t];
967 if (range_tree_space(rt) == 0)
968 continue;
969 (void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
970 indent + 2, "", name[t]);
971 mutex_enter(rt->rt_lock);
972 range_tree_walk(rt, dump_dtl_seg, prefix);
973 mutex_exit(rt->rt_lock);
974 if (dump_opt['d'] > 5 && vd->vdev_children == 0)
975 dump_spacemap(spa->spa_meta_objset, vd->vdev_dtl_sm);
976 }
977
978 for (int c = 0; c < vd->vdev_children; c++)
979 dump_dtl(vd->vdev_child[c], indent + 4);
980}
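/*
 * A DTL ("dirty time log") records ranges of txgs during which a vdev may
 * have missed writes.  dump_dtl() above walks the in-core range tree for
 * each DTL type and, at high -d verbosity, dumps the backing space map of
 * leaf vdevs as well.
 */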
981
982/* from spa_history.c: spa_history_create_obj() */
983#define HIS_BUF_LEN_DEF (128 << 10)
984#define HIS_BUF_LEN_MAX (1 << 30)
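/*
 * dump_history() below starts with a 128K buffer (HIS_BUF_LEN_DEF) and,
 * whenever a single record is too large to unpack from one read, doubles
 * the buffer and retries, giving up once the size would reach the 1G cap
 * (HIS_BUF_LEN_MAX).
 */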
985
986static void
987dump_history(spa_t *spa)
988{
989 nvlist_t **events = NULL;
990 char *buf = NULL;
991 uint64_t bufsize = HIS_BUF_LEN_DEF;
992 uint64_t resid, len, off = 0;
993 uint_t num = 0;
994 int error;
995 time_t tsec;
996 struct tm t;
997 char tbuf[30];
998 char internalstr[MAXPATHLEN];
999
1000 if ((buf = malloc(bufsize)) == NULL) {
1001 (void) fprintf(stderr, "Unable to read history: "
1002 "out of memory\n");
 return;
 }
1003 do {
1004 len = bufsize;
1005
1006 if ((error = spa_history_get(spa, &off, &len, buf)) != 0) {
1007 (void) fprintf(stderr, "Unable to read history: "
1008 "error %d\n", error);
1009 return;
1010 }
1011
1012 if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0)
1013 break;
1014 off -= resid;
1015
1016 /*
1017 * If the history block is too big, double the buffer
1018 * size and try again.
1019 */
1020 if (resid == len) {
1021 free(buf);
1022 buf = NULL;
1023
1024 bufsize <<= 1;
1025 if ((bufsize >= HIS_BUF_LEN_MAX) ||
1026 ((buf = malloc(bufsize)) == NULL)) {
1027 (void) fprintf(stderr, "Unable to read history: "
1028 "out of memory\n");
1029 return;
1030 }
1031 }
1032 } while (len != 0);
1033 free(buf);
1034
1035 (void) printf("\nHistory:\n");
1036 for (int i = 0; i < num; i++) {
1037 uint64_t time, txg, ievent;
1038 char *cmd, *intstr;
1039 boolean_t printed = B_FALSE;
1040
1041 if (nvlist_lookup_uint64(events[i], ZPOOL_HIST_TIME,
1042 &time) != 0)
1043 goto next;
1044 if (nvlist_lookup_string(events[i], ZPOOL_HIST_CMD,
1045 &cmd) != 0) {
1046 if (nvlist_lookup_uint64(events[i],
1047 ZPOOL_HIST_INT_EVENT, &ievent) != 0)
1048 goto next;
1049 verify(nvlist_lookup_uint64(events[i],
1050 ZPOOL_HIST_TXG, &txg) == 0);
1051 verify(nvlist_lookup_string(events[i],
1052 ZPOOL_HIST_INT_STR, &intstr) == 0);
1053 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS)
1054 goto next;
1055
1056 (void) snprintf(internalstr,
1057 sizeof (internalstr),
1058 "[internal %s txg:%lld] %s",
1059 zfs_history_event_names[ievent], (longlong_t)txg,
1060 intstr);
1061 cmd = internalstr;
1062 }
1063 tsec = time;
1064 (void) localtime_r(&tsec, &t);
1065 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
1066 (void) printf("%s %s\n", tbuf, cmd);
1067 printed = B_TRUE;
1068
1069next:
1070 if (dump_opt['h'] > 1) {
1071 if (!printed)
1072 (void) printf("unrecognized record:\n");
1073 dump_nvlist(events[i], 2);
1074 }
1075 }
1076}
1077
1078/*ARGSUSED*/
1079static void
1080dump_dnode(objset_t *os, uint64_t object, void *data, size_t size)
1081{
1082}
1083
1084static uint64_t
1085blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp,
1086 const zbookmark_phys_t *zb)
1087{
1088 if (dnp == NULL) {
1089 ASSERT(zb->zb_level < 0);
1090 if (zb->zb_object == 0)
1091 return (zb->zb_blkid);
1092 return (zb->zb_blkid * BP_GET_LSIZE(bp));
1093 }
1094
1095 ASSERT(zb->zb_level >= 0);
1096
1097 return ((zb->zb_blkid <<
1098 (zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) *
1099 dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
1100}
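/*
 * Worked example (hypothetical geometry): for a level-0 block of an object
 * with 128K data blocks (dn_datablkszsec = 256 sectors), blkid 3 maps to
 * byte offset 3 * 256 << 9 = 393216.  One level up, each indirect-block
 * slot spans 2^(dn_indblkshift - SPA_BLKPTRSHIFT) level-0 blocks, which is
 * what the shift by zb_level accounts for.
 */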
1101
1102static void
1103snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp)
1104{
1105 const dva_t *dva = bp->blk_dva;
1106 int ndvas = dump_opt['d'] > 5 ? BP_GET_NDVAS(bp) : 1;
1107
1108 if (dump_opt['b'] >= 6) {
1109 snprintf_blkptr(blkbuf, buflen, bp);
1110 return;
1111 }
1112
1113 if (BP_IS_EMBEDDED(bp)) {
1114 (void) sprintf(blkbuf,
1115 "EMBEDDED et=%u %llxL/%llxP B=%llu",
1116 (int)BPE_GET_ETYPE(bp),
1117 (u_longlong_t)BPE_GET_LSIZE(bp),
1118 (u_longlong_t)BPE_GET_PSIZE(bp),
1119 (u_longlong_t)bp->blk_birth);
1120 return;
1121 }
1122
1123 blkbuf[0] = '\0';
1124 for (int i = 0; i < ndvas; i++)
1125 (void) snprintf(blkbuf + strlen(blkbuf),
1126 buflen - strlen(blkbuf), "%llu:%llx:%llx ",
1127 (u_longlong_t)DVA_GET_VDEV(&dva[i]),
1128 (u_longlong_t)DVA_GET_OFFSET(&dva[i]),
1129 (u_longlong_t)DVA_GET_ASIZE(&dva[i]));
1130
1131 if (BP_IS_HOLE(bp)) {
1132 (void) snprintf(blkbuf + strlen(blkbuf),
1133 buflen - strlen(blkbuf), "B=%llu",
1134 (u_longlong_t)bp->blk_birth);
1135 } else {
1136 (void) snprintf(blkbuf + strlen(blkbuf),
1137 buflen - strlen(blkbuf),
1138 "%llxL/%llxP F=%llu B=%llu/%llu",
1139 (u_longlong_t)BP_GET_LSIZE(bp),
1140 (u_longlong_t)BP_GET_PSIZE(bp),
1141 (u_longlong_t)BP_GET_FILL(bp),
1142 (u_longlong_t)bp->blk_birth,
1143 (u_longlong_t)BP_PHYSICAL_BIRTH(bp));
1144 }
1145}
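/*
 * With default verbosity the compact form prints a single DVA followed by
 * sizes, fill and birth, so a line might look roughly like
 *
 *	0:4c0000:200 20000L/200P F=1 B=10/10
 *
 * i.e. vdev:offset:asize, logical/physical size, fill count and birth txgs.
 * The values shown here are illustrative only.
 */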
1146
1147static void
1148print_indirect(blkptr_t *bp, const zbookmark_phys_t *zb,
1149 const dnode_phys_t *dnp)
1150{
1151 char blkbuf[BP_SPRINTF_LEN];
1152 int l;
1153
1154 if (!BP_IS_EMBEDDED(bp)) {
1155 ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
1156 ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
1157 }
1158
1159 (void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb));
1160
1161 ASSERT(zb->zb_level >= 0);
1162
1163 for (l = dnp->dn_nlevels - 1; l >= -1; l--) {
1164 if (l == zb->zb_level) {
1165 (void) printf("L%llx", (u_longlong_t)zb->zb_level);
1166 } else {
1167 (void) printf(" ");
1168 }
1169 }
1170
1171 snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp);
1172 (void) printf("%s\n", blkbuf);
1173}
1174
1175static int
1176visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
1177 blkptr_t *bp, const zbookmark_phys_t *zb)
1178{
1179 int err = 0;
1180
1181 if (bp->blk_birth == 0)
1182 return (0);
1183
1184 print_indirect(bp, zb, dnp);
1185
1186 if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) {
1187 arc_flags_t flags = ARC_FLAG_WAIT;
1188 int i;
1189 blkptr_t *cbp;
1190 int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
1191 arc_buf_t *buf;
1192 uint64_t fill = 0;
1193
1194 err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
1195 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
1196 if (err)
1197 return (err);
1198 ASSERT(buf->b_data);
1199
1200 /* recursively visit blocks below this */
1201 cbp = buf->b_data;
1202 for (i = 0; i < epb; i++, cbp++) {
1203 zbookmark_phys_t czb;
1204
1205 SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
1206 zb->zb_level - 1,
1207 zb->zb_blkid * epb + i);
1208 err = visit_indirect(spa, dnp, cbp, &czb);
1209 if (err)
1210 break;
1211 fill += BP_GET_FILL(cbp);
1212 }
1213 if (!err)
1214 ASSERT3U(fill, ==, BP_GET_FILL(bp));
1215 (void) arc_buf_remove_ref(buf, &buf);
1216 }
1217
1218 return (err);
1219}
1220
1221/*ARGSUSED*/
1222static void
1223dump_indirect(dnode_t *dn)
1224{
1225 dnode_phys_t *dnp = dn->dn_phys;
1226 int j;
1227 zbookmark_phys_t czb;
1228
1229 (void) printf("Indirect blocks:\n");
1230
1231 SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset),
1232 dn->dn_object, dnp->dn_nlevels - 1, 0);
1233 for (j = 0; j < dnp->dn_nblkptr; j++) {
1234 czb.zb_blkid = j;
1235 (void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp,
1236 &dnp->dn_blkptr[j], &czb);
1237 }
1238
1239 (void) printf("\n");
1240}
1241
1242/*ARGSUSED*/
1243static void
1244dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size)
1245{
1246 dsl_dir_phys_t *dd = data;
1247 time_t crtime;
1248 char nice[32];
1249
1250 if (dd == NULL)
1251 return;
1252
1253 ASSERT3U(size, >=, sizeof (dsl_dir_phys_t));
1254
1255 crtime = dd->dd_creation_time;
1256 (void) printf("\t\tcreation_time = %s", ctime(&crtime));
1257 (void) printf("\t\thead_dataset_obj = %llu\n",
1258 (u_longlong_t)dd->dd_head_dataset_obj);
1259 (void) printf("\t\tparent_dir_obj = %llu\n",
1260 (u_longlong_t)dd->dd_parent_obj);
1261 (void) printf("\t\torigin_obj = %llu\n",
1262 (u_longlong_t)dd->dd_origin_obj);
1263 (void) printf("\t\tchild_dir_zapobj = %llu\n",
1264 (u_longlong_t)dd->dd_child_dir_zapobj);
1265 zdb_nicenum(dd->dd_used_bytes, nice);
1266 (void) printf("\t\tused_bytes = %s\n", nice);
1267 zdb_nicenum(dd->dd_compressed_bytes, nice);
1268 (void) printf("\t\tcompressed_bytes = %s\n", nice);
1269 zdb_nicenum(dd->dd_uncompressed_bytes, nice);
1270 (void) printf("\t\tuncompressed_bytes = %s\n", nice);
1271 zdb_nicenum(dd->dd_quota, nice);
1272 (void) printf("\t\tquota = %s\n", nice);
1273 zdb_nicenum(dd->dd_reserved, nice);
1274 (void) printf("\t\treserved = %s\n", nice);
1275 (void) printf("\t\tprops_zapobj = %llu\n",
1276 (u_longlong_t)dd->dd_props_zapobj);
1277 (void) printf("\t\tdeleg_zapobj = %llu\n",
1278 (u_longlong_t)dd->dd_deleg_zapobj);
1279 (void) printf("\t\tflags = %llx\n",
1280 (u_longlong_t)dd->dd_flags);
1281
1282#define DO(which) \
1283 zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice); \
1284 (void) printf("\t\tused_breakdown[" #which "] = %s\n", nice)
1285 DO(HEAD);
1286 DO(SNAP);
1287 DO(CHILD);
1288 DO(CHILD_RSRV);
1289 DO(REFRSRV);
1290#undef DO
1291}
1292
1293/*ARGSUSED*/
1294static void
1295dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size)
1296{
1297 dsl_dataset_phys_t *ds = data;
1298 time_t crtime;
1299 char used[32], compressed[32], uncompressed[32], unique[32];
1300 char blkbuf[BP_SPRINTF_LEN];
1301
1302 if (ds == NULL)
1303 return;
1304
1305 ASSERT(size == sizeof (*ds));
1306 crtime = ds->ds_creation_time;
1307 zdb_nicenum(ds->ds_referenced_bytes, used);
1308 zdb_nicenum(ds->ds_compressed_bytes, compressed);
1309 zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed);
1310 zdb_nicenum(ds->ds_unique_bytes, unique);
1311 snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp);
1312
1313 (void) printf("\t\tdir_obj = %llu\n",
1314 (u_longlong_t)ds->ds_dir_obj);
1315 (void) printf("\t\tprev_snap_obj = %llu\n",
1316 (u_longlong_t)ds->ds_prev_snap_obj);
1317 (void) printf("\t\tprev_snap_txg = %llu\n",
1318 (u_longlong_t)ds->ds_prev_snap_txg);
1319 (void) printf("\t\tnext_snap_obj = %llu\n",
1320 (u_longlong_t)ds->ds_next_snap_obj);
1321 (void) printf("\t\tsnapnames_zapobj = %llu\n",
1322 (u_longlong_t)ds->ds_snapnames_zapobj);
1323 (void) printf("\t\tnum_children = %llu\n",
1324 (u_longlong_t)ds->ds_num_children);
1325 (void) printf("\t\tuserrefs_obj = %llu\n",
1326 (u_longlong_t)ds->ds_userrefs_obj);
1327 (void) printf("\t\tcreation_time = %s", ctime(&crtime));
1328 (void) printf("\t\tcreation_txg = %llu\n",
1329 (u_longlong_t)ds->ds_creation_txg);
1330 (void) printf("\t\tdeadlist_obj = %llu\n",
1331 (u_longlong_t)ds->ds_deadlist_obj);
1332 (void) printf("\t\tused_bytes = %s\n", used);
1333 (void) printf("\t\tcompressed_bytes = %s\n", compressed);
1334 (void) printf("\t\tuncompressed_bytes = %s\n", uncompressed);
1335 (void) printf("\t\tunique = %s\n", unique);
1336 (void) printf("\t\tfsid_guid = %llu\n",
1337 (u_longlong_t)ds->ds_fsid_guid);
1338 (void) printf("\t\tguid = %llu\n",
1339 (u_longlong_t)ds->ds_guid);
1340 (void) printf("\t\tflags = %llx\n",
1341 (u_longlong_t)ds->ds_flags);
1342 (void) printf("\t\tnext_clones_obj = %llu\n",
1343 (u_longlong_t)ds->ds_next_clones_obj);
1344 (void) printf("\t\tprops_obj = %llu\n",
1345 (u_longlong_t)ds->ds_props_obj);
1346 (void) printf("\t\tbp = %s\n", blkbuf);
1347}
1348
1349/* ARGSUSED */
1350static int
1351dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
1352{
1353 char blkbuf[BP_SPRINTF_LEN];
1354
1355 if (bp->blk_birth != 0) {
1356 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
1357 (void) printf("\t%s\n", blkbuf);
1358 }
1359 return (0);
1360}
1361
1362static void
1363dump_bptree(objset_t *os, uint64_t obj, char *name)
1364{
1365 char bytes[32];
1366 bptree_phys_t *bt;
1367 dmu_buf_t *db;
1368
1369 if (dump_opt['d'] < 3)
1370 return;
1371
1372 VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
1373 bt = db->db_data;
1374 zdb_nicenum(bt->bt_bytes, bytes);
1375 (void) printf("\n %s: %llu datasets, %s\n",
1376 name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes);
1377 dmu_buf_rele(db, FTAG);
1378
1379 if (dump_opt['d'] < 5)
1380 return;
1381
1382 (void) printf("\n");
1383
1384 (void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL);
1385}
1386
1387/* ARGSUSED */
1388static int
1389dump_bpobj_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
1390{
1391 char blkbuf[BP_SPRINTF_LEN];
1392
1393 ASSERT(bp->blk_birth != 0);
1394 snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp);
1395 (void) printf("\t%s\n", blkbuf);
1396 return (0);
1397}
1398
1399static void
1400dump_bpobj(bpobj_t *bpo, char *name, int indent)
1401{
1402 char bytes[32];
1403 char comp[32];
1404 char uncomp[32];
1405
1406 if (dump_opt['d'] < 3)
1407 return;
1408
1409 zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes);
1410 if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
1411 zdb_nicenum(bpo->bpo_phys->bpo_comp, comp);
1412 zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp);
1413 (void) printf(" %*s: object %llu, %llu local blkptrs, "
1414 "%llu subobjs, %s (%s/%s comp)\n",
1415 indent * 8, name,
1416 (u_longlong_t)bpo->bpo_object,
1417 (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
1418 (u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
1419 bytes, comp, uncomp);
1420
1421 for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
1422 uint64_t subobj;
1423 bpobj_t subbpo;
1424 int error;
1425 VERIFY0(dmu_read(bpo->bpo_os,
1426 bpo->bpo_phys->bpo_subobjs,
1427 i * sizeof (subobj), sizeof (subobj), &subobj, 0));
1428 error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
1429 if (error != 0) {
1430 (void) printf("ERROR %u while trying to open "
1431 "subobj id %llu\n",
1432 error, (u_longlong_t)subobj);
1433 continue;
1434 }
1435 dump_bpobj(&subbpo, "subobj", indent + 1);
1436 bpobj_close(&subbpo);
1437 }
1438 } else {
1439 (void) printf(" %*s: object %llu, %llu blkptrs, %s\n",
1440 indent * 8, name,
1441 (u_longlong_t)bpo->bpo_object,
1442 (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
1443 bytes);
1444 }
1445
1446 if (dump_opt['d'] < 5)
1447 return;
1448
1449
1450 if (indent == 0) {
1451 (void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
1452 (void) printf("\n");
1453 }
1454}
1455
1456static void
1457dump_deadlist(dsl_deadlist_t *dl)
1458{
1459 dsl_deadlist_entry_t *dle;
1460 uint64_t unused;
1461 char bytes[32];
1462 char comp[32];
1463 char uncomp[32];
1464
1465 if (dump_opt['d'] < 3)
1466 return;
1467
1468 if (dl->dl_oldfmt) {
1469 dump_bpobj(&dl->dl_bpobj, "old-format deadlist", 0);
1470 return;
1471 }
1472
1473 zdb_nicenum(dl->dl_phys->dl_used, bytes);
1474 zdb_nicenum(dl->dl_phys->dl_comp, comp);
1475 zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp);
1476 (void) printf("\n Deadlist: %s (%s/%s comp)\n",
1477 bytes, comp, uncomp);
1478
1479 if (dump_opt['d'] < 4)
1480 return;
1481
1482 (void) printf("\n");
1483
1484 /* force the tree to be loaded */
1485 dsl_deadlist_space_range(dl, 0, UINT64_MAX, &unused, &unused, &unused);
1486
1487 for (dle = avl_first(&dl->dl_tree); dle;
1488 dle = AVL_NEXT(&dl->dl_tree, dle)) {
1489 if (dump_opt['d'] >= 5) {
1490 char buf[128];
1491 (void) snprintf(buf, sizeof (buf), "mintxg %llu -> ",
1492 (longlong_t)dle->dle_mintxg,
1493 (longlong_t)dle->dle_bpobj.bpo_object);
1494
1495 dump_bpobj(&dle->dle_bpobj, buf, 0);
1496 } else {
1497 (void) printf("mintxg %llu -> obj %llu\n",
1498 (longlong_t)dle->dle_mintxg,
1499 (longlong_t)dle->dle_bpobj.bpo_object);
1500
1501 }
1502 }
1503}
1504
1505static avl_tree_t idx_tree;
1506static avl_tree_t domain_tree;
1507static boolean_t fuid_table_loaded;
1508static boolean_t sa_loaded;
1509sa_attr_type_t *sa_attr_table;
1510
1511static void
1512fuid_table_destroy()
1513{
1514 if (fuid_table_loaded) {
1515 zfs_fuid_table_destroy(&idx_tree, &domain_tree);
1516 fuid_table_loaded = B_FALSE;
1517 }
1518}
1519
1520/*
1521 * Print uid or gid information.
1522 * For a normal POSIX id, just the id is printed in decimal format.
1523 * For CIFS files with a FUID, the fuid is printed in hex followed by
1524 * the domain-rid string.
1525 */
1526static void
1527print_idstr(uint64_t id, const char *id_type)
1528{
1529 if (FUID_INDEX(id)) {
1530 char *domain;
1531
1532 domain = zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id));
1533 (void) printf("\t%s %llx [%s-%d]\n", id_type,
1534 (u_longlong_t)id, domain, (int)FUID_RID(id));
1535 } else {
1536 (void) printf("\t%s %llu\n", id_type, (u_longlong_t)id);
1537 }
1538
1539}
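/*
 * A FUID packs a domain-table index and a RID into one 64-bit value:
 * FUID_INDEX() selects the domain (zero meaning a plain POSIX id) and
 * FUID_RID() the relative id within that domain, which is why a non-zero
 * index is printed above as the raw fuid followed by [domain-rid].
 */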
1540
1541static void
1542dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid)
1543{
1544 uint32_t uid_idx, gid_idx;
1545
1546 uid_idx = FUID_INDEX(uid);
1547 gid_idx = FUID_INDEX(gid);
1548
1549 /* Load domain table, if not already loaded */
1550 if (!fuid_table_loaded && (uid_idx || gid_idx)) {
1551 uint64_t fuid_obj;
1552
1553 /* first find the fuid object. It lives in the master node */
1554 VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
1555 8, 1, &fuid_obj) == 0);
1556 zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
1557 (void) zfs_fuid_table_load(os, fuid_obj,
1558 &idx_tree, &domain_tree);
1559 fuid_table_loaded = B_TRUE;
1560 }
1561
1562 print_idstr(uid, "uid");
1563 print_idstr(gid, "gid");
1564}
1565
1566/*ARGSUSED*/
1567static void
1568dump_znode(objset_t *os, uint64_t object, void *data, size_t size)
1569{
1570 char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */
1571 sa_handle_t *hdl;
1572 uint64_t xattr, rdev, gen;
1573 uint64_t uid, gid, mode, fsize, parent, links;
1574 uint64_t pflags;
1575 uint64_t acctm[2], modtm[2], chgtm[2], crtm[2];
1576 time_t z_crtime, z_atime, z_mtime, z_ctime;
1577 sa_bulk_attr_t bulk[12];
1578 int idx = 0;
1579 int error;
1580
1581 if (!sa_loaded) {
1582 uint64_t sa_attrs = 0;
1583 uint64_t version;
1584
1585 VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
1586 8, 1, &version) == 0);
1587 if (version >= ZPL_VERSION_SA) {
1588 VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS,
1589 8, 1, &sa_attrs) == 0);
1590 }
1591 if ((error = sa_setup(os, sa_attrs, zfs_attr_table,
1592 ZPL_END, &sa_attr_table)) != 0) {
1593 (void) printf("sa_setup failed errno %d, can't "
1594 "display znode contents\n", error);
1595 return;
1596 }
1597 sa_loaded = B_TRUE;
1598 }
1599
1600 if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) {
1601 (void) printf("Failed to get handle for SA znode\n");
1602 return;
1603 }
1604
1605 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8);
1606 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8);
1607 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL,
1608 &links, 8);
1609 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8);
1610 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL,
1611 &mode, 8);
1612 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT],
1613 NULL, &parent, 8);
1614 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL,
1615 &fsize, 8);
1616 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL,
1617 acctm, 16);
1618 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL,
1619 modtm, 16);
1620 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL,
1621 crtm, 16);
1622 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL,
1623 chgtm, 16);
1624 SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL,
1625 &pflags, 8);
1626
1627 if (sa_bulk_lookup(hdl, bulk, idx)) {
1628 (void) sa_handle_destroy(hdl);
1629 return;
1630 }
1631
1632 error = zfs_obj_to_path(os, object, path, sizeof (path));
1633 if (error != 0) {
1634 (void) snprintf(path, sizeof (path), "\?\?\?<object#%llu>",
1635 (u_longlong_t)object);
1636 }
1637 if (dump_opt['d'] < 3) {
1638 (void) printf("\t%s\n", path);
1639 (void) sa_handle_destroy(hdl);
1640 return;
1641 }
1642
1643 z_crtime = (time_t)crtm[0];
1644 z_atime = (time_t)acctm[0];
1645 z_mtime = (time_t)modtm[0];
1646 z_ctime = (time_t)chgtm[0];
1647
1648 (void) printf("\tpath %s\n", path);
1649 dump_uidgid(os, uid, gid);
1650 (void) printf("\tatime %s", ctime(&z_atime));
1651 (void) printf("\tmtime %s", ctime(&z_mtime));
1652 (void) printf("\tctime %s", ctime(&z_ctime));
1653 (void) printf("\tcrtime %s", ctime(&z_crtime));
1654 (void) printf("\tgen %llu\n", (u_longlong_t)gen);
1655 (void) printf("\tmode %llo\n", (u_longlong_t)mode);
1656 (void) printf("\tsize %llu\n", (u_longlong_t)fsize);
1657 (void) printf("\tparent %llu\n", (u_longlong_t)parent);
1658 (void) printf("\tlinks %llu\n", (u_longlong_t)links);
1659 (void) printf("\tpflags %llx\n", (u_longlong_t)pflags);
1660 if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr,
1661 sizeof (uint64_t)) == 0)
1662 (void) printf("\txattr %llu\n", (u_longlong_t)xattr);
1663 if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev,
1664 sizeof (uint64_t)) == 0)
1665 (void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev);
1666 sa_handle_destroy(hdl);
1667}
1668
1669/*ARGSUSED*/
1670static void
1671dump_acl(objset_t *os, uint64_t object, void *data, size_t size)
1672{
1673}
1674
1675/*ARGSUSED*/
1676static void
1677dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size)
1678{
1679}
1680
1681static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = {
1682 dump_none, /* unallocated */
1683 dump_zap, /* object directory */
1684 dump_uint64, /* object array */
1685 dump_none, /* packed nvlist */
1686 dump_packed_nvlist, /* packed nvlist size */
1687 dump_none, /* bplist */
1688 dump_none, /* bplist header */
1689 dump_none, /* SPA space map header */
1690 dump_none, /* SPA space map */
1691 dump_none, /* ZIL intent log */
1692 dump_dnode, /* DMU dnode */
1693 dump_dmu_objset, /* DMU objset */
1694 dump_dsl_dir, /* DSL directory */
1695 dump_zap, /* DSL directory child map */
1696 dump_zap, /* DSL dataset snap map */
1697 dump_zap, /* DSL props */
1698 dump_dsl_dataset, /* DSL dataset */
1699 dump_znode, /* ZFS znode */
1700 dump_acl, /* ZFS V0 ACL */
1701 dump_uint8, /* ZFS plain file */
1702 dump_zpldir, /* ZFS directory */
1703 dump_zap, /* ZFS master node */
1704 dump_zap, /* ZFS delete queue */
1705 dump_uint8, /* zvol object */
1706 dump_zap, /* zvol prop */
1707 dump_uint8, /* other uint8[] */
1708 dump_uint64, /* other uint64[] */
1709 dump_zap, /* other ZAP */
1710 dump_zap, /* persistent error log */
1711 dump_uint8, /* SPA history */
1712 dump_history_offsets, /* SPA history offsets */
1713 dump_zap, /* Pool properties */
1714 dump_zap, /* DSL permissions */
1715 dump_acl, /* ZFS ACL */
1716 dump_uint8, /* ZFS SYSACL */
1717 dump_none, /* FUID nvlist */
1718 dump_packed_nvlist, /* FUID nvlist size */
1719 dump_zap, /* DSL dataset next clones */
1720 dump_zap, /* DSL scrub queue */
1721 dump_zap, /* ZFS user/group used */
1722 dump_zap, /* ZFS user/group quota */
1723 dump_zap, /* snapshot refcount tags */
1724 dump_ddt_zap, /* DDT ZAP object */
1725 dump_zap, /* DDT statistics */
1726 dump_znode, /* SA object */
1727 dump_zap, /* SA Master Node */
1728 dump_sa_attrs, /* SA attribute registration */
1729 dump_sa_layouts, /* SA attribute layouts */
1730 dump_zap, /* DSL scrub translations */
1731 dump_none, /* fake dedup BP */
1732 dump_zap, /* deadlist */
1733 dump_none, /* deadlist hdr */
1734 dump_zap, /* dsl clones */
1735 dump_none, /* bpobj subobjs */
1736 dump_unknown, /* Unknown type, must be last */
1737};
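/*
 * dump_object() dispatches through this table using ZDB_OT_TYPE(), which
 * folds the newer DMU_OTN_ZAP_* types onto DMU_OT_ZAP_OTHER and anything
 * else unknown onto DMU_OT_NUMTYPES, so unrecognized object types land on
 * the trailing dump_unknown entry instead of running off the array.
 */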
1738
1739static void
1740dump_object(objset_t *os, uint64_t object, int verbosity, int *print_header)
1741{
1742 dmu_buf_t *db = NULL;
1743 dmu_object_info_t doi;
1744 dnode_t *dn;
1745 void *bonus = NULL;
1746 size_t bsize = 0;
1747 char iblk[32], dblk[32], lsize[32], asize[32], fill[32];
1748 char bonus_size[32];
1749 char aux[50];
1750 int error;
1751
1752 if (*print_header) {
1753 (void) printf("\n%10s %3s %5s %5s %5s %5s %6s %s\n",
1754 "Object", "lvl", "iblk", "dblk", "dsize", "lsize",
1755 "%full", "type");
1756 *print_header = 0;
1757 }
1758
1759 if (object == 0) {
1760 dn = DMU_META_DNODE(os);
1761 } else {
1762 error = dmu_bonus_hold(os, object, FTAG, &db);
1763 if (error)
1764 fatal("dmu_bonus_hold(%llu) failed, errno %u",
1765 object, error);
1766 bonus = db->db_data;
1767 bsize = db->db_size;
1768 dn = DB_DNODE((dmu_buf_impl_t *)db);
1769 }
1770 dmu_object_info_from_dnode(dn, &doi);
1771
1772 zdb_nicenum(doi.doi_metadata_block_size, iblk);
1773 zdb_nicenum(doi.doi_data_block_size, dblk);
1774 zdb_nicenum(doi.doi_max_offset, lsize);
1775 zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize);
1776 zdb_nicenum(doi.doi_bonus_size, bonus_size);
1777 (void) sprintf(fill, "%6.2f", 100.0 * doi.doi_fill_count *
1778 doi.doi_data_block_size / (object == 0 ? DNODES_PER_BLOCK : 1) /
1779 doi.doi_max_offset);
1780
1781 aux[0] = '\0';
1782
1783 if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) {
1784 (void) snprintf(aux + strlen(aux), sizeof (aux), " (K=%s)",
1785 ZDB_CHECKSUM_NAME(doi.doi_checksum));
1786 }
1787
1788 if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) {
1789 (void) snprintf(aux + strlen(aux), sizeof (aux), " (Z=%s)",
1790 ZDB_COMPRESS_NAME(doi.doi_compress));
1791 }
1792
1793 (void) printf("%10lld %3u %5s %5s %5s %5s %6s %s%s\n",
1794 (u_longlong_t)object, doi.doi_indirection, iblk, dblk,
1795 asize, lsize, fill, ZDB_OT_NAME(doi.doi_type), aux);
1796
1797 if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) {
1798 (void) printf("%10s %3s %5s %5s %5s %5s %6s %s\n",
1799 "", "", "", "", "", bonus_size, "bonus",
1800 ZDB_OT_NAME(doi.doi_bonus_type));
1801 }
1802
1803 if (verbosity >= 4) {
1804 (void) printf("\tdnode flags: %s%s%s\n",
1805 (dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ?
1806 "USED_BYTES " : "",
1807 (dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ?
1808 "USERUSED_ACCOUNTED " : "",
1809 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ?
1810 "SPILL_BLKPTR" : "");
1811 (void) printf("\tdnode maxblkid: %llu\n",
1812 (longlong_t)dn->dn_phys->dn_maxblkid);
1813
1814 object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os, object,
1815 bonus, bsize);
1816 object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object, NULL, 0);
1817 *print_header = 1;
1818 }
1819
1820 if (verbosity >= 5)
1821 dump_indirect(dn);
1822
1823 if (verbosity >= 5) {
1824 /*
1825 * Report the list of segments that comprise the object.
1826 */
1827 uint64_t start = 0;
1828 uint64_t end;
1829 uint64_t blkfill = 1;
1830 int minlvl = 1;
1831
1832 if (dn->dn_type == DMU_OT_DNODE) {
1833 minlvl = 0;
1834 blkfill = DNODES_PER_BLOCK;
1835 }
1836
1837 for (;;) {
1838 char segsize[32];
1839 error = dnode_next_offset(dn,
1840 0, &start, minlvl, blkfill, 0);
1841 if (error)
1842 break;
1843 end = start;
1844 error = dnode_next_offset(dn,
1845 DNODE_FIND_HOLE, &end, minlvl, blkfill, 0);
1846 zdb_nicenum(end - start, segsize);
1847 (void) printf("\t\tsegment [%016llx, %016llx)"
1848 " size %5s\n", (u_longlong_t)start,
1849 (u_longlong_t)end, segsize);
1850 if (error)
1851 break;
1852 start = end;
1853 }
1854 }
1855
1856 if (db != NULL)
1857 dmu_buf_rele(db, FTAG);
1858}
1859
1860static char *objset_types[DMU_OST_NUMTYPES] = {
1861 "NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" };
1862
1863static void
1864dump_dir(objset_t *os)
1865{
1866 dmu_objset_stats_t dds;
1867 uint64_t object, object_count;
1868 uint64_t refdbytes, usedobjs, scratch;
1869 char numbuf[32];
1870 char blkbuf[BP_SPRINTF_LEN + 20];
1871 char osname[MAXNAMELEN];
1872 char *type = "UNKNOWN";
1873 int verbosity = dump_opt['d'];
1874 int print_header = 1;
1875 int i, error;
1876
1877 dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
1878 dmu_objset_fast_stat(os, &dds);
1879 dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
1880
1881 if (dds.dds_type < DMU_OST_NUMTYPES)
1882 type = objset_types[dds.dds_type];
1883
1884 if (dds.dds_type == DMU_OST_META) {
1885 dds.dds_creation_txg = TXG_INITIAL;
1886 usedobjs = BP_GET_FILL(os->os_rootbp);
1887 refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)->
1888 dd_used_bytes;
1889 } else {
1890 dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch);
1891 }
1892
1893 ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp));
1894
1895 zdb_nicenum(refdbytes, numbuf);
1896
1897 if (verbosity >= 4) {
1898 (void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp ");
1899 (void) snprintf_blkptr(blkbuf + strlen(blkbuf),
1900 sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp);
1901 } else {
1902 blkbuf[0] = '\0';
1903 }
1904
1905 dmu_objset_name(os, osname);
1906
1907 (void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, "
1908 "%s, %llu objects%s\n",
1909 osname, type, (u_longlong_t)dmu_objset_id(os),
1910 (u_longlong_t)dds.dds_creation_txg,
1911 numbuf, (u_longlong_t)usedobjs, blkbuf);
1912
1913 if (zopt_objects != 0) {
1914 for (i = 0; i < zopt_objects; i++)
1915 dump_object(os, zopt_object[i], verbosity,
1916 &print_header);
1917 (void) printf("\n");
1918 return;
1919 }
1920
1921 if (dump_opt['i'] != 0 || verbosity >= 2)
1922 dump_intent_log(dmu_objset_zil(os));
1923
1924 if (dmu_objset_ds(os) != NULL)
1925 dump_deadlist(&dmu_objset_ds(os)->ds_deadlist);
1926
1927 if (verbosity < 2)
1928 return;
1929
1930 if (BP_IS_HOLE(os->os_rootbp))
1931 return;
1932
1933 dump_object(os, 0, verbosity, &print_header);
1934 object_count = 0;
1935 if (DMU_USERUSED_DNODE(os) != NULL &&
1936 DMU_USERUSED_DNODE(os)->dn_type != 0) {
1937 dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header);
1938 dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header);
1939 }
1940
1941 object = 0;
1942 while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
1943 dump_object(os, object, verbosity, &print_header);
1944 object_count++;
1945 }
1946
1947 ASSERT3U(object_count, ==, usedobjs);
1948
1949 (void) printf("\n");
1950
1951 if (error != ESRCH) {
1952 (void) fprintf(stderr, "dmu_object_next() = %d\n", error);
1953 abort();
1954 }
1955}
1956
1957static void
1958dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
1959{
1960 time_t timestamp = ub->ub_timestamp;
1961
1962 (void) printf(header ? header : "");
1963 (void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic);
1964 (void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version);
1965 (void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg);
1966 (void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
1967 (void) printf("\ttimestamp = %llu UTC = %s",
1968 (u_longlong_t)ub->ub_timestamp, asctime(localtime(&timestamp)));
1969 if (dump_opt['u'] >= 3) {
1970 char blkbuf[BP_SPRINTF_LEN];
1971 snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
1972 (void) printf("\trootbp = %s\n", blkbuf);
1973 }
1974 (void) printf(footer ? footer : "");
1975}
1976
1977static void
1978dump_config(spa_t *spa)
1979{
1980 dmu_buf_t *db;
1981 size_t nvsize = 0;
1982 int error = 0;
1983
1985 error = dmu_bonus_hold(spa->spa_meta_objset,
1986 spa->spa_config_object, FTAG, &db);
1987
1988 if (error == 0) {
1989 nvsize = *(uint64_t *)db->db_data;
1990 dmu_buf_rele(db, FTAG);
1991
1992 (void) printf("\nMOS Configuration:\n");
1993 dump_packed_nvlist(spa->spa_meta_objset,
1994 spa->spa_config_object, (void *)&nvsize, 1);
1995 } else {
1996 (void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d",
1997 (u_longlong_t)spa->spa_config_object, error);
1998 }
1999}
2000
2001static void
2002dump_cachefile(const char *cachefile)
2003{
2004 int fd;
2005 struct stat64 statbuf;
2006 char *buf;
2007 nvlist_t *config;
2008
2009 if ((fd = open64(cachefile, O_RDONLY)) < 0) {
2010 (void) printf("cannot open '%s': %s\n", cachefile,
2011 strerror(errno));
2012 exit(1);
2013 }
2014
2015 if (fstat64(fd, &statbuf) != 0) {
2016 (void) printf("failed to stat '%s': %s\n", cachefile,
2017 strerror(errno));
2018 exit(1);
2019 }
2020
2021 if ((buf = malloc(statbuf.st_size)) == NULL) {
2022 (void) fprintf(stderr, "failed to allocate %llu bytes\n",
2023 (u_longlong_t)statbuf.st_size);
2024 exit(1);
2025 }
2026
2027 if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
2028 (void) fprintf(stderr, "failed to read %llu bytes\n",
2029 (u_longlong_t)statbuf.st_size);
2030 exit(1);
2031 }
2032
2033 (void) close(fd);
2034
2035 if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) {
2036 (void) fprintf(stderr, "failed to unpack nvlist\n");
2037 exit(1);
2038 }
2039
2040 free(buf);
2041
2042 dump_nvlist(config, 0);
2043
2044 nvlist_free(config);
2045}
2046
2047#define ZDB_MAX_UB_HEADER_SIZE 32
2048
2049static void
2050dump_label_uberblocks(vdev_label_t *lbl, uint64_t ashift)
2051{
2052 vdev_t vd;
2053 vdev_t *vdp = &vd;
2054 char header[ZDB_MAX_UB_HEADER_SIZE];
2055
2056 vd.vdev_ashift = ashift;
2057 vdp->vdev_top = vdp;
2058
2059 for (int i = 0; i < VDEV_UBERBLOCK_COUNT(vdp); i++) {
2060 uint64_t uoff = VDEV_UBERBLOCK_OFFSET(vdp, i);
2061 uberblock_t *ub = (void *)((char *)lbl + uoff);
2062
2063 if (uberblock_verify(ub))
2064 continue;
2065 (void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE,
2066 "Uberblock[%d]\n", i);
2067 dump_uberblock(ub, header, "");
2068 }
2069}
2070
2071static void
2072dump_label(const char *dev)
2073{
2074 int fd;
2075 vdev_label_t label;
2076 char *path, *buf = label.vl_vdev_phys.vp_nvlist;
2077 size_t buflen = sizeof (label.vl_vdev_phys.vp_nvlist);
2078 struct stat64 statbuf;
2079 uint64_t psize, ashift;
2080 int len = strlen(dev) + 1;
2081
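	/*
	 * Labels must be read from the raw (character) device, so map a
	 * /dev/dsk/ path to its /dev/rdsk/ counterpart before opening.
	 */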
2082 if (strncmp(dev, "/dev/dsk/", 9) == 0) {
2083 len++;
2084 path = malloc(len);
2085 (void) snprintf(path, len, "%s%s", "/dev/rdsk/", dev + 9);
2086 } else {
2087 path = strdup(dev);
2088 }
2089
2090 if ((fd = open64(path, O_RDONLY)) < 0) {
2091 (void) printf("cannot open '%s': %s\n", path, strerror(errno));
2092 free(path);
2093 exit(1);
2094 }
2095
2096 if (fstat64(fd, &statbuf) != 0) {
2097 (void) printf("failed to stat '%s': %s\n", path,
2098 strerror(errno));
2099 free(path);
2100 (void) close(fd);
2101 exit(1);
2102 }
2103
2104 if (S_ISBLK(statbuf.st_mode)) {
2105 (void) printf("cannot use '%s': character device required\n",
2106 path);
2107 free(path);
2108 (void) close(fd);
2109 exit(1);
2110 }
2111
2112 psize = statbuf.st_size;
2113 psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t));
2114
2115 for (int l = 0; l < VDEV_LABELS; l++) {
2116 nvlist_t *config = NULL;
2117
2118 (void) printf("--------------------------------------------\n");
2119 (void) printf("LABEL %d\n", l);
2120 (void) printf("--------------------------------------------\n");
2121
2122 if (pread64(fd, &label, sizeof (label),
2123 vdev_label_offset(psize, l, 0)) != sizeof (label)) {
2124 (void) printf("failed to read label %d\n", l);
2125 continue;
2126 }
2127
2128 if (nvlist_unpack(buf, buflen, &config, 0) != 0) {
2129 (void) printf("failed to unpack label %d\n", l);
2130 ashift = SPA_MINBLOCKSHIFT;
2131 } else {
2132 nvlist_t *vdev_tree = NULL;
2133
2134 dump_nvlist(config, 4);
2135 if ((nvlist_lookup_nvlist(config,
2136 ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) ||
2137 (nvlist_lookup_uint64(vdev_tree,
2138 ZPOOL_CONFIG_ASHIFT, &ashift) != 0))
2139 ashift = SPA_MINBLOCKSHIFT;
2140 nvlist_free(config);
2141 }
2142 if (dump_opt['u'])
2143 dump_label_uberblocks(&label, ashift);
2144 }
2145
2146 free(path);
2147 (void) close(fd);
2148}
2149
2150static uint64_t num_large_blocks;
2151
2152/*ARGSUSED*/
2153static int
2154dump_one_dir(const char *dsname, void *arg)
2155{
2156 int error;
2157 objset_t *os;
2158
2159 error = dmu_objset_own(dsname, DMU_OST_ANY, B_TRUE, FTAG, &os);
2160 if (error) {
2161 (void) printf("Could not open %s, error %d\n", dsname, error);
2162 return (0);
2163 }
2164 if (dmu_objset_ds(os)->ds_large_blocks)
2165 num_large_blocks++;
2166 dump_dir(os);
2167 dmu_objset_disown(os, FTAG);
2168 fuid_table_destroy();
2169 sa_loaded = B_FALSE;
2170 return (0);
2171}
2172
2173/*
2174 * Block statistics.
2175 */
2176#define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2)
2177typedef struct zdb_blkstats {
2178 uint64_t zb_asize;
2179 uint64_t zb_lsize;
2180 uint64_t zb_psize;
2181 uint64_t zb_count;
2182 uint64_t zb_gangs;
2183 uint64_t zb_ditto_samevdev;
2184 uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE];
2185} zdb_blkstats_t;
2186
2187/*
2188 * Extended object types to report deferred frees and dedup auto-ditto blocks.
2189 */
2190#define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0)
2191#define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1)
2192#define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2)
2193#define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3)
2194
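/*
 * Names for the extended types above; index with (type - DMU_OT_NUMTYPES),
 * as dump_block_stats() does when printing the per-type summary.
 */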
2195static char *zdb_ot_extname[] = {
2196 "deferred free",
2197 "dedup ditto",
2198 "other",
2199 "Total",
2200};
2201
2202#define ZB_TOTAL DN_MAX_LEVELS
2203
2204typedef struct zdb_cb {
2205 zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1];
2206 uint64_t zcb_dedup_asize;
2207 uint64_t zcb_dedup_blocks;
2208 uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES];
2209 uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES]
2210 [BPE_PAYLOAD_SIZE];
2211 uint64_t zcb_start;
2212 uint64_t zcb_lastprint;
2213 uint64_t zcb_totalasize;
2214 uint64_t zcb_errors[256];
2215 int zcb_readfails;
2216 int zcb_haderrors;
2217 spa_t *zcb_spa;
2218} zdb_cb_t;
2219
2220static void
2221zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp,
2222 dmu_object_type_t type)
2223{
2224 uint64_t refcnt = 0;
2225
2226 ASSERT(type < ZDB_OT_TOTAL);
2227
2228 if (zilog && zil_bp_tree_add(zilog, bp) != 0)
2229 return;
2230
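	/*
	 * Tally each block four ways: (level, type), (level, total),
	 * (total, type) and (total, total), so the per-level and
	 * per-type summaries fall out of the same table.
	 */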
2231 for (int i = 0; i < 4; i++) {
2232 int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL;
2233 int t = (i & 1) ? type : ZDB_OT_TOTAL;
2234 int equal;
2235 zdb_blkstats_t *zb = &zcb->zcb_type[l][t];
2236
2237 zb->zb_asize += BP_GET_ASIZE(bp);
2238 zb->zb_lsize += BP_GET_LSIZE(bp);
2239 zb->zb_psize += BP_GET_PSIZE(bp);
2240 zb->zb_count++;
2241
2242 /*
2243 * The histogram is only big enough to record blocks up to
2244 * SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last,
2245 * "other", bucket.
2246 */
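		/*
		 * For example, with SPA_MINBLOCKSHIFT == 9 a 4K block
		 * lands in bucket 8 (4096 >> 9), while anything larger
		 * than SPA_OLD_MAXBLOCKSIZE is clamped to the last bucket.
		 */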
2247 int idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT;
2248 idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1);
2249 zb->zb_psize_histogram[idx]++;
2250
2251 zb->zb_gangs += BP_COUNT_GANG(bp);
2252
2253 switch (BP_GET_NDVAS(bp)) {
2254 case 2:
2255 if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
2256 DVA_GET_VDEV(&bp->blk_dva[1]))
2257 zb->zb_ditto_samevdev++;
2258 break;
2259 case 3:
2260 equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
2261 DVA_GET_VDEV(&bp->blk_dva[1])) +
2262 (DVA_GET_VDEV(&bp->blk_dva[0]) ==
2263 DVA_GET_VDEV(&bp->blk_dva[2])) +
2264 (DVA_GET_VDEV(&bp->blk_dva[1]) ==
2265 DVA_GET_VDEV(&bp->blk_dva[2]));
2266 if (equal != 0)
2267 zb->zb_ditto_samevdev++;
2268 break;
2269 }
2270
2271 }
2272
2273 if (BP_IS_EMBEDDED(bp)) {
2274 zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++;
2275 zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)]
2276 [BPE_GET_PSIZE(bp)]++;
2277 return;
2278 }
2279
2280 if (dump_opt['L'])
2281 return;
2282
2283 if (BP_GET_DEDUP(bp)) {
2284 ddt_t *ddt;
2285 ddt_entry_t *dde;
2286
2287 ddt = ddt_select(zcb->zcb_spa, bp);
2288 ddt_enter(ddt);
2289 dde = ddt_lookup(ddt, bp, B_FALSE);
2290
2291 if (dde == NULL) {
2292 refcnt = 0;
2293 } else {
2294 ddt_phys_t *ddp = ddt_phys_select(dde, bp);
2295 ddt_phys_decref(ddp);
2296 refcnt = ddp->ddp_refcnt;
2297 if (ddt_phys_total_refcnt(dde) == 0)
2298 ddt_remove(ddt, dde);
2299 }
2300 ddt_exit(ddt);
2301 }
2302
2303 VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa,
2304 refcnt ? 0 : spa_first_txg(zcb->zcb_spa),
2305 bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0);
2306}
2307
2308/* ARGSUSED */
2309static void
2310zdb_blkptr_done(zio_t *zio)
2311{
2312 spa_t *spa = zio->io_spa;
2313 blkptr_t *bp = zio->io_bp;
2314 int ioerr = zio->io_error;
2315 zdb_cb_t *zcb = zio->io_private;
2316 zbookmark_phys_t *zb = &zio->io_bookmark;
2317
2318 zio_data_buf_free(zio->io_data, zio->io_size);
2319
2320 mutex_enter(&spa->spa_scrub_lock);
2321 spa->spa_scrub_inflight--;
2322 cv_broadcast(&spa->spa_scrub_io_cv);
2323
2324 if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
2325 char blkbuf[BP_SPRINTF_LEN];
2326
2327 zcb->zcb_haderrors = 1;
2328 zcb->zcb_errors[ioerr]++;
2329
2330 if (dump_opt['b'] >= 2)
2331 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
2332 else
2333 blkbuf[0] = '\0';
2334
2335 (void) printf("zdb_blkptr_cb: "
2336 "Got error %d reading "
2337 "<%llu, %llu, %lld, %llx> %s -- skipping\n",
2338 ioerr,
2339 (u_longlong_t)zb->zb_objset,
2340 (u_longlong_t)zb->zb_object,
2341 (u_longlong_t)zb->zb_level,
2342 (u_longlong_t)zb->zb_blkid,
2343 blkbuf);
2344 }
2345 mutex_exit(&spa->spa_scrub_lock);
2346}
2347
2348/* ARGSUSED */
2349static int
2350zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
2351 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
2352{
2353 zdb_cb_t *zcb = arg;
2354 dmu_object_type_t type;
2355 boolean_t is_metadata;
2356
2357 if (dump_opt['b'] >= 5 && bp->blk_birth > 0) {
2358 char blkbuf[BP_SPRINTF_LEN];
2359 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
2360 (void) printf("objset %llu object %llu "
2361 "level %lld offset 0x%llx %s\n",
2362 (u_longlong_t)zb->zb_objset,
2363 (u_longlong_t)zb->zb_object,
2364 (longlong_t)zb->zb_level,
2365 (u_longlong_t)blkid2offset(dnp, bp, zb),
2366 blkbuf);
2367 }
2368
2369 if (BP_IS_HOLE(bp))
2370 return (0);
2371
2372 type = BP_GET_TYPE(bp);
2373
2374 zdb_count_block(zcb, zilog, bp,
2375 (type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type);
2376
2377 is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type));
2378
2379 if (!BP_IS_EMBEDDED(bp) &&
2380 (dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) {
2381 size_t size = BP_GET_PSIZE(bp);
2382 void *data = zio_data_buf_alloc(size);
2383 int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW;
2384
2385 /* If it's an intent log block, failure is expected. */
2386 if (zb->zb_level == ZB_ZIL_LEVEL)
2387 flags |= ZIO_FLAG_SPECULATIVE;
2388
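		/*
		 * Throttle reads: allow at most max_inflight (settable
		 * with -I) concurrent scrub I/Os before issuing more.
		 */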
2389 mutex_enter(&spa->spa_scrub_lock);
2390 while (spa->spa_scrub_inflight > max_inflight)
2391 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2392 spa->spa_scrub_inflight++;
2393 mutex_exit(&spa->spa_scrub_lock);
2394
2395 zio_nowait(zio_read(NULL, spa, bp, data, size,
2396 zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb));
2397 }
2398
2399 zcb->zcb_readfails = 0;
2400
2401 /* only call gethrtime() every 100 blocks */
2402 static int iters;
2403 if (++iters > 100)
2404 iters = 0;
2405 else
2406 return (0);
2407
2408 if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) {
2409 uint64_t now = gethrtime();
2410 char buf[10];
2411 uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize;
2412 int kb_per_sec =
2413 1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000));
2414 int sec_remaining =
2415 (zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec;
2416
2417 zfs_nicenum(bytes, buf, sizeof (buf));
2418 (void) fprintf(stderr,
2419 "\r%5s completed (%4dMB/s) "
2420 "estimated time remaining: %uhr %02umin %02usec ",
2421 buf, kb_per_sec / 1024,
2422 sec_remaining / 60 / 60,
2423 sec_remaining / 60 % 60,
2424 sec_remaining % 60);
2425
2426 zcb->zcb_lastprint = now;
2427 }
2428
2429 return (0);
2430}
2431
2432static void
2433zdb_leak(void *arg, uint64_t start, uint64_t size)
2434{
2435 vdev_t *vd = arg;
2436
2437 (void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n",
2438 (u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size);
2439}
2440
2441static metaslab_ops_t zdb_metaslab_ops = {
2442 NULL /* alloc */
2443};
2444
2445static void
2446zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb)
2447{
2448 ddt_bookmark_t ddb = { 0 };
2449 ddt_entry_t dde;
2450 int error;
2451
2452 while ((error = ddt_walk(spa, &ddb, &dde)) == 0) {
2453 blkptr_t blk;
2454 ddt_phys_t *ddp = dde.dde_phys;
2455
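		/*
		 * The walk visits the duplicate classes before the unique
		 * class; unique entries have a single reference and add
		 * nothing to dedup accounting, so stop once we reach them.
		 */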
2456 if (ddb.ddb_class == DDT_CLASS_UNIQUE)
2457 return;
2458
2459 ASSERT(ddt_phys_total_refcnt(&dde) > 1);
2460
2461 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
2462 if (ddp->ddp_phys_birth == 0)
2463 continue;
2464 ddt_bp_create(ddb.ddb_checksum,
2465 &dde.dde_key, ddp, &blk);
2466 if (p == DDT_PHYS_DITTO) {
2467 zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO);
2468 } else {
2469 zcb->zcb_dedup_asize +=
2470 BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1);
2471 zcb->zcb_dedup_blocks++;
2472 }
2473 }
2474 if (!dump_opt['L']) {
2475 ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum];
2476 ddt_enter(ddt);
2477 VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL);
2478 ddt_exit(ddt);
2479 }
2480 }
2481
2482 ASSERT(error == ENOENT);
2483}
2484
2485static void
2486zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
2487{
2488 zcb->zcb_spa = spa;
2489
2490 if (!dump_opt['L']) {
2491 vdev_t *rvd = spa->spa_root_vdev;
2492 for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2493 vdev_t *vd = rvd->vdev_child[c];
2494 for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
2495 metaslab_t *msp = vd->vdev_ms[m];
2496 mutex_enter(&msp->ms_lock);
2497 metaslab_unload(msp);
2498
2499 /*
2500 * For leak detection, we overload the metaslab
2501 * ms_tree to contain allocated segments
2502 * instead of free segments. As a result,
2503 * we can't use the normal metaslab_load/unload
2504 * interfaces.
2505 */
2506 if (msp->ms_sm != NULL) {
2507 (void) fprintf(stderr,
2508 "\rloading space map for "
2509 "vdev %llu of %llu, "
2510 "metaslab %llu of %llu ...",
2511 (longlong_t)c,
2512 (longlong_t)rvd->vdev_children,
2513 (longlong_t)m,
2514 (longlong_t)vd->vdev_ms_count);
2515
2516 msp->ms_ops = &zdb_metaslab_ops;
2517
2518 /*
2519 * We don't want to spend the CPU
2520 * manipulating the size-ordered
2521 * tree, so clear the range_tree
2522 * ops.
2523 */
2524 msp->ms_tree->rt_ops = NULL;
2525 VERIFY0(space_map_load(msp->ms_sm,
2526 msp->ms_tree, SM_ALLOC));
2527 msp->ms_loaded = B_TRUE;
2528 }
2529 mutex_exit(&msp->ms_lock);
2530 }
2531 }
2532 (void) fprintf(stderr, "\n");
2533 }
2534
2535 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
2536
2537 zdb_ddt_leak_init(spa, zcb);
2538
2539 spa_config_exit(spa, SCL_CONFIG, FTAG);
2540}
2541
2542static void
2543zdb_leak_fini(spa_t *spa)
2544{
2545 if (!dump_opt['L']) {
2546 vdev_t *rvd = spa->spa_root_vdev;
2547 for (int c = 0; c < rvd->vdev_children; c++) {
2548 vdev_t *vd = rvd->vdev_child[c];
2549 for (int m = 0; m < vd->vdev_ms_count; m++) {
2550 metaslab_t *msp = vd->vdev_ms[m];
2551 mutex_enter(&msp->ms_lock);
2552
2553 /*
2554 * The ms_tree has been overloaded to
2555 * contain allocated segments. Now that we
2556 * finished traversing all blocks, any
2557 * block that remains in the ms_tree
2558 * represents an allocated block that we
2559 * did not claim during the traversal.
2560 * Claimed blocks would have been removed
2561 * from the ms_tree.
2562 */
2563 range_tree_vacate(msp->ms_tree, zdb_leak, vd);
2564 msp->ms_loaded = B_FALSE;
2565
2566 mutex_exit(&msp->ms_lock);
2567 }
2568 }
2569 }
2570}
2571
2572/* ARGSUSED */
2573static int
2574count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
2575{
2576 zdb_cb_t *zcb = arg;
2577
2578 if (dump_opt['b'] >= 5) {
2579 char blkbuf[BP_SPRINTF_LEN];
2580 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
2581 (void) printf("[%s] %s\n",
2582 "deferred free", blkbuf);
2583 }
2584 zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED);
2585 return (0);
2586}
2587
2588static int
2589dump_block_stats(spa_t *spa)
2590{
2591 zdb_cb_t zcb = { 0 };
2592 zdb_blkstats_t *zb, *tzb;
2593 uint64_t norm_alloc, norm_space, total_alloc, total_found;
2594 int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | TRAVERSE_HARD;
2595 boolean_t leaks = B_FALSE;
2596
2597 (void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n",
2598 (dump_opt['c'] || !dump_opt['L']) ? "to verify " : "",
2599 (dump_opt['c'] == 1) ? "metadata " : "",
2600 dump_opt['c'] ? "checksums " : "",
2601 (dump_opt['c'] && !dump_opt['L']) ? "and verify " : "",
2602 !dump_opt['L'] ? "nothing leaked " : "");
2603
2604 /*
2605 * Load all space maps as SM_ALLOC maps, then traverse the pool
2606 * claiming each block we discover. If the pool is perfectly
2607 * consistent, the space maps will be empty when we're done.
2608 * Anything left over is a leak; any block we can't claim (because
2609 * it's not part of any space map) is a double allocation,
2610 * reference to a freed block, or an unclaimed log block.
2611 */
2612 zdb_leak_init(spa, &zcb);
2613
2614 /*
2615 * If there's a deferred-free bplist, process that first.
2616 */
2617 (void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj,
2618 count_block_cb, &zcb, NULL);
2619 if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
2620 (void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj,
2621 count_block_cb, &zcb, NULL);
2622 }
2623 if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
2624 VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset,
2625 spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb,
2626 &zcb, NULL));
2627 }
2628
2629 if (dump_opt['c'] > 1)
2630 flags |= TRAVERSE_PREFETCH_DATA;
2631
2632 zcb.zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa));
2633 zcb.zcb_start = zcb.zcb_lastprint = gethrtime();
2634 zcb.zcb_haderrors |= traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb);
2635
2636 /*
2637 * If we've traversed the data blocks then we need to wait for those
2638 * I/Os to complete. We leverage "The Godfather" zio to wait on
2639 * all async I/Os to complete.
2640 */
2641 if (dump_opt['c']) {
2642 for (int i = 0; i < max_ncpus; i++) {
2643 (void) zio_wait(spa->spa_async_zio_root[i]);
2644 spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
2645 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2646 ZIO_FLAG_GODFATHER);
2647 }
2648 }
2649
2650 if (zcb.zcb_haderrors) {
2651 (void) printf("\nError counts:\n\n");
2652 (void) printf("\t%5s %s\n", "errno", "count");
2653 for (int e = 0; e < 256; e++) {
2654 if (zcb.zcb_errors[e] != 0) {
2655 (void) printf("\t%5d %llu\n",
2656 e, (u_longlong_t)zcb.zcb_errors[e]);
2657 }
2658 }
2659 }
2660
2661 /*
2662 * Report any leaked segments.
2663 */
2664 zdb_leak_fini(spa);
2665
2666 tzb = &zcb.zcb_type[ZB_TOTAL][ZDB_OT_TOTAL];
2667
2668 norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
2669 norm_space = metaslab_class_get_space(spa_normal_class(spa));
2670
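	/*
	 * The traversal counts a dedup'd block once per reference, while
	 * the space maps charge it only once, so back out the extra
	 * references (accumulated in zcb_dedup_asize) before comparing.
	 */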
2671 total_alloc = norm_alloc + metaslab_class_get_alloc(spa_log_class(spa));
2672 total_found = tzb->zb_asize - zcb.zcb_dedup_asize;
2673
2674 if (total_found == total_alloc) {
2675 if (!dump_opt['L'])
2676 (void) printf("\n\tNo leaks (block sum matches space"
2677 " maps exactly)\n");
2678 } else {
2679 (void) printf("block traversal size %llu != alloc %llu "
2680 "(%s %lld)\n",
2681 (u_longlong_t)total_found,
2682 (u_longlong_t)total_alloc,
2683 (dump_opt['L']) ? "unreachable" : "leaked",
2684 (longlong_t)(total_alloc - total_found));
2685 leaks = B_TRUE;
2686 }
2687
2688 if (tzb->zb_count == 0)
2689 return (2);
2690
2691 (void) printf("\n");
2692 (void) printf("\tbp count: %10llu\n",
2693 (u_longlong_t)tzb->zb_count);
2694 (void) printf("\tganged count: %10llu\n",
2695 (longlong_t)tzb->zb_gangs);
2696 (void) printf("\tbp logical: %10llu avg: %6llu\n",
2697 (u_longlong_t)tzb->zb_lsize,
2698 (u_longlong_t)(tzb->zb_lsize / tzb->zb_count));
2699 (void) printf("\tbp physical: %10llu avg:"
2700 " %6llu compression: %6.2f\n",
2701 (u_longlong_t)tzb->zb_psize,
2702 (u_longlong_t)(tzb->zb_psize / tzb->zb_count),
2703 (double)tzb->zb_lsize / tzb->zb_psize);
2704 (void) printf("\tbp allocated: %10llu avg:"
2705 " %6llu compression: %6.2f\n",
2706 (u_longlong_t)tzb->zb_asize,
2707 (u_longlong_t)(tzb->zb_asize / tzb->zb_count),
2708 (double)tzb->zb_lsize / tzb->zb_asize);
2709 (void) printf("\tbp deduped: %10llu ref>1:"
2710 " %6llu deduplication: %6.2f\n",
2711 (u_longlong_t)zcb.zcb_dedup_asize,
2712 (u_longlong_t)zcb.zcb_dedup_blocks,
2713 (double)zcb.zcb_dedup_asize / tzb->zb_asize + 1.0);
2714 (void) printf("\tSPA allocated: %10llu used: %5.2f%%\n",
2715 (u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space);
2716
2717 for (bp_embedded_type_t i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) {
2718 if (zcb.zcb_embedded_blocks[i] == 0)
2719 continue;
2720 (void) printf("\n");
2721 (void) printf("\tadditional, non-pointer bps of type %u: "
2722 "%10llu\n",
2723 i, (u_longlong_t)zcb.zcb_embedded_blocks[i]);
2724
2725 if (dump_opt['b'] >= 3) {
2726 (void) printf("\t number of (compressed) bytes: "
2727 "number of bps\n");
2728 dump_histogram(zcb.zcb_embedded_histogram[i],
2729 sizeof (zcb.zcb_embedded_histogram[i]) /
2730 sizeof (zcb.zcb_embedded_histogram[i][0]), 0);
2731 }
2732 }
2733
2734 if (tzb->zb_ditto_samevdev != 0) {
2735 (void) printf("\tDittoed blocks on same vdev: %llu\n",
2736 (longlong_t)tzb->zb_ditto_samevdev);
2737 }
2738
2739 if (dump_opt['b'] >= 2) {
2740 int l, t, level;
2741 (void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE"
2742 "\t avg\t comp\t%%Total\tType\n");
2743
2744 for (t = 0; t <= ZDB_OT_TOTAL; t++) {
2745 char csize[32], lsize[32], psize[32], asize[32];
2746 char avg[32], gang[32];
2747 char *typename;
2748
2749 if (t < DMU_OT_NUMTYPES)
2750 typename = dmu_ot[t].ot_name;
2751 else
2752 typename = zdb_ot_extname[t - DMU_OT_NUMTYPES];
2753
2754 if (zcb.zcb_type[ZB_TOTAL][t].zb_asize == 0) {
2755 (void) printf("%6s\t%5s\t%5s\t%5s"
2756 "\t%5s\t%5s\t%6s\t%s\n",
2757 "-",
2758 "-",
2759 "-",
2760 "-",
2761 "-",
2762 "-",
2763 "-",
2764 typename);
2765 continue;
2766 }
2767
2768 for (l = ZB_TOTAL - 1; l >= -1; l--) {
2769 level = (l == -1 ? ZB_TOTAL : l);
2770 zb = &zcb.zcb_type[level][t];
2771
2772 if (zb->zb_asize == 0)
2773 continue;
2774
2775 if (dump_opt['b'] < 3 && level != ZB_TOTAL)
2776 continue;
2777
2778 if (level == 0 && zb->zb_asize ==
2779 zcb.zcb_type[ZB_TOTAL][t].zb_asize)
2780 continue;
2781
2782 zdb_nicenum(zb->zb_count, csize);
2783 zdb_nicenum(zb->zb_lsize, lsize);
2784 zdb_nicenum(zb->zb_psize, psize);
2785 zdb_nicenum(zb->zb_asize, asize);
2786 zdb_nicenum(zb->zb_asize / zb->zb_count, avg);
2787 zdb_nicenum(zb->zb_gangs, gang);
2788
2789 (void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
2790 "\t%5.2f\t%6.2f\t",
2791 csize, lsize, psize, asize, avg,
2792 (double)zb->zb_lsize / zb->zb_psize,
2793 100.0 * zb->zb_asize / tzb->zb_asize);
2794
2795 if (level == ZB_TOTAL)
2796 (void) printf("%s\n", typename);
2797 else
2798 (void) printf(" L%d %s\n",
2799 level, typename);
2800
2801 if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) {
2802 (void) printf("\t number of ganged "
2803 "blocks: %s\n", gang);
2804 }
2805
2806 if (dump_opt['b'] >= 4) {
2807 (void) printf("psize "
2808 "(in 512-byte sectors): "
2809 "number of blocks\n");
2810 dump_histogram(zb->zb_psize_histogram,
2811 PSIZE_HISTO_SIZE, 0);
2812 }
2813 }
2814 }
2815 }
2816
2817 (void) printf("\n");
2818
2819 if (leaks)
2820 return (2);
2821
2822 if (zcb.zcb_haderrors)
2823 return (3);
2824
2825 return (0);
2826}
2827
2828typedef struct zdb_ddt_entry {
2829 ddt_key_t zdde_key;
2830 uint64_t zdde_ref_blocks;
2831 uint64_t zdde_ref_lsize;
2832 uint64_t zdde_ref_psize;
2833 uint64_t zdde_ref_dsize;
2834 avl_node_t zdde_node;
2835} zdb_ddt_entry_t;
2836
2837/* ARGSUSED */
2838static int
2839zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
2840 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
2841{
2842 avl_tree_t *t = arg;
2843 avl_index_t where;
2844 zdb_ddt_entry_t *zdde, zdde_search;
2845
2846 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
2847 return (0);
2848
2849 if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) {
2850 (void) printf("traversing objset %llu, %llu objects, "
2851 "%lu blocks so far\n",
2852 (u_longlong_t)zb->zb_objset,
2853 (u_longlong_t)BP_GET_FILL(bp),
2854 avl_numnodes(t));
2855 }
2856
2857 if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF ||
2858 BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
2859 return (0);
2860
2861 ddt_key_fill(&zdde_search.zdde_key, bp);
2862
2863 zdde = avl_find(t, &zdde_search, &where);
2864
2865 if (zdde == NULL) {
2866 zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL);
2867 zdde->zdde_key = zdde_search.zdde_key;
2868 avl_insert(t, zdde, where);
2869 }
2870
2871 zdde->zdde_ref_blocks += 1;
2872 zdde->zdde_ref_lsize += BP_GET_LSIZE(bp);
2873 zdde->zdde_ref_psize += BP_GET_PSIZE(bp);
2874 zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp);
2875
2876 return (0);
2877}
2878
2879static void
2880dump_simulated_ddt(spa_t *spa)
2881{
2882 avl_tree_t t;
2883 void *cookie = NULL;
2884 zdb_ddt_entry_t *zdde;
2885 ddt_histogram_t ddh_total = { 0 };
2886 ddt_stat_t dds_total = { 0 };
2887
2888 avl_create(&t, ddt_entry_compare,
2889 sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node));
2890
2891 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
2892
2893 (void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
2894 zdb_ddt_add_cb, &t);
2895
2896 spa_config_exit(spa, SCL_CONFIG, FTAG);
2897
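	/*
	 * Each AVL node represents one unique block.  Fold its totals into
	 * the histogram bucket for floor(log2(refcnt)), then derive the
	 * aggregate stats and dedup ratio from that histogram.
	 */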
2898 while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) {
2899 ddt_stat_t dds;
2900 uint64_t refcnt = zdde->zdde_ref_blocks;
2901 ASSERT(refcnt != 0);
2902
2903 dds.dds_blocks = zdde->zdde_ref_blocks / refcnt;
2904 dds.dds_lsize = zdde->zdde_ref_lsize / refcnt;
2905 dds.dds_psize = zdde->zdde_ref_psize / refcnt;
2906 dds.dds_dsize = zdde->zdde_ref_dsize / refcnt;
2907
2908 dds.dds_ref_blocks = zdde->zdde_ref_blocks;
2909 dds.dds_ref_lsize = zdde->zdde_ref_lsize;
2910 dds.dds_ref_psize = zdde->zdde_ref_psize;
2911 dds.dds_ref_dsize = zdde->zdde_ref_dsize;
2912
2913 ddt_stat_add(&ddh_total.ddh_stat[highbit64(refcnt) - 1],
2914 &dds, 0);
2915
2916 umem_free(zdde, sizeof (*zdde));
2917 }
2918
2919 avl_destroy(&t);
2920
2921 ddt_histogram_stat(&dds_total, &ddh_total);
2922
2923 (void) printf("Simulated DDT histogram:\n");
2924
2925 zpool_dump_ddt(&dds_total, &ddh_total);
2926
2927 dump_dedup_ratio(&dds_total);
2928}
2929
2930static void
2931dump_zpool(spa_t *spa)
2932{
2933 dsl_pool_t *dp = spa_get_dsl(spa);
2934 int rc = 0;
2935
2936 if (dump_opt['S']) {
2937 dump_simulated_ddt(spa);
2938 return;
2939 }
2940
2941 if (!dump_opt['e'] && dump_opt['C'] > 1) {
2942 (void) printf("\nCached configuration:\n");
2943 dump_nvlist(spa->spa_config, 8);
2944 }
2945
2946 if (dump_opt['C'])
2947 dump_config(spa);
2948
2949 if (dump_opt['u'])
2950 dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n");
2951
2952 if (dump_opt['D'])
2953 dump_all_ddts(spa);
2954
2955 if (dump_opt['d'] > 2 || dump_opt['m'])
2956 dump_metaslabs(spa);
2957 if (dump_opt['M'])
2958 dump_metaslab_groups(spa);
2959
2960 if (dump_opt['d'] || dump_opt['i']) {
2961 uint64_t refcount;
2962 dump_dir(dp->dp_meta_objset);
2963 if (dump_opt['d'] >= 3) {
2964 dump_bpobj(&spa->spa_deferred_bpobj,
2965 "Deferred frees", 0);
2966 if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
2967 dump_bpobj(&spa->spa_dsl_pool->dp_free_bpobj,
2968 "Pool snapshot frees", 0);
2969 }
2970
2971 if (spa_feature_is_active(spa,
2972 SPA_FEATURE_ASYNC_DESTROY)) {
2973 dump_bptree(spa->spa_meta_objset,
2974 spa->spa_dsl_pool->dp_bptree_obj,
2975 "Pool dataset frees");
2976 }
2977 dump_dtl(spa->spa_root_vdev, 0);
2978 }
2979 (void) dmu_objset_find(spa_name(spa), dump_one_dir,
2980 NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
2981
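		/*
		 * Cross-check: the number of datasets found above that use
		 * large blocks must equal the feature's on-disk refcount.
		 */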
2982 (void) feature_get_refcount(spa,
2983 &spa_feature_table[SPA_FEATURE_LARGE_BLOCKS], &refcount);
2984 if (num_large_blocks != refcount) {
2985 (void) printf("large_blocks feature refcount mismatch: "
2986 "expected %lld != actual %lld\n",
2987 (longlong_t)num_large_blocks,
2988 (longlong_t)refcount);
2989 rc = 2;
2990 } else {
2991 (void) printf("Verified large_blocks feature refcount "
2992 "is correct (%llu)\n", (longlong_t)refcount);
2993 }
2994 }
2995 if (rc == 0 && (dump_opt['b'] || dump_opt['c']))
2996 rc = dump_block_stats(spa);
2997
2998 if (rc == 0)
2999 rc = verify_spacemap_refcounts(spa);
3000
3001 if (dump_opt['s'])
3002 show_pool_stats(spa);
3003
3004 if (dump_opt['h'])
3005 dump_history(spa);
3006
3007 if (rc != 0)
3008 exit(rc);
3009}
3010
3011#define ZDB_FLAG_CHECKSUM 0x0001
3012#define ZDB_FLAG_DECOMPRESS 0x0002
3013#define ZDB_FLAG_BSWAP 0x0004
3014#define ZDB_FLAG_GBH 0x0008
3015#define ZDB_FLAG_INDIRECT 0x0010
3016#define ZDB_FLAG_PHYS 0x0020
3017#define ZDB_FLAG_RAW 0x0040
3018#define ZDB_FLAG_PRINT_BLKPTR 0x0080
3019
3020int flagbits[256];
3021
3022static void
3023zdb_print_blkptr(blkptr_t *bp, int flags)
3024{
3025 char blkbuf[BP_SPRINTF_LEN];
3026
3027 if (flags & ZDB_FLAG_BSWAP)
3028 byteswap_uint64_array((void *)bp, sizeof (blkptr_t));
3029
3030 snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
3031 (void) printf("%s\n", blkbuf);
3032}
3033
3034static void
3035zdb_dump_indirect(blkptr_t *bp, int nbps, int flags)
3036{
3037 int i;
3038
3039 for (i = 0; i < nbps; i++)
3040 zdb_print_blkptr(&bp[i], flags);
3041}
3042
3043static void
3044zdb_dump_gbh(void *buf, int flags)
3045{
3046 zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags);
3047}
3048
3049static void
3050zdb_dump_block_raw(void *buf, uint64_t size, int flags)
3051{
3052 if (flags & ZDB_FLAG_BSWAP)
3053 byteswap_uint64_array(buf, size);
3054 (void) write(1, buf, size);
3055}
3056
3057static void
3058zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
3059{
3060 uint64_t *d = (uint64_t *)buf;
3061 int nwords = size / sizeof (uint64_t);
3062 int do_bswap = !!(flags & ZDB_FLAG_BSWAP);
3063 int i, j;
3064 char *hdr, *c;
3065
3067 if (do_bswap)
3068 hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8";
3069 else
3070 hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f";
3071
3072 (void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr);
3073
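	/*
	 * Each line shows the byte offset, two 64-bit words in hex
	 * (byte-swapped if requested), and the same 16 bytes as ASCII.
	 */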
3074 for (i = 0; i < nwords; i += 2) {
3075 (void) printf("%06llx: %016llx %016llx ",
3076 (u_longlong_t)(i * sizeof (uint64_t)),
3077 (u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]),
3078 (u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1]));
3079
3080 c = (char *)&d[i];
3081 for (j = 0; j < 2 * sizeof (uint64_t); j++)
3082 (void) printf("%c", isprint(c[j]) ? c[j] : '.');
3083 (void) printf("\n");
3084 }
3085}
3086
3087/*
3088 * There are two acceptable formats:
3089 * leaf_name - For example: c1t0d0 or /tmp/ztest.0a
3090 * child[.child]* - For example: 0.1.1
3091 *
3092 * The second form can be used to specify arbitrary vdevs anywhere
3093 * in the hierarchy. For example, in a pool with a mirror of
3094 * RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 .
3095 */
3096static vdev_t *
3097zdb_vdev_lookup(vdev_t *vdev, char *path)
3098{
3099 char *s, *p, *q;
3100 int i;
3101
3102 if (vdev == NULL)
3103 return (NULL);
3104
3105 /* First, assume the x.x.x.x format */
3106 i = (int)strtoul(path, &s, 10);
3107 if (s == path || (s && *s != '.' && *s != '\0'))
3108 goto name;
3109 if (i < 0 || i >= vdev->vdev_children)
3110 return (NULL);
3111
3112 vdev = vdev->vdev_child[i];
3113 if (*s == '\0')
3114 return (vdev);
3115 return (zdb_vdev_lookup(vdev, s+1));
3116
3117name:
3118 for (i = 0; i < vdev->vdev_children; i++) {
3119 vdev_t *vc = vdev->vdev_child[i];
3120
3121 if (vc->vdev_path == NULL) {
3122 vc = zdb_vdev_lookup(vc, path);
3123 if (vc == NULL)
3124 continue;
3125 else
3126 return (vc);
3127 }
3128
3129 p = strrchr(vc->vdev_path, '/');
3130 p = p ? p + 1 : vc->vdev_path;
3131 q = &vc->vdev_path[strlen(vc->vdev_path) - 2];
3132
3133 if (strcmp(vc->vdev_path, path) == 0)
3134 return (vc);
3135 if (strcmp(p, path) == 0)
3136 return (vc);
3137 if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0)
3138 return (vc);
3139 }
3140
3141 return (NULL);
3142}
3143
3144/*
3145 * Read a block from a pool and print it out. The syntax of the
3146 * block descriptor is:
3147 *
3148 * pool:vdev_specifier:offset:size[:flags]
3149 *
3150 * pool - The name of the pool you wish to read from
3151 * vdev_specifier - Which vdev (see comment for zdb_vdev_lookup)
3152 * offset - offset, in hex, in bytes
3153 * size - Amount of data to read, in hex, in bytes
3154 * flags - A string of characters specifying options
3155 * b: Decode a blkptr at given offset within block
3156 * *c: Calculate and display checksums
3157 * d: Decompress data before dumping
3158 * e: Byteswap data before dumping
3159 * g: Display data as a gang block header
3160 * i: Display as an indirect block
3161 * p: Do I/O to physical offset
3162 * r: Dump raw data to stdout
3163 *
3164 * * = not yet implemented
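 *
 * For example (the pool name here is purely illustrative), the
 * descriptor "tank:0.0:1000:200:r" reads 0x200 bytes at offset 0x1000
 * from vdev 0.0 of pool "tank" and dumps them raw to stdout.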
3165 */
3166static void
3167zdb_read_block(char *thing, spa_t *spa)
3168{
3169 blkptr_t blk, *bp = &blk;
3170 dva_t *dva = bp->blk_dva;
3171 int flags = 0;
3172 uint64_t offset = 0, size = 0, psize = 0, lsize = 0, blkptr_offset = 0;
3173 zio_t *zio;
3174 vdev_t *vd;
3175 void *pbuf, *lbuf, *buf;
3176 char *s, *p, *dup, *vdev, *flagstr;
3177 int i, error;
3178
3179 dup = strdup(thing);
3180 s = strtok(dup, ":");
3181 vdev = s ? s : "";
3182 s = strtok(NULL, ":");
3183 offset = strtoull(s ? s : "", NULL, 16);
3184 s = strtok(NULL, ":");
3185 size = strtoull(s ? s : "", NULL, 16);
3186 s = strtok(NULL, ":");
3187 flagstr = s ? s : "";
3188
3189 s = NULL;
3190 if (size == 0)
3191 s = "size must not be zero";
3192 if (!IS_P2ALIGNED(size, DEV_BSIZE))
3193 s = "size must be a multiple of sector size";
3194 if (!IS_P2ALIGNED(offset, DEV_BSIZE))
3195 s = "offset must be a multiple of sector size";
3196 if (s) {
3197 (void) printf("Invalid block specifier: %s - %s\n", thing, s);
3198 free(dup);
3199 return;
3200 }
3201
3202 for (s = strtok(flagstr, ":"); s; s = strtok(NULL, ":")) {
3203 for (i = 0; flagstr[i]; i++) {
3204 int bit = flagbits[(uchar_t)flagstr[i]];
3205
3206 if (bit == 0) {
3207 (void) printf("***Invalid flag: %c\n",
3208 flagstr[i]);
3209 continue;
3210 }
3211 flags |= bit;
3212
3213 /* If it's not something with an argument, keep going */
3214 if ((bit & (ZDB_FLAG_CHECKSUM |
3215 ZDB_FLAG_PRINT_BLKPTR)) == 0)
3216 continue;
3217
3218 p = &flagstr[i + 1];
3219 if (bit == ZDB_FLAG_PRINT_BLKPTR)
3220 blkptr_offset = strtoull(p, &p, 16);
3221 if (*p != ':' && *p != '\0') {
3222 (void) printf("***Invalid flag arg: '%s'\n", s);
3223 free(dup);
3224 return;
3225 }
3226 i += p - &flagstr[i + 1]; /* skip over the number */
3227 }
3228 }
3229
3230 vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev);
3231 if (vd == NULL) {
3232 (void) printf("***Invalid vdev: %s\n", vdev);
3233 free(dup);
3234 return;
3235 } else {
3236 if (vd->vdev_path)
3237 (void) fprintf(stderr, "Found vdev: %s\n",
3238 vd->vdev_path);
3239 else
3240 (void) fprintf(stderr, "Found vdev type: %s\n",
3241 vd->vdev_ops->vdev_op_type);
3242 }
3243
3244 psize = size;
3245 lsize = size;
3246
3247 pbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
3248 lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
3249
3250 BP_ZERO(bp);
3251
3252 DVA_SET_VDEV(&dva[0], vd->vdev_id);
3253 DVA_SET_OFFSET(&dva[0], offset);
3254 DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH));
3255 DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize));
3256
3257 BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
3258
3259 BP_SET_LSIZE(bp, lsize);
3260 BP_SET_PSIZE(bp, psize);
3261 BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
3262 BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
3263 BP_SET_TYPE(bp, DMU_OT_NONE);
3264 BP_SET_LEVEL(bp, 0);
3265 BP_SET_DEDUP(bp, 0);
3266 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
3267
3268 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
3269 zio = zio_root(spa, NULL, NULL, 0);
3270
3271 if (vd == vd->vdev_top) {
3272 /*
3273 * Treat this as a normal block read.
3274 */
3275 zio_nowait(zio_read(zio, spa, bp, pbuf, psize, NULL, NULL,
3276 ZIO_PRIORITY_SYNC_READ,
3277 ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL));
3278 } else {
3279 /*
3280 * Treat this as a vdev child I/O.
3281 */
3282 zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pbuf, psize,
3283 ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ,
3284 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE |
3285 ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
3286 ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL, NULL));
3287 }
3288
3289 error = zio_wait(zio);
3290 spa_config_exit(spa, SCL_STATE, FTAG);
3291
3292 if (error) {
3293 (void) printf("Read of %s failed, error: %d\n", thing, error);
3294 goto out;
3295 }
3296
3297 if (flags & ZDB_FLAG_DECOMPRESS) {
3298 /*
3299 * We don't know how the data was compressed, so just try
3300 * every decompress function at every inflated blocksize.
3301 */
3302 enum zio_compress c;
3303 void *pbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
3304 void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
3305
3306 bcopy(pbuf, pbuf2, psize);
3307
3308 VERIFY(random_get_pseudo_bytes((uint8_t *)pbuf + psize,
3309 SPA_MAXBLOCKSIZE - psize) == 0);
3310
3311 VERIFY(random_get_pseudo_bytes((uint8_t *)pbuf2 + psize,
3312 SPA_MAXBLOCKSIZE - psize) == 0);
3313
3314 for (lsize = SPA_MAXBLOCKSIZE; lsize > psize;
3315 lsize -= SPA_MINBLOCKSIZE) {
3316 for (c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++) {
3317 if (zio_decompress_data(c, pbuf, lbuf,
3318 psize, lsize) == 0 &&
3319 zio_decompress_data(c, pbuf2, lbuf2,
3320 psize, lsize) == 0 &&
3321 bcmp(lbuf, lbuf2, lsize) == 0)
3322 break;
3323 }
3324			if (c != ZIO_COMPRESS_FUNCTIONS)
3325				break;
3327 }
3328
3329 umem_free(pbuf2, SPA_MAXBLOCKSIZE);
3330 umem_free(lbuf2, SPA_MAXBLOCKSIZE);
3331
3332 if (lsize <= psize) {
3333 (void) printf("Decompress of %s failed\n", thing);
3334 goto out;
3335 }
3336 buf = lbuf;
3337 size = lsize;
3338 } else {
3339 buf = pbuf;
3340 size = psize;
3341 }
3342
3343 if (flags & ZDB_FLAG_PRINT_BLKPTR)
3344 zdb_print_blkptr((blkptr_t *)(void *)
3345 ((uintptr_t)buf + (uintptr_t)blkptr_offset), flags);
3346 else if (flags & ZDB_FLAG_RAW)
3347 zdb_dump_block_raw(buf, size, flags);
3348 else if (flags & ZDB_FLAG_INDIRECT)
3349 zdb_dump_indirect((blkptr_t *)buf, size / sizeof (blkptr_t),
3350 flags);
3351 else if (flags & ZDB_FLAG_GBH)
3352 zdb_dump_gbh(buf, flags);
3353 else
3354 zdb_dump_block(thing, buf, size, flags);
3355
3356out:
3357 umem_free(pbuf, SPA_MAXBLOCKSIZE);
3358 umem_free(lbuf, SPA_MAXBLOCKSIZE);
3359 free(dup);
3360}
3361
3362static boolean_t
3363pool_match(nvlist_t *cfg, char *tgt)
3364{
3365 uint64_t v, guid = strtoull(tgt, NULL, 0);
3366 char *s;
3367
3368 if (guid != 0) {
3369 if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &v) == 0)
3370 return (v == guid);
3371 } else {
3372 if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &s) == 0)
3373 return (strcmp(s, tgt) == 0);
3374 }
3375 return (B_FALSE);
3376}
3377
3378static char *
3379find_zpool(char **target, nvlist_t **configp, int dirc, char **dirv)
3380{
3381 nvlist_t *pools;
3382 nvlist_t *match = NULL;
3383 char *name = NULL;
3384 char *sepp = NULL;
3385 char sep;
3386 int count = 0;
3387 importargs_t args = { 0 };
3388
3389 args.paths = dirc;
3390 args.path = dirv;
3391 args.can_be_active = B_TRUE;
3392
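	/*
	 * Temporarily cut the target at the first '/' or '@' so a dataset
	 * or snapshot name such as pool/fs or pool@snap is matched by its
	 * pool component alone; the separator is restored further down.
	 */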
3393 if ((sepp = strpbrk(*target, "/@")) != NULL) {
3394 sep = *sepp;
3395 *sepp = '\0';
3396 }
3397
3398 pools = zpool_search_import(g_zfs, &args);
3399
3400 if (pools != NULL) {
3401 nvpair_t *elem = NULL;
3402 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
3403 verify(nvpair_value_nvlist(elem, configp) == 0);
3404 if (pool_match(*configp, *target)) {
3405 count++;
3406 if (match != NULL) {
3407 /* print previously found config */
3408 if (name != NULL) {
3409 (void) printf("%s\n", name);
3410 dump_nvlist(match, 8);
3411 name = NULL;
3412 }
3413 (void) printf("%s\n",
3414 nvpair_name(elem));
3415 dump_nvlist(*configp, 8);
3416 } else {
3417 match = *configp;
3418 name = nvpair_name(elem);
3419 }
3420 }
3421 }
3422 }
3423 if (count > 1)
3424 (void) fatal("\tMatched %d pools - use pool GUID "
3425 "instead of pool name or \n"
3426 "\tpool name part of a dataset name to select pool", count);
3427
3428 if (sepp)
3429 *sepp = sep;
3430 /*
3431	 * If a pool GUID was given as the target, replace it with the pool name
3432 */
3433 if (name && (strstr(*target, name) != *target)) {
3434 int sz = 1 + strlen(name) + ((sepp) ? strlen(sepp) : 0);
3435
3436 *target = umem_alloc(sz, UMEM_NOFAIL);
3437 (void) snprintf(*target, sz, "%s%s", name, sepp ? sepp : "");
3438 }
3439
3440 *configp = name ? match : NULL;
3441
3442 return (name);
3443}
3444
3445int
3446main(int argc, char **argv)
3447{
3448 int i, c;
3449 struct rlimit rl = { 1024, 1024 };
3450 spa_t *spa = NULL;
3451 objset_t *os = NULL;
3452 int dump_all = 1;
3453 int verbose = 0;
3454 int error = 0;
3455 char **searchdirs = NULL;
3456 int nsearch = 0;
3457 char *target;
3458 nvlist_t *policy = NULL;
3459 uint64_t max_txg = UINT64_MAX;
3460 int rewind = ZPOOL_NEVER_REWIND;
3461
3462 (void) setrlimit(RLIMIT_NOFILE, &rl);
3463 (void) enable_extended_FILE_stdio(-1, -1);
3464
3465 dprintf_setup(&argc, argv);
3466
3467 while ((c = getopt(argc, argv,
3468 "bcdhilmMI:suCDRSAFLXx:evp:t:U:P")) != -1) {
3469 switch (c) {
3470 case 'b':
3471 case 'c':
3472 case 'd':
3473 case 'h':
3474 case 'i':
3475 case 'l':
3476 case 'm':
3477 case 's':
3478 case 'u':
3479 case 'C':
3480 case 'D':
3481 case 'M':
3482 case 'R':
3483 case 'S':
3484 dump_opt[c]++;
3485 dump_all = 0;
3486 break;
3487 case 'A':
3488 case 'F':
3489 case 'L':
3490 case 'X':
3491 case 'e':
3492 case 'P':
3493 dump_opt[c]++;
3494 break;
3495 case 'I':
3496 max_inflight = strtoull(optarg, NULL, 0);
3497 if (max_inflight == 0) {
3498 (void) fprintf(stderr, "maximum number "
3499 "of inflight I/Os must be greater "
3500 "than 0\n");
3501 usage();
3502 }
3503 break;
3504 case 'p':
3505 if (searchdirs == NULL) {
3506 searchdirs = umem_alloc(sizeof (char *),
3507 UMEM_NOFAIL);
3508 } else {
3509 char **tmp = umem_alloc((nsearch + 1) *
3510 sizeof (char *), UMEM_NOFAIL);
3511 bcopy(searchdirs, tmp, nsearch *
3512 sizeof (char *));
3513 umem_free(searchdirs,
3514 nsearch * sizeof (char *));
3515 searchdirs = tmp;
3516 }
3517 searchdirs[nsearch++] = optarg;
3518 break;
3519 case 't':
3520 max_txg = strtoull(optarg, NULL, 0);
3521 if (max_txg < TXG_INITIAL) {
3522 (void) fprintf(stderr, "incorrect txg "
3523 "specified: %s\n", optarg);
3524 usage();
3525 }
3526 break;
3527 case 'U':
3528 spa_config_path = optarg;
3529 break;
3530 case 'v':
3531 verbose++;
3532 break;
3533 case 'x':
3534 vn_dumpdir = optarg;
3535 break;
3536 default:
3537 usage();
3538 break;
3539 }
3540 }
3541
3542 if (!dump_opt['e'] && searchdirs != NULL) {
3543 (void) fprintf(stderr, "-p option requires use of -e\n");
3544 usage();
3545 }
3546
3547 /*
3548 * ZDB does not typically re-read blocks; therefore limit the ARC
3549 * to 256 MB, which can be used entirely for metadata.
3550 */
3551 zfs_arc_max = zfs_arc_meta_limit = 256 * 1024 * 1024;
3552
3553 /*
3554 * "zdb -c" uses checksum-verifying scrub i/os which are async reads.
3555 * "zdb -b" uses traversal prefetch which uses async reads.
3556 * For good performance, let several of them be active at once.
3557 */
3558 zfs_vdev_async_read_max_active = 10;
3559
3560 kernel_init(FREAD);
3561 g_zfs = libzfs_init();
3562 ASSERT(g_zfs != NULL);
3563
3564 if (dump_all)
3565 verbose = MAX(verbose, 1);
3566
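	/*
	 * With no specific dump option given, enable every dump letter
	 * except those in "elAFLRSXP", which modify behavior or select
	 * special modes rather than choose what to print.
	 */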
3567 for (c = 0; c < 256; c++) {
3568 if (dump_all && !strchr("elAFLRSXP", c))
3569 dump_opt[c] = 1;
3570 if (dump_opt[c])
3571 dump_opt[c] += verbose;
3572 }
3573
3574 aok = (dump_opt['A'] == 1) || (dump_opt['A'] > 2);
3575 zfs_recover = (dump_opt['A'] > 1);
3576
3577 argc -= optind;
3578 argv += optind;
3579
3580 if (argc < 2 && dump_opt['R'])
3581 usage();
3582 if (argc < 1) {
3583 if (!dump_opt['e'] && dump_opt['C']) {
3584 dump_cachefile(spa_config_path);
3585 return (0);
3586 }
3587 usage();
3588 }
3589
3590 if (dump_opt['l']) {
3591 dump_label(argv[0]);
3592 return (0);
3593 }
3594
3595 if (dump_opt['X'] || dump_opt['F'])
3596 rewind = ZPOOL_DO_REWIND |
3597 (dump_opt['X'] ? ZPOOL_EXTREME_REWIND : 0);
3598
3599 if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 ||
3600 nvlist_add_uint64(policy, ZPOOL_REWIND_REQUEST_TXG, max_txg) != 0 ||
3601 nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind) != 0)
3602 fatal("internal error: %s", strerror(ENOMEM));
3603
3604 error = 0;
3605 target = argv[0];
3606
3607 if (dump_opt['e']) {
3608 nvlist_t *cfg = NULL;
3609 char *name = find_zpool(&target, &cfg, nsearch, searchdirs);
3610
3611 error = ENOENT;
3612 if (name) {
3613 if (dump_opt['C'] > 1) {
3614 (void) printf("\nConfiguration for import:\n");
3615 dump_nvlist(cfg, 8);
3616 }
3617 if (nvlist_add_nvlist(cfg,
3618 ZPOOL_REWIND_POLICY, policy) != 0) {
3619 fatal("can't open '%s': %s",
3620 target, strerror(ENOMEM));
3621 }
3622 if ((error = spa_import(name, cfg, NULL,
3623 ZFS_IMPORT_MISSING_LOG)) != 0) {
3624 error = spa_import(name, cfg, NULL,
3625 ZFS_IMPORT_VERBATIM);
3626 }
3627 }
3628 }
3629
3630 if (error == 0) {
3631 if (strpbrk(target, "/@") == NULL || dump_opt['R']) {
3632 error = spa_open_rewind(target, &spa, FTAG, policy,
3633 NULL);
3634 if (error) {
3635 /*
3636 * If we're missing the log device then
3637 * try opening the pool after clearing the
3638 * log state.
3639 */
3640 mutex_enter(&spa_namespace_lock);
3641 if ((spa = spa_lookup(target)) != NULL &&
3642 spa->spa_log_state == SPA_LOG_MISSING) {
3643 spa->spa_log_state = SPA_LOG_CLEAR;
3644 error = 0;
3645 }
3646 mutex_exit(&spa_namespace_lock);
3647
3648 if (!error) {
3649 error = spa_open_rewind(target, &spa,
3650 FTAG, policy, NULL);
3651 }
3652 }
3653 } else {
3654 error = dmu_objset_own(target, DMU_OST_ANY,
3655 B_TRUE, FTAG, &os);
3656 }
3657 }
3658 nvlist_free(policy);
3659
3660 if (error)
3661 fatal("can't open '%s': %s", target, strerror(error));
3662
3663 argv++;
3664 argc--;
3665 if (!dump_opt['R']) {
3666 if (argc > 0) {
3667 zopt_objects = argc;
3668 zopt_object = calloc(zopt_objects, sizeof (uint64_t));
3669 for (i = 0; i < zopt_objects; i++) {
3670 errno = 0;
3671 zopt_object[i] = strtoull(argv[i], NULL, 0);
3672 if (zopt_object[i] == 0 && errno != 0)
3673 fatal("bad number %s: %s",
3674 argv[i], strerror(errno));
3675 }
3676 }
3677 if (os != NULL) {
3678 dump_dir(os);
3679 } else if (zopt_objects > 0 && !dump_opt['m']) {
3680 dump_dir(spa->spa_meta_objset);
3681 } else {
3682 dump_zpool(spa);
3683 }
3684 } else {
3685 flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR;
3686 flagbits['c'] = ZDB_FLAG_CHECKSUM;
3687 flagbits['d'] = ZDB_FLAG_DECOMPRESS;
3688 flagbits['e'] = ZDB_FLAG_BSWAP;
3689 flagbits['g'] = ZDB_FLAG_GBH;
3690 flagbits['i'] = ZDB_FLAG_INDIRECT;
3691 flagbits['p'] = ZDB_FLAG_PHYS;
3692 flagbits['r'] = ZDB_FLAG_RAW;
3693
3694 for (i = 0; i < argc; i++)
3695 zdb_read_block(argv[i], spa);
3696 }
3697
3698 (os != NULL) ? dmu_objset_disown(os, FTAG) : spa_close(spa, FTAG);
3699
3700 fuid_table_destroy();
3701 sa_loaded = B_FALSE;
3702
3703 libzfs_fini(g_zfs);
3704 kernel_fini();
3705
3706 return (0);
3707}