zfsimpl.h (198420 → 201143)
1/*-
2 * Copyright (c) 2002 McAfee, Inc.
3 * All rights reserved.
4 *
5 * This software was developed for the FreeBSD Project by Marshall
 6 * Kirk McKusick and McAfee Research, the Security Research Division of
7 * McAfee, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as
 8 * part of the DARPA CHATS research program.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31/*
32 * CDDL HEADER START
33 *
34 * The contents of this file are subject to the terms of the
35 * Common Development and Distribution License (the "License").
36 * You may not use this file except in compliance with the License.
37 *
38 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
39 * or http://www.opensolaris.org/os/licensing.
40 * See the License for the specific language governing permissions
41 * and limitations under the License.
42 *
43 * When distributing Covered Code, include this CDDL HEADER in each
44 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
45 * If applicable, add the following below this CDDL HEADER, with the
46 * fields enclosed by brackets "[]" replaced with your own identifying
47 * information: Portions Copyright [yyyy] [name of copyright owner]
48 *
49 * CDDL HEADER END
50 */
51/*
52 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
53 * Use is subject to license terms.
54 */
55
56/* CRC64 table */
57#define ZFS_CRC64_POLY 0xC96C5795D7870F42ULL /* ECMA-182, reflected form */
58
59/*
60 * Macros for various sorts of alignment and rounding when the alignment
61 * is known to be a power of 2.
62 */
63#define P2ALIGN(x, align) ((x) & -(align))
64#define P2PHASE(x, align) ((x) & ((align) - 1))
65#define P2NPHASE(x, align) (-(x) & ((align) - 1))
66#define P2ROUNDUP(x, align) (-(-(x) & -(align)))
67#define P2END(x, align) (-(~(x) & -(align)))
68#define P2PHASEUP(x, align, phase) ((phase) - (((phase) - (x)) & -(align)))
69#define P2CROSS(x, y, align) (((x) ^ (y)) > (align) - 1)
70
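/*
 * Illustrative worked example, not part of the original header: with a
 * power-of-2 alignment of 512 and x = 1300,
 *
 *	P2ALIGN(1300, 512)   == 1024	(round down)
 *	P2ROUNDUP(1300, 512) == 1536	(round up)
 *	P2PHASE(1300, 512)   == 276	(distance past the previous boundary)
 *	P2NPHASE(1300, 512)  == 236	(distance to the next boundary)
 */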
71/*
72 * General-purpose 32-bit and 64-bit bitfield encodings.
73 */
74#define BF32_DECODE(x, low, len) P2PHASE((x) >> (low), 1U << (len))
75#define BF64_DECODE(x, low, len) P2PHASE((x) >> (low), 1ULL << (len))
76#define BF32_ENCODE(x, low, len) (P2PHASE((x), 1U << (len)) << (low))
77#define BF64_ENCODE(x, low, len) (P2PHASE((x), 1ULL << (len)) << (low))
78
79#define BF32_GET(x, low, len) BF32_DECODE(x, low, len)
80#define BF64_GET(x, low, len) BF64_DECODE(x, low, len)
81
82#define BF32_SET(x, low, len, val) \
83 ((x) ^= BF32_ENCODE((x >> low) ^ (val), low, len))
84#define BF64_SET(x, low, len, val) \
85 ((x) ^= BF64_ENCODE((x >> low) ^ (val), low, len))
86
87#define BF32_GET_SB(x, low, len, shift, bias) \
88 ((BF32_GET(x, low, len) + (bias)) << (shift))
89#define BF64_GET_SB(x, low, len, shift, bias) \
90 ((BF64_GET(x, low, len) + (bias)) << (shift))
91
92#define BF32_SET_SB(x, low, len, shift, bias, val) \
93 BF32_SET(x, low, len, ((val) >> (shift)) - (bias))
94#define BF64_SET_SB(x, low, len, shift, bias, val) \
95 BF64_SET(x, low, len, ((val) >> (shift)) - (bias))
96
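/*
 * Illustrative sketch, not part of the original header: the BF64 macros
 * update a 64-bit word in place.  The function name below is hypothetical.
 */
static inline uint64_t
bf64_example(void)
{
	uint64_t word = 0;

	BF64_SET(word, 40, 8, 5);	/* store 5 in bits 40..47 of word */
	return (BF64_GET(word, 40, 8));	/* reads back 5 */
}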
97/*
98 * We currently support nine block sizes, from 512 bytes to 128K.
99 * We could go higher, but the benefits are near-zero and the cost
100 * of COWing a giant block to modify one byte would become excessive.
101 */
102#define SPA_MINBLOCKSHIFT 9
103#define SPA_MAXBLOCKSHIFT 17
104#define SPA_MINBLOCKSIZE (1ULL << SPA_MINBLOCKSHIFT)
105#define SPA_MAXBLOCKSIZE (1ULL << SPA_MAXBLOCKSHIFT)
106
107#define SPA_BLOCKSIZES (SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)
108
109/*
110 * The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB.
111 * The ASIZE encoding should be at least 64 times larger (6 more bits)
112 * to support up to 4-way RAID-Z mirror mode with worst-case gang block
113 * overhead, three DVAs per bp, plus one more bit in case we do anything
114 * else that expands the ASIZE.
115 */
116#define SPA_LSIZEBITS 16 /* LSIZE up to 32M (2^16 * 512) */
117#define SPA_PSIZEBITS 16 /* PSIZE up to 32M (2^16 * 512) */
118#define SPA_ASIZEBITS 24 /* ASIZE up to 64 times larger */
119
120/*
121 * All SPA data is represented by 128-bit data virtual addresses (DVAs).
122 * The members of the dva_t should be considered opaque outside the SPA.
123 */
124typedef struct dva {
125 uint64_t dva_word[2];
126} dva_t;
127
128/*
129 * Each block has a 256-bit checksum -- strong enough for cryptographic hashes.
130 */
131typedef struct zio_cksum {
132 uint64_t zc_word[4];
133} zio_cksum_t;
134
135/*
136 * Each block is described by its DVAs, time of birth, checksum, etc.
137 * The word-by-word, bit-by-bit layout of the blkptr is as follows:
138 *
139 * 64 56 48 40 32 24 16 8 0
140 * +-------+-------+-------+-------+-------+-------+-------+-------+
141 * 0 | vdev1 | GRID | ASIZE |
142 * +-------+-------+-------+-------+-------+-------+-------+-------+
143 * 1 |G| offset1 |
144 * +-------+-------+-------+-------+-------+-------+-------+-------+
145 * 2 | vdev2 | GRID | ASIZE |
146 * +-------+-------+-------+-------+-------+-------+-------+-------+
147 * 3 |G| offset2 |
148 * +-------+-------+-------+-------+-------+-------+-------+-------+
149 * 4 | vdev3 | GRID | ASIZE |
150 * +-------+-------+-------+-------+-------+-------+-------+-------+
151 * 5 |G| offset3 |
152 * +-------+-------+-------+-------+-------+-------+-------+-------+
153 * 6 |E| lvl | type | cksum | comp | PSIZE | LSIZE |
154 * +-------+-------+-------+-------+-------+-------+-------+-------+
155 * 7 | padding |
156 * +-------+-------+-------+-------+-------+-------+-------+-------+
157 * 8 | padding |
158 * +-------+-------+-------+-------+-------+-------+-------+-------+
159 * 9 | padding |
160 * +-------+-------+-------+-------+-------+-------+-------+-------+
161 * a | birth txg |
162 * +-------+-------+-------+-------+-------+-------+-------+-------+
163 * b | fill count |
164 * +-------+-------+-------+-------+-------+-------+-------+-------+
165 * c | checksum[0] |
166 * +-------+-------+-------+-------+-------+-------+-------+-------+
167 * d | checksum[1] |
168 * +-------+-------+-------+-------+-------+-------+-------+-------+
169 * e | checksum[2] |
170 * +-------+-------+-------+-------+-------+-------+-------+-------+
171 * f | checksum[3] |
172 * +-------+-------+-------+-------+-------+-------+-------+-------+
173 *
174 * Legend:
175 *
176 * vdev virtual device ID
177 * offset offset into virtual device
178 * LSIZE logical size
179 * PSIZE physical size (after compression)
180 * ASIZE allocated size (including RAID-Z parity and gang block headers)
181 * GRID RAID-Z layout information (reserved for future use)
182 * cksum checksum function
183 * comp compression function
184 * G gang block indicator
185 * E endianness
186 * type DMU object type
187 * lvl level of indirection
188 * birth txg transaction group in which the block was born
189 * fill count number of non-zero blocks under this bp
190 * checksum[4] 256-bit checksum of the data this bp describes
191 */
192typedef struct blkptr {
193 dva_t blk_dva[3]; /* 128-bit Data Virtual Address */
194 uint64_t blk_prop; /* size, compression, type, etc */
195 uint64_t blk_pad[3]; /* Extra space for the future */
196 uint64_t blk_birth; /* transaction group at birth */
197 uint64_t blk_fill; /* fill count */
198 zio_cksum_t blk_cksum; /* 256-bit checksum */
199} blkptr_t;
200
201#define SPA_BLKPTRSHIFT 7 /* blkptr_t is 128 bytes */
202#define SPA_DVAS_PER_BP 3 /* Number of DVAs in a bp */
203
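/*
 * Illustrative sketch, not part of the original header: a compile-time
 * check that blkptr_t really is 1 << SPA_BLKPTRSHIFT (128) bytes.  The
 * typedef name is hypothetical; the negative-size-array trick stands in
 * for C11 _Static_assert.
 */
typedef char blkptr_size_check_t[sizeof (blkptr_t) == (1 << SPA_BLKPTRSHIFT) ? 1 : -1];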
204/*
205 * Macros to get and set fields in a bp or DVA.
206 */
207#define DVA_GET_ASIZE(dva) \
208 BF64_GET_SB((dva)->dva_word[0], 0, 24, SPA_MINBLOCKSHIFT, 0)
209#define DVA_SET_ASIZE(dva, x) \
210 BF64_SET_SB((dva)->dva_word[0], 0, 24, SPA_MINBLOCKSHIFT, 0, x)
211
212#define DVA_GET_GRID(dva) BF64_GET((dva)->dva_word[0], 24, 8)
213#define DVA_SET_GRID(dva, x) BF64_SET((dva)->dva_word[0], 24, 8, x)
214
215#define DVA_GET_VDEV(dva) BF64_GET((dva)->dva_word[0], 32, 32)
216#define DVA_SET_VDEV(dva, x) BF64_SET((dva)->dva_word[0], 32, 32, x)
217
218#define DVA_GET_OFFSET(dva) \
219 BF64_GET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0)
220#define DVA_SET_OFFSET(dva, x) \
221 BF64_SET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0, x)
222
223#define DVA_GET_GANG(dva) BF64_GET((dva)->dva_word[1], 63, 1)
224#define DVA_SET_GANG(dva, x) BF64_SET((dva)->dva_word[1], 63, 1, x)
225
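/*
 * Illustrative sketch, not part of the original header: unpacking the
 * address stored in a dva_t with the accessors above.  The struct and
 * function names are hypothetical.
 */
struct dva_decoded_example {
	uint64_t dd_vdev;	/* virtual device ID */
	uint64_t dd_offset;	/* byte offset on that vdev */
	uint64_t dd_asize;	/* allocated size in bytes */
};

static inline void
dva_decode_example(const dva_t *dva, struct dva_decoded_example *out)
{
	out->dd_vdev = DVA_GET_VDEV(dva);
	out->dd_offset = DVA_GET_OFFSET(dva);	/* shifted into bytes by the macro */
	out->dd_asize = DVA_GET_ASIZE(dva);
}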
226#define BP_GET_LSIZE(bp) \
227 (BP_IS_HOLE(bp) ? 0 : \
228 BF64_GET_SB((bp)->blk_prop, 0, 16, SPA_MINBLOCKSHIFT, 1))
229#define BP_SET_LSIZE(bp, x) \
230 BF64_SET_SB((bp)->blk_prop, 0, 16, SPA_MINBLOCKSHIFT, 1, x)
231
232#define BP_GET_PSIZE(bp) \
233 BF64_GET_SB((bp)->blk_prop, 16, 16, SPA_MINBLOCKSHIFT, 1)
234#define BP_SET_PSIZE(bp, x) \
235 BF64_SET_SB((bp)->blk_prop, 16, 16, SPA_MINBLOCKSHIFT, 1, x)
236
237#define BP_GET_COMPRESS(bp) BF64_GET((bp)->blk_prop, 32, 8)
238#define BP_SET_COMPRESS(bp, x) BF64_SET((bp)->blk_prop, 32, 8, x)
239
240#define BP_GET_CHECKSUM(bp) BF64_GET((bp)->blk_prop, 40, 8)
241#define BP_SET_CHECKSUM(bp, x) BF64_SET((bp)->blk_prop, 40, 8, x)
242
243#define BP_GET_TYPE(bp) BF64_GET((bp)->blk_prop, 48, 8)
244#define BP_SET_TYPE(bp, x) BF64_SET((bp)->blk_prop, 48, 8, x)
245
246#define BP_GET_LEVEL(bp) BF64_GET((bp)->blk_prop, 56, 5)
247#define BP_SET_LEVEL(bp, x) BF64_SET((bp)->blk_prop, 56, 5, x)
248
249#define BP_GET_BYTEORDER(bp) (0 - BF64_GET((bp)->blk_prop, 63, 1))
250#define BP_SET_BYTEORDER(bp, x) BF64_SET((bp)->blk_prop, 63, 1, x)
251
252#define BP_GET_ASIZE(bp) \
253 (DVA_GET_ASIZE(&(bp)->blk_dva[0]) + DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
254 DVA_GET_ASIZE(&(bp)->blk_dva[2]))
255
256#define BP_GET_UCSIZE(bp) \
257 ((BP_GET_LEVEL(bp) > 0 || dmu_ot[BP_GET_TYPE(bp)].ot_metadata) ? \
 258	BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp))
259
260#define BP_GET_NDVAS(bp) \
261 (!!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
262 !!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
263 !!DVA_GET_ASIZE(&(bp)->blk_dva[2]))
264
265#define BP_COUNT_GANG(bp) \
266 (DVA_GET_GANG(&(bp)->blk_dva[0]) + \
267 DVA_GET_GANG(&(bp)->blk_dva[1]) + \
268 DVA_GET_GANG(&(bp)->blk_dva[2]))
269
270#define DVA_EQUAL(dva1, dva2) \
271 ((dva1)->dva_word[1] == (dva2)->dva_word[1] && \
272 (dva1)->dva_word[0] == (dva2)->dva_word[0])
273
274#define ZIO_CHECKSUM_EQUAL(zc1, zc2) \
275 (0 == (((zc1).zc_word[0] - (zc2).zc_word[0]) | \
276 ((zc1).zc_word[1] - (zc2).zc_word[1]) | \
277 ((zc1).zc_word[2] - (zc2).zc_word[2]) | \
278 ((zc1).zc_word[3] - (zc2).zc_word[3])))
279
280
281#define DVA_IS_VALID(dva) (DVA_GET_ASIZE(dva) != 0)
282
283#define ZIO_SET_CHECKSUM(zcp, w0, w1, w2, w3) \
284{ \
285 (zcp)->zc_word[0] = w0; \
286 (zcp)->zc_word[1] = w1; \
287 (zcp)->zc_word[2] = w2; \
288 (zcp)->zc_word[3] = w3; \
289}
290
291#define BP_IDENTITY(bp) (&(bp)->blk_dva[0])
292#define BP_IS_GANG(bp) DVA_GET_GANG(BP_IDENTITY(bp))
293#define BP_IS_HOLE(bp) ((bp)->blk_birth == 0)
294#define BP_IS_OLDER(bp, txg) (!BP_IS_HOLE(bp) && (bp)->blk_birth < (txg))
295
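/*
 * Illustrative sketch, not part of the original header: the usual pattern
 * for interpreting a block pointer with the accessors above.  The function
 * name is hypothetical.
 */
static inline uint64_t
blkptr_lsize_example(const blkptr_t *bp)
{
	if (BP_IS_HOLE(bp))		/* never written: nothing to read */
		return (0);
	/*
	 * PSIZE is what is read from disk; LSIZE is what the caller sees
	 * after decompression (they are equal when compression is off).
	 */
	return (BP_GET_LSIZE(bp));
}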
296#define BP_ZERO(bp) \
297{ \
298 (bp)->blk_dva[0].dva_word[0] = 0; \
299 (bp)->blk_dva[0].dva_word[1] = 0; \
300 (bp)->blk_dva[1].dva_word[0] = 0; \
301 (bp)->blk_dva[1].dva_word[1] = 0; \
302 (bp)->blk_dva[2].dva_word[0] = 0; \
303 (bp)->blk_dva[2].dva_word[1] = 0; \
304 (bp)->blk_prop = 0; \
305 (bp)->blk_pad[0] = 0; \
306 (bp)->blk_pad[1] = 0; \
307 (bp)->blk_pad[2] = 0; \
308 (bp)->blk_birth = 0; \
309 (bp)->blk_fill = 0; \
310 ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0); \
311}
312
313#define ZBT_MAGIC 0x210da7ab10c7a11ULL /* zio data bloc tail */
314
315typedef struct zio_block_tail {
316 uint64_t zbt_magic; /* for validation, endianness */
317 zio_cksum_t zbt_cksum; /* 256-bit checksum */
318} zio_block_tail_t;
319
320#define VDEV_SKIP_SIZE (8 << 10)
321#define VDEV_BOOT_HEADER_SIZE (8 << 10)
322#define VDEV_PHYS_SIZE (112 << 10)
323#define VDEV_UBERBLOCK_RING (128 << 10)
324
325#define VDEV_UBERBLOCK_SHIFT(vd) \
326 MAX((vd)->vdev_top->vdev_ashift, UBERBLOCK_SHIFT)
327#define VDEV_UBERBLOCK_COUNT(vd) \
328 (VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT(vd))
329#define VDEV_UBERBLOCK_OFFSET(vd, n) \
330 offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)])
331#define VDEV_UBERBLOCK_SIZE(vd) (1ULL << VDEV_UBERBLOCK_SHIFT(vd))
332
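/*
 * Illustrative worked example, not part of the original header: for a
 * device with 512-byte sectors (vdev_ashift = 9) and UBERBLOCK_SHIFT = 10
 * (defined further down), VDEV_UBERBLOCK_SHIFT() is 10, so each slot in
 * the ring is 1K and VDEV_UBERBLOCK_COUNT() is 128K >> 10 = 128 uberblocks.
 */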
333/* ZFS boot block */
334#define VDEV_BOOT_MAGIC 0x2f5b007b10cULL
335#define VDEV_BOOT_VERSION 1 /* version number */
336
337typedef struct vdev_boot_header {
338 uint64_t vb_magic; /* VDEV_BOOT_MAGIC */
339 uint64_t vb_version; /* VDEV_BOOT_VERSION */
340 uint64_t vb_offset; /* start offset (bytes) */
341 uint64_t vb_size; /* size (bytes) */
342 char vb_pad[VDEV_BOOT_HEADER_SIZE - 4 * sizeof (uint64_t)];
343} vdev_boot_header_t;
344
345typedef struct vdev_phys {
346 char vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_block_tail_t)];
347 zio_block_tail_t vp_zbt;
348} vdev_phys_t;
349
350typedef struct vdev_label {
351 char vl_pad[VDEV_SKIP_SIZE]; /* 8K */
352 vdev_boot_header_t vl_boot_header; /* 8K */
353 vdev_phys_t vl_vdev_phys; /* 112K */
354 char vl_uberblock[VDEV_UBERBLOCK_RING]; /* 128K */
355} vdev_label_t; /* 256K total */
356
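/*
 * Illustrative sketch, not part of the original header: the four regions
 * above must total exactly 256K.  The typedef name is hypothetical.
 */
typedef char vdev_label_size_check_t[sizeof (vdev_label_t) == (256 << 10) ? 1 : -1];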
357/*
358 * vdev_dirty() flags
359 */
360#define VDD_METASLAB 0x01
361#define VDD_DTL 0x02
362
363/*
364 * Size and offset of embedded boot loader region on each label.
365 * The total size of the first two labels plus the boot area is 4MB.
366 */
367#define VDEV_BOOT_OFFSET (2 * sizeof (vdev_label_t))
368#define VDEV_BOOT_SIZE (7ULL << 19) /* 3.5M */
369
370/*
371 * Size of label regions at the start and end of each leaf device.
372 */
373#define VDEV_LABEL_START_SIZE (2 * sizeof (vdev_label_t) + VDEV_BOOT_SIZE)
374#define VDEV_LABEL_END_SIZE (2 * sizeof (vdev_label_t))
375#define VDEV_LABELS 4
376
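/*
 * Illustrative sketch, not part of the original header: the conventional
 * placement of the four labels -- two at the front of the device and two
 * at the end -- expressed with the constants above.  The function name is
 * hypothetical; psize is the device size in bytes.
 */
static inline uint64_t
vdev_label_offset_example(uint64_t psize, int l)
{
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ? 0 :
	    psize - VDEV_LABELS * sizeof (vdev_label_t)));
}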
377/*
378 * Gang block headers are self-checksumming and contain an array
379 * of block pointers.
380 */
381#define SPA_GANGBLOCKSIZE SPA_MINBLOCKSIZE
382#define SPA_GBH_NBLKPTRS ((SPA_GANGBLOCKSIZE - \
383 sizeof (zio_block_tail_t)) / sizeof (blkptr_t))
384#define SPA_GBH_FILLER ((SPA_GANGBLOCKSIZE - \
385 sizeof (zio_block_tail_t) - \
386 (SPA_GBH_NBLKPTRS * sizeof (blkptr_t))) /\
387 sizeof (uint64_t))
388
389typedef struct zio_gbh {
390 blkptr_t zg_blkptr[SPA_GBH_NBLKPTRS];
391 uint64_t zg_filler[SPA_GBH_FILLER];
392 zio_block_tail_t zg_tail;
393} zio_gbh_phys_t;
394
395enum zio_checksum {
396 ZIO_CHECKSUM_INHERIT = 0,
397 ZIO_CHECKSUM_ON,
398 ZIO_CHECKSUM_OFF,
399 ZIO_CHECKSUM_LABEL,
400 ZIO_CHECKSUM_GANG_HEADER,
401 ZIO_CHECKSUM_ZILOG,
402 ZIO_CHECKSUM_FLETCHER_2,
403 ZIO_CHECKSUM_FLETCHER_4,
404 ZIO_CHECKSUM_SHA256,
405 ZIO_CHECKSUM_FUNCTIONS
406};
407
408#define ZIO_CHECKSUM_ON_VALUE ZIO_CHECKSUM_FLETCHER_2
409#define ZIO_CHECKSUM_DEFAULT ZIO_CHECKSUM_ON
410
411enum zio_compress {
412 ZIO_COMPRESS_INHERIT = 0,
413 ZIO_COMPRESS_ON,
414 ZIO_COMPRESS_OFF,
415 ZIO_COMPRESS_LZJB,
416 ZIO_COMPRESS_EMPTY,
417 ZIO_COMPRESS_GZIP_1,
418 ZIO_COMPRESS_GZIP_2,
419 ZIO_COMPRESS_GZIP_3,
420 ZIO_COMPRESS_GZIP_4,
421 ZIO_COMPRESS_GZIP_5,
422 ZIO_COMPRESS_GZIP_6,
423 ZIO_COMPRESS_GZIP_7,
424 ZIO_COMPRESS_GZIP_8,
425 ZIO_COMPRESS_GZIP_9,
426 ZIO_COMPRESS_FUNCTIONS
427};
428
429#define ZIO_COMPRESS_ON_VALUE ZIO_COMPRESS_LZJB
430#define ZIO_COMPRESS_DEFAULT ZIO_COMPRESS_OFF
431
432/* nvlist pack encoding */
433#define NV_ENCODE_NATIVE 0
434#define NV_ENCODE_XDR 1
435
436typedef enum {
437 DATA_TYPE_UNKNOWN = 0,
438 DATA_TYPE_BOOLEAN,
439 DATA_TYPE_BYTE,
440 DATA_TYPE_INT16,
441 DATA_TYPE_UINT16,
442 DATA_TYPE_INT32,
443 DATA_TYPE_UINT32,
444 DATA_TYPE_INT64,
445 DATA_TYPE_UINT64,
446 DATA_TYPE_STRING,
447 DATA_TYPE_BYTE_ARRAY,
448 DATA_TYPE_INT16_ARRAY,
449 DATA_TYPE_UINT16_ARRAY,
450 DATA_TYPE_INT32_ARRAY,
451 DATA_TYPE_UINT32_ARRAY,
452 DATA_TYPE_INT64_ARRAY,
453 DATA_TYPE_UINT64_ARRAY,
454 DATA_TYPE_STRING_ARRAY,
455 DATA_TYPE_HRTIME,
456 DATA_TYPE_NVLIST,
457 DATA_TYPE_NVLIST_ARRAY,
458 DATA_TYPE_BOOLEAN_VALUE,
459 DATA_TYPE_INT8,
460 DATA_TYPE_UINT8,
461 DATA_TYPE_BOOLEAN_ARRAY,
462 DATA_TYPE_INT8_ARRAY,
463 DATA_TYPE_UINT8_ARRAY
464} data_type_t;
465
466/*
467 * On-disk version number.
468 */
469#define SPA_VERSION_1 1ULL
470#define SPA_VERSION_2 2ULL
471#define SPA_VERSION_3 3ULL
472#define SPA_VERSION_4 4ULL
473#define SPA_VERSION_5 5ULL
474#define SPA_VERSION_6 6ULL
475#define SPA_VERSION_7 7ULL
476#define SPA_VERSION_8 8ULL
477#define SPA_VERSION_9 9ULL
478#define SPA_VERSION_10 10ULL
479#define SPA_VERSION_11 11ULL
480#define SPA_VERSION_12 12ULL
481#define SPA_VERSION_13 13ULL
482#define SPA_VERSION_14 14ULL
483/*
 484 * When bumping up SPA_VERSION, make sure GRUB ZFS understands the on-disk
 485 * format change. Go to usr/src/grub/grub-0.95/stage2/{zfs-include/, fsys_zfs*},
 486 * and make the appropriate changes.
 487 */
488#define SPA_VERSION SPA_VERSION_14
489#define SPA_VERSION_STRING "14"
490
491/*
492 * Symbolic names for the changes that caused a SPA_VERSION switch.
493 * Used in the code when checking for presence or absence of a feature.
494 * Feel free to define multiple symbolic names for each version if there
495 * were multiple changes to on-disk structures during that version.
496 *
497 * NOTE: When checking the current SPA_VERSION in your code, be sure
498 * to use spa_version() since it reports the version of the
499 * last synced uberblock. Checking the in-flight version can
500 * be dangerous in some cases.
501 */
502#define SPA_VERSION_INITIAL SPA_VERSION_1
503#define SPA_VERSION_DITTO_BLOCKS SPA_VERSION_2
504#define SPA_VERSION_SPARES SPA_VERSION_3
505#define SPA_VERSION_RAID6 SPA_VERSION_3
506#define SPA_VERSION_BPLIST_ACCOUNT SPA_VERSION_3
507#define SPA_VERSION_RAIDZ_DEFLATE SPA_VERSION_3
508#define SPA_VERSION_DNODE_BYTES SPA_VERSION_3
509#define SPA_VERSION_ZPOOL_HISTORY SPA_VERSION_4
510#define SPA_VERSION_GZIP_COMPRESSION SPA_VERSION_5
511#define SPA_VERSION_BOOTFS SPA_VERSION_6
512#define SPA_VERSION_SLOGS SPA_VERSION_7
513#define SPA_VERSION_DELEGATED_PERMS SPA_VERSION_8
514#define SPA_VERSION_FUID SPA_VERSION_9
515#define SPA_VERSION_REFRESERVATION SPA_VERSION_9
516#define SPA_VERSION_REFQUOTA SPA_VERSION_9
517#define SPA_VERSION_UNIQUE_ACCURATE SPA_VERSION_9
518#define SPA_VERSION_L2CACHE SPA_VERSION_10
519#define SPA_VERSION_NEXT_CLONES SPA_VERSION_11
520#define SPA_VERSION_ORIGIN SPA_VERSION_11
521#define SPA_VERSION_DSL_SCRUB SPA_VERSION_11
522#define SPA_VERSION_SNAP_PROPS SPA_VERSION_12
523#define SPA_VERSION_USED_BREAKDOWN SPA_VERSION_13
524#define SPA_VERSION_PASSTHROUGH_X SPA_VERSION_14
523
524/*
525 * The following are configuration names used in the nvlist describing a pool's
526 * configuration.
527 */
528#define ZPOOL_CONFIG_VERSION "version"
529#define ZPOOL_CONFIG_POOL_NAME "name"
530#define ZPOOL_CONFIG_POOL_STATE "state"
531#define ZPOOL_CONFIG_POOL_TXG "txg"
532#define ZPOOL_CONFIG_POOL_GUID "pool_guid"
533#define ZPOOL_CONFIG_CREATE_TXG "create_txg"
534#define ZPOOL_CONFIG_TOP_GUID "top_guid"
535#define ZPOOL_CONFIG_VDEV_TREE "vdev_tree"
536#define ZPOOL_CONFIG_TYPE "type"
537#define ZPOOL_CONFIG_CHILDREN "children"
538#define ZPOOL_CONFIG_ID "id"
539#define ZPOOL_CONFIG_GUID "guid"
540#define ZPOOL_CONFIG_PATH "path"
541#define ZPOOL_CONFIG_DEVID "devid"
542#define ZPOOL_CONFIG_METASLAB_ARRAY "metaslab_array"
543#define ZPOOL_CONFIG_METASLAB_SHIFT "metaslab_shift"
544#define ZPOOL_CONFIG_ASHIFT "ashift"
545#define ZPOOL_CONFIG_ASIZE "asize"
546#define ZPOOL_CONFIG_DTL "DTL"
547#define ZPOOL_CONFIG_STATS "stats"
548#define ZPOOL_CONFIG_WHOLE_DISK "whole_disk"
549#define ZPOOL_CONFIG_OFFLINE "offline"
550#define ZPOOL_CONFIG_ERRCOUNT "error_count"
551#define ZPOOL_CONFIG_NOT_PRESENT "not_present"
552#define ZPOOL_CONFIG_SPARES "spares"
553#define ZPOOL_CONFIG_IS_SPARE "is_spare"
554#define ZPOOL_CONFIG_NPARITY "nparity"
555#define ZPOOL_CONFIG_HOSTID "hostid"
556#define ZPOOL_CONFIG_HOSTNAME "hostname"
557#define ZPOOL_CONFIG_TIMESTAMP "timestamp" /* not stored on disk */
558
559#define VDEV_TYPE_ROOT "root"
560#define VDEV_TYPE_MIRROR "mirror"
561#define VDEV_TYPE_REPLACING "replacing"
562#define VDEV_TYPE_RAIDZ "raidz"
563#define VDEV_TYPE_DISK "disk"
564#define VDEV_TYPE_FILE "file"
565#define VDEV_TYPE_MISSING "missing"
566#define VDEV_TYPE_SPARE "spare"
567
568/*
569 * This is needed in userland to report the minimum necessary device size.
570 */
571#define SPA_MINDEVSIZE (64ULL << 20)
572
573/*
574 * The location of the pool configuration repository, shared between kernel and
575 * userland.
576 */
577#define ZPOOL_CACHE_DIR "/boot/zfs"
578#define ZPOOL_CACHE_FILE "zpool.cache"
579#define ZPOOL_CACHE_TMP ".zpool.cache"
580
581#define ZPOOL_CACHE ZPOOL_CACHE_DIR "/" ZPOOL_CACHE_FILE
582
583/*
584 * vdev states are ordered from least to most healthy.
585 * A vdev that's CANT_OPEN or below is considered unusable.
586 */
587typedef enum vdev_state {
588 VDEV_STATE_UNKNOWN = 0, /* Uninitialized vdev */
589 VDEV_STATE_CLOSED, /* Not currently open */
590 VDEV_STATE_OFFLINE, /* Not allowed to open */
591 VDEV_STATE_CANT_OPEN, /* Tried to open, but failed */
592 VDEV_STATE_DEGRADED, /* Replicated vdev with unhealthy kids */
593 VDEV_STATE_HEALTHY /* Presumed good */
594} vdev_state_t;
595
596/*
597 * vdev aux states. When a vdev is in the CANT_OPEN state, the aux field
598 * of the vdev stats structure uses these constants to distinguish why.
599 */
600typedef enum vdev_aux {
601 VDEV_AUX_NONE, /* no error */
602 VDEV_AUX_OPEN_FAILED, /* ldi_open_*() or vn_open() failed */
603 VDEV_AUX_CORRUPT_DATA, /* bad label or disk contents */
604 VDEV_AUX_NO_REPLICAS, /* insufficient number of replicas */
605 VDEV_AUX_BAD_GUID_SUM, /* vdev guid sum doesn't match */
606 VDEV_AUX_TOO_SMALL, /* vdev size is too small */
607 VDEV_AUX_BAD_LABEL, /* the label is OK but invalid */
608 VDEV_AUX_VERSION_NEWER, /* on-disk version is too new */
609 VDEV_AUX_VERSION_OLDER, /* on-disk version is too old */
610 VDEV_AUX_SPARED /* hot spare used in another pool */
611} vdev_aux_t;
612
613/*
614 * pool state. The following states are written to disk as part of the normal
615 * SPA lifecycle: ACTIVE, EXPORTED, DESTROYED, SPARE. The remaining states are
616 * software abstractions used at various levels to communicate pool state.
617 */
618typedef enum pool_state {
619 POOL_STATE_ACTIVE = 0, /* In active use */
620 POOL_STATE_EXPORTED, /* Explicitly exported */
621 POOL_STATE_DESTROYED, /* Explicitly destroyed */
622 POOL_STATE_SPARE, /* Reserved for hot spare use */
623 POOL_STATE_UNINITIALIZED, /* Internal spa_t state */
624 POOL_STATE_UNAVAIL, /* Internal libzfs state */
625 POOL_STATE_POTENTIALLY_ACTIVE /* Internal libzfs state */
626} pool_state_t;
627
628/*
629 * The uberblock version is incremented whenever an incompatible on-disk
630 * format change is made to the SPA, DMU, or ZAP.
631 *
632 * Note: the first two fields should never be moved. When a storage pool
633 * is opened, the uberblock must be read off the disk before the version
634 * can be checked. If the ub_version field is moved, we may not detect
635 * version mismatch. If the ub_magic field is moved, applications that
636 * expect the magic number in the first word won't work.
637 */
638#define UBERBLOCK_MAGIC 0x00bab10c /* oo-ba-bloc! */
639#define UBERBLOCK_SHIFT 10 /* up to 1K */
640
641struct uberblock {
642 uint64_t ub_magic; /* UBERBLOCK_MAGIC */
643 uint64_t ub_version; /* SPA_VERSION */
644 uint64_t ub_txg; /* txg of last sync */
645 uint64_t ub_guid_sum; /* sum of all vdev guids */
646 uint64_t ub_timestamp; /* UTC time of last sync */
647 blkptr_t ub_rootbp; /* MOS objset_phys_t */
648};
649
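/*
 * Illustrative sketch, not part of the original header: when several valid
 * uberblocks are found in the label rings, the one with the highest txg
 * (ties broken by timestamp) is taken as the active one.  A hypothetical
 * comparison helper:
 */
static inline int
uberblock_compare_example(const struct uberblock *a, const struct uberblock *b)
{
	if (a->ub_txg != b->ub_txg)
		return (a->ub_txg < b->ub_txg ? -1 : 1);
	if (a->ub_timestamp != b->ub_timestamp)
		return (a->ub_timestamp < b->ub_timestamp ? -1 : 1);
	return (0);
}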
650/*
651 * Flags.
652 */
653#define DNODE_MUST_BE_ALLOCATED 1
654#define DNODE_MUST_BE_FREE 2
655
656/*
657 * Fixed constants.
658 */
659#define DNODE_SHIFT 9 /* 512 bytes */
660#define DN_MIN_INDBLKSHIFT 10 /* 1k */
661#define DN_MAX_INDBLKSHIFT 14 /* 16k */
662#define DNODE_BLOCK_SHIFT 14 /* 16k */
663#define DNODE_CORE_SIZE 64 /* 64 bytes for dnode sans blkptrs */
664#define DN_MAX_OBJECT_SHIFT 48 /* 256 trillion (zfs_fid_t limit) */
665#define DN_MAX_OFFSET_SHIFT 64 /* 2^64 bytes in a dnode */
666
667/*
668 * Derived constants.
669 */
670#define DNODE_SIZE (1 << DNODE_SHIFT)
671#define DN_MAX_NBLKPTR ((DNODE_SIZE - DNODE_CORE_SIZE) >> SPA_BLKPTRSHIFT)
672#define DN_MAX_BONUSLEN (DNODE_SIZE - DNODE_CORE_SIZE - (1 << SPA_BLKPTRSHIFT))
673#define DN_MAX_OBJECT (1ULL << DN_MAX_OBJECT_SHIFT)
674
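/*
 * Illustrative worked example, not part of the original header: with
 * DNODE_SHIFT = 9 and SPA_BLKPTRSHIFT = 7, DNODE_SIZE is 512 bytes,
 * DN_MAX_NBLKPTR is (512 - 64) >> 7 = 3 block pointers, and
 * DN_MAX_BONUSLEN is 512 - 64 - 128 = 320 bytes of bonus space.
 */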
675#define DNODES_PER_BLOCK_SHIFT (DNODE_BLOCK_SHIFT - DNODE_SHIFT)
676#define DNODES_PER_BLOCK (1ULL << DNODES_PER_BLOCK_SHIFT)
677#define DNODES_PER_LEVEL_SHIFT (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT)
678
679/* The +2 here is a cheesy way to round up */
680#define DN_MAX_LEVELS (2 + ((DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT) / \
681 (DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT)))
682
683#define DN_BONUS(dnp) ((void*)((dnp)->dn_bonus + \
684 (((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t))))
685
686#define DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \
687 (dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT)
688
689#define EPB(blkshift, typeshift) (1 << (blkshift - typeshift))
690
691/* Is dn_used in bytes? if not, it's in multiples of SPA_MINBLOCKSIZE */
692#define DNODE_FLAG_USED_BYTES (1<<0)
693
694typedef struct dnode_phys {
695 uint8_t dn_type; /* dmu_object_type_t */
696 uint8_t dn_indblkshift; /* ln2(indirect block size) */
697 uint8_t dn_nlevels; /* 1=dn_blkptr->data blocks */
698 uint8_t dn_nblkptr; /* length of dn_blkptr */
699 uint8_t dn_bonustype; /* type of data in bonus buffer */
700 uint8_t dn_checksum; /* ZIO_CHECKSUM type */
701 uint8_t dn_compress; /* ZIO_COMPRESS type */
702 uint8_t dn_flags; /* DNODE_FLAG_* */
703 uint16_t dn_datablkszsec; /* data block size in 512b sectors */
704 uint16_t dn_bonuslen; /* length of dn_bonus */
705 uint8_t dn_pad2[4];
706
707 /* accounting is protected by dn_dirty_mtx */
708 uint64_t dn_maxblkid; /* largest allocated block ID */
709 uint64_t dn_used; /* bytes (or sectors) of disk space */
710
711 uint64_t dn_pad3[4];
712
713 blkptr_t dn_blkptr[1];
714 uint8_t dn_bonus[DN_MAX_BONUSLEN];
715} dnode_phys_t;
716
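/*
 * Illustrative sketch, not part of the original header: quantities commonly
 * derived from an on-disk dnode.  The function names are hypothetical.
 */
static inline uint64_t
dnode_datablksz_example(const dnode_phys_t *dnp)
{
	/* dn_datablkszsec counts 512-byte sectors */
	return ((uint64_t)dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
}

static inline uint64_t
dnode_used_bytes_example(const dnode_phys_t *dnp)
{
	/* DN_USED_BYTES() honors the DNODE_FLAG_USED_BYTES flag */
	return (DN_USED_BYTES(dnp));
}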
717typedef enum dmu_object_type {
718 DMU_OT_NONE,
719 /* general: */
720 DMU_OT_OBJECT_DIRECTORY, /* ZAP */
721 DMU_OT_OBJECT_ARRAY, /* UINT64 */
722 DMU_OT_PACKED_NVLIST, /* UINT8 (XDR by nvlist_pack/unpack) */
723 DMU_OT_PACKED_NVLIST_SIZE, /* UINT64 */
724 DMU_OT_BPLIST, /* UINT64 */
725 DMU_OT_BPLIST_HDR, /* UINT64 */
726 /* spa: */
727 DMU_OT_SPACE_MAP_HEADER, /* UINT64 */
728 DMU_OT_SPACE_MAP, /* UINT64 */
729 /* zil: */
730 DMU_OT_INTENT_LOG, /* UINT64 */
731 /* dmu: */
732 DMU_OT_DNODE, /* DNODE */
733 DMU_OT_OBJSET, /* OBJSET */
734 /* dsl: */
735 DMU_OT_DSL_DIR, /* UINT64 */
736 DMU_OT_DSL_DIR_CHILD_MAP, /* ZAP */
737 DMU_OT_DSL_DS_SNAP_MAP, /* ZAP */
738 DMU_OT_DSL_PROPS, /* ZAP */
739 DMU_OT_DSL_DATASET, /* UINT64 */
740 /* zpl: */
741 DMU_OT_ZNODE, /* ZNODE */
742 DMU_OT_ACL, /* ACL */
743 DMU_OT_PLAIN_FILE_CONTENTS, /* UINT8 */
744 DMU_OT_DIRECTORY_CONTENTS, /* ZAP */
745 DMU_OT_MASTER_NODE, /* ZAP */
746 DMU_OT_UNLINKED_SET, /* ZAP */
747 /* zvol: */
748 DMU_OT_ZVOL, /* UINT8 */
749 DMU_OT_ZVOL_PROP, /* ZAP */
750 /* other; for testing only! */
751 DMU_OT_PLAIN_OTHER, /* UINT8 */
752 DMU_OT_UINT64_OTHER, /* UINT64 */
753 DMU_OT_ZAP_OTHER, /* ZAP */
754 /* new object types: */
755 DMU_OT_ERROR_LOG, /* ZAP */
756 DMU_OT_SPA_HISTORY, /* UINT8 */
757 DMU_OT_SPA_HISTORY_OFFSETS, /* spa_his_phys_t */
758 DMU_OT_POOL_PROPS, /* ZAP */
759
760 DMU_OT_NUMTYPES
761} dmu_object_type_t;
762
763typedef enum dmu_objset_type {
764 DMU_OST_NONE,
765 DMU_OST_META,
766 DMU_OST_ZFS,
767 DMU_OST_ZVOL,
768 DMU_OST_OTHER, /* For testing only! */
769 DMU_OST_ANY, /* Be careful! */
770 DMU_OST_NUMTYPES
771} dmu_objset_type_t;
772
773/*
774 * Intent log header - this on disk structure holds fields to manage
775 * the log. All fields are 64 bit to easily handle cross architectures.
776 */
777typedef struct zil_header {
778 uint64_t zh_claim_txg; /* txg in which log blocks were claimed */
779 uint64_t zh_replay_seq; /* highest replayed sequence number */
780 blkptr_t zh_log; /* log chain */
781 uint64_t zh_claim_seq; /* highest claimed sequence number */
782 uint64_t zh_pad[5];
783} zil_header_t;
784
785typedef struct objset_phys {
786 dnode_phys_t os_meta_dnode;
787 zil_header_t os_zil_header;
788 uint64_t os_type;
789 char os_pad[1024 - sizeof (dnode_phys_t) - sizeof (zil_header_t) -
790 sizeof (uint64_t)];
791} objset_phys_t;
792
793typedef struct dsl_dir_phys {
794 uint64_t dd_creation_time; /* not actually used */
795 uint64_t dd_head_dataset_obj;
796 uint64_t dd_parent_obj;
797 uint64_t dd_clone_parent_obj;
798 uint64_t dd_child_dir_zapobj;
799 /*
800 * how much space our children are accounting for; for leaf
801 * datasets, == physical space used by fs + snaps
802 */
803 uint64_t dd_used_bytes;
804 uint64_t dd_compressed_bytes;
805 uint64_t dd_uncompressed_bytes;
806 /* Administrative quota setting */
807 uint64_t dd_quota;
808 /* Administrative reservation setting */
809 uint64_t dd_reserved;
810 uint64_t dd_props_zapobj;
811 uint64_t dd_pad[21]; /* pad out to 256 bytes for good measure */
812} dsl_dir_phys_t;
813
814typedef struct dsl_dataset_phys {
815 uint64_t ds_dir_obj;
816 uint64_t ds_prev_snap_obj;
817 uint64_t ds_prev_snap_txg;
818 uint64_t ds_next_snap_obj;
819 uint64_t ds_snapnames_zapobj; /* zap obj of snaps; ==0 for snaps */
820 uint64_t ds_num_children; /* clone/snap children; ==0 for head */
821 uint64_t ds_creation_time; /* seconds since 1970 */
822 uint64_t ds_creation_txg;
823 uint64_t ds_deadlist_obj;
824 uint64_t ds_used_bytes;
825 uint64_t ds_compressed_bytes;
826 uint64_t ds_uncompressed_bytes;
827 uint64_t ds_unique_bytes; /* only relevant to snapshots */
828 /*
829 * The ds_fsid_guid is a 56-bit ID that can change to avoid
830 * collisions. The ds_guid is a 64-bit ID that will never
831 * change, so there is a small probability that it will collide.
832 */
833 uint64_t ds_fsid_guid;
834 uint64_t ds_guid;
835 uint64_t ds_flags;
836 blkptr_t ds_bp;
837 uint64_t ds_pad[8]; /* pad out to 320 bytes for good measure */
838} dsl_dataset_phys_t;
839
840/*
841 * The names of zap entries in the DIRECTORY_OBJECT of the MOS.
842 */
843#define DMU_POOL_DIRECTORY_OBJECT 1
844#define DMU_POOL_CONFIG "config"
845#define DMU_POOL_ROOT_DATASET "root_dataset"
846#define DMU_POOL_SYNC_BPLIST "sync_bplist"
847#define DMU_POOL_ERRLOG_SCRUB "errlog_scrub"
848#define DMU_POOL_ERRLOG_LAST "errlog_last"
849#define DMU_POOL_SPARES "spares"
850#define DMU_POOL_DEFLATE "deflate"
851#define DMU_POOL_HISTORY "history"
852#define DMU_POOL_PROPS "pool_props"
853
854#define ZAP_MAGIC 0x2F52AB2ABULL
855
856#define FZAP_BLOCK_SHIFT(zap) ((zap)->zap_block_shift)
857
858#define ZAP_MAXCD (uint32_t)(-1)
859#define ZAP_HASHBITS 28
860#define MZAP_ENT_LEN 64
861#define MZAP_NAME_LEN (MZAP_ENT_LEN - 8 - 4 - 2)
862#define MZAP_MAX_BLKSHIFT SPA_MAXBLOCKSHIFT
863#define MZAP_MAX_BLKSZ (1 << MZAP_MAX_BLKSHIFT)
864
865typedef struct mzap_ent_phys {
866 uint64_t mze_value;
867 uint32_t mze_cd;
868 uint16_t mze_pad; /* in case we want to chain them someday */
869 char mze_name[MZAP_NAME_LEN];
870} mzap_ent_phys_t;
871
872typedef struct mzap_phys {
873 uint64_t mz_block_type; /* ZBT_MICRO */
874 uint64_t mz_salt;
875 uint64_t mz_pad[6];
876 mzap_ent_phys_t mz_chunk[1];
877 /* actually variable size depending on block size */
878} mzap_phys_t;
879
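/*
 * Illustrative sketch, not part of the original header: a microzap block is
 * one 64-byte mzap_phys_t header followed by 64-byte chunks, so a lookup is
 * a linear scan over (block size / MZAP_ENT_LEN) - 1 entries.  The function
 * name is hypothetical.
 */
static inline int
mzap_nchunks_example(uint64_t blksz)
{
	return ((int)(blksz / MZAP_ENT_LEN) - 1);	/* first slot is the header */
}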
880/*
881 * The (fat) zap is stored in one object. It is an array of
882 * 1<<FZAP_BLOCK_SHIFT byte blocks. The layout looks like one of:
883 *
884 * ptrtbl fits in first block:
885 * [zap_phys_t zap_ptrtbl_shift < 6] [zap_leaf_t] ...
886 *
887 * ptrtbl too big for first block:
888 * [zap_phys_t zap_ptrtbl_shift >= 6] [zap_leaf_t] [ptrtbl] ...
889 *
890 */
891
892#define ZBT_LEAF ((1ULL << 63) + 0)
893#define ZBT_HEADER ((1ULL << 63) + 1)
894#define ZBT_MICRO ((1ULL << 63) + 3)
895/* any other values are ptrtbl blocks */
896
897/*
898 * the embedded pointer table takes up half a block:
899 * block size / entry size (2^3) / 2
900 */
901#define ZAP_EMBEDDED_PTRTBL_SHIFT(zap) (FZAP_BLOCK_SHIFT(zap) - 3 - 1)
902
903/*
904 * The embedded pointer table starts half-way through the block. Since
905 * the pointer table itself is half the block, it starts at (64-bit)
906 * word number (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)).
907 */
908#define ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) \
909 ((uint64_t *)(zap)->zap_phys) \
910 [(idx) + (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap))]
911
912/*
913 * TAKE NOTE:
914 * If zap_phys_t is modified, zap_byteswap() must be modified.
915 */
916typedef struct zap_phys {
917 uint64_t zap_block_type; /* ZBT_HEADER */
918 uint64_t zap_magic; /* ZAP_MAGIC */
919
920 struct zap_table_phys {
921 uint64_t zt_blk; /* starting block number */
922 uint64_t zt_numblks; /* number of blocks */
923 uint64_t zt_shift; /* bits to index it */
924 uint64_t zt_nextblk; /* next (larger) copy start block */
 925 uint64_t zt_blks_copied; /* number of source blocks copied */
926 } zap_ptrtbl;
927
928 uint64_t zap_freeblk; /* the next free block */
929 uint64_t zap_num_leafs; /* number of leafs */
930 uint64_t zap_num_entries; /* number of entries */
931 uint64_t zap_salt; /* salt to stir into hash function */
932 /*
933 * This structure is followed by padding, and then the embedded
934 * pointer table. The embedded pointer table takes up second
935 * half of the block. It is accessed using the
936 * ZAP_EMBEDDED_PTRTBL_ENT() macro.
937 */
938} zap_phys_t;
939
940typedef struct zap_table_phys zap_table_phys_t;
941
942typedef struct fat_zap {
943 int zap_block_shift; /* block size shift */
944 zap_phys_t *zap_phys;
945} fat_zap_t;
946
947#define ZAP_LEAF_MAGIC 0x2AB1EAF
948
949/* chunk size = 24 bytes */
950#define ZAP_LEAF_CHUNKSIZE 24
951
952/*
953 * The amount of space available for chunks is:
954 * block size (1<<l->l_bs) - hash entry size (2) * number of hash
955 * entries - header space (2*chunksize)
956 */
957#define ZAP_LEAF_NUMCHUNKS(l) \
958 (((1<<(l)->l_bs) - 2*ZAP_LEAF_HASH_NUMENTRIES(l)) / \
959 ZAP_LEAF_CHUNKSIZE - 2)
960
961/*
962 * The amount of space within the chunk available for the array is:
963 * chunk size - space for type (1) - space for next pointer (2)
964 */
965#define ZAP_LEAF_ARRAY_BYTES (ZAP_LEAF_CHUNKSIZE - 3)
966
967#define ZAP_LEAF_ARRAY_NCHUNKS(bytes) \
968 (((bytes)+ZAP_LEAF_ARRAY_BYTES-1)/ZAP_LEAF_ARRAY_BYTES)
969
970/*
971 * Low water mark: when there are only this many chunks free, start
972 * growing the ptrtbl. Ideally, this should be larger than a
973 * "reasonably-sized" entry. 20 chunks is more than enough for the
974 * largest directory entry (MAXNAMELEN (256) byte name, 8-byte value),
975 * while still being only around 3% for 16k blocks.
976 */
977#define ZAP_LEAF_LOW_WATER (20)
978
979/*
980 * The leaf hash table has block size / 2^5 (32) number of entries,
981 * which should be more than enough for the maximum number of entries,
982 * which is less than block size / CHUNKSIZE (24) / minimum number of
983 * chunks per entry (3).
984 */
985#define ZAP_LEAF_HASH_SHIFT(l) ((l)->l_bs - 5)
986#define ZAP_LEAF_HASH_NUMENTRIES(l) (1 << ZAP_LEAF_HASH_SHIFT(l))
987
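/*
 * Illustrative worked example, not part of the original header: for a 16K
 * leaf block (l_bs = 14), ZAP_LEAF_HASH_NUMENTRIES() is 1 << 9 = 512 hash
 * slots and ZAP_LEAF_NUMCHUNKS() is (16384 - 2*512) / 24 - 2 = 638 chunks.
 */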
988/*
989 * The chunks start immediately after the hash table. The end of the
990 * hash table is at l_hash + HASH_NUMENTRIES, which we simply cast to a
991 * chunk_t.
992 */
993#define ZAP_LEAF_CHUNK(l, idx) \
994 ((zap_leaf_chunk_t *) \
995 ((l)->l_phys->l_hash + ZAP_LEAF_HASH_NUMENTRIES(l)))[idx]
996#define ZAP_LEAF_ENTRY(l, idx) (&ZAP_LEAF_CHUNK(l, idx).l_entry)
997
998typedef enum zap_chunk_type {
999 ZAP_CHUNK_FREE = 253,
1000 ZAP_CHUNK_ENTRY = 252,
1001 ZAP_CHUNK_ARRAY = 251,
1002 ZAP_CHUNK_TYPE_MAX = 250
1003} zap_chunk_type_t;
1004
1005/*
1006 * TAKE NOTE:
1007 * If zap_leaf_phys_t is modified, zap_leaf_byteswap() must be modified.
1008 */
1009typedef struct zap_leaf_phys {
1010 struct zap_leaf_header {
1011 uint64_t lh_block_type; /* ZBT_LEAF */
1012 uint64_t lh_pad1;
1013 uint64_t lh_prefix; /* hash prefix of this leaf */
1014 uint32_t lh_magic; /* ZAP_LEAF_MAGIC */
1015 uint16_t lh_nfree; /* number free chunks */
1016 uint16_t lh_nentries; /* number of entries */
1017 uint16_t lh_prefix_len; /* num bits used to id this */
1018
1019/* above is accessible to zap, below is zap_leaf private */
1020
1021 uint16_t lh_freelist; /* chunk head of free list */
1022 uint8_t lh_pad2[12];
1023 } l_hdr; /* 2 24-byte chunks */
1024
1025 /*
1026 * The header is followed by a hash table with
1027 * ZAP_LEAF_HASH_NUMENTRIES(zap) entries. The hash table is
1028 * followed by an array of ZAP_LEAF_NUMCHUNKS(zap)
1029 * zap_leaf_chunk structures. These structures are accessed
1030 * with the ZAP_LEAF_CHUNK() macro.
1031 */
1032
1033 uint16_t l_hash[1];
1034} zap_leaf_phys_t;
1035
1036typedef union zap_leaf_chunk {
1037 struct zap_leaf_entry {
1038 uint8_t le_type; /* always ZAP_CHUNK_ENTRY */
1039 uint8_t le_int_size; /* size of ints */
1040 uint16_t le_next; /* next entry in hash chain */
1041 uint16_t le_name_chunk; /* first chunk of the name */
1042 uint16_t le_name_length; /* bytes in name, incl null */
1043 uint16_t le_value_chunk; /* first chunk of the value */
1044 uint16_t le_value_length; /* value length in ints */
1045 uint32_t le_cd; /* collision differentiator */
1046 uint64_t le_hash; /* hash value of the name */
1047 } l_entry;
1048 struct zap_leaf_array {
1049 uint8_t la_type; /* always ZAP_CHUNK_ARRAY */
1050 uint8_t la_array[ZAP_LEAF_ARRAY_BYTES];
1051 uint16_t la_next; /* next blk or CHAIN_END */
1052 } l_array;
1053 struct zap_leaf_free {
1054 uint8_t lf_type; /* always ZAP_CHUNK_FREE */
1055 uint8_t lf_pad[ZAP_LEAF_ARRAY_BYTES];
1056 uint16_t lf_next; /* next in free list, or CHAIN_END */
1057 } l_free;
1058} zap_leaf_chunk_t;
1059
1060typedef struct zap_leaf {
1061 int l_bs; /* block size shift */
1062 zap_leaf_phys_t *l_phys;
1063} zap_leaf_t;
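
/*
 * Editorial sketch, not part of the original header: pulling an entry's
 * name out of a leaf using the chunk layout described above.  The name
 * is scattered across ZAP_CHUNK_ARRAY chunks of ZAP_LEAF_ARRAY_BYTES
 * bytes each, linked through la_next; le_name_length includes the
 * terminating NUL, and no bounds checking is done here.
 */
static inline void
zfs_example_leaf_entry_name(const zap_leaf_t *l,
    const struct zap_leaf_entry *le, char *buf)
{
        uint16_t chunk = le->le_name_chunk;
        size_t left = le->le_name_length;

        while (left > 0) {
                const struct zap_leaf_array *la =
                    &ZAP_LEAF_CHUNK(l, chunk).l_array;
                size_t i, n = left < ZAP_LEAF_ARRAY_BYTES ?
                    left : ZAP_LEAF_ARRAY_BYTES;

                for (i = 0; i < n; i++)
                        *buf++ = (char)la->la_array[i];
                left -= n;
                chunk = la->la_next;
        }
}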
1064
1065/*
1066 * Define special zfs pflags
1067 */
1068#define ZFS_XATTR 0x1 /* is an extended attribute */
1069#define ZFS_INHERIT_ACE 0x2 /* ace has inheritable ACEs */
1070#define ZFS_ACL_TRIVIAL 0x4 /* file's ACL is trivial */
1071
1072#define MASTER_NODE_OBJ 1
1073
1074/*
1075 * special attributes for master node.
1076 */
1077
1078#define ZFS_FSID "FSID"
1079#define ZFS_UNLINKED_SET "DELETE_QUEUE"
1080#define ZFS_ROOT_OBJ "ROOT"
1081#define ZPL_VERSION_OBJ "VERSION"
1082#define ZFS_PROP_BLOCKPERPAGE "BLOCKPERPAGE"
1083#define ZFS_PROP_NOGROWBLOCKS "NOGROWBLOCKS"
1084
1085#define ZFS_FLAG_BLOCKPERPAGE 0x1
1086#define ZFS_FLAG_NOGROWBLOCKS 0x2
1087
1088/*
1089 * ZPL version - rev'd whenever an incompatible on-disk format change
1090 * occurs. Independent of SPA/DMU/ZAP versioning.
1091 */
1092
1093#define ZPL_VERSION 1ULL
1094
1095/*
1096 * The directory entry has the type (currently unused on Solaris) in the
1097 * top 4 bits, and the object number in the low 48 bits. The "middle"
1098 * 12 bits are unused.
1099 */
1100#define ZFS_DIRENT_TYPE(de) BF64_GET(de, 60, 4)
1101#define ZFS_DIRENT_OBJ(de) BF64_GET(de, 0, 48)
1102#define ZFS_DIRENT_MAKE(type, obj) (((uint64_t)type << 60) | obj)
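
/*
 * Editorial example, not part of the original header: packing and
 * unpacking a directory entry value with the macros above.  With a type
 * of 0 the entry is just the object number in the low 48 bits.
 */
static inline uint64_t
zfs_example_dirent_objnum(uint64_t objnum)
{
        uint64_t de = ZFS_DIRENT_MAKE(0, objnum);

        return (ZFS_DIRENT_OBJ(de));    /* == objnum for objnum < 2^48 */
}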
1103
1104typedef struct ace {
1105 uid_t a_who; /* uid or gid */
1106 uint32_t a_access_mask; /* read,write,... */
1107 uint16_t a_flags; /* see below */
1108 uint16_t a_type; /* allow or deny */
1109} ace_t;
1110
1111#define ACE_SLOT_CNT 6
1112
1113typedef struct zfs_znode_acl {
1114 uint64_t z_acl_extern_obj; /* ext acl pieces */
1115 uint32_t z_acl_count; /* Number of ACEs */
1116 uint16_t z_acl_version; /* acl version */
1117 uint16_t z_acl_pad; /* pad */
1118 ace_t z_ace_data[ACE_SLOT_CNT]; /* 6 standard ACEs */
1119} zfs_znode_acl_t;
1120
1121/*
1122 * This is the persistent portion of the znode. It is stored
1123 * in the "bonus buffer" of the file. Short symbolic links
1124 * are also stored in the bonus buffer.
1125 */
1126typedef struct znode_phys {
1127 uint64_t zp_atime[2]; /* 0 - last file access time */
1128 uint64_t zp_mtime[2]; /* 16 - last file modification time */
1129 uint64_t zp_ctime[2]; /* 32 - last file change time */
1130 uint64_t zp_crtime[2]; /* 48 - creation time */
1131 uint64_t zp_gen; /* 64 - generation (txg of creation) */
1132 uint64_t zp_mode; /* 72 - file mode bits */
1133 uint64_t zp_size; /* 80 - size of file */
1134 uint64_t zp_parent; /* 88 - directory parent (`..') */
1135 uint64_t zp_links; /* 96 - number of links to file */
1136 uint64_t zp_xattr; /* 104 - DMU object for xattrs */
1137 uint64_t zp_rdev; /* 112 - dev_t for VBLK & VCHR files */
1138 uint64_t zp_flags; /* 120 - persistent flags */
1139 uint64_t zp_uid; /* 128 - file owner */
1140 uint64_t zp_gid; /* 136 - owning group */
1141 uint64_t zp_pad[4]; /* 144 - future */
1142 zfs_znode_acl_t zp_acl; /* 176 - 263 ACL */
1143 /*
1144 * Data may pad out any remaining bytes in the znode buffer, e.g.:
1145 *
1146 * |<---------------------- dnode_phys (512) ------------------------>|
1147 * |<-- dnode (192) --->|<----------- "bonus" buffer (320) ---------->|
1148 * |<---- znode (264) ---->|<---- data (56) ---->|
1149 *
1150 * At present, we only use this space to store symbolic links.
1151 */
1152} znode_phys_t;
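
/*
 * Editorial sketch, not part of the original header: reading a short
 * symbolic link straight out of the bonus buffer, assuming the target
 * fits in the "data" region pictured above (long links live in the
 * file's first data block instead; that case and all error handling
 * are omitted).
 */
static inline int
zfs_example_readlink_short(dnode_phys_t *dn, char *buf, size_t buflen)
{
        const znode_phys_t *zp = DN_BONUS(dn);
        const char *src = (const char *)(zp + 1);       /* data after znode */
        size_t i, len = zp->zp_size;                    /* no trailing NUL */

        if (sizeof (znode_phys_t) + len > dn->dn_bonuslen || len + 1 > buflen)
                return (-1);
        for (i = 0; i < len; i++)
                buf[i] = src[i];
        buf[len] = '\0';
        return (0);
}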
1153
1154/*
1155 * In-core vdev representation.
1156 */
1157struct vdev;
1158typedef int vdev_phys_read_t(struct vdev *vdev, void *priv,
1159 off_t offset, void *buf, size_t bytes);
1160typedef int vdev_read_t(struct vdev *vdev, const blkptr_t *bp,
1161 void *buf, off_t offset, size_t bytes);
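
/*
 * Editorial sketch, not part of the original header: a minimal
 * vdev_phys_read_t callback serving reads from an in-memory image of a
 * leaf vdev.  The "zfs_example_mem_vdev" container and its use as the
 * v_read_priv cookie are hypothetical; the signature is the one defined
 * just above.
 */
struct zfs_example_mem_vdev {
        const unsigned char *base;      /* start of the vdev image */
        size_t size;                    /* image size in bytes */
};

static inline int
zfs_example_mem_phys_read(struct vdev *vdev, void *priv,
    off_t offset, void *buf, size_t bytes)
{
        struct zfs_example_mem_vdev *mv = priv;
        size_t i;

        (void)vdev;
        if (offset < 0 || (uint64_t)offset + bytes > mv->size)
                return (1);     /* nonzero indicates an I/O error */
        for (i = 0; i < bytes; i++)
                ((unsigned char *)buf)[i] = mv->base[offset + i];
        return (0);
}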
1162
1163typedef STAILQ_HEAD(vdev_list, vdev) vdev_list_t;
1164
1165typedef struct vdev {
1166 STAILQ_ENTRY(vdev) v_childlink; /* link in parent's child list */
1167 STAILQ_ENTRY(vdev) v_alllink; /* link in global vdev list */
1168 vdev_list_t v_children; /* children of this vdev */
1169 char *v_name; /* vdev name */
1170 uint64_t v_guid; /* vdev guid */
1171 int v_id; /* index in parent */
1172 int v_ashift; /* offset to block shift */
1173 int v_nparity; /* # parity for raidz */
1174 int v_nchildren; /* # children */
1175 vdev_state_t v_state; /* current state */
1176 vdev_phys_read_t *v_phys_read; /* read from raw leaf vdev */
1177 vdev_read_t *v_read; /* read from vdev */
1178 void *v_read_priv; /* private data for read function */
1179} vdev_t;
1180
1181/*
1182 * In-core pool representation.
1183 */
1184typedef STAILQ_HEAD(spa_list, spa) spa_list_t;
1185
1186typedef struct spa {
1187 STAILQ_ENTRY(spa) spa_link; /* link in global pool list */
1188 char *spa_name; /* pool name */
1189 uint64_t spa_guid; /* pool guid */
1190 uint64_t spa_txg; /* most recent transaction */
1191 struct uberblock spa_uberblock; /* best uberblock so far */
1192 vdev_list_t spa_vdevs; /* list of all toplevel vdevs */
1193 objset_phys_t spa_mos; /* MOS for this pool */
1194 objset_phys_t spa_root_objset; /* currently mounted ZPL objset */
1195} spa_t;
525
526/*
527 * The following are configuration names used in the nvlist describing a pool's
528 * configuration.
529 */
530#define ZPOOL_CONFIG_VERSION "version"
531#define ZPOOL_CONFIG_POOL_NAME "name"
532#define ZPOOL_CONFIG_POOL_STATE "state"
533#define ZPOOL_CONFIG_POOL_TXG "txg"
534#define ZPOOL_CONFIG_POOL_GUID "pool_guid"
535#define ZPOOL_CONFIG_CREATE_TXG "create_txg"
536#define ZPOOL_CONFIG_TOP_GUID "top_guid"
537#define ZPOOL_CONFIG_VDEV_TREE "vdev_tree"
538#define ZPOOL_CONFIG_TYPE "type"
539#define ZPOOL_CONFIG_CHILDREN "children"
540#define ZPOOL_CONFIG_ID "id"
541#define ZPOOL_CONFIG_GUID "guid"
542#define ZPOOL_CONFIG_PATH "path"
543#define ZPOOL_CONFIG_DEVID "devid"
544#define ZPOOL_CONFIG_METASLAB_ARRAY "metaslab_array"
545#define ZPOOL_CONFIG_METASLAB_SHIFT "metaslab_shift"
546#define ZPOOL_CONFIG_ASHIFT "ashift"
547#define ZPOOL_CONFIG_ASIZE "asize"
548#define ZPOOL_CONFIG_DTL "DTL"
549#define ZPOOL_CONFIG_STATS "stats"
550#define ZPOOL_CONFIG_WHOLE_DISK "whole_disk"
551#define ZPOOL_CONFIG_OFFLINE "offline"
552#define ZPOOL_CONFIG_ERRCOUNT "error_count"
553#define ZPOOL_CONFIG_NOT_PRESENT "not_present"
554#define ZPOOL_CONFIG_SPARES "spares"
555#define ZPOOL_CONFIG_IS_SPARE "is_spare"
556#define ZPOOL_CONFIG_NPARITY "nparity"
557#define ZPOOL_CONFIG_HOSTID "hostid"
558#define ZPOOL_CONFIG_HOSTNAME "hostname"
559#define ZPOOL_CONFIG_TIMESTAMP "timestamp" /* not stored on disk */
560
561#define VDEV_TYPE_ROOT "root"
562#define VDEV_TYPE_MIRROR "mirror"
563#define VDEV_TYPE_REPLACING "replacing"
564#define VDEV_TYPE_RAIDZ "raidz"
565#define VDEV_TYPE_DISK "disk"
566#define VDEV_TYPE_FILE "file"
567#define VDEV_TYPE_MISSING "missing"
568#define VDEV_TYPE_SPARE "spare"
569
570/*
571 * This is needed in userland to report the minimum necessary device size.
572 */
573#define SPA_MINDEVSIZE (64ULL << 20)
574
575/*
576 * The location of the pool configuration repository, shared between kernel and
577 * userland.
578 */
579#define ZPOOL_CACHE_DIR "/boot/zfs"
580#define ZPOOL_CACHE_FILE "zpool.cache"
581#define ZPOOL_CACHE_TMP ".zpool.cache"
582
583#define ZPOOL_CACHE ZPOOL_CACHE_DIR "/" ZPOOL_CACHE_FILE
584
585/*
586 * vdev states are ordered from least to most healthy.
587 * A vdev that's CANT_OPEN or below is considered unusable.
588 */
589typedef enum vdev_state {
590 VDEV_STATE_UNKNOWN = 0, /* Uninitialized vdev */
591 VDEV_STATE_CLOSED, /* Not currently open */
592 VDEV_STATE_OFFLINE, /* Not allowed to open */
593 VDEV_STATE_CANT_OPEN, /* Tried to open, but failed */
594 VDEV_STATE_DEGRADED, /* Replicated vdev with unhealthy kids */
595 VDEV_STATE_HEALTHY /* Presumed good */
596} vdev_state_t;
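
/*
 * Editorial example, not part of the original header: because the states
 * above are ordered from least to most healthy, "usable" reduces to a
 * single comparison.
 */
static inline int
zfs_example_vdev_state_usable(vdev_state_t state)
{
        /* CANT_OPEN and below are unusable; DEGRADED and HEALTHY are usable. */
        return (state > VDEV_STATE_CANT_OPEN);
}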
597
598/*
599 * vdev aux states. When a vdev is in the CANT_OPEN state, the aux field
600 * of the vdev stats structure uses these constants to distinguish why.
601 */
602typedef enum vdev_aux {
603 VDEV_AUX_NONE, /* no error */
604 VDEV_AUX_OPEN_FAILED, /* ldi_open_*() or vn_open() failed */
605 VDEV_AUX_CORRUPT_DATA, /* bad label or disk contents */
606 VDEV_AUX_NO_REPLICAS, /* insufficient number of replicas */
607 VDEV_AUX_BAD_GUID_SUM, /* vdev guid sum doesn't match */
608 VDEV_AUX_TOO_SMALL, /* vdev size is too small */
609 VDEV_AUX_BAD_LABEL, /* the label is OK but invalid */
610 VDEV_AUX_VERSION_NEWER, /* on-disk version is too new */
611 VDEV_AUX_VERSION_OLDER, /* on-disk version is too old */
612 VDEV_AUX_SPARED /* hot spare used in another pool */
613} vdev_aux_t;
614
615/*
616 * pool state. The following states are written to disk as part of the normal
617 * SPA lifecycle: ACTIVE, EXPORTED, DESTROYED, SPARE. The remaining states are
618 * software abstractions used at various levels to communicate pool state.
619 */
620typedef enum pool_state {
621 POOL_STATE_ACTIVE = 0, /* In active use */
622 POOL_STATE_EXPORTED, /* Explicitly exported */
623 POOL_STATE_DESTROYED, /* Explicitly destroyed */
624 POOL_STATE_SPARE, /* Reserved for hot spare use */
625 POOL_STATE_UNINITIALIZED, /* Internal spa_t state */
626 POOL_STATE_UNAVAIL, /* Internal libzfs state */
627 POOL_STATE_POTENTIALLY_ACTIVE /* Internal libzfs state */
628} pool_state_t;
629
630/*
631 * The uberblock version is incremented whenever an incompatible on-disk
632 * format change is made to the SPA, DMU, or ZAP.
633 *
634 * Note: the first two fields should never be moved. When a storage pool
635 * is opened, the uberblock must be read off the disk before the version
636 * can be checked. If the ub_version field is moved, we may not detect
637 * version mismatch. If the ub_magic field is moved, applications that
638 * expect the magic number in the first word won't work.
639 */
640#define UBERBLOCK_MAGIC 0x00bab10c /* oo-ba-bloc! */
641#define UBERBLOCK_SHIFT 10 /* up to 1K */
642
643struct uberblock {
644 uint64_t ub_magic; /* UBERBLOCK_MAGIC */
645 uint64_t ub_version; /* SPA_VERSION */
646 uint64_t ub_txg; /* txg of last sync */
647 uint64_t ub_guid_sum; /* sum of all vdev guids */
648 uint64_t ub_timestamp; /* UTC time of last sync */
649 blkptr_t ub_rootbp; /* MOS objset_phys_t */
650};
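
/*
 * Editorial sketch, not part of the original header: one way a reader
 * can rely on the layout guarantee above.  ub_magic is checked before
 * anything else (byte-swapped pools are ignored here), and of two valid
 * uberblocks the one with the higher txg, then the later timestamp, is
 * preferred.
 */
static inline int
zfs_example_uberblock_better(const struct uberblock *a,
    const struct uberblock *b)
{
        if (a->ub_magic != UBERBLOCK_MAGIC)
                return (0);
        if (b->ub_magic != UBERBLOCK_MAGIC)
                return (1);
        if (a->ub_txg != b->ub_txg)
                return (a->ub_txg > b->ub_txg);
        return (a->ub_timestamp > b->ub_timestamp);
}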
651
652/*
653 * Flags.
654 */
655#define DNODE_MUST_BE_ALLOCATED 1
656#define DNODE_MUST_BE_FREE 2
657
658/*
659 * Fixed constants.
660 */
661#define DNODE_SHIFT 9 /* 512 bytes */
662#define DN_MIN_INDBLKSHIFT 10 /* 1k */
663#define DN_MAX_INDBLKSHIFT 14 /* 16k */
664#define DNODE_BLOCK_SHIFT 14 /* 16k */
665#define DNODE_CORE_SIZE 64 /* 64 bytes for dnode sans blkptrs */
666#define DN_MAX_OBJECT_SHIFT 48 /* 256 trillion (zfs_fid_t limit) */
667#define DN_MAX_OFFSET_SHIFT 64 /* 2^64 bytes in a dnode */
668
669/*
670 * Derived constants.
671 */
672#define DNODE_SIZE (1 << DNODE_SHIFT)
673#define DN_MAX_NBLKPTR ((DNODE_SIZE - DNODE_CORE_SIZE) >> SPA_BLKPTRSHIFT)
674#define DN_MAX_BONUSLEN (DNODE_SIZE - DNODE_CORE_SIZE - (1 << SPA_BLKPTRSHIFT))
675#define DN_MAX_OBJECT (1ULL << DN_MAX_OBJECT_SHIFT)
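
/*
 * Editorial example, not part of the original header: the derived
 * constants above worked out numerically, assuming SPA_BLKPTRSHIFT is 7
 * (a 128-byte blkptr_t) as defined earlier in this file.  Note that the
 * 320-byte bonus length matches the znode diagram further down.
 */
enum {
        ZFS_EXAMPLE_DNODE_SIZE = 1 << 9,                /* 512 bytes */
        ZFS_EXAMPLE_DN_MAX_NBLKPTR = (512 - 64) >> 7,   /* 3 block pointers */
        ZFS_EXAMPLE_DN_MAX_BONUSLEN = 512 - 64 - 128    /* 320 bytes */
};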
676
677#define DNODES_PER_BLOCK_SHIFT (DNODE_BLOCK_SHIFT - DNODE_SHIFT)
678#define DNODES_PER_BLOCK (1ULL << DNODES_PER_BLOCK_SHIFT)
679#define DNODES_PER_LEVEL_SHIFT (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT)
680
681/* The +2 here is a cheesy way to round up */
682#define DN_MAX_LEVELS (2 + ((DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT) / \
683 (DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT)))
684
685#define DN_BONUS(dnp) ((void*)((dnp)->dn_bonus + \
686 (((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t))))
687
688#define DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \
689 (dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT)
690
691#define EPB(blkshift, typeshift) (1 << (blkshift - typeshift))
692
693/* Is dn_used in bytes? if not, it's in multiples of SPA_MINBLOCKSIZE */
694#define DNODE_FLAG_USED_BYTES (1<<0)
695
696typedef struct dnode_phys {
697 uint8_t dn_type; /* dmu_object_type_t */
698 uint8_t dn_indblkshift; /* ln2(indirect block size) */
699 uint8_t dn_nlevels; /* 1=dn_blkptr->data blocks */
700 uint8_t dn_nblkptr; /* length of dn_blkptr */
701 uint8_t dn_bonustype; /* type of data in bonus buffer */
702 uint8_t dn_checksum; /* ZIO_CHECKSUM type */
703 uint8_t dn_compress; /* ZIO_COMPRESS type */
704 uint8_t dn_flags; /* DNODE_FLAG_* */
705 uint16_t dn_datablkszsec; /* data block size in 512b sectors */
706 uint16_t dn_bonuslen; /* length of dn_bonus */
707 uint8_t dn_pad2[4];
708
709 /* accounting is protected by dn_dirty_mtx */
710 uint64_t dn_maxblkid; /* largest allocated block ID */
711 uint64_t dn_used; /* bytes (or sectors) of disk space */
712
713 uint64_t dn_pad3[4];
714
715 blkptr_t dn_blkptr[1];
716 uint8_t dn_bonus[DN_MAX_BONUSLEN];
717} dnode_phys_t;
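
/*
 * Editorial example, not part of the original header: deriving an
 * object's block geometry from its dnode.  dn_datablkszsec is in
 * 512-byte sectors per the comment above, and dn_maxblkid is the last
 * allocated block ID.
 */
static inline uint64_t
zfs_example_dnode_datablksz(const dnode_phys_t *dn)
{
        return ((uint64_t)dn->dn_datablkszsec << 9);    /* bytes */
}

static inline uint64_t
zfs_example_dnode_end_offset(const dnode_phys_t *dn)
{
        /* one byte past the last allocated block */
        return ((dn->dn_maxblkid + 1) * zfs_example_dnode_datablksz(dn));
}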
718
719typedef enum dmu_object_type {
720 DMU_OT_NONE,
721 /* general: */
722 DMU_OT_OBJECT_DIRECTORY, /* ZAP */
723 DMU_OT_OBJECT_ARRAY, /* UINT64 */
724 DMU_OT_PACKED_NVLIST, /* UINT8 (XDR by nvlist_pack/unpack) */
725 DMU_OT_PACKED_NVLIST_SIZE, /* UINT64 */
726 DMU_OT_BPLIST, /* UINT64 */
727 DMU_OT_BPLIST_HDR, /* UINT64 */
728 /* spa: */
729 DMU_OT_SPACE_MAP_HEADER, /* UINT64 */
730 DMU_OT_SPACE_MAP, /* UINT64 */
731 /* zil: */
732 DMU_OT_INTENT_LOG, /* UINT64 */
733 /* dmu: */
734 DMU_OT_DNODE, /* DNODE */
735 DMU_OT_OBJSET, /* OBJSET */
736 /* dsl: */
737 DMU_OT_DSL_DIR, /* UINT64 */
738 DMU_OT_DSL_DIR_CHILD_MAP, /* ZAP */
739 DMU_OT_DSL_DS_SNAP_MAP, /* ZAP */
740 DMU_OT_DSL_PROPS, /* ZAP */
741 DMU_OT_DSL_DATASET, /* UINT64 */
742 /* zpl: */
743 DMU_OT_ZNODE, /* ZNODE */
744 DMU_OT_ACL, /* ACL */
745 DMU_OT_PLAIN_FILE_CONTENTS, /* UINT8 */
746 DMU_OT_DIRECTORY_CONTENTS, /* ZAP */
747 DMU_OT_MASTER_NODE, /* ZAP */
748 DMU_OT_UNLINKED_SET, /* ZAP */
749 /* zvol: */
750 DMU_OT_ZVOL, /* UINT8 */
751 DMU_OT_ZVOL_PROP, /* ZAP */
752 /* other; for testing only! */
753 DMU_OT_PLAIN_OTHER, /* UINT8 */
754 DMU_OT_UINT64_OTHER, /* UINT64 */
755 DMU_OT_ZAP_OTHER, /* ZAP */
756 /* new object types: */
757 DMU_OT_ERROR_LOG, /* ZAP */
758 DMU_OT_SPA_HISTORY, /* UINT8 */
759 DMU_OT_SPA_HISTORY_OFFSETS, /* spa_his_phys_t */
760 DMU_OT_POOL_PROPS, /* ZAP */
761
762 DMU_OT_NUMTYPES
763} dmu_object_type_t;
764
765typedef enum dmu_objset_type {
766 DMU_OST_NONE,
767 DMU_OST_META,
768 DMU_OST_ZFS,
769 DMU_OST_ZVOL,
770 DMU_OST_OTHER, /* For testing only! */
771 DMU_OST_ANY, /* Be careful! */
772 DMU_OST_NUMTYPES
773} dmu_objset_type_t;
774
775/*
776 * Intent log header - this on disk structure holds fields to manage
777 * the log. All fields are 64 bit to easily handle cross architectures.
778 */
779typedef struct zil_header {
780 uint64_t zh_claim_txg; /* txg in which log blocks were claimed */
781 uint64_t zh_replay_seq; /* highest replayed sequence number */
782 blkptr_t zh_log; /* log chain */
783 uint64_t zh_claim_seq; /* highest claimed sequence number */
784 uint64_t zh_pad[5];
785} zil_header_t;
786
787typedef struct objset_phys {
788 dnode_phys_t os_meta_dnode;
789 zil_header_t os_zil_header;
790 uint64_t os_type;
791 char os_pad[1024 - sizeof (dnode_phys_t) - sizeof (zil_header_t) -
792 sizeof (uint64_t)];
793} objset_phys_t;
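
/*
 * Editorial example, not part of the original header: where object
 * number "obj" lives inside an objset.  The meta dnode is an array of
 * dnode_phys_t packed DNODES_PER_BLOCK to a block, so a reader fetches
 * that block through os_meta_dnode's block pointers and indexes into it.
 */
static inline void
zfs_example_locate_dnode(uint64_t obj, uint64_t *blkidp, uint64_t *slotp)
{
        *blkidp = obj >> DNODES_PER_BLOCK_SHIFT; /* block of the meta dnode */
        *slotp = obj & (DNODES_PER_BLOCK - 1);   /* dnode within that block */
}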
794
795typedef struct dsl_dir_phys {
796 uint64_t dd_creation_time; /* not actually used */
797 uint64_t dd_head_dataset_obj;
798 uint64_t dd_parent_obj;
799 uint64_t dd_clone_parent_obj;
800 uint64_t dd_child_dir_zapobj;
801 /*
802 * how much space our children are accounting for; for leaf
803 * datasets, == physical space used by fs + snaps
804 */
805 uint64_t dd_used_bytes;
806 uint64_t dd_compressed_bytes;
807 uint64_t dd_uncompressed_bytes;
808 /* Administrative quota setting */
809 uint64_t dd_quota;
810 /* Administrative reservation setting */
811 uint64_t dd_reserved;
812 uint64_t dd_props_zapobj;
813 uint64_t dd_pad[21]; /* pad out to 256 bytes for good measure */
814} dsl_dir_phys_t;
815
816typedef struct dsl_dataset_phys {
817 uint64_t ds_dir_obj;
818 uint64_t ds_prev_snap_obj;
819 uint64_t ds_prev_snap_txg;
820 uint64_t ds_next_snap_obj;
821 uint64_t ds_snapnames_zapobj; /* zap obj of snaps; ==0 for snaps */
822 uint64_t ds_num_children; /* clone/snap children; ==0 for head */
823 uint64_t ds_creation_time; /* seconds since 1970 */
824 uint64_t ds_creation_txg;
825 uint64_t ds_deadlist_obj;
826 uint64_t ds_used_bytes;
827 uint64_t ds_compressed_bytes;
828 uint64_t ds_uncompressed_bytes;
829 uint64_t ds_unique_bytes; /* only relevant to snapshots */
830 /*
831 * The ds_fsid_guid is a 56-bit ID that can change to avoid
832 * collisions. The ds_guid is a 64-bit ID that will never
833 * change, so there is a small probability that it will collide.
834 */
835 uint64_t ds_fsid_guid;
836 uint64_t ds_guid;
837 uint64_t ds_flags;
838 blkptr_t ds_bp;
839 uint64_t ds_pad[8]; /* pad out to 320 bytes for good measure */
840} dsl_dataset_phys_t;
841
842/*
843 * The names of zap entries in the DIRECTORY_OBJECT of the MOS.
844 */
845#define DMU_POOL_DIRECTORY_OBJECT 1
846#define DMU_POOL_CONFIG "config"
847#define DMU_POOL_ROOT_DATASET "root_dataset"
848#define DMU_POOL_SYNC_BPLIST "sync_bplist"
849#define DMU_POOL_ERRLOG_SCRUB "errlog_scrub"
850#define DMU_POOL_ERRLOG_LAST "errlog_last"
851#define DMU_POOL_SPARES "spares"
852#define DMU_POOL_DEFLATE "deflate"
853#define DMU_POOL_HISTORY "history"
854#define DMU_POOL_PROPS "pool_props"
855
856#define ZAP_MAGIC 0x2F52AB2ABULL
857
858#define FZAP_BLOCK_SHIFT(zap) ((zap)->zap_block_shift)
859
860#define ZAP_MAXCD (uint32_t)(-1)
861#define ZAP_HASHBITS 28
862#define MZAP_ENT_LEN 64
863#define MZAP_NAME_LEN (MZAP_ENT_LEN - 8 - 4 - 2)
864#define MZAP_MAX_BLKSHIFT SPA_MAXBLOCKSHIFT
865#define MZAP_MAX_BLKSZ (1 << MZAP_MAX_BLKSHIFT)
866
867typedef struct mzap_ent_phys {
868 uint64_t mze_value;
869 uint32_t mze_cd;
870 uint16_t mze_pad; /* in case we want to chain them someday */
871 char mze_name[MZAP_NAME_LEN];
872} mzap_ent_phys_t;
873
874typedef struct mzap_phys {
875 uint64_t mz_block_type; /* ZBT_MICRO */
876 uint64_t mz_salt;
877 uint64_t mz_pad[6];
878 mzap_ent_phys_t mz_chunk[1];
879 /* actually variable size depending on block size */
880} mzap_phys_t;
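
/*
 * Editorial sketch, not part of the original header: a linear lookup in
 * a microzap block.  The 64-byte header above is followed by
 * (block size / MZAP_ENT_LEN) - 1 fixed-size entries; entries with an
 * empty name are unused.  String comparison is open-coded to keep the
 * sketch self-contained.
 */
static inline int
zfs_example_mzap_lookup(const mzap_phys_t *mz, size_t blksz,
    const char *name, uint64_t *valuep)
{
        size_t i, nchunks = blksz / MZAP_ENT_LEN - 1;

        for (i = 0; i < nchunks; i++) {
                const mzap_ent_phys_t *mze = &mz->mz_chunk[i];
                const char *a = mze->mze_name, *b = name;

                while (*a != '\0' && *a == *b) {
                        a++;
                        b++;
                }
                if (*a == '\0' && *b == '\0') {
                        *valuep = mze->mze_value;
                        return (0);
                }
        }
        return (-1);    /* not found */
}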
881
882/*
883 * The (fat) zap is stored in one object. It is an array of
884 * 1<<FZAP_BLOCK_SHIFT byte blocks. The layout looks like one of:
885 *
886 * ptrtbl fits in first block:
887 * [zap_phys_t zap_ptrtbl_shift < 6] [zap_leaf_t] ...
888 *
889 * ptrtbl too big for first block:
890 * [zap_phys_t zap_ptrtbl_shift >= 6] [zap_leaf_t] [ptrtbl] ...
891 *
892 */
893
894#define ZBT_LEAF ((1ULL << 63) + 0)
895#define ZBT_HEADER ((1ULL << 63) + 1)
896#define ZBT_MICRO ((1ULL << 63) + 3)
897/* any other values are ptrtbl blocks */
898
899/*
900 * the embedded pointer table takes up half a block:
901 * block size / entry size (2^3) / 2
902 */
903#define ZAP_EMBEDDED_PTRTBL_SHIFT(zap) (FZAP_BLOCK_SHIFT(zap) - 3 - 1)
904
905/*
906 * The embedded pointer table starts half-way through the block. Since
907 * the pointer table itself is half the block, it starts at (64-bit)
908 * word number (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)).
909 */
910#define ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) \
911 ((uint64_t *)(zap)->zap_phys) \
912 [(idx) + (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap))]
913
914/*
915 * TAKE NOTE:
916 * If zap_phys_t is modified, zap_byteswap() must be modified.
917 */
918typedef struct zap_phys {
919 uint64_t zap_block_type; /* ZBT_HEADER */
920 uint64_t zap_magic; /* ZAP_MAGIC */
921
922 struct zap_table_phys {
923 uint64_t zt_blk; /* starting block number */
924 uint64_t zt_numblks; /* number of blocks */
925 uint64_t zt_shift; /* bits to index it */
926 uint64_t zt_nextblk; /* next (larger) copy start block */
927 uint64_t zt_blks_copied; /* number of source blocks copied */
928 } zap_ptrtbl;
929
930 uint64_t zap_freeblk; /* the next free block */
931 uint64_t zap_num_leafs; /* number of leafs */
932 uint64_t zap_num_entries; /* number of entries */
933 uint64_t zap_salt; /* salt to stir into hash function */
934 /*
935 * This structure is followed by padding, and then the embedded
936 * pointer table. The embedded pointer table takes up the second
937 * half of the block. It is accessed using the
938 * ZAP_EMBEDDED_PTRTBL_ENT() macro.
939 */
940} zap_phys_t;
941
942typedef struct zap_table_phys zap_table_phys_t;
943
944typedef struct fat_zap {
945 int zap_block_shift; /* block size shift */
946 zap_phys_t *zap_phys;
947} fat_zap_t;
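
/*
 * Editorial sketch, not part of the original header: mapping a 64-bit
 * hash to a leaf block number while the pointer table is still embedded
 * (zap_ptrtbl.zt_numblks == 0).  The table is indexed by the top
 * zt_shift bits of the hash; hashing the name itself is not shown.
 */
static inline uint64_t
zfs_example_fzap_leaf_blkid(fat_zap_t *zap, uint64_t hash)
{
        uint64_t shift = zap->zap_phys->zap_ptrtbl.zt_shift;
        uint64_t idx = (shift == 0) ? 0 : (hash >> (64 - shift));

        return (ZAP_EMBEDDED_PTRTBL_ENT(zap, idx));
}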
948
949#define ZAP_LEAF_MAGIC 0x2AB1EAF
950
951/* chunk size = 24 bytes */
952#define ZAP_LEAF_CHUNKSIZE 24
953
954/*
955 * The amount of space available for chunks is:
956 * block size (1<<l->l_bs) - hash entry size (2) * number of hash
957 * entries - header space (2*chunksize)
958 */
959#define ZAP_LEAF_NUMCHUNKS(l) \
960 (((1<<(l)->l_bs) - 2*ZAP_LEAF_HASH_NUMENTRIES(l)) / \
961 ZAP_LEAF_CHUNKSIZE - 2)
962
963/*
964 * The amount of space within the chunk available for the array is:
965 * chunk size - space for type (1) - space for next pointer (2)
966 */
967#define ZAP_LEAF_ARRAY_BYTES (ZAP_LEAF_CHUNKSIZE - 3)
968
969#define ZAP_LEAF_ARRAY_NCHUNKS(bytes) \
970 (((bytes)+ZAP_LEAF_ARRAY_BYTES-1)/ZAP_LEAF_ARRAY_BYTES)
971
972/*
973 * Low water mark: when there are only this many chunks free, start
974 * growing the ptrtbl. Ideally, this should be larger than a
975 * "reasonably-sized" entry. 20 chunks is more than enough for the
976 * largest directory entry (MAXNAMELEN (256) byte name, 8-byte value),
977 * while still being only around 3% of the chunks in a 16k block.
978 */
979#define ZAP_LEAF_LOW_WATER (20)
980
981/*
982 * The leaf hash table has block size / 2^5 (32) entries,
983 * which should be more than enough for the maximum number of entries,
984 * which is less than block size / CHUNKSIZE (24) / minimum number of
985 * chunks per entry (3).
986 */
987#define ZAP_LEAF_HASH_SHIFT(l) ((l)->l_bs - 5)
988#define ZAP_LEAF_HASH_NUMENTRIES(l) (1 << ZAP_LEAF_HASH_SHIFT(l))
989
990/*
991 * The chunks start immediately after the hash table. The end of the
992 * hash table is at l_hash + HASH_NUMENTRIES, which we simply cast to a
993 * zap_leaf_chunk_t pointer.
994 */
995#define ZAP_LEAF_CHUNK(l, idx) \
996 ((zap_leaf_chunk_t *) \
997 ((l)->l_phys->l_hash + ZAP_LEAF_HASH_NUMENTRIES(l)))[idx]
998#define ZAP_LEAF_ENTRY(l, idx) (&ZAP_LEAF_CHUNK(l, idx).l_entry)
999
1000typedef enum zap_chunk_type {
1001 ZAP_CHUNK_FREE = 253,
1002 ZAP_CHUNK_ENTRY = 252,
1003 ZAP_CHUNK_ARRAY = 251,
1004 ZAP_CHUNK_TYPE_MAX = 250
1005} zap_chunk_type_t;
1006
1007/*
1008 * TAKE NOTE:
1009 * If zap_leaf_phys_t is modified, zap_leaf_byteswap() must be modified.
1010 */
1011typedef struct zap_leaf_phys {
1012 struct zap_leaf_header {
1013 uint64_t lh_block_type; /* ZBT_LEAF */
1014 uint64_t lh_pad1;
1015 uint64_t lh_prefix; /* hash prefix of this leaf */
1016 uint32_t lh_magic; /* ZAP_LEAF_MAGIC */
1017 uint16_t lh_nfree; /* number of free chunks */
1018 uint16_t lh_nentries; /* number of entries */
1019 uint16_t lh_prefix_len; /* num bits used to id this */
1020
1021/* above is accessible to zap, below is zap_leaf private */
1022
1023 uint16_t lh_freelist; /* chunk head of free list */
1024 uint8_t lh_pad2[12];
1025 } l_hdr; /* 2 24-byte chunks */
1026
1027 /*
1028 * The header is followed by a hash table with
1029 * ZAP_LEAF_HASH_NUMENTRIES(zap) entries. The hash table is
1030 * followed by an array of ZAP_LEAF_NUMCHUNKS(zap)
1031 * zap_leaf_chunk structures. These structures are accessed
1032 * with the ZAP_LEAF_CHUNK() macro.
1033 */
1034
1035 uint16_t l_hash[1];
1036} zap_leaf_phys_t;
1037
1038typedef union zap_leaf_chunk {
1039 struct zap_leaf_entry {
1040 uint8_t le_type; /* always ZAP_CHUNK_ENTRY */
1041 uint8_t le_int_size; /* size of ints */
1042 uint16_t le_next; /* next entry in hash chain */
1043 uint16_t le_name_chunk; /* first chunk of the name */
1044 uint16_t le_name_length; /* bytes in name, incl null */
1045 uint16_t le_value_chunk; /* first chunk of the value */
1046 uint16_t le_value_length; /* value length in ints */
1047 uint32_t le_cd; /* collision differentiator */
1048 uint64_t le_hash; /* hash value of the name */
1049 } l_entry;
1050 struct zap_leaf_array {
1051 uint8_t la_type; /* always ZAP_CHUNK_ARRAY */
1052 uint8_t la_array[ZAP_LEAF_ARRAY_BYTES];
1053 uint16_t la_next; /* next blk or CHAIN_END */
1054 } l_array;
1055 struct zap_leaf_free {
1056 uint8_t lf_type; /* always ZAP_CHUNK_FREE */
1057 uint8_t lf_pad[ZAP_LEAF_ARRAY_BYTES];
1058 uint16_t lf_next; /* next in free list, or CHAIN_END */
1059 } l_free;
1060} zap_leaf_chunk_t;
1061
1062typedef struct zap_leaf {
1063 int l_bs; /* block size shift */
1064 zap_leaf_phys_t *l_phys;
1065} zap_leaf_t;
1066
1067/*
1068 * Define special zfs pflags
1069 */
1070#define ZFS_XATTR 0x1 /* is an extended attribute */
1071#define ZFS_INHERIT_ACE 0x2 /* ace has inheritable ACEs */
1072#define ZFS_ACL_TRIVIAL 0x4 /* file's ACL is trivial */
1073
1074#define MASTER_NODE_OBJ 1
1075
1076/*
1077 * special attributes for master node.
1078 */
1079
1080#define ZFS_FSID "FSID"
1081#define ZFS_UNLINKED_SET "DELETE_QUEUE"
1082#define ZFS_ROOT_OBJ "ROOT"
1083#define ZPL_VERSION_OBJ "VERSION"
1084#define ZFS_PROP_BLOCKPERPAGE "BLOCKPERPAGE"
1085#define ZFS_PROP_NOGROWBLOCKS "NOGROWBLOCKS"
1086
1087#define ZFS_FLAG_BLOCKPERPAGE 0x1
1088#define ZFS_FLAG_NOGROWBLOCKS 0x2
1089
1090/*
1091 * ZPL version - rev'd whenever an incompatible on-disk format change
1092 * occurs. Independent of SPA/DMU/ZAP versioning.
1093 */
1094
1095#define ZPL_VERSION 1ULL
1096
1097/*
1098 * The directory entry has the type (currently unused on Solaris) in the
1099 * top 4 bits, and the object number in the low 48 bits. The "middle"
1100 * 12 bits are unused.
1101 */
1102#define ZFS_DIRENT_TYPE(de) BF64_GET(de, 60, 4)
1103#define ZFS_DIRENT_OBJ(de) BF64_GET(de, 0, 48)
1104#define ZFS_DIRENT_MAKE(type, obj) (((uint64_t)type << 60) | obj)
1105
1106typedef struct ace {
1107 uid_t a_who; /* uid or gid */
1108 uint32_t a_access_mask; /* read,write,... */
1109 uint16_t a_flags; /* see below */
1110 uint16_t a_type; /* allow or deny */
1111} ace_t;
1112
1113#define ACE_SLOT_CNT 6
1114
1115typedef struct zfs_znode_acl {
1116 uint64_t z_acl_extern_obj; /* ext acl pieces */
1117 uint32_t z_acl_count; /* Number of ACEs */
1118 uint16_t z_acl_version; /* acl version */
1119 uint16_t z_acl_pad; /* pad */
1120 ace_t z_ace_data[ACE_SLOT_CNT]; /* 6 standard ACEs */
1121} zfs_znode_acl_t;
1122
1123/*
1124 * This is the persistent portion of the znode. It is stored
1125 * in the "bonus buffer" of the file. Short symbolic links
1126 * are also stored in the bonus buffer.
1127 */
1128typedef struct znode_phys {
1129 uint64_t zp_atime[2]; /* 0 - last file access time */
1130 uint64_t zp_mtime[2]; /* 16 - last file modification time */
1131 uint64_t zp_ctime[2]; /* 32 - last file change time */
1132 uint64_t zp_crtime[2]; /* 48 - creation time */
1133 uint64_t zp_gen; /* 64 - generation (txg of creation) */
1134 uint64_t zp_mode; /* 72 - file mode bits */
1135 uint64_t zp_size; /* 80 - size of file */
1136 uint64_t zp_parent; /* 88 - directory parent (`..') */
1137 uint64_t zp_links; /* 96 - number of links to file */
1138 uint64_t zp_xattr; /* 104 - DMU object for xattrs */
1139 uint64_t zp_rdev; /* 112 - dev_t for VBLK & VCHR files */
1140 uint64_t zp_flags; /* 120 - persistent flags */
1141 uint64_t zp_uid; /* 128 - file owner */
1142 uint64_t zp_gid; /* 136 - owning group */
1143 uint64_t zp_pad[4]; /* 144 - future */
1144 zfs_znode_acl_t zp_acl; /* 176 - 263 ACL */
1145 /*
1146 * Data may pad out any remaining bytes in the znode buffer, e.g.:
1147 *
1148 * |<---------------------- dnode_phys (512) ------------------------>|
1149 * |<-- dnode (192) --->|<----------- "bonus" buffer (320) ---------->|
1150 * |<---- znode (264) ---->|<---- data (56) ---->|
1151 *
1152 * At present, we only use this space to store symbolic links.
1153 */
1154} znode_phys_t;
1155
1156/*
1157 * In-core vdev representation.
1158 */
1159struct vdev;
1160typedef int vdev_phys_read_t(struct vdev *vdev, void *priv,
1161 off_t offset, void *buf, size_t bytes);
1162typedef int vdev_read_t(struct vdev *vdev, const blkptr_t *bp,
1163 void *buf, off_t offset, size_t bytes);
1164
1165typedef STAILQ_HEAD(vdev_list, vdev) vdev_list_t;
1166
1167typedef struct vdev {
1168 STAILQ_ENTRY(vdev) v_childlink; /* link in parent's child list */
1169 STAILQ_ENTRY(vdev) v_alllink; /* link in global vdev list */
1170 vdev_list_t v_children; /* children of this vdev */
1171 char *v_name; /* vdev name */
1172 uint64_t v_guid; /* vdev guid */
1173 int v_id; /* index in parent */
1174 int v_ashift; /* offset to block shift */
1175 int v_nparity; /* # parity for raidz */
1176 int v_nchildren; /* # children */
1177 vdev_state_t v_state; /* current state */
1178 vdev_phys_read_t *v_phys_read; /* read from raw leaf vdev */
1179 vdev_read_t *v_read; /* read from vdev */
1180 void *v_read_priv; /* private data for read function */
1181} vdev_t;
1182
1183/*
1184 * In-core pool representation.
1185 */
1186typedef STAILQ_HEAD(spa_list, spa) spa_list_t;
1187
1188typedef struct spa {
1189 STAILQ_ENTRY(spa) spa_link; /* link in global pool list */
1190 char *spa_name; /* pool name */
1191 uint64_t spa_guid; /* pool guid */
1192 uint64_t spa_txg; /* most recent transaction */
1193 struct uberblock spa_uberblock; /* best uberblock so far */
1194 vdev_list_t spa_vdevs; /* list of all toplevel vdevs */
1195 objset_phys_t spa_mos; /* MOS for this pool */
1196 objset_phys_t spa_root_objset; /* currently mounted ZPL objset */
1197} spa_t;