/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_G_MIRROR_H_
#define	_G_MIRROR_H_

#include <sys/endian.h>
#include <sys/md5.h>

#define	G_MIRROR_CLASS_NAME	"MIRROR"

#define	G_MIRROR_MAGIC		"GEOM::MIRROR"
/*
 * Version history:
 * 0 - Initial version number.
 * 1 - Added 'prefer' balance algorithm.
 * 2 - Added md_genid field to metadata.
 * 3 - Added md_provsize field to metadata.
 * 4 - Added 'no failure synchronization' flag.
 */
#define	G_MIRROR_VERSION	4

#define	G_MIRROR_BALANCE_NONE		0
#define	G_MIRROR_BALANCE_ROUND_ROBIN	1
#define	G_MIRROR_BALANCE_LOAD		2
#define	G_MIRROR_BALANCE_SPLIT		3
#define	G_MIRROR_BALANCE_PREFER		4
#define	G_MIRROR_BALANCE_MIN		G_MIRROR_BALANCE_NONE
#define	G_MIRROR_BALANCE_MAX		G_MIRROR_BALANCE_PREFER

#define	G_MIRROR_DISK_FLAG_DIRTY		0x0000000000000001ULL
#define	G_MIRROR_DISK_FLAG_SYNCHRONIZING	0x0000000000000002ULL
#define	G_MIRROR_DISK_FLAG_FORCE_SYNC		0x0000000000000004ULL
#define	G_MIRROR_DISK_FLAG_INACTIVE		0x0000000000000008ULL
#define	G_MIRROR_DISK_FLAG_HARDCODED		0x0000000000000010ULL
#define	G_MIRROR_DISK_FLAG_BROKEN		0x0000000000000020ULL
#define	G_MIRROR_DISK_FLAG_CANDELETE		0x0000000000000040ULL

/* Per-disk flags which are recorded in on-disk metadata. */
#define	G_MIRROR_DISK_FLAG_MASK		(G_MIRROR_DISK_FLAG_DIRTY |	\
					 G_MIRROR_DISK_FLAG_SYNCHRONIZING | \
					 G_MIRROR_DISK_FLAG_FORCE_SYNC | \
					 G_MIRROR_DISK_FLAG_INACTIVE | \
					 G_MIRROR_DISK_FLAG_CANDELETE)

#define	G_MIRROR_DEVICE_FLAG_NOAUTOSYNC	0x0000000000000001ULL
#define	G_MIRROR_DEVICE_FLAG_NOFAILSYNC	0x0000000000000002ULL

/* Mirror flags which are recorded in on-disk metadata. */
#define	G_MIRROR_DEVICE_FLAG_MASK	(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC | \
					 G_MIRROR_DEVICE_FLAG_NOFAILSYNC)

#ifdef _KERNEL
#define	G_MIRROR_DEVICE_FLAG_DESTROY	0x0100000000000000ULL
#define	G_MIRROR_DEVICE_FLAG_DRAIN	0x0200000000000000ULL
#define	G_MIRROR_DEVICE_FLAG_CLOSEWAIT	0x0400000000000000ULL
#define	G_MIRROR_DEVICE_FLAG_TASTING	0x0800000000000000ULL
#define	G_MIRROR_DEVICE_FLAG_WIPE	0x1000000000000000ULL

extern int g_mirror_debug;

#define	G_MIRROR_DEBUG(lvl, ...) \
    _GEOM_DEBUG("GEOM_MIRROR", g_mirror_debug, (lvl), NULL, __VA_ARGS__)
#define	G_MIRROR_LOGREQ(lvl, bp, ...) \
    _GEOM_DEBUG("GEOM_MIRROR", g_mirror_debug, (lvl), (bp), __VA_ARGS__)

#define	G_MIRROR_BIO_FLAG_REGULAR	0x01
#define	G_MIRROR_BIO_FLAG_SYNC		0x02

/*
 * Per-disk information needed for synchronization.
 */
struct g_mirror_disk_sync {
	struct g_consumer *ds_consumer;	/* Consumer connected to our mirror. */
	off_t		  ds_offset;	/* Offset of next request to send. */
	off_t		  ds_offset_done; /* Offset of already synchronized
					   region. */
	time_t		  ds_update_ts; /* Time of last metadata update. */
	u_int		  ds_syncid;	/* Disk's synchronization ID. */
	u_int		  ds_inflight;	/* Number of in-flight sync requests. */
	struct bio	**ds_bios;	/* BIOs for synchronization I/O. */
};

/*
 * Per-device information needed for synchronization.
 */
struct g_mirror_device_sync {
	struct g_geom	*ds_geom;	/* Synchronization geom. */
	u_int		 ds_ndisks;	/* Number of disks in SYNCHRONIZING
					   state. */
};

#define	G_MIRROR_DISK_STATE_NONE		0
#define	G_MIRROR_DISK_STATE_NEW			1
#define	G_MIRROR_DISK_STATE_ACTIVE		2
#define	G_MIRROR_DISK_STATE_STALE		3
#define	G_MIRROR_DISK_STATE_SYNCHRONIZING	4
#define	G_MIRROR_DISK_STATE_DISCONNECTED	5
#define	G_MIRROR_DISK_STATE_DESTROY		6
struct g_mirror_disk {
	uint32_t	 d_id;		/* Disk ID. */
	struct g_consumer *d_consumer;	/* Consumer. */
	struct g_mirror_softc	*d_softc; /* Back-pointer to softc. */
	int		 d_state;	/* Disk state. */
	u_int		 d_priority;	/* Disk priority. */
	u_int		 load;		/* Averaged queue length. */
	off_t		 d_last_offset;	/* Last read offset. */
	uint64_t	 d_flags;	/* Additional flags. */
	u_int		 d_genid;	/* Disk's generation ID. */
	struct g_mirror_disk_sync d_sync;/* Sync information. */
	LIST_ENTRY(g_mirror_disk) d_next;
	u_int		 d_init_ndisks;	/* Initial number of mirror components. */
	uint32_t	 d_init_slice;	/* Initial slice size. */
	uint8_t		 d_init_balance;/* Initial balance. */
	uint64_t	 d_init_mediasize;/* Initial mediasize. */
};
#define	d_name	d_consumer->provider->name

#define	G_MIRROR_EVENT_DONTWAIT	0x1
#define	G_MIRROR_EVENT_WAIT	0x2
#define	G_MIRROR_EVENT_DEVICE	0x4
#define	G_MIRROR_EVENT_DONE	0x8
struct g_mirror_event {
	struct g_mirror_disk	*e_disk;
	int			 e_state;
	int			 e_flags;
	int			 e_error;
	TAILQ_ENTRY(g_mirror_event) e_next;
};

#define	G_MIRROR_DEVICE_STATE_STARTING		0
#define	G_MIRROR_DEVICE_STATE_RUNNING		1

#define	G_MIRROR_TYPE_MANUAL	0
#define	G_MIRROR_TYPE_AUTOMATIC	1

/* Bump syncid on first write. */
#define	G_MIRROR_BUMP_SYNCID		0x1
/* Bump genid immediately. */
#define	G_MIRROR_BUMP_GENID		0x2
/* Bump syncid immediately. */
#define	G_MIRROR_BUMP_SYNCID_NOW	0x4
struct g_mirror_softc {
	u_int		sc_type;	/* Device type (manual/automatic). */
	u_int		sc_state;	/* Device state. */
	uint32_t	sc_slice;	/* Slice size. */
	uint8_t		sc_balance;	/* Balance algorithm. */
	uint64_t	sc_mediasize;	/* Device size. */
	uint32_t	sc_sectorsize;	/* Sector size. */
	uint64_t	sc_flags;	/* Additional flags. */

	struct g_geom	*sc_geom;
	struct g_provider *sc_provider;
	int		sc_provider_open;

	uint32_t	sc_id;		/* Mirror unique ID. */

	struct sx	 sc_lock;
	struct bio_queue sc_queue;
	struct mtx	 sc_queue_mtx;
	struct proc	*sc_worker;
	struct bio_queue sc_inflight; /* In-flight regular write requests. */
	struct bio_queue sc_regular_delayed; /* Delayed I/O requests due to
						collision with sync requests. */
	struct bio_queue sc_sync_delayed; /* Delayed sync requests due to
					     collision with regular requests. */

	LIST_HEAD(, g_mirror_disk) sc_disks;
	u_int		sc_ndisks;	/* Number of disks. */
	struct g_mirror_disk *sc_hint;

	u_int		sc_genid;	/* Generation ID. */
	u_int		sc_syncid;	/* Synchronization ID. */
	int		sc_bump_id;
	struct g_mirror_device_sync sc_sync;
	int		sc_idle;	/* DIRTY flags removed. */
	time_t		sc_last_write;
	u_int		sc_writes;
	u_int		sc_refcnt;	/* Number of softc references. */

	TAILQ_HEAD(, g_mirror_event) sc_events;
	struct mtx	sc_events_mtx;
	struct g_mirror_event *sc_timeout_event;

	struct callout	sc_callout;

	struct root_hold_token *sc_rootmount;

	struct mtx	 sc_done_mtx;
};
#define	sc_name	sc_geom->name

struct g_mirror_metadata;

u_int g_mirror_ndisks(struct g_mirror_softc *sc, int state);
struct g_geom * g_mirror_create(struct g_class *mp,
    const struct g_mirror_metadata *md, u_int type);
#define	G_MIRROR_DESTROY_SOFT		0
#define	G_MIRROR_DESTROY_DELAYED	1
#define	G_MIRROR_DESTROY_HARD		2
int g_mirror_destroy(struct g_mirror_softc *sc, int how);
int g_mirror_event_send(void *arg, int state, int flags);
int g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md);
int g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md);
void g_mirror_fill_metadata(struct g_mirror_softc *sc,
    struct g_mirror_disk *disk, struct g_mirror_metadata *md);
void g_mirror_update_metadata(struct g_mirror_disk *disk);

g_ctl_req_t g_mirror_config;
#endif	/* _KERNEL */

struct g_mirror_metadata {
	char		md_magic[16];	/* Magic value. */
	uint32_t	md_version;	/* Version number. */
	char		md_name[16];	/* Mirror name. */
	uint32_t	md_mid;		/* Mirror unique ID. */
	uint32_t	md_did;		/* Disk unique ID. */
	uint8_t		md_all;		/* Number of disks in mirror. */
	uint32_t	md_genid;	/* Generation ID. */
	uint32_t	md_syncid;	/* Synchronization ID. */
	uint8_t		md_priority;	/* Disk priority. */
	uint32_t	md_slice;	/* Slice size. */
	uint8_t		md_balance;	/* Balance type. */
	uint64_t	md_mediasize;	/* Size of the smallest
					   disk in mirror. */
	uint32_t	md_sectorsize;	/* Sector size. */
	uint64_t	md_sync_offset;	/* Synchronized offset. */
	uint64_t	md_mflags;	/* Additional mirror flags. */
	uint64_t	md_dflags;	/* Additional disk flags. */
	char		md_provider[16]; /* Hardcoded provider. */
	uint64_t	md_provsize;	/* Provider's size. */
	u_char		md_hash[16];	/* MD5 hash. */
};
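/*
 * For reference, the little-endian on-disk layout of the current metadata
 * version (G_MIRROR_VERSION 4), with byte offsets derived from
 * mirror_metadata_encode() below:
 *
 *	  0 -  15	md_magic
 *	 16 -  19	md_version
 *	 20 -  35	md_name
 *	 36 -  39	md_mid
 *	 40 -  43	md_did
 *	 44		md_all
 *	 45 -  48	md_genid
 *	 49 -  52	md_syncid
 *	 53		md_priority
 *	 54 -  57	md_slice
 *	 58		md_balance
 *	 59 -  66	md_mediasize
 *	 67 -  70	md_sectorsize
 *	 71 -  78	md_sync_offset
 *	 79 -  86	md_mflags
 *	 87 -  94	md_dflags
 *	 95 - 110	md_provider
 *	111 - 118	md_provsize
 *	119 - 134	md_hash (MD5 of bytes 0-118)
 */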
static __inline void
mirror_metadata_encode(struct g_mirror_metadata *md, u_char *data)
{
	MD5_CTX ctx;

	bcopy(md->md_magic, data, 16);
	le32enc(data + 16, md->md_version);
	bcopy(md->md_name, data + 20, 16);
	le32enc(data + 36, md->md_mid);
	le32enc(data + 40, md->md_did);
	*(data + 44) = md->md_all;
	le32enc(data + 45, md->md_genid);
	le32enc(data + 49, md->md_syncid);
	*(data + 53) = md->md_priority;
	le32enc(data + 54, md->md_slice);
	*(data + 58) = md->md_balance;
	le64enc(data + 59, md->md_mediasize);
	le32enc(data + 67, md->md_sectorsize);
	le64enc(data + 71, md->md_sync_offset);
	le64enc(data + 79, md->md_mflags);
	le64enc(data + 87, md->md_dflags);
	bcopy(md->md_provider, data + 95, 16);
	le64enc(data + 111, md->md_provsize);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 119);
	MD5Final(md->md_hash, &ctx);
	bcopy(md->md_hash, data + 119, 16);
}
static __inline int
mirror_metadata_decode_v0v1(const u_char *data, struct g_mirror_metadata *md)
{
	MD5_CTX ctx;

	bcopy(data + 20, md->md_name, 16);
	md->md_mid = le32dec(data + 36);
	md->md_did = le32dec(data + 40);
	md->md_all = *(data + 44);
	md->md_syncid = le32dec(data + 45);
	md->md_priority = *(data + 49);
	md->md_slice = le32dec(data + 50);
	md->md_balance = *(data + 54);
	md->md_mediasize = le64dec(data + 55);
	md->md_sectorsize = le32dec(data + 63);
	md->md_sync_offset = le64dec(data + 67);
	md->md_mflags = le64dec(data + 75);
	md->md_dflags = le64dec(data + 83);
	bcopy(data + 91, md->md_provider, 16);
	bcopy(data + 107, md->md_hash, 16);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 107);
	MD5Final(md->md_hash, &ctx);
	if (bcmp(md->md_hash, data + 107, 16) != 0)
		return (EINVAL);

	/* New fields. */
	md->md_genid = 0;
	md->md_provsize = 0;

	return (0);
}
static __inline int
mirror_metadata_decode_v2(const u_char *data, struct g_mirror_metadata *md)
{
	MD5_CTX ctx;

	bcopy(data + 20, md->md_name, 16);
	md->md_mid = le32dec(data + 36);
	md->md_did = le32dec(data + 40);
	md->md_all = *(data + 44);
	md->md_genid = le32dec(data + 45);
	md->md_syncid = le32dec(data + 49);
	md->md_priority = *(data + 53);
	md->md_slice = le32dec(data + 54);
	md->md_balance = *(data + 58);
	md->md_mediasize = le64dec(data + 59);
	md->md_sectorsize = le32dec(data + 67);
	md->md_sync_offset = le64dec(data + 71);
	md->md_mflags = le64dec(data + 79);
	md->md_dflags = le64dec(data + 87);
	bcopy(data + 95, md->md_provider, 16);
	bcopy(data + 111, md->md_hash, 16);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 111);
	MD5Final(md->md_hash, &ctx);
	if (bcmp(md->md_hash, data + 111, 16) != 0)
		return (EINVAL);

	/* New fields. */
	md->md_provsize = 0;

	return (0);
}
static __inline int
mirror_metadata_decode_v3v4(const u_char *data, struct g_mirror_metadata *md)
{
	MD5_CTX ctx;

	bcopy(data + 20, md->md_name, 16);
	md->md_mid = le32dec(data + 36);
	md->md_did = le32dec(data + 40);
	md->md_all = *(data + 44);
	md->md_genid = le32dec(data + 45);
	md->md_syncid = le32dec(data + 49);
	md->md_priority = *(data + 53);
	md->md_slice = le32dec(data + 54);
	md->md_balance = *(data + 58);
	md->md_mediasize = le64dec(data + 59);
	md->md_sectorsize = le32dec(data + 67);
	md->md_sync_offset = le64dec(data + 71);
	md->md_mflags = le64dec(data + 79);
	md->md_dflags = le64dec(data + 87);
	bcopy(data + 95, md->md_provider, 16);
	md->md_provsize = le64dec(data + 111);
	bcopy(data + 119, md->md_hash, 16);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 119);
	MD5Final(md->md_hash, &ctx);
	if (bcmp(md->md_hash, data + 119, 16) != 0)
		return (EINVAL);
	return (0);
}
static __inline int
mirror_metadata_decode(const u_char *data, struct g_mirror_metadata *md)
{
	int error;

	bcopy(data, md->md_magic, 16);
	md->md_version = le32dec(data + 16);
	switch (md->md_version) {
	case 0:
	case 1:
		error = mirror_metadata_decode_v0v1(data, md);
		break;
	case 2:
		error = mirror_metadata_decode_v2(data, md);
		break;
	case 3:
	case 4:
		error = mirror_metadata_decode_v3v4(data, md);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
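/*
 * A minimal usage sketch (illustrative only, not the kernel code path):
 * assuming "sector" holds the raw last sector read from a candidate
 * provider, decode it and verify that it really carries gmirror metadata.
 * The function and variable names below are hypothetical.
 */
#if 0
static int
example_check_metadata(const u_char *sector, struct g_mirror_metadata *md)
{
	int error;

	/* Rejects unknown versions and bad MD5 checksums. */
	error = mirror_metadata_decode(sector, md);
	if (error != 0)
		return (error);
	/* Make sure the magic string identifies a gmirror provider. */
	if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0)
		return (EINVAL);
	return (0);
}
#endif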

static __inline const char *
balance_name(u_int balance)
{
	static const char *algorithms[] = {
		[G_MIRROR_BALANCE_NONE] = "none",
		[G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin",
		[G_MIRROR_BALANCE_LOAD] = "load",
		[G_MIRROR_BALANCE_SPLIT] = "split",
		[G_MIRROR_BALANCE_PREFER] = "prefer",
		[G_MIRROR_BALANCE_MAX + 1] = "unknown"
	};

	if (balance > G_MIRROR_BALANCE_MAX)
		balance = G_MIRROR_BALANCE_MAX + 1;

	return (algorithms[balance]);
}

static __inline int
balance_id(const char *name)
{
	static const char *algorithms[] = {
		[G_MIRROR_BALANCE_NONE] = "none",
		[G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin",
		[G_MIRROR_BALANCE_LOAD] = "load",
		[G_MIRROR_BALANCE_SPLIT] = "split",
		[G_MIRROR_BALANCE_PREFER] = "prefer"
	};
	int n;

	for (n = G_MIRROR_BALANCE_MIN; n <= G_MIRROR_BALANCE_MAX; n++) {
		if (strcmp(name, algorithms[n]) == 0)
			return (n);
	}
	return (-1);
}
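/*
 * Example (illustrative): the two helpers above translate between the
 * user-visible algorithm names and their numeric identifiers, e.g.
 *
 *	balance_id("round-robin") == G_MIRROR_BALANCE_ROUND_ROBIN
 *	balance_name(G_MIRROR_BALANCE_LOAD) returns "load"
 *	balance_id("bogus") == -1	(unknown name)
 */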

static __inline void
mirror_metadata_dump(const struct g_mirror_metadata *md)
{
	static const char hex[] = "0123456789abcdef";
	char hash[16 * 2 + 1];
	u_int i;

	printf("     magic: %s\n", md->md_magic);
	printf("   version: %u\n", (u_int)md->md_version);
	printf("      name: %s\n", md->md_name);
	printf("       mid: %u\n", (u_int)md->md_mid);
	printf("       did: %u\n", (u_int)md->md_did);
	printf("       all: %u\n", (u_int)md->md_all);
	printf("     genid: %u\n", (u_int)md->md_genid);
	printf("    syncid: %u\n", (u_int)md->md_syncid);
	printf("  priority: %u\n", (u_int)md->md_priority);
	printf("     slice: %u\n", (u_int)md->md_slice);
	printf("   balance: %s\n", balance_name((u_int)md->md_balance));
	printf(" mediasize: %jd\n", (intmax_t)md->md_mediasize);
	printf("sectorsize: %u\n", (u_int)md->md_sectorsize);
	printf("syncoffset: %jd\n", (intmax_t)md->md_sync_offset);
	printf("    mflags:");
	if (md->md_mflags == 0)
		printf(" NONE");
	else {
		if ((md->md_mflags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
			printf(" NOFAILSYNC");
		if ((md->md_mflags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0)
			printf(" NOAUTOSYNC");
	}
	printf("\n");
	printf("    dflags:");
	if (md->md_dflags == 0)
		printf(" NONE");
	else {
		if ((md->md_dflags & G_MIRROR_DISK_FLAG_DIRTY) != 0)
			printf(" DIRTY");
		if ((md->md_dflags & G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0)
			printf(" SYNCHRONIZING");
		if ((md->md_dflags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0)
			printf(" FORCE_SYNC");
		if ((md->md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0)
			printf(" INACTIVE");
	}
	printf("\n");
	printf("hcprovider: %s\n", md->md_provider);
	printf("  provsize: %ju\n", (uintmax_t)md->md_provsize);
	bzero(hash, sizeof(hash));
	for (i = 0; i < 16; i++) {
		hash[i * 2] = hex[md->md_hash[i] >> 4];
		hash[i * 2 + 1] = hex[md->md_hash[i] & 0x0f];
	}
	printf("  MD5 hash: %s\n", hash);
}
#endif	/* !_G_MIRROR_H_ */