g_mirror.c revision 133448
1/*-
2 * Copyright (c) 2004 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/geom/mirror/g_mirror.c 133448 2004-08-10 19:53:31Z pjd $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/module.h>
34#include <sys/limits.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/bio.h>
38#include <sys/sysctl.h>
39#include <sys/malloc.h>
40#include <sys/bitstring.h>
41#include <vm/uma.h>
42#include <machine/atomic.h>
43#include <geom/geom.h>
44#include <sys/proc.h>
45#include <sys/kthread.h>
46#include <geom/mirror/g_mirror.h>
47
48
49static MALLOC_DEFINE(M_MIRROR, "mirror data", "GEOM_MIRROR Data");
50
51SYSCTL_DECL(_kern_geom);
52SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, "GEOM_MIRROR stuff");
53u_int g_mirror_debug = 0;
54SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RW, &g_mirror_debug, 0,
55    "Debug level");
56static u_int g_mirror_sync_block_size = 131072;
57SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_block_size, CTLFLAG_RW,
58    &g_mirror_sync_block_size, 0, "Synchronization block size");
59static u_int g_mirror_timeout = 8;
60SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RW, &g_mirror_timeout,
61    0, "Time to wait on all mirror components");
62static u_int g_mirror_reqs_per_sync = 5;
63SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, reqs_per_sync, CTLFLAG_RW,
64    &g_mirror_reqs_per_sync, 0,
65    "Number of regular I/O requests per synchronization request");
66static u_int g_mirror_syncs_per_sec = 100;
67SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, syncs_per_sec, CTLFLAG_RW,
68    &g_mirror_syncs_per_sec, 0,
69    "Number of synchronization requests per second");
70
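/*
 * msleep(9) wrapper which logs going to sleep and waking up at
 * debug level 4.
 */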
71#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
72	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
73	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
74	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
75} while (0)
76
77
78static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp,
79    struct g_geom *gp);
80static g_taste_t g_mirror_taste;
81
82struct g_class g_mirror_class = {
83	.name = G_MIRROR_CLASS_NAME,
84	.version = G_VERSION,
85	.ctlreq = g_mirror_config,
86	.taste = g_mirror_taste,
87	.destroy_geom = g_mirror_destroy_geom
88};
89
90
91static void g_mirror_destroy_provider(struct g_mirror_softc *sc);
92static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
93static void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force);
94static void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
95    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
96static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type);
97
98
99static const char *
100g_mirror_disk_state2str(int state)
101{
102
103	switch (state) {
104	case G_MIRROR_DISK_STATE_NONE:
105		return ("NONE");
106	case G_MIRROR_DISK_STATE_NEW:
107		return ("NEW");
108	case G_MIRROR_DISK_STATE_ACTIVE:
109		return ("ACTIVE");
110	case G_MIRROR_DISK_STATE_STALE:
111		return ("STALE");
112	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
113		return ("SYNCHRONIZING");
114	case G_MIRROR_DISK_STATE_DISCONNECTED:
115		return ("DISCONNECTED");
116	case G_MIRROR_DISK_STATE_DESTROY:
117		return ("DESTROY");
118	default:
119		return ("INVALID");
120	}
121}
122
123static const char *
124g_mirror_device_state2str(int state)
125{
126
127	switch (state) {
128	case G_MIRROR_DEVICE_STATE_STARTING:
129		return ("STARTING");
130	case G_MIRROR_DEVICE_STATE_RUNNING:
131		return ("RUNNING");
132	default:
133		return ("INVALID");
134	}
135}
136
137static const char *
138g_mirror_get_diskname(struct g_mirror_disk *disk)
139{
140
141	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
142		return ("[unknown]");
143	return (disk->d_name);
144}
145
146/*
147 * --- Event handling functions ---
148 * Events in geom_mirror are used to maintain disk and device state
149 * from a single thread, which simplifies locking.
150 */
151static void
152g_mirror_event_free(struct g_mirror_event *ep)
153{
154
155	free(ep, M_MIRROR);
156}
157
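/*
 * Queue an event for the worker thread and wake it up.  Unless
 * G_MIRROR_EVENT_DONTWAIT is given, sleep until the event has been
 * processed and return its error status.
 */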
158int
159g_mirror_event_send(void *arg, int state, int flags)
160{
161	struct g_mirror_softc *sc;
162	struct g_mirror_disk *disk;
163	struct g_mirror_event *ep;
164	int error;
165
166	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
167	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
168	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
169		disk = NULL;
170		sc = arg;
171	} else {
172		disk = arg;
173		sc = disk->d_softc;
174	}
175	ep->e_disk = disk;
176	ep->e_state = state;
177	ep->e_flags = flags;
178	ep->e_error = 0;
179	mtx_lock(&sc->sc_events_mtx);
180	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
181	mtx_unlock(&sc->sc_events_mtx);
182	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
183	mtx_lock(&sc->sc_queue_mtx);
184	wakeup(sc);
185	mtx_unlock(&sc->sc_queue_mtx);
186	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
187		return (0);
188	g_topology_assert();
189	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
190	g_topology_unlock();
191	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
192		mtx_lock(&sc->sc_events_mtx);
193		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
194		    hz * 5);
195	}
196	/* Don't even try to use 'sc' here, because it could already be dead. */
197	g_topology_lock();
198	error = ep->e_error;
199	g_mirror_event_free(ep);
200	return (error);
201}
202
203static struct g_mirror_event *
204g_mirror_event_get(struct g_mirror_softc *sc)
205{
206	struct g_mirror_event *ep;
207
208	mtx_lock(&sc->sc_events_mtx);
209	ep = TAILQ_FIRST(&sc->sc_events);
210	if (ep != NULL)
211		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
212	mtx_unlock(&sc->sc_events_mtx);
213	return (ep);
214}
215
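/*
 * Cancel all pending events for the given disk; threads waiting for
 * one of them are woken up with ECANCELED.
 */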
216static void
217g_mirror_event_cancel(struct g_mirror_disk *disk)
218{
219	struct g_mirror_softc *sc;
220	struct g_mirror_event *ep, *tmpep;
221
222	g_topology_assert();
223
224	sc = disk->d_softc;
225	mtx_lock(&sc->sc_events_mtx);
226	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
227		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
228			continue;
229		if (ep->e_disk != disk)
230			continue;
231		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
232		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
233			g_mirror_event_free(ep);
234		else {
235			ep->e_error = ECANCELED;
236			wakeup(ep);
237		}
238	}
239	mtx_unlock(&sc->sc_events_mtx);
240}
241
242/*
243 * Return the number of disks in the given state.
244 * If state is equal to -1, count all connected disks.
245 */
246u_int
247g_mirror_ndisks(struct g_mirror_softc *sc, int state)
248{
249	struct g_mirror_disk *disk;
250	u_int n = 0;
251
252	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
253		if (state == -1 || disk->d_state == state)
254			n++;
255	}
256	return (n);
257}
258
259/*
260 * Find a disk in the mirror by its disk ID.
261 */
262static struct g_mirror_disk *
263g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
264{
265	struct g_mirror_disk *disk;
266
267	g_topology_assert();
268
269	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
270		if (disk->d_id == id)
271			return (disk);
272	}
273	return (NULL);
274}
275
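/*
 * Return the number of queued requests which came from the given consumer.
 */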
276static u_int
277g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
278{
279	struct bio *bp;
280	u_int nreqs = 0;
281
282	mtx_lock(&sc->sc_queue_mtx);
283	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
284		if (bp->bio_from == cp)
285			nreqs++;
286	}
287	mtx_unlock(&sc->sc_queue_mtx);
288	return (nreqs);
289}
290
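/*
 * Detach and destroy the consumer, but only if it has no in-flight
 * or queued I/O requests.
 */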
291static void
292g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
293{
294
295	g_topology_assert();
296
297	cp->private = NULL;
298	if (cp->nstart != cp->nend) {
299		G_MIRROR_DEBUG(2,
300		    "I/O requests for %s exist, can't destroy it now.",
301		    cp->provider->name);
302		return;
303	}
304	if (g_mirror_nrequests(sc, cp) > 0) {
305		G_MIRROR_DEBUG(2,
306		    "I/O requests for %s in queue, can't destroy it now.",
307		    cp->provider->name);
308		return;
309	}
310	G_MIRROR_DEBUG(2, "Consumer %s destroyed.", cp->provider->name);
311	g_detach(cp);
312	g_destroy_consumer(cp);
313}
314
315static int
316g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
317{
318	int error;
319
320	g_topology_assert();
321	KASSERT(disk->d_consumer == NULL,
322	    ("Disk already connected (device %s).", disk->d_softc->sc_name));
323
324	disk->d_consumer = g_new_consumer(disk->d_softc->sc_geom);
325	disk->d_consumer->private = disk;
326	error = g_attach(disk->d_consumer, pp);
327	if (error != 0)
328		return (error);
329	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
330	return (0);
331}
332
333static void
334g_mirror_disconnect_disk(struct g_mirror_disk *disk)
335{
336	struct g_consumer *cp;
337
338	g_topology_assert();
339
340	cp = disk->d_consumer;
341	if (cp == NULL)
342		return;
343	if (cp->provider != NULL) {
344		G_MIRROR_DEBUG(2, "Disk %s disconnected.",
345		    g_mirror_get_diskname(disk));
346		if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) {
347			G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d",
348			    cp->provider->name, -cp->acr, -cp->acw, -cp->ace,
349			    0);
350			g_access(cp, -cp->acr, -cp->acw, -cp->ace);
351		}
352		g_mirror_kill_consumer(disk->d_softc, cp);
353	} else {
354		g_destroy_consumer(cp);
355	}
356}
357
358/*
359 * Initialize a disk. This means allocating memory, creating a consumer,
360 * attaching it to the provider and opening access (r1w1e1) to it.
361 */
362static struct g_mirror_disk *
363g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
364    struct g_mirror_metadata *md, int *errorp)
365{
366	struct g_mirror_disk *disk;
367	int error;
368
369	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
370	if (disk == NULL) {
371		error = ENOMEM;
372		goto fail;
373	}
374	disk->d_softc = sc;
375	error = g_mirror_connect_disk(disk, pp);
376	if (error != 0)
377		goto fail;
378	disk->d_id = md->md_did;
379	disk->d_state = G_MIRROR_DISK_STATE_NONE;
380	disk->d_priority = md->md_priority;
381	disk->d_delay.sec = 0;
382	disk->d_delay.frac = 0;
383	binuptime(&disk->d_last_used);
384	disk->d_flags = md->md_dflags;
385	if (md->md_provider[0] != '\0')
386		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
387	disk->d_sync.ds_consumer = NULL;
388	disk->d_sync.ds_offset = md->md_sync_offset;
389	disk->d_sync.ds_offset_done = md->md_sync_offset;
390	disk->d_sync.ds_syncid = md->md_syncid;
391	if (errorp != NULL)
392		*errorp = 0;
393	return (disk);
394fail:
395	if (errorp != NULL)
396		*errorp = error;
397	if (disk != NULL) {
398		g_mirror_disconnect_disk(disk);
399		free(disk, M_MIRROR);
400	}
401	return (NULL);
402}
403
404static void
405g_mirror_destroy_disk(struct g_mirror_disk *disk)
406{
407	struct g_mirror_softc *sc;
408
409	g_topology_assert();
410
411	LIST_REMOVE(disk, d_next);
412	g_mirror_event_cancel(disk);
413	sc = disk->d_softc;
414	if (sc->sc_hint == disk)
415		sc->sc_hint = NULL;
416	switch (disk->d_state) {
417	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
418		g_mirror_sync_stop(disk, 1);
419		/* FALLTHROUGH */
420	case G_MIRROR_DISK_STATE_NEW:
421	case G_MIRROR_DISK_STATE_STALE:
422	case G_MIRROR_DISK_STATE_ACTIVE:
423		g_mirror_disconnect_disk(disk);
424		free(disk, M_MIRROR);
425		break;
426	default:
427		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
428		    g_mirror_get_diskname(disk),
429		    g_mirror_disk_state2str(disk->d_state)));
430	}
431}
432
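/*
 * Tear down the whole device: the provider, all disks, pending events
 * and the synchronization geom, then wither the device geom itself.
 */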
433static void
434g_mirror_destroy_device(struct g_mirror_softc *sc)
435{
436	struct g_mirror_disk *disk;
437	struct g_mirror_event *ep;
438	struct g_geom *gp;
439	struct g_consumer *cp;
440
441	g_topology_assert();
442
443	gp = sc->sc_geom;
444	if (sc->sc_provider != NULL)
445		g_mirror_destroy_provider(sc);
446	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
447	    disk = LIST_FIRST(&sc->sc_disks)) {
448		g_mirror_destroy_disk(disk);
449	}
450	while ((ep = g_mirror_event_get(sc)) != NULL) {
451		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
452			g_mirror_event_free(ep);
453		else {
454			ep->e_error = ECANCELED;
455			ep->e_flags |= G_MIRROR_EVENT_DONE;
456			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
457			mtx_lock(&sc->sc_events_mtx);
458			wakeup(ep);
459			mtx_unlock(&sc->sc_events_mtx);
460		}
461	}
462	callout_drain(&sc->sc_callout);
463	gp->softc = NULL;
464	uma_zdestroy(sc->sc_sync.ds_zone);
465	while ((cp = LIST_FIRST(&sc->sc_sync.ds_geom->consumer)) != NULL) {
466		if (cp->provider != NULL) {
467			if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
468				g_access(cp, -cp->acr, -cp->acw, -cp->ace);
469			g_detach(cp);
470		}
471		g_destroy_consumer(cp);
472	}
473	sc->sc_sync.ds_geom->softc = NULL;
474	g_destroy_geom(sc->sc_sync.ds_geom);
475	mtx_destroy(&sc->sc_queue_mtx);
476	mtx_destroy(&sc->sc_events_mtx);
477	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
478	g_wither_geom(gp, ENXIO);
479}
480
481static void
482g_mirror_orphan(struct g_consumer *cp)
483{
484	struct g_mirror_disk *disk;
485
486	g_topology_assert();
487
488	disk = cp->private;
489	if (disk == NULL)
490		return;
491	disk->d_softc->sc_bump_syncid = G_MIRROR_BUMP_ON_FIRST_WRITE;
492	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
493	    G_MIRROR_EVENT_DONTWAIT);
494}
495
496static void
497g_mirror_spoiled(struct g_consumer *cp)
498{
499	struct g_mirror_disk *disk;
500
501	g_topology_assert();
502
503	disk = cp->private;
504	if (disk == NULL)
505		return;
506	disk->d_softc->sc_bump_syncid = G_MIRROR_BUMP_IMMEDIATELY;
507	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
508	    G_MIRROR_EVENT_DONTWAIT);
509}
510
511/*
512 * Return the next active disk on the list.
513 * It is possible that it will be the same disk as the given one.
514 * If there are no active disks on the list, NULL is returned.
515 */
516static __inline struct g_mirror_disk *
517g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
518{
519	struct g_mirror_disk *dp;
520
521	for (dp = LIST_NEXT(disk, d_next); dp != disk;
522	    dp = LIST_NEXT(dp, d_next)) {
523		if (dp == NULL)
524			dp = LIST_FIRST(&sc->sc_disks);
525		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
526			break;
527	}
528	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
529		return (NULL);
530	return (dp);
531}
532
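/*
 * Return an active disk selected in round-robin fashion, using sc_hint
 * as the starting point.
 */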
533static struct g_mirror_disk *
534g_mirror_get_disk(struct g_mirror_softc *sc)
535{
536	struct g_mirror_disk *disk;
537
538	if (sc->sc_hint == NULL) {
539		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
540		if (sc->sc_hint == NULL)
541			return (NULL);
542	}
543	disk = sc->sc_hint;
544	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
545		disk = g_mirror_find_next(sc, disk);
546		if (disk == NULL)
547			return (NULL);
548	}
549	sc->sc_hint = g_mirror_find_next(sc, disk);
550	return (disk);
551}
552
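/*
 * Clear the metadata sector (the last sector) of the given disk.
 */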
553static int
554g_mirror_clear_metadata(struct g_mirror_disk *disk)
555{
556	struct g_mirror_softc *sc;
557	struct g_consumer *cp;
558	off_t offset, length;
559	u_char *sector;
560	int close = 0, error = 0;
561
562	g_topology_assert();
563
564	sc = disk->d_softc;
565	cp = disk->d_consumer;
566	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
567	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
568	length = cp->provider->sectorsize;
569	offset = cp->provider->mediasize - length;
570	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
571	/*
572	 * Open consumer if it wasn't opened and remember to close it.
573	 */
574	if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
575		error = g_access(cp, 0, 1, 1);
576		G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d",
577		    cp->provider->name, 0, 1, 1, error);
578		if (error == 0)
579			close = 1;
580#ifdef	INVARIANTS
581	} else {
582		KASSERT(cp->acw > 0 && cp->ace > 0,
583		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
584		    cp->acr, cp->acw, cp->ace));
585#endif
586	}
587	if (error == 0) {
588		g_topology_unlock();
589		error = g_write_data(cp, offset, sector, length);
590		g_topology_lock();
591	}
592	free(sector, M_MIRROR);
593	if (close) {
594		g_access(cp, 0, -1, -1);
595		G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d",
596		    cp->provider->name, 0, -1, -1, 0);
597	}
598	if (error != 0) {
599		G_MIRROR_DEBUG(0, "Cannot clear metadata on disk %s.",
600		    g_mirror_get_diskname(disk));
601		disk->d_softc->sc_bump_syncid = G_MIRROR_BUMP_IMMEDIATELY;
602		g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
603		    G_MIRROR_EVENT_DONTWAIT);
604		return (error);
605	}
606	G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
607	    g_mirror_get_diskname(disk));
608	return (0);
609}
610
611void
612g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
613    struct g_mirror_metadata *md)
614{
615
616	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
617	md->md_version = G_MIRROR_VERSION;
618	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
619	md->md_mid = sc->sc_id;
620	md->md_all = sc->sc_ndisks;
621	md->md_slice = sc->sc_slice;
622	md->md_balance = sc->sc_balance;
623	md->md_mediasize = sc->sc_mediasize;
624	md->md_sectorsize = sc->sc_sectorsize;
625	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
626	bzero(md->md_provider, sizeof(md->md_provider));
627	if (disk == NULL) {
628		md->md_did = arc4random();
629		md->md_priority = 0;
630		md->md_syncid = 0;
631		md->md_dflags = 0;
632		md->md_sync_offset = 0;
633	} else {
634		md->md_did = disk->d_id;
635		md->md_priority = disk->d_priority;
636		md->md_syncid = disk->d_sync.ds_syncid;
637		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
638		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
639			md->md_sync_offset = disk->d_sync.ds_offset_done;
640		else
641			md->md_sync_offset = 0;
642		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
643			strlcpy(md->md_provider,
644			    disk->d_consumer->provider->name,
645			    sizeof(md->md_provider));
646		}
647	}
648}
649
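/*
 * Write up-to-date metadata to the last sector of the given disk.
 */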
650void
651g_mirror_update_metadata(struct g_mirror_disk *disk)
652{
653	struct g_mirror_softc *sc;
654	struct g_mirror_metadata md;
655	struct g_consumer *cp;
656	off_t offset, length;
657	u_char *sector;
658	int close = 0, error = 0;
659
660	g_topology_assert();
661
662	sc = disk->d_softc;
663	cp = disk->d_consumer;
664	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
665	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
666	length = cp->provider->sectorsize;
667	offset = cp->provider->mediasize - length;
668	sector = malloc((size_t)length, M_MIRROR, M_WAITOK);
669	/*
670	 * Open consumer if it wasn't opened and remember to close it.
671	 */
672	if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
673		error = g_access(cp, 0, 1, 1);
674		G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d",
675		    cp->provider->name, 0, 1, 1, error);
676		if (error == 0)
677			close = 1;
678#ifdef	INVARIANTS
679	} else {
680		KASSERT(cp->acw > 0 && cp->ace > 0,
681		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
682		    cp->acr, cp->acw, cp->ace));
683#endif
684	}
685	if (error == 0) {
686		g_mirror_fill_metadata(sc, disk, &md);
687		mirror_metadata_encode(&md, sector);
688		g_topology_unlock();
689		error = g_write_data(cp, offset, sector, length);
690		g_topology_lock();
691	}
692	free(sector, M_MIRROR);
693	if (close) {
694		g_access(cp, 0, -1, -1);
695		G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d",
696		    cp->provider->name, 0, -1, -1, 0);
697	}
698	if (error != 0) {
699		G_MIRROR_DEBUG(0,
700		    "Cannot update metadata on disk %s (error=%d).",
701		    g_mirror_get_diskname(disk), error);
702		disk->d_softc->sc_bump_syncid = G_MIRROR_BUMP_IMMEDIATELY;
703		g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
704		    G_MIRROR_EVENT_DONTWAIT);
705		return;
706	}
707	G_MIRROR_DEBUG(2, "Metadata on %s updated.",
708	    g_mirror_get_diskname(disk));
709}
710
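/*
 * Increase the synchronization ID and store it in the metadata of all
 * active and synchronizing disks.
 */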
711static void
712g_mirror_bump_syncid(struct g_mirror_softc *sc)
713{
714	struct g_mirror_disk *disk;
715
716	g_topology_assert();
717	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
718	    ("%s called with no active disks (device=%s).", __func__,
719	    sc->sc_name));
720
721	sc->sc_syncid++;
722	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
723	    sc->sc_syncid);
724	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
725		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
726		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
727			disk->d_sync.ds_syncid = sc->sc_syncid;
728			g_mirror_update_metadata(disk);
729		}
730	}
731}
732
733static __inline int
734bintime_cmp(struct bintime *bt1, struct bintime *bt2)
735{
736
737	if (bt1->sec < bt2->sec)
738		return (-1);
739	else if (bt1->sec > bt2->sec)
740		return (1);
741	if (bt1->frac < bt2->frac)
742		return (-1);
743	else if (bt1->frac > bt2->frac)
744		return (1);
745	return (0);
746}
747
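/*
 * Record how long the given request took on this disk.  The result is
 * used by the 'load' balance algorithm.
 */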
748static void
749g_mirror_update_delay(struct g_mirror_disk *disk, struct bio *bp)
750{
751
752	if (disk->d_softc->sc_balance != G_MIRROR_BALANCE_LOAD)
753		return;
754	binuptime(&disk->d_delay);
755	bintime_sub(&disk->d_delay, &bp->bio_t0);
756}
757
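/*
 * A regular request has finished on a component; mark it as such and
 * hand it back to the worker thread.
 */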
758static void
759g_mirror_done(struct bio *bp)
760{
761	struct g_mirror_softc *sc;
762
763	sc = bp->bio_from->geom->softc;
764	bp->bio_cflags |= G_MIRROR_BIO_FLAG_REGULAR;
765	mtx_lock(&sc->sc_queue_mtx);
766	bioq_disksort(&sc->sc_queue, bp);
767	wakeup(sc);
768	mtx_unlock(&sc->sc_queue_mtx);
769}
770
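/*
 * Handle a regular request completed by a component: account for it in
 * the parent bio and deliver the parent once all children have returned.
 * The failing disk is disconnected on error.
 */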
771static void
772g_mirror_regular_request(struct bio *bp)
773{
774	struct g_mirror_softc *sc;
775	struct g_mirror_disk *disk;
776	struct bio *pbp;
777
778	g_topology_assert_not();
779
780	pbp = bp->bio_parent;
781	sc = pbp->bio_to->geom->softc;
782	disk = bp->bio_from->private;
783	if (disk == NULL) {
784		g_topology_lock();
785		g_mirror_kill_consumer(sc, bp->bio_from);
786		g_topology_unlock();
787	} else {
788		g_mirror_update_delay(disk, bp);
789	}
790
791	pbp->bio_inbed++;
792	KASSERT(pbp->bio_inbed <= pbp->bio_children,
793	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
794	    pbp->bio_children));
795	if (bp->bio_error == 0 && pbp->bio_error == 0) {
796		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
797		g_destroy_bio(bp);
798		if (pbp->bio_children == pbp->bio_inbed) {
799			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
800			pbp->bio_completed = pbp->bio_length;
801			g_io_deliver(pbp, pbp->bio_error);
802		}
803		return;
804	} else if (bp->bio_error != 0) {
805		if (pbp->bio_error == 0)
806			pbp->bio_error = bp->bio_error;
807		G_MIRROR_LOGREQ(0, bp, "Request failed (error=%d).",
808		    bp->bio_error);
809		if (disk != NULL) {
810			sc->sc_bump_syncid = G_MIRROR_BUMP_IMMEDIATELY;
811			g_mirror_event_send(disk,
812			    G_MIRROR_DISK_STATE_DISCONNECTED,
813			    G_MIRROR_EVENT_DONTWAIT);
814		}
815		switch (pbp->bio_cmd) {
816		case BIO_DELETE:
817		case BIO_WRITE:
818			pbp->bio_inbed--;
819			pbp->bio_children--;
820			break;
821		}
822	}
823	g_destroy_bio(bp);
824
825	switch (pbp->bio_cmd) {
826	case BIO_READ:
827		if (pbp->bio_children == pbp->bio_inbed) {
828			pbp->bio_error = 0;
829			mtx_lock(&sc->sc_queue_mtx);
830			bioq_disksort(&sc->sc_queue, pbp);
831			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
832			wakeup(sc);
833			mtx_unlock(&sc->sc_queue_mtx);
834		}
835		break;
836	case BIO_DELETE:
837	case BIO_WRITE:
838		if (pbp->bio_children == 0) {
839			/*
840			 * All requests failed.
841			 */
842		} else if (pbp->bio_inbed < pbp->bio_children) {
843			/* Do nothing. */
844			break;
845		} else if (pbp->bio_children == pbp->bio_inbed) {
846			/* Some requests succeeded. */
847			pbp->bio_error = 0;
848			pbp->bio_completed = pbp->bio_length;
849		}
850		g_io_deliver(pbp, pbp->bio_error);
851		break;
852	default:
853		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
854		break;
855	}
856}
857
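/*
 * A synchronization request has finished on a component; mark it as such
 * and hand it back to the worker thread.
 */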
858static void
859g_mirror_sync_done(struct bio *bp)
860{
861	struct g_mirror_softc *sc;
862
863	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
864	sc = bp->bio_from->geom->softc;
865	bp->bio_cflags |= G_MIRROR_BIO_FLAG_SYNC;
866	mtx_lock(&sc->sc_queue_mtx);
867	bioq_disksort(&sc->sc_queue, bp);
868	wakeup(sc);
869	mtx_unlock(&sc->sc_queue_mtx);
870}
871
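/*
 * The g_start method: queue incoming BIO_READ, BIO_WRITE and BIO_DELETE
 * requests for the worker thread; everything else is rejected with
 * EOPNOTSUPP.
 */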
872static void
873g_mirror_start(struct bio *bp)
874{
875	struct g_mirror_softc *sc;
876
877	sc = bp->bio_to->geom->softc;
878	/*
879	 * If sc == NULL or there are no valid disks, provider's error
880	 * should be set and g_mirror_start() should not be called at all.
881	 */
882	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
883	    ("Provider's error should be set (error=%d)(mirror=%s).",
884	    bp->bio_to->error, bp->bio_to->name));
885	G_MIRROR_LOGREQ(3, bp, "Request received.");
886
887	switch (bp->bio_cmd) {
888	case BIO_READ:
889	case BIO_WRITE:
890	case BIO_DELETE:
891		break;
892	case BIO_GETATTR:
893	default:
894		g_io_deliver(bp, EOPNOTSUPP);
895		return;
896	}
897	mtx_lock(&sc->sc_queue_mtx);
898	bioq_disksort(&sc->sc_queue, bp);
899	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
900	wakeup(sc);
901	mtx_unlock(&sc->sc_queue_mtx);
902}
903
904/*
905 * Send one synchronization request.
906 */
907static void
908g_mirror_sync_one(struct g_mirror_disk *disk)
909{
910	struct g_mirror_softc *sc;
911	struct bio *bp;
912
913	sc = disk->d_softc;
914	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
915	    ("Disk %s is not marked for synchronization.",
916	    g_mirror_get_diskname(disk)));
917
918	bp = g_new_bio();
919	if (bp == NULL)
920		return;
921	bp->bio_parent = NULL;
922	bp->bio_cmd = BIO_READ;
923	bp->bio_offset = disk->d_sync.ds_offset;
924	bp->bio_length = MIN(sc->sc_sync.ds_block,
925	    sc->sc_mediasize - bp->bio_offset);
926	bp->bio_cflags = 0;
927	bp->bio_done = g_mirror_sync_done;
928	bp->bio_data = uma_zalloc(sc->sc_sync.ds_zone, M_NOWAIT | M_ZERO);
929	if (bp->bio_data == NULL) {
930		g_destroy_bio(bp);
931		return;
932	}
933	disk->d_sync.ds_offset += bp->bio_length;
934	bp->bio_to = sc->sc_provider;
935	G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
936	g_io_request(bp, disk->d_sync.ds_consumer);
937}
938
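/*
 * Handle a completed synchronization request: a finished read is turned
 * into a write to the disk being synchronized, a finished write advances
 * the synchronization offset and may activate the disk.
 */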
939static void
940g_mirror_sync_request(struct bio *bp)
941{
942	struct g_mirror_softc *sc;
943	struct g_mirror_disk *disk;
944
945	sc = bp->bio_from->geom->softc;
946	disk = bp->bio_from->private;
947	if (disk == NULL) {
948		g_topology_lock();
949		g_mirror_kill_consumer(sc, bp->bio_from);
950		g_topology_unlock();
951		uma_zfree(sc->sc_sync.ds_zone, bp->bio_data);
952		g_destroy_bio(bp);
953		return;
954	}
955
956	/*
957	 * Synchronization request.
958	 */
959	switch (bp->bio_cmd) {
960	case BIO_READ:
961	    {
962		struct g_consumer *cp;
963
964		if (bp->bio_error != 0) {
965			G_MIRROR_LOGREQ(0, bp,
966			    "Synchronization request failed (error=%d).",
967			    bp->bio_error);
968			uma_zfree(sc->sc_sync.ds_zone, bp->bio_data);
969			g_destroy_bio(bp);
970			return;
971		}
972		bp->bio_cmd = BIO_WRITE;
973		bp->bio_cflags = 0;
974		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
975		cp = disk->d_consumer;
976		KASSERT(cp->acr == 0 && cp->acw == 1 && cp->ace == 1,
977		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
978		    cp->acr, cp->acw, cp->ace));
979		g_io_request(bp, cp);
980		return;
981	    }
982	case BIO_WRITE:
983		uma_zfree(sc->sc_sync.ds_zone, bp->bio_data);
984		if (bp->bio_error != 0) {
985			G_MIRROR_LOGREQ(0, bp,
986			    "Synchronization request failed (error=%d).",
987			    bp->bio_error);
988			g_destroy_bio(bp);
989			sc->sc_bump_syncid = G_MIRROR_BUMP_IMMEDIATELY;
990			g_mirror_event_send(disk,
991			    G_MIRROR_DISK_STATE_DISCONNECTED,
992			    G_MIRROR_EVENT_DONTWAIT);
993			return;
994		}
995		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
996		disk->d_sync.ds_offset_done = bp->bio_offset + bp->bio_length;
997		g_destroy_bio(bp);
998		if (disk->d_sync.ds_offset_done == sc->sc_provider->mediasize) {
999			/*
1000			 * Disk up-to-date, activate it.
1001			 */
1002			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
1003			    G_MIRROR_EVENT_DONTWAIT);
1004			return;
1005		} else if ((disk->d_sync.ds_offset_done %
1006		    (sc->sc_sync.ds_block * 100)) == 0) {
1007			/*
1008			 * Update offset_done every 100 blocks.
1009			 * XXX: This should be configurable.
1010			 */
1011			g_topology_lock();
1012			g_mirror_update_metadata(disk);
1013			g_topology_unlock();
1014		}
1015		return;
1016	default:
1017		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
1018		    bp->bio_cmd, sc->sc_name));
1019		break;
1020	}
1021}
1022
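/*
 * 'prefer' balance algorithm: send the request to the first active disk
 * on the list.
 */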
1023static void
1024g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
1025{
1026	struct g_mirror_disk *disk;
1027	struct g_consumer *cp;
1028	struct bio *cbp;
1029
1030	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1031		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
1032			break;
1033	}
1034	if (disk == NULL) {
1035		if (bp->bio_error == 0)
1036			bp->bio_error = ENXIO;
1037		g_io_deliver(bp, bp->bio_error);
1038		return;
1039	}
1040	cbp = g_clone_bio(bp);
1041	if (cbp == NULL) {
1042		if (bp->bio_error == 0)
1043			bp->bio_error = ENOMEM;
1044		g_io_deliver(bp, bp->bio_error);
1045		return;
1046	}
1047	/*
1048	 * Fill in the component bio structure.
1049	 */
1050	cp = disk->d_consumer;
1051	cbp->bio_done = g_mirror_done;
1052	cbp->bio_to = cp->provider;
1053	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1054	KASSERT(cp->acr > 0 && cp->ace > 0,
1055	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
1056	    cp->acw, cp->ace));
1057	g_io_request(cbp, cp);
1058}
1059
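/*
 * 'round-robin' balance algorithm: send the request to the next active
 * disk in turn.
 */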
1060static void
1061g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
1062{
1063	struct g_mirror_disk *disk;
1064	struct g_consumer *cp;
1065	struct bio *cbp;
1066
1067	disk = g_mirror_get_disk(sc);
1068	if (disk == NULL) {
1069		if (bp->bio_error == 0)
1070			bp->bio_error = ENXIO;
1071		g_io_deliver(bp, bp->bio_error);
1072		return;
1073	}
1074	cbp = g_clone_bio(bp);
1075	if (cbp == NULL) {
1076		if (bp->bio_error == 0)
1077			bp->bio_error = ENOMEM;
1078		g_io_deliver(bp, bp->bio_error);
1079		return;
1080	}
1081	/*
1082	 * Fill in the component bio structure.
1083	 */
1084	cp = disk->d_consumer;
1085	cbp->bio_done = g_mirror_done;
1086	cbp->bio_to = cp->provider;
1087	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1088	KASSERT(cp->acr > 0 && cp->ace > 0,
1089	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
1090	    cp->acw, cp->ace));
1091	g_io_request(cbp, cp);
1092}
1093
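/*
 * 'load' balance algorithm: send the request to the active disk with the
 * smallest recent load (a disk idle for more than two seconds is used
 * right away).
 */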
1094static void
1095g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
1096{
1097	struct g_mirror_disk *disk, *dp;
1098	struct g_consumer *cp;
1099	struct bio *cbp;
1100	struct bintime curtime;
1101
1102	binuptime(&curtime);
1103	/*
1104	 * Find the disk with the smallest load.
1105	 */
1106	disk = NULL;
1107	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
1108		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
1109			continue;
1110		/* If the disk hasn't been used for more than 2 sec, use it. */
1111		if (curtime.sec - dp->d_last_used.sec >= 2) {
1112			disk = dp;
1113			break;
1114		}
1115		if (disk == NULL ||
1116		    bintime_cmp(&dp->d_delay, &disk->d_delay) < 0) {
1117			disk = dp;
1118		}
1119	}
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
1120	cbp = g_clone_bio(bp);
1121	if (cbp == NULL) {
1122		if (bp->bio_error == 0)
1123			bp->bio_error = ENOMEM;
1124		g_io_deliver(bp, bp->bio_error);
1125		return;
1126	}
1127	/*
1128	 * Fill in the component bio structure.
1129	 */
1130	cp = disk->d_consumer;
1131	cbp->bio_done = g_mirror_done;
1132	cbp->bio_to = cp->provider;
1133	binuptime(&disk->d_last_used);
1134	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1135	KASSERT(cp->acr > 0 && cp->ace > 0,
1136	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
1137	    cp->acw, cp->ace));
1138	g_io_request(cbp, cp);
1139}
1140
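/*
 * 'split' balance algorithm: split a large request evenly across all
 * active disks; requests not bigger than the slice size are handled
 * round-robin.
 */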
1141static void
1142g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
1143{
1144	struct bio_queue_head queue;
1145	struct g_mirror_disk *disk;
1146	struct g_consumer *cp;
1147	struct bio *cbp;
1148	off_t left, mod, offset, slice;
1149	u_char *data;
1150	u_int ndisks;
1151
1152	if (bp->bio_length <= sc->sc_slice) {
1153		g_mirror_request_round_robin(sc, bp);
1154		return;
1155	}
1156	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
1157	slice = bp->bio_length / ndisks;
1158	mod = slice % sc->sc_provider->sectorsize;
1159	if (mod != 0)
1160		slice += sc->sc_provider->sectorsize - mod;
1161	/*
1162	 * Allocate all bios before sending any request, so we can
1163	 * return ENOMEM in a nice and clean way.
1164	 */
1165	left = bp->bio_length;
1166	offset = bp->bio_offset;
1167	data = bp->bio_data;
1168	bioq_init(&queue);
1169	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1170		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
1171			continue;
1172		cbp = g_clone_bio(bp);
1173		if (cbp == NULL) {
1174			for (cbp = bioq_first(&queue); cbp != NULL;
1175			    cbp = bioq_first(&queue)) {
1176				bioq_remove(&queue, cbp);
1177				g_destroy_bio(cbp);
1178			}
1179			if (bp->bio_error == 0)
1180				bp->bio_error = ENOMEM;
1181			g_io_deliver(bp, bp->bio_error);
1182			return;
1183		}
1184		bioq_insert_tail(&queue, cbp);
1185		cbp->bio_done = g_mirror_done;
1186		cbp->bio_caller1 = disk;
1187		cbp->bio_to = disk->d_consumer->provider;
1188		cbp->bio_offset = offset;
1189		cbp->bio_data = data;
1190		cbp->bio_length = MIN(left, slice);
1191		left -= cbp->bio_length;
1192		if (left == 0)
1193			break;
1194		offset += cbp->bio_length;
1195		data += cbp->bio_length;
1196	}
1197	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
1198		bioq_remove(&queue, cbp);
1199		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1200		disk = cbp->bio_caller1;
1201		cbp->bio_caller1 = NULL;
1202		cp = disk->d_consumer;
1203		KASSERT(cp->acr > 0 && cp->ace > 0,
1204		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1205		    cp->acr, cp->acw, cp->ace));
1206		g_io_request(cbp, disk->d_consumer);
1207	}
1208}
1209
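/*
 * Dispatch a regular request: reads go to a single component according
 * to the configured balance algorithm, writes and deletes go to all
 * active (and, where applicable, synchronizing) components.
 */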
1210static void
1211g_mirror_register_request(struct bio *bp)
1212{
1213	struct g_mirror_softc *sc;
1214
1215	sc = bp->bio_to->geom->softc;
1216	switch (bp->bio_cmd) {
1217	case BIO_READ:
1218		switch (sc->sc_balance) {
1219		case G_MIRROR_BALANCE_LOAD:
1220			g_mirror_request_load(sc, bp);
1221			break;
1222		case G_MIRROR_BALANCE_PREFER:
1223			g_mirror_request_prefer(sc, bp);
1224			break;
1225		case G_MIRROR_BALANCE_ROUND_ROBIN:
1226			g_mirror_request_round_robin(sc, bp);
1227			break;
1228		case G_MIRROR_BALANCE_SPLIT:
1229			g_mirror_request_split(sc, bp);
1230			break;
1231		}
1232		return;
1233	case BIO_WRITE:
1234	case BIO_DELETE:
1235	    {
1236		struct g_mirror_disk *disk;
1237		struct bio_queue_head queue;
1238		struct g_consumer *cp;
1239		struct bio *cbp;
1240
1241		/*
1242		 * Allocate all bios before sending any request, so we can
1243		 * return ENOMEM in a nice and clean way.
1244		 */
1245		bioq_init(&queue);
1246		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1247			switch (disk->d_state) {
1248			case G_MIRROR_DISK_STATE_ACTIVE:
1249				break;
1250			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
1251				if (bp->bio_offset >= disk->d_sync.ds_offset)
1252					continue;
1253				break;
1254			default:
1255				continue;
1256			}
1257			cbp = g_clone_bio(bp);
1258			if (cbp == NULL) {
1259				for (cbp = bioq_first(&queue); cbp != NULL;
1260				    cbp = bioq_first(&queue)) {
1261					bioq_remove(&queue, cbp);
1262					g_destroy_bio(cbp);
1263				}
1264				if (bp->bio_error == 0)
1265					bp->bio_error = ENOMEM;
1266				g_io_deliver(bp, bp->bio_error);
1267				return;
1268			}
1269			bioq_insert_tail(&queue, cbp);
1270		}
1271		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1272			switch (disk->d_state) {
1273			case G_MIRROR_DISK_STATE_ACTIVE:
1274				break;
1275			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
1276				if (bp->bio_offset >= disk->d_sync.ds_offset)
1277					continue;
1278				break;
1279			default:
1280				continue;
1281			}
1282			cbp = bioq_first(&queue);
1283			KASSERT(cbp != NULL, ("NULL cbp! (device %s).",
1284			    sc->sc_name));
1285			bioq_remove(&queue, cbp);
1286			cp = disk->d_consumer;
1287			cbp->bio_done = g_mirror_done;
1288			cbp->bio_to = cp->provider;
1289			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1290			KASSERT(cp->acw > 0 && cp->ace > 0,
1291			    ("Consumer %s not opened (r%dw%de%d).",
1292			    cp->provider->name, cp->acr, cp->acw, cp->ace));
1293			g_io_request(cbp, cp);
1294		}
1295		/*
1296		 * Bump syncid on first write.
1297		 */
1298		if (sc->sc_bump_syncid == G_MIRROR_BUMP_ON_FIRST_WRITE) {
1299			sc->sc_bump_syncid = 0;
1300			g_topology_lock();
1301			g_mirror_bump_syncid(sc);
1302			g_topology_unlock();
1303		}
1304		return;
1305	    }
1306	default:
1307		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
1308		    bp->bio_cmd, sc->sc_name));
1309		break;
1310	}
1311}
1312
1313/*
1314 * Worker thread.
1315 */
1316static void
1317g_mirror_worker(void *arg)
1318{
1319	struct g_mirror_softc *sc;
1320	struct g_mirror_disk *disk;
1321	struct g_mirror_event *ep;
1322	struct bio *bp;
1323	u_int nreqs;
1324
1325	sc = arg;
1326	curthread->td_base_pri = PRIBIO;
1327
1328	nreqs = 0;
1329	for (;;) {
1330		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
1331		/*
1332		 * First take a look at events.
1333		 * It is important to handle events before any I/O requests.
1334		 */
1335		ep = g_mirror_event_get(sc);
1336		if (ep != NULL) {
1337			g_topology_lock();
1338			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
1339				/* Update only device status. */
1340				G_MIRROR_DEBUG(3,
1341				    "Running event for device %s.",
1342				    sc->sc_name);
1343				ep->e_error = 0;
1344				g_mirror_update_device(sc, 1);
1345			} else {
1346				/* Update disk status. */
1347				G_MIRROR_DEBUG(3, "Running event for disk %s.",
1348				     g_mirror_get_diskname(ep->e_disk));
1349				ep->e_error = g_mirror_update_disk(ep->e_disk,
1350				    ep->e_state);
1351				if (ep->e_error == 0)
1352					g_mirror_update_device(sc, 0);
1353			}
1354			g_topology_unlock();
1355			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
1356				KASSERT(ep->e_error == 0,
1357				    ("Error cannot be handled."));
1358				g_mirror_event_free(ep);
1359			} else {
1360				ep->e_flags |= G_MIRROR_EVENT_DONE;
1361				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
1362				    ep);
1363				mtx_lock(&sc->sc_events_mtx);
1364				wakeup(ep);
1365				mtx_unlock(&sc->sc_events_mtx);
1366			}
1367			if ((sc->sc_flags &
1368			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
1369end:
1370				if ((sc->sc_flags &
1371				    G_MIRROR_DEVICE_FLAG_WAIT) != 0) {
1372					G_MIRROR_DEBUG(4, "%s: Waking up %p.",
1373					    __func__, &sc->sc_worker);
1374					wakeup(&sc->sc_worker);
1375					sc->sc_worker = NULL;
1376				} else {
1377					g_topology_lock();
1378					g_mirror_destroy_device(sc);
1379					g_topology_unlock();
1380					free(sc, M_MIRROR);
1381				}
1382				kthread_exit(0);
1383			}
1384			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
1385			continue;
1386		}
1387		/*
1388		 * Now I/O requests.
1389		 */
1390		/* Get first request from the queue. */
1391		mtx_lock(&sc->sc_queue_mtx);
1392		bp = bioq_first(&sc->sc_queue);
1393		if (bp == NULL) {
1394			if ((sc->sc_flags &
1395			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
1396				mtx_unlock(&sc->sc_queue_mtx);
1397				goto end;
1398			}
1399		}
1400		if (sc->sc_sync.ds_ndisks > 0 &&
1401		    (bp == NULL || nreqs > g_mirror_reqs_per_sync)) {
1402			mtx_unlock(&sc->sc_queue_mtx);
1403			/*
1404			 * It is time for synchronization...
1405			 */
1406			nreqs = 0;
1407			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1408				if (disk->d_state !=
1409				    G_MIRROR_DISK_STATE_SYNCHRONIZING) {
1410					continue;
1411				}
1412				if (disk->d_sync.ds_offset >=
1413				    sc->sc_provider->mediasize) {
1414					continue;
1415				}
1416				if (disk->d_sync.ds_offset >
1417				    disk->d_sync.ds_offset_done) {
1418					continue;
1419				}
1420				g_mirror_sync_one(disk);
1421			}
1422			G_MIRROR_DEBUG(5, "%s: I'm here 2.", __func__);
1423			goto sleep;
1424		}
1425		if (bp == NULL) {
1426			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1", 0);
1427			G_MIRROR_DEBUG(5, "%s: I'm here 3.", __func__);
1428			continue;
1429		}
1430		nreqs++;
1431		bioq_remove(&sc->sc_queue, bp);
1432		mtx_unlock(&sc->sc_queue_mtx);
1433
1434		if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0) {
1435			g_mirror_regular_request(bp);
1436		} else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
1437			u_int timeout, sps;
1438
1439			g_mirror_sync_request(bp);
1440sleep:
1441			sps = atomic_load_acq_int(&g_mirror_syncs_per_sec);
1442			if (sps == 0) {
1443				G_MIRROR_DEBUG(5, "%s: I'm here 5.", __func__);
1444				continue;
1445			}
1446			mtx_lock(&sc->sc_queue_mtx);
1447			if (bioq_first(&sc->sc_queue) != NULL) {
1448				mtx_unlock(&sc->sc_queue_mtx);
1449				G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
1450				continue;
1451			}
1452			timeout = hz / sps;
1453			if (timeout == 0)
1454				timeout = 1;
1455			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w2",
1456			    timeout);
1457		} else {
1458			g_mirror_register_request(bp);
1459		}
1460		G_MIRROR_DEBUG(5, "%s: I'm here 6.", __func__);
1461	}
1462}
1463
1464/*
1465 * Open disk's consumer if needed.
1466 */
1467static void
1468g_mirror_update_access(struct g_mirror_disk *disk)
1469{
1470	struct g_provider *pp;
1471	struct g_consumer *cp;
1472	int acr, acw, ace, cpw, error;
1473
1474	g_topology_assert();
1475
1476	cp = disk->d_consumer;
1477	pp = disk->d_softc->sc_provider;
1478	if (pp == NULL) {
1479		acr = -cp->acr;
1480		acw = -cp->acw;
1481		ace = -cp->ace;
1482	} else {
1483		acr = pp->acr - cp->acr;
1484		acw = pp->acw - cp->acw;
1485		ace = pp->ace - cp->ace;
1486		/* Grab an extra "exclusive" bit. */
1487		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)
1488			ace++;
1489	}
1490	if (acr == 0 && acw == 0 && ace == 0)
1491		return;
1492	cpw = cp->acw;
1493	error = g_access(cp, acr, acw, ace);
1494	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", cp->provider->name, acr,
1495	    acw, ace, error);
1496	if (error != 0) {
1497		disk->d_softc->sc_bump_syncid = G_MIRROR_BUMP_ON_FIRST_WRITE;
1498		g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
1499		    G_MIRROR_EVENT_DONTWAIT);
1500		return;
1501	}
1502	if (cpw == 0 && cp->acw > 0) {
1503		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
1504		    g_mirror_get_diskname(disk), disk->d_softc->sc_name);
1505		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
1506	} else if (cpw > 0 && cp->acw == 0) {
1507		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
1508		    g_mirror_get_diskname(disk), disk->d_softc->sc_name);
1509		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
1510	}
1511}
1512
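/*
 * Start the synchronization process for the given disk: open it for
 * writing and create a consumer used to read the data from the mirror's
 * provider.
 */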
1513static void
1514g_mirror_sync_start(struct g_mirror_disk *disk)
1515{
1516	struct g_mirror_softc *sc;
1517	struct g_consumer *cp;
1518	int error;
1519
1520	g_topology_assert();
1521
1522	sc = disk->d_softc;
1523	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
1524	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
1525	    sc->sc_state));
1526	cp = disk->d_consumer;
1527	KASSERT(cp->acr == 0 && cp->acw == 0 && cp->ace == 0,
1528	    ("Consumer %s already opened.", cp->provider->name));
1529
1530	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
1531	    g_mirror_get_diskname(disk));
1532	error = g_access(cp, 0, 1, 1);
1533	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", cp->provider->name, 0, 1,
1534	    1, error);
1535	if (error != 0) {
1536		g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
1537		    G_MIRROR_EVENT_DONTWAIT);
1538		return;
1539	}
1540	disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
1541	KASSERT(disk->d_sync.ds_consumer == NULL,
1542	    ("Sync consumer already exists (device=%s, disk=%s).",
1543	    sc->sc_name, g_mirror_get_diskname(disk)));
1544	disk->d_sync.ds_consumer = g_new_consumer(sc->sc_sync.ds_geom);
1545	disk->d_sync.ds_consumer->private = disk;
1546	error = g_attach(disk->d_sync.ds_consumer, disk->d_softc->sc_provider);
1547	KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
1548	    disk->d_softc->sc_name, error));
1549	error = g_access(disk->d_sync.ds_consumer, 1, 0, 0);
1550	KASSERT(error == 0, ("Cannot open %s (error=%d).",
1551	    disk->d_softc->sc_name, error));
1552	sc->sc_sync.ds_ndisks++;
1553}
1554
1555/*
1556 * Stop synchronization process.
1557 * type: 0 - synchronization finished
1558 *       1 - synchronization stopped
1559 */
1560static void
1561g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
1562{
1563	struct g_consumer *cp;
1564
1565	g_topology_assert();
1566	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
1567	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
1568	    g_mirror_disk_state2str(disk->d_state)));
1569	if (disk->d_sync.ds_consumer == NULL)
1570		return;
1571
1572	if (type == 0) {
1573		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
1574		    disk->d_softc->sc_name, g_mirror_get_diskname(disk));
1575	} else /* if (type == 1) */ {
1576		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
1577		    disk->d_softc->sc_name, g_mirror_get_diskname(disk));
1578	}
1579	cp = disk->d_sync.ds_consumer;
1580	g_access(cp, -1, 0, 0);
1581	g_mirror_kill_consumer(disk->d_softc, cp);
1582	disk->d_sync.ds_consumer = NULL;
1583	disk->d_softc->sc_sync.ds_ndisks--;
1584	cp = disk->d_consumer;
1585	KASSERT(cp->acr == 0 && cp->acw == 1 && cp->ace == 1,
1586	    ("Consumer %s not opened.", cp->provider->name));
1587	g_access(cp, 0, -1, -1);
1588	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", cp->provider->name, 0, -1,
1589	    -1, 0);
1590	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
1591}
1592
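/*
 * Create and announce the mirror/<name> provider and start
 * synchronization of disks which need it.
 */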
1593static void
1594g_mirror_launch_provider(struct g_mirror_softc *sc)
1595{
1596	struct g_mirror_disk *disk;
1597	struct g_provider *pp;
1598
1599	g_topology_assert();
1600
1601	pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name);
1602	pp->mediasize = sc->sc_mediasize;
1603	pp->sectorsize = sc->sc_sectorsize;
1604	sc->sc_provider = pp;
1605	g_error_provider(pp, 0);
1606	G_MIRROR_DEBUG(0, "Device %s: provider %s launched.", sc->sc_name,
1607	    pp->name);
1608	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1609		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
1610			g_mirror_sync_start(disk);
1611	}
1612}
1613
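/*
 * Destroy the mirror's provider: fail all queued requests with ENXIO
 * and stop any synchronization in progress.
 */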
1614static void
1615g_mirror_destroy_provider(struct g_mirror_softc *sc)
1616{
1617	struct g_mirror_disk *disk;
1618	struct bio *bp;
1619
1620	g_topology_assert();
1621	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
1622	    sc->sc_name));
1623
1624	g_error_provider(sc->sc_provider, ENXIO);
1625	mtx_lock(&sc->sc_queue_mtx);
1626	while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
1627		bioq_remove(&sc->sc_queue, bp);
1628		g_io_deliver(bp, ENXIO);
1629	}
1630	mtx_unlock(&sc->sc_queue_mtx);
1631	G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
1632	    sc->sc_provider->name);
1633	sc->sc_provider->flags |= G_PF_WITHER;
1634	g_orphan_provider(sc->sc_provider, ENXIO);
1635	sc->sc_provider = NULL;
1636	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1637		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
1638			g_mirror_sync_stop(disk, 1);
1639	}
1640}
1641
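/*
 * Callout handler which forces the device to start after the timeout
 * expires, even if not all components have appeared.
 */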
1642static void
1643g_mirror_go(void *arg)
1644{
1645	struct g_mirror_softc *sc;
1646
1647	sc = arg;
1648	G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
1649	g_mirror_event_send(sc, 0,
1650	    G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE);
1651}
1652
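/*
 * Decide which state a newly connected disk should move to, based on its
 * synchronization ID and flags.
 */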
1653static u_int
1654g_mirror_determine_state(struct g_mirror_disk *disk)
1655{
1656	struct g_mirror_softc *sc;
1657	u_int state;
1658
1659	sc = disk->d_softc;
1660	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
1661		if ((disk->d_flags &
1662		    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
1663			/* Disk does not need synchronization. */
1664			state = G_MIRROR_DISK_STATE_ACTIVE;
1665		} else {
1666			if ((sc->sc_flags &
1667			     G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0  ||
1668			    (disk->d_flags &
1669			     G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
1670				/*
1671				 * We can start synchronization from
1672				 * the stored offset.
1673				 */
1674				state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
1675			} else {
1676				state = G_MIRROR_DISK_STATE_STALE;
1677			}
1678		}
1679	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
1680		/*
1681		 * Reset all synchronization data for this disk,
1682		 * because even if it was synchronized, it was
1683		 * synchronized to disks with a different syncid.
1684		 */
1685		disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
1686		disk->d_sync.ds_offset = 0;
1687		disk->d_sync.ds_offset_done = 0;
1688		disk->d_sync.ds_syncid = sc->sc_syncid;
1689		if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
1690		    (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
1691			state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
1692		} else {
1693			state = G_MIRROR_DISK_STATE_STALE;
1694		}
1695	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
1696		/*
1697		 * It means that the mirror was started on stale disks
1698		 * and a fresher disk has just arrived.
1699		 * If there were writes, the mirror is fucked up, sorry.
1700		 * I think the best choice here is not to touch
1701		 * this disk and to inform the user loudly.
1702		 * this disk and inform the user laudly.
1703		 */
1704		G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
1705		    "disk (%s) arrives!! It will not be connected to the "
1706		    "running device.", sc->sc_name,
1707		    g_mirror_get_diskname(disk));
1708		g_mirror_destroy_disk(disk);
1709		state = G_MIRROR_DISK_STATE_NONE;
1710		/* Return immediately, because disk was destroyed. */
1711		return (state);
1712	}
1713	G_MIRROR_DEBUG(3, "State for %s disk: %s.",
1714	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
1715	return (state);
1716}
1717
1718/*
1719 * Update device state.
1720 */
1721static void
1722g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force)
1723{
1724	struct g_mirror_disk *disk;
1725	u_int state;
1726
1727	g_topology_assert();
1728
1729	switch (sc->sc_state) {
1730	case G_MIRROR_DEVICE_STATE_STARTING:
1731	    {
1732		struct g_mirror_disk *pdisk;
1733		u_int dirty, ndisks, syncid;
1734
1735		KASSERT(sc->sc_provider == NULL,
1736		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
1737		/*
1738		 * Are we ready? We are, if all disks are connected or
1739		 * if we have any disks and 'force' is true.
1740		 */
1741		if ((force && g_mirror_ndisks(sc, -1) > 0) ||
1742		    sc->sc_ndisks == g_mirror_ndisks(sc, -1)) {
1743			;
1744		} else if (g_mirror_ndisks(sc, -1) == 0) {
1745			/*
1746			 * Disks went down in the starting phase, so destroy
1747			 * the device.
1748			 */
1749			callout_drain(&sc->sc_callout);
1750			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
1751			return;
1752		} else {
1753			return;
1754		}
1755
1756		/*
1757		 * Activate all disks with the biggest syncid.
1758		 */
1759		if (force) {
1760			/*
1761			 * If 'force' is true, we have been called due to
1762			 * timeout, so don't bother canceling timeout.
1763			 */
1764			ndisks = 0;
1765			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1766				if ((disk->d_flags &
1767				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
1768					ndisks++;
1769				}
1770			}
1771			if (ndisks == 0) {
1772				/* No valid disks found, destroy device. */
1773				sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
1774				return;
1775			}
1776		} else {
1777			/* Cancel timeout. */
1778			callout_drain(&sc->sc_callout);
1779		}
1780
1781		/*
1782		 * Find disk with the biggest syncid.
1783		 */
1784		syncid = 0;
1785		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1786			if (disk->d_sync.ds_syncid > syncid)
1787				syncid = disk->d_sync.ds_syncid;
1788		}
1789
1790		/*
1791		 * Here we need to look for dirty disks and, if all disks
1792		 * with the biggest syncid are dirty, choose the one with
1793		 * the biggest priority and rebuild the rest.
1794		 */
1795		/*
1796		 * Find the number of dirty disks with the biggest syncid.
1797		 * Find the number of disks with the biggest syncid.
1798		 * While here, find a disk with the biggest priority.
1799		 */
1800		dirty = ndisks = 0;
1801		pdisk = NULL;
1802		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1803			if (disk->d_sync.ds_syncid != syncid)
1804				continue;
1805			if ((disk->d_flags &
1806			    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
1807				continue;
1808			}
1809			ndisks++;
1810			if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
1811				dirty++;
1812				if (pdisk == NULL ||
1813				    pdisk->d_priority < disk->d_priority) {
1814					pdisk = disk;
1815				}
1816			}
1817		}
1818		if (dirty == 0) {
1819			/* No dirty disks at all, great. */
1820		} else if (dirty == ndisks) {
1821			/*
1822			 * Force synchronization for all dirty disks except one
1823			 * with the biggest priority.
1824			 */
1825			KASSERT(pdisk != NULL, ("pdisk == NULL"));
1826			G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a "
1827			    "master disk for synchronization.",
1828			    g_mirror_get_diskname(pdisk), sc->sc_name);
1829			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1830				if (disk->d_sync.ds_syncid != syncid)
1831					continue;
1832				if ((disk->d_flags &
1833				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
1834					continue;
1835				}
1836				KASSERT((disk->d_flags &
1837				    G_MIRROR_DISK_FLAG_DIRTY) != 0,
1838				    ("Disk %s isn't marked as dirty.",
1839				    g_mirror_get_diskname(disk)));
1840				/* Skip the disk with the biggest priority. */
1841				if (disk == pdisk)
1842					continue;
1843				disk->d_sync.ds_syncid = 0;
1844			}
1845		} else if (dirty < ndisks) {
1846			/*
1847			 * Force synchronization for all dirty disks.
1848			 * We have some non-dirty disks.
1849			 */
1850			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1851				if (disk->d_sync.ds_syncid != syncid)
1852					continue;
1853				if ((disk->d_flags &
1854				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
1855					continue;
1856				}
1857				if ((disk->d_flags &
1858				    G_MIRROR_DISK_FLAG_DIRTY) == 0) {
1859					continue;
1860				}
1861				disk->d_sync.ds_syncid = 0;
1862			}
1863		}
1864
1865		/* Reset hint. */
1866		sc->sc_hint = NULL;
1867		sc->sc_syncid = syncid;
1868		if (force) {
1869			/* Remember to bump syncid on first write. */
1870			sc->sc_bump_syncid = G_MIRROR_BUMP_ON_FIRST_WRITE;
1871		}
1872		state = G_MIRROR_DEVICE_STATE_RUNNING;
1873		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
1874		    sc->sc_name, g_mirror_device_state2str(sc->sc_state),
1875		    g_mirror_device_state2str(state));
1876		sc->sc_state = state;
1877		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1878			state = g_mirror_determine_state(disk);
1879			g_mirror_event_send(disk, state,
1880			    G_MIRROR_EVENT_DONTWAIT);
1881			if (state == G_MIRROR_DISK_STATE_STALE) {
1882				sc->sc_bump_syncid =
1883				    G_MIRROR_BUMP_ON_FIRST_WRITE;
1884			}
1885		}
1886		break;
1887	    }
1888	case G_MIRROR_DEVICE_STATE_RUNNING:
1889		/*
1890		 * Bump syncid here, if we need to do it immediately.
1891		 */
1892		if (sc->sc_bump_syncid == G_MIRROR_BUMP_IMMEDIATELY) {
1893			sc->sc_bump_syncid = 0;
1894			g_mirror_bump_syncid(sc);
1895		}
1896		if (g_mirror_ndisks(sc, -1) == 0) {
1897			/*
1898			 * No disks at all, we need to destroy the device.
1899			 */
1900			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
1901		} else if (g_mirror_ndisks(sc,
1902		    G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
1903		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
1904			/*
1905			 * No active disks, destroy provider.
1906			 */
1907			if (sc->sc_provider != NULL)
1908				g_mirror_destroy_provider(sc);
1909		} else if (g_mirror_ndisks(sc,
1910		    G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
1911		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
1912			/*
1913			 * We have active disks, launch provider if it doesn't
1914			 * exist.
1915			 */
1916			if (sc->sc_provider == NULL)
1917				g_mirror_launch_provider(sc);
1918		}
1919		break;
1920	default:
1921		KASSERT(1 == 0, ("Wrong device state (%s, %s).",
1922		    sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
1923		break;
1924	}
1925}
1926
1927/*
1928 * Update disk state and device state if needed.
1929 */
1930#define	DISK_STATE_CHANGED()	G_MIRROR_DEBUG(1,			\
1931	"Disk %s state changed from %s to %s (device %s).",		\
1932	g_mirror_get_diskname(disk),					\
1933	g_mirror_disk_state2str(disk->d_state),				\
1934	g_mirror_disk_state2str(state), sc->sc_name)
1935static int
1936g_mirror_update_disk(struct g_mirror_disk *disk, u_int state)
1937{
1938	struct g_mirror_softc *sc;
1939
1940	g_topology_assert();
1941
1942	sc = disk->d_softc;
1943again:
1944	G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.",
1945	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state),
1946	    g_mirror_disk_state2str(state));
1947	switch (state) {
1948	case G_MIRROR_DISK_STATE_NEW:
1949		/*
1950		 * Possible scenarios:
1951		 * 1. A new disk arrives.
1952		 */
1953		/* Previous state should be NONE. */
1954		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE,
1955		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
1956		    g_mirror_disk_state2str(disk->d_state)));
1957		DISK_STATE_CHANGED();
1958
1959		disk->d_state = state;
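		/*
		 * Keep the disk list sorted by priority, highest priority
		 * first.
		 */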
1960		if (LIST_EMPTY(&sc->sc_disks))
1961			LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next);
1962		else {
1963			struct g_mirror_disk *dp;
1964
1965			LIST_FOREACH(dp, &sc->sc_disks, d_next) {
1966				if (disk->d_priority >= dp->d_priority) {
1967					LIST_INSERT_BEFORE(dp, disk, d_next);
1968					dp = NULL;
1969					break;
1970				}
1971				if (LIST_NEXT(dp, d_next) == NULL)
1972					break;
1973			}
1974			if (dp != NULL)
1975				LIST_INSERT_AFTER(dp, disk, d_next);
1976		}
1977		G_MIRROR_DEBUG(0, "Device %s: provider %s detected.",
1978		    sc->sc_name, g_mirror_get_diskname(disk));
1979		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
1980			break;
1981		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
1982		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
1983		    g_mirror_device_state2str(sc->sc_state),
1984		    g_mirror_get_diskname(disk),
1985		    g_mirror_disk_state2str(disk->d_state)));
1986		state = g_mirror_determine_state(disk);
1987		if (state != G_MIRROR_DISK_STATE_NONE)
1988			goto again;
1989		break;
1990	case G_MIRROR_DISK_STATE_ACTIVE:
1991		/*
1992		 * Possible scenarios:
1993		 * 1. New disk does not need synchronization.
1994		 * 2. Synchronization process finished successfully.
1995		 */
1996		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
1997		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
1998		    g_mirror_device_state2str(sc->sc_state),
1999		    g_mirror_get_diskname(disk),
2000		    g_mirror_disk_state2str(disk->d_state)));
2001		/* Previous state should be NEW or SYNCHRONIZING. */
2002		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW ||
2003		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2004		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2005		    g_mirror_disk_state2str(disk->d_state)));
2006		DISK_STATE_CHANGED();
2007
2008		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
2009			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2010		else if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
2011			disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING;
2012			disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC;
2013			g_mirror_sync_stop(disk, 0);
2014		}
2015		disk->d_state = state;
2016		disk->d_sync.ds_offset = 0;
2017		disk->d_sync.ds_offset_done = 0;
2018		g_mirror_update_access(disk);
2019		g_mirror_update_metadata(disk);
2020		G_MIRROR_DEBUG(0, "Device %s: provider %s activated.",
2021		    sc->sc_name, g_mirror_get_diskname(disk));
2022		break;
2023	case G_MIRROR_DISK_STATE_STALE:
2024		/*
2025		 * Possible scenarios:
2026		 * 1. Stale disk was connected.
2027		 */
2028		/* Previous state should be NEW. */
2029		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2030		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2031		    g_mirror_disk_state2str(disk->d_state)));
2032		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2033		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2034		    g_mirror_device_state2str(sc->sc_state),
2035		    g_mirror_get_diskname(disk),
2036		    g_mirror_disk_state2str(disk->d_state)));
2037		/*
2038		 * STALE state is only possible if device is marked
2039		 * NOAUTOSYNC.
2040		 */
2041		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
2042		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2043		    g_mirror_device_state2str(sc->sc_state),
2044		    g_mirror_get_diskname(disk),
2045		    g_mirror_disk_state2str(disk->d_state)));
2046		DISK_STATE_CHANGED();
2047
2048		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2049		disk->d_state = state;
2050		g_mirror_update_metadata(disk);
2051		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
2052		    sc->sc_name, g_mirror_get_diskname(disk));
2053		break;
2054	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
2055		/*
2056		 * Possible scenarios:
2057		 * 1. Disk which needs synchronization was connected.
2058		 */
2059		/* Previous state should be NEW. */
2060		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2061		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2062		    g_mirror_disk_state2str(disk->d_state)));
2063		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2064		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2065		    g_mirror_device_state2str(sc->sc_state),
2066		    g_mirror_get_diskname(disk),
2067		    g_mirror_disk_state2str(disk->d_state)));
2068		DISK_STATE_CHANGED();
2069
2070		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
2071			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2072		disk->d_state = state;
2073		if (sc->sc_provider != NULL) {
2074			g_mirror_sync_start(disk);
2075			g_mirror_update_metadata(disk);
2076		}
2077		break;
2078	case G_MIRROR_DISK_STATE_DISCONNECTED:
2079		/*
2080		 * Possible scenarios:
2081		 * 1. Device wasn't running yet, but a disk disappeared.
2082		 * 2. Disk was active and disappeared.
2083		 * 3. Disk disappeared during the synchronization process.
2084		 */
2085		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
2086			/*
2087			 * Previous state should be ACTIVE, STALE or
2088			 * SYNCHRONIZING.
2089			 */
2090			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
2091			    disk->d_state == G_MIRROR_DISK_STATE_STALE ||
2092			    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2093			    ("Wrong disk state (%s, %s).",
2094			    g_mirror_get_diskname(disk),
2095			    g_mirror_disk_state2str(disk->d_state)));
2096		} else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
2097			/* Previous state should be NEW. */
2098			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2099			    ("Wrong disk state (%s, %s).",
2100			    g_mirror_get_diskname(disk),
2101			    g_mirror_disk_state2str(disk->d_state)));
2102			/*
2103			 * Reset bumping syncid if disk disappeared in STARTING
2104			 * state.
2105			 */
2106			if (sc->sc_bump_syncid == G_MIRROR_BUMP_ON_FIRST_WRITE)
2107				sc->sc_bump_syncid = 0;
2108#ifdef	INVARIANTS
2109		} else {
2110			KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2111			    sc->sc_name,
2112			    g_mirror_device_state2str(sc->sc_state),
2113			    g_mirror_get_diskname(disk),
2114			    g_mirror_disk_state2str(disk->d_state)));
2115#endif
2116		}
2117		DISK_STATE_CHANGED();
2118		G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.",
2119		    sc->sc_name, g_mirror_get_diskname(disk));
2120
2121		g_mirror_destroy_disk(disk);
2122		break;
2123	case G_MIRROR_DISK_STATE_DESTROY:
2124	    {
2125		int error;
2126
2127		error = g_mirror_clear_metadata(disk);
2128		if (error != 0)
2129			return (error);
2130		DISK_STATE_CHANGED();
2131		G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.",
2132		    sc->sc_name, g_mirror_get_diskname(disk));
2133
2134		g_mirror_destroy_disk(disk);
2135		sc->sc_ndisks--;
2136		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2137			g_mirror_update_metadata(disk);
2138		}
2139		break;
2140	    }
2141	default:
2142		KASSERT(1 == 0, ("Unknown state (%u).", state));
2143		break;
2144	}
2145	return (0);
2146}
2147#undef	DISK_STATE_CHANGED
2148
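/*
 * Read the metadata from the last sector of the consumer's provider and
 * decode it into *md.
 */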
2149static int
2150g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md)
2151{
2152	struct g_provider *pp;
2153	u_char *buf;
2154	int error;
2155
2156	g_topology_assert();
2157
2158	error = g_access(cp, 1, 0, 0);
2159	if (error != 0)
2160		return (error);
2161	pp = cp->provider;
2162	g_topology_unlock();
2163	/* Metadata are stored on the last sector. */
2164	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2165	    &error);
2166	g_topology_lock();
2167	if (buf == NULL) {
2168		g_access(cp, -1, 0, 0);
2169		return (error);
2170	}
2171	if (error != 0) {
2172		g_access(cp, -1, 0, 0);
2173		g_free(buf);
2174		return (error);
2175	}
2176	error = g_access(cp, -1, 0, 0);
2177	KASSERT(error == 0, ("Cannot decrease access count for %s.", pp->name));
2178
2179	/* Decode metadata. */
2180	error = mirror_metadata_decode(buf, md);
2181	g_free(buf);
2182	if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0)
2183		return (EINVAL);
2184	if (error != 0) {
2185		G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
2186		    cp->provider->name);
2187		return (error);
2188	}
2189
2190	return (0);
2191}
2192
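/*
 * Check that the metadata read from a component is consistent with the
 * existing device configuration.
 */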
2193static int
2194g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp,
2195    struct g_mirror_metadata *md)
2196{
2197
2198	if (g_mirror_id2disk(sc, md->md_did) != NULL) {
2199		G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.",
2200		    pp->name, md->md_did);
2201		return (EEXIST);
2202	}
2203	if (md->md_all != sc->sc_ndisks) {
2204		G_MIRROR_DEBUG(1,
2205		    "Invalid '%s' field on disk %s (device %s), skipping.",
2206		    "md_all", pp->name, sc->sc_name);
2207		return (EINVAL);
2208	}
2209	if (md->md_slice != sc->sc_slice) {
2210		G_MIRROR_DEBUG(1,
2211		    "Invalid '%s' field on disk %s (device %s), skipping.",
2212		    "md_slice", pp->name, sc->sc_name);
2213		return (EINVAL);
2214	}
2215	if (md->md_balance != sc->sc_balance) {
2216		G_MIRROR_DEBUG(1,
2217		    "Invalid '%s' field on disk %s (device %s), skipping.",
2218		    "md_balance", pp->name, sc->sc_name);
2219		return (EINVAL);
2220	}
2221	if (md->md_mediasize != sc->sc_mediasize) {
2222		G_MIRROR_DEBUG(1,
2223		    "Invalid '%s' field on disk %s (device %s), skipping.",
2224		    "md_mediasize", pp->name, sc->sc_name);
2225		return (EINVAL);
2226	}
2227	if (sc->sc_mediasize > pp->mediasize) {
2228		G_MIRROR_DEBUG(1,
2229		    "Invalid size of disk %s (device %s), skipping.", pp->name,
2230		    sc->sc_name);
2231		return (EINVAL);
2232	}
2233	if (md->md_sectorsize != sc->sc_sectorsize) {
2234		G_MIRROR_DEBUG(1,
2235		    "Invalid '%s' field on disk %s (device %s), skipping.",
2236		    "md_sectorsize", pp->name, sc->sc_name);
2237		return (EINVAL);
2238	}
2239	if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2240		G_MIRROR_DEBUG(1,
2241		    "Invalid sector size of disk %s (device %s), skipping.",
2242		    pp->name, sc->sc_name);
2243		return (EINVAL);
2244	}
2245	if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) {
2246		G_MIRROR_DEBUG(1,
2247		    "Invalid device flags on disk %s (device %s), skipping.",
2248		    pp->name, sc->sc_name);
2249		return (EINVAL);
2250	}
2251	if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) {
2252		G_MIRROR_DEBUG(1,
2253		    "Invalid disk flags on disk %s (device %s), skipping.",
2254		    pp->name, sc->sc_name);
2255		return (EINVAL);
2256	}
2257	return (0);
2258}
2259
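/*
 * Validate the metadata, create a disk structure for the new component
 * and queue a NEW state event for it.
 */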
2260static int
2261g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
2262    struct g_mirror_metadata *md)
2263{
2264	struct g_mirror_disk *disk;
2265	int error;
2266
2267	g_topology_assert();
2268	G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name);
2269
2270	error = g_mirror_check_metadata(sc, pp, md);
2271	if (error != 0)
2272		return (error);
2273	disk = g_mirror_init_disk(sc, pp, md, &error);
2274	if (disk == NULL)
2275		return (error);
2276	error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW,
2277	    G_MIRROR_EVENT_WAIT);
2278	return (error);
2279}
2280
2281static int
2282g_mirror_access(struct g_provider *pp, int acr, int acw, int ace)
2283{
2284	struct g_mirror_softc *sc;
2285	struct g_mirror_disk *disk;
2286	int dcr, dcw, dce, err, error;
2287
2288	g_topology_assert();
2289	G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
2290	    acw, ace);
2291
2292	dcr = pp->acr + acr;
2293	dcw = pp->acw + acw;
2294	dce = pp->ace + ace;
2295
2296	/* On first open, grab an extra "exclusive" bit */
2297	if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
2298		ace++;
2299	/* ... and let go of it on last close */
2300	if (dcr == 0 && dcw == 0 && dce == 0)
2301		ace--;
2302
2303	sc = pp->geom->softc;
2304	if (sc == NULL || LIST_EMPTY(&sc->sc_disks)) {
2305		if (acr <= 0 && acw <= 0 && ace <= 0)
2306			return (0);
2307		else
2308			return (ENXIO);
2309	}
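	/*
	 * Pass the access change to every active component; a component
	 * which fails the request is disconnected from the mirror.
	 */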
2310	error = ENXIO;
2311	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2312		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
2313			continue;
2314		err = g_access(disk->d_consumer, acr, acw, ace);
2315		G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d",
2316		    g_mirror_get_diskname(disk), acr, acw, ace, err);
2317		if (err == 0) {
2318			/*
2319			 * Mark disk as dirty on open and unmark on close.
2320			 */
2321			if (pp->acw == 0 && dcw > 0) {
2322				G_MIRROR_DEBUG(1,
2323				    "Disk %s (device %s) marked as dirty.",
2324				    g_mirror_get_diskname(disk), sc->sc_name);
2325				disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
2326				g_mirror_update_metadata(disk);
2327			} else if (pp->acw > 0 && dcw == 0) {
2328				G_MIRROR_DEBUG(1,
2329				    "Disk %s (device %s) marked as clean.",
2330				    g_mirror_get_diskname(disk), sc->sc_name);
2331				disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2332				g_mirror_update_metadata(disk);
2333			}
2334			error = 0;
2335		} else {
2336			sc->sc_bump_syncid = G_MIRROR_BUMP_ON_FIRST_WRITE;
2337			g_mirror_event_send(disk,
2338			    G_MIRROR_DISK_STATE_DISCONNECTED,
2339			    G_MIRROR_EVENT_DONTWAIT);
2340		}
2341	}
2342	return (error);
2343}
2344
2345static struct g_geom *
2346g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md)
2347{
2348	struct g_mirror_softc *sc;
2349	struct g_geom *gp;
2350	int error, timeout;
2351
2352	g_topology_assert();
2353	G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name,
2354	    md->md_mid);
2355
2356	/* At least one disk is required. */
2357	if (md->md_all < 1)
2358		return (NULL);
2359	/*
2360	 * Action geom.
2361	 */
2362	gp = g_new_geomf(mp, "%s", md->md_name);
2363	sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO);
2364	gp->start = g_mirror_start;
2365	gp->spoiled = g_mirror_spoiled;
2366	gp->orphan = g_mirror_orphan;
2367	gp->access = g_mirror_access;
2368	gp->dumpconf = g_mirror_dumpconf;
2369
2370	sc->sc_id = md->md_mid;
2371	sc->sc_slice = md->md_slice;
2372	sc->sc_balance = md->md_balance;
2373	sc->sc_mediasize = md->md_mediasize;
2374	sc->sc_sectorsize = md->md_sectorsize;
2375	sc->sc_ndisks = md->md_all;
2376	sc->sc_flags = md->md_mflags;
2377	sc->sc_bump_syncid = 0;
2378	bioq_init(&sc->sc_queue);
2379	mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF);
2380	LIST_INIT(&sc->sc_disks);
2381	TAILQ_INIT(&sc->sc_events);
2382	mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF);
2383	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
2384	sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING;
2385	gp->softc = sc;
2386	sc->sc_geom = gp;
2387	sc->sc_provider = NULL;
2388	/*
2389	 * Synchronization geom.
2390	 */
2391	gp = g_new_geomf(mp, "%s.sync", md->md_name);
2392	gp->softc = sc;
2393	gp->spoiled = g_mirror_spoiled;
2394	gp->orphan = g_mirror_orphan;
2395	sc->sc_sync.ds_geom = gp;
2396	sc->sc_sync.ds_block = atomic_load_acq_int(&g_mirror_sync_block_size);
2397	sc->sc_sync.ds_ndisks = 0;
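	/* UMA zone for synchronization request data buffers. */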
2398	sc->sc_sync.ds_zone = uma_zcreate("gmirror:sync", sc->sc_sync.ds_block,
2399	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
2400	error = kthread_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0,
2401	    "g_mirror %s", md->md_name);
2402	if (error != 0) {
2403		G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.",
2404		    sc->sc_name);
2405		uma_zdestroy(sc->sc_sync.ds_zone);
2406		g_destroy_geom(sc->sc_sync.ds_geom);
2407		mtx_destroy(&sc->sc_events_mtx);
2408		mtx_destroy(&sc->sc_queue_mtx);
2409		g_destroy_geom(sc->sc_geom);
2410		free(sc, M_MIRROR);
2411		return (NULL);
2412	}
2413
2414	G_MIRROR_DEBUG(0, "Device %s created (id=%u).", sc->sc_name, sc->sc_id);
2415
2416	/*
2417	 * Schedule the startup timeout.
2418	 */
2419	timeout = atomic_load_acq_int(&g_mirror_timeout);
2420	callout_reset(&sc->sc_callout, timeout * hz, g_mirror_go, sc);
2421	return (sc->sc_geom);
2422}
2423
2424int
2425g_mirror_destroy(struct g_mirror_softc *sc, boolean_t force)
2426{
2427	struct g_provider *pp;
2428
2429	g_topology_assert();
2430
2431	if (sc == NULL)
2432		return (ENXIO);
2433	pp = sc->sc_provider;
2434	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
2435		if (force) {
2436			G_MIRROR_DEBUG(0, "Device %s is still open, so it "
2437			    "cannot be definitively removed.", pp->name);
2438		} else {
2439			G_MIRROR_DEBUG(1,
2440			    "Device %s is still open (r%dw%de%d).", pp->name,
2441			    pp->acr, pp->acw, pp->ace);
2442			return (EBUSY);
2443		}
2444	}
2445
2446	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2447	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT;
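	/*
	 * Wake up the worker thread and wait until it notices the DESTROY
	 * flag and exits.
	 */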
2448	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
2449	mtx_lock(&sc->sc_queue_mtx);
2450	wakeup(sc);
2451	mtx_unlock(&sc->sc_queue_mtx);
2452	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
2453	while (sc->sc_worker != NULL)
2454		tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5);
2455	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
2456	g_mirror_destroy_device(sc);
2457	free(sc, M_MIRROR);
2458	return (0);
2459}
2460
2461static void
2462g_mirror_taste_orphan(struct g_consumer *cp)
2463{
2464
2465	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2466	    cp->provider->name));
2467}
2468
2469static struct g_geom *
2470g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
2471{
2472	struct g_mirror_metadata md;
2473	struct g_mirror_softc *sc;
2474	struct g_consumer *cp;
2475	struct g_geom *gp;
2476	int error;
2477
2478	g_topology_assert();
2479	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
2480	G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);
2481
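	/*
	 * Create a temporary geom and consumer just to read the metadata;
	 * both are destroyed once the metadata has been read.
	 */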
2482	gp = g_new_geomf(mp, "mirror:taste");
2483	/*
2484	 * This orphan function should never be called.
2485	 */
2486	gp->orphan = g_mirror_taste_orphan;
2487	cp = g_new_consumer(gp);
2488	g_attach(cp, pp);
2489	error = g_mirror_read_metadata(cp, &md);
2490	g_detach(cp);
2491	g_destroy_consumer(cp);
2492	g_destroy_geom(gp);
2493	if (error != 0)
2494		return (NULL);
2495	gp = NULL;
2496
2497	if (md.md_version > G_MIRROR_VERSION) {
2498		printf("geom_mirror.ko module is too old to handle %s.\n",
2499		    pp->name);
2500		return (NULL);
2501	}
2502	if (md.md_provider[0] != '\0' && strcmp(md.md_provider, pp->name) != 0)
2503		return (NULL);
2504	if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
2505		G_MIRROR_DEBUG(0,
2506		    "Device %s: provider %s marked as inactive, skipping.",
2507		    md.md_name, pp->name);
2508		return (NULL);
2509	}
2510	if (g_mirror_debug >= 2)
2511		mirror_metadata_dump(&md);
2512
2513	/*
2514	 * Let's check if the device already exists.
2515	 */
2516	LIST_FOREACH(gp, &mp->geom, geom) {
2517		sc = gp->softc;
2518		if (sc == NULL)
2519			continue;
2520		if (sc->sc_sync.ds_geom == gp)
2521			continue;
2522		if (strcmp(md.md_name, sc->sc_name) != 0)
2523			continue;
2524		if (md.md_mid != sc->sc_id) {
2525			G_MIRROR_DEBUG(0, "Device %s already configured.",
2526			    sc->sc_name);
2527			return (NULL);
2528		}
2529		break;
2530	}
2531	if (gp == NULL) {
2532		gp = g_mirror_create(mp, &md);
2533		if (gp == NULL) {
2534			G_MIRROR_DEBUG(0, "Cannot create device %s.",
2535			    md.md_name);
2536			return (NULL);
2537		}
2538		sc = gp->softc;
2539	}
2540	G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
2541	error = g_mirror_add_disk(sc, pp, &md);
2542	if (error != 0) {
2543		G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
2544		    pp->name, gp->name, error);
2545		if (LIST_EMPTY(&sc->sc_disks))
2546			g_mirror_destroy(sc, 1);
2547		return (NULL);
2548	}
2549	return (gp);
2550}
2551
2552static int
2553g_mirror_destroy_geom(struct gctl_req *req __unused,
2554    struct g_class *mp __unused, struct g_geom *gp)
2555{
2556
2557	return (g_mirror_destroy(gp->softc, 0));
2558}
2559
2560static void
2561g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2562    struct g_consumer *cp, struct g_provider *pp)
2563{
2564	struct g_mirror_softc *sc;
2565
2566	g_topology_assert();
2567
2568	sc = gp->softc;
2569	if (sc == NULL)
2570		return;
2571	/* Skip synchronization geom. */
2572	if (gp == sc->sc_sync.ds_geom)
2573		return;
2574	if (pp != NULL) {
2575		/* Nothing here. */
2576	} else if (cp != NULL) {
2577		struct g_mirror_disk *disk;
2578
2579		disk = cp->private;
2580		if (disk == NULL)
2581			return;
2582		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id);
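		/* Report synchronization progress as a percentage. */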
2583		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
2584			sbuf_printf(sb, "%s<Synchronized>", indent);
2585			if (disk->d_sync.ds_offset_done == 0)
2586				sbuf_printf(sb, "0%%");
2587			else {
2588				sbuf_printf(sb, "%u%%",
2589				    (u_int)((disk->d_sync.ds_offset_done * 100) /
2590				    sc->sc_provider->mediasize));
2591			}
2592			sbuf_printf(sb, "</Synchronized>\n");
2593		}
2594		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
2595		    disk->d_sync.ds_syncid);
2596		sbuf_printf(sb, "%s<Flags>", indent);
2597		if (disk->d_flags == 0)
2598			sbuf_printf(sb, "NONE");
2599		else {
2600			int first = 1;
2601
2602#define	ADD_FLAG(flag, name)	do {					\
2603	if ((disk->d_flags & (flag)) != 0) {				\
2604		if (!first)						\
2605			sbuf_printf(sb, ", ");				\
2606		else							\
2607			first = 0;					\
2608		sbuf_printf(sb, name);					\
2609	}								\
2610} while (0)
2611			ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY");
2612			ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED");
2613			ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE");
2614			ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING,
2615			    "SYNCHRONIZING");
2616			ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
2617#undef	ADD_FLAG
2618		}
2619		sbuf_printf(sb, "</Flags>\n");
2620		sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent,
2621		    disk->d_priority);
2622		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
2623		    g_mirror_disk_state2str(disk->d_state));
2624	} else {
2625		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
2626		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
2627		sbuf_printf(sb, "%s<Flags>", indent);
2628		if (sc->sc_flags == 0)
2629			sbuf_printf(sb, "NONE");
2630		else {
2631			int first = 1;
2632
2633#define	ADD_FLAG(flag, name)	do {					\
2634	if ((sc->sc_flags & (flag)) != 0) {				\
2635		if (!first)						\
2636			sbuf_printf(sb, ", ");				\
2637		else							\
2638			first = 0;					\
2639		sbuf_printf(sb, name);					\
2640	}								\
2641} while (0)
2642			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
2643#undef	ADD_FLAG
2644		}
2645		sbuf_printf(sb, "</Flags>\n");
2646		sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent,
2647		    (u_int)sc->sc_slice);
2648		sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent,
2649		    balance_name(sc->sc_balance));
2650		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
2651		    sc->sc_ndisks);
2652	}
2653}
2654
2655DECLARE_GEOM_CLASS(g_mirror_class, g_mirror);
2656