/* g_mirror.c revision 139146 */
1/*-
2 * Copyright (c) 2004 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/geom/mirror/g_mirror.c 139146 2004-12-21 19:50:18Z pjd $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/module.h>
34#include <sys/limits.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/bio.h>
38#include <sys/sysctl.h>
39#include <sys/malloc.h>
40#include <sys/eventhandler.h>
41#include <vm/uma.h>
42#include <geom/geom.h>
43#include <sys/proc.h>
44#include <sys/kthread.h>
45#include <geom/mirror/g_mirror.h>
46
47
/* Allocation type used for all gmirror kernel memory. */
static MALLOC_DEFINE(M_MIRROR, "mirror data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
/* kern.geom.mirror.* sysctls; several are also loader tunables. */
SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, "GEOM_MIRROR stuff");
u_int g_mirror_debug = 0;
TUNABLE_INT("kern.geom.mirror.debug", &g_mirror_debug);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RW, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_timeout = 4;
TUNABLE_INT("kern.geom.mirror.timeout", &g_mirror_timeout);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RW, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
static u_int g_mirror_idletime = 5;
TUNABLE_INT("kern.geom.mirror.idletime", &g_mirror_idletime);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RW,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
static u_int g_mirror_reqs_per_sync = 5;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, reqs_per_sync, CTLFLAG_RW,
    &g_mirror_reqs_per_sync, 0,
    "Number of regular I/O requests per synchronization request");
static u_int g_mirror_syncs_per_sec = 100;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, syncs_per_sec, CTLFLAG_RW,
    &g_mirror_syncs_per_sec, 0,
    "Number of synchronizations requests per second");

/* msleep() wrapper that logs going to sleep and waking up at debug level 4. */
#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)

static eventhandler_tag g_mirror_ehtag = NULL;

static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static g_taste_t g_mirror_taste;
static void g_mirror_init(struct g_class *mp);
static void g_mirror_fini(struct g_class *mp);

/* Class descriptor registered with the GEOM framework. */
struct g_class g_mirror_class = {
	.name = G_MIRROR_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_mirror_config,
	.taste = g_mirror_taste,
	.destroy_geom = g_mirror_destroy_geom,
	.init = g_mirror_init,
	.fini = g_mirror_fini
};

static void g_mirror_destroy_provider(struct g_mirror_softc *sc);
static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
static void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force);
static void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type);
104
105
106static const char *
107g_mirror_disk_state2str(int state)
108{
109
110	switch (state) {
111	case G_MIRROR_DISK_STATE_NONE:
112		return ("NONE");
113	case G_MIRROR_DISK_STATE_NEW:
114		return ("NEW");
115	case G_MIRROR_DISK_STATE_ACTIVE:
116		return ("ACTIVE");
117	case G_MIRROR_DISK_STATE_STALE:
118		return ("STALE");
119	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
120		return ("SYNCHRONIZING");
121	case G_MIRROR_DISK_STATE_DISCONNECTED:
122		return ("DISCONNECTED");
123	case G_MIRROR_DISK_STATE_DESTROY:
124		return ("DESTROY");
125	default:
126		return ("INVALID");
127	}
128}
129
130static const char *
131g_mirror_device_state2str(int state)
132{
133
134	switch (state) {
135	case G_MIRROR_DEVICE_STATE_STARTING:
136		return ("STARTING");
137	case G_MIRROR_DEVICE_STATE_RUNNING:
138		return ("RUNNING");
139	default:
140		return ("INVALID");
141	}
142}
143
144static const char *
145g_mirror_get_diskname(struct g_mirror_disk *disk)
146{
147
148	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
149		return ("[unknown]");
150	return (disk->d_name);
151}
152
153/*
154 * --- Events handling functions ---
155 * Events in geom_mirror are used to maintain disks and device status
156 * from one thread to simplify locking.
157 */
/*
 * Release an event structure allocated in g_mirror_event_send().
 */
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

	free(ep, M_MIRROR);
}
164
/*
 * Queue a state-change event for the worker thread.
 *
 * 'arg' is the softc when G_MIRROR_EVENT_DEVICE is set in 'flags',
 * otherwise it is the disk the event applies to.  With
 * G_MIRROR_EVENT_DONTWAIT the call returns 0 immediately and the
 * worker frees the event; otherwise the caller sleeps (dropping the
 * topology lock) until the worker marks the event G_MIRROR_EVENT_DONE,
 * and the event's e_error is returned.
 */
int
g_mirror_event_send(void *arg, int state, int flags)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	/* The worker thread sleeps on 'sc' under sc_queue_mtx. */
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
		return (0);
	g_topology_assert();
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	g_topology_unlock();
	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
		/* PDROP: the mutex is released while sleeping. */
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	/* Don't even try to use 'sc' here, because it could be already dead. */
	g_topology_lock();
	error = ep->e_error;
	g_mirror_event_free(ep);
	return (error);
}
209
/*
 * Return the first pending event without removing it from the queue,
 * or NULL if the queue is empty.
 */
static struct g_mirror_event *
g_mirror_event_get(struct g_mirror_softc *sc)
{
	struct g_mirror_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}
220
221
/*
 * Unlink an event from the pending queue; the caller is responsible
 * for freeing it or waking its sleeper.
 */
static void
g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}
230
231static void
232g_mirror_event_cancel(struct g_mirror_disk *disk)
233{
234	struct g_mirror_softc *sc;
235	struct g_mirror_event *ep, *tmpep;
236
237	g_topology_assert();
238
239	sc = disk->d_softc;
240	mtx_lock(&sc->sc_events_mtx);
241	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
242		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
243			continue;
244		if (ep->e_disk != disk)
245			continue;
246		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
247		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
248			g_mirror_event_free(ep);
249		else {
250			ep->e_error = ECANCELED;
251			wakeup(ep);
252		}
253	}
254	mtx_unlock(&sc->sc_events_mtx);
255}
256
257/*
258 * Return the number of disks in given state.
259 * If state is equal to -1, count all connected disks.
260 */
261u_int
262g_mirror_ndisks(struct g_mirror_softc *sc, int state)
263{
264	struct g_mirror_disk *disk;
265	u_int n = 0;
266
267	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
268		if (state == -1 || disk->d_state == state)
269			n++;
270	}
271	return (n);
272}
273
274/*
275 * Find a disk in mirror by its disk ID.
276 */
277static struct g_mirror_disk *
278g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
279{
280	struct g_mirror_disk *disk;
281
282	g_topology_assert();
283
284	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
285		if (disk->d_id == id)
286			return (disk);
287	}
288	return (NULL);
289}
290
291static u_int
292g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
293{
294	struct bio *bp;
295	u_int nreqs = 0;
296
297	mtx_lock(&sc->sc_queue_mtx);
298	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
299		if (bp->bio_from == cp)
300			nreqs++;
301	}
302	mtx_unlock(&sc->sc_queue_mtx);
303	return (nreqs);
304}
305
306static int
307g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
308{
309
310	if (cp->index > 0) {
311		G_MIRROR_DEBUG(2,
312		    "I/O requests for %s exist, can't destroy it now.",
313		    cp->provider->name);
314		return (1);
315	}
316	if (g_mirror_nrequests(sc, cp) > 0) {
317		G_MIRROR_DEBUG(2,
318		    "I/O requests for %s in queue, can't destroy it now.",
319		    cp->provider->name);
320		return (1);
321	}
322	return (0);
323}
324
/*
 * GEOM event callback: detach and destroy a consumer.  Posted from
 * g_mirror_kill_consumer() so the destruction happens after the
 * retaste event triggered by closing the consumer.
 */
static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	cp = arg;
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}
335
/*
 * Close access to the consumer and detach/destroy it, unless it still
 * has I/O outstanding (in which case it is left for a later attempt).
 */
static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		/*
		 * Dropping the last write opening will trigger a retaste,
		 * unless the provider's geom is already withering.
		 */
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After retaste event was send (inside g_access()), we can send
		 * event to detach and destroy consumer.
		 * A class, which has consumer to the given provider connected
		 * will not receive retaste event for the provider.
		 * This is the way how I ignore retaste events when I close
		 * consumers opened for write: I detach and destroy consumer
		 * after retaste event is sent.
		 */
		g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}
374
/*
 * Create a consumer for the disk, attach it to the given provider and
 * open it r1w1e1.  Returns 0 on success or an errno value; on failure
 * the (possibly attached) consumer is left for the caller's cleanup
 * path (g_mirror_disconnect_consumer()).
 */
static int
g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
{
	int error;

	g_topology_assert();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	disk->d_consumer = g_new_consumer(disk->d_softc->sc_geom);
	disk->d_consumer->private = disk;
	/* index counts in-flight requests; starts at zero. */
	disk->d_consumer->index = 0;
	error = g_attach(disk->d_consumer, pp);
	if (error != 0)
		return (error);
	error = g_access(disk->d_consumer, 1, 1, 1);
	if (error != 0) {
		G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}

	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
	return (0);
}
400
401static void
402g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
403{
404
405	g_topology_assert();
406
407	if (cp == NULL)
408		return;
409	if (cp->provider != NULL)
410		g_mirror_kill_consumer(sc, cp);
411	else
412		g_destroy_consumer(cp);
413}
414
415/*
416 * Initialize disk. This means allocate memory, create consumer, attach it
417 * to the provider and open access (r1w1e1) to it.
418 */
/*
 * Allocate a disk structure, connect it to the provider (consumer open
 * r1w1e1) and initialize its fields from the on-disk metadata.
 * Returns the new disk, or NULL with *errorp set on failure.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	disk->d_id = md->md_did;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_priority = md->md_priority;
	/* Zero delay and a fresh timestamp for the load balancer. */
	disk->d_delay.sec = 0;
	disk->d_delay.frac = 0;
	binuptime(&disk->d_last_used);
	disk->d_flags = md->md_dflags;
	/* A non-empty provider name in metadata means it is hardcoded. */
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_sync.ds_resync = -1;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL) {
		/* Handles a NULL or half-attached consumer gracefully. */
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		free(disk, M_MIRROR);
	}
	return (NULL);
}
461
/*
 * Remove a disk from the mirror: cancel its pending events, drop it as
 * the read-balancing hint, stop synchronization if needed, and release
 * its consumer and memory.  Only legal in the states listed below.
 */
static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert();

	LIST_REMOVE(disk, d_next);
	g_mirror_event_cancel(disk);
	sc = disk->d_softc;
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk, 1);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_STALE:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		free(disk, M_MIRROR);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}
490
/*
 * Tear down the whole mirror device: destroy the provider, write final
 * (clean) metadata to and destroy every disk, cancel all pending
 * events, drain the callout, and wither both the device geom and its
 * synchronization geom.
 */
static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;
	struct g_consumer *cp, *tmpcp;

	g_topology_assert();

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		/* Leave the components marked clean on disk. */
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
		g_mirror_destroy_disk(disk);
	}
	while ((ep = g_mirror_event_get(sc)) != NULL) {
		g_mirror_event_remove(sc, ep);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			/*
			 * A thread sleeps on this event in
			 * g_mirror_event_send(); mark it DONE and wake
			 * the sleeper, which will free it.
			 */
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);
	gp->softc = NULL;

	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
		g_mirror_disconnect_consumer(sc, cp);
	}
	sc->sc_sync.ds_geom->softc = NULL;
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
}
536
/*
 * GEOM orphan method: the underlying provider went away.  Schedule a
 * DISCONNECTED event for the disk and request a syncid bump on the
 * next write (the data was consistent up to this point).
 */
static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_syncid = G_MIRROR_BUMP_ON_FIRST_WRITE;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}
551
/*
 * GEOM spoiled method: someone else opened the underlying provider for
 * writing, so the component can no longer be trusted.  Schedule a
 * DISCONNECTED event and bump the syncid immediately (unlike orphan,
 * the component's contents may already diverge).
 */
static void
g_mirror_spoiled(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_syncid = G_MIRROR_BUMP_IMMEDIATELY;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}
566
567/*
568 * Function should return the next active disk on the list.
569 * It is possible that it will be the same disk as given.
570 * If there are no active disks on list, NULL is returned.
571 */
/*
 * Function should return the next active disk on the list.
 * It is possible that it will be the same disk as given.
 * If there are no active disks on list, NULL is returned.
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

	/*
	 * Walk the list circularly starting after 'disk': a NULL
	 * successor wraps around to the list head.  The loop stops
	 * either at the first ACTIVE disk or back at 'disk' itself.
	 */
	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	/* Either we broke out on an ACTIVE disk or came full circle. */
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}
588
/*
 * Round-robin disk selection for read balancing: return an ACTIVE disk
 * (or NULL if none) and advance sc_hint to the next active one.
 */
static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_hint == NULL) {
		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
		if (sc->sc_hint == NULL)
			return (NULL);
	}
	disk = sc->sc_hint;
	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
		disk = g_mirror_find_next(sc, disk);
		if (disk == NULL)
			return (NULL);
	}
	sc->sc_hint = g_mirror_find_next(sc, disk);
	return (disk);
}
608
/*
 * Write metadata to the disk's last sector.  A NULL 'md' writes an
 * all-zero sector (clearing the metadata).  On write failure the disk
 * is scheduled for disconnection and the syncid is bumped immediately.
 * Returns the g_write_data() error code.
 */
static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert();

	sc = disk->d_softc;
	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	/* Metadata lives in the provider's last sector. */
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
	if (md != NULL)
		mirror_metadata_encode(md, sector);
	/* g_write_data() may sleep; drop the topology lock around it. */
	g_topology_unlock();
	error = g_write_data(cp, offset, sector, length);
	g_topology_lock();
	free(sector, M_MIRROR);
	if (error != 0) {
		disk->d_softc->sc_bump_syncid = G_MIRROR_BUMP_IMMEDIATELY;
		g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
		    G_MIRROR_EVENT_DONTWAIT);
	}
	return (error);
}
644
645static int
646g_mirror_clear_metadata(struct g_mirror_disk *disk)
647{
648	int error;
649
650	g_topology_assert();
651	error = g_mirror_write_metadata(disk, NULL);
652	if (error == 0) {
653		G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
654		    g_mirror_get_diskname(disk));
655	} else {
656		G_MIRROR_DEBUG(0,
657		    "Cannot clear metadata on disk %s (error=%d).",
658		    g_mirror_get_diskname(disk), error);
659	}
660	return (error);
661}
662
/*
 * Populate *md from the device's (and optionally the disk's) in-core
 * state.  When 'disk' is NULL, metadata for a brand-new disk is
 * produced: a fresh random disk ID and zeroed per-disk fields.
 */
void
g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{

	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
	md->md_version = G_MIRROR_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_mid = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_slice = sc->sc_slice;
	md->md_balance = sc->sc_balance;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
	bzero(md->md_provider, sizeof(md->md_provider));
	if (disk == NULL) {
		/* New disk: random ID, everything else starts at zero. */
		md->md_did = arc4random();
		md->md_priority = 0;
		md->md_syncid = 0;
		md->md_dflags = 0;
		md->md_sync_offset = 0;
	} else {
		md->md_did = disk->d_id;
		md->md_priority = disk->d_priority;
		md->md_syncid = disk->d_sync.ds_syncid;
		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
		/* The sync offset is only meaningful while synchronizing. */
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			md->md_sync_offset = disk->d_sync.ds_offset_done;
		else
			md->md_sync_offset = 0;
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
			strlcpy(md->md_provider,
			    disk->d_consumer->provider->name,
			    sizeof(md->md_provider));
		}
	}
}
701
702void
703g_mirror_update_metadata(struct g_mirror_disk *disk)
704{
705	struct g_mirror_metadata md;
706	int error;
707
708	g_topology_assert();
709	g_mirror_fill_metadata(disk->d_softc, disk, &md);
710	error = g_mirror_write_metadata(disk, &md);
711	if (error == 0) {
712		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
713		    g_mirror_get_diskname(disk));
714	} else {
715		G_MIRROR_DEBUG(0,
716		    "Cannot update metadata on disk %s (error=%d).",
717		    g_mirror_get_diskname(disk), error);
718	}
719}
720
/*
 * Increment the device synchronization ID and persist it to every
 * ACTIVE and SYNCHRONIZING disk, so components that miss future writes
 * can later be recognized as stale.
 */
static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert();
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_mirror_update_metadata(disk);
		}
	}
}
742
/*
 * The mirror has been quiet for a while: mark all ACTIVE components
 * clean on disk so a crash from this state will not force a full
 * resynchronization.  No-op unless the provider is open for writing.
 */
static void
g_mirror_idle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_provider == NULL || sc->sc_provider->acw == 0)
		return;
	sc->sc_idle = 1;
	g_topology_lock();
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	g_topology_unlock();
}
762
/*
 * Writes are arriving again: mark all ACTIVE components dirty on disk
 * before any data is modified (counterpart of g_mirror_idle()).
 */
static void
g_mirror_unidle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	sc->sc_idle = 0;
	g_topology_lock();
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	g_topology_unlock();
}
780
781/*
782 * Return 1 if we should check if mirror is idling.
783 */
784static int
785g_mirror_check_idle(struct g_mirror_softc *sc)
786{
787	struct g_mirror_disk *disk;
788
789	if (sc->sc_idle)
790		return (0);
791	if (sc->sc_provider != NULL && sc->sc_provider->acw == 0)
792		return (0);
793	/*
794	 * Check if there are no in-flight requests.
795	 */
796	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
797		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
798			continue;
799		if (disk->d_consumer->index > 0)
800			return (0);
801	}
802	return (1);
803}
804
805static __inline int
806bintime_cmp(struct bintime *bt1, struct bintime *bt2)
807{
808
809	if (bt1->sec < bt2->sec)
810		return (-1);
811	else if (bt1->sec > bt2->sec)
812		return (1);
813	if (bt1->frac < bt2->frac)
814		return (-1);
815	else if (bt1->frac > bt2->frac)
816		return (1);
817	return (0);
818}
819
/*
 * Record how long the just-completed request took on this disk
 * (now minus the bio's start time).  Only maintained when the LOAD
 * balancing algorithm, which consumes d_delay, is in use.
 */
static void
g_mirror_update_delay(struct g_mirror_disk *disk, struct bio *bp)
{

	if (disk->d_softc->sc_balance != G_MIRROR_BALANCE_LOAD)
		return;
	binuptime(&disk->d_delay);
	bintime_sub(&disk->d_delay, &bp->bio_t0);
}
829
/*
 * Completion callback for regular (non-sync) component I/O: tag the
 * bio as REGULAR and hand it to the worker thread via the queue.
 */
static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags |= G_MIRROR_BIO_FLAG_REGULAR;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}
842
/*
 * Worker-side handling of a completed regular request from a component.
 * Accounts the child bio against its parent and decides when the parent
 * can be delivered: reads are retried (requeued) after a component
 * failure, writes/deletes succeed if at least one component succeeded.
 */
static void
g_mirror_regular_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	/* One fewer request in flight on this consumer. */
	bp->bio_from->index--;
	pbp = bp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		/* Consumer already orphaned from its disk; retry teardown. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
	} else {
		g_mirror_update_delay(disk, bp);
	}

	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (bp->bio_error == 0 && pbp->bio_error == 0) {
		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
		g_destroy_bio(bp);
		if (pbp->bio_children == pbp->bio_inbed) {
			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
			pbp->bio_completed = pbp->bio_length;
			g_io_deliver(pbp, pbp->bio_error);
		}
		return;
	} else if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		G_MIRROR_LOGREQ(0, bp, "Request failed (error=%d).",
		    bp->bio_error);
		if (disk != NULL) {
			/* Failing component: disconnect it and bump syncid. */
			sc->sc_bump_syncid = G_MIRROR_BUMP_IMMEDIATELY;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
		}
		switch (pbp->bio_cmd) {
		case BIO_DELETE:
		case BIO_WRITE:
			/*
			 * Drop the failed child from the parent's counts so
			 * the remaining successful children can still
			 * complete the parent without error.
			 */
			pbp->bio_inbed--;
			pbp->bio_children--;
			break;
		}
	}
	g_destroy_bio(bp);

	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (pbp->bio_children == pbp->bio_inbed) {
			/*
			 * The read failed on this component; reset the
			 * error and requeue the parent so it can be
			 * retried on another component.
			 */
			pbp->bio_error = 0;
			mtx_lock(&sc->sc_queue_mtx);
			bioq_disksort(&sc->sc_queue, pbp);
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_inbed < pbp->bio_children) {
			/* Do nothing. */
			break;
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* Some requests succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		}
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
		break;
	}
}
930
/*
 * Completion callback for synchronization I/O: tag the bio as SYNC and
 * hand it to the worker thread via the queue.
 */
static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags |= G_MIRROR_BIO_FLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}
944
/*
 * GEOM start method: accept READ/WRITE/DELETE requests for the mirror
 * provider and queue them for the worker thread; everything else
 * (including GETATTR) is rejected with EOPNOTSUPP.
 */
static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_GETATTR:
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}
976
977/*
978 * Send one synchronization request.
979 */
/*
 * Send one synchronization request: a read of up to MAXPHYS bytes from
 * the mirror provider at the disk's current sync offset.  Completion
 * is routed back to the worker through g_mirror_sync_done(); the write
 * half happens later in g_mirror_sync_request().
 */
static void
g_mirror_sync_one(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct bio *bp;

	sc = disk->d_softc;
	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Disk %s is not marked for synchronization.",
	    g_mirror_get_diskname(disk)));

	bp = g_new_bio();
	/* Allocation failure is harmless: the request is simply skipped. */
	if (bp == NULL)
		return;
	bp->bio_parent = NULL;
	bp->bio_cmd = BIO_READ;
	bp->bio_offset = disk->d_sync.ds_offset;
	/* Clamp the last request to the end of the media. */
	bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
	bp->bio_cflags = 0;
	bp->bio_done = g_mirror_sync_done;
	bp->bio_data = disk->d_sync.ds_data;
	if (bp->bio_data == NULL) {
		g_destroy_bio(bp);
		return;
	}
	disk->d_sync.ds_offset += bp->bio_length;
	bp->bio_to = sc->sc_provider;
	G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
	disk->d_sync.ds_consumer->index++;
	g_io_request(bp, disk->d_sync.ds_consumer);
}
1011
/*
 * Worker-side handling of a completed synchronization bio.  A finished
 * READ (from the mirror provider) is turned into a WRITE to the
 * synchronizing component; a finished WRITE advances ds_offset_done,
 * periodically persists the progress, and activates the disk once the
 * whole media has been copied.
 */
static void
g_mirror_sync_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;

	bp->bio_from->index--;
	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		/* Consumer already orphaned from its disk; retry teardown. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		g_destroy_bio(bp);
		return;
	}

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			return;
		}
		G_MIRROR_LOGREQ(3, bp,
		    "Synchronization request half-finished.");
		/* Reuse the same bio: write the data to the syncing disk. */
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
	    {
		struct g_mirror_disk_sync *sync;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			/* Write to the component failed: disconnect it. */
			sc->sc_bump_syncid = G_MIRROR_BUMP_IMMEDIATELY;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		sync = &disk->d_sync;
		sync->ds_offset_done = bp->bio_offset + bp->bio_length;
		g_destroy_bio(bp);
		/* A pending resync restarts progress tracking elsewhere. */
		if (sync->ds_resync != -1)
			break;
		if (sync->ds_offset_done == sc->sc_provider->mediasize) {
			/*
			 * Disk up-to-date, activate it.
			 */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		} else if (sync->ds_offset_done % (MAXPHYS * 100) == 0) {
			/*
			 * Update offset_done on every 100 blocks.
			 * XXX: This should be configurable.
			 */
			g_topology_lock();
			g_mirror_update_metadata(disk);
			g_topology_unlock();
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}
1101
/*
 * PREFER balancing: route the request to the first ACTIVE disk found
 * on the list.  Delivers ENXIO when no active disk exists and ENOMEM
 * when the bio cannot be cloned.
 */
static void
g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	/* LIST_FOREACH leaves disk == NULL when nothing matched. */
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}
1139
1140static void
1141g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
1142{
1143	struct g_mirror_disk *disk;
1144	struct g_consumer *cp;
1145	struct bio *cbp;
1146
1147	disk = g_mirror_get_disk(sc);
1148	if (disk == NULL) {
1149		if (bp->bio_error == 0)
1150			bp->bio_error = ENXIO;
1151		g_io_deliver(bp, bp->bio_error);
1152		return;
1153	}
1154	cbp = g_clone_bio(bp);
1155	if (cbp == NULL) {
1156		if (bp->bio_error == 0)
1157			bp->bio_error = ENOMEM;
1158		g_io_deliver(bp, bp->bio_error);
1159		return;
1160	}
1161	/*
1162	 * Fill in the component buf structure.
1163	 */
1164	cp = disk->d_consumer;
1165	cbp->bio_done = g_mirror_done;
1166	cbp->bio_to = cp->provider;
1167	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1168	KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
1169	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
1170	    cp->acw, cp->ace));
1171	cp->index++;
1172	g_io_request(cbp, cp);
1173}
1174
1175static void
1176g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
1177{
1178	struct g_mirror_disk *disk, *dp;
1179	struct g_consumer *cp;
1180	struct bio *cbp;
1181	struct bintime curtime;
1182
1183	binuptime(&curtime);
1184	/*
1185	 * Find a disk which the smallest load.
1186	 */
1187	disk = NULL;
1188	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
1189		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
1190			continue;
1191		/* If disk wasn't used for more than 2 sec, use it. */
1192		if (curtime.sec - dp->d_last_used.sec >= 2) {
1193			disk = dp;
1194			break;
1195		}
1196		if (disk == NULL ||
1197		    bintime_cmp(&dp->d_delay, &disk->d_delay) < 0) {
1198			disk = dp;
1199		}
1200	}
1201	cbp = g_clone_bio(bp);
1202	if (cbp == NULL) {
1203		if (bp->bio_error == 0)
1204			bp->bio_error = ENOMEM;
1205		g_io_deliver(bp, bp->bio_error);
1206		return;
1207	}
1208	/*
1209	 * Fill in the component buf structure.
1210	 */
1211	cp = disk->d_consumer;
1212	cbp->bio_done = g_mirror_done;
1213	cbp->bio_to = cp->provider;
1214	binuptime(&disk->d_last_used);
1215	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1216	KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
1217	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
1218	    cp->acw, cp->ace));
1219	cp->index++;
1220	g_io_request(cbp, cp);
1221}
1222
1223static void
1224g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
1225{
1226	struct bio_queue_head queue;
1227	struct g_mirror_disk *disk;
1228	struct g_consumer *cp;
1229	struct bio *cbp;
1230	off_t left, mod, offset, slice;
1231	u_char *data;
1232	u_int ndisks;
1233
1234	if (bp->bio_length <= sc->sc_slice) {
1235		g_mirror_request_round_robin(sc, bp);
1236		return;
1237	}
1238	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
1239	slice = bp->bio_length / ndisks;
1240	mod = slice % sc->sc_provider->sectorsize;
1241	if (mod != 0)
1242		slice += sc->sc_provider->sectorsize - mod;
1243	/*
1244	 * Allocate all bios before sending any request, so we can
1245	 * return ENOMEM in nice and clean way.
1246	 */
1247	left = bp->bio_length;
1248	offset = bp->bio_offset;
1249	data = bp->bio_data;
1250	bioq_init(&queue);
1251	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1252		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
1253			continue;
1254		cbp = g_clone_bio(bp);
1255		if (cbp == NULL) {
1256			for (cbp = bioq_first(&queue); cbp != NULL;
1257			    cbp = bioq_first(&queue)) {
1258				bioq_remove(&queue, cbp);
1259				g_destroy_bio(cbp);
1260			}
1261			if (bp->bio_error == 0)
1262				bp->bio_error = ENOMEM;
1263			g_io_deliver(bp, bp->bio_error);
1264			return;
1265		}
1266		bioq_insert_tail(&queue, cbp);
1267		cbp->bio_done = g_mirror_done;
1268		cbp->bio_caller1 = disk;
1269		cbp->bio_to = disk->d_consumer->provider;
1270		cbp->bio_offset = offset;
1271		cbp->bio_data = data;
1272		cbp->bio_length = MIN(left, slice);
1273		left -= cbp->bio_length;
1274		if (left == 0)
1275			break;
1276		offset += cbp->bio_length;
1277		data += cbp->bio_length;
1278	}
1279	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
1280		bioq_remove(&queue, cbp);
1281		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1282		disk = cbp->bio_caller1;
1283		cbp->bio_caller1 = NULL;
1284		cp = disk->d_consumer;
1285		KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
1286		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1287		    cp->acr, cp->acw, cp->ace));
1288		disk->d_consumer->index++;
1289		g_io_request(cbp, disk->d_consumer);
1290	}
1291}
1292
/*
 * Dispatch one regular I/O request taken off the device queue.
 *
 * Reads go to a single component chosen by the configured balance
 * algorithm.  Writes and deletes are replicated: a clone is sent to
 * every ACTIVE disk and to the already-synchronized region of every
 * SYNCHRONIZING disk.
 */
static void
g_mirror_register_request(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	switch (bp->bio_cmd) {
	case BIO_READ:
		switch (sc->sc_balance) {
		case G_MIRROR_BALANCE_LOAD:
			g_mirror_request_load(sc, bp);
			break;
		case G_MIRROR_BALANCE_PREFER:
			g_mirror_request_prefer(sc, bp);
			break;
		case G_MIRROR_BALANCE_ROUND_ROBIN:
			g_mirror_request_round_robin(sc, bp);
			break;
		case G_MIRROR_BALANCE_SPLIT:
			g_mirror_request_split(sc, bp);
			break;
		}
		return;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		struct g_mirror_disk *disk;
		struct g_mirror_disk_sync *sync;
		struct bio_queue_head queue;
		struct g_consumer *cp;
		struct bio *cbp;

		/* First write after idling: mark components dirty again. */
		if (sc->sc_idle)
			g_mirror_unidle(sc);
		/*
		 * Allocate all bios before sending any request, so we can
		 * return ENOMEM in nice and clean way.
		 */
		bioq_init(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			sync = &disk->d_sync;
			switch (disk->d_state) {
			case G_MIRROR_DISK_STATE_ACTIVE:
				break;
			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
				/*
				 * Only the region below ds_offset has been
				 * synchronized; skip writes beyond it (the
				 * `continue' moves to the next disk).
				 */
				if (bp->bio_offset >= sync->ds_offset)
					continue;
				else if (bp->bio_offset + bp->bio_length >
				    sync->ds_offset_done &&
				    (bp->bio_offset < sync->ds_resync ||
				     sync->ds_resync == -1)) {
					/*
					 * The write overlaps the in-flight
					 * sync window; remember where the
					 * syncer has to restart from
					 * (rounded down to a MAXPHYS
					 * boundary).
					 */
					sync->ds_resync = bp->bio_offset -
					    (bp->bio_offset % MAXPHYS);
				}
				break;
			default:
				/* Not a writable component. */
				continue;
			}
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				/* Undo the clones created so far. */
				for (cbp = bioq_first(&queue); cbp != NULL;
				    cbp = bioq_first(&queue)) {
					bioq_remove(&queue, cbp);
					g_destroy_bio(cbp);
				}
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			bioq_insert_tail(&queue, cbp);
			cbp->bio_done = g_mirror_done;
			cp = disk->d_consumer;
			/* Stash the consumer until the send loop below. */
			cbp->bio_caller1 = cp;
			cbp->bio_to = cp->provider;
			KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
		}
		/* All clones allocated - send them to the components. */
		for (cbp = bioq_first(&queue); cbp != NULL;
		    cbp = bioq_first(&queue)) {
			bioq_remove(&queue, cbp);
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			cp = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp->index++;
			g_io_request(cbp, cp);
		}
		/*
		 * Bump syncid on first write.
		 */
		if (sc->sc_bump_syncid == G_MIRROR_BUMP_ON_FIRST_WRITE) {
			sc->sc_bump_syncid = 0;
			g_topology_lock();
			g_mirror_bump_syncid(sc);
			g_topology_unlock();
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}
1398
1399static int
1400g_mirror_can_destroy(struct g_mirror_softc *sc)
1401{
1402	struct g_geom *gp;
1403	struct g_consumer *cp;
1404
1405	g_topology_assert();
1406	gp = sc->sc_geom;
1407	LIST_FOREACH(cp, &gp->consumer, consumer) {
1408		if (g_mirror_is_busy(sc, cp))
1409			return (0);
1410	}
1411	gp = sc->sc_sync.ds_geom;
1412	LIST_FOREACH(cp, &gp->consumer, consumer) {
1413		if (g_mirror_is_busy(sc, cp))
1414			return (0);
1415	}
1416	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
1417	    sc->sc_name);
1418	return (1);
1419}
1420
1421static int
1422g_mirror_try_destroy(struct g_mirror_softc *sc)
1423{
1424
1425	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) {
1426		g_topology_lock();
1427		if (!g_mirror_can_destroy(sc)) {
1428			g_topology_unlock();
1429			return (0);
1430		}
1431		g_topology_unlock();
1432		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
1433		    &sc->sc_worker);
1434		wakeup(&sc->sc_worker);
1435		sc->sc_worker = NULL;
1436	} else {
1437		g_topology_lock();
1438		if (!g_mirror_can_destroy(sc)) {
1439			g_topology_unlock();
1440			return (0);
1441		}
1442		g_mirror_destroy_device(sc);
1443		g_topology_unlock();
1444		free(sc, M_MIRROR);
1445	}
1446	return (1);
1447}
1448
1449/*
1450 * Worker thread.
1451 */
1452static void
1453g_mirror_worker(void *arg)
1454{
1455	struct g_mirror_softc *sc;
1456	struct g_mirror_disk *disk;
1457	struct g_mirror_disk_sync *sync;
1458	struct g_mirror_event *ep;
1459	struct bio *bp;
1460	u_int nreqs;
1461
1462	sc = arg;
1463	curthread->td_base_pri = PRIBIO;
1464
1465	nreqs = 0;
1466	for (;;) {
1467		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
1468		/*
1469		 * First take a look at events.
1470		 * This is important to handle events before any I/O requests.
1471		 */
1472		ep = g_mirror_event_get(sc);
1473		if (ep != NULL && g_topology_try_lock()) {
1474			g_mirror_event_remove(sc, ep);
1475			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
1476				/* Update only device status. */
1477				G_MIRROR_DEBUG(3,
1478				    "Running event for device %s.",
1479				    sc->sc_name);
1480				ep->e_error = 0;
1481				g_mirror_update_device(sc, 1);
1482			} else {
1483				/* Update disk status. */
1484				G_MIRROR_DEBUG(3, "Running event for disk %s.",
1485				     g_mirror_get_diskname(ep->e_disk));
1486				ep->e_error = g_mirror_update_disk(ep->e_disk,
1487				    ep->e_state);
1488				if (ep->e_error == 0)
1489					g_mirror_update_device(sc, 0);
1490			}
1491			g_topology_unlock();
1492			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
1493				KASSERT(ep->e_error == 0,
1494				    ("Error cannot be handled."));
1495				g_mirror_event_free(ep);
1496			} else {
1497				ep->e_flags |= G_MIRROR_EVENT_DONE;
1498				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
1499				    ep);
1500				mtx_lock(&sc->sc_events_mtx);
1501				wakeup(ep);
1502				mtx_unlock(&sc->sc_events_mtx);
1503			}
1504			if ((sc->sc_flags &
1505			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
1506				if (g_mirror_try_destroy(sc))
1507					kthread_exit(0);
1508			}
1509			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
1510			continue;
1511		}
1512		/*
1513		 * Now I/O requests.
1514		 */
1515		/* Get first request from the queue. */
1516		mtx_lock(&sc->sc_queue_mtx);
1517		bp = bioq_first(&sc->sc_queue);
1518		if (bp == NULL) {
1519			if (ep != NULL) {
1520				/*
1521				 * No I/O requests and topology lock was
1522				 * already held? Try again.
1523				 */
1524				mtx_unlock(&sc->sc_queue_mtx);
1525				continue;
1526			}
1527			if ((sc->sc_flags &
1528			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
1529				mtx_unlock(&sc->sc_queue_mtx);
1530				if (g_mirror_try_destroy(sc))
1531					kthread_exit(0);
1532				mtx_lock(&sc->sc_queue_mtx);
1533			}
1534		}
1535		if (sc->sc_sync.ds_ndisks > 0 &&
1536		    (bp == NULL || nreqs > g_mirror_reqs_per_sync)) {
1537			mtx_unlock(&sc->sc_queue_mtx);
1538			/*
1539			 * It is time for synchronization...
1540			 */
1541			nreqs = 0;
1542			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1543				if (disk->d_state !=
1544				    G_MIRROR_DISK_STATE_SYNCHRONIZING) {
1545					continue;
1546				}
1547				sync = &disk->d_sync;
1548				if (sync->ds_offset >=
1549				    sc->sc_provider->mediasize) {
1550					continue;
1551				}
1552				if (sync->ds_offset > sync->ds_offset_done)
1553					continue;
1554				if (sync->ds_resync != -1) {
1555					sync->ds_offset = sync->ds_resync;
1556					sync->ds_offset_done = sync->ds_resync;
1557					sync->ds_resync = -1;
1558				}
1559				g_mirror_sync_one(disk);
1560			}
1561			G_MIRROR_DEBUG(5, "%s: I'm here 2.", __func__);
1562			goto sleep;
1563		}
1564		if (bp == NULL) {
1565			if (g_mirror_check_idle(sc)) {
1566				u_int idletime;
1567
1568				idletime = g_mirror_idletime;
1569				if (idletime == 0)
1570					idletime = 1;
1571				idletime *= hz;
1572				if (msleep(sc, &sc->sc_queue_mtx, PRIBIO | PDROP,
1573				    "m:w1", idletime) == EWOULDBLOCK) {
1574					G_MIRROR_DEBUG(5, "%s: I'm here 3.",
1575					    __func__);
1576					/*
1577					 * No I/O requests in 'idletime' seconds,
1578					 * so mark components as clean.
1579					 */
1580					g_mirror_idle(sc);
1581				}
1582				G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
1583			} else {
1584				MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP,
1585				    "m:w2", 0);
1586				G_MIRROR_DEBUG(5, "%s: I'm here 5.", __func__);
1587			}
1588			continue;
1589		}
1590		nreqs++;
1591		bioq_remove(&sc->sc_queue, bp);
1592		mtx_unlock(&sc->sc_queue_mtx);
1593
1594		if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0) {
1595			g_mirror_regular_request(bp);
1596		} else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
1597			u_int timeout, sps;
1598
1599			g_mirror_sync_request(bp);
1600sleep:
1601			sps = g_mirror_syncs_per_sec;
1602			if (sps == 0) {
1603				G_MIRROR_DEBUG(5, "%s: I'm here 6.", __func__);
1604				continue;
1605			}
1606			if (ep != NULL) {
1607				/*
1608				 * We have some pending events, don't sleep now.
1609				 */
1610				G_MIRROR_DEBUG(5, "%s: I'm here 7.", __func__);
1611				continue;
1612			}
1613			mtx_lock(&sc->sc_queue_mtx);
1614			if (bioq_first(&sc->sc_queue) != NULL) {
1615				mtx_unlock(&sc->sc_queue_mtx);
1616				G_MIRROR_DEBUG(5, "%s: I'm here 8.", __func__);
1617				continue;
1618			}
1619			timeout = hz / sps;
1620			if (timeout == 0)
1621				timeout = 1;
1622			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w3",
1623			    timeout);
1624		} else {
1625			g_mirror_register_request(bp);
1626		}
1627		G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
1628	}
1629}
1630
1631/*
1632 * Open disk's consumer if needed.
1633 */
1634static void
1635g_mirror_update_access(struct g_mirror_disk *disk)
1636{
1637	struct g_provider *pp;
1638
1639	g_topology_assert();
1640
1641	pp = disk->d_softc->sc_provider;
1642	if (pp == NULL)
1643		return;
1644	if (pp->acw > 0) {
1645		if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
1646			G_MIRROR_DEBUG(1,
1647			    "Disk %s (device %s) marked as dirty.",
1648			    g_mirror_get_diskname(disk),
1649			    disk->d_softc->sc_name);
1650			disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
1651		}
1652	} else if (pp->acw == 0) {
1653		if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
1654			G_MIRROR_DEBUG(1,
1655			    "Disk %s (device %s) marked as clean.",
1656			    g_mirror_get_diskname(disk),
1657			    disk->d_softc->sc_name);
1658			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
1659		}
1660	}
1661}
1662
/*
 * Start synchronization of one disk: create a consumer on the
 * synchronization geom, attach it to the mirror's own provider (so
 * reads return the mirrored data), open it read-only and allocate the
 * MAXPHYS transfer buffer.  Accounts the disk in sc_sync.ds_ndisks.
 */
static void
g_mirror_sync_start(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	int error;

	g_topology_assert();

	sc = disk->d_softc;
	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
	    sc->sc_state));

	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
	    g_mirror_get_diskname(disk));
	/* A disk being rebuilt is by definition not in sync: mark dirty. */
	disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	KASSERT(disk->d_sync.ds_consumer == NULL,
	    ("Sync consumer already exists (device=%s, disk=%s).",
	    sc->sc_name, g_mirror_get_diskname(disk)));
	disk->d_sync.ds_consumer = g_new_consumer(sc->sc_sync.ds_geom);
	/* Back-pointer so completions can find the disk being synced. */
	disk->d_sync.ds_consumer->private = disk;
	disk->d_sync.ds_consumer->index = 0;
	error = g_attach(disk->d_sync.ds_consumer, disk->d_softc->sc_provider);
	KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
	    disk->d_softc->sc_name, error));
	error = g_access(disk->d_sync.ds_consumer, 1, 0, 0);
	KASSERT(error == 0, ("Cannot open %s (error=%d).",
	    disk->d_softc->sc_name, error));
	/* Freed again in g_mirror_sync_stop(). */
	disk->d_sync.ds_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
	sc->sc_sync.ds_ndisks++;
}
1694
1695/*
1696 * Stop synchronization process.
1697 * type: 0 - synchronization finished
1698 *       1 - synchronization stopped
1699 */
1700static void
1701g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
1702{
1703
1704	g_topology_assert();
1705	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
1706	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
1707	    g_mirror_disk_state2str(disk->d_state)));
1708	if (disk->d_sync.ds_consumer == NULL)
1709		return;
1710
1711	if (type == 0) {
1712		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
1713		    disk->d_softc->sc_name, g_mirror_get_diskname(disk));
1714	} else /* if (type == 1) */ {
1715		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
1716		    disk->d_softc->sc_name, g_mirror_get_diskname(disk));
1717	}
1718	g_mirror_kill_consumer(disk->d_softc, disk->d_sync.ds_consumer);
1719	free(disk->d_sync.ds_data, M_MIRROR);
1720	disk->d_sync.ds_consumer = NULL;
1721	disk->d_softc->sc_sync.ds_ndisks--;
1722	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
1723}
1724
1725static void
1726g_mirror_launch_provider(struct g_mirror_softc *sc)
1727{
1728	struct g_mirror_disk *disk;
1729	struct g_provider *pp;
1730
1731	g_topology_assert();
1732
1733	pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name);
1734	pp->mediasize = sc->sc_mediasize;
1735	pp->sectorsize = sc->sc_sectorsize;
1736	sc->sc_provider = pp;
1737	g_error_provider(pp, 0);
1738	G_MIRROR_DEBUG(0, "Device %s: provider %s launched.", sc->sc_name,
1739	    pp->name);
1740	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1741		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
1742			g_mirror_sync_start(disk);
1743	}
1744}
1745
1746static void
1747g_mirror_destroy_provider(struct g_mirror_softc *sc)
1748{
1749	struct g_mirror_disk *disk;
1750	struct bio *bp;
1751
1752	g_topology_assert();
1753	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
1754	    sc->sc_name));
1755
1756	g_error_provider(sc->sc_provider, ENXIO);
1757	mtx_lock(&sc->sc_queue_mtx);
1758	while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
1759		bioq_remove(&sc->sc_queue, bp);
1760		g_io_deliver(bp, ENXIO);
1761	}
1762	mtx_unlock(&sc->sc_queue_mtx);
1763	G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
1764	    sc->sc_provider->name);
1765	sc->sc_provider->flags |= G_PF_WITHER;
1766	g_orphan_provider(sc->sc_provider, ENXIO);
1767	sc->sc_provider = NULL;
1768	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1769		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
1770			g_mirror_sync_stop(disk, 1);
1771	}
1772}
1773
1774static void
1775g_mirror_go(void *arg)
1776{
1777	struct g_mirror_softc *sc;
1778
1779	sc = arg;
1780	G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
1781	g_mirror_event_send(sc, 0,
1782	    G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE);
1783}
1784
1785static u_int
1786g_mirror_determine_state(struct g_mirror_disk *disk)
1787{
1788	struct g_mirror_softc *sc;
1789	u_int state;
1790
1791	sc = disk->d_softc;
1792	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
1793		if ((disk->d_flags &
1794		    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
1795			/* Disk does not need synchronization. */
1796			state = G_MIRROR_DISK_STATE_ACTIVE;
1797		} else {
1798			if ((sc->sc_flags &
1799			     G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0  ||
1800			    (disk->d_flags &
1801			     G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
1802				/*
1803				 * We can start synchronization from
1804				 * the stored offset.
1805				 */
1806				state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
1807			} else {
1808				state = G_MIRROR_DISK_STATE_STALE;
1809			}
1810		}
1811	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
1812		/*
1813		 * Reset all synchronization data for this disk,
1814		 * because if it even was synchronized, it was
1815		 * synchronized to disks with different syncid.
1816		 */
1817		disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
1818		disk->d_sync.ds_offset = 0;
1819		disk->d_sync.ds_offset_done = 0;
1820		disk->d_sync.ds_syncid = sc->sc_syncid;
1821		if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
1822		    (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
1823			state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
1824		} else {
1825			state = G_MIRROR_DISK_STATE_STALE;
1826		}
1827	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
1828		/*
1829		 * Not good, NOT GOOD!
1830		 * It means that mirror was started on stale disks
1831		 * and more fresh disk just arrive.
1832		 * If there were writes, mirror is fucked up, sorry.
1833		 * I think the best choice here is don't touch
1834		 * this disk and inform the user laudly.
1835		 */
1836		G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
1837		    "disk (%s) arrives!! It will not be connected to the "
1838		    "running device.", sc->sc_name,
1839		    g_mirror_get_diskname(disk));
1840		g_mirror_destroy_disk(disk);
1841		state = G_MIRROR_DISK_STATE_NONE;
1842		/* Return immediately, because disk was destroyed. */
1843		return (state);
1844	}
1845	G_MIRROR_DEBUG(3, "State for %s disk: %s.",
1846	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
1847	return (state);
1848}
1849
1850/*
1851 * Update device state.
1852 */
1853static void
1854g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force)
1855{
1856	struct g_mirror_disk *disk;
1857	u_int state;
1858
1859	g_topology_assert();
1860
1861	switch (sc->sc_state) {
1862	case G_MIRROR_DEVICE_STATE_STARTING:
1863	    {
1864		struct g_mirror_disk *pdisk;
1865		u_int dirty, ndisks, syncid;
1866
1867		KASSERT(sc->sc_provider == NULL,
1868		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
1869		/*
1870		 * Are we ready? We are, if all disks are connected or
1871		 * if we have any disks and 'force' is true.
1872		 */
1873		if ((force && g_mirror_ndisks(sc, -1) > 0) ||
1874		    sc->sc_ndisks == g_mirror_ndisks(sc, -1)) {
1875			;
1876		} else if (g_mirror_ndisks(sc, -1) == 0) {
1877			/*
1878			 * Disks went down in starting phase, so destroy
1879			 * device.
1880			 */
1881			callout_drain(&sc->sc_callout);
1882			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
1883			return;
1884		} else {
1885			return;
1886		}
1887
1888		/*
1889		 * Activate all disks with the biggest syncid.
1890		 */
1891		if (force) {
1892			/*
1893			 * If 'force' is true, we have been called due to
1894			 * timeout, so don't bother canceling timeout.
1895			 */
1896			ndisks = 0;
1897			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1898				if ((disk->d_flags &
1899				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
1900					ndisks++;
1901				}
1902			}
1903			if (ndisks == 0) {
1904				/* No valid disks found, destroy device. */
1905				sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
1906				return;
1907			}
1908		} else {
1909			/* Cancel timeout. */
1910			callout_drain(&sc->sc_callout);
1911		}
1912
1913		/*
1914		 * Find disk with the biggest syncid.
1915		 */
1916		syncid = 0;
1917		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1918			if (disk->d_sync.ds_syncid > syncid)
1919				syncid = disk->d_sync.ds_syncid;
1920		}
1921
1922		/*
1923		 * Here we need to look for dirty disks and if all disks
1924		 * with the biggest syncid are dirty, we have to choose
1925		 * one with the biggest priority and rebuild the rest.
1926		 */
1927		/*
1928		 * Find the number of dirty disks with the biggest syncid.
1929		 * Find the number of disks with the biggest syncid.
1930		 * While here, find a disk with the biggest priority.
1931		 */
1932		dirty = ndisks = 0;
1933		pdisk = NULL;
1934		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1935			if (disk->d_sync.ds_syncid != syncid)
1936				continue;
1937			if ((disk->d_flags &
1938			    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
1939				continue;
1940			}
1941			ndisks++;
1942			if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
1943				dirty++;
1944				if (pdisk == NULL ||
1945				    pdisk->d_priority < disk->d_priority) {
1946					pdisk = disk;
1947				}
1948			}
1949		}
1950		if (dirty == 0) {
1951			/* No dirty disks at all, great. */
1952		} else if (dirty == ndisks) {
1953			/*
1954			 * Force synchronization for all dirty disks except one
1955			 * with the biggest priority.
1956			 */
1957			KASSERT(pdisk != NULL, ("pdisk == NULL"));
1958			G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a "
1959			    "master disk for synchronization.",
1960			    g_mirror_get_diskname(pdisk), sc->sc_name);
1961			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1962				if (disk->d_sync.ds_syncid != syncid)
1963					continue;
1964				if ((disk->d_flags &
1965				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
1966					continue;
1967				}
1968				KASSERT((disk->d_flags &
1969				    G_MIRROR_DISK_FLAG_DIRTY) != 0,
1970				    ("Disk %s isn't marked as dirty.",
1971				    g_mirror_get_diskname(disk)));
1972				/* Skip the disk with the biggest priority. */
1973				if (disk == pdisk)
1974					continue;
1975				disk->d_sync.ds_syncid = 0;
1976			}
1977		} else if (dirty < ndisks) {
1978			/*
1979			 * Force synchronization for all dirty disks.
1980			 * We have some non-dirty disks.
1981			 */
1982			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1983				if (disk->d_sync.ds_syncid != syncid)
1984					continue;
1985				if ((disk->d_flags &
1986				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
1987					continue;
1988				}
1989				if ((disk->d_flags &
1990				    G_MIRROR_DISK_FLAG_DIRTY) == 0) {
1991					continue;
1992				}
1993				disk->d_sync.ds_syncid = 0;
1994			}
1995		}
1996
1997		/* Reset hint. */
1998		sc->sc_hint = NULL;
1999		sc->sc_syncid = syncid;
2000		if (force) {
2001			/* Remember to bump syncid on first write. */
2002			sc->sc_bump_syncid = G_MIRROR_BUMP_ON_FIRST_WRITE;
2003		}
2004		state = G_MIRROR_DEVICE_STATE_RUNNING;
2005		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
2006		    sc->sc_name, g_mirror_device_state2str(sc->sc_state),
2007		    g_mirror_device_state2str(state));
2008		sc->sc_state = state;
2009		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2010			state = g_mirror_determine_state(disk);
2011			g_mirror_event_send(disk, state,
2012			    G_MIRROR_EVENT_DONTWAIT);
2013			if (state == G_MIRROR_DISK_STATE_STALE) {
2014				sc->sc_bump_syncid =
2015				    G_MIRROR_BUMP_ON_FIRST_WRITE;
2016			}
2017		}
2018		wakeup(&g_mirror_class);
2019		break;
2020	    }
2021	case G_MIRROR_DEVICE_STATE_RUNNING:
2022		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
2023		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2024			/*
2025			 * No active disks or no disks at all,
2026			 * so destroy device.
2027			 */
2028			if (sc->sc_provider != NULL)
2029				g_mirror_destroy_provider(sc);
2030			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2031			break;
2032		} else if (g_mirror_ndisks(sc,
2033		    G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
2034		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2035			/*
2036			 * We have active disks, launch provider if it doesn't
2037			 * exist.
2038			 */
2039			if (sc->sc_provider == NULL)
2040				g_mirror_launch_provider(sc);
2041		}
2042		/*
2043		 * Bump syncid here, if we need to do it immediately.
2044		 */
2045		if (sc->sc_bump_syncid == G_MIRROR_BUMP_IMMEDIATELY) {
2046			sc->sc_bump_syncid = 0;
2047			g_mirror_bump_syncid(sc);
2048		}
2049		break;
2050	default:
2051		KASSERT(1 == 0, ("Wrong device state (%s, %s).",
2052		    sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
2053		break;
2054	}
2055}
2056
2057/*
2058 * Update disk state and device state if needed.
2059 */
2060#define	DISK_STATE_CHANGED()	G_MIRROR_DEBUG(1,			\
2061	"Disk %s state changed from %s to %s (device %s).",		\
2062	g_mirror_get_diskname(disk),					\
2063	g_mirror_disk_state2str(disk->d_state),				\
2064	g_mirror_disk_state2str(state), sc->sc_name)
2065static int
2066g_mirror_update_disk(struct g_mirror_disk *disk, u_int state)
2067{
2068	struct g_mirror_softc *sc;
2069
2070	g_topology_assert();
2071
2072	sc = disk->d_softc;
2073again:
2074	G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.",
2075	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state),
2076	    g_mirror_disk_state2str(state));
2077	switch (state) {
2078	case G_MIRROR_DISK_STATE_NEW:
2079		/*
2080		 * Possible scenarios:
2081		 * 1. New disk arrive.
2082		 */
2083		/* Previous state should be NONE. */
2084		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE,
2085		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2086		    g_mirror_disk_state2str(disk->d_state)));
2087		DISK_STATE_CHANGED();
2088
2089		disk->d_state = state;
2090		if (LIST_EMPTY(&sc->sc_disks))
2091			LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next);
2092		else {
2093			struct g_mirror_disk *dp;
2094
2095			LIST_FOREACH(dp, &sc->sc_disks, d_next) {
2096				if (disk->d_priority >= dp->d_priority) {
2097					LIST_INSERT_BEFORE(dp, disk, d_next);
2098					dp = NULL;
2099					break;
2100				}
2101				if (LIST_NEXT(dp, d_next) == NULL)
2102					break;
2103			}
2104			if (dp != NULL)
2105				LIST_INSERT_AFTER(dp, disk, d_next);
2106		}
2107		G_MIRROR_DEBUG(0, "Device %s: provider %s detected.",
2108		    sc->sc_name, g_mirror_get_diskname(disk));
2109		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
2110			break;
2111		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2112		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2113		    g_mirror_device_state2str(sc->sc_state),
2114		    g_mirror_get_diskname(disk),
2115		    g_mirror_disk_state2str(disk->d_state)));
2116		state = g_mirror_determine_state(disk);
2117		if (state != G_MIRROR_DISK_STATE_NONE)
2118			goto again;
2119		break;
2120	case G_MIRROR_DISK_STATE_ACTIVE:
2121		/*
2122		 * Possible scenarios:
2123		 * 1. New disk does not need synchronization.
2124		 * 2. Synchronization process finished successfully.
2125		 */
2126		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2127		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2128		    g_mirror_device_state2str(sc->sc_state),
2129		    g_mirror_get_diskname(disk),
2130		    g_mirror_disk_state2str(disk->d_state)));
2131		/* Previous state should be NEW or SYNCHRONIZING. */
2132		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW ||
2133		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2134		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2135		    g_mirror_disk_state2str(disk->d_state)));
2136		DISK_STATE_CHANGED();
2137
2138		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
2139			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2140		else if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
2141			disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING;
2142			disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC;
2143			g_mirror_sync_stop(disk, 0);
2144		}
2145		disk->d_state = state;
2146		disk->d_sync.ds_offset = 0;
2147		disk->d_sync.ds_offset_done = 0;
2148		g_mirror_update_access(disk);
2149		g_mirror_update_metadata(disk);
2150		G_MIRROR_DEBUG(0, "Device %s: provider %s activated.",
2151		    sc->sc_name, g_mirror_get_diskname(disk));
2152		break;
2153	case G_MIRROR_DISK_STATE_STALE:
2154		/*
2155		 * Possible scenarios:
2156		 * 1. Stale disk was connected.
2157		 */
2158		/* Previous state should be NEW. */
2159		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2160		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2161		    g_mirror_disk_state2str(disk->d_state)));
2162		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2163		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2164		    g_mirror_device_state2str(sc->sc_state),
2165		    g_mirror_get_diskname(disk),
2166		    g_mirror_disk_state2str(disk->d_state)));
2167		/*
2168		 * STALE state is only possible if device is marked
2169		 * NOAUTOSYNC.
2170		 */
2171		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
2172		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2173		    g_mirror_device_state2str(sc->sc_state),
2174		    g_mirror_get_diskname(disk),
2175		    g_mirror_disk_state2str(disk->d_state)));
2176		DISK_STATE_CHANGED();
2177
2178		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2179		disk->d_state = state;
2180		g_mirror_update_metadata(disk);
2181		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
2182		    sc->sc_name, g_mirror_get_diskname(disk));
2183		break;
2184	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
2185		/*
2186		 * Possible scenarios:
2187		 * 1. Disk which needs synchronization was connected.
2188		 */
2189		/* Previous state should be NEW. */
2190		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2191		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2192		    g_mirror_disk_state2str(disk->d_state)));
2193		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2194		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2195		    g_mirror_device_state2str(sc->sc_state),
2196		    g_mirror_get_diskname(disk),
2197		    g_mirror_disk_state2str(disk->d_state)));
2198		DISK_STATE_CHANGED();
2199
2200		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
2201			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2202		disk->d_state = state;
2203		if (sc->sc_provider != NULL) {
2204			g_mirror_sync_start(disk);
2205			g_mirror_update_metadata(disk);
2206		}
2207		break;
2208	case G_MIRROR_DISK_STATE_DISCONNECTED:
2209		/*
2210		 * Possible scenarios:
2211		 * 1. Device wasn't running yet, but disk disappear.
2212		 * 2. Disk was active and disapppear.
2213		 * 3. Disk disappear during synchronization process.
2214		 */
2215		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
2216			/*
2217			 * Previous state should be ACTIVE, STALE or
2218			 * SYNCHRONIZING.
2219			 */
2220			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
2221			    disk->d_state == G_MIRROR_DISK_STATE_STALE ||
2222			    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2223			    ("Wrong disk state (%s, %s).",
2224			    g_mirror_get_diskname(disk),
2225			    g_mirror_disk_state2str(disk->d_state)));
2226		} else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
2227			/* Previous state should be NEW. */
2228			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2229			    ("Wrong disk state (%s, %s).",
2230			    g_mirror_get_diskname(disk),
2231			    g_mirror_disk_state2str(disk->d_state)));
2232			/*
2233			 * Reset bumping syncid if disk disappeared in STARTING
2234			 * state.
2235			 */
2236			if (sc->sc_bump_syncid == G_MIRROR_BUMP_ON_FIRST_WRITE)
2237				sc->sc_bump_syncid = 0;
2238#ifdef	INVARIANTS
2239		} else {
2240			KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2241			    sc->sc_name,
2242			    g_mirror_device_state2str(sc->sc_state),
2243			    g_mirror_get_diskname(disk),
2244			    g_mirror_disk_state2str(disk->d_state)));
2245#endif
2246		}
2247		DISK_STATE_CHANGED();
2248		G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.",
2249		    sc->sc_name, g_mirror_get_diskname(disk));
2250
2251		g_mirror_destroy_disk(disk);
2252		break;
2253	case G_MIRROR_DISK_STATE_DESTROY:
2254	    {
2255		int error;
2256
2257		error = g_mirror_clear_metadata(disk);
2258		if (error != 0)
2259			return (error);
2260		DISK_STATE_CHANGED();
2261		G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.",
2262		    sc->sc_name, g_mirror_get_diskname(disk));
2263
2264		g_mirror_destroy_disk(disk);
2265		sc->sc_ndisks--;
2266		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2267			g_mirror_update_metadata(disk);
2268		}
2269		break;
2270	    }
2271	default:
2272		KASSERT(1 == 0, ("Unknown state (%u).", state));
2273		break;
2274	}
2275	return (0);
2276}
2277#undef	DISK_STATE_CHANGED
2278
2279static int
2280g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md)
2281{
2282	struct g_provider *pp;
2283	u_char *buf;
2284	int error;
2285
2286	g_topology_assert();
2287
2288	error = g_access(cp, 1, 0, 0);
2289	if (error != 0)
2290		return (error);
2291	pp = cp->provider;
2292	g_topology_unlock();
2293	/* Metadata are stored on last sector. */
2294	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2295	    &error);
2296	g_topology_lock();
2297	g_access(cp, -1, 0, 0);
2298	if (error != 0) {
2299		if (buf != NULL)
2300			g_free(buf);
2301		return (error);
2302	}
2303
2304	/* Decode metadata. */
2305	error = mirror_metadata_decode(buf, md);
2306	g_free(buf);
2307	if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0)
2308		return (EINVAL);
2309	if (error != 0) {
2310		G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
2311		    cp->provider->name);
2312		return (error);
2313	}
2314
2315	return (0);
2316}
2317
2318static int
2319g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp,
2320    struct g_mirror_metadata *md)
2321{
2322
2323	if (g_mirror_id2disk(sc, md->md_did) != NULL) {
2324		G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.",
2325		    pp->name, md->md_did);
2326		return (EEXIST);
2327	}
2328	if (md->md_all != sc->sc_ndisks) {
2329		G_MIRROR_DEBUG(1,
2330		    "Invalid '%s' field on disk %s (device %s), skipping.",
2331		    "md_all", pp->name, sc->sc_name);
2332		return (EINVAL);
2333	}
2334	if (md->md_slice != sc->sc_slice) {
2335		G_MIRROR_DEBUG(1,
2336		    "Invalid '%s' field on disk %s (device %s), skipping.",
2337		    "md_slice", pp->name, sc->sc_name);
2338		return (EINVAL);
2339	}
2340	if (md->md_balance != sc->sc_balance) {
2341		G_MIRROR_DEBUG(1,
2342		    "Invalid '%s' field on disk %s (device %s), skipping.",
2343		    "md_balance", pp->name, sc->sc_name);
2344		return (EINVAL);
2345	}
2346	if (md->md_mediasize != sc->sc_mediasize) {
2347		G_MIRROR_DEBUG(1,
2348		    "Invalid '%s' field on disk %s (device %s), skipping.",
2349		    "md_mediasize", pp->name, sc->sc_name);
2350		return (EINVAL);
2351	}
2352	if (sc->sc_mediasize > pp->mediasize) {
2353		G_MIRROR_DEBUG(1,
2354		    "Invalid size of disk %s (device %s), skipping.", pp->name,
2355		    sc->sc_name);
2356		return (EINVAL);
2357	}
2358	if (md->md_sectorsize != sc->sc_sectorsize) {
2359		G_MIRROR_DEBUG(1,
2360		    "Invalid '%s' field on disk %s (device %s), skipping.",
2361		    "md_sectorsize", pp->name, sc->sc_name);
2362		return (EINVAL);
2363	}
2364	if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2365		G_MIRROR_DEBUG(1,
2366		    "Invalid sector size of disk %s (device %s), skipping.",
2367		    pp->name, sc->sc_name);
2368		return (EINVAL);
2369	}
2370	if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) {
2371		G_MIRROR_DEBUG(1,
2372		    "Invalid device flags on disk %s (device %s), skipping.",
2373		    pp->name, sc->sc_name);
2374		return (EINVAL);
2375	}
2376	if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) {
2377		G_MIRROR_DEBUG(1,
2378		    "Invalid disk flags on disk %s (device %s), skipping.",
2379		    pp->name, sc->sc_name);
2380		return (EINVAL);
2381	}
2382	return (0);
2383}
2384
2385static int
2386g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
2387    struct g_mirror_metadata *md)
2388{
2389	struct g_mirror_disk *disk;
2390	int error;
2391
2392	g_topology_assert();
2393	G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name);
2394
2395	error = g_mirror_check_metadata(sc, pp, md);
2396	if (error != 0)
2397		return (error);
2398	disk = g_mirror_init_disk(sc, pp, md, &error);
2399	if (disk == NULL)
2400		return (error);
2401	error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW,
2402	    G_MIRROR_EVENT_WAIT);
2403	return (error);
2404}
2405
2406static int
2407g_mirror_access(struct g_provider *pp, int acr, int acw, int ace)
2408{
2409	struct g_mirror_softc *sc;
2410	struct g_mirror_disk *disk;
2411	int dcr, dcw, dce;
2412
2413	g_topology_assert();
2414	G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
2415	    acw, ace);
2416
2417	dcr = pp->acr + acr;
2418	dcw = pp->acw + acw;
2419	dce = pp->ace + ace;
2420
2421	sc = pp->geom->softc;
2422	if (sc == NULL || LIST_EMPTY(&sc->sc_disks) ||
2423	    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
2424		if (acr <= 0 && acw <= 0 && ace <= 0)
2425			return (0);
2426		else
2427			return (ENXIO);
2428	}
2429	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2430		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
2431			continue;
2432		/*
2433		 * Mark disk as dirty on open and unmark on close.
2434		 */
2435		if (pp->acw == 0 && dcw > 0) {
2436			G_MIRROR_DEBUG(1,
2437			    "Disk %s (device %s) marked as dirty.",
2438			    g_mirror_get_diskname(disk), sc->sc_name);
2439			disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
2440			g_mirror_update_metadata(disk);
2441		} else if (pp->acw > 0 && dcw == 0) {
2442			G_MIRROR_DEBUG(1,
2443			    "Disk %s (device %s) marked as clean.",
2444			    g_mirror_get_diskname(disk), sc->sc_name);
2445			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2446			g_mirror_update_metadata(disk);
2447		}
2448	}
2449	return (0);
2450}
2451
/*
 * Create a new mirror device from decoded metadata: allocate the softc,
 * set up the action geom and the synchronization geom, start the worker
 * thread, and arm the startup timeout.  Returns the action geom, or
 * NULL if the metadata is invalid or the worker cannot be created.
 */
static struct g_geom *
g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_geom *gp;
	int error, timeout;

	g_topology_assert();
	G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name,
	    md->md_mid);

	/* One disk is minimum. */
	if (md->md_all < 1)
		return (NULL);
	/*
	 * Action geom.
	 */
	gp = g_new_geomf(mp, "%s", md->md_name);
	sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO);
	gp->start = g_mirror_start;
	gp->spoiled = g_mirror_spoiled;
	gp->orphan = g_mirror_orphan;
	gp->access = g_mirror_access;
	gp->dumpconf = g_mirror_dumpconf;

	/* Device-wide parameters are taken verbatim from the metadata. */
	sc->sc_id = md->md_mid;
	sc->sc_slice = md->md_slice;
	sc->sc_balance = md->md_balance;
	sc->sc_mediasize = md->md_mediasize;
	sc->sc_sectorsize = md->md_sectorsize;
	sc->sc_ndisks = md->md_all;
	sc->sc_flags = md->md_mflags;
	sc->sc_bump_syncid = 0;
	sc->sc_idle = 0;
	bioq_init(&sc->sc_queue);
	mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF);
	LIST_INIT(&sc->sc_disks);
	TAILQ_INIT(&sc->sc_events);
	mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF);
	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
	/* Device stays in STARTING until disks are tasted or timeout fires. */
	sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING;
	gp->softc = sc;
	sc->sc_geom = gp;
	sc->sc_provider = NULL;
	/*
	 * Synchronization geom.
	 */
	gp = g_new_geomf(mp, "%s.sync", md->md_name);
	gp->softc = sc;
	gp->orphan = g_mirror_orphan;
	sc->sc_sync.ds_geom = gp;
	sc->sc_sync.ds_ndisks = 0;
	error = kthread_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0,
	    "g_mirror %s", md->md_name);
	if (error != 0) {
		G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.",
		    sc->sc_name);
		/* Unwind everything allocated above, in reverse order. */
		g_destroy_geom(sc->sc_sync.ds_geom);
		mtx_destroy(&sc->sc_events_mtx);
		mtx_destroy(&sc->sc_queue_mtx);
		g_destroy_geom(sc->sc_geom);
		free(sc, M_MIRROR);
		return (NULL);
	}

	G_MIRROR_DEBUG(0, "Device %s created (id=%u).", sc->sc_name, sc->sc_id);

	/*
	 * Run timeout.
	 */
	timeout = g_mirror_timeout * hz;
	callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc);
	return (sc->sc_geom);
}
2526
/*
 * Tear down a mirror device.  If the provider is still open the device
 * is destroyed only when 'force' is set; otherwise EBUSY is returned.
 * Signals the worker thread to exit, waits for it to finish, then frees
 * all remaining device state.  Called with the topology lock held.
 */
int
g_mirror_destroy(struct g_mirror_softc *sc, boolean_t force)
{
	struct g_provider *pp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);
	pp = sc->sc_provider;
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_MIRROR_DEBUG(1, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		} else {
			G_MIRROR_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	/* Ask the worker thread to destroy the device and exit. */
	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT;
	g_topology_unlock();
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
	/* Poll until the worker clears sc_worker on its way out. */
	while (sc->sc_worker != NULL)
		tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5);
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
	g_topology_lock();
	g_mirror_destroy_device(sc);
	free(sc, M_MIRROR);
	return (0);
}
2565
2566static void
2567g_mirror_taste_orphan(struct g_consumer *cp)
2568{
2569
2570	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2571	    cp->provider->name));
2572}
2573
/*
 * Taste a provider: read mirror metadata from its last sector and, if it
 * belongs to a mirror (creating the device on first sight), attach the
 * provider as a disk.  Returns the device geom or NULL when the provider
 * is not, or cannot become, part of a mirror.
 */
static struct g_geom *
g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_mirror_metadata md;
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);

	/* Temporary geom and consumer, used only to read the metadata. */
	gp = g_new_geomf(mp, "mirror:taste");
	/*
	 * This orphan function should be never called.
	 */
	gp->orphan = g_mirror_taste_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_mirror_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (md.md_version > G_MIRROR_VERSION) {
		printf("geom_mirror.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	/* A hardcoded provider name must match this provider exactly. */
	if (md.md_provider[0] != '\0' && strcmp(md.md_provider, pp->name) != 0)
		return (NULL);
	if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
		G_MIRROR_DEBUG(0,
		    "Device %s: provider %s marked as inactive, skipping.",
		    md.md_name, pp->name);
		return (NULL);
	}
	if (g_mirror_debug >= 2)
		mirror_metadata_dump(&md);

	/*
	 * Let's check if device already exists.
	 */
	sc = NULL;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		/* Skip synchronization geoms; they share the same softc. */
		if (sc->sc_sync.ds_geom == gp)
			continue;
		if (strcmp(md.md_name, sc->sc_name) != 0)
			continue;
		/* Same name but different device ID: refuse to mix. */
		if (md.md_mid != sc->sc_id) {
			G_MIRROR_DEBUG(0, "Device %s already configured.",
			    sc->sc_name);
			return (NULL);
		}
		break;
	}
	if (gp == NULL) {
		gp = g_mirror_create(mp, &md);
		if (gp == NULL) {
			G_MIRROR_DEBUG(0, "Cannot create device %s.",
			    md.md_name);
			return (NULL);
		}
		sc = gp->softc;
	}
	G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
	error = g_mirror_add_disk(sc, pp, &md);
	if (error != 0) {
		G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
		    pp->name, gp->name, error);
		/* Destroy a freshly created device that never got a disk. */
		if (LIST_EMPTY(&sc->sc_disks))
			g_mirror_destroy(sc, 1);
		return (NULL);
	}
	return (gp);
}
2657
2658static int
2659g_mirror_destroy_geom(struct gctl_req *req __unused,
2660    struct g_class *mp __unused, struct g_geom *gp)
2661{
2662
2663	return (g_mirror_destroy(gp->softc, 0));
2664}
2665
/*
 * GEOM dumpconf method: emit the device's XML fragment for the
 * confxml tree.  The consumer branch describes a single disk, the
 * geom branch describes the device as a whole; the provider branch
 * adds nothing.  The synchronization geom is skipped entirely.
 */
static void
g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_mirror_softc *sc;

	g_topology_assert();

	sc = gp->softc;
	if (sc == NULL)
		return;
	/* Skip synchronization geom. */
	if (gp == sc->sc_sync.ds_geom)
		return;
	if (pp != NULL) {
		/* Nothing here. */
	} else if (cp != NULL) {
		struct g_mirror_disk *disk;

		disk = cp->private;
		if (disk == NULL)
			return;
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			/* Report synchronization progress as a percentage. */
			sbuf_printf(sb, "%s<Synchronized>", indent);
			if (disk->d_sync.ds_offset_done == 0)
				sbuf_printf(sb, "0%%");
			else {
				sbuf_printf(sb, "%u%%",
				    (u_int)((disk->d_sync.ds_offset_done * 100) /
				    sc->sc_provider->mediasize));
			}
			sbuf_printf(sb, "</Synchronized>\n");
		}
		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
		    disk->d_sync.ds_syncid);
		sbuf_printf(sb, "%s<Flags>", indent);
		if (disk->d_flags == 0)
			sbuf_printf(sb, "NONE");
		else {
			int first = 1;

/* Append flag 'name' to the comma-separated list if 'flag' is set. */
#define	ADD_FLAG(flag, name)	do {					\
	if ((disk->d_flags & (flag)) != 0) {				\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
			ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY");
			ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED");
			ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE");
			ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING,
			    "SYNCHRONIZING");
			ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
#undef	ADD_FLAG
		}
		sbuf_printf(sb, "</Flags>\n");
		sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent,
		    disk->d_priority);
		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
		    g_mirror_disk_state2str(disk->d_state));
	} else {
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
		sbuf_printf(sb, "%s<Flags>", indent);
		if (sc->sc_flags == 0)
			sbuf_printf(sb, "NONE");
		else {
			int first = 1;

/* Same helper as above, but for device-wide flags. */
#define	ADD_FLAG(flag, name)	do {					\
	if ((sc->sc_flags & (flag)) != 0) {				\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
#undef	ADD_FLAG
		}
		sbuf_printf(sb, "</Flags>\n");
		sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent,
		    (u_int)sc->sc_slice);
		sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent,
		    balance_name(sc->sc_balance));
		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
		    sc->sc_ndisks);
		/* COMPLETE only when every configured disk is ACTIVE. */
		sbuf_printf(sb, "%s<State>", indent);
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
			sbuf_printf(sb, "%s", "STARTING");
		else if (sc->sc_ndisks ==
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE))
			sbuf_printf(sb, "%s", "COMPLETE");
		else
			sbuf_printf(sb, "%s", "DEGRADED");
		sbuf_printf(sb, "</State>\n");
	}
}
2769
/*
 * Shutdown event handler: forcibly destroy all mirror devices so that
 * final metadata is flushed before the system goes down.
 */
static void
g_mirror_shutdown(void *arg, int howto)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;

	mp = arg;
	DROP_GIANT();
	g_topology_lock();
	/*
	 * _SAFE variant because destruction removes gp from the list.
	 * NOTE(review): g_mirror_destroy() also removes the device's
	 * sync geom; if that geom happens to be the saved next element
	 * (gp2), it could dangle — confirm this cannot occur here.
	 */
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		if (gp->softc == NULL)
			continue;
		g_mirror_destroy(gp->softc, 1);
	}
	g_topology_unlock();
	PICKUP_GIANT();
#if 0
	tsleep(&gp, PRIBIO, "m:shutdown", hz * 20);
#endif
}
2790
2791static void
2792g_mirror_init(struct g_class *mp)
2793{
2794
2795	g_mirror_ehtag = EVENTHANDLER_REGISTER(shutdown_post_sync,
2796	    g_mirror_shutdown, mp, SHUTDOWN_PRI_FIRST);
2797	if (g_mirror_ehtag == NULL)
2798		G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event.");
2799}
2800
2801static void
2802g_mirror_fini(struct g_class *mp)
2803{
2804
2805	if (g_mirror_ehtag == NULL)
2806		return;
2807	EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_ehtag);
2808}
2809
2810static int
2811g_mirror_can_go(void)
2812{
2813	struct g_mirror_softc *sc;
2814	struct g_geom *gp;
2815	struct g_provider *pp;
2816	int can_go;
2817
2818	DROP_GIANT();
2819	can_go = 1;
2820	g_topology_lock();
2821	LIST_FOREACH(gp, &g_mirror_class.geom, geom) {
2822		sc = gp->softc;
2823		if (sc == NULL) {
2824			can_go = 0;
2825			break;
2826		}
2827		pp = sc->sc_provider;
2828		if (pp == NULL || pp->error != 0) {
2829			can_go = 0;
2830			break;
2831		}
2832	}
2833	g_topology_unlock();
2834	PICKUP_GIANT();
2835	return (can_go);
2836}
2837
2838static void
2839g_mirror_rootwait(void)
2840{
2841
2842	/*
2843	 * HACK: Wait for GEOM, because g_mirror_rootwait() can be called,
2844	 * HACK: before we get providers for tasting.
2845	 */
2846	tsleep(&g_mirror_class, PRIBIO, "mroot", hz * 3);
2847	/*
2848	 * Wait for mirrors in degraded state.
2849	 */
2850	for (;;) {
2851		if (g_mirror_can_go())
2852			break;
2853		tsleep(&g_mirror_class, PRIBIO, "mroot", hz);
2854	}
2855}
2856
/* Run g_mirror_rootwait() before root mount, at the RAID sysinit stage. */
SYSINIT(g_mirror_root, SI_SUB_RAID, SI_ORDER_FIRST, g_mirror_rootwait, NULL)

DECLARE_GEOM_CLASS(g_mirror_class, g_mirror);
2860