g_mirror.c revision 223921
/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/geom/mirror/g_mirror.c 223921 2011-07-11 05:22:31Z ae $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/mirror/g_mirror.h>

FEATURE(geom_mirror, "GEOM mirroring support");

static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, "GEOM_MIRROR stuff");
u_int g_mirror_debug = 0;
TUNABLE_INT("kern.geom.mirror.debug", &g_mirror_debug);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RW, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_timeout = 4;
TUNABLE_INT("kern.geom.mirror.timeout", &g_mirror_timeout);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RW, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
static u_int g_mirror_idletime = 5;
TUNABLE_INT("kern.geom.mirror.idletime", &g_mirror_idletime);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RW,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
static u_int g_mirror_disconnect_on_failure = 1;
TUNABLE_INT("kern.geom.mirror.disconnect_on_failure",
    &g_mirror_disconnect_on_failure);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
    &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_mirror_syncreqs = 2;
TUNABLE_INT("kern.geom.mirror.sync_requests", &g_mirror_syncreqs);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests.");
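/*
 * Usage note (added commentary, not from the original source): each knob
 * above is also a loader tunable, e.g.
 *	sysctl kern.geom.mirror.debug=1
 *	echo 'kern.geom.mirror.sync_requests="4"' >> /boot/loader.conf
 * sync_requests is CTLFLAG_RDTUN, so it can only be set as a tunable and
 * cannot be changed at runtime.
 */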

#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)

static eventhandler_tag g_mirror_pre_sync = NULL;

static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static g_taste_t g_mirror_taste;
static void g_mirror_init(struct g_class *mp);
static void g_mirror_fini(struct g_class *mp);

struct g_class g_mirror_class = {
	.name = G_MIRROR_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_mirror_config,
	.taste = g_mirror_taste,
	.destroy_geom = g_mirror_destroy_geom,
	.init = g_mirror_init,
	.fini = g_mirror_fini
};


static void g_mirror_destroy_provider(struct g_mirror_softc *sc);
static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
static void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force);
static void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type);
static void g_mirror_register_request(struct bio *bp);
static void g_mirror_sync_release(struct g_mirror_softc *sc);

static const char *
g_mirror_disk_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DISK_STATE_NONE:
		return ("NONE");
	case G_MIRROR_DISK_STATE_NEW:
		return ("NEW");
	case G_MIRROR_DISK_STATE_ACTIVE:
		return ("ACTIVE");
	case G_MIRROR_DISK_STATE_STALE:
		return ("STALE");
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		return ("SYNCHRONIZING");
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		return ("DISCONNECTED");
	case G_MIRROR_DISK_STATE_DESTROY:
		return ("DESTROY");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_device_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DEVICE_STATE_STARTING:
		return ("STARTING");
	case G_MIRROR_DEVICE_STATE_RUNNING:
		return ("RUNNING");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_get_diskname(struct g_mirror_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_name);
}

/*
 * --- Events handling functions ---
 * Events in geom_mirror are used to manage disk and device state
 * from a single thread, which simplifies locking.
 */
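/*
 * Typical flow (an illustration of the mechanism above): g_mirror_orphan()
 * queues a DISCONNECTED event with G_MIRROR_EVENT_DONTWAIT and returns
 * immediately; the worker thread later dequeues the event and applies it
 * via g_mirror_update_disk()/g_mirror_update_device() while holding
 * sc_lock.
 */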
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

	free(ep, M_MIRROR);
}

int
g_mirror_event_send(void *arg, int state, int flags)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
		return (0);
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_mirror_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}

static struct g_mirror_event *
g_mirror_event_get(struct g_mirror_softc *sc)
{
	struct g_mirror_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_mirror_event_cancel(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_mirror_ndisks(struct g_mirror_softc *sc, int state)
{
	struct g_mirror_disk *disk;
	u_int n = 0;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (state == -1 || disk->d_state == state)
			n++;
	}
	return (n);
}

/*
 * Find a disk in the mirror by its disk ID.
 */
static struct g_mirror_disk *
g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
{
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_id == id)
			return (disk);
	}
	return (NULL);
}

static u_int
g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

static int
g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_mirror_nrequests(sc, cp) > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event was sent (inside g_access()), we can
		 * send the event to detach and destroy the consumer.
		 * A class which has a consumer attached to the given provider
		 * will not receive a retaste event for that provider.
		 * This is the way I ignore retaste events when I close
		 * consumers opened for write: I detach and destroy the
		 * consumer after the retaste event has been sent.
		 */
		g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static int
g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_topology_unlock();
		G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	g_topology_unlock();
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;

	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
	return (0);
}

static void
g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_mirror_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize disk. This means allocate memory, create a consumer, attach it
 * to the provider and open access (r1w1e1) to it.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	disk->d_id = md->md_did;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_priority = md->md_priority;
	disk->d_flags = md->md_dflags;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL)
		free(disk, M_MIRROR);
	return (NULL);
}

static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_REMOVE(disk, d_next);
	g_mirror_event_cancel(disk);
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk, 1);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_STALE:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		free(disk, M_MIRROR);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}

static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;
	struct g_consumer *cp, *tmpcp;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
		g_mirror_destroy_disk(disk);
	}
	while ((ep = g_mirror_event_get(sc)) != NULL) {
		g_mirror_event_remove(sc, ep);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);

	g_topology_lock();
	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
		g_mirror_disconnect_consumer(sc, cp);
	}
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	g_topology_unlock();
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	sx_xunlock(&sc->sc_lock);
	sx_destroy(&sc->sc_lock);
}

static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}

/*
 * Return the next active disk on the list.
 * It is possible that it will be the same disk as the given one.
 * If there are no active disks on the list, NULL is returned.
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}

static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_hint == NULL) {
		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
		if (sc->sc_hint == NULL)
			return (NULL);
	}
	disk = sc->sc_hint;
	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
		disk = g_mirror_find_next(sc, disk);
		if (disk == NULL)
			return (NULL);
	}
	sc->sc_hint = g_mirror_find_next(sc, disk);
	return (disk);
}

static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
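	/* The metadata lives in the provider's last sector. */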
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
	if (md != NULL)
		mirror_metadata_encode(md, sector);
	error = g_write_data(cp, offset, sector, length);
	free(sector, M_MIRROR);
	if (error != 0) {
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
			disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
			G_MIRROR_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		} else {
			G_MIRROR_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		}
		if (g_mirror_disconnect_on_failure &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
		}
	}
	return (error);
}

661
662static int
663g_mirror_clear_metadata(struct g_mirror_disk *disk)
664{
665	int error;
666
667	g_topology_assert_not();
668	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);
669
670	error = g_mirror_write_metadata(disk, NULL);
671	if (error == 0) {
672		G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
673		    g_mirror_get_diskname(disk));
674	} else {
675		G_MIRROR_DEBUG(0,
676		    "Cannot clear metadata on disk %s (error=%d).",
677		    g_mirror_get_diskname(disk), error);
678	}
679	return (error);
680}
681
682void
683g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
684    struct g_mirror_metadata *md)
685{
686
687	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
688	md->md_version = G_MIRROR_VERSION;
689	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
690	md->md_mid = sc->sc_id;
691	md->md_all = sc->sc_ndisks;
692	md->md_slice = sc->sc_slice;
693	md->md_balance = sc->sc_balance;
694	md->md_genid = sc->sc_genid;
695	md->md_mediasize = sc->sc_mediasize;
696	md->md_sectorsize = sc->sc_sectorsize;
697	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
698	bzero(md->md_provider, sizeof(md->md_provider));
699	if (disk == NULL) {
700		md->md_did = arc4random();
701		md->md_priority = 0;
702		md->md_syncid = 0;
703		md->md_dflags = 0;
704		md->md_sync_offset = 0;
705		md->md_provsize = 0;
706	} else {
707		md->md_did = disk->d_id;
708		md->md_priority = disk->d_priority;
709		md->md_syncid = disk->d_sync.ds_syncid;
710		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
711		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
712			md->md_sync_offset = disk->d_sync.ds_offset_done;
713		else
714			md->md_sync_offset = 0;
715		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
716			strlcpy(md->md_provider,
717			    disk->d_consumer->provider->name,
718			    sizeof(md->md_provider));
719		}
720		md->md_provsize = disk->d_consumer->provider->mediasize;
721	}
722}
723
724void
725g_mirror_update_metadata(struct g_mirror_disk *disk)
726{
727	struct g_mirror_softc *sc;
728	struct g_mirror_metadata md;
729	int error;
730
731	g_topology_assert_not();
732	sc = disk->d_softc;
733	sx_assert(&sc->sc_lock, SX_LOCKED);
734
735	g_mirror_fill_metadata(sc, disk, &md);
736	error = g_mirror_write_metadata(disk, &md);
737	if (error == 0) {
738		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
739		    g_mirror_get_diskname(disk));
740	} else {
741		G_MIRROR_DEBUG(0,
742		    "Cannot update metadata on disk %s (error=%d).",
743		    g_mirror_get_diskname(disk), error);
744	}
745}
746
747static void
748g_mirror_bump_syncid(struct g_mirror_softc *sc)
749{
750	struct g_mirror_disk *disk;
751
752	g_topology_assert_not();
753	sx_assert(&sc->sc_lock, SX_XLOCKED);
754	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
755	    ("%s called with no active disks (device=%s).", __func__,
756	    sc->sc_name));
757
758	sc->sc_syncid++;
759	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
760	    sc->sc_syncid);
761	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
762		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
763		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
764			disk->d_sync.ds_syncid = sc->sc_syncid;
765			g_mirror_update_metadata(disk);
766		}
767	}
768}
769
770static void
771g_mirror_bump_genid(struct g_mirror_softc *sc)
772{
773	struct g_mirror_disk *disk;
774
775	g_topology_assert_not();
776	sx_assert(&sc->sc_lock, SX_XLOCKED);
777	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
778	    ("%s called with no active disks (device=%s).", __func__,
779	    sc->sc_name));
780
781	sc->sc_genid++;
782	G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
783	    sc->sc_genid);
784	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
785		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
786		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
787			disk->d_genid = sc->sc_genid;
788			g_mirror_update_metadata(disk);
789		}
790	}
791}
792
static int
g_mirror_idle(struct g_mirror_softc *sc, int acw)
{
	struct g_mirror_disk *disk;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write);
		if (timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	return (0);
}

static void
g_mirror_unidle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
}

static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_regular_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	pbp = bp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	bp->bio_from->index--;
	if (bp->bio_cmd == BIO_WRITE)
		sc->sc_writes--;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
	}

	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (bp->bio_error == 0 && pbp->bio_error == 0) {
		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
		g_destroy_bio(bp);
		if (pbp->bio_children == pbp->bio_inbed) {
			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
			pbp->bio_completed = pbp->bio_length;
			if (pbp->bio_cmd == BIO_WRITE) {
				bioq_remove(&sc->sc_inflight, pbp);
				/* Release delayed sync requests if possible. */
				g_mirror_sync_release(sc);
			}
			g_io_deliver(pbp, pbp->bio_error);
		}
		return;
	} else if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		if (disk != NULL) {
			if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
				disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
				G_MIRROR_LOGREQ(0, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			} else {
				G_MIRROR_LOGREQ(1, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			}
			if (g_mirror_disconnect_on_failure &&
			    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1)
			{
				sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
				g_mirror_event_send(disk,
				    G_MIRROR_DISK_STATE_DISCONNECTED,
				    G_MIRROR_EVENT_DONTWAIT);
			}
		}
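		/*
		 * For writes and deletes, drop the failed child from the
		 * accounting below so that the parent request can still
		 * complete successfully as long as at least one mirror
		 * component succeeded.
		 */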
		switch (pbp->bio_cmd) {
		case BIO_DELETE:
		case BIO_WRITE:
			pbp->bio_inbed--;
			pbp->bio_children--;
			break;
		}
	}
	g_destroy_bio(bp);

	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (pbp->bio_inbed < pbp->bio_children)
			break;
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1)
			g_io_deliver(pbp, pbp->bio_error);
		else {
			pbp->bio_error = 0;
			mtx_lock(&sc->sc_queue_mtx);
			bioq_disksort(&sc->sc_queue, pbp);
			mtx_unlock(&sc->sc_queue_mtx);
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
			wakeup(sc);
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_inbed < pbp->bio_children) {
			/* Do nothing. */
			break;
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* Some requests succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		}
		bioq_remove(&sc->sc_inflight, pbp);
		/* Release delayed sync requests if possible. */
		g_mirror_sync_release(sc);
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
		break;
	}
}

static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_kernel_dump(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *cbp;
	struct g_kerneldump *gkd;

	/*
	 * We configure dumping to the first component, because this component
	 * will be used for reading with the 'prefer' balance algorithm.
	 * If the component with the highest priority is currently disconnected
	 * we will not be able to read the dump after a reboot if it is
	 * connected and synchronized later. Can we do something better?
	 */
	sc = bp->bio_to->geom->softc;
	disk = LIST_FIRST(&sc->sc_disks);

	gkd = (struct g_kerneldump *)bp->bio_data;
	if (gkd->length > bp->bio_to->mediasize)
		gkd->length = bp->bio_to->mediasize;
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	g_io_request(cbp, disk->d_consumer);
	G_MIRROR_DEBUG(1, "Kernel dump will go to %s.",
	    g_mirror_get_diskname(disk));
}

static void
g_mirror_flush(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_std_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
	}
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_FLUSH:
		g_mirror_flush(sc, bp);
		return;
	case BIO_GETATTR:
		if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) {
			g_mirror_kernel_dump(bp);
			return;
		}
		/* FALLTHROUGH */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

/*
 * Return TRUE if the given request collides with an in-progress
 * synchronization request.
 */
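/*
 * Overlap check used below (illustrative numbers): half-open ranges
 * [rstart, rend) and [sstart, send) intersect iff rend > sstart &&
 * rstart < send; e.g. a 64kB write at offset 128kB collides with a
 * sync bio covering [64kB, 192kB).
 */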
static int
g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	int i;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING)
			continue;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			sbp = disk->d_sync.ds_bios[i];
			if (sbp == NULL)
				continue;
			sstart = sbp->bio_offset;
			send = sbp->bio_offset + sbp->bio_length;
			if (rend > sstart && rstart < send)
				return (1);
		}
	}
	return (0);
}

/*
 * Return TRUE if the given sync request collides with an in-progress
 * regular request.
 */
static int
g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	sstart = sbp->bio_offset;
	send = sbp->bio_offset + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Puts request onto delayed queue.
 */
static void
g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying request.");
	bioq_insert_head(&sc->sc_regular_delayed, bp);
}

/*
 * Puts synchronization request onto delayed queue.
 */
static void
g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request.");
	bioq_insert_tail(&sc->sc_sync_delayed, bp);
}

/*
 * Releases delayed regular requests which don't collide anymore with sync
 * requests.
 */
static void
g_mirror_regular_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
		if (g_mirror_sync_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_regular_delayed, bp);
		G_MIRROR_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_head(&sc->sc_queue, bp);
#if 0
		/*
		 * wakeup() is not needed, because this function is called from
		 * the worker thread.
		 */
		wakeup(&sc->sc_queue);
#endif
		mtx_unlock(&sc->sc_queue_mtx);
	}
}

/*
 * Releases delayed sync requests which don't collide anymore with regular
 * requests.
 */
static void
g_mirror_sync_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
		if (g_mirror_regular_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_sync_delayed, bp);
		G_MIRROR_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a READ request
 * is sent to the active provider and then a WRITE request (with the read
 * data) to the provider being synchronized. When the WRITE is finished, a new
 * synchronization request is sent.
 */
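/*
 * Illustrative life cycle of one of the g_mirror_syncreqs parallel slots:
 * a BIO_READ from an active disk completes -> g_mirror_sync_done() queues
 * it back to the worker -> g_mirror_sync_request() flips it to BIO_WRITE
 * towards the synchronizing disk -> on WRITE completion the bio is
 * reinitialized and the next BIO_READ is issued.
 */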
static void
g_mirror_sync_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;

	bp->bio_from->index--;
	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		free(bp->bio_data, M_MIRROR);
		g_destroy_bio(bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			return;
		}
		G_MIRROR_LOGREQ(3, bp,
		    "Synchronization request half-finished.");
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
	    {
		struct g_mirror_disk_sync *sync;
		off_t offset;
		void *data;
		int i;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		sync = &disk->d_sync;
		if (sync->ds_offset == sc->sc_mediasize ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			if (sync->ds_bios != NULL) {
				i = (int)(uintptr_t)bp->bio_caller1;
				sync->ds_bios[i] = NULL;
			}
			free(bp->bio_data, M_MIRROR);
			g_destroy_bio(bp);
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				return;
			}
			/* Disk up-to-date, activate it. */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		data = bp->bio_data;
		bzero(bp, sizeof(*bp));
		bp->bio_cmd = BIO_READ;
		bp->bio_offset = sync->ds_offset;
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		sync->ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_data = data;
		bp->bio_from = sync->ds_consumer;
		bp->bio_to = sc->sc_provider;
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Release delayed requests if possible. */
		g_mirror_regular_release(sc);

		/* Find the smallest offset */
		offset = sc->sc_mediasize;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			bp = sync->ds_bios[i];
			if (bp->bio_offset < offset)
				offset = bp->bio_offset;
		}
		if (sync->ds_offset_done + (MAXPHYS * 100) < offset) {
			/* Update offset_done on every 100 blocks. */
			sync->ds_offset_done = offset;
			g_mirror_update_metadata(disk);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

static void
g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

static void
g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	disk = g_mirror_get_disk(sc);
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

#define TRACK_SIZE  (1 * 1024 * 1024)
#define LOAD_SCALE	256
#define ABS(x)		(((x) >= 0) ? (x) : (-(x)))
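/*
 * A note on the load balancer below (added commentary): d_consumer->index
 * is the number of requests currently outstanding on a component, and the
 * load update in g_mirror_request_load() keeps a fixed-point moving
 * average, load' = (index * LOAD_SCALE + 7 * load) / 8, i.e. roughly an
 * EWMA with alpha = 1/8 scaled by LOAD_SCALE.
 */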

static void
g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk, *dp;
	struct g_consumer *cp;
	struct bio *cbp;
	int prio, best;

	/* Find a disk with the smallest load. */
	disk = NULL;
	best = INT_MAX;
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		prio = dp->load;
		/* If disk head is precisely in position - highly prefer it. */
		if (dp->d_last_offset == bp->bio_offset)
			prio -= 2 * LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE)
			prio -= 1 * LOAD_SCALE;
		if (prio <= best) {
			disk = dp;
			best = prio;
		}
	}
	KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name));
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	/* Remember last head position */
	disk->d_last_offset = bp->bio_offset + bp->bio_length;
	/* Update loads. */
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		dp->load = (dp->d_consumer->index * LOAD_SCALE +
		    dp->load * 7) / 8;
	}
	g_io_request(cbp, cp);
}

static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	off_t left, mod, offset, slice;
	u_char *data;
	u_int ndisks;

	if (bp->bio_length <= sc->sc_slice) {
		g_mirror_request_round_robin(sc, bp);
		return;
	}
	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
	slice = bp->bio_length / ndisks;
	mod = slice % sc->sc_provider->sectorsize;
	if (mod != 0)
		slice += sc->sc_provider->sectorsize - mod;
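	/*
	 * Worked example (illustrative): a 256kB read over 3 active disks
	 * with 512-byte sectors gives slice = 87381, rounded up to 87552
	 * (171 sectors); the last component simply gets whatever is left.
	 */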
	/*
	 * Allocate all bios before sending any request, so we can
	 * return ENOMEM in a nice and clean way.
	 */
	left = bp->bio_length;
	offset = bp->bio_offset;
	data = bp->bio_data;
	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_mirror_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
		cbp->bio_offset = offset;
		cbp->bio_data = data;
		cbp->bio_length = MIN(left, slice);
		left -= cbp->bio_length;
		if (left == 0)
			break;
		offset += cbp->bio_length;
		data += cbp->bio_length;
	}
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		disk->d_consumer->index++;
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_register_request(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	switch (bp->bio_cmd) {
	case BIO_READ:
		switch (sc->sc_balance) {
		case G_MIRROR_BALANCE_LOAD:
			g_mirror_request_load(sc, bp);
			break;
		case G_MIRROR_BALANCE_PREFER:
			g_mirror_request_prefer(sc, bp);
			break;
		case G_MIRROR_BALANCE_ROUND_ROBIN:
			g_mirror_request_round_robin(sc, bp);
			break;
		case G_MIRROR_BALANCE_SPLIT:
			g_mirror_request_split(sc, bp);
			break;
		}
		return;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		struct g_mirror_disk *disk;
		struct g_mirror_disk_sync *sync;
		struct bio_queue_head queue;
		struct g_consumer *cp;
		struct bio *cbp;

		/*
		 * Delay the request if it is colliding with a synchronization
		 * request.
		 */
		if (g_mirror_sync_collision(sc, bp)) {
			g_mirror_regular_delay(sc, bp);
			return;
		}

		if (sc->sc_idle)
			g_mirror_unidle(sc);
		else
			sc->sc_last_write = time_uptime;

		/*
		 * Allocate all bios before sending any request, so we can
		 * return ENOMEM in a nice and clean way.
		 */
		bioq_init(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			sync = &disk->d_sync;
			switch (disk->d_state) {
			case G_MIRROR_DISK_STATE_ACTIVE:
				break;
			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
				if (bp->bio_offset >= sync->ds_offset)
					continue;
				break;
			default:
				continue;
			}
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				for (cbp = bioq_first(&queue); cbp != NULL;
				    cbp = bioq_first(&queue)) {
					bioq_remove(&queue, cbp);
					g_destroy_bio(cbp);
				}
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			bioq_insert_tail(&queue, cbp);
			cbp->bio_done = g_mirror_done;
			cp = disk->d_consumer;
			cbp->bio_caller1 = cp;
			cbp->bio_to = cp->provider;
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
		}
		for (cbp = bioq_first(&queue); cbp != NULL;
		    cbp = bioq_first(&queue)) {
			bioq_remove(&queue, cbp);
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			cp = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp->index++;
			sc->sc_writes++;
			g_io_request(cbp, cp);
		}
		/*
		 * Put the request onto the inflight queue, so we can check
		 * whether new synchronization requests collide with it.
		 */
		bioq_insert_tail(&sc->sc_inflight, bp);
		/*
		 * Bump syncid on first write.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
			g_mirror_bump_syncid(sc);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

static int
g_mirror_can_destroy(struct g_mirror_softc *sc)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	gp = sc->sc_geom;
	if (gp->softc == NULL)
		return (1);
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	gp = sc->sc_sync.ds_geom;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
	    sc->sc_name);
	return (1);
}

static int
g_mirror_try_destroy(struct g_mirror_softc *sc)
{

	if (sc->sc_rootmount != NULL) {
		G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
		    sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}
	g_topology_lock();
	if (!g_mirror_can_destroy(sc)) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) {
		g_topology_unlock();
		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
		    &sc->sc_worker);
		/* Unlock sc_lock here, as it can be destroyed after wakeup. */
		sx_xunlock(&sc->sc_lock);
		wakeup(&sc->sc_worker);
		sc->sc_worker = NULL;
	} else {
		g_topology_unlock();
		g_mirror_destroy_device(sc);
		free(sc, M_MIRROR);
	}
	return (1);
}

/*
 * Worker thread.
 */
static void
g_mirror_worker(void *arg)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep;
	struct bio *bp;
	int timeout;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sx_xlock(&sc->sc_lock);
	for (;;) {
		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
		/*
		 * First take a look at events.
		 * This is important to handle events before any I/O requests.
		 */
		ep = g_mirror_event_get(sc);
		if (ep != NULL) {
			g_mirror_event_remove(sc, ep);
			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
				/* Update only device status. */
				G_MIRROR_DEBUG(3,
				    "Running event for device %s.",
				    sc->sc_name);
				ep->e_error = 0;
				g_mirror_update_device(sc, 1);
			} else {
				/* Update disk status. */
				G_MIRROR_DEBUG(3, "Running event for disk %s.",
				     g_mirror_get_diskname(ep->e_disk));
				ep->e_error = g_mirror_update_disk(ep->e_disk,
				    ep->e_state);
				if (ep->e_error == 0)
					g_mirror_update_device(sc, 0);
			}
			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
				KASSERT(ep->e_error == 0,
				    ("Error cannot be handled."));
				g_mirror_event_free(ep);
			} else {
				ep->e_flags |= G_MIRROR_EVENT_DONE;
				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
				    ep);
				mtx_lock(&sc->sc_events_mtx);
				wakeup(ep);
				mtx_unlock(&sc->sc_events_mtx);
			}
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
			}
			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
			continue;
		}
		/*
		 * Check if we can mark the array as CLEAN and, if we cannot,
		 * how many seconds we should wait.
		 */
		timeout = g_mirror_idle(sc, -1);
		/*
		 * Now I/O requests.
		 */
		/* Get first request from the queue. */
		mtx_lock(&sc->sc_queue_mtx);
		bp = bioq_first(&sc->sc_queue);
		if (bp == NULL) {
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				mtx_unlock(&sc->sc_queue_mtx);
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
				mtx_lock(&sc->sc_queue_mtx);
			}
			sx_xunlock(&sc->sc_lock);
			/*
			 * XXX: We can miss an event here, because an event
			 *      can be added without the sx-device-lock and
			 *      without the mtx-queue-lock. Maybe I should just
			 *      stop using a dedicated mutex for event
			 *      synchronization and stick with the queue lock?
			 *      The event will hang here until the next I/O
			 *      request or the next event arrives.
			 */
			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1",
			    timeout * hz);
			sx_xlock(&sc->sc_lock);
			G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
			continue;
		}
		bioq_remove(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
		    (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
			g_mirror_sync_request(bp);	/* READ */
		} else if (bp->bio_to != sc->sc_provider) {
			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0)
				g_mirror_regular_request(bp);
			else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
				g_mirror_sync_request(bp);	/* WRITE */
			else {
				KASSERT(0,
				    ("Invalid request cflags=0x%hhx to=%s.",
				    bp->bio_cflags, bp->bio_to->name));
			}
		} else {
			g_mirror_register_request(bp);
		}
		G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
	}
}
1867
static void
g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{

	sx_assert(&sc->sc_lock, SX_LOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	} else if (sc->sc_idle &&
	    (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	}
}

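/*
 * Start synchronization of a disk: attach a fresh consumer from the
 * synchronization geom to the mirror provider and fire off the initial
 * window of parallel read requests.
 */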
static void
g_mirror_sync_start(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct bio *bp;
	int error, i;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Disk %s is not marked for synchronization.",
	    g_mirror_get_diskname(disk)));
	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
	    sc->sc_state));

	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	cp = g_new_consumer(sc->sc_sync.ds_geom);
	error = g_attach(cp, sc->sc_provider);
	KASSERT(error == 0,
	    ("Cannot attach to %s (error=%d).", sc->sc_name, error));
	error = g_access(cp, 1, 0, 0);
	KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);

	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
	    g_mirror_get_diskname(disk));
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	KASSERT(disk->d_sync.ds_consumer == NULL,
	    ("Sync consumer already exists (device=%s, disk=%s).",
	    sc->sc_name, g_mirror_get_diskname(disk)));

	disk->d_sync.ds_consumer = cp;
	disk->d_sync.ds_consumer->private = disk;
	disk->d_sync.ds_consumer->index = 0;

	/*
	 * Allocate memory for synchronization bios and initialize them.
	 */
	disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs,
	    M_MIRROR, M_WAITOK);
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = g_alloc_bio();
		disk->d_sync.ds_bios[i] = bp;
		bp->bio_parent = NULL;
		bp->bio_cmd = BIO_READ;
		bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
		bp->bio_cflags = 0;
		bp->bio_offset = disk->d_sync.ds_offset;
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		disk->d_sync.ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_from = disk->d_sync.ds_consumer;
		bp->bio_to = sc->sc_provider;
		bp->bio_caller1 = (void *)(uintptr_t)i;
	}
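	/*
	 * The loop above set up a window of g_mirror_syncreqs parallel
	 * requests over consecutive offsets.  As an illustration only
	 * (assuming MAXPHYS is 128kB and g_mirror_syncreqs is 2), the two
	 * bios initially cover offsets 0 and 128kB; as each read completes
	 * and its data has been written to the disk being rebuilt, the bio
	 * is reissued at the next unread offset, sliding the window forward.
	 */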

	/* Increase the number of disks in SYNCHRONIZING state. */
	sc->sc_sync.ds_ndisks++;
	/* Set the number of in-flight synchronization requests. */
	disk->d_sync.ds_inflight = g_mirror_syncreqs;

	/*
	 * Fire off first synchronization requests.
	 */
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = disk->d_sync.ds_bios[i];
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		disk->d_sync.ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, disk->d_sync.ds_consumer);
	}
}

/*
 * Stop synchronization process.
 * type: 0 - synchronization finished
 *       1 - synchronization stopped
 */
static void
g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
	    g_mirror_disk_state2str(disk->d_state)));
	if (disk->d_sync.ds_consumer == NULL)
		return;

	if (type == 0) {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	} else /* if (type == 1) */ {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	}
	free(disk->d_sync.ds_bios, M_MIRROR);
	disk->d_sync.ds_bios = NULL;
	cp = disk->d_sync.ds_consumer;
	disk->d_sync.ds_consumer = NULL;
	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	sc->sc_sync.ds_ndisks--;
	sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
	g_topology_lock();
	g_mirror_kill_consumer(sc, cp);
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
}

static void
g_mirror_launch_provider(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_provider *pp;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_topology_lock();
	pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name);
	pp->mediasize = sc->sc_mediasize;
	pp->sectorsize = sc->sc_sectorsize;
	pp->stripesize = 0;
	pp->stripeoffset = 0;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_consumer && disk->d_consumer->provider &&
		    disk->d_consumer->provider->stripesize > pp->stripesize) {
			pp->stripesize = disk->d_consumer->provider->stripesize;
			pp->stripeoffset = disk->d_consumer->provider->stripeoffset;
		}
	}
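	/*
	 * The loop above makes the mirror advertise the largest stripe
	 * geometry found among its components, so consumers stacked on top
	 * of the mirror can align their I/O to it.
	 */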
	sc->sc_provider = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
	    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			g_mirror_sync_start(disk);
	}
}

static void
g_mirror_destroy_provider(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct bio *bp;

	g_topology_assert_not();
	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
	    sc->sc_name));

	g_topology_lock();
	g_error_provider(sc->sc_provider, ENXIO);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
		bioq_remove(&sc->sc_queue, bp);
		g_io_deliver(bp, ENXIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
	    sc->sc_provider->name);
	sc->sc_provider->flags |= G_PF_WITHER;
	g_orphan_provider(sc->sc_provider, ENXIO);
	g_topology_unlock();
	sc->sc_provider = NULL;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			g_mirror_sync_stop(disk, 1);
	}
}

static void
g_mirror_go(void *arg)
{
	struct g_mirror_softc *sc;

	sc = arg;
	G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
	g_mirror_event_send(sc, 0,
	    G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE);
}

static u_int
g_mirror_determine_state(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	u_int state;

	sc = disk->d_softc;
	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
		if ((disk->d_flags &
		    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
			/* Disk does not need synchronization. */
			state = G_MIRROR_DISK_STATE_ACTIVE;
		} else {
			if ((sc->sc_flags &
			     G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
			    (disk->d_flags &
			     G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
				/*
				 * We can start synchronization from
				 * the stored offset.
				 */
				state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
			} else {
				state = G_MIRROR_DISK_STATE_STALE;
			}
		}
	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
		/*
		 * Reset all synchronization data for this disk,
		 * because even if it was synchronized, it was
		 * synchronized against disks with a different syncid.
		 */
		disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
		disk->d_sync.ds_offset = 0;
		disk->d_sync.ds_offset_done = 0;
		disk->d_sync.ds_syncid = sc->sc_syncid;
		if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
		    (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
			state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
		} else {
			state = G_MIRROR_DISK_STATE_STALE;
		}
	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
		/*
		 * Not good, NOT GOOD!
		 * It means that the mirror was started on stale disks
		 * and a fresher disk has just arrived.
		 * If there were writes, the mirror is broken.
		 * The best choice here is to leave this disk alone and
		 * inform the user loudly.
		 */
		G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
		    "disk (%s) arrived! It will not be connected to the "
		    "running device.", sc->sc_name,
		    g_mirror_get_diskname(disk));
		g_mirror_destroy_disk(disk);
		state = G_MIRROR_DISK_STATE_NONE;
		/* Return immediately, because the disk was destroyed. */
		return (state);
	}
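	/*
	 * To summarize the three cases above:
	 * - disk syncid == device syncid: ACTIVE, or SYNCHRONIZING/STALE
	 *   if the disk still carries the SYNCHRONIZING flag;
	 * - disk syncid <  device syncid: SYNCHRONIZING from offset 0
	 *   (or STALE with NOAUTOSYNC and no FORCE_SYNC);
	 * - disk syncid >  device syncid: the running mirror is older than
	 *   the disk; the disk is destroyed and NONE is returned.
	 */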
	G_MIRROR_DEBUG(3, "State for %s disk: %s.",
	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
	return (state);
}

/*
 * Update device state.
 */
static void
g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force)
{
	struct g_mirror_disk *disk;
	u_int state;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	switch (sc->sc_state) {
	case G_MIRROR_DEVICE_STATE_STARTING:
	    {
		struct g_mirror_disk *pdisk, *tdisk;
		u_int dirty, ndisks, genid, syncid;

		KASSERT(sc->sc_provider == NULL,
		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
		/*
		 * Are we ready? We are, if all disks are connected or
		 * if we have any disks and 'force' is true.
		 */
		ndisks = g_mirror_ndisks(sc, -1);
		if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) {
			;
		} else if (ndisks == 0) {
			/*
			 * Disks went down in starting phase, so destroy
			 * device.
			 */
			callout_drain(&sc->sc_callout);
			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
			G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
			    sc->sc_rootmount);
			root_mount_rel(sc->sc_rootmount);
			sc->sc_rootmount = NULL;
			return;
		} else {
			return;
		}

		/*
		 * Activate all disks with the biggest syncid.
		 */
		if (force) {
			/*
			 * If 'force' is true, we have been called due to
			 * timeout, so don't bother canceling timeout.
			 */
			ndisks = 0;
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
					ndisks++;
				}
			}
			if (ndisks == 0) {
				/* No valid disks found, destroy device. */
				sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
				    __LINE__, sc->sc_rootmount);
				root_mount_rel(sc->sc_rootmount);
				sc->sc_rootmount = NULL;
				return;
			}
		} else {
			/* Cancel timeout. */
			callout_drain(&sc->sc_callout);
		}

		/*
		 * Find the biggest genid.
		 */
		genid = 0;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_genid > genid)
				genid = disk->d_genid;
		}
		sc->sc_genid = genid;
		/*
		 * Remove all disks without the biggest genid.
		 */
		LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) {
			if (disk->d_genid < genid) {
				G_MIRROR_DEBUG(0,
				    "Component %s (device %s) broken, skipping.",
				    g_mirror_get_diskname(disk), sc->sc_name);
				g_mirror_destroy_disk(disk);
			}
		}
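		/*
		 * Note the distinction: an old syncid only means the
		 * component missed some writes and can be resynchronized,
		 * while an old genid marks a component that was dropped
		 * after I/O errors; such components are removed above
		 * rather than rebuilt.
		 */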

		/*
		 * Find the biggest syncid.
		 */
		syncid = 0;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_sync.ds_syncid > syncid)
				syncid = disk->d_sync.ds_syncid;
		}

		/*
		 * Here we need to look for dirty disks: if all disks
		 * with the biggest syncid are dirty, we have to choose
		 * the one with the biggest priority and rebuild the rest.
		 *
		 * Find the number of dirty disks with the biggest syncid,
		 * the number of disks with the biggest syncid and, while
		 * here, a disk with the biggest priority.
		 */
		dirty = ndisks = 0;
		pdisk = NULL;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_sync.ds_syncid != syncid)
				continue;
			if ((disk->d_flags &
			    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
				continue;
			}
			ndisks++;
			if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
				dirty++;
				if (pdisk == NULL ||
				    pdisk->d_priority < disk->d_priority) {
					pdisk = disk;
				}
			}
		}
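		/*
		 * Example (illustrative): with three components at the
		 * newest syncid, all marked dirty and with priorities 0, 1
		 * and 2, the priority-2 component becomes pdisk and the
		 * other two are forced to resynchronize from scratch
		 * (ds_syncid = 0) in the dirty == ndisks branch below.
		 */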
		if (dirty == 0) {
			/* No dirty disks at all, great. */
		} else if (dirty == ndisks) {
			/*
			 * Force synchronization for all dirty disks except one
			 * with the biggest priority.
			 */
			KASSERT(pdisk != NULL, ("pdisk == NULL"));
			G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a "
			    "master disk for synchronization.",
			    g_mirror_get_diskname(pdisk), sc->sc_name);
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_sync.ds_syncid != syncid)
					continue;
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
					continue;
				}
				KASSERT((disk->d_flags &
				    G_MIRROR_DISK_FLAG_DIRTY) != 0,
				    ("Disk %s isn't marked as dirty.",
				    g_mirror_get_diskname(disk)));
				/* Skip the disk with the biggest priority. */
				if (disk == pdisk)
					continue;
				disk->d_sync.ds_syncid = 0;
			}
		} else if (dirty < ndisks) {
			/*
			 * Force synchronization for all dirty disks.
			 * We have some non-dirty disks.
			 */
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_sync.ds_syncid != syncid)
					continue;
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
					continue;
				}
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_DIRTY) == 0) {
					continue;
				}
				disk->d_sync.ds_syncid = 0;
			}
		}

		/* Reset hint. */
		sc->sc_hint = NULL;
		sc->sc_syncid = syncid;
		if (force) {
			/* Remember to bump syncid on first write. */
			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
		}
		state = G_MIRROR_DEVICE_STATE_RUNNING;
		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
		    sc->sc_name, g_mirror_device_state2str(sc->sc_state),
		    g_mirror_device_state2str(state));
		sc->sc_state = state;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			state = g_mirror_determine_state(disk);
			g_mirror_event_send(disk, state,
			    G_MIRROR_EVENT_DONTWAIT);
			if (state == G_MIRROR_DISK_STATE_STALE)
				sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
		}
		break;
	    }
	case G_MIRROR_DEVICE_STATE_RUNNING:
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
			/*
			 * No active disks or no disks at all,
			 * so destroy device.
			 */
			if (sc->sc_provider != NULL)
				g_mirror_destroy_provider(sc);
			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
			break;
		} else if (g_mirror_ndisks(sc,
		    G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
			/*
			 * We have active disks, launch provider if it doesn't
			 * exist.
			 */
			if (sc->sc_provider == NULL)
				g_mirror_launch_provider(sc);
			if (sc->sc_rootmount != NULL) {
				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
				    __LINE__, sc->sc_rootmount);
				root_mount_rel(sc->sc_rootmount);
				sc->sc_rootmount = NULL;
			}
		}
		/*
		 * Genid should be bumped immediately, so do it here.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID;
			g_mirror_bump_genid(sc);
		}
		break;
	default:
		KASSERT(1 == 0, ("Wrong device state (%s, %s).",
		    sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
		break;
	}
}

/*
 * Update disk state and device state if needed.
 */
#define	DISK_STATE_CHANGED()	G_MIRROR_DEBUG(1,			\
	"Disk %s state changed from %s to %s (device %s).",		\
	g_mirror_get_diskname(disk),					\
	g_mirror_disk_state2str(disk->d_state),				\
	g_mirror_disk_state2str(state), sc->sc_name)
static int
g_mirror_update_disk(struct g_mirror_disk *disk, u_int state)
{
	struct g_mirror_softc *sc;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

again:
	G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.",
	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state),
	    g_mirror_disk_state2str(state));
	switch (state) {
	case G_MIRROR_DISK_STATE_NEW:
		/*
		 * Possible scenarios:
		 * 1. A new disk arrives.
		 */
		/* Previous state should be NONE. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		disk->d_state = state;
		if (LIST_EMPTY(&sc->sc_disks))
			LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next);
		else {
			struct g_mirror_disk *dp;

			LIST_FOREACH(dp, &sc->sc_disks, d_next) {
				if (disk->d_priority >= dp->d_priority) {
					LIST_INSERT_BEFORE(dp, disk, d_next);
					dp = NULL;
					break;
				}
				if (LIST_NEXT(dp, d_next) == NULL)
					break;
			}
			if (dp != NULL)
				LIST_INSERT_AFTER(dp, disk, d_next);
		}
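		/*
		 * The insertion above keeps sc_disks sorted by descending
		 * d_priority, so balance algorithms that prefer a
		 * higher-priority component can simply scan the list in
		 * order.
		 */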
		G_MIRROR_DEBUG(1, "Device %s: provider %s detected.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
			break;
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		state = g_mirror_determine_state(disk);
		if (state != G_MIRROR_DISK_STATE_NONE)
			goto again;
		break;
	case G_MIRROR_DISK_STATE_ACTIVE:
		/*
		 * Possible scenarios:
		 * 1. New disk does not need synchronization.
		 * 2. Synchronization process finished successfully.
		 */
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		/* Previous state should be NEW or SYNCHRONIZING. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING;
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC;
			g_mirror_sync_stop(disk, 0);
		}
		disk->d_state = state;
		disk->d_sync.ds_offset = 0;
		disk->d_sync.ds_offset_done = 0;
		g_mirror_update_idle(sc, disk);
		g_mirror_update_metadata(disk);
		G_MIRROR_DEBUG(1, "Device %s: provider %s activated.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		break;
	case G_MIRROR_DISK_STATE_STALE:
		/*
		 * Possible scenarios:
		 * 1. Stale disk was connected.
		 */
		/* Previous state should be NEW. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		/*
		 * The STALE state is only possible if the device is
		 * marked NOAUTOSYNC.
		 */
		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		disk->d_state = state;
		g_mirror_update_metadata(disk);
		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		break;
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		/*
		 * Possible scenarios:
		 * 1. Disk which needs synchronization was connected.
		 */
		/* Previous state should be NEW. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		disk->d_state = state;
		if (sc->sc_provider != NULL) {
			g_mirror_sync_start(disk);
			g_mirror_update_metadata(disk);
		}
		break;
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		/*
		 * Possible scenarios:
		 * 1. Device wasn't running yet, but a disk disappeared.
		 * 2. Disk was active and disappeared.
		 * 3. Disk disappeared during the synchronization process.
		 */
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
			/*
			 * Previous state should be ACTIVE, STALE or
			 * SYNCHRONIZING.
			 */
			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
			    disk->d_state == G_MIRROR_DISK_STATE_STALE ||
			    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
			    ("Wrong disk state (%s, %s).",
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
		} else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
			/* Previous state should be NEW. */
			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
			    ("Wrong disk state (%s, %s).",
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
			/*
			 * Reset bumping syncid if disk disappeared in STARTING
			 * state.
			 */
			if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0)
				sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
#ifdef	INVARIANTS
		} else {
			KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
			    sc->sc_name,
			    g_mirror_device_state2str(sc->sc_state),
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
#endif
		}
		DISK_STATE_CHANGED();
		G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.",
		    sc->sc_name, g_mirror_get_diskname(disk));

		g_mirror_destroy_disk(disk);
		break;
	case G_MIRROR_DISK_STATE_DESTROY:
	    {
		int error;

		error = g_mirror_clear_metadata(disk);
		if (error != 0)
			return (error);
		DISK_STATE_CHANGED();
		G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.",
		    sc->sc_name, g_mirror_get_diskname(disk));

		g_mirror_destroy_disk(disk);
		sc->sc_ndisks--;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			g_mirror_update_metadata(disk);
		}
		break;
	    }
	default:
		KASSERT(1 == 0, ("Unknown state (%u).", state));
		break;
	}
	return (0);
}
#undef	DISK_STATE_CHANGED

int
g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	/* Metadata is stored in the last sector. */
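	/*
	 * For example (illustrative), with 512-byte sectors and a
	 * mediasize of 1048576 bytes this reads the single sector at
	 * offset 1048576 - 512 = 1048064.
	 */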
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL) {
		G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).",
		    cp->provider->name, error);
		return (error);
	}

	/* Decode metadata. */
	error = mirror_metadata_decode(buf, md);
	g_free(buf);
	if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0)
		return (EINVAL);
	if (md->md_version > G_MIRROR_VERSION) {
		G_MIRROR_DEBUG(0,
		    "Kernel module is too old to handle metadata from %s.",
		    cp->provider->name);
		return (EINVAL);
	}
	if (error != 0) {
		G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
		    cp->provider->name);
		return (error);
	}

	return (0);
}

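/*
 * Verify that metadata read from a component are consistent with the
 * already-configured device: same number of components, slice size,
 * balance algorithm, media and sector size, and no unknown flags.
 */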
static int
g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md)
{

	if (g_mirror_id2disk(sc, md->md_did) != NULL) {
		G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.",
		    pp->name, md->md_did);
		return (EEXIST);
	}
	if (md->md_all != sc->sc_ndisks) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_all", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if (md->md_slice != sc->sc_slice) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_slice", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if (md->md_balance != sc->sc_balance) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_balance", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if (md->md_mediasize != sc->sc_mediasize) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_mediasize", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if (sc->sc_mediasize > pp->mediasize) {
		G_MIRROR_DEBUG(1,
		    "Invalid size of disk %s (device %s), skipping.", pp->name,
		    sc->sc_name);
		return (EINVAL);
	}
	if (md->md_sectorsize != sc->sc_sectorsize) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_sectorsize", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
		G_MIRROR_DEBUG(1,
		    "Invalid sector size of disk %s (device %s), skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) {
		G_MIRROR_DEBUG(1,
		    "Invalid device flags on disk %s (device %s), skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) {
		G_MIRROR_DEBUG(1,
		    "Invalid disk flags on disk %s (device %s), skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	return (0);
}

int
g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md)
{
	struct g_mirror_disk *disk;
	int error;

	g_topology_assert_not();
	G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name);

	error = g_mirror_check_metadata(sc, pp, md);
	if (error != 0)
		return (error);
	if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING &&
	    md->md_genid < sc->sc_genid) {
		G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	disk = g_mirror_init_disk(sc, pp, md, &error);
	if (disk == NULL)
		return (error);
	error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW,
	    G_MIRROR_EVENT_WAIT);
	if (error != 0)
		return (error);
	if (md->md_version < G_MIRROR_VERSION) {
		G_MIRROR_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
		    pp->name, md->md_version, G_MIRROR_VERSION);
		g_mirror_update_metadata(disk);
	}
	return (0);
}

static void
g_mirror_destroy_delayed(void *arg, int flag)
{
	struct g_mirror_softc *sc;
	int error;

	if (flag == EV_CANCEL) {
		G_MIRROR_DEBUG(1, "Destroying canceled.");
		return;
	}
	sc = arg;
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
	KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0,
	    ("DESTROY flag set on %s.", sc->sc_name));
	KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0,
	    ("DESTROYING flag not set on %s.", sc->sc_name));
	G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name);
	error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT);
	if (error != 0) {
		G_MIRROR_DEBUG(0, "Cannot destroy %s.", sc->sc_name);
		sx_xunlock(&sc->sc_lock);
	}
	g_topology_lock();
}

static int
g_mirror_access(struct g_provider *pp, int acr, int acw, int ace)
{
	struct g_mirror_softc *sc;
	int dcr, dcw, dce, error = 0;

	g_topology_assert();
	G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
	    acw, ace);

	sc = pp->geom->softc;
	if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0)
		return (0);
	KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));

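	/*
	 * acr/acw/ace are deltas; compute the access counts the provider
	 * will have once this request is granted.
	 */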
	dcr = pp->acr + acr;
	dcw = pp->acw + acw;
	dce = pp->ace + ace;

	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 ||
	    LIST_EMPTY(&sc->sc_disks)) {
		if (acr > 0 || acw > 0 || ace > 0)
			error = ENXIO;
		goto end;
	}
	if (dcw == 0 && !sc->sc_idle)
		g_mirror_idle(sc, dcw);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0) {
		if (acr > 0 || acw > 0 || ace > 0) {
			error = ENXIO;
			goto end;
		}
		if (dcr == 0 && dcw == 0 && dce == 0) {
			g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK,
			    sc, NULL);
		}
	}
end:
	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (error);
}

static struct g_geom *
g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_geom *gp;
	int error, timeout;

	g_topology_assert();
	G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name,
	    md->md_mid);

	/* One disk is minimum. */
	if (md->md_all < 1)
		return (NULL);
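	/*
	 * Each mirror uses two geoms: the "action" geom created below,
	 * which owns the mirror provider and the consumers attached to the
	 * components, and a separate synchronization geom whose consumers
	 * read from the mirror provider while a component is being rebuilt.
	 */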
	/*
	 * Action geom.
	 */
	gp = g_new_geomf(mp, "%s", md->md_name);
	sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO);
	gp->start = g_mirror_start;
	gp->orphan = g_mirror_orphan;
	gp->access = g_mirror_access;
	gp->dumpconf = g_mirror_dumpconf;

	sc->sc_id = md->md_mid;
	sc->sc_slice = md->md_slice;
	sc->sc_balance = md->md_balance;
	sc->sc_mediasize = md->md_mediasize;
	sc->sc_sectorsize = md->md_sectorsize;
	sc->sc_ndisks = md->md_all;
	sc->sc_flags = md->md_mflags;
	sc->sc_bump_id = 0;
	sc->sc_idle = 1;
	sc->sc_last_write = time_uptime;
	sc->sc_writes = 0;
	sx_init(&sc->sc_lock, "gmirror:lock");
	bioq_init(&sc->sc_queue);
	mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF);
	bioq_init(&sc->sc_regular_delayed);
	bioq_init(&sc->sc_inflight);
	bioq_init(&sc->sc_sync_delayed);
	LIST_INIT(&sc->sc_disks);
	TAILQ_INIT(&sc->sc_events);
	mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF);
	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
	sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING;
	gp->softc = sc;
	sc->sc_geom = gp;
	sc->sc_provider = NULL;
	/*
	 * Synchronization geom.
	 */
	gp = g_new_geomf(mp, "%s.sync", md->md_name);
	gp->softc = sc;
	gp->orphan = g_mirror_orphan;
	sc->sc_sync.ds_geom = gp;
	sc->sc_sync.ds_ndisks = 0;
	error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0,
	    "g_mirror %s", md->md_name);
	if (error != 0) {
		G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.",
		    sc->sc_name);
		g_destroy_geom(sc->sc_sync.ds_geom);
		mtx_destroy(&sc->sc_events_mtx);
		mtx_destroy(&sc->sc_queue_mtx);
		sx_destroy(&sc->sc_lock);
		g_destroy_geom(sc->sc_geom);
		free(sc, M_MIRROR);
		return (NULL);
	}

	G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).",
	    sc->sc_name, sc->sc_ndisks, sc->sc_id);

	sc->sc_rootmount = root_mount_hold("GMIRROR");
	G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
	/*
	 * Run timeout.
	 */
	timeout = g_mirror_timeout * hz;
	callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc);
	return (sc->sc_geom);
}

int
g_mirror_destroy(struct g_mirror_softc *sc, int how)
{
	struct g_mirror_disk *disk;
	struct g_provider *pp;

	g_topology_assert_not();
	if (sc == NULL)
		return (ENXIO);
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	pp = sc->sc_provider;
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		switch (how) {
		case G_MIRROR_DESTROY_SOFT:
			G_MIRROR_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		case G_MIRROR_DESTROY_DELAYED:
			G_MIRROR_DEBUG(1,
			    "Device %s will be destroyed on last close.",
			    pp->name);
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_state ==
				    G_MIRROR_DISK_STATE_SYNCHRONIZING) {
					g_mirror_sync_stop(disk, 1);
				}
			}
			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROYING;
			return (EBUSY);
		case G_MIRROR_DESTROY_HARD:
			G_MIRROR_DEBUG(1, "Device %s is still open, so it "
			    "cannot be removed definitively.", pp->name);
		}
	}

	g_topology_lock();
	if (sc->sc_geom->softc == NULL) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	g_topology_unlock();

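	/*
	 * Ask the worker thread to finish: set the DESTROY and WAIT flags,
	 * wake the worker, then sleep until it clears sc_worker on its way
	 * out.
	 */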
	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT;
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	sx_xunlock(&sc->sc_lock);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
	while (sc->sc_worker != NULL)
		tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5);
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
	sx_xlock(&sc->sc_lock);
	g_mirror_destroy_device(sc);
	free(sc, M_MIRROR);
	return (0);
}

static void
g_mirror_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}

static struct g_geom *
g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_mirror_metadata md;
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);

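	/*
	 * Create a throwaway geom and consumer just to read the on-disk
	 * metadata; both are destroyed again right after
	 * g_mirror_read_metadata() returns.
	 */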
	gp = g_new_geomf(mp, "mirror:taste");
	/*
	 * This orphan function should never be called.
	 */
	gp->orphan = g_mirror_taste_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_mirror_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (md.md_provider[0] != '\0' &&
	    !g_compare_names(md.md_provider, pp->name))
		return (NULL);
	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
		return (NULL);
	if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
		G_MIRROR_DEBUG(0,
		    "Device %s: provider %s marked as inactive, skipping.",
		    md.md_name, pp->name);
		return (NULL);
	}
	if (g_mirror_debug >= 2)
		mirror_metadata_dump(&md);

	/*
	 * Let's check if the device already exists.
	 */
	sc = NULL;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_sync.ds_geom == gp)
			continue;
		if (strcmp(md.md_name, sc->sc_name) != 0)
			continue;
		if (md.md_mid != sc->sc_id) {
			G_MIRROR_DEBUG(0, "Device %s already configured.",
			    sc->sc_name);
			return (NULL);
		}
		break;
	}
	if (gp == NULL) {
		gp = g_mirror_create(mp, &md);
		if (gp == NULL) {
			G_MIRROR_DEBUG(0, "Cannot create device %s.",
			    md.md_name);
			return (NULL);
		}
		sc = gp->softc;
	}
	G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
	error = g_mirror_add_disk(sc, pp, &md);
	if (error != 0) {
		G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
		    pp->name, gp->name, error);
		if (LIST_EMPTY(&sc->sc_disks)) {
			g_cancel_event(sc);
			g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD);
			g_topology_lock();
			return (NULL);
		}
		gp = NULL;
	}
	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (gp);
}

static int
g_mirror_destroy_geom(struct gctl_req *req __unused,
    struct g_class *mp __unused, struct g_geom *gp)
{
	struct g_mirror_softc *sc;
	int error;

	g_topology_unlock();
	sc = gp->softc;
	sx_xlock(&sc->sc_lock);
	g_cancel_event(sc);
	error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT);
	if (error != 0)
		sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (error);
}

static void
g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_mirror_softc *sc;

	g_topology_assert();

	sc = gp->softc;
	if (sc == NULL)
		return;
	/* Skip synchronization geom. */
	if (gp == sc->sc_sync.ds_geom)
		return;
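	/*
	 * Three cases below: a provider (nothing extra to report), a
	 * consumer (per-component information) or the geom itself
	 * (device-wide information).
	 */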
	if (pp != NULL) {
		/* Nothing here. */
	} else if (cp != NULL) {
		struct g_mirror_disk *disk;

		disk = cp->private;
		if (disk == NULL)
			return;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			sbuf_printf(sb, "%s<Synchronized>", indent);
			if (disk->d_sync.ds_offset == 0)
				sbuf_printf(sb, "0%%");
			else {
				sbuf_printf(sb, "%u%%",
				    (u_int)((disk->d_sync.ds_offset * 100) /
				    sc->sc_provider->mediasize));
			}
			sbuf_printf(sb, "</Synchronized>\n");
		}
		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
		    disk->d_sync.ds_syncid);
		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent,
		    disk->d_genid);
		sbuf_printf(sb, "%s<Flags>", indent);
		if (disk->d_flags == 0)
			sbuf_printf(sb, "NONE");
		else {
			int first = 1;

#define	ADD_FLAG(flag, name)	do {					\
	if ((disk->d_flags & (flag)) != 0) {				\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
			ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY");
			ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED");
			ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE");
			ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING,
			    "SYNCHRONIZING");
			ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
			ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN");
#undef	ADD_FLAG
		}
		sbuf_printf(sb, "</Flags>\n");
		sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent,
		    disk->d_priority);
		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
		    g_mirror_disk_state2str(disk->d_state));
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	} else {
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
		sbuf_printf(sb, "%s<Flags>", indent);
		if (sc->sc_flags == 0)
			sbuf_printf(sb, "NONE");
		else {
			int first = 1;

#define	ADD_FLAG(flag, name)	do {					\
	if ((sc->sc_flags & (flag)) != 0) {				\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
#undef	ADD_FLAG
		}
		sbuf_printf(sb, "</Flags>\n");
		sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent,
		    (u_int)sc->sc_slice);
		sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent,
		    balance_name(sc->sc_balance));
		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
		    sc->sc_ndisks);
		sbuf_printf(sb, "%s<State>", indent);
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
			sbuf_printf(sb, "%s", "STARTING");
		else if (sc->sc_ndisks ==
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE))
			sbuf_printf(sb, "%s", "COMPLETE");
		else
			sbuf_printf(sb, "%s", "DEGRADED");
		sbuf_printf(sb, "</State>\n");
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	}
}

static void
g_mirror_shutdown_pre_sync(void *arg, int howto)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_mirror_softc *sc;
	int error;

	mp = arg;
	DROP_GIANT();
	g_topology_lock();
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		if ((sc = gp->softc) == NULL)
			continue;
		/* Skip synchronization geom. */
		if (gp == sc->sc_sync.ds_geom)
			continue;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		g_cancel_event(sc);
		error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED);
		if (error != 0)
			sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	}
	g_topology_unlock();
	PICKUP_GIANT();
}

static void
g_mirror_init(struct g_class *mp)
{

	g_mirror_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
	    g_mirror_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
	if (g_mirror_pre_sync == NULL)
		G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event.");
}

static void
g_mirror_fini(struct g_class *mp)
{

	if (g_mirror_pre_sync != NULL)
		EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_mirror_pre_sync);
}

DECLARE_GEOM_CLASS(g_mirror_class, g_mirror);