/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/geom/mirror/g_mirror.c 334611 2018-06-04 14:13:04Z markj $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/eventhandler.h>
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <geom/geom.h>
#include <geom/mirror/g_mirror.h>

FEATURE(geom_mirror, "GEOM mirroring support");

static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0,
    "GEOM_MIRROR stuff");
int g_mirror_debug = 0;
SYSCTL_INT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RWTUN, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_timeout = 4;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
static u_int g_mirror_idletime = 5;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RWTUN,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
static u_int g_mirror_disconnect_on_failure = 1;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN,
    &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_mirror_syncreqs = 2;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests.");
static u_int g_mirror_sync_period = 5;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_update_period, CTLFLAG_RWTUN,
    &g_mirror_sync_period, 0,
    "Metadata update period during synchronization, in seconds");

#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)

static eventhandler_tag g_mirror_post_sync = NULL;
static int g_mirror_shutdown = 0;

static g_ctl_destroy_geom_t g_mirror_destroy_geom;
static g_taste_t g_mirror_taste;
static g_init_t g_mirror_init;
static g_fini_t g_mirror_fini;
static g_provgone_t g_mirror_providergone;
static g_resize_t g_mirror_resize;

struct g_class g_mirror_class = {
	.name = G_MIRROR_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_mirror_config,
	.taste = g_mirror_taste,
	.destroy_geom = g_mirror_destroy_geom,
	.init = g_mirror_init,
	.fini = g_mirror_fini,
	.providergone = g_mirror_providergone,
	.resize = g_mirror_resize
};

static void g_mirror_destroy_provider(struct g_mirror_softc *sc);
static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
static void g_mirror_update_device(struct g_mirror_softc *sc, bool force);
static void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_mirror_sync_reinit(const struct g_mirror_disk *disk,
    struct bio *bp, off_t offset);
static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type);
static void g_mirror_register_request(struct g_mirror_softc *sc,
    struct bio *bp);
static void g_mirror_sync_release(struct g_mirror_softc *sc);

static const char *
g_mirror_disk_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DISK_STATE_NONE:
		return ("NONE");
	case G_MIRROR_DISK_STATE_NEW:
		return ("NEW");
	case G_MIRROR_DISK_STATE_ACTIVE:
		return ("ACTIVE");
	case G_MIRROR_DISK_STATE_STALE:
		return ("STALE");
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		return ("SYNCHRONIZING");
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		return ("DISCONNECTED");
	case G_MIRROR_DISK_STATE_DESTROY:
		return ("DESTROY");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_device_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DEVICE_STATE_STARTING:
		return ("STARTING");
	case G_MIRROR_DEVICE_STATE_RUNNING:
		return ("RUNNING");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_get_diskname(struct g_mirror_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_name);
}

/*
 * --- Event handling functions ---
 * Events in geom_mirror are used to maintain disk and device status
 * from a single thread, which simplifies locking.
 */
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

	free(ep, M_MIRROR);
}

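/*
 * Post an event to the worker thread.  The event is appended to the event
 * queue and the worker is woken up.  Unless G_MIRROR_EVENT_DONTWAIT is set,
 * sleep until the worker has processed the event and return its error status.
 */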
int
g_mirror_event_send(void *arg, int state, int flags)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
		return (0);
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_mirror_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}

static struct g_mirror_event *
g_mirror_event_first(struct g_mirror_softc *sc)
{
	struct g_mirror_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_mirror_event_cancel(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_mirror_ndisks(struct g_mirror_softc *sc, int state)
{
	struct g_mirror_disk *disk;
	u_int n = 0;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (state == -1 || disk->d_state == state)
			n++;
	}
	return (n);
}

/*
 * Find a disk in the mirror by its disk ID.
 */
static struct g_mirror_disk *
g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
{
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_id == id)
			return (disk);
	}
	return (NULL);
}

static u_int
g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

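/*
 * Return non-zero if there are any I/O requests outstanding or queued for
 * the given consumer, in which case it cannot be destroyed yet.
 */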
static int
g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_mirror_nrequests(sc, cp) > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event is sent (inside g_access()), we can
		 * post an event to detach and destroy the consumer.
		 * A class which already has a consumer attached to the given
		 * provider will not receive a retaste event for that provider.
		 * This is how retaste events are ignored when consumers opened
		 * for writing are closed: the consumer is detached and
		 * destroyed after the retaste event is sent.
		 */
		g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

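/*
 * Create a consumer for the disk on its provider, attach it and open it
 * with r1w1e1 access.
 */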
static int
g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	cp->flags |= G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_topology_unlock();
		G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	g_topology_unlock();
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;

	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
	return (0);
}

static void
g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_mirror_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize the disk: allocate memory, create a consumer, attach it to the
 * provider and open access (r1w1e1) to it.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int i, error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	disk->d_id = md->md_did;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_priority = md->md_priority;
	disk->d_flags = md->md_dflags;
	error = g_getattr("GEOM::candelete", disk->d_consumer, &i);
	if (error == 0 && i != 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_CANDELETE;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_sync.ds_update_ts = time_uptime;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL)
		free(disk, M_MIRROR);
	return (NULL);
}

static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	g_topology_lock();
	LIST_REMOVE(disk, d_next);
	g_topology_unlock();
	g_mirror_event_cancel(disk);
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk, 1);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_STALE:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		free(disk, M_MIRROR);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}

static void
g_mirror_free_device(struct g_mirror_softc *sc)
{

	g_topology_assert();

	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	mtx_destroy(&sc->sc_done_mtx);
	sx_destroy(&sc->sc_lock);
	free(sc, M_MIRROR);
}

static void
g_mirror_providergone(struct g_provider *pp)
{
	struct g_mirror_softc *sc = pp->private;

	if ((--sc->sc_refcnt) == 0)
		g_mirror_free_device(sc);
}

static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;
	struct g_consumer *cp, *tmpcp;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
		g_mirror_destroy_disk(disk);
	}
	while ((ep = g_mirror_event_first(sc)) != NULL) {
		g_mirror_event_remove(sc, ep);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);

	g_topology_lock();
	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
		g_mirror_disconnect_consumer(sc, cp);
	}
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	sx_xunlock(&sc->sc_lock);
	if ((--sc->sc_refcnt) == 0)
		g_mirror_free_device(sc);
	g_topology_unlock();
}

static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}

/*
 * Return the next active disk on the list.
 * It is possible that this will be the same disk as the given one.
 * If there are no active disks on the list, NULL is returned.
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}

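/*
 * Return an active disk for a round-robin read, advancing the hint to the
 * next active disk so that consecutive reads rotate over all active
 * components.
 */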
static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_hint == NULL) {
		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
		if (sc->sc_hint == NULL)
			return (NULL);
	}
	disk = sc->sc_hint;
	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
		disk = g_mirror_find_next(sc, disk);
		if (disk == NULL)
			return (NULL);
	}
	sc->sc_hint = g_mirror_find_next(sc, disk);
	return (disk);
}

static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
	if (md != NULL &&
	    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0) {
		/*
		 * Handle the case when the size of the parent provider
		 * has been reduced.
		 */
		if (offset < md->md_mediasize)
			error = ENOSPC;
		else
			mirror_metadata_encode(md, sector);
	}
	KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_metadata_write, error);
	if (error == 0)
		error = g_write_data(cp, offset, sector, length);
	free(sector, M_MIRROR);
	if (error != 0) {
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
			disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
			G_MIRROR_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		} else {
			G_MIRROR_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		}
		if (g_mirror_disconnect_on_failure &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
		}
	}
	return (error);
}

static int
g_mirror_clear_metadata(struct g_mirror_disk *disk)
{
	int error;

	g_topology_assert_not();
	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

	if (disk->d_softc->sc_type != G_MIRROR_TYPE_AUTOMATIC)
		return (0);
	error = g_mirror_write_metadata(disk, NULL);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot clear metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
	return (error);
}

void
g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{

	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
	md->md_version = G_MIRROR_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_mid = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_slice = sc->sc_slice;
	md->md_balance = sc->sc_balance;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
	bzero(md->md_provider, sizeof(md->md_provider));
	if (disk == NULL) {
		md->md_did = arc4random();
		md->md_priority = 0;
		md->md_syncid = 0;
		md->md_dflags = 0;
		md->md_sync_offset = 0;
		md->md_provsize = 0;
	} else {
		md->md_did = disk->d_id;
		md->md_priority = disk->d_priority;
		md->md_syncid = disk->d_sync.ds_syncid;
		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			md->md_sync_offset = disk->d_sync.ds_offset_done;
		else
			md->md_sync_offset = 0;
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
			strlcpy(md->md_provider,
			    disk->d_consumer->provider->name,
			    sizeof(md->md_provider));
		}
		md->md_provsize = disk->d_consumer->provider->mediasize;
	}
}

void
g_mirror_update_metadata(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC)
		return;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0)
		g_mirror_fill_metadata(sc, disk, &md);
	error = g_mirror_write_metadata(disk, &md);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
}

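/*
 * Bump the synchronization ID and store it in the metadata of all active
 * and synchronizing disks, so that components which miss subsequent writes
 * can later be detected as stale.
 */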
static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_mirror_update_metadata(disk);
		}
	}
}

static void
g_mirror_bump_genid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_mirror_update_metadata(disk);
		}
	}
}

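/*
 * Mark all active components as clean once the mirror has seen no writes
 * for long enough.  Return 0 if the components were marked clean or there
 * is nothing to do, otherwise return the number of seconds to wait before
 * the mirror may be marked idle.
 */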
static int
g_mirror_idle(struct g_mirror_softc *sc, int acw)
{
	struct g_mirror_disk *disk;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write);
		if (!g_mirror_shutdown && timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	return (0);
}

static void
g_mirror_unidle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
}

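/*
 * I/O completion callback for regular requests cloned to mirror components.
 * Tag the bio as a completed regular request and queue it for the worker
 * thread.
 */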
static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR;
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_regular_request_error(struct g_mirror_softc *sc,
    struct g_mirror_disk *disk, struct bio *bp)
{

	if (bp->bio_cmd == BIO_FLUSH && bp->bio_error == EOPNOTSUPP)
		return;

	if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
		disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
		G_MIRROR_LOGREQ(0, bp, "Request failed (error=%d).",
		    bp->bio_error);
	} else {
		G_MIRROR_LOGREQ(1, bp, "Request failed (error=%d).",
		    bp->bio_error);
	}
	if (g_mirror_disconnect_on_failure &&
	    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
		if (bp->bio_error == ENXIO &&
		    bp->bio_cmd == BIO_READ)
			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
		else if (bp->bio_error == ENXIO)
			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID_NOW;
		else
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
		g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
		    G_MIRROR_EVENT_DONTWAIT);
	}
}

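/*
 * Handle a completed regular request: record any error and, once all cloned
 * bios for the parent have come back, deliver the parent bio.  Failed reads
 * are requeued so that they can be retried from another active component.
 */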
static void
g_mirror_regular_request(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();
	KASSERT(sc->sc_provider == bp->bio_parent->bio_to,
	    ("regular request %p with unexpected origin", bp));

	pbp = bp->bio_parent;
	bp->bio_from->index--;
	if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE)
		sc->sc_writes--;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
	}

	switch (bp->bio_cmd) {
	case BIO_READ:
		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_read,
		    bp->bio_error);
		break;
	case BIO_WRITE:
		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_write,
		    bp->bio_error);
		break;
	case BIO_DELETE:
		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_delete,
		    bp->bio_error);
		break;
	case BIO_FLUSH:
		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_flush,
		    bp->bio_error);
		break;
	}

	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (bp->bio_error == 0 && pbp->bio_error == 0) {
		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
		g_destroy_bio(bp);
		if (pbp->bio_children == pbp->bio_inbed) {
			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
			pbp->bio_completed = pbp->bio_length;
			if (pbp->bio_cmd == BIO_WRITE ||
			    pbp->bio_cmd == BIO_DELETE) {
				TAILQ_REMOVE(&sc->sc_inflight, pbp, bio_queue);
				/* Release delayed sync requests if possible. */
				g_mirror_sync_release(sc);
			}
			g_io_deliver(pbp, pbp->bio_error);
		}
		return;
	} else if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		if (disk != NULL)
			g_mirror_regular_request_error(sc, disk, bp);
		switch (pbp->bio_cmd) {
		case BIO_DELETE:
		case BIO_WRITE:
		case BIO_FLUSH:
			pbp->bio_inbed--;
			pbp->bio_children--;
			break;
		}
	}
	g_destroy_bio(bp);

	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (pbp->bio_inbed < pbp->bio_children)
			break;
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1)
			g_io_deliver(pbp, pbp->bio_error);
		else {
			pbp->bio_error = 0;
			mtx_lock(&sc->sc_queue_mtx);
			TAILQ_INSERT_TAIL(&sc->sc_queue, pbp, bio_queue);
			mtx_unlock(&sc->sc_queue_mtx);
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
			wakeup(sc);
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
	case BIO_FLUSH:
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_inbed < pbp->bio_children) {
			/* Do nothing. */
			break;
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* Some requests succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		}
		if (pbp->bio_cmd == BIO_WRITE || pbp->bio_cmd == BIO_DELETE) {
			TAILQ_REMOVE(&sc->sc_inflight, pbp, bio_queue);
			/* Release delayed sync requests if possible. */
			g_mirror_sync_release(sc);
		}
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
		break;
	}
}

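/*
 * I/O completion callback for synchronization requests.  Tag the bio as a
 * sync request and queue it for the worker thread.
 */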
static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

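/*
 * Handle a GEOM::candelete attribute request: report whether any mirror
 * component supports BIO_DELETE.
 */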
static void
g_mirror_candelete(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	int *val;

	sc = bp->bio_to->private;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE)
			break;
	}
	val = (int *)bp->bio_data;
	*val = (disk != NULL);
	g_io_deliver(bp, 0);
}

static void
g_mirror_kernel_dump(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *cbp;
	struct g_kerneldump *gkd;

	/*
	 * We configure dumping to the first component, because this component
	 * will be used for reading with the 'prefer' balance algorithm.
	 * If the component with the highest priority is currently disconnected
	 * we will not be able to read the dump after the reboot if that
	 * component is connected and synchronized later. Can we do something
	 * better?
	 */
	sc = bp->bio_to->private;
	disk = LIST_FIRST(&sc->sc_disks);

	gkd = (struct g_kerneldump *)bp->bio_data;
	if (gkd->length > bp->bio_to->mediasize)
		gkd->length = bp->bio_to->mediasize;
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	g_io_request(cbp, disk->d_consumer);
	G_MIRROR_DEBUG(1, "Kernel dump will go to %s.",
	    g_mirror_get_diskname(disk));
}

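/*
 * Entry point for I/O requests to the mirror provider.  BIO_GETATTR
 * requests are handled inline; other supported requests are queued for the
 * worker thread.
 */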
static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->private;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (strcmp(bp->bio_attribute, "GEOM::candelete") == 0) {
			g_mirror_candelete(bp);
			return;
		} else if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) {
			g_mirror_kernel_dump(bp);
			return;
		}
		/* FALLTHROUGH */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	if (bp->bio_to->error != 0) {
		mtx_unlock(&sc->sc_queue_mtx);
		g_io_deliver(bp, bp->bio_to->error);
		return;
	}
	TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

/*
 * Return TRUE if the given request is colliding with an in-progress
 * synchronization request.
 */
static bool
g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	u_int i;

	if (sc->sc_sync.ds_ndisks == 0)
		return (false);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING)
			continue;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			sbp = disk->d_sync.ds_bios[i];
			if (sbp == NULL)
				continue;
			sstart = sbp->bio_offset;
			send = sbp->bio_offset + sbp->bio_length;
			if (rend > sstart && rstart < send)
				return (true);
		}
	}
	return (false);
}

/*
 * Return TRUE if the given sync request is colliding with an in-progress
 * regular request.
 */
static bool
g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_sync.ds_ndisks == 0)
		return (false);
	sstart = sbp->bio_offset;
	send = sbp->bio_offset + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (true);
	}
	return (false);
}

/*
 * Put a regular request onto the delayed queue.
 */
static void
g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying request.");
	TAILQ_INSERT_TAIL(&sc->sc_regular_delayed, bp, bio_queue);
}

/*
 * Put a synchronization request onto the delayed queue.
 */
static void
g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request.");
	TAILQ_INSERT_TAIL(&sc->sc_sync_delayed, bp, bio_queue);
}

/*
 * Requeue delayed regular requests.
 */
static void
g_mirror_regular_release(struct g_mirror_softc *sc)
{
	struct bio *bp;

	if ((bp = TAILQ_FIRST(&sc->sc_regular_delayed)) == NULL)
		return;
	if (g_mirror_sync_collision(sc, bp))
		return;

	G_MIRROR_DEBUG(2, "Requeuing regular requests after collision.");
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_CONCAT(&sc->sc_regular_delayed, &sc->sc_queue, bio_queue);
	TAILQ_SWAP(&sc->sc_regular_delayed, &sc->sc_queue, bio, bio_queue);
	mtx_unlock(&sc->sc_queue_mtx);
}

/*
 * Release delayed sync requests which no longer collide with regular
 * requests.
 */
static void
g_mirror_sync_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed, bio_queue, bp2) {
		if (g_mirror_regular_collision(sc, bp))
			continue;
		TAILQ_REMOVE(&sc->sc_sync_delayed, bp, bio_queue);
		G_MIRROR_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Free a synchronization request and clear its slot in the array.
 */
static void
g_mirror_sync_request_free(struct g_mirror_disk *disk, struct bio *bp)
{
	int idx;

	if (disk != NULL && disk->d_sync.ds_bios != NULL) {
		idx = (int)(uintptr_t)bp->bio_caller1;
		KASSERT(disk->d_sync.ds_bios[idx] == bp,
		    ("unexpected sync BIO at %p:%d", disk, idx));
		disk->d_sync.ds_bios[idx] = NULL;
	}
	free(bp->bio_data, M_MIRROR);
	g_destroy_bio(bp);
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a read request is
 * sent to the mirror provider via the sync consumer. If that request completes
 * successfully, it is converted to a write and sent to the disk being
 * synchronized. If the write also completes successfully, the synchronization
 * offset is advanced and a new read request is submitted.
 */
static void
g_mirror_sync_request(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_mirror_disk_sync *sync;

	KASSERT((bp->bio_cmd == BIO_READ &&
	    bp->bio_from->geom == sc->sc_sync.ds_geom) ||
	    (bp->bio_cmd == BIO_WRITE && bp->bio_from->geom == sc->sc_geom),
	    ("Sync BIO %p with unexpected origin", bp));

	bp->bio_from->index--;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		g_mirror_sync_request_free(NULL, bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	sync = &disk->d_sync;

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ: {
		struct g_consumer *cp;

		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_read,
		    bp->bio_error);

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);

			/*
			 * The read error will trigger a syncid bump, so there's
			 * no need to do that here.
			 *
			 * The read error handling for regular requests will
			 * retry the read from all active mirrors before passing
			 * the error back up, so there's no need to retry here.
			 */
			g_mirror_sync_request_free(disk, bp);
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp,
		    "Synchronization request half-finished.");
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	}
	case BIO_WRITE: {
		off_t offset;
		int i;

		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_write,
		    bp->bio_error);

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_mirror_sync_request_free(disk, bp);
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		if (sync->ds_offset >= sc->sc_mediasize ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			g_mirror_sync_request_free(disk, bp);
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				return;
			}
			/* Disk up-to-date, activate it. */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		g_mirror_sync_reinit(disk, bp, sync->ds_offset);
		sync->ds_offset += bp->bio_length;

		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;

		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Requeue delayed requests if possible. */
		g_mirror_regular_release(sc);

		/* Find the smallest offset. */
		offset = sc->sc_mediasize;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			bp = sync->ds_bios[i];
			if (bp != NULL && bp->bio_offset < offset)
				offset = bp->bio_offset;
		}
		if (g_mirror_sync_period > 0 &&
		    time_uptime - sync->ds_update_ts > g_mirror_sync_period) {
			sync->ds_offset_done = offset;
			g_mirror_update_metadata(disk);
			sync->ds_update_ts = time_uptime;
		}
		return;
	}
	default:
		panic("Invalid I/O request %p", bp);
	}
}

static void
g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

static void
g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	disk = g_mirror_get_disk(sc);
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

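/*
 * Parameters for the "load" balance algorithm: a request within TRACK_SIZE
 * bytes of a disk's previous request is treated as nearly sequential and
 * preferred, and LOAD_SCALE weighs a disk's smoothed outstanding-request
 * count against that locality bonus.
 */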
#define TRACK_SIZE  (1 * 1024 * 1024)
#define LOAD_SCALE	256
#define ABS(x)		(((x) >= 0) ? (x) : (-(x)))

static void
g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk, *dp;
	struct g_consumer *cp;
	struct bio *cbp;
	int prio, best;

	/* Find a disk with the smallest load. */
	disk = NULL;
	best = INT_MAX;
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		prio = dp->load;
		/* If disk head is precisely in position - highly prefer it. */
		if (dp->d_last_offset == bp->bio_offset)
			prio -= 2 * LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE)
			prio -= 1 * LOAD_SCALE;
		if (prio <= best) {
			disk = dp;
			best = prio;
		}
	}
	KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name));
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	/* Remember last head position. */
	disk->d_last_offset = bp->bio_offset + bp->bio_length;
	/* Update loads. */
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		dp->load = (dp->d_consumer->index * LOAD_SCALE +
		    dp->load * 7) / 8;
	}
	g_io_request(cbp, cp);
}

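/*
 * Split a large read into sector-aligned slices and spread them over all
 * active components.  Requests no longer than the configured slice size are
 * served round-robin instead.
 */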
static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	off_t left, mod, offset, slice;
	u_char *data;
	u_int ndisks;

	if (bp->bio_length <= sc->sc_slice) {
		g_mirror_request_round_robin(sc, bp);
		return;
	}
	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
	slice = bp->bio_length / ndisks;
	mod = slice % sc->sc_provider->sectorsize;
	if (mod != 0)
		slice += sc->sc_provider->sectorsize - mod;
	/*
	 * Allocate all bios before sending any request, so we can
	 * return ENOMEM in a nice and clean way.
	 */
	left = bp->bio_length;
	offset = bp->bio_offset;
	data = bp->bio_data;
	TAILQ_INIT(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
				TAILQ_REMOVE(&queue, cbp, bio_queue);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
		cbp->bio_done = g_mirror_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
		cbp->bio_offset = offset;
		cbp->bio_data = data;
		cbp->bio_length = MIN(left, slice);
		left -= cbp->bio_length;
		if (left == 0)
			break;
		offset += cbp->bio_length;
		data += cbp->bio_length;
	}
	while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, cbp, bio_queue);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		disk->d_consumer->index++;
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_register_request(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue queue;
	struct bio *cbp;
	struct g_consumer *cp;
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SA_XLOCKED);

	/*
	 * To avoid ordering issues, if a write is deferred because of a
	 * collision with a sync request, all I/O is deferred until that
	 * write is initiated.
	 */
	if (bp->bio_from->geom != sc->sc_sync.ds_geom &&
	    !TAILQ_EMPTY(&sc->sc_regular_delayed)) {
		g_mirror_regular_delay(sc, bp);
		return;
	}

	switch (bp->bio_cmd) {
	case BIO_READ:
		switch (sc->sc_balance) {
		case G_MIRROR_BALANCE_LOAD:
			g_mirror_request_load(sc, bp);
			break;
		case G_MIRROR_BALANCE_PREFER:
			g_mirror_request_prefer(sc, bp);
			break;
		case G_MIRROR_BALANCE_ROUND_ROBIN:
			g_mirror_request_round_robin(sc, bp);
			break;
		case G_MIRROR_BALANCE_SPLIT:
			g_mirror_request_split(sc, bp);
			break;
		}
		return;
	case BIO_WRITE:
	case BIO_DELETE:
		/*
		 * Delay the request if it is colliding with a synchronization
		 * request.
		 */
		if (g_mirror_sync_collision(sc, bp)) {
			g_mirror_regular_delay(sc, bp);
			return;
		}

		if (sc->sc_idle)
			g_mirror_unidle(sc);
		else
			sc->sc_last_write = time_uptime;

		/*
		 * Bump syncid on first write.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
			g_mirror_bump_syncid(sc);
		}

		/*
		 * Allocate all bios before sending any request, so we can
		 * return ENOMEM in a nice and clean way.
		 */
		TAILQ_INIT(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			switch (disk->d_state) {
			case G_MIRROR_DISK_STATE_ACTIVE:
				break;
			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
				if (bp->bio_offset >= disk->d_sync.ds_offset)
					continue;
				break;
			default:
				continue;
			}
			if (bp->bio_cmd == BIO_DELETE &&
			    (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) == 0)
				continue;
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
					TAILQ_REMOVE(&queue, cbp, bio_queue);
					g_destroy_bio(cbp);
				}
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
			cbp->bio_done = g_mirror_done;
			cp = disk->d_consumer;
			cbp->bio_caller1 = cp;
			cbp->bio_to = cp->provider;
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
		}
		if (TAILQ_EMPTY(&queue)) {
			KASSERT(bp->bio_cmd == BIO_DELETE,
			    ("No consumers for regular request %p", bp));
			g_io_deliver(bp, EOPNOTSUPP);
			return;
		}
		while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			TAILQ_REMOVE(&queue, cbp, bio_queue);
			cp = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp->index++;
			sc->sc_writes++;
			g_io_request(cbp, cp);
		}
		/*
		 * Put the request onto the inflight queue, so we can check
		 * that new synchronization requests don't collide with it.
		 */
		TAILQ_INSERT_TAIL(&sc->sc_inflight, bp, bio_queue);
		return;
	case BIO_FLUSH:
		TAILQ_INIT(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
				continue;
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
					TAILQ_REMOVE(&queue, cbp, bio_queue);
					g_destroy_bio(cbp);
				}
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
			cbp->bio_done = g_mirror_done;
			cbp->bio_caller1 = disk;
			cbp->bio_to = disk->d_consumer->provider;
		}
		KASSERT(!TAILQ_EMPTY(&queue),
		    ("No consumers for regular request %p", bp));
		while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			TAILQ_REMOVE(&queue, cbp, bio_queue);
			disk = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp = disk->d_consumer;
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
			    cp->acr, cp->acw, cp->ace));
			cp->index++;
			g_io_request(cbp, cp);
		}
		break;
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

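/*
 * Return non-zero if the device can be destroyed, i.e., it is not being
 * tasted and none of its consumers is busy with I/O.
 */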
static int
g_mirror_can_destroy(struct g_mirror_softc *sc)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	gp = sc->sc_geom;
	if (gp->softc == NULL)
		return (1);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_TASTING) != 0)
		return (0);
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	gp = sc->sc_sync.ds_geom;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
	    sc->sc_name);
	return (1);
}

static int
g_mirror_try_destroy(struct g_mirror_softc *sc)
{

	if (sc->sc_rootmount != NULL) {
		G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
		    sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}
	g_topology_lock();
	if (!g_mirror_can_destroy(sc)) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DRAIN) != 0) {
		g_topology_unlock();
		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
		    &sc->sc_worker);
		/* Unlock sc_lock here, as it can be destroyed after wakeup. */
		sx_xunlock(&sc->sc_lock);
		wakeup(&sc->sc_worker);
		sc->sc_worker = NULL;
	} else {
		g_topology_unlock();
		g_mirror_destroy_device(sc);
	}
	return (1);
}

/*
 * Worker thread.
 */
static void
g_mirror_worker(void *arg)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep;
	struct bio *bp;
	int timeout;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sx_xlock(&sc->sc_lock);
	for (;;) {
		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
		/*
		 * First take a look at events.
		 * It is important to handle events before any I/O requests.
		 */
		ep = g_mirror_event_first(sc);
		if (ep != NULL) {
			g_mirror_event_remove(sc, ep);
			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
				/* Update only device status. */
				G_MIRROR_DEBUG(3,
				    "Running event for device %s.",
				    sc->sc_name);
				ep->e_error = 0;
				g_mirror_update_device(sc, true);
			} else {
				/* Update disk status. */
				G_MIRROR_DEBUG(3, "Running event for disk %s.",
				     g_mirror_get_diskname(ep->e_disk));
				ep->e_error = g_mirror_update_disk(ep->e_disk,
				    ep->e_state);
				if (ep->e_error == 0)
					g_mirror_update_device(sc, false);
			}
			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
				KASSERT(ep->e_error == 0,
				    ("Error cannot be handled."));
				g_mirror_event_free(ep);
			} else {
				ep->e_flags |= G_MIRROR_EVENT_DONE;
				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
				    ep);
				mtx_lock(&sc->sc_events_mtx);
				wakeup(ep);
				mtx_unlock(&sc->sc_events_mtx);
			}
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
			}
			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
			continue;
		}

1941		/*
1942		 * Check if we can mark array as CLEAN and if we can't take
1943		 * Check whether we can mark the array as CLEAN and, if we
1944		 * cannot, how many seconds we should wait before trying again.
1945		timeout = g_mirror_idle(sc, -1);
1946
1947		/*
1948		 * Handle I/O requests.
1949		 */
1950		mtx_lock(&sc->sc_queue_mtx);
1951		bp = TAILQ_FIRST(&sc->sc_queue);
1952		if (bp != NULL)
1953			TAILQ_REMOVE(&sc->sc_queue, bp, bio_queue);
1954		else {
1955			if ((sc->sc_flags &
1956			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
1957				mtx_unlock(&sc->sc_queue_mtx);
1958				if (g_mirror_try_destroy(sc)) {
1959					curthread->td_pflags &= ~TDP_GEOM;
1960					G_MIRROR_DEBUG(1, "Thread exiting.");
1961					kproc_exit(0);
1962				}
1963				mtx_lock(&sc->sc_queue_mtx);
1964				if (!TAILQ_EMPTY(&sc->sc_queue)) {
1965					mtx_unlock(&sc->sc_queue_mtx);
1966					continue;
1967				}
1968			}
1969			if (g_mirror_event_first(sc) != NULL) {
1970				mtx_unlock(&sc->sc_queue_mtx);
1971				continue;
1972			}
1973			sx_xunlock(&sc->sc_lock);
1974			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1",
1975			    timeout * hz);
1976			sx_xlock(&sc->sc_lock);
1977			G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
1978			continue;
1979		}
1980		mtx_unlock(&sc->sc_queue_mtx);
1981
1982		if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
1983		    (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
1984			/*
1985			 * Handle completion of the first half (the read) of a
1986			 * block synchronization operation.
1987			 */
1988			g_mirror_sync_request(sc, bp);
1989		} else if (bp->bio_to != sc->sc_provider) {
1990			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0)
1991				/*
1992				 * Handle completion of a regular I/O request.
1993				 */
1994				g_mirror_regular_request(sc, bp);
1995			else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
1996				/*
1997				 * Handle completion of the second half (the
1998				 * write) of a block synchronization operation.
1999				 */
2000				g_mirror_sync_request(sc, bp);
2001			else {
2002				KASSERT(0,
2003				    ("Invalid request cflags=0x%hx to=%s.",
2004				    bp->bio_cflags, bp->bio_to->name));
2005			}
2006		} else {
2007			/*
2008			 * Initiate an I/O request.
2009			 */
2010			g_mirror_register_request(sc, bp);
2011		}
2012		G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
2013	}
2014}
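
/*
 * A summary of the dispatch order implemented by the loop above (an
 * illustration derived from the code, not a separate algorithm):
 *
 *	for (;;) {
 *		if an event is pending:
 *			process it first;
 *		else if a bio is queued:
 *			from the sync geom, SYNC flag set
 *				-> the read half of a sync request is done;
 *			addressed to a component, REGULAR flag set
 *				-> a regular request has completed;
 *			addressed to a component, SYNC flag set
 *				-> the write half of a sync request is done;
 *			addressed to our provider
 *				-> a new request, dispatch it;
 *		else:
 *			sleep until woken up or the idle timeout expires;
 *	}
 */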
2015
2016static void
2017g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
2018{
2019
2020	sx_assert(&sc->sc_lock, SX_LOCKED);
2021
2022	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
2023		return;
2024	if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
2025		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.",
2026		    g_mirror_get_diskname(disk), sc->sc_name);
2027		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
2028	} else if (sc->sc_idle &&
2029	    (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
2030		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.",
2031		    g_mirror_get_diskname(disk), sc->sc_name);
2032		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2033	}
2034}
2035
2036static void
2037g_mirror_sync_reinit(const struct g_mirror_disk *disk, struct bio *bp,
2038    off_t offset)
2039{
2040	void *data;
2041	int idx;
2042
2043	data = bp->bio_data;
2044	idx = (int)(uintptr_t)bp->bio_caller1;
2045	g_reset_bio(bp);
2046
2047	bp->bio_cmd = BIO_READ;
2048	bp->bio_data = data;
2049	bp->bio_done = g_mirror_sync_done;
2050	bp->bio_from = disk->d_sync.ds_consumer;
2051	bp->bio_to = disk->d_softc->sc_provider;
2052	bp->bio_caller1 = (void *)(uintptr_t)idx;
2053	bp->bio_offset = offset;
2054	bp->bio_length = MIN(MAXPHYS,
2055	    disk->d_softc->sc_mediasize - bp->bio_offset);
2056}
2057
2058static void
2059g_mirror_sync_start(struct g_mirror_disk *disk)
2060{
2061	struct g_mirror_softc *sc;
2062	struct g_mirror_disk_sync *sync;
2063	struct g_consumer *cp;
2064	struct bio *bp;
2065	int error, i;
2066
2067	g_topology_assert_not();
2068	sc = disk->d_softc;
2069	sync = &disk->d_sync;
2070	sx_assert(&sc->sc_lock, SX_LOCKED);
2071
2072	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2073	    ("Disk %s is not marked for synchronization.",
2074	    g_mirror_get_diskname(disk)));
2075	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2076	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
2077	    sc->sc_state));
2078
2079	sx_xunlock(&sc->sc_lock);
2080	g_topology_lock();
2081	cp = g_new_consumer(sc->sc_sync.ds_geom);
2082	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
2083	error = g_attach(cp, sc->sc_provider);
2084	KASSERT(error == 0,
2085	    ("Cannot attach to %s (error=%d).", sc->sc_name, error));
2086	error = g_access(cp, 1, 0, 0);
2087	KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
2088	g_topology_unlock();
2089	sx_xlock(&sc->sc_lock);
2090
2091	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
2092	    g_mirror_get_diskname(disk));
2093	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0)
2094		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
2095	KASSERT(sync->ds_consumer == NULL,
2096	    ("Sync consumer already exists (device=%s, disk=%s).",
2097	    sc->sc_name, g_mirror_get_diskname(disk)));
2098
2099	sync->ds_consumer = cp;
2100	sync->ds_consumer->private = disk;
2101	sync->ds_consumer->index = 0;
2102
2103	/*
2104	 * Allocate memory for synchronization bios and initialize them.
2105	 */
2106	sync->ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs,
2107	    M_MIRROR, M_WAITOK);
2108	for (i = 0; i < g_mirror_syncreqs; i++) {
2109		bp = g_alloc_bio();
2110		sync->ds_bios[i] = bp;
2111
2112		bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
2113		bp->bio_caller1 = (void *)(uintptr_t)i;
2114		g_mirror_sync_reinit(disk, bp, sync->ds_offset);
2115		sync->ds_offset += bp->bio_length;
2116	}
2117
2118	/* Increase the number of disks in SYNCHRONIZING state. */
2119	sc->sc_sync.ds_ndisks++;
2120	/* Set the number of in-flight synchronization requests. */
2121	sync->ds_inflight = g_mirror_syncreqs;
2122
2123	/*
2124	 * Fire off first synchronization requests.
2125	 */
2126	for (i = 0; i < g_mirror_syncreqs; i++) {
2127		bp = sync->ds_bios[i];
2128		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
2129		sync->ds_consumer->index++;
2130		/*
2131		 * Delay the request if it is colliding with a regular request.
2132		 */
2133		if (g_mirror_regular_collision(sc, bp))
2134			g_mirror_sync_delay(sc, bp);
2135		else
2136			g_io_request(bp, sync->ds_consumer);
2137	}
2138}
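
/*
 * A worked example of the request window built above, assuming
 * synchronization starts at offset 0 with the default of two parallel
 * requests (g_mirror_syncreqs) and the common MAXPHYS of 128 kB: two
 * bios are allocated and reinitialized as BIO_READ at offsets 0 and
 * 131072, and ds_offset advances to 262144.  As each read completes it
 * is turned into a write to the synchronizing disk (see
 * g_mirror_sync_request()), after which the bio is reused for the next
 * region, keeping g_mirror_syncreqs requests in flight until the end
 * of the media.
 */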
2139
2140/*
2141 * Stop synchronization process.
2142 * type: 0 - synchronization finished
2143 *       1 - synchronization stopped
2144 */
2145static void
2146g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
2147{
2148	struct g_mirror_softc *sc;
2149	struct g_consumer *cp;
2150
2151	g_topology_assert_not();
2152	sc = disk->d_softc;
2153	sx_assert(&sc->sc_lock, SX_LOCKED);
2154
2155	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2156	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2157	    g_mirror_disk_state2str(disk->d_state)));
2158	if (disk->d_sync.ds_consumer == NULL)
2159		return;
2160
2161	if (type == 0) {
2162		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
2163		    sc->sc_name, g_mirror_get_diskname(disk));
2164	} else /* if (type == 1) */ {
2165		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
2166		    sc->sc_name, g_mirror_get_diskname(disk));
2167	}
2168	g_mirror_regular_release(sc);
2169	free(disk->d_sync.ds_bios, M_MIRROR);
2170	disk->d_sync.ds_bios = NULL;
2171	cp = disk->d_sync.ds_consumer;
2172	disk->d_sync.ds_consumer = NULL;
2173	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2174	sc->sc_sync.ds_ndisks--;
2175	sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
2176	g_topology_lock();
2177	g_mirror_kill_consumer(sc, cp);
2178	g_topology_unlock();
2179	sx_xlock(&sc->sc_lock);
2180}
2181
2182static void
2183g_mirror_launch_provider(struct g_mirror_softc *sc)
2184{
2185	struct g_mirror_disk *disk;
2186	struct g_provider *pp, *dp;
2187
2188	sx_assert(&sc->sc_lock, SX_LOCKED);
2189
2190	g_topology_lock();
2191	pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name);
2192	pp->flags |= G_PF_DIRECT_RECEIVE;
2193	pp->mediasize = sc->sc_mediasize;
2194	pp->sectorsize = sc->sc_sectorsize;
2195	pp->stripesize = 0;
2196	pp->stripeoffset = 0;
2197
2198	/* Splitting of unmapped BIOs could work but isn't implemented now. */
2199	if (sc->sc_balance != G_MIRROR_BALANCE_SPLIT)
2200		pp->flags |= G_PF_ACCEPT_UNMAPPED;
2201
2202	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2203		if (disk->d_consumer && disk->d_consumer->provider) {
2204			dp = disk->d_consumer->provider;
2205			if (dp->stripesize > pp->stripesize) {
2206				pp->stripesize = dp->stripesize;
2207				pp->stripeoffset = dp->stripeoffset;
2208			}
2209			/* A provider underneath us doesn't support unmapped BIOs. */
2210			if ((dp->flags & G_PF_ACCEPT_UNMAPPED) == 0) {
2211				G_MIRROR_DEBUG(0, "Cancelling unmapped "
2212				    "because of %s.", dp->name);
2213				pp->flags &= ~G_PF_ACCEPT_UNMAPPED;
2214			}
2215		}
2216	}
2217	pp->private = sc;
2218	sc->sc_refcnt++;
2219	sc->sc_provider = pp;
2220	g_error_provider(pp, 0);
2221	g_topology_unlock();
2222	G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
2223	    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks);
2224	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2225		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
2226			g_mirror_sync_start(disk);
2227	}
2228}
2229
2230static void
2231g_mirror_destroy_provider(struct g_mirror_softc *sc)
2232{
2233	struct g_mirror_disk *disk;
2234	struct bio *bp;
2235
2236	g_topology_assert_not();
2237	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
2238	    sc->sc_name));
2239
2240	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2241		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
2242			g_mirror_sync_stop(disk, 1);
2243	}
2244
2245	g_topology_lock();
2246	g_error_provider(sc->sc_provider, ENXIO);
2247	mtx_lock(&sc->sc_queue_mtx);
2248	while ((bp = TAILQ_FIRST(&sc->sc_queue)) != NULL) {
2249		TAILQ_REMOVE(&sc->sc_queue, bp, bio_queue);
2250		/*
2251		 * Abort any pending I/O that wasn't generated by us.
2252		 * Synchronization requests and requests destined for individual
2253		 * mirror components can be destroyed immediately.
2254		 */
2255		if (bp->bio_to == sc->sc_provider &&
2256		    bp->bio_from->geom != sc->sc_sync.ds_geom) {
2257			g_io_deliver(bp, ENXIO);
2258		} else {
2259			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
2260				free(bp->bio_data, M_MIRROR);
2261			g_destroy_bio(bp);
2262		}
2263	}
2264	mtx_unlock(&sc->sc_queue_mtx);
2265	g_wither_provider(sc->sc_provider, ENXIO);
2266	sc->sc_provider = NULL;
2267	G_MIRROR_DEBUG(0, "Device %s: provider destroyed.", sc->sc_name);
2268	g_topology_unlock();
2269}
2270
2271static void
2272g_mirror_go(void *arg)
2273{
2274	struct g_mirror_softc *sc;
2275
2276	sc = arg;
2277	G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
2278	g_mirror_event_send(sc, 0,
2279	    G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE);
2280}
2281
2282static u_int
2283g_mirror_determine_state(struct g_mirror_disk *disk)
2284{
2285	struct g_mirror_softc *sc;
2286	u_int state;
2287
2288	sc = disk->d_softc;
2289	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
2290		if ((disk->d_flags &
2291		    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0 &&
2292		    (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 ||
2293		     (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0)) {
2294			/* Disk does not need synchronization. */
2295			state = G_MIRROR_DISK_STATE_ACTIVE;
2296		} else {
2297			if ((sc->sc_flags &
2298			     G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2299			    (disk->d_flags &
2300			     G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
2301				/*
2302				 * We can start synchronization from
2303				 * the stored offset.
2304				 */
2305				state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
2306			} else {
2307				state = G_MIRROR_DISK_STATE_STALE;
2308			}
2309		}
2310	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
2311		/*
2312		 * Reset all synchronization data for this disk,
2313		 * because even if it was synchronized, it was
2314		 * synchronized against disks with a different syncid.
2315		 */
2316		disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
2317		disk->d_sync.ds_offset = 0;
2318		disk->d_sync.ds_offset_done = 0;
2319		disk->d_sync.ds_syncid = sc->sc_syncid;
2320		if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2321		    (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
2322			state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
2323		} else {
2324			state = G_MIRROR_DISK_STATE_STALE;
2325		}
2326	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
2327		/*
2328		 * Not good, NOT GOOD!
2329		 * It means that the mirror was started on stale disks
2330		 * and a fresher disk has just arrived.
2331		 * If there were writes, the mirror is broken, sorry.
2332		 * The best choice here is to leave this disk
2333		 * untouched and inform the user loudly.
2334		 */
2335		G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
2336		    "disk (%s) arrived!! It will not be connected to the "
2337		    "running device.", sc->sc_name,
2338		    g_mirror_get_diskname(disk));
2339		g_mirror_destroy_disk(disk);
2340		state = G_MIRROR_DISK_STATE_NONE;
2341		/* Return immediately, because disk was destroyed. */
2342		return (state);
2343	}
2344	G_MIRROR_DEBUG(3, "State for %s disk: %s.",
2345	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
2346	return (state);
2347}
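
/*
 * A summary of the decision above (derived from the code):
 *
 *	disk syncid vs device's   disk condition               resulting state
 *	-----------------------   --------------------------   ---------------
 *	equal                     not syncing, and clean or    ACTIVE
 *	                          no ACTIVE disk yet
 *	equal                     dirty or syncing             SYNCHRONIZING (*)
 *	older                     (sync data is reset here)    SYNCHRONIZING (*)
 *	newer                     -                            disk destroyed
 *
 *	(*) STALE instead when the device is marked NOAUTOSYNC and the
 *	    disk does not have FORCE_SYNC set.
 */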
2348
2349/*
2350 * Update device state.
2351 */
2352static void
2353g_mirror_update_device(struct g_mirror_softc *sc, bool force)
2354{
2355	struct g_mirror_disk *disk;
2356	u_int state;
2357
2358	sx_assert(&sc->sc_lock, SX_XLOCKED);
2359
2360	switch (sc->sc_state) {
2361	case G_MIRROR_DEVICE_STATE_STARTING:
2362	    {
2363		struct g_mirror_disk *pdisk, *tdisk;
2364		u_int dirty, ndisks, genid, syncid;
2365		bool broken;
2366
2367		KASSERT(sc->sc_provider == NULL,
2368		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
2369		/*
2370		 * Are we ready? We are, if all disks are connected or
2371		 * if we have any disks and 'force' is true.
2372		 */
2373		ndisks = g_mirror_ndisks(sc, -1);
2374		if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) {
2375			;
2376		} else if (ndisks == 0) {
2377			/*
2378			 * Disks went down in starting phase, so destroy
2378			 * Disks went down in the starting phase, so destroy
2379			 * the device.
2381			callout_drain(&sc->sc_callout);
2382			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2383			G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2384			    sc->sc_rootmount);
2385			root_mount_rel(sc->sc_rootmount);
2386			sc->sc_rootmount = NULL;
2387			return;
2388		} else {
2389			return;
2390		}
2391
2392		/*
2393		 * Activate all disks with the biggest syncid.
2394		 */
2395		if (force) {
2396			/*
2397			 * If 'force' is true, we have been called due to a
2398			 * timeout, so don't bother canceling the timeout.
2399			 */
2400			ndisks = 0;
2401			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2402				if ((disk->d_flags &
2403				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
2404					ndisks++;
2405				}
2406			}
2407			if (ndisks == 0) {
2408				/* No valid disks found, destroy device. */
2409				sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2410				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
2411				    __LINE__, sc->sc_rootmount);
2412				root_mount_rel(sc->sc_rootmount);
2413				sc->sc_rootmount = NULL;
2414				return;
2415			}
2416		} else {
2417			/* Cancel timeout. */
2418			callout_drain(&sc->sc_callout);
2419		}
2420
2421		/*
2422		 * Find the biggest genid.
2423		 */
2424		genid = 0;
2425		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2426			if (disk->d_genid > genid)
2427				genid = disk->d_genid;
2428		}
2429		sc->sc_genid = genid;
2430		/*
2431		 * Remove all disks without the biggest genid.
2432		 */
2433		broken = false;
2434		LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) {
2435			if (disk->d_genid < genid) {
2436				G_MIRROR_DEBUG(0,
2437				    "Component %s (device %s) broken, skipping.",
2438				    g_mirror_get_diskname(disk), sc->sc_name);
2439				g_mirror_destroy_disk(disk);
2440				/*
2441				 * Bump the syncid in case we discover a healthy
2442				 * replacement disk after starting the mirror.
2443				 */
2444				broken = true;
2445			}
2446		}
2447
2448		/*
2449		 * Find the biggest syncid.
2450		 */
2451		syncid = 0;
2452		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2453			if (disk->d_sync.ds_syncid > syncid)
2454				syncid = disk->d_sync.ds_syncid;
2455		}
2456
2457		/*
2458		 * Here we need to look for dirty disks: if all disks
2459		 * with the biggest syncid are dirty, we have to choose
2460		 * the one with the biggest priority and rebuild the rest.
2461		 */
2462		/*
2463		 * Find the number of dirty disks with the biggest syncid.
2464		 * Find the number of disks with the biggest syncid.
2465		 * While here, find a disk with the biggest priority.
2466		 */
2467		dirty = ndisks = 0;
2468		pdisk = NULL;
2469		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2470			if (disk->d_sync.ds_syncid != syncid)
2471				continue;
2472			if ((disk->d_flags &
2473			    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
2474				continue;
2475			}
2476			ndisks++;
2477			if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
2478				dirty++;
2479				if (pdisk == NULL ||
2480				    pdisk->d_priority < disk->d_priority) {
2481					pdisk = disk;
2482				}
2483			}
2484		}
2485		if (dirty == 0) {
2486			/* No dirty disks at all, great. */
2487		} else if (dirty == ndisks) {
2488			/*
2489			 * Force synchronization for all dirty disks except the one
2490			 * with the biggest priority.
2491			 */
2492			KASSERT(pdisk != NULL, ("pdisk == NULL"));
2493			G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a "
2494			    "master disk for synchronization.",
2495			    g_mirror_get_diskname(pdisk), sc->sc_name);
2496			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2497				if (disk->d_sync.ds_syncid != syncid)
2498					continue;
2499				if ((disk->d_flags &
2500				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
2501					continue;
2502				}
2503				KASSERT((disk->d_flags &
2504				    G_MIRROR_DISK_FLAG_DIRTY) != 0,
2505				    ("Disk %s isn't marked as dirty.",
2506				    g_mirror_get_diskname(disk)));
2507				/* Skip the disk with the biggest priority. */
2508				if (disk == pdisk)
2509					continue;
2510				disk->d_sync.ds_syncid = 0;
2511			}
2512		} else if (dirty < ndisks) {
2513			/*
2514			 * Force synchronization for all dirty disks.
2515			 * We have some non-dirty disks.
2516			 */
2517			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2518				if (disk->d_sync.ds_syncid != syncid)
2519					continue;
2520				if ((disk->d_flags &
2521				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
2522					continue;
2523				}
2524				if ((disk->d_flags &
2525				    G_MIRROR_DISK_FLAG_DIRTY) == 0) {
2526					continue;
2527				}
2528				disk->d_sync.ds_syncid = 0;
2529			}
2530		}
2531
2532		/* Reset hint. */
2533		sc->sc_hint = NULL;
2534		sc->sc_syncid = syncid;
2535		if (force || broken) {
2536			/* Remember to bump syncid on first write. */
2537			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2538		}
2539		state = G_MIRROR_DEVICE_STATE_RUNNING;
2540		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
2541		    sc->sc_name, g_mirror_device_state2str(sc->sc_state),
2542		    g_mirror_device_state2str(state));
2543		sc->sc_state = state;
2544		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2545			state = g_mirror_determine_state(disk);
2546			g_mirror_event_send(disk, state,
2547			    G_MIRROR_EVENT_DONTWAIT);
2548			if (state == G_MIRROR_DISK_STATE_STALE)
2549				sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2550		}
2551		break;
2552	    }
2553	case G_MIRROR_DEVICE_STATE_RUNNING:
2554		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
2555		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2556			/*
2557			 * No usable disks, so destroy the device.
2558			 */
2559			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2560			break;
2561		} else if (g_mirror_ndisks(sc,
2562		    G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
2563		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2564			/*
2565			 * We have active disks, launch provider if it doesn't
2566			 * exist.
2567			 */
2568			if (sc->sc_provider == NULL)
2569				g_mirror_launch_provider(sc);
2570			if (sc->sc_rootmount != NULL) {
2571				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
2572				    __LINE__, sc->sc_rootmount);
2573				root_mount_rel(sc->sc_rootmount);
2574				sc->sc_rootmount = NULL;
2575			}
2576		}
2577		/*
2578		 * Genid should be bumped immediately, so do it here.
2579		 */
2580		if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) {
2581			sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID;
2582			g_mirror_bump_genid(sc);
2583		}
2584		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID_NOW) != 0) {
2585			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID_NOW;
2586			g_mirror_bump_syncid(sc);
2587		}
2588		break;
2589	default:
2590		KASSERT(1 == 0, ("Wrong device state (%s, %s).",
2591		    sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
2592		break;
2593	}
2594}
2595
2596/*
2597 * Update disk state and device state if needed.
2598 */
2599#define	DISK_STATE_CHANGED()	G_MIRROR_DEBUG(1,			\
2600	"Disk %s state changed from %s to %s (device %s).",		\
2601	g_mirror_get_diskname(disk),					\
2602	g_mirror_disk_state2str(disk->d_state),				\
2603	g_mirror_disk_state2str(state), sc->sc_name)
2604static int
2605g_mirror_update_disk(struct g_mirror_disk *disk, u_int state)
2606{
2607	struct g_mirror_softc *sc;
2608
2609	sc = disk->d_softc;
2610	sx_assert(&sc->sc_lock, SX_XLOCKED);
2611
2612again:
2613	G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.",
2614	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state),
2615	    g_mirror_disk_state2str(state));
2616	switch (state) {
2617	case G_MIRROR_DISK_STATE_NEW:
2618		/*
2619		 * Possible scenarios:
2620		 * 1. A new disk arrives.
2621		 */
2622		/* Previous state should be NONE. */
2623		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE,
2624		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2625		    g_mirror_disk_state2str(disk->d_state)));
2626		DISK_STATE_CHANGED();
2627
2628		disk->d_state = state;
2629		g_topology_lock();
2630		if (LIST_EMPTY(&sc->sc_disks))
2631			LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next);
2632		else {
2633			struct g_mirror_disk *dp;
2634
2635			LIST_FOREACH(dp, &sc->sc_disks, d_next) {
2636				if (disk->d_priority >= dp->d_priority) {
2637					LIST_INSERT_BEFORE(dp, disk, d_next);
2638					dp = NULL;
2639					break;
2640				}
2641				if (LIST_NEXT(dp, d_next) == NULL)
2642					break;
2643			}
2644			if (dp != NULL)
2645				LIST_INSERT_AFTER(dp, disk, d_next);
2646		}
2647		g_topology_unlock();
2648		G_MIRROR_DEBUG(1, "Device %s: provider %s detected.",
2649		    sc->sc_name, g_mirror_get_diskname(disk));
2650		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
2651			break;
2652		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2653		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2654		    g_mirror_device_state2str(sc->sc_state),
2655		    g_mirror_get_diskname(disk),
2656		    g_mirror_disk_state2str(disk->d_state)));
2657		state = g_mirror_determine_state(disk);
2658		if (state != G_MIRROR_DISK_STATE_NONE)
2659			goto again;
2660		break;
2661	case G_MIRROR_DISK_STATE_ACTIVE:
2662		/*
2663		 * Possible scenarios:
2664		 * 1. New disk does not need synchronization.
2665		 * 2. Synchronization process finished successfully.
2666		 */
2667		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2668		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2669		    g_mirror_device_state2str(sc->sc_state),
2670		    g_mirror_get_diskname(disk),
2671		    g_mirror_disk_state2str(disk->d_state)));
2672		/* Previous state should be NEW or SYNCHRONIZING. */
2673		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW ||
2674		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2675		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2676		    g_mirror_disk_state2str(disk->d_state)));
2677		DISK_STATE_CHANGED();
2678
2679		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
2680			disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING;
2681			disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC;
2682			g_mirror_sync_stop(disk, 0);
2683		}
2684		disk->d_state = state;
2685		disk->d_sync.ds_offset = 0;
2686		disk->d_sync.ds_offset_done = 0;
2687		g_mirror_update_idle(sc, disk);
2688		g_mirror_update_metadata(disk);
2689		G_MIRROR_DEBUG(1, "Device %s: provider %s activated.",
2690		    sc->sc_name, g_mirror_get_diskname(disk));
2691		break;
2692	case G_MIRROR_DISK_STATE_STALE:
2693		/*
2694		 * Possible scenarios:
2695		 * 1. Stale disk was connected.
2696		 */
2697		/* Previous state should be NEW. */
2698		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2699		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2700		    g_mirror_disk_state2str(disk->d_state)));
2701		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2702		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2703		    g_mirror_device_state2str(sc->sc_state),
2704		    g_mirror_get_diskname(disk),
2705		    g_mirror_disk_state2str(disk->d_state)));
2706		/*
2707		 * STALE state is only possible if the device is marked
2708		 * NOAUTOSYNC.
2709		 */
2710		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
2711		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2712		    g_mirror_device_state2str(sc->sc_state),
2713		    g_mirror_get_diskname(disk),
2714		    g_mirror_disk_state2str(disk->d_state)));
2715		DISK_STATE_CHANGED();
2716
2717		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2718		disk->d_state = state;
2719		g_mirror_update_metadata(disk);
2720		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
2721		    sc->sc_name, g_mirror_get_diskname(disk));
2722		break;
2723	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
2724		/*
2725		 * Possible scenarios:
2726		 * 1. Disk which needs synchronization was connected.
2727		 */
2728		/* Previous state should be NEW. */
2729		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2730		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2731		    g_mirror_disk_state2str(disk->d_state)));
2732		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2733		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2734		    g_mirror_device_state2str(sc->sc_state),
2735		    g_mirror_get_diskname(disk),
2736		    g_mirror_disk_state2str(disk->d_state)));
2737		DISK_STATE_CHANGED();
2738
2739		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
2740			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2741		disk->d_state = state;
2742		if (sc->sc_provider != NULL) {
2743			g_mirror_sync_start(disk);
2744			g_mirror_update_metadata(disk);
2745		}
2746		break;
2747	case G_MIRROR_DISK_STATE_DISCONNECTED:
2748		/*
2749		 * Possible scenarios:
2750		 * 1. Device wasn't running yet, but a disk disappeared.
2751		 * 2. Disk was active and disappeared.
2752		 * 3. Disk disappeared during the synchronization process.
2753		 */
2754		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
2755			/*
2756			 * Previous state should be ACTIVE, STALE or
2757			 * SYNCHRONIZING.
2758			 */
2759			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
2760			    disk->d_state == G_MIRROR_DISK_STATE_STALE ||
2761			    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2762			    ("Wrong disk state (%s, %s).",
2763			    g_mirror_get_diskname(disk),
2764			    g_mirror_disk_state2str(disk->d_state)));
2765		} else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
2766			/* Previous state should be NEW. */
2767			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2768			    ("Wrong disk state (%s, %s).",
2769			    g_mirror_get_diskname(disk),
2770			    g_mirror_disk_state2str(disk->d_state)));
2771			/*
2772			 * Cancel the pending syncid bump if the disk
2773			 * disappeared in the STARTING state.
2774			 */
2775			if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0)
2776				sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
2777#ifdef	INVARIANTS
2778		} else {
2779			KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2780			    sc->sc_name,
2781			    g_mirror_device_state2str(sc->sc_state),
2782			    g_mirror_get_diskname(disk),
2783			    g_mirror_disk_state2str(disk->d_state)));
2784#endif
2785		}
2786		DISK_STATE_CHANGED();
2787		G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.",
2788		    sc->sc_name, g_mirror_get_diskname(disk));
2789
2790		g_mirror_destroy_disk(disk);
2791		break;
2792	case G_MIRROR_DISK_STATE_DESTROY:
2793	    {
2794		int error;
2795
2796		error = g_mirror_clear_metadata(disk);
2797		if (error != 0) {
2798			G_MIRROR_DEBUG(0,
2799			    "Device %s: failed to clear metadata on %s: %d.",
2800			    sc->sc_name, g_mirror_get_diskname(disk), error);
2801			break;
2802		}
2803		DISK_STATE_CHANGED();
2804		G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.",
2805		    sc->sc_name, g_mirror_get_diskname(disk));
2806
2807		g_mirror_destroy_disk(disk);
2808		sc->sc_ndisks--;
2809		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2810			g_mirror_update_metadata(disk);
2811		}
2812		break;
2813	    }
2814	default:
2815		KASSERT(1 == 0, ("Unknown state (%u).", state));
2816		break;
2817	}
2818	return (0);
2819}
2820#undef	DISK_STATE_CHANGED
2821
2822int
2823g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md)
2824{
2825	struct g_provider *pp;
2826	u_char *buf;
2827	int error;
2828
2829	g_topology_assert();
2830
2831	error = g_access(cp, 1, 0, 0);
2832	if (error != 0)
2833		return (error);
2834	pp = cp->provider;
2835	g_topology_unlock();
2836	/* Metadata is stored in the last sector. */
2837	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2838	    &error);
2839	g_topology_lock();
2840	g_access(cp, -1, 0, 0);
2841	if (buf == NULL) {
2842		G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).",
2843		    cp->provider->name, error);
2844		return (error);
2845	}
2846
2847	/* Decode metadata. */
2848	error = mirror_metadata_decode(buf, md);
2849	g_free(buf);
2850	if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0)
2851		return (EINVAL);
2852	if (md->md_version > G_MIRROR_VERSION) {
2853		G_MIRROR_DEBUG(0,
2854		    "Kernel module is too old to handle metadata from %s.",
2855		    cp->provider->name);
2856		return (EINVAL);
2857	}
2858	if (error != 0) {
2859		G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
2860		    cp->provider->name);
2861		return (error);
2862	}
2863
2864	return (0);
2865}
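
/*
 * The metadata offset used above is simply mediasize - sectorsize.
 * For example, on a hypothetical 10 GB provider with 512-byte sectors
 * the metadata sector is read at byte offset
 * 10737418240 - 512 = 10737417728.
 */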
2866
2867static int
2868g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp,
2869    struct g_mirror_metadata *md)
2870{
2871
2872	if (g_mirror_id2disk(sc, md->md_did) != NULL) {
2873		G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.",
2874		    pp->name, md->md_did);
2875		return (EEXIST);
2876	}
2877	if (md->md_all != sc->sc_ndisks) {
2878		G_MIRROR_DEBUG(1,
2879		    "Invalid '%s' field on disk %s (device %s), skipping.",
2880		    "md_all", pp->name, sc->sc_name);
2881		return (EINVAL);
2882	}
2883	if (md->md_slice != sc->sc_slice) {
2884		G_MIRROR_DEBUG(1,
2885		    "Invalid '%s' field on disk %s (device %s), skipping.",
2886		    "md_slice", pp->name, sc->sc_name);
2887		return (EINVAL);
2888	}
2889	if (md->md_balance != sc->sc_balance) {
2890		G_MIRROR_DEBUG(1,
2891		    "Invalid '%s' field on disk %s (device %s), skipping.",
2892		    "md_balance", pp->name, sc->sc_name);
2893		return (EINVAL);
2894	}
2895#if 0
2896	if (md->md_mediasize != sc->sc_mediasize) {
2897		G_MIRROR_DEBUG(1,
2898		    "Invalid '%s' field on disk %s (device %s), skipping.",
2899		    "md_mediasize", pp->name, sc->sc_name);
2900		return (EINVAL);
2901	}
2902#endif
2903	if (sc->sc_mediasize > pp->mediasize) {
2904		G_MIRROR_DEBUG(1,
2905		    "Invalid size of disk %s (device %s), skipping.", pp->name,
2906		    sc->sc_name);
2907		return (EINVAL);
2908	}
2909	if (md->md_sectorsize != sc->sc_sectorsize) {
2910		G_MIRROR_DEBUG(1,
2911		    "Invalid '%s' field on disk %s (device %s), skipping.",
2912		    "md_sectorsize", pp->name, sc->sc_name);
2913		return (EINVAL);
2914	}
2915	if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2916		G_MIRROR_DEBUG(1,
2917		    "Invalid sector size of disk %s (device %s), skipping.",
2918		    pp->name, sc->sc_name);
2919		return (EINVAL);
2920	}
2921	if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) {
2922		G_MIRROR_DEBUG(1,
2923		    "Invalid device flags on disk %s (device %s), skipping.",
2924		    pp->name, sc->sc_name);
2925		return (EINVAL);
2926	}
2927	if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) {
2928		G_MIRROR_DEBUG(1,
2929		    "Invalid disk flags on disk %s (device %s), skipping.",
2930		    pp->name, sc->sc_name);
2931		return (EINVAL);
2932	}
2933	return (0);
2934}
2935
2936int
2937g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
2938    struct g_mirror_metadata *md)
2939{
2940	struct g_mirror_disk *disk;
2941	int error;
2942
2943	g_topology_assert_not();
2944	G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name);
2945
2946	error = g_mirror_check_metadata(sc, pp, md);
2947	if (error != 0)
2948		return (error);
2949	if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING &&
2950	    md->md_genid < sc->sc_genid) {
2951		G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.",
2952		    pp->name, sc->sc_name);
2953		return (EINVAL);
2954	}
2955	disk = g_mirror_init_disk(sc, pp, md, &error);
2956	if (disk == NULL)
2957		return (error);
2958	error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW,
2959	    G_MIRROR_EVENT_WAIT);
2960	if (error != 0)
2961		return (error);
2962	if (md->md_version < G_MIRROR_VERSION) {
2963		G_MIRROR_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
2964		    pp->name, md->md_version, G_MIRROR_VERSION);
2965		g_mirror_update_metadata(disk);
2966	}
2967	return (0);
2968}
2969
2970static void
2971g_mirror_destroy_delayed(void *arg, int flag)
2972{
2973	struct g_mirror_softc *sc;
2974	int error;
2975
2976	if (flag == EV_CANCEL) {
2977		G_MIRROR_DEBUG(1, "Destroying canceled.");
2978		return;
2979	}
2980	sc = arg;
2981	g_topology_unlock();
2982	sx_xlock(&sc->sc_lock);
2983	KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0,
2984	    ("DESTROY flag set on %s.", sc->sc_name));
2985	KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0,
2986	    ("CLOSEWAIT flag not set on %s.", sc->sc_name));
2987	G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name);
2988	error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT);
2989	if (error != 0) {
2990		G_MIRROR_DEBUG(0, "Cannot destroy %s (error=%d).",
2991		    sc->sc_name, error);
2992		sx_xunlock(&sc->sc_lock);
2993	}
2994	g_topology_lock();
2995}
2996
2997static int
2998g_mirror_access(struct g_provider *pp, int acr, int acw, int ace)
2999{
3000	struct g_mirror_softc *sc;
3001	int error = 0;
3002
3003	g_topology_assert();
3004	G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
3005	    acw, ace);
3006
3007	sc = pp->private;
3008	KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
3009
3010	g_topology_unlock();
3011	sx_xlock(&sc->sc_lock);
3012	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 ||
3013	    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0 ||
3014	    LIST_EMPTY(&sc->sc_disks)) {
3015		if (acr > 0 || acw > 0 || ace > 0)
3016			error = ENXIO;
3017		goto end;
3018	}
3019	sc->sc_provider_open += acr + acw + ace;
3020	if (pp->acw + acw == 0)
3021		g_mirror_idle(sc, 0);
3022	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0 &&
3023	    sc->sc_provider_open == 0)
3024		g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, sc, NULL);
3025end:
3026	sx_xunlock(&sc->sc_lock);
3027	g_topology_lock();
3028	return (error);
3029}
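
/*
 * A usage note on the accounting above: sc_provider_open tracks the sum
 * of all read, write and exclusive opens of the provider.  For example,
 * an open with g_access(cp, 1, 1, 1) followed by a close with
 * g_access(cp, -1, -1, -1) raises the count by 3 and then returns it to
 * 0; if the CLOSEWAIT flag was set in the meantime, that final close is
 * what schedules g_mirror_destroy_delayed().
 */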
3030
3031struct g_geom *
3032g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md,
3033    u_int type)
3034{
3035	struct g_mirror_softc *sc;
3036	struct g_geom *gp;
3037	int error, timeout;
3038
3039	g_topology_assert();
3040	G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name,
3041	    md->md_mid);
3042
3043	/* At least one disk is required. */
3044	if (md->md_all < 1)
3045		return (NULL);
3046	/*
3047	 * Action geom.
3048	 */
3049	gp = g_new_geomf(mp, "%s", md->md_name);
3050	sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO);
3051	gp->start = g_mirror_start;
3052	gp->orphan = g_mirror_orphan;
3053	gp->access = g_mirror_access;
3054	gp->dumpconf = g_mirror_dumpconf;
3055
3056	sc->sc_type = type;
3057	sc->sc_id = md->md_mid;
3058	sc->sc_slice = md->md_slice;
3059	sc->sc_balance = md->md_balance;
3060	sc->sc_mediasize = md->md_mediasize;
3061	sc->sc_sectorsize = md->md_sectorsize;
3062	sc->sc_ndisks = md->md_all;
3063	sc->sc_flags = md->md_mflags;
3064	sc->sc_bump_id = 0;
3065	sc->sc_idle = 1;
3066	sc->sc_last_write = time_uptime;
3067	sc->sc_writes = 0;
3068	sc->sc_refcnt = 1;
3069	sx_init(&sc->sc_lock, "gmirror:lock");
3070	TAILQ_INIT(&sc->sc_queue);
3071	mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF);
3072	TAILQ_INIT(&sc->sc_regular_delayed);
3073	TAILQ_INIT(&sc->sc_inflight);
3074	TAILQ_INIT(&sc->sc_sync_delayed);
3075	LIST_INIT(&sc->sc_disks);
3076	TAILQ_INIT(&sc->sc_events);
3077	mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF);
3078	callout_init(&sc->sc_callout, 1);
3079	mtx_init(&sc->sc_done_mtx, "gmirror:done", NULL, MTX_DEF);
3080	sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING;
3081	gp->softc = sc;
3082	sc->sc_geom = gp;
3083	sc->sc_provider = NULL;
3084	sc->sc_provider_open = 0;
3085	/*
3086	 * Synchronization geom.
3087	 */
3088	gp = g_new_geomf(mp, "%s.sync", md->md_name);
3089	gp->softc = sc;
3090	gp->orphan = g_mirror_orphan;
3091	sc->sc_sync.ds_geom = gp;
3092	sc->sc_sync.ds_ndisks = 0;
3093	error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0,
3094	    "g_mirror %s", md->md_name);
3095	if (error != 0) {
3096		G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.",
3097		    sc->sc_name);
3098		g_destroy_geom(sc->sc_sync.ds_geom);
3099		g_destroy_geom(sc->sc_geom);
3100		g_mirror_free_device(sc);
3101		return (NULL);
3102	}
3103
3104	G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).",
3105	    sc->sc_name, sc->sc_ndisks, sc->sc_id);
3106
3107	sc->sc_rootmount = root_mount_hold("GMIRROR");
3108	G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
3109	/*
3110	 * Schedule the startup timeout.
3111	 */
3112	timeout = g_mirror_timeout * hz;
3113	callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc);
3114	return (sc->sc_geom);
3115}
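
/*
 * An example of the naming scheme used above: for a hypothetical
 * metadata name "gm0", g_mirror_create() creates the action geom "gm0"
 * and the synchronization geom "gm0.sync"; once the device reaches the
 * RUNNING state, g_mirror_launch_provider() adds the provider
 * "mirror/gm0" that consumers actually open.
 */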
3116
3117int
3118g_mirror_destroy(struct g_mirror_softc *sc, int how)
3119{
3120	struct g_mirror_disk *disk;
3121
3122	g_topology_assert_not();
3123	sx_assert(&sc->sc_lock, SX_XLOCKED);
3124
3125	if (sc->sc_provider_open != 0) {
3126		switch (how) {
3127		case G_MIRROR_DESTROY_SOFT:
3128			G_MIRROR_DEBUG(1,
3129			    "Device %s is still open (%d).", sc->sc_name,
3130			    sc->sc_provider_open);
3131			return (EBUSY);
3132		case G_MIRROR_DESTROY_DELAYED:
3133			G_MIRROR_DEBUG(1,
3134			    "Device %s will be destroyed on last close.",
3135			    sc->sc_name);
3136			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
3137				if (disk->d_state ==
3138				    G_MIRROR_DISK_STATE_SYNCHRONIZING) {
3139					g_mirror_sync_stop(disk, 1);
3140				}
3141			}
3142			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_CLOSEWAIT;
3143			return (EBUSY);
3144		case G_MIRROR_DESTROY_HARD:
3145			G_MIRROR_DEBUG(1, "Device %s is still open, so it "
3146			    "can't be definitely removed.", sc->sc_name);
3147		}
3148	}
3149
3150	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
3151		sx_xunlock(&sc->sc_lock);
3152		return (0);
3153	}
3154	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
3155	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DRAIN;
3156	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
3157	sx_xunlock(&sc->sc_lock);
3158	mtx_lock(&sc->sc_queue_mtx);
3159	wakeup(sc);
3160	mtx_unlock(&sc->sc_queue_mtx);
3161	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
3162	while (sc->sc_worker != NULL)
3163		tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5);
3164	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
3165	sx_xlock(&sc->sc_lock);
3166	g_mirror_destroy_device(sc);
3167	return (0);
3168}
3169
3170static void
3171g_mirror_taste_orphan(struct g_consumer *cp)
3172{
3173
3174	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
3175	    cp->provider->name));
3176}
3177
3178static struct g_geom *
3179g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
3180{
3181	struct g_mirror_metadata md;
3182	struct g_mirror_softc *sc;
3183	struct g_consumer *cp;
3184	struct g_geom *gp;
3185	int error;
3186
3187	g_topology_assert();
3188	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
3189	G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);
3190
3191	gp = g_new_geomf(mp, "mirror:taste");
3192	/*
3193	 * This orphan function should never be called.
3194	 */
3195	gp->orphan = g_mirror_taste_orphan;
3196	cp = g_new_consumer(gp);
3197	g_attach(cp, pp);
3198	error = g_mirror_read_metadata(cp, &md);
3199	g_detach(cp);
3200	g_destroy_consumer(cp);
3201	g_destroy_geom(gp);
3202	if (error != 0)
3203		return (NULL);
3204	gp = NULL;
3205
3206	if (md.md_provider[0] != '\0' &&
3207	    !g_compare_names(md.md_provider, pp->name))
3208		return (NULL);
3209	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
3210		return (NULL);
3211	if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
3212		G_MIRROR_DEBUG(0,
3213		    "Device %s: provider %s marked as inactive, skipping.",
3214		    md.md_name, pp->name);
3215		return (NULL);
3216	}
3217	if (g_mirror_debug >= 2)
3218		mirror_metadata_dump(&md);
3219
3220	/*
3221	 * Let's check if device already exists.
3222	 */
3223	sc = NULL;
3224	LIST_FOREACH(gp, &mp->geom, geom) {
3225		sc = gp->softc;
3226		if (sc == NULL)
3227			continue;
3228		if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC)
3229			continue;
3230		if (sc->sc_sync.ds_geom == gp)
3231			continue;
3232		if (strcmp(md.md_name, sc->sc_name) != 0)
3233			continue;
3234		if (md.md_mid != sc->sc_id) {
3235			G_MIRROR_DEBUG(0, "Device %s already configured.",
3236			    sc->sc_name);
3237			return (NULL);
3238		}
3239		break;
3240	}
3241	if (gp == NULL) {
3242		gp = g_mirror_create(mp, &md, G_MIRROR_TYPE_AUTOMATIC);
3243		if (gp == NULL) {
3244			G_MIRROR_DEBUG(0, "Cannot create device %s.",
3245			    md.md_name);
3246			return (NULL);
3247		}
3248		sc = gp->softc;
3249	}
3250	G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
3251	g_topology_unlock();
3252	sx_xlock(&sc->sc_lock);
3253	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_TASTING;
3254	error = g_mirror_add_disk(sc, pp, &md);
3255	if (error != 0) {
3256		G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
3257		    pp->name, gp->name, error);
3258		if (LIST_EMPTY(&sc->sc_disks)) {
3259			g_cancel_event(sc);
3260			g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD);
3261			g_topology_lock();
3262			return (NULL);
3263		}
3264		gp = NULL;
3265	}
3266	sc->sc_flags &= ~G_MIRROR_DEVICE_FLAG_TASTING;
3267	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
3268		g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD);
3269		g_topology_lock();
3270		return (NULL);
3271	}
3272	sx_xunlock(&sc->sc_lock);
3273	g_topology_lock();
3274	return (gp);
3275}
3276
3277static void
3278g_mirror_resize(struct g_consumer *cp)
3279{
3280	struct g_mirror_disk *disk;
3281
3282	g_topology_assert();
3283	g_trace(G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name);
3284
3285	disk = cp->private;
3286	if (disk == NULL)
3287		return;
3288	g_topology_unlock();
3289	g_mirror_update_metadata(disk);
3290	g_topology_lock();
3291}
3292
3293static int
3294g_mirror_destroy_geom(struct gctl_req *req __unused,
3295    struct g_class *mp __unused, struct g_geom *gp)
3296{
3297	struct g_mirror_softc *sc;
3298	int error;
3299
3300	g_topology_unlock();
3301	sc = gp->softc;
3302	sx_xlock(&sc->sc_lock);
3303	g_cancel_event(sc);
3304	error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT);
3305	if (error != 0)
3306		sx_xunlock(&sc->sc_lock);
3307	g_topology_lock();
3308	return (error);
3309}
3310
3311static void
3312g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
3313    struct g_consumer *cp, struct g_provider *pp)
3314{
3315	struct g_mirror_softc *sc;
3316
3317	g_topology_assert();
3318
3319	sc = gp->softc;
3320	if (sc == NULL)
3321		return;
3322	/* Skip synchronization geom. */
3323	if (gp == sc->sc_sync.ds_geom)
3324		return;
3325	if (pp != NULL) {
3326		/* Nothing here. */
3327	} else if (cp != NULL) {
3328		struct g_mirror_disk *disk;
3329
3330		disk = cp->private;
3331		if (disk == NULL)
3332			return;
3333		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id);
3334		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
3335			sbuf_printf(sb, "%s<Synchronized>", indent);
3336			if (disk->d_sync.ds_offset == 0)
3337				sbuf_printf(sb, "0%%");
3338			else
3339				sbuf_printf(sb, "%u%%",
3340				    (u_int)((disk->d_sync.ds_offset * 100) /
3341				    sc->sc_mediasize));
3342			sbuf_printf(sb, "</Synchronized>\n");
3343			if (disk->d_sync.ds_offset > 0)
3344				sbuf_printf(sb, "%s<BytesSynced>%jd"
3345				    "</BytesSynced>\n", indent,
3346				    (intmax_t)disk->d_sync.ds_offset);
3347		}
3348		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
3349		    disk->d_sync.ds_syncid);
3350		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent,
3351		    disk->d_genid);
3352		sbuf_printf(sb, "%s<Flags>", indent);
3353		if (disk->d_flags == 0)
3354			sbuf_printf(sb, "NONE");
3355		else {
3356			int first = 1;
3357
3358#define	ADD_FLAG(flag, name)	do {					\
3359	if ((disk->d_flags & (flag)) != 0) {				\
3360		if (!first)						\
3361			sbuf_printf(sb, ", ");				\
3362		else							\
3363			first = 0;					\
3364		sbuf_printf(sb, name);					\
3365	}								\
3366} while (0)
3367			ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY");
3368			ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED");
3369			ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE");
3370			ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING,
3371			    "SYNCHRONIZING");
3372			ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
3373			ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN");
3374#undef	ADD_FLAG
3375		}
3376		sbuf_printf(sb, "</Flags>\n");
3377		sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent,
3378		    disk->d_priority);
3379		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3380		    g_mirror_disk_state2str(disk->d_state));
3381	} else {
3382		sbuf_printf(sb, "%s<Type>", indent);
3383		switch (sc->sc_type) {
3384		case G_MIRROR_TYPE_AUTOMATIC:
3385			sbuf_printf(sb, "AUTOMATIC");
3386			break;
3387		case G_MIRROR_TYPE_MANUAL:
3388			sbuf_printf(sb, "MANUAL");
3389			break;
3390		default:
3391			sbuf_printf(sb, "UNKNOWN");
3392			break;
3393		}
3394		sbuf_printf(sb, "</Type>\n");
3395		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
3396		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
3397		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
3398		sbuf_printf(sb, "%s<Flags>", indent);
3399		if (sc->sc_flags == 0)
3400			sbuf_printf(sb, "NONE");
3401		else {
3402			int first = 1;
3403
3404#define	ADD_FLAG(flag, name)	do {					\
3405	if ((sc->sc_flags & (flag)) != 0) {				\
3406		if (!first)						\
3407			sbuf_printf(sb, ", ");				\
3408		else							\
3409			first = 0;					\
3410		sbuf_printf(sb, name);					\
3411	}								\
3412} while (0)
3413			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
3414			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
3415#undef	ADD_FLAG
3416		}
3417		sbuf_printf(sb, "</Flags>\n");
3418		sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent,
3419		    (u_int)sc->sc_slice);
3420		sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent,
3421		    balance_name(sc->sc_balance));
3422		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
3423		    sc->sc_ndisks);
3424		sbuf_printf(sb, "%s<State>", indent);
3425		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
3426			sbuf_printf(sb, "%s", "STARTING");
3427		else if (sc->sc_ndisks ==
3428		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE))
3429			sbuf_printf(sb, "%s", "COMPLETE");
3430		else
3431			sbuf_printf(sb, "%s", "DEGRADED");
3432		sbuf_printf(sb, "</State>\n");
3433	}
3434}
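
/*
 * An illustrative fragment of the device-level XML emitted above for a
 * healthy two-way mirror (the values are hypothetical):
 *
 *	<Type>AUTOMATIC</Type>
 *	<ID>1234567890</ID>
 *	<SyncID>1</SyncID>
 *	<GenID>0</GenID>
 *	<Flags>NONE</Flags>
 *	<Slice>4096</Slice>
 *	<Balance>load</Balance>
 *	<Components>2</Components>
 *	<State>COMPLETE</State>
 */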
3435
3436static void
3437g_mirror_shutdown_post_sync(void *arg, int howto)
3438{
3439	struct g_class *mp;
3440	struct g_geom *gp, *gp2;
3441	struct g_mirror_softc *sc;
3442	int error;
3443
3444	if (panicstr != NULL)
3445		return;
3446
3447	mp = arg;
3448	g_topology_lock();
3449	g_mirror_shutdown = 1;
3450	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
3451		if ((sc = gp->softc) == NULL)
3452			continue;
3453		/* Skip synchronization geom. */
3454		if (gp == sc->sc_sync.ds_geom)
3455			continue;
3456		g_topology_unlock();
3457		sx_xlock(&sc->sc_lock);
3458		g_mirror_idle(sc, -1);
3459		g_cancel_event(sc);
3460		error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED);
3461		if (error != 0)
3462			sx_xunlock(&sc->sc_lock);
3463		g_topology_lock();
3464	}
3465	g_topology_unlock();
3466}
3467
3468static void
3469g_mirror_init(struct g_class *mp)
3470{
3471
3472	g_mirror_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
3473	    g_mirror_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
3474	if (g_mirror_post_sync == NULL)
3475		G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event.");
3476}
3477
3478static void
3479g_mirror_fini(struct g_class *mp)
3480{
3481
3482	if (g_mirror_post_sync != NULL)
3483		EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_post_sync);
3484}
3485
3486DECLARE_GEOM_CLASS(g_mirror_class, g_mirror);
3487MODULE_VERSION(geom_mirror, 0);
3488