g_mirror.c revision 328334
/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/geom/mirror/g_mirror.c 328334 2018-01-24 15:16:17Z markj $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/eventhandler.h>
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <geom/geom.h>
#include <geom/mirror/g_mirror.h>

FEATURE(geom_mirror, "GEOM mirroring support");

static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0,
    "GEOM_MIRROR stuff");
int g_mirror_debug = 0;
SYSCTL_INT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RWTUN, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_timeout = 4;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
static u_int g_mirror_idletime = 5;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RWTUN,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
static u_int g_mirror_disconnect_on_failure = 1;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN,
    &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_mirror_syncreqs = 2;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests.");
static u_int g_mirror_sync_period = 5;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_update_period, CTLFLAG_RWTUN,
    &g_mirror_sync_period, 0,
    "Metadata update period during synchronization, in seconds");

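/*
 * Convenience wrapper around msleep() which logs the sleep and the
 * wakeup at debug level 4.
 */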
#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)

static eventhandler_tag g_mirror_post_sync = NULL;
static int g_mirror_shutdown = 0;

static g_ctl_destroy_geom_t g_mirror_destroy_geom;
static g_taste_t g_mirror_taste;
static g_init_t g_mirror_init;
static g_fini_t g_mirror_fini;
static g_provgone_t g_mirror_providergone;
static g_resize_t g_mirror_resize;

struct g_class g_mirror_class = {
	.name = G_MIRROR_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_mirror_config,
	.taste = g_mirror_taste,
	.destroy_geom = g_mirror_destroy_geom,
	.init = g_mirror_init,
	.fini = g_mirror_fini,
	.providergone = g_mirror_providergone,
	.resize = g_mirror_resize
};


static void g_mirror_destroy_provider(struct g_mirror_softc *sc);
static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
static void g_mirror_update_device(struct g_mirror_softc *sc, bool force);
static void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_mirror_sync_reinit(const struct g_mirror_disk *disk,
    struct bio *bp, off_t offset);
static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type);
static void g_mirror_register_request(struct g_mirror_softc *sc,
    struct bio *bp);
static void g_mirror_sync_release(struct g_mirror_softc *sc);


static const char *
g_mirror_disk_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DISK_STATE_NONE:
		return ("NONE");
	case G_MIRROR_DISK_STATE_NEW:
		return ("NEW");
	case G_MIRROR_DISK_STATE_ACTIVE:
		return ("ACTIVE");
	case G_MIRROR_DISK_STATE_STALE:
		return ("STALE");
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		return ("SYNCHRONIZING");
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		return ("DISCONNECTED");
	case G_MIRROR_DISK_STATE_DESTROY:
		return ("DESTROY");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_device_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DEVICE_STATE_STARTING:
		return ("STARTING");
	case G_MIRROR_DEVICE_STATE_RUNNING:
		return ("RUNNING");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_get_diskname(struct g_mirror_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_name);
}

/*
 * --- Event handling functions ---
 * Events in geom_mirror are used to update disk and device status
 * from a single thread, which simplifies locking.
 */
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

	free(ep, M_MIRROR);
}

int
g_mirror_event_send(void *arg, int state, int flags)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
		return (0);
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
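	/*
	 * Sleep with a 5 second timeout and re-check the DONE flag on
	 * each wakeup. The event may be completed between the flag
	 * check and the msleep() call, so the timeout guards against a
	 * missed wakeup.
	 */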
	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_mirror_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}

static struct g_mirror_event *
g_mirror_event_first(struct g_mirror_softc *sc)
{
	struct g_mirror_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_mirror_event_cancel(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_mirror_ndisks(struct g_mirror_softc *sc, int state)
{
	struct g_mirror_disk *disk;
	u_int n = 0;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (state == -1 || disk->d_state == state)
			n++;
	}
	return (n);
}

/*
 * Find a disk in the mirror by its disk ID.
 */
static struct g_mirror_disk *
g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
{
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_id == id)
			return (disk);
	}
	return (NULL);
}

static u_int
g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

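/*
 * A consumer's "index" field counts I/O requests that have been issued
 * to the component but not yet processed by the worker thread.
 */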
static int
g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_mirror_nrequests(sc, cp) > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event is sent (inside g_access()), we can
		 * post an event to detach and destroy the consumer.
		 * A class which already has a consumer attached to the given
		 * provider will not receive a retaste event for that provider.
		 * This is how retaste events are ignored for consumers opened
		 * for writing: the consumer is detached and destroyed only
		 * after the retaste event has been sent.
		 */
		g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static int
g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	cp->flags |= G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_topology_unlock();
		G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	g_topology_unlock();
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;

	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
	return (0);
}

static void
g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_mirror_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize a disk. This means: allocate memory, create a consumer,
 * attach it to the provider, and open access (r1w1e1) to it.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int i, error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	disk->d_id = md->md_did;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_priority = md->md_priority;
	disk->d_flags = md->md_dflags;
	error = g_getattr("GEOM::candelete", disk->d_consumer, &i);
	if (error == 0 && i != 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_CANDELETE;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_sync.ds_update_ts = time_uptime;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL)
		free(disk, M_MIRROR);
	return (NULL);
}

static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_REMOVE(disk, d_next);
	g_mirror_event_cancel(disk);
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk, 1);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_STALE:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		free(disk, M_MIRROR);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}

static void
g_mirror_free_device(struct g_mirror_softc *sc)
{

	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	mtx_destroy(&sc->sc_done_mtx);
	sx_destroy(&sc->sc_lock);
	free(sc, M_MIRROR);
}

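/*
 * sc_refcnt is dropped both when the provider goes away and when the
 * device is destroyed; the softc is freed by whichever happens last.
 */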
static void
g_mirror_providergone(struct g_provider *pp)
{
	struct g_mirror_softc *sc = pp->private;

	if ((--sc->sc_refcnt) == 0)
		g_mirror_free_device(sc);
}

static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;
	struct g_consumer *cp, *tmpcp;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
		g_mirror_destroy_disk(disk);
	}
	while ((ep = g_mirror_event_first(sc)) != NULL) {
		g_mirror_event_remove(sc, ep);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);

	g_topology_lock();
	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
		g_mirror_disconnect_consumer(sc, cp);
	}
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	sx_xunlock(&sc->sc_lock);
	if ((--sc->sc_refcnt) == 0)
		g_mirror_free_device(sc);
	g_topology_unlock();
}

static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}

/*
 * Return the next active disk on the list.
 * It is possible that it will be the same disk as the given one.
 * If there are no active disks on the list, NULL is returned.
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

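	/*
	 * Walk the list circularly: LIST_NEXT() returns NULL at the end
	 * of the list, in which case we wrap around to the head. The
	 * loop stops at the first ACTIVE disk, or once it has come all
	 * the way back to the starting disk.
	 */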
	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}

static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_hint == NULL) {
		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
		if (sc->sc_hint == NULL)
			return (NULL);
	}
	disk = sc->sc_hint;
	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
		disk = g_mirror_find_next(sc, disk);
		if (disk == NULL)
			return (NULL);
	}
	sc->sc_hint = g_mirror_find_next(sc, disk);
	return (disk);
}

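/*
 * Write the given metadata (or zeroes, if md is NULL or the WIPE flag
 * is set) to the component's last sector, where gmirror keeps its
 * on-disk metadata.
 */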
645g_mirror_write_metadata(struct g_mirror_disk *disk,
646    struct g_mirror_metadata *md)
647{
648	struct g_mirror_softc *sc;
649	struct g_consumer *cp;
650	off_t offset, length;
651	u_char *sector;
652	int error = 0;
653
654	g_topology_assert_not();
655	sc = disk->d_softc;
656	sx_assert(&sc->sc_lock, SX_LOCKED);
657
658	cp = disk->d_consumer;
659	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
660	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
661	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
662	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
663	    cp->acw, cp->ace));
664	length = cp->provider->sectorsize;
665	offset = cp->provider->mediasize - length;
666	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
667	if (md != NULL &&
668	    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0) {
		/*
		 * Handle the case when the size of the parent provider
		 * has been reduced.
		 */
		if (offset < md->md_mediasize)
			error = ENOSPC;
		else
			mirror_metadata_encode(md, sector);
	}
	KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_metadata_write, error);
	if (error == 0)
		error = g_write_data(cp, offset, sector, length);
	free(sector, M_MIRROR);
	if (error != 0) {
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
			disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
			G_MIRROR_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		} else {
			G_MIRROR_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		}
		if (g_mirror_disconnect_on_failure &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
		}
	}
	return (error);
}

static int
g_mirror_clear_metadata(struct g_mirror_disk *disk)
{
	int error;

	g_topology_assert_not();
	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

	if (disk->d_softc->sc_type != G_MIRROR_TYPE_AUTOMATIC)
		return (0);
	error = g_mirror_write_metadata(disk, NULL);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot clear metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
	return (error);
}

void
g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{

	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
	md->md_version = G_MIRROR_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_mid = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_slice = sc->sc_slice;
	md->md_balance = sc->sc_balance;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
	bzero(md->md_provider, sizeof(md->md_provider));
	if (disk == NULL) {
		md->md_did = arc4random();
		md->md_priority = 0;
		md->md_syncid = 0;
		md->md_dflags = 0;
		md->md_sync_offset = 0;
		md->md_provsize = 0;
	} else {
		md->md_did = disk->d_id;
		md->md_priority = disk->d_priority;
		md->md_syncid = disk->d_sync.ds_syncid;
		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			md->md_sync_offset = disk->d_sync.ds_offset_done;
		else
			md->md_sync_offset = 0;
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
			strlcpy(md->md_provider,
			    disk->d_consumer->provider->name,
			    sizeof(md->md_provider));
		}
		md->md_provsize = disk->d_consumer->provider->mediasize;
	}
}

void
g_mirror_update_metadata(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC)
		return;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0)
		g_mirror_fill_metadata(sc, disk, &md);
	error = g_mirror_write_metadata(disk, &md);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
}

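/*
 * The syncid marks a generation of writes: a component with a stale
 * syncid has missed writes and must be synchronized before it can be
 * used again. The genid (bumped below) is advanced on I/O errors
 * instead, and a component with a stale genid is treated as broken.
 */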
static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_mirror_update_metadata(disk);
		}
	}
}

static void
g_mirror_bump_genid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_mirror_update_metadata(disk);
		}
	}
}

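/*
 * If the mirror has been write-idle for long enough, mark all active
 * components as clean. Otherwise return the number of seconds to wait
 * before checking again.
 */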
static int
g_mirror_idle(struct g_mirror_softc *sc, int acw)
{
	struct g_mirror_disk *disk;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write);
		if (!g_mirror_shutdown && timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	return (0);
}

static void
g_mirror_unidle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
}

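/*
 * I/O completion callback for regular requests. Completions are queued
 * for the worker thread instead of being handled in this context.
 */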
static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR;
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_regular_request_error(struct g_mirror_softc *sc,
    struct g_mirror_disk *disk, struct bio *bp)
{

	if (bp->bio_cmd == BIO_FLUSH && bp->bio_error == EOPNOTSUPP)
		return;

	if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
		disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
		G_MIRROR_LOGREQ(0, bp, "Request failed (error=%d).",
		    bp->bio_error);
	} else {
		G_MIRROR_LOGREQ(1, bp, "Request failed (error=%d).",
		    bp->bio_error);
	}
	if (g_mirror_disconnect_on_failure &&
	    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
		if (bp->bio_error == ENXIO &&
		    bp->bio_cmd == BIO_READ)
			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
		else if (bp->bio_error == ENXIO)
			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID_NOW;
		else
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
		g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
		    G_MIRROR_EVENT_DONTWAIT);
	}
}

937
938static void
939g_mirror_regular_request(struct g_mirror_softc *sc, struct bio *bp)
940{
941	struct g_mirror_disk *disk;
942	struct bio *pbp;
943
944	g_topology_assert_not();
945	KASSERT(sc->sc_provider == bp->bio_parent->bio_to,
946	    ("regular request %p with unexpected origin", bp));
947
948	pbp = bp->bio_parent;
949	bp->bio_from->index--;
950	if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE)
951		sc->sc_writes--;
952	disk = bp->bio_from->private;
953	if (disk == NULL) {
954		g_topology_lock();
955		g_mirror_kill_consumer(sc, bp->bio_from);
956		g_topology_unlock();
957	}
958
959	switch (bp->bio_cmd) {
960	case BIO_READ:
961		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_read,
962		    bp->bio_error);
963		break;
964	case BIO_WRITE:
965		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_write,
966		    bp->bio_error);
967		break;
968	case BIO_DELETE:
969		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_delete,
970		    bp->bio_error);
971		break;
972	case BIO_FLUSH:
973		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_flush,
974		    bp->bio_error);
975		break;
976	}
977
978	pbp->bio_inbed++;
979	KASSERT(pbp->bio_inbed <= pbp->bio_children,
980	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
981	    pbp->bio_children));
982	if (bp->bio_error == 0 && pbp->bio_error == 0) {
983		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
984		g_destroy_bio(bp);
985		if (pbp->bio_children == pbp->bio_inbed) {
986			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
987			pbp->bio_completed = pbp->bio_length;
988			if (pbp->bio_cmd == BIO_WRITE ||
989			    pbp->bio_cmd == BIO_DELETE) {
990				TAILQ_REMOVE(&sc->sc_inflight, pbp, bio_queue);
991				/* Release delayed sync requests if possible. */
992				g_mirror_sync_release(sc);
993			}
994			g_io_deliver(pbp, pbp->bio_error);
995		}
996		return;
997	} else if (bp->bio_error != 0) {
998		if (pbp->bio_error == 0)
999			pbp->bio_error = bp->bio_error;
1000		if (disk != NULL)
1001			g_mirror_regular_request_error(sc, disk, bp);
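		/*
		 * For writes, deletes and flushes, exclude the failed
		 * request from the parent's completion count: the
		 * parent can still succeed if at least one component
		 * completed the request.
		 */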
		switch (pbp->bio_cmd) {
		case BIO_DELETE:
		case BIO_WRITE:
		case BIO_FLUSH:
			pbp->bio_inbed--;
			pbp->bio_children--;
			break;
		}
	}
	g_destroy_bio(bp);

	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (pbp->bio_inbed < pbp->bio_children)
			break;
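		/*
		 * A read failed on one component. If it was the last
		 * active one, deliver the error; otherwise clear the
		 * error and requeue the request so that it is retried,
		 * normally from a different component.
		 */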
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1)
			g_io_deliver(pbp, pbp->bio_error);
		else {
			pbp->bio_error = 0;
			mtx_lock(&sc->sc_queue_mtx);
			TAILQ_INSERT_TAIL(&sc->sc_queue, pbp, bio_queue);
			mtx_unlock(&sc->sc_queue_mtx);
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
			wakeup(sc);
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
	case BIO_FLUSH:
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_inbed < pbp->bio_children) {
			/* Do nothing. */
			break;
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* Some requests succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		}
		if (pbp->bio_cmd == BIO_WRITE || pbp->bio_cmd == BIO_DELETE) {
			TAILQ_REMOVE(&sc->sc_inflight, pbp, bio_queue);
			/* Release delayed sync requests if possible. */
			g_mirror_sync_release(sc);
		}
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
		break;
	}
}

static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_candelete(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	int *val;

	sc = bp->bio_to->private;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE)
			break;
	}
	val = (int *)bp->bio_data;
	*val = (disk != NULL);
	g_io_deliver(bp, 0);
}

static void
g_mirror_kernel_dump(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *cbp;
	struct g_kerneldump *gkd;

	/*
	 * We configure dumping to the first component, because this
	 * component will be used for reading with the 'prefer' balance
	 * algorithm. If the component with the highest priority is
	 * currently disconnected, we will not be able to read the dump
	 * after a reboot even if that component is connected and
	 * synchronized later. Can we do something better?
	 */
	sc = bp->bio_to->private;
	disk = LIST_FIRST(&sc->sc_disks);

	gkd = (struct g_kerneldump *)bp->bio_data;
	if (gkd->length > bp->bio_to->mediasize)
		gkd->length = bp->bio_to->mediasize;
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	g_io_request(cbp, disk->d_consumer);
	G_MIRROR_DEBUG(1, "Kernel dump will go to %s.",
	    g_mirror_get_diskname(disk));
}

static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->private;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (!strcmp(bp->bio_attribute, "GEOM::candelete")) {
			g_mirror_candelete(bp);
			return;
		} else if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) {
			g_mirror_kernel_dump(bp);
			return;
		}
		/* FALLTHROUGH */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	if (bp->bio_to->error != 0) {
		mtx_unlock(&sc->sc_queue_mtx);
		g_io_deliver(bp, bp->bio_to->error);
		return;
	}
	TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

/*
 * Return TRUE if the given request collides with an in-progress
 * synchronization request.
 */
static bool
g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	u_int i;

	if (sc->sc_sync.ds_ndisks == 0)
		return (false);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING)
			continue;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			sbp = disk->d_sync.ds_bios[i];
			if (sbp == NULL)
				continue;
			sstart = sbp->bio_offset;
			send = sbp->bio_offset + sbp->bio_length;
			if (rend > sstart && rstart < send)
				return (true);
		}
	}
	return (false);
}

/*
 * Return TRUE if the given sync request collides with an in-progress
 * regular request.
 */
static bool
g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_sync.ds_ndisks == 0)
		return (false);
	sstart = sbp->bio_offset;
	send = sbp->bio_offset + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (true);
	}
	return (false);
}

/*
 * Put a regular request onto the delayed queue.
 */
static void
g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying request.");
	TAILQ_INSERT_TAIL(&sc->sc_regular_delayed, bp, bio_queue);
}

/*
 * Put a synchronization request onto the delayed queue.
 */
static void
g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request.");
	TAILQ_INSERT_TAIL(&sc->sc_sync_delayed, bp, bio_queue);
}

/*
 * Requeue delayed regular requests.
 */
static void
g_mirror_regular_release(struct g_mirror_softc *sc)
{
	struct bio *bp;

	if ((bp = TAILQ_FIRST(&sc->sc_regular_delayed)) == NULL)
		return;
	if (g_mirror_sync_collision(sc, bp))
		return;

	G_MIRROR_DEBUG(2, "Requeuing regular requests after collision.");
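	/*
	 * Move the delayed requests to the head of the main queue while
	 * preserving their order: append the current queue to the
	 * delayed list, then swap the two heads.
	 */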
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_CONCAT(&sc->sc_regular_delayed, &sc->sc_queue, bio_queue);
	TAILQ_SWAP(&sc->sc_regular_delayed, &sc->sc_queue, bio, bio_queue);
	mtx_unlock(&sc->sc_queue_mtx);
}

/*
 * Release delayed sync requests that no longer collide with regular
 * requests.
 */
static void
g_mirror_sync_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed, bio_queue, bp2) {
		if (g_mirror_regular_collision(sc, bp))
			continue;
		TAILQ_REMOVE(&sc->sc_sync_delayed, bp, bio_queue);
		G_MIRROR_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Free a synchronization request and clear its slot in the array.
 */
static void
g_mirror_sync_request_free(struct g_mirror_disk *disk, struct bio *bp)
{
	int idx;

	if (disk != NULL && disk->d_sync.ds_bios != NULL) {
		idx = (int)(uintptr_t)bp->bio_caller1;
		KASSERT(disk->d_sync.ds_bios[idx] == bp,
		    ("unexpected sync BIO at %p:%d", disk, idx));
		disk->d_sync.ds_bios[idx] = NULL;
	}
	free(bp->bio_data, M_MIRROR);
	g_destroy_bio(bp);
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a read request is
 * sent to the mirror provider via the sync consumer. If that request completes
 * successfully, it is converted to a write and sent to the disk being
 * synchronized. If the write also completes successfully, the synchronization
 * offset is advanced and a new read request is submitted.
 */
static void
g_mirror_sync_request(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_mirror_disk_sync *sync;

	KASSERT((bp->bio_cmd == BIO_READ &&
	    bp->bio_from->geom == sc->sc_sync.ds_geom) ||
	    (bp->bio_cmd == BIO_WRITE && bp->bio_from->geom == sc->sc_geom),
	    ("Sync BIO %p with unexpected origin", bp));

	bp->bio_from->index--;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		g_mirror_sync_request_free(NULL, bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	sync = &disk->d_sync;

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ: {
		struct g_mirror_disk *d;
		struct g_consumer *cp;
		int readable;

		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_read,
		    bp->bio_error);

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);

			/*
			 * If there's at least one other disk from which we can
			 * read the block, retry the request.
			 */
			readable = 0;
			LIST_FOREACH(d, &sc->sc_disks, d_next)
				if (d->d_state == G_MIRROR_DISK_STATE_ACTIVE &&
				    !(d->d_flags & G_MIRROR_DISK_FLAG_BROKEN))
					readable++;

			/*
			 * The read error will trigger a syncid bump, so there's
			 * no need to do that here.
			 *
			 * If we can retry the read from another disk, do so.
			 * Otherwise, all we can do is kick out the new disk.
			 */
			if (readable == 0) {
				g_mirror_sync_request_free(disk, bp);
				g_mirror_event_send(disk,
				    G_MIRROR_DISK_STATE_DISCONNECTED,
				    G_MIRROR_EVENT_DONTWAIT);
			} else {
				g_mirror_sync_reinit(disk, bp, bp->bio_offset);
				goto retry_read;
			}
			return;
		}
		G_MIRROR_LOGREQ(3, bp,
		    "Synchronization request half-finished.");
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	}
	case BIO_WRITE: {
		off_t offset;
		int i;

		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_write,
		    bp->bio_error);

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_mirror_sync_request_free(disk, bp);
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		if (sync->ds_offset >= sc->sc_mediasize ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			g_mirror_sync_request_free(disk, bp);
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				return;
			}
			/* Disk up-to-date, activate it. */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		g_mirror_sync_reinit(disk, bp, sync->ds_offset);
		sync->ds_offset += bp->bio_length;

retry_read:
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;

		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Requeue delayed requests if possible. */
		g_mirror_regular_release(sc);

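		/*
		 * ds_offset_done must not advance past the lowest offset
		 * of any in-flight synchronization request: everything
		 * below it is known to be fully copied, so a later
		 * restart can safely resume from that offset.
		 */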
		/* Find the smallest offset. */
		offset = sc->sc_mediasize;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			bp = sync->ds_bios[i];
			if (bp != NULL && bp->bio_offset < offset)
				offset = bp->bio_offset;
		}
		if (g_mirror_sync_period > 0 &&
		    time_uptime - sync->ds_update_ts > g_mirror_sync_period) {
			sync->ds_offset_done = offset;
			g_mirror_update_metadata(disk);
			sync->ds_update_ts = time_uptime;
		}
		return;
	}
	default:
		panic("Invalid I/O request %p", bp);
	}
}

static void
g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

static void
g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	disk = g_mirror_get_disk(sc);
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

#define TRACK_SIZE  (1 * 1024 * 1024)
#define LOAD_SCALE	256
#define ABS(x)		(((x) >= 0) ? (x) : (-(x)))

static void
g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk, *dp;
	struct g_consumer *cp;
	struct bio *cbp;
	int prio, best;

	/* Find a disk with the smallest load. */
	disk = NULL;
	best = INT_MAX;
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		prio = dp->load;
		/* If disk head is precisely in position - highly prefer it. */
		if (dp->d_last_offset == bp->bio_offset)
			prio -= 2 * LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE)
			prio -= 1 * LOAD_SCALE;
		if (prio <= best) {
			disk = dp;
			best = prio;
		}
	}
	KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name));
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	/* Remember last head position. */
	disk->d_last_offset = bp->bio_offset + bp->bio_length;
	/*
	 * Update load estimates: each disk's load is an exponentially
	 * weighted moving average of its number of in-flight requests.
	 */
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		dp->load = (dp->d_consumer->index * LOAD_SCALE +
		    dp->load * 7) / 8;
	}
	g_io_request(cbp, cp);
}

static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	off_t left, mod, offset, slice;
	u_char *data;
	u_int ndisks;

	if (bp->bio_length <= sc->sc_slice) {
		g_mirror_request_round_robin(sc, bp);
		return;
	}
	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
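	/*
	 * Split the request evenly across all active disks, rounding
	 * each slice up to a multiple of the sector size.
	 */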
	slice = bp->bio_length / ndisks;
	mod = slice % sc->sc_provider->sectorsize;
	if (mod != 0)
		slice += sc->sc_provider->sectorsize - mod;
	/*
	 * Allocate all bios before sending any requests, so we can return
	 * ENOMEM in a nice and clean way.
	 */
	left = bp->bio_length;
	offset = bp->bio_offset;
	data = bp->bio_data;
	TAILQ_INIT(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
				TAILQ_REMOVE(&queue, cbp, bio_queue);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
		cbp->bio_done = g_mirror_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
		cbp->bio_offset = offset;
		cbp->bio_data = data;
		cbp->bio_length = MIN(left, slice);
		left -= cbp->bio_length;
		if (left == 0)
			break;
		offset += cbp->bio_length;
		data += cbp->bio_length;
	}
	while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, cbp, bio_queue);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		disk->d_consumer->index++;
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_register_request(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue queue;
	struct bio *cbp;
	struct g_consumer *cp;
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SA_XLOCKED);

	/*
	 * To avoid ordering issues, if a write is deferred because of a
	 * collision with a sync request, all I/O is deferred until that
	 * write is initiated.
	 */
	if (bp->bio_from->geom != sc->sc_sync.ds_geom &&
	    !TAILQ_EMPTY(&sc->sc_regular_delayed)) {
		g_mirror_regular_delay(sc, bp);
		return;
	}

	switch (bp->bio_cmd) {
	case BIO_READ:
		switch (sc->sc_balance) {
		case G_MIRROR_BALANCE_LOAD:
			g_mirror_request_load(sc, bp);
			break;
		case G_MIRROR_BALANCE_PREFER:
			g_mirror_request_prefer(sc, bp);
			break;
		case G_MIRROR_BALANCE_ROUND_ROBIN:
			g_mirror_request_round_robin(sc, bp);
			break;
		case G_MIRROR_BALANCE_SPLIT:
			g_mirror_request_split(sc, bp);
			break;
		}
		return;
	case BIO_WRITE:
	case BIO_DELETE:
		/*
		 * Delay the request if it is colliding with a synchronization
		 * request.
		 */
		if (g_mirror_sync_collision(sc, bp)) {
			g_mirror_regular_delay(sc, bp);
			return;
		}

		if (sc->sc_idle)
			g_mirror_unidle(sc);
		else
			sc->sc_last_write = time_uptime;

		/*
		 * Bump syncid on first write.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
			g_mirror_bump_syncid(sc);
		}

		/*
		 * Allocate all bios before sending any requests, so we can
		 * return ENOMEM in a nice and clean way.
		 */
		TAILQ_INIT(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			switch (disk->d_state) {
			case G_MIRROR_DISK_STATE_ACTIVE:
				break;
			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
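				/*
				 * Writes above ds_offset can be skipped:
				 * that region has not been synchronized
				 * yet and will be copied later. Writes
				 * below ds_offset must also go to the
				 * synchronizing disk to keep the already
				 * synchronized region up to date.
				 */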
				if (bp->bio_offset >= disk->d_sync.ds_offset)
					continue;
				break;
			default:
				continue;
			}
			if (bp->bio_cmd == BIO_DELETE &&
			    (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) == 0)
				continue;
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
					TAILQ_REMOVE(&queue, cbp, bio_queue);
					g_destroy_bio(cbp);
				}
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
			cbp->bio_done = g_mirror_done;
			cp = disk->d_consumer;
			cbp->bio_caller1 = cp;
			cbp->bio_to = cp->provider;
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
		}
		if (TAILQ_EMPTY(&queue)) {
			KASSERT(bp->bio_cmd == BIO_DELETE,
			    ("No consumers for regular request %p", bp));
			g_io_deliver(bp, EOPNOTSUPP);
			return;
		}
		while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			TAILQ_REMOVE(&queue, cbp, bio_queue);
			cp = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp->index++;
			sc->sc_writes++;
			g_io_request(cbp, cp);
		}
		/*
		 * Put the request onto the inflight queue, so we can check
		 * whether new synchronization requests collide with it.
		 */
		TAILQ_INSERT_TAIL(&sc->sc_inflight, bp, bio_queue);
		return;
	case BIO_FLUSH:
		TAILQ_INIT(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
				continue;
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
					TAILQ_REMOVE(&queue, cbp, bio_queue);
					g_destroy_bio(cbp);
				}
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
			cbp->bio_done = g_mirror_done;
			cbp->bio_caller1 = disk;
			cbp->bio_to = disk->d_consumer->provider;
		}
		KASSERT(!TAILQ_EMPTY(&queue),
		    ("No consumers for regular request %p", bp));
		while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			TAILQ_REMOVE(&queue, cbp, bio_queue);
			disk = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp = disk->d_consumer;
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
			    cp->acr, cp->acw, cp->ace));
			cp->index++;
			g_io_request(cbp, cp);
		}
		break;
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

static int
g_mirror_can_destroy(struct g_mirror_softc *sc)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	gp = sc->sc_geom;
	if (gp->softc == NULL)
		return (1);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_TASTING) != 0)
		return (0);
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	gp = sc->sc_sync.ds_geom;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
	    sc->sc_name);
	return (1);
}

static int
g_mirror_try_destroy(struct g_mirror_softc *sc)
{

	if (sc->sc_rootmount != NULL) {
		G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
		    sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}
	g_topology_lock();
	if (!g_mirror_can_destroy(sc)) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DRAIN) != 0) {
		g_topology_unlock();
		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
		    &sc->sc_worker);
		/* Unlock sc_lock here, as it can be destroyed after wakeup. */
		sx_xunlock(&sc->sc_lock);
		wakeup(&sc->sc_worker);
		sc->sc_worker = NULL;
	} else {
		g_topology_unlock();
		g_mirror_destroy_device(sc);
	}
	return (1);
}

1890/*
1891 * Worker thread.
1892 */
1893static void
1894g_mirror_worker(void *arg)
1895{
1896	struct g_mirror_softc *sc;
1897	struct g_mirror_event *ep;
1898	struct bio *bp;
1899	int timeout;
1900
1901	sc = arg;
1902	thread_lock(curthread);
1903	sched_prio(curthread, PRIBIO);
1904	thread_unlock(curthread);
1905
1906	sx_xlock(&sc->sc_lock);
1907	for (;;) {
1908		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
1909		/*
1910		 * First take a look at events.
1911		 * It is important to handle events before any I/O requests.
1912		 */
1913		ep = g_mirror_event_first(sc);
1914		if (ep != NULL) {
1915			g_mirror_event_remove(sc, ep);
1916			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
1917				/* Update only device status. */
1918				G_MIRROR_DEBUG(3,
1919				    "Running event for device %s.",
1920				    sc->sc_name);
1921				ep->e_error = 0;
1922				g_mirror_update_device(sc, true);
1923			} else {
1924				/* Update disk status. */
1925				G_MIRROR_DEBUG(3, "Running event for disk %s.",
1926				     g_mirror_get_diskname(ep->e_disk));
1927				ep->e_error = g_mirror_update_disk(ep->e_disk,
1928				    ep->e_state);
1929				if (ep->e_error == 0)
1930					g_mirror_update_device(sc, false);
1931			}
1932			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
1933				KASSERT(ep->e_error == 0,
1934				    ("Error cannot be handled."));
1935				g_mirror_event_free(ep);
1936			} else {
1937				ep->e_flags |= G_MIRROR_EVENT_DONE;
1938				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
1939				    ep);
1940				mtx_lock(&sc->sc_events_mtx);
1941				wakeup(ep);
1942				mtx_unlock(&sc->sc_events_mtx);
1943			}
1944			if ((sc->sc_flags &
1945			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
1946				if (g_mirror_try_destroy(sc)) {
1947					curthread->td_pflags &= ~TDP_GEOM;
1948					G_MIRROR_DEBUG(1, "Thread exiting.");
1949					kproc_exit(0);
1950				}
1951			}
1952			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
1953			continue;
1954		}
1955
1956		/*
1957		 * Check whether we can mark the array as CLEAN and, if we
1958		 * cannot, how many seconds we should wait before trying again.
1959		 */
1960		timeout = g_mirror_idle(sc, -1);
1961
1962		/*
1963		 * Handle I/O requests.
1964		 */
1965		mtx_lock(&sc->sc_queue_mtx);
1966		bp = TAILQ_FIRST(&sc->sc_queue);
1967		if (bp != NULL)
1968			TAILQ_REMOVE(&sc->sc_queue, bp, bio_queue);
1969		else {
1970			if ((sc->sc_flags &
1971			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
1972				mtx_unlock(&sc->sc_queue_mtx);
1973				if (g_mirror_try_destroy(sc)) {
1974					curthread->td_pflags &= ~TDP_GEOM;
1975					G_MIRROR_DEBUG(1, "Thread exiting.");
1976					kproc_exit(0);
1977				}
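				/*
				 * The queue mutex was dropped while trying to
				 * destroy the device, so recheck for requests
				 * queued in the meantime before sleeping.
				 */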
1978				mtx_lock(&sc->sc_queue_mtx);
1979				if (!TAILQ_EMPTY(&sc->sc_queue)) {
1980					mtx_unlock(&sc->sc_queue_mtx);
1981					continue;
1982				}
1983			}
1984			if (g_mirror_event_first(sc) != NULL) {
1985				mtx_unlock(&sc->sc_queue_mtx);
1986				continue;
1987			}
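			/*
			 * Sleep until a new request or event arrives, or
			 * until it is time to mark the array as clean.
			 */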
1988			sx_xunlock(&sc->sc_lock);
1989			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1",
1990			    timeout * hz);
1991			sx_xlock(&sc->sc_lock);
1992			G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
1993			continue;
1994		}
1995		mtx_unlock(&sc->sc_queue_mtx);
1996
1997		if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
1998		    (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
1999			/*
2000			 * Handle completion of the first half (the read) of a
2001			 * block synchronization operation.
2002			 */
2003			g_mirror_sync_request(sc, bp);
2004		} else if (bp->bio_to != sc->sc_provider) {
2005			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0)
2006				/*
2007				 * Handle completion of a regular I/O request.
2008				 */
2009				g_mirror_regular_request(sc, bp);
2010			else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
2011				/*
2012				 * Handle completion of the second half (the
2013				 * write) of a block synchronization operation.
2014				 */
2015				g_mirror_sync_request(sc, bp);
2016			else {
2017				KASSERT(0,
2018				    ("Invalid request cflags=0x%hx to=%s.",
2019				    bp->bio_cflags, bp->bio_to->name));
2020			}
2021		} else {
2022			/*
2023			 * Initiate an I/O request.
2024			 */
2025			g_mirror_register_request(sc, bp);
2026		}
2027		G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
2028	}
2029}
2030
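/*
 * Bring the disk's dirty flag in line with the device idle state; devices
 * flagged NOFAILSYNC never toggle the flag here.
 */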
2031static void
2032g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
2033{
2034
2035	sx_assert(&sc->sc_lock, SX_LOCKED);
2036
2037	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
2038		return;
2039	if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
2040		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.",
2041		    g_mirror_get_diskname(disk), sc->sc_name);
2042		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
2043	} else if (sc->sc_idle &&
2044	    (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
2045		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.",
2046		    g_mirror_get_diskname(disk), sc->sc_name);
2047		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2048	}
2049}
2050
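/*
 * Reinitialize a synchronization bio as a read of the next chunk of the
 * mirror provider, preserving its data buffer and slot index.
 */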
2051static void
2052g_mirror_sync_reinit(const struct g_mirror_disk *disk, struct bio *bp,
2053    off_t offset)
2054{
2055	void *data;
2056	int idx;
2057
2058	data = bp->bio_data;
2059	idx = (int)(uintptr_t)bp->bio_caller1;
2060	g_reset_bio(bp);
2061
2062	bp->bio_cmd = BIO_READ;
2063	bp->bio_data = data;
2064	bp->bio_done = g_mirror_sync_done;
2065	bp->bio_from = disk->d_sync.ds_consumer;
2066	bp->bio_to = disk->d_softc->sc_provider;
2067	bp->bio_caller1 = (void *)(uintptr_t)idx;
2068	bp->bio_offset = offset;
2069	bp->bio_length = MIN(MAXPHYS,
2070	    disk->d_softc->sc_mediasize - bp->bio_offset);
2071}
2072
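/*
 * Start synchronizing the given disk: attach a dedicated consumer to the
 * mirror provider and fire off the initial batch of read requests.
 */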
2073static void
2074g_mirror_sync_start(struct g_mirror_disk *disk)
2075{
2076	struct g_mirror_softc *sc;
2077	struct g_mirror_disk_sync *sync;
2078	struct g_consumer *cp;
2079	struct bio *bp;
2080	int error, i;
2081
2082	g_topology_assert_not();
2083	sc = disk->d_softc;
2084	sync = &disk->d_sync;
2085	sx_assert(&sc->sc_lock, SX_LOCKED);
2086
2087	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2088	    ("Disk %s is not marked for synchronization.",
2089	    g_mirror_get_diskname(disk)));
2090	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2091	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
2092	    sc->sc_state));
2093
2094	sx_xunlock(&sc->sc_lock);
2095	g_topology_lock();
2096	cp = g_new_consumer(sc->sc_sync.ds_geom);
2097	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
2098	error = g_attach(cp, sc->sc_provider);
2099	KASSERT(error == 0,
2100	    ("Cannot attach to %s (error=%d).", sc->sc_name, error));
2101	error = g_access(cp, 1, 0, 0);
2102	KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
2103	g_topology_unlock();
2104	sx_xlock(&sc->sc_lock);
2105
2106	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
2107	    g_mirror_get_diskname(disk));
2108	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0)
2109		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
2110	KASSERT(sync->ds_consumer == NULL,
2111	    ("Sync consumer already exists (device=%s, disk=%s).",
2112	    sc->sc_name, g_mirror_get_diskname(disk)));
2113
2114	sync->ds_consumer = cp;
2115	sync->ds_consumer->private = disk;
2116	sync->ds_consumer->index = 0;
2117
2118	/*
2119	 * Allocate memory for synchronization bios and initialize them.
2120	 */
2121	sync->ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs,
2122	    M_MIRROR, M_WAITOK);
2123	for (i = 0; i < g_mirror_syncreqs; i++) {
2124		bp = g_alloc_bio();
2125		sync->ds_bios[i] = bp;
2126
2127		bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
2128		bp->bio_caller1 = (void *)(uintptr_t)i;
2129		g_mirror_sync_reinit(disk, bp, sync->ds_offset);
2130		sync->ds_offset += bp->bio_length;
2131	}
2132
2133	/* Increase the number of disks in SYNCHRONIZING state. */
2134	sc->sc_sync.ds_ndisks++;
2135	/* Set the number of in-flight synchronization requests. */
2136	sync->ds_inflight = g_mirror_syncreqs;
2137
2138	/*
2139	 * Fire off the first synchronization requests.
2140	 */
2141	for (i = 0; i < g_mirror_syncreqs; i++) {
2142		bp = sync->ds_bios[i];
2143		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
2144		sync->ds_consumer->index++;
2145		/*
2146		 * Delay the request if it is colliding with a regular request.
2147		 */
2148		if (g_mirror_regular_collision(sc, bp))
2149			g_mirror_sync_delay(sc, bp);
2150		else
2151			g_io_request(bp, sync->ds_consumer);
2152	}
2153}
2154
2155/*
2156 * Stop the synchronization process.
2157 * type: 0 - synchronization finished
2158 *       1 - synchronization stopped
2159 */
2160static void
2161g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
2162{
2163	struct g_mirror_softc *sc;
2164	struct g_consumer *cp;
2165
2166	g_topology_assert_not();
2167	sc = disk->d_softc;
2168	sx_assert(&sc->sc_lock, SX_LOCKED);
2169
2170	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2171	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2172	    g_mirror_disk_state2str(disk->d_state)));
2173	if (disk->d_sync.ds_consumer == NULL)
2174		return;
2175
2176	if (type == 0) {
2177		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
2178		    sc->sc_name, g_mirror_get_diskname(disk));
2179	} else /* if (type == 1) */ {
2180		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
2181		    sc->sc_name, g_mirror_get_diskname(disk));
2182	}
2183	g_mirror_regular_release(sc);
2184	free(disk->d_sync.ds_bios, M_MIRROR);
2185	disk->d_sync.ds_bios = NULL;
2186	cp = disk->d_sync.ds_consumer;
2187	disk->d_sync.ds_consumer = NULL;
2188	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2189	sc->sc_sync.ds_ndisks--;
2190	sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
2191	g_topology_lock();
2192	g_mirror_kill_consumer(sc, cp);
2193	g_topology_unlock();
2194	sx_xlock(&sc->sc_lock);
2195}
2196
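/*
 * Create and announce the mirror/<name> provider, inheriting stripe
 * geometry and unmapped-I/O capability from the components, then start
 * synchronization for any disks that need it.
 */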
2197static void
2198g_mirror_launch_provider(struct g_mirror_softc *sc)
2199{
2200	struct g_mirror_disk *disk;
2201	struct g_provider *pp, *dp;
2202
2203	sx_assert(&sc->sc_lock, SX_LOCKED);
2204
2205	g_topology_lock();
2206	pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name);
2207	pp->flags |= G_PF_DIRECT_RECEIVE;
2208	pp->mediasize = sc->sc_mediasize;
2209	pp->sectorsize = sc->sc_sectorsize;
2210	pp->stripesize = 0;
2211	pp->stripeoffset = 0;
2212
2213	/* Splitting of unmapped BIOs could work but isn't implemented yet. */
2214	if (sc->sc_balance != G_MIRROR_BALANCE_SPLIT)
2215		pp->flags |= G_PF_ACCEPT_UNMAPPED;
2216
2217	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2218		if (disk->d_consumer && disk->d_consumer->provider) {
2219			dp = disk->d_consumer->provider;
2220			if (dp->stripesize > pp->stripesize) {
2221				pp->stripesize = dp->stripesize;
2222				pp->stripeoffset = dp->stripeoffset;
2223			}
2224			/* A provider underneath us doesn't support unmapped I/O. */
2225			if ((dp->flags & G_PF_ACCEPT_UNMAPPED) == 0) {
2226				G_MIRROR_DEBUG(0, "Cancelling unmapped I/O "
2227				    "because of %s.", dp->name);
2228				pp->flags &= ~G_PF_ACCEPT_UNMAPPED;
2229			}
2230		}
2231	}
2232	pp->private = sc;
2233	sc->sc_refcnt++;
2234	sc->sc_provider = pp;
2235	g_error_provider(pp, 0);
2236	g_topology_unlock();
2237	G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
2238	    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks);
2239	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2240		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
2241			g_mirror_sync_start(disk);
2242	}
2243}
2244
2245static void
2246g_mirror_destroy_provider(struct g_mirror_softc *sc)
2247{
2248	struct g_mirror_disk *disk;
2249	struct bio *bp;
2250
2251	g_topology_assert_not();
2252	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
2253	    sc->sc_name));
2254
2255	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2256		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
2257			g_mirror_sync_stop(disk, 1);
2258	}
2259
2260	g_topology_lock();
2261	g_error_provider(sc->sc_provider, ENXIO);
2262	mtx_lock(&sc->sc_queue_mtx);
2263	while ((bp = TAILQ_FIRST(&sc->sc_queue)) != NULL) {
2264		TAILQ_REMOVE(&sc->sc_queue, bp, bio_queue);
2265		/*
2266		 * Abort any pending I/O that wasn't generated by us.
2267		 * Synchronization requests and requests destined for individual
2268		 * mirror components can be destroyed immediately.
2269		 */
2270		if (bp->bio_to == sc->sc_provider &&
2271		    bp->bio_from->geom != sc->sc_sync.ds_geom) {
2272			g_io_deliver(bp, ENXIO);
2273		} else {
2274			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
2275				free(bp->bio_data, M_MIRROR);
2276			g_destroy_bio(bp);
2277		}
2278	}
2279	mtx_unlock(&sc->sc_queue_mtx);
2280	g_wither_provider(sc->sc_provider, ENXIO);
2281	sc->sc_provider = NULL;
2282	G_MIRROR_DEBUG(0, "Device %s: provider destroyed.", sc->sc_name);
2283	g_topology_unlock();
2284}
2285
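/*
 * Startup timeout handler: force the device to start with whatever
 * components have arrived so far.
 */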
2286static void
2287g_mirror_go(void *arg)
2288{
2289	struct g_mirror_softc *sc;
2290
2291	sc = arg;
2292	G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
2293	g_mirror_event_send(sc, 0,
2294	    G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE);
2295}
2296
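/*
 * Determine the initial state for a disk joining the device by comparing
 * its syncid with the device's: an equal syncid yields ACTIVE, SYNCHRONIZING
 * or STALE; a smaller one requires (re)synchronization; a larger one means
 * the running mirror itself is stale, so the disk is not connected.
 */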
2297static u_int
2298g_mirror_determine_state(struct g_mirror_disk *disk)
2299{
2300	struct g_mirror_softc *sc;
2301	u_int state;
2302
2303	sc = disk->d_softc;
2304	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
2305		if ((disk->d_flags &
2306		    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0 &&
2307		    (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 ||
2308		     (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0)) {
2309			/* Disk does not need synchronization. */
2310			state = G_MIRROR_DISK_STATE_ACTIVE;
2311		} else {
2312			if ((sc->sc_flags &
2313			     G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2314			    (disk->d_flags &
2315			     G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
2316				/*
2317				 * We can start synchronization from
2318				 * the stored offset.
2319				 */
2320				state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
2321			} else {
2322				state = G_MIRROR_DISK_STATE_STALE;
2323			}
2324		}
2325	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
2326		/*
2327		 * Reset all synchronization data for this disk,
2328		 * because even if it was synchronized, it was
2329		 * synchronized against disks with a different syncid.
2330		 */
2331		disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
2332		disk->d_sync.ds_offset = 0;
2333		disk->d_sync.ds_offset_done = 0;
2334		disk->d_sync.ds_syncid = sc->sc_syncid;
2335		if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2336		    (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
2337			state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
2338		} else {
2339			state = G_MIRROR_DISK_STATE_STALE;
2340		}
2341	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
2342		/*
2343		 * Not good, NOT GOOD!
2344		 * It means that the mirror was started on stale disks
2345		 * and a fresher disk has just arrived.
2346		 * If there were writes, the mirror is broken, sorry.
2347		 * I think the best choice here is not to touch
2348		 * this disk and to inform the user loudly.
2349		 */
2350		G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
2351		    "disk (%s) arrived! It will not be connected to the "
2352		    "running device.", sc->sc_name,
2353		    g_mirror_get_diskname(disk));
2354		g_mirror_destroy_disk(disk);
2355		state = G_MIRROR_DISK_STATE_NONE;
2356		/* Return immediately, because disk was destroyed. */
2357		return (state);
2358	}
2359	G_MIRROR_DEBUG(3, "State for %s disk: %s.",
2360	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
2361	return (state);
2362}
2363
2364/*
2365 * Update device state.
2366 */
2367static void
2368g_mirror_update_device(struct g_mirror_softc *sc, bool force)
2369{
2370	struct g_mirror_disk *disk;
2371	u_int state;
2372
2373	sx_assert(&sc->sc_lock, SX_XLOCKED);
2374
2375	switch (sc->sc_state) {
2376	case G_MIRROR_DEVICE_STATE_STARTING:
2377	    {
2378		struct g_mirror_disk *pdisk, *tdisk;
2379		u_int dirty, ndisks, genid, syncid;
2380		bool broken;
2381
2382		KASSERT(sc->sc_provider == NULL,
2383		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
2384		/*
2385		 * Are we ready? We are, if all disks are connected or
2386		 * if we have any disks and 'force' is true.
2387		 */
2388		ndisks = g_mirror_ndisks(sc, -1);
2389		if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) {
2390			;
2391		} else if (ndisks == 0) {
2392			/*
2393			 * Disks went down in the starting phase, so destroy
2394			 * the device.
2395			 */
2396			callout_drain(&sc->sc_callout);
2397			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2398			G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2399			    sc->sc_rootmount);
2400			root_mount_rel(sc->sc_rootmount);
2401			sc->sc_rootmount = NULL;
2402			return;
2403		} else {
2404			return;
2405		}
2406
2407		/*
2408		 * Activate all disks with the biggest syncid.
2409		 */
2410		if (force) {
2411			/*
2412			 * If 'force' is true, we have been called due to a
2413			 * timeout, so don't bother canceling the timeout.
2414			 */
2415			ndisks = 0;
2416			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2417				if ((disk->d_flags &
2418				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
2419					ndisks++;
2420				}
2421			}
2422			if (ndisks == 0) {
2423			/* No valid disks found, destroy the device. */
2424				sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2425				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
2426				    __LINE__, sc->sc_rootmount);
2427				root_mount_rel(sc->sc_rootmount);
2428				sc->sc_rootmount = NULL;
2429				return;
2430			}
2431		} else {
2432			/* Cancel timeout. */
2433			callout_drain(&sc->sc_callout);
2434		}
2435
2436		/*
2437		 * Find the biggest genid.
2438		 */
2439		genid = 0;
2440		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2441			if (disk->d_genid > genid)
2442				genid = disk->d_genid;
2443		}
2444		sc->sc_genid = genid;
2445		/*
2446		 * Remove all disks without the biggest genid.
2447		 */
2448		broken = false;
2449		LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) {
2450			if (disk->d_genid < genid) {
2451				G_MIRROR_DEBUG(0,
2452				    "Component %s (device %s) broken, skipping.",
2453				    g_mirror_get_diskname(disk), sc->sc_name);
2454				g_mirror_destroy_disk(disk);
2455				/*
2456				 * Bump the syncid in case we discover a healthy
2457				 * replacement disk after starting the mirror.
2458				 */
2459				broken = true;
2460			}
2461		}
2462
2463		/*
2464		 * Find the biggest syncid.
2465		 */
2466		syncid = 0;
2467		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2468			if (disk->d_sync.ds_syncid > syncid)
2469				syncid = disk->d_sync.ds_syncid;
2470		}
2471
2472		/*
2473		 * Here we need to look for dirty disks: if all disks with
2474		 * the biggest syncid are dirty, we have to choose the one
2475		 * with the biggest priority and rebuild the rest.
2476		 *
2477		 * Count the disks with the biggest syncid and how many of
2478		 * them are dirty.
2479		 * While here, find the dirty disk with the biggest
2480		 * priority.
2481		 */
2482		dirty = ndisks = 0;
2483		pdisk = NULL;
2484		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2485			if (disk->d_sync.ds_syncid != syncid)
2486				continue;
2487			if ((disk->d_flags &
2488			    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
2489				continue;
2490			}
2491			ndisks++;
2492			if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
2493				dirty++;
2494				if (pdisk == NULL ||
2495				    pdisk->d_priority < disk->d_priority) {
2496					pdisk = disk;
2497				}
2498			}
2499		}
2500		if (dirty == 0) {
2501			/* No dirty disks at all, great. */
2502		} else if (dirty == ndisks) {
2503			/*
2504			 * Force synchronization for all dirty disks except
2505			 * the one with the biggest priority.
2506			 */
2507			KASSERT(pdisk != NULL, ("pdisk == NULL"));
2508			G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a "
2509			    "master disk for synchronization.",
2510			    g_mirror_get_diskname(pdisk), sc->sc_name);
2511			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2512				if (disk->d_sync.ds_syncid != syncid)
2513					continue;
2514				if ((disk->d_flags &
2515				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
2516					continue;
2517				}
2518				KASSERT((disk->d_flags &
2519				    G_MIRROR_DISK_FLAG_DIRTY) != 0,
2520				    ("Disk %s isn't marked as dirty.",
2521				    g_mirror_get_diskname(disk)));
2522				/* Skip the disk with the biggest priority. */
2523				if (disk == pdisk)
2524					continue;
2525				disk->d_sync.ds_syncid = 0;
2526			}
2527		} else if (dirty < ndisks) {
2528			/*
2529			 * We have some non-dirty disks, so force
2530			 * synchronization for all the dirty ones.
2531			 */
2532			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2533				if (disk->d_sync.ds_syncid != syncid)
2534					continue;
2535				if ((disk->d_flags &
2536				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
2537					continue;
2538				}
2539				if ((disk->d_flags &
2540				    G_MIRROR_DISK_FLAG_DIRTY) == 0) {
2541					continue;
2542				}
2543				disk->d_sync.ds_syncid = 0;
2544			}
2545		}
2546
2547		/* Reset hint. */
2548		sc->sc_hint = NULL;
2549		sc->sc_syncid = syncid;
2550		if (force || broken) {
2551			/* Remember to bump syncid on first write. */
2552			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2553		}
2554		state = G_MIRROR_DEVICE_STATE_RUNNING;
2555		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
2556		    sc->sc_name, g_mirror_device_state2str(sc->sc_state),
2557		    g_mirror_device_state2str(state));
2558		sc->sc_state = state;
2559		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2560			state = g_mirror_determine_state(disk);
2561			g_mirror_event_send(disk, state,
2562			    G_MIRROR_EVENT_DONTWAIT);
2563			if (state == G_MIRROR_DISK_STATE_STALE)
2564				sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2565		}
2566		break;
2567	    }
2568	case G_MIRROR_DEVICE_STATE_RUNNING:
2569		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
2570		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2571			/*
2572			 * No usable disks, so destroy the device.
2573			 */
2574			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2575			break;
2576		} else if (g_mirror_ndisks(sc,
2577		    G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
2578		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2579			/*
2580			 * We have active disks, so launch the provider if it
2581			 * doesn't exist yet.
2582			 */
2583			if (sc->sc_provider == NULL)
2584				g_mirror_launch_provider(sc);
2585			if (sc->sc_rootmount != NULL) {
2586				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
2587				    __LINE__, sc->sc_rootmount);
2588				root_mount_rel(sc->sc_rootmount);
2589				sc->sc_rootmount = NULL;
2590			}
2591		}
2592		/*
2593		 * Genid should be bumped immediately, so do it here.
2594		 */
2595		if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) {
2596			sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID;
2597			g_mirror_bump_genid(sc);
2598		}
2599		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID_NOW) != 0) {
2600			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID_NOW;
2601			g_mirror_bump_syncid(sc);
2602		}
2603		break;
2604	default:
2605		KASSERT(1 == 0, ("Wrong device state (%s, %s).",
2606		    sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
2607		break;
2608	}
2609}
2610
2611/*
2612 * Update disk state and device state if needed.
2613 */
2614#define	DISK_STATE_CHANGED()	G_MIRROR_DEBUG(1,			\
2615	"Disk %s state changed from %s to %s (device %s).",		\
2616	g_mirror_get_diskname(disk),					\
2617	g_mirror_disk_state2str(disk->d_state),				\
2618	g_mirror_disk_state2str(state), sc->sc_name)
2619static int
2620g_mirror_update_disk(struct g_mirror_disk *disk, u_int state)
2621{
2622	struct g_mirror_softc *sc;
2623
2624	sc = disk->d_softc;
2625	sx_assert(&sc->sc_lock, SX_XLOCKED);
2626
2627again:
2628	G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.",
2629	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state),
2630	    g_mirror_disk_state2str(state));
2631	switch (state) {
2632	case G_MIRROR_DISK_STATE_NEW:
2633		/*
2634		 * Possible scenarios:
2635		 * 1. A new disk arrives.
2636		 */
2637		/* Previous state should be NONE. */
2638		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE,
2639		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2640		    g_mirror_disk_state2str(disk->d_state)));
2641		DISK_STATE_CHANGED();
2642
2643		disk->d_state = state;
2644		if (LIST_EMPTY(&sc->sc_disks))
2645			LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next);
2646		else {
2647			struct g_mirror_disk *dp;
2648
2649			LIST_FOREACH(dp, &sc->sc_disks, d_next) {
2650				if (disk->d_priority >= dp->d_priority) {
2651					LIST_INSERT_BEFORE(dp, disk, d_next);
2652					dp = NULL;
2653					break;
2654				}
2655				if (LIST_NEXT(dp, d_next) == NULL)
2656					break;
2657			}
2658			if (dp != NULL)
2659				LIST_INSERT_AFTER(dp, disk, d_next);
2660		}
2661		G_MIRROR_DEBUG(1, "Device %s: provider %s detected.",
2662		    sc->sc_name, g_mirror_get_diskname(disk));
2663		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
2664			break;
2665		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2666		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2667		    g_mirror_device_state2str(sc->sc_state),
2668		    g_mirror_get_diskname(disk),
2669		    g_mirror_disk_state2str(disk->d_state)));
2670		state = g_mirror_determine_state(disk);
2671		if (state != G_MIRROR_DISK_STATE_NONE)
2672			goto again;
2673		break;
2674	case G_MIRROR_DISK_STATE_ACTIVE:
2675		/*
2676		 * Possible scenarios:
2677		 * 1. A new disk does not need synchronization.
2678		 * 2. The synchronization process finished successfully.
2679		 */
2680		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2681		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2682		    g_mirror_device_state2str(sc->sc_state),
2683		    g_mirror_get_diskname(disk),
2684		    g_mirror_disk_state2str(disk->d_state)));
2685		/* Previous state should be NEW or SYNCHRONIZING. */
2686		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW ||
2687		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2688		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2689		    g_mirror_disk_state2str(disk->d_state)));
2690		DISK_STATE_CHANGED();
2691
2692		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
2693			disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING;
2694			disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC;
2695			g_mirror_sync_stop(disk, 0);
2696		}
2697		disk->d_state = state;
2698		disk->d_sync.ds_offset = 0;
2699		disk->d_sync.ds_offset_done = 0;
2700		g_mirror_update_idle(sc, disk);
2701		g_mirror_update_metadata(disk);
2702		G_MIRROR_DEBUG(1, "Device %s: provider %s activated.",
2703		    sc->sc_name, g_mirror_get_diskname(disk));
2704		break;
2705	case G_MIRROR_DISK_STATE_STALE:
2706		/*
2707		 * Possible scenarios:
2708		 * 1. A stale disk was connected.
2709		 */
2710		/* Previous state should be NEW. */
2711		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2712		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2713		    g_mirror_disk_state2str(disk->d_state)));
2714		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2715		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2716		    g_mirror_device_state2str(sc->sc_state),
2717		    g_mirror_get_diskname(disk),
2718		    g_mirror_disk_state2str(disk->d_state)));
2719		/*
2720		 * The STALE state is only possible if the device is marked
2721		 * NOAUTOSYNC.
2722		 */
2723		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
2724		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2725		    g_mirror_device_state2str(sc->sc_state),
2726		    g_mirror_get_diskname(disk),
2727		    g_mirror_disk_state2str(disk->d_state)));
2728		DISK_STATE_CHANGED();
2729
2730		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2731		disk->d_state = state;
2732		g_mirror_update_metadata(disk);
2733		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
2734		    sc->sc_name, g_mirror_get_diskname(disk));
2735		break;
2736	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
2737		/*
2738		 * Possible scenarios:
2739		 * 1. A disk which needs synchronization was connected.
2740		 */
2741		/* Previous state should be NEW. */
2742		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2743		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2744		    g_mirror_disk_state2str(disk->d_state)));
2745		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2746		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2747		    g_mirror_device_state2str(sc->sc_state),
2748		    g_mirror_get_diskname(disk),
2749		    g_mirror_disk_state2str(disk->d_state)));
2750		DISK_STATE_CHANGED();
2751
2752		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
2753			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2754		disk->d_state = state;
2755		if (sc->sc_provider != NULL) {
2756			g_mirror_sync_start(disk);
2757			g_mirror_update_metadata(disk);
2758		}
2759		break;
2760	case G_MIRROR_DISK_STATE_DISCONNECTED:
2761		/*
2762		 * Possible scenarios:
2763		 * 1. The device wasn't running yet, but a disk disappeared.
2764		 * 2. A disk was active and disappeared.
2765		 * 3. A disk disappeared during the synchronization process.
2766		 */
2767		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
2768			/*
2769			 * Previous state should be ACTIVE, STALE or
2770			 * SYNCHRONIZING.
2771			 */
2772			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
2773			    disk->d_state == G_MIRROR_DISK_STATE_STALE ||
2774			    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2775			    ("Wrong disk state (%s, %s).",
2776			    g_mirror_get_diskname(disk),
2777			    g_mirror_disk_state2str(disk->d_state)));
2778		} else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
2779			/* Previous state should be NEW. */
2780			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2781			    ("Wrong disk state (%s, %s).",
2782			    g_mirror_get_diskname(disk),
2783			    g_mirror_disk_state2str(disk->d_state)));
2784			/*
2785			 * Reset the pending syncid bump if the disk
2786			 * disappeared in the STARTING state.
2787			 */
2788			if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0)
2789				sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
2790#ifdef	INVARIANTS
2791		} else {
2792			KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2793			    sc->sc_name,
2794			    g_mirror_device_state2str(sc->sc_state),
2795			    g_mirror_get_diskname(disk),
2796			    g_mirror_disk_state2str(disk->d_state)));
2797#endif
2798		}
2799		DISK_STATE_CHANGED();
2800		G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.",
2801		    sc->sc_name, g_mirror_get_diskname(disk));
2802
2803		g_mirror_destroy_disk(disk);
2804		break;
2805	case G_MIRROR_DISK_STATE_DESTROY:
2806	    {
2807		int error;
2808
2809		error = g_mirror_clear_metadata(disk);
2810		if (error != 0) {
2811			G_MIRROR_DEBUG(0,
2812			    "Device %s: failed to clear metadata on %s: %d.",
2813			    sc->sc_name, g_mirror_get_diskname(disk), error);
2814			break;
2815		}
2816		DISK_STATE_CHANGED();
2817		G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.",
2818		    sc->sc_name, g_mirror_get_diskname(disk));
2819
2820		g_mirror_destroy_disk(disk);
2821		sc->sc_ndisks--;
2822		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2823			g_mirror_update_metadata(disk);
2824		}
2825		break;
2826	    }
2827	default:
2828		KASSERT(1 == 0, ("Unknown state (%u).", state));
2829		break;
2830	}
2831	return (0);
2832}
2833#undef	DISK_STATE_CHANGED
2834
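/*
 * Read and validate the gmirror metadata stored in the last sector of the
 * given consumer's provider.
 */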
2835int
2836g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md)
2837{
2838	struct g_provider *pp;
2839	u_char *buf;
2840	int error;
2841
2842	g_topology_assert();
2843
2844	error = g_access(cp, 1, 0, 0);
2845	if (error != 0)
2846		return (error);
2847	pp = cp->provider;
2848	g_topology_unlock();
2849	/* Metadata are stored on the last sector. */
2850	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2851	    &error);
2852	g_topology_lock();
2853	g_access(cp, -1, 0, 0);
2854	if (buf == NULL) {
2855		G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).",
2856		    cp->provider->name, error);
2857		return (error);
2858	}
2859
2860	/* Decode metadata. */
2861	error = mirror_metadata_decode(buf, md);
2862	g_free(buf);
2863	if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0)
2864		return (EINVAL);
2865	if (md->md_version > G_MIRROR_VERSION) {
2866		G_MIRROR_DEBUG(0,
2867		    "Kernel module is too old to handle metadata from %s.",
2868		    cp->provider->name);
2869		return (EINVAL);
2870	}
2871	if (error != 0) {
2872		G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
2873		    cp->provider->name);
2874		return (error);
2875	}
2876
2877	return (0);
2878}
2879
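/*
 * Check that a component's metadata is consistent with the existing
 * device configuration before admitting the disk.
 */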
2880static int
2881g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp,
2882    struct g_mirror_metadata *md)
2883{
2884
2885	if (g_mirror_id2disk(sc, md->md_did) != NULL) {
2886		G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.",
2887		    pp->name, md->md_did);
2888		return (EEXIST);
2889	}
2890	if (md->md_all != sc->sc_ndisks) {
2891		G_MIRROR_DEBUG(1,
2892		    "Invalid '%s' field on disk %s (device %s), skipping.",
2893		    "md_all", pp->name, sc->sc_name);
2894		return (EINVAL);
2895	}
2896	if (md->md_slice != sc->sc_slice) {
2897		G_MIRROR_DEBUG(1,
2898		    "Invalid '%s' field on disk %s (device %s), skipping.",
2899		    "md_slice", pp->name, sc->sc_name);
2900		return (EINVAL);
2901	}
2902	if (md->md_balance != sc->sc_balance) {
2903		G_MIRROR_DEBUG(1,
2904		    "Invalid '%s' field on disk %s (device %s), skipping.",
2905		    "md_balance", pp->name, sc->sc_name);
2906		return (EINVAL);
2907	}
2908#if 0
2909	if (md->md_mediasize != sc->sc_mediasize) {
2910		G_MIRROR_DEBUG(1,
2911		    "Invalid '%s' field on disk %s (device %s), skipping.",
2912		    "md_mediasize", pp->name, sc->sc_name);
2913		return (EINVAL);
2914	}
2915#endif
2916	if (sc->sc_mediasize > pp->mediasize) {
2917		G_MIRROR_DEBUG(1,
2918		    "Invalid size of disk %s (device %s), skipping.", pp->name,
2919		    sc->sc_name);
2920		return (EINVAL);
2921	}
2922	if (md->md_sectorsize != sc->sc_sectorsize) {
2923		G_MIRROR_DEBUG(1,
2924		    "Invalid '%s' field on disk %s (device %s), skipping.",
2925		    "md_sectorsize", pp->name, sc->sc_name);
2926		return (EINVAL);
2927	}
2928	if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2929		G_MIRROR_DEBUG(1,
2930		    "Invalid sector size of disk %s (device %s), skipping.",
2931		    pp->name, sc->sc_name);
2932		return (EINVAL);
2933	}
2934	if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) {
2935		G_MIRROR_DEBUG(1,
2936		    "Invalid device flags on disk %s (device %s), skipping.",
2937		    pp->name, sc->sc_name);
2938		return (EINVAL);
2939	}
2940	if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) {
2941		G_MIRROR_DEBUG(1,
2942		    "Invalid disk flags on disk %s (device %s), skipping.",
2943		    pp->name, sc->sc_name);
2944		return (EINVAL);
2945	}
2946	return (0);
2947}
2948
2949int
2950g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
2951    struct g_mirror_metadata *md)
2952{
2953	struct g_mirror_disk *disk;
2954	int error;
2955
2956	g_topology_assert_not();
2957	G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name);
2958
2959	error = g_mirror_check_metadata(sc, pp, md);
2960	if (error != 0)
2961		return (error);
2962	if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING &&
2963	    md->md_genid < sc->sc_genid) {
2964		G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.",
2965		    pp->name, sc->sc_name);
2966		return (EINVAL);
2967	}
2968	disk = g_mirror_init_disk(sc, pp, md, &error);
2969	if (disk == NULL)
2970		return (error);
2971	error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW,
2972	    G_MIRROR_EVENT_WAIT);
2973	if (error != 0)
2974		return (error);
2975	if (md->md_version < G_MIRROR_VERSION) {
2976		G_MIRROR_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
2977		    pp->name, md->md_version, G_MIRROR_VERSION);
2978		g_mirror_update_metadata(disk);
2979	}
2980	return (0);
2981}
2982
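/*
 * Event handler that finishes a CLOSEWAIT destroy once the provider has
 * been closed for the last time.
 */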
2983static void
2984g_mirror_destroy_delayed(void *arg, int flag)
2985{
2986	struct g_mirror_softc *sc;
2987	int error;
2988
2989	if (flag == EV_CANCEL) {
2990		G_MIRROR_DEBUG(1, "Destroying canceled.");
2991		return;
2992	}
2993	sc = arg;
2994	g_topology_unlock();
2995	sx_xlock(&sc->sc_lock);
2996	KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0,
2997	    ("DESTROY flag set on %s.", sc->sc_name));
2998	KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0,
2999	    ("CLOSEWAIT flag not set on %s.", sc->sc_name));
3000	G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name);
3001	error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT);
3002	if (error != 0) {
3003		G_MIRROR_DEBUG(0, "Cannot destroy %s (error=%d).",
3004		    sc->sc_name, error);
3005		sx_xunlock(&sc->sc_lock);
3006	}
3007	g_topology_lock();
3008}
3009
3010static int
3011g_mirror_access(struct g_provider *pp, int acr, int acw, int ace)
3012{
3013	struct g_mirror_softc *sc;
3014	int error = 0;
3015
3016	g_topology_assert();
3017	G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
3018	    acw, ace);
3019
3020	sc = pp->private;
3021	KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
3022
3023	g_topology_unlock();
3024	sx_xlock(&sc->sc_lock);
3025	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 ||
3026	    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0 ||
3027	    LIST_EMPTY(&sc->sc_disks)) {
3028		if (acr > 0 || acw > 0 || ace > 0)
3029			error = ENXIO;
3030		goto end;
3031	}
3032	sc->sc_provider_open += acr + acw + ace;
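	/* Mark the mirror as clean when the last writer closes it. */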
3033	if (pp->acw + acw == 0)
3034		g_mirror_idle(sc, 0);
3035	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0 &&
3036	    sc->sc_provider_open == 0)
3037		g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, sc, NULL);
3038end:
3039	sx_xunlock(&sc->sc_lock);
3040	g_topology_lock();
3041	return (error);
3042}
3043
3044struct g_geom *
3045g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md,
3046    u_int type)
3047{
3048	struct g_mirror_softc *sc;
3049	struct g_geom *gp;
3050	int error, timeout;
3051
3052	g_topology_assert();
3053	G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name,
3054	    md->md_mid);
3055
3056	/* At least one disk is required. */
3057	if (md->md_all < 1)
3058		return (NULL);
3059	/*
3060	 * Action geom.
3061	 */
3062	gp = g_new_geomf(mp, "%s", md->md_name);
3063	sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO);
3064	gp->start = g_mirror_start;
3065	gp->orphan = g_mirror_orphan;
3066	gp->access = g_mirror_access;
3067	gp->dumpconf = g_mirror_dumpconf;
3068
3069	sc->sc_type = type;
3070	sc->sc_id = md->md_mid;
3071	sc->sc_slice = md->md_slice;
3072	sc->sc_balance = md->md_balance;
3073	sc->sc_mediasize = md->md_mediasize;
3074	sc->sc_sectorsize = md->md_sectorsize;
3075	sc->sc_ndisks = md->md_all;
3076	sc->sc_flags = md->md_mflags;
3077	sc->sc_bump_id = 0;
3078	sc->sc_idle = 1;
3079	sc->sc_last_write = time_uptime;
3080	sc->sc_writes = 0;
3081	sc->sc_refcnt = 1;
3082	sx_init(&sc->sc_lock, "gmirror:lock");
3083	TAILQ_INIT(&sc->sc_queue);
3084	mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF);
3085	TAILQ_INIT(&sc->sc_regular_delayed);
3086	TAILQ_INIT(&sc->sc_inflight);
3087	TAILQ_INIT(&sc->sc_sync_delayed);
3088	LIST_INIT(&sc->sc_disks);
3089	TAILQ_INIT(&sc->sc_events);
3090	mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF);
3091	callout_init(&sc->sc_callout, 1);
3092	mtx_init(&sc->sc_done_mtx, "gmirror:done", NULL, MTX_DEF);
3093	sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING;
3094	gp->softc = sc;
3095	sc->sc_geom = gp;
3096	sc->sc_provider = NULL;
3097	sc->sc_provider_open = 0;
3098	/*
3099	 * Synchronization geom.
3100	 */
3101	gp = g_new_geomf(mp, "%s.sync", md->md_name);
3102	gp->softc = sc;
3103	gp->orphan = g_mirror_orphan;
3104	sc->sc_sync.ds_geom = gp;
3105	sc->sc_sync.ds_ndisks = 0;
3106	error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0,
3107	    "g_mirror %s", md->md_name);
3108	if (error != 0) {
3109		G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.",
3110		    sc->sc_name);
3111		g_destroy_geom(sc->sc_sync.ds_geom);
3112		g_destroy_geom(sc->sc_geom);
3113		g_mirror_free_device(sc);
3114		return (NULL);
3115	}
3116
3117	G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).",
3118	    sc->sc_name, sc->sc_ndisks, sc->sc_id);
3119
3120	sc->sc_rootmount = root_mount_hold("GMIRROR");
3121	G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
3122	/*
3123	 * Schedule the startup timeout.
3124	 */
3125	timeout = g_mirror_timeout * hz;
3126	callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc);
3127	return (sc->sc_geom);
3128}
3129
3130int
3131g_mirror_destroy(struct g_mirror_softc *sc, int how)
3132{
3133	struct g_mirror_disk *disk;
3134
3135	g_topology_assert_not();
3136	sx_assert(&sc->sc_lock, SX_XLOCKED);
3137
3138	if (sc->sc_provider_open != 0) {
3139		switch (how) {
3140		case G_MIRROR_DESTROY_SOFT:
3141			G_MIRROR_DEBUG(1,
3142			    "Device %s is still open (%d).", sc->sc_name,
3143			    sc->sc_provider_open);
3144			return (EBUSY);
3145		case G_MIRROR_DESTROY_DELAYED:
3146			G_MIRROR_DEBUG(1,
3147			    "Device %s will be destroyed on last close.",
3148			    sc->sc_name);
3149			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
3150				if (disk->d_state ==
3151				    G_MIRROR_DISK_STATE_SYNCHRONIZING) {
3152					g_mirror_sync_stop(disk, 1);
3153				}
3154			}
3155			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_CLOSEWAIT;
3156			return (EBUSY);
3157		case G_MIRROR_DESTROY_HARD:
3158			G_MIRROR_DEBUG(1, "Device %s is still open, so it "
3159			    "can't be definitively removed.", sc->sc_name);
3160		}
3161	}
3162
3163	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
3164		sx_xunlock(&sc->sc_lock);
3165		return (0);
3166	}
3167	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
3168	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DRAIN;
3169	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
3170	sx_xunlock(&sc->sc_lock);
3171	mtx_lock(&sc->sc_queue_mtx);
3172	wakeup(sc);
3173	mtx_unlock(&sc->sc_queue_mtx);
3174	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
3175	while (sc->sc_worker != NULL)
3176		tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5);
3177	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
3178	sx_xlock(&sc->sc_lock);
3179	g_mirror_destroy_device(sc);
3180	return (0);
3181}
3182
3183static void
3184g_mirror_taste_orphan(struct g_consumer *cp)
3185{
3186
3187	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
3188	    cp->provider->name));
3189}
3190
3191static struct g_geom *
3192g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
3193{
3194	struct g_mirror_metadata md;
3195	struct g_mirror_softc *sc;
3196	struct g_consumer *cp;
3197	struct g_geom *gp;
3198	int error;
3199
3200	g_topology_assert();
3201	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
3202	G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);
3203
3204	gp = g_new_geomf(mp, "mirror:taste");
3205	/*
3206 * This orphan function should never be called.
3207	 */
3208	gp->orphan = g_mirror_taste_orphan;
3209	cp = g_new_consumer(gp);
3210	g_attach(cp, pp);
3211	error = g_mirror_read_metadata(cp, &md);
3212	g_detach(cp);
3213	g_destroy_consumer(cp);
3214	g_destroy_geom(gp);
3215	if (error != 0)
3216		return (NULL);
3217	gp = NULL;
3218
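	/*
	 * Skip this provider if the metadata hardcodes a different provider
	 * name or records a different provider size.
	 */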
3219	if (md.md_provider[0] != '\0' &&
3220	    !g_compare_names(md.md_provider, pp->name))
3221		return (NULL);
3222	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
3223		return (NULL);
3224	if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
3225		G_MIRROR_DEBUG(0,
3226		    "Device %s: provider %s marked as inactive, skipping.",
3227		    md.md_name, pp->name);
3228		return (NULL);
3229	}
3230	if (g_mirror_debug >= 2)
3231		mirror_metadata_dump(&md);
3232
3233	/*
3234 * Let's check if the device already exists.
3235	 */
3236	sc = NULL;
3237	LIST_FOREACH(gp, &mp->geom, geom) {
3238		sc = gp->softc;
3239		if (sc == NULL)
3240			continue;
3241		if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC)
3242			continue;
3243		if (sc->sc_sync.ds_geom == gp)
3244			continue;
3245		if (strcmp(md.md_name, sc->sc_name) != 0)
3246			continue;
3247		if (md.md_mid != sc->sc_id) {
3248			G_MIRROR_DEBUG(0, "Device %s already configured.",
3249			    sc->sc_name);
3250			return (NULL);
3251		}
3252		break;
3253	}
3254	if (gp == NULL) {
3255		gp = g_mirror_create(mp, &md, G_MIRROR_TYPE_AUTOMATIC);
3256		if (gp == NULL) {
3257			G_MIRROR_DEBUG(0, "Cannot create device %s.",
3258			    md.md_name);
3259			return (NULL);
3260		}
3261		sc = gp->softc;
3262	}
3263	G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
3264	g_topology_unlock();
3265	sx_xlock(&sc->sc_lock);
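	/* Keep the worker from destroying the device while the disk is added. */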
3266	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_TASTING;
3267	error = g_mirror_add_disk(sc, pp, &md);
3268	if (error != 0) {
3269		G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
3270		    pp->name, gp->name, error);
3271		if (LIST_EMPTY(&sc->sc_disks)) {
3272			g_cancel_event(sc);
3273			g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD);
3274			g_topology_lock();
3275			return (NULL);
3276		}
3277		gp = NULL;
3278	}
3279	sc->sc_flags &= ~G_MIRROR_DEVICE_FLAG_TASTING;
3280	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
3281		g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD);
3282		g_topology_lock();
3283		return (NULL);
3284	}
3285	sx_xunlock(&sc->sc_lock);
3286	g_topology_lock();
3287	return (gp);
3288}
3289
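/*
 * The underlying provider changed size; rewrite the metadata so that it
 * lands in the new last sector.
 */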
3290static void
3291g_mirror_resize(struct g_consumer *cp)
3292{
3293	struct g_mirror_disk *disk;
3294
3295	g_topology_assert();
3296	g_trace(G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name);
3297
3298	disk = cp->private;
3299	if (disk == NULL)
3300		return;
3301	g_topology_unlock();
3302	g_mirror_update_metadata(disk);
3303	g_topology_lock();
3304}
3305
3306static int
3307g_mirror_destroy_geom(struct gctl_req *req __unused,
3308    struct g_class *mp __unused, struct g_geom *gp)
3309{
3310	struct g_mirror_softc *sc;
3311	int error;
3312
3313	g_topology_unlock();
3314	sc = gp->softc;
3315	sx_xlock(&sc->sc_lock);
3316	g_cancel_event(sc);
3317	error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT);
3318	if (error != 0)
3319		sx_xunlock(&sc->sc_lock);
3320	g_topology_lock();
3321	return (error);
3322}
3323
3324static void
3325g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
3326    struct g_consumer *cp, struct g_provider *pp)
3327{
3328	struct g_mirror_softc *sc;
3329
3330	g_topology_assert();
3331
3332	sc = gp->softc;
3333	if (sc == NULL)
3334		return;
3335	/* Skip synchronization geom. */
3336	if (gp == sc->sc_sync.ds_geom)
3337		return;
3338	if (pp != NULL) {
3339		/* Nothing here. */
3340	} else if (cp != NULL) {
3341		struct g_mirror_disk *disk;
3342
3343		disk = cp->private;
3344		if (disk == NULL)
3345			return;
3346		g_topology_unlock();
3347		sx_xlock(&sc->sc_lock);
3348		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id);
3349		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
3350			sbuf_printf(sb, "%s<Synchronized>", indent);
3351			if (disk->d_sync.ds_offset == 0)
3352				sbuf_printf(sb, "0%%");
3353			else {
3354				sbuf_printf(sb, "%u%%",
3355				    (u_int)((disk->d_sync.ds_offset * 100) /
3356				    sc->sc_provider->mediasize));
3357			}
3358			sbuf_printf(sb, "</Synchronized>\n");
3359			if (disk->d_sync.ds_offset > 0) {
3360				sbuf_printf(sb, "%s<BytesSynced>%jd"
3361				    "</BytesSynced>\n", indent,
3362				    (intmax_t)disk->d_sync.ds_offset);
3363			}
3364		}
3365		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
3366		    disk->d_sync.ds_syncid);
3367		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent,
3368		    disk->d_genid);
3369		sbuf_printf(sb, "%s<Flags>", indent);
3370		if (disk->d_flags == 0)
3371			sbuf_printf(sb, "NONE");
3372		else {
3373			int first = 1;
3374
3375#define	ADD_FLAG(flag, name)	do {					\
3376	if ((disk->d_flags & (flag)) != 0) {				\
3377		if (!first)						\
3378			sbuf_printf(sb, ", ");				\
3379		else							\
3380			first = 0;					\
3381		sbuf_printf(sb, name);					\
3382	}								\
3383} while (0)
3384			ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY");
3385			ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED");
3386			ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE");
3387			ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING,
3388			    "SYNCHRONIZING");
3389			ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
3390			ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN");
3391#undef	ADD_FLAG
3392		}
3393		sbuf_printf(sb, "</Flags>\n");
3394		sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent,
3395		    disk->d_priority);
3396		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3397		    g_mirror_disk_state2str(disk->d_state));
3398		sx_xunlock(&sc->sc_lock);
3399		g_topology_lock();
3400	} else {
3401		g_topology_unlock();
3402		sx_xlock(&sc->sc_lock);
3403		sbuf_printf(sb, "%s<Type>", indent);
3404		switch (sc->sc_type) {
3405		case G_MIRROR_TYPE_AUTOMATIC:
3406			sbuf_printf(sb, "AUTOMATIC");
3407			break;
3408		case G_MIRROR_TYPE_MANUAL:
3409			sbuf_printf(sb, "MANUAL");
3410			break;
3411		default:
3412			sbuf_printf(sb, "UNKNOWN");
3413			break;
3414		}
3415		sbuf_printf(sb, "</Type>\n");
3416		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
3417		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
3418		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
3419		sbuf_printf(sb, "%s<Flags>", indent);
3420		if (sc->sc_flags == 0)
3421			sbuf_printf(sb, "NONE");
3422		else {
3423			int first = 1;
3424
3425#define	ADD_FLAG(flag, name)	do {					\
3426	if ((sc->sc_flags & (flag)) != 0) {				\
3427		if (!first)						\
3428			sbuf_printf(sb, ", ");				\
3429		else							\
3430			first = 0;					\
3431		sbuf_printf(sb, name);					\
3432	}								\
3433} while (0)
3434			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
3435			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
3436#undef	ADD_FLAG
3437		}
3438		sbuf_printf(sb, "</Flags>\n");
3439		sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent,
3440		    (u_int)sc->sc_slice);
3441		sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent,
3442		    balance_name(sc->sc_balance));
3443		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
3444		    sc->sc_ndisks);
3445		sbuf_printf(sb, "%s<State>", indent);
3446		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
3447			sbuf_printf(sb, "%s", "STARTING");
3448		else if (sc->sc_ndisks ==
3449		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE))
3450			sbuf_printf(sb, "%s", "COMPLETE");
3451		else
3452			sbuf_printf(sb, "%s", "DEGRADED");
3453		sbuf_printf(sb, "</State>\n");
3454		sx_xunlock(&sc->sc_lock);
3455		g_topology_lock();
3456	}
3457}
3458
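/*
 * Shutdown hook: mark every mirror for delayed destruction so it is torn
 * down cleanly once its provider is closed for the last time.
 */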
3459static void
3460g_mirror_shutdown_post_sync(void *arg, int howto)
3461{
3462	struct g_class *mp;
3463	struct g_geom *gp, *gp2;
3464	struct g_mirror_softc *sc;
3465	int error;
3466
3467	if (panicstr != NULL)
3468		return;
3469
3470	mp = arg;
3471	g_topology_lock();
3472	g_mirror_shutdown = 1;
3473	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
3474		if ((sc = gp->softc) == NULL)
3475			continue;
3476		/* Skip synchronization geom. */
3477		if (gp == sc->sc_sync.ds_geom)
3478			continue;
3479		g_topology_unlock();
3480		sx_xlock(&sc->sc_lock);
3481		g_mirror_idle(sc, -1);
3482		g_cancel_event(sc);
3483		error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED);
3484		if (error != 0)
3485			sx_xunlock(&sc->sc_lock);
3486		g_topology_lock();
3487	}
3488	g_topology_unlock();
3489}
3490
3491static void
3492g_mirror_init(struct g_class *mp)
3493{
3494
3495	g_mirror_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
3496	    g_mirror_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
3497	if (g_mirror_post_sync == NULL)
3498		G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event.");
3499}
3500
3501static void
3502g_mirror_fini(struct g_class *mp)
3503{
3504
3505	if (g_mirror_post_sync != NULL)
3506		EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_post_sync);
3507}
3508
3509DECLARE_GEOM_CLASS(g_mirror_class, g_mirror);
3510