/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/geom/mirror/g_mirror.c 324588 2017-10-13 09:13:08Z avg $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/mirror/g_mirror.h>

FEATURE(geom_mirror, "GEOM mirroring support");

static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0,
    "GEOM_MIRROR stuff");
u_int g_mirror_debug = 0;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RWTUN, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_timeout = 4;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
static u_int g_mirror_idletime = 5;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RWTUN,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
static u_int g_mirror_disconnect_on_failure = 1;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN,
    &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_mirror_syncreqs = 2;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests.");

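/*
 * Usage sketch (hypothetical values): the tunables above live under
 * kern.geom.mirror.  The CTLFLAG_RWTUN ones can be changed at runtime,
 * e.g.:
 *
 *	sysctl kern.geom.mirror.debug=2
 *	sysctl kern.geom.mirror.idletime=10
 *
 * sync_requests is CTLFLAG_RDTUN, so it can only be set as a loader
 * tunable, e.g. kern.geom.mirror.sync_requests="4" in loader.conf.
 */
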
#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)

static eventhandler_tag g_mirror_post_sync = NULL;
static int g_mirror_shutdown = 0;

static g_ctl_destroy_geom_t g_mirror_destroy_geom;
static g_taste_t g_mirror_taste;
static g_init_t g_mirror_init;
static g_fini_t g_mirror_fini;
static g_provgone_t g_mirror_providergone;
static g_resize_t g_mirror_resize;

struct g_class g_mirror_class = {
	.name = G_MIRROR_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_mirror_config,
	.taste = g_mirror_taste,
	.destroy_geom = g_mirror_destroy_geom,
	.init = g_mirror_init,
	.fini = g_mirror_fini,
	.providergone = g_mirror_providergone,
	.resize = g_mirror_resize
};


static void g_mirror_destroy_provider(struct g_mirror_softc *sc);
static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
static void g_mirror_update_device(struct g_mirror_softc *sc, bool force);
static void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type);
static void g_mirror_register_request(struct bio *bp);
static void g_mirror_sync_release(struct g_mirror_softc *sc);


static const char *
g_mirror_disk_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DISK_STATE_NONE:
		return ("NONE");
	case G_MIRROR_DISK_STATE_NEW:
		return ("NEW");
	case G_MIRROR_DISK_STATE_ACTIVE:
		return ("ACTIVE");
	case G_MIRROR_DISK_STATE_STALE:
		return ("STALE");
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		return ("SYNCHRONIZING");
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		return ("DISCONNECTED");
	case G_MIRROR_DISK_STATE_DESTROY:
		return ("DESTROY");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_device_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DEVICE_STATE_STARTING:
		return ("STARTING");
	case G_MIRROR_DEVICE_STATE_RUNNING:
		return ("RUNNING");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_get_diskname(struct g_mirror_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_name);
}

/*
 * --- Event handling functions ---
 * Events in geom_mirror are used to maintain the status of disks and the
 * device from a single thread, which simplifies locking.
 */
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

	free(ep, M_MIRROR);
}

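/*
 * Queue a state change event for the worker thread.  Unless
 * G_MIRROR_EVENT_DONTWAIT is set, drop sc_lock and sleep until the worker
 * marks the event with G_MIRROR_EVENT_DONE, then return the event's error.
 */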
int
g_mirror_event_send(void *arg, int state, int flags)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
		return (0);
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_mirror_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}

static struct g_mirror_event *
g_mirror_event_get(struct g_mirror_softc *sc)
{
	struct g_mirror_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_mirror_event_cancel(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_mirror_ndisks(struct g_mirror_softc *sc, int state)
{
	struct g_mirror_disk *disk;
	u_int n = 0;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (state == -1 || disk->d_state == state)
			n++;
	}
	return (n);
}

/*
 * Find a disk in the mirror by its disk ID.
 */
static struct g_mirror_disk *
g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
{
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_id == id)
			return (disk);
	}
	return (NULL);
}

static u_int
g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

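/*
 * Return non-zero if any I/O requests for the given consumer are still in
 * flight or sitting in the queue, in which case the consumer must not be
 * destroyed yet.
 */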
static int
g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_mirror_nrequests(sc, cp) > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

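/*
 * Close the consumer and, if it is safe to do so, detach and destroy it.
 * If closing the consumer sends a retaste event, destruction is deferred
 * to an event handler so that the retaste is ignored (see the comment
 * inside the function).
 */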
static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event was sent (inside g_access()), we
		 * can send the event to detach and destroy the consumer.
		 * A class which has a consumer attached to the given provider
		 * will not receive a retaste event for that provider.
		 * This is how we ignore retaste events when closing consumers
		 * opened for writing: the consumer is detached and destroyed
		 * after the retaste event is sent.
		 */
		g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static int
g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	cp->flags |= G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_topology_unlock();
		G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	g_topology_unlock();
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;

	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
	return (0);
}

static void
g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_mirror_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize a disk.  This means: allocate memory, create a consumer,
 * attach it to the provider and open access (r1w1e1) to it.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int i, error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	disk->d_id = md->md_did;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_priority = md->md_priority;
	disk->d_flags = md->md_dflags;
	error = g_getattr("GEOM::candelete", disk->d_consumer, &i);
	if (error == 0 && i != 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_CANDELETE;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL)
		free(disk, M_MIRROR);
	return (NULL);
}

static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_REMOVE(disk, d_next);
	g_mirror_event_cancel(disk);
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk, 1);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_STALE:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		free(disk, M_MIRROR);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}

static void
g_mirror_free_device(struct g_mirror_softc *sc)
{

	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	mtx_destroy(&sc->sc_done_mtx);
	sx_destroy(&sc->sc_lock);
	free(sc, M_MIRROR);
}

static void
g_mirror_providergone(struct g_provider *pp)
{
	struct g_mirror_softc *sc = pp->private;

	if ((--sc->sc_refcnt) == 0)
		g_mirror_free_device(sc);
}

static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;
	struct g_consumer *cp, *tmpcp;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
		g_mirror_destroy_disk(disk);
	}
	while ((ep = g_mirror_event_get(sc)) != NULL) {
		g_mirror_event_remove(sc, ep);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);

	g_topology_lock();
	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
		g_mirror_disconnect_consumer(sc, cp);
	}
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	sx_xunlock(&sc->sc_lock);
	if ((--sc->sc_refcnt) == 0)
		g_mirror_free_device(sc);
	g_topology_unlock();
}

static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}

/*
 * Return the next active disk on the list.
 * It is possible that it will be the same disk as the given one.
 * If there are no active disks on the list, NULL is returned.
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}

static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_hint == NULL) {
		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
		if (sc->sc_hint == NULL)
			return (NULL);
	}
	disk = sc->sc_hint;
	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
		disk = g_mirror_find_next(sc, disk);
		if (disk == NULL)
			return (NULL);
	}
	sc->sc_hint = g_mirror_find_next(sc, disk);
	return (disk);
}

static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
	if (md != NULL &&
	    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0) {
		/*
		 * Handle the case when the size of the parent provider
		 * was reduced.
		 */
		if (offset < md->md_mediasize)
			error = ENOSPC;
		else
			mirror_metadata_encode(md, sector);
	}
	KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_metadata_write, error);
	if (error == 0)
		error = g_write_data(cp, offset, sector, length);
	free(sector, M_MIRROR);
	if (error != 0) {
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
			disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
			G_MIRROR_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		} else {
			G_MIRROR_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		}
		if (g_mirror_disconnect_on_failure &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
		}
	}
	return (error);
}

static int
g_mirror_clear_metadata(struct g_mirror_disk *disk)
{
	int error;

	g_topology_assert_not();
	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

	if (disk->d_softc->sc_type != G_MIRROR_TYPE_AUTOMATIC)
		return (0);
	error = g_mirror_write_metadata(disk, NULL);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot clear metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
	return (error);
}

void
g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{

	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
	md->md_version = G_MIRROR_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_mid = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_slice = sc->sc_slice;
	md->md_balance = sc->sc_balance;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
	bzero(md->md_provider, sizeof(md->md_provider));
	if (disk == NULL) {
		md->md_did = arc4random();
		md->md_priority = 0;
		md->md_syncid = 0;
		md->md_dflags = 0;
		md->md_sync_offset = 0;
		md->md_provsize = 0;
	} else {
		md->md_did = disk->d_id;
		md->md_priority = disk->d_priority;
		md->md_syncid = disk->d_sync.ds_syncid;
		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			md->md_sync_offset = disk->d_sync.ds_offset_done;
		else
			md->md_sync_offset = 0;
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
			strlcpy(md->md_provider,
			    disk->d_consumer->provider->name,
			    sizeof(md->md_provider));
		}
		md->md_provsize = disk->d_consumer->provider->mediasize;
	}
}

void
g_mirror_update_metadata(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC)
		return;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0)
		g_mirror_fill_metadata(sc, disk, &md);
	error = g_mirror_write_metadata(disk, &md);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
}

static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_mirror_update_metadata(disk);
		}
	}
}

static void
g_mirror_bump_genid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_mirror_update_metadata(disk);
		}
	}
}

static int
g_mirror_idle(struct g_mirror_softc *sc, int acw)
{
	struct g_mirror_disk *disk;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write);
		if (!g_mirror_shutdown && timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	return (0);
}

static void
g_mirror_unidle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
}

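/*
 * Completion routine for BIO_FLUSH requests cloned in g_mirror_flush():
 * aggregate the children's status into the parent bio and deliver the
 * parent once all children have come back.
 */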
static void
g_mirror_flush_done(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct bio *pbp;

	pbp = bp->bio_parent;
	sc = pbp->bio_to->private;
	mtx_lock(&sc->sc_done_mtx);
	if (pbp->bio_error == 0)
		pbp->bio_error = bp->bio_error;
	pbp->bio_completed += bp->bio_completed;
	pbp->bio_inbed++;
	if (pbp->bio_children == pbp->bio_inbed) {
		mtx_unlock(&sc->sc_done_mtx);
		g_io_deliver(pbp, pbp->bio_error);
	} else
		mtx_unlock(&sc->sc_done_mtx);
	g_destroy_bio(bp);
}

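/*
 * Completion routine for regular requests: flag the bio as a regular one
 * and hand it back to the worker thread via the request queue.
 */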
static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_regular_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	pbp = bp->bio_parent;
	sc = pbp->bio_to->private;
	bp->bio_from->index--;
	if (bp->bio_cmd == BIO_WRITE)
		sc->sc_writes--;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
	}

	if (bp->bio_cmd == BIO_READ)
		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_read,
		    bp->bio_error);
	else if (bp->bio_cmd == BIO_WRITE)
		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_write,
		    bp->bio_error);

	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (bp->bio_error == 0 && pbp->bio_error == 0) {
		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
		g_destroy_bio(bp);
		if (pbp->bio_children == pbp->bio_inbed) {
			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
			pbp->bio_completed = pbp->bio_length;
			if (pbp->bio_cmd == BIO_WRITE ||
			    pbp->bio_cmd == BIO_DELETE) {
				bioq_remove(&sc->sc_inflight, pbp);
				/* Release delayed sync requests if possible. */
				g_mirror_sync_release(sc);
			}
			g_io_deliver(pbp, pbp->bio_error);
		}
		return;
	} else if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		if (disk != NULL) {
			if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
				disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
				G_MIRROR_LOGREQ(0, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			} else {
				G_MIRROR_LOGREQ(1, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			}
			if (g_mirror_disconnect_on_failure &&
			    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1)
			{
				if (bp->bio_error == ENXIO &&
				    bp->bio_cmd == BIO_READ)
					sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
				else if (bp->bio_error == ENXIO)
					sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID_NOW;
				else
					sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
				g_mirror_event_send(disk,
				    G_MIRROR_DISK_STATE_DISCONNECTED,
				    G_MIRROR_EVENT_DONTWAIT);
			}
		}
		switch (pbp->bio_cmd) {
		case BIO_DELETE:
		case BIO_WRITE:
			pbp->bio_inbed--;
			pbp->bio_children--;
			break;
		}
	}
	g_destroy_bio(bp);

	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (pbp->bio_inbed < pbp->bio_children)
			break;
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1)
			g_io_deliver(pbp, pbp->bio_error);
		else {
			pbp->bio_error = 0;
			mtx_lock(&sc->sc_queue_mtx);
			bioq_insert_tail(&sc->sc_queue, pbp);
			mtx_unlock(&sc->sc_queue_mtx);
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
			wakeup(sc);
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_inbed < pbp->bio_children) {
			/* Do nothing. */
			break;
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* Some requests succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		}
		bioq_remove(&sc->sc_inflight, pbp);
		/* Release delayed sync requests if possible. */
		g_mirror_sync_release(sc);
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
		break;
	}
}

static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

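/*
 * GEOM::candelete attribute handler: report whether the mirror supports
 * BIO_DELETE, which it does if at least one component does.
 */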
static void
g_mirror_candelete(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	int *val;

	sc = bp->bio_to->private;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE)
			break;
	}
	val = (int *)bp->bio_data;
	*val = (disk != NULL);
	g_io_deliver(bp, 0);
}

static void
g_mirror_kernel_dump(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *cbp;
	struct g_kerneldump *gkd;

	/*
	 * We configure dumping to the first component, because this component
	 * will be used for reading with the 'prefer' balance algorithm.
	 * If the component with the highest priority is currently disconnected,
	 * we will not be able to read the dump after the reboot even if it is
	 * connected and synchronized later.  Can we do something better?
	 */
	sc = bp->bio_to->private;
	disk = LIST_FIRST(&sc->sc_disks);

	gkd = (struct g_kerneldump *)bp->bio_data;
	if (gkd->length > bp->bio_to->mediasize)
		gkd->length = bp->bio_to->mediasize;
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	g_io_request(cbp, disk->d_consumer);
	G_MIRROR_DEBUG(1, "Kernel dump will go to %s.",
	    g_mirror_get_diskname(disk));
}

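/*
 * Fan a BIO_FLUSH out to every active component.  All clones are allocated
 * up front so that an allocation failure can be reported cleanly as ENOMEM
 * before anything has been sent.
 */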
static void
g_mirror_flush(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			while ((cbp = bioq_takefirst(&queue)) != NULL)
				g_destroy_bio(cbp);
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_mirror_flush_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
	}
	while ((cbp = bioq_takefirst(&queue)) != NULL) {
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->private;
	/*
	 * If sc == NULL or there are no valid disks, the provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_FLUSH:
		g_mirror_flush(sc, bp);
		return;
	case BIO_GETATTR:
		if (!strcmp(bp->bio_attribute, "GEOM::candelete")) {
			g_mirror_candelete(bp);
			return;
		} else if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) {
			g_mirror_kernel_dump(bp);
			return;
		}
		/* FALLTHROUGH */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

/*
 * Return TRUE if the given request is colliding with an in-progress
 * synchronization request.
 */
static int
g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	u_int i;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING)
			continue;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			sbp = disk->d_sync.ds_bios[i];
			if (sbp == NULL)
				continue;
			sstart = sbp->bio_offset;
			send = sbp->bio_offset + sbp->bio_length;
			if (rend > sstart && rstart < send)
				return (1);
		}
	}
	return (0);
}

/*
 * Return TRUE if the given sync request is colliding with an in-progress
 * regular request.
 */
static int
g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	sstart = sbp->bio_offset;
	send = sbp->bio_offset + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Put the request onto the delayed queue.
 */
static void
g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying request.");
	bioq_insert_head(&sc->sc_regular_delayed, bp);
}

/*
 * Put the synchronization request onto the delayed queue.
 */
static void
g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request.");
	bioq_insert_tail(&sc->sc_sync_delayed, bp);
}

/*
 * Release delayed regular requests which no longer collide with
 * synchronization requests.
 */
static void
g_mirror_regular_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
		if (g_mirror_sync_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_regular_delayed, bp);
		G_MIRROR_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_head(&sc->sc_queue, bp);
#if 0
		/*
		 * wakeup() is not needed, because this function is called from
		 * the worker thread.
		 */
		wakeup(&sc->sc_queue);
#endif
		mtx_unlock(&sc->sc_queue_mtx);
	}
}

/*
 * Release delayed synchronization requests which no longer collide with
 * regular requests.
 */
static void
g_mirror_sync_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
		if (g_mirror_regular_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_sync_delayed, bp);
		G_MIRROR_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a READ
 * request is sent to the active provider, and then a WRITE request (with
 * the data that was read) is sent to the provider being synchronized.
 * When the WRITE is finished, a new synchronization request is sent.
 */
static void
g_mirror_sync_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;

	bp->bio_from->index--;
	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		free(bp->bio_data, M_MIRROR);
		g_destroy_bio(bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;

		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_read,
		    bp->bio_error);

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			return;
		}
		G_MIRROR_LOGREQ(3, bp,
		    "Synchronization request half-finished.");
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
	    {
		struct g_mirror_disk_sync *sync;
		off_t offset;
		void *data;
		int i;

		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_write,
		    bp->bio_error);

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		sync = &disk->d_sync;
		if (sync->ds_offset >= sc->sc_mediasize ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			if (sync->ds_bios != NULL) {
				i = (int)(uintptr_t)bp->bio_caller1;
				sync->ds_bios[i] = NULL;
			}
			free(bp->bio_data, M_MIRROR);
			g_destroy_bio(bp);
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				return;
			}
			/* Disk up-to-date, activate it. */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		data = bp->bio_data;
		g_reset_bio(bp);
		bp->bio_cmd = BIO_READ;
		bp->bio_offset = sync->ds_offset;
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		sync->ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_data = data;
		bp->bio_from = sync->ds_consumer;
		bp->bio_to = sc->sc_provider;
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Release delayed requests if possible. */
		g_mirror_regular_release(sc);

		/* Find the smallest offset. */
		offset = sc->sc_mediasize;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			bp = sync->ds_bios[i];
			if (bp->bio_offset < offset)
				offset = bp->bio_offset;
		}
		if (sync->ds_offset_done + (MAXPHYS * 100) < offset) {
			/* Update offset_done on every 100 blocks. */
			sync->ds_offset_done = offset;
			g_mirror_update_metadata(disk);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

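/*
 * PREFER balance algorithm: always read from the first active disk on the
 * list.  With this algorithm the first component is the preferred one (see
 * also the comment in g_mirror_kernel_dump()).
 */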
static void
g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

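/*
 * ROUND-ROBIN balance algorithm: rotate reads across the active disks via
 * g_mirror_get_disk(), which advances sc_hint on every call.
 */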
static void
g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	disk = g_mirror_get_disk(sc);
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

#define TRACK_SIZE  (1 * 1024 * 1024)
#define LOAD_SCALE	256
#define ABS(x)		(((x) >= 0) ? (x) : (-(x)))

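/*
 * LOAD balance algorithm: pick the active disk with the lowest smoothed
 * load.  A disk whose last known head position exactly matches the
 * request's offset gets a 2 * LOAD_SCALE bonus; one within TRACK_SIZE of
 * it gets a 1 * LOAD_SCALE bonus.  On every dispatch each disk's load is
 * updated as an exponential moving average of its outstanding request
 * count: load = (index * LOAD_SCALE + load * 7) / 8.
 */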
static void
g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk, *dp;
	struct g_consumer *cp;
	struct bio *cbp;
	int prio, best;

	/* Find a disk with the smallest load. */
	disk = NULL;
	best = INT_MAX;
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		prio = dp->load;
		/* If disk head is precisely in position - highly prefer it. */
		if (dp->d_last_offset == bp->bio_offset)
			prio -= 2 * LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE)
			prio -= 1 * LOAD_SCALE;
		if (prio <= best) {
			disk = dp;
			best = prio;
		}
	}
	KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name));
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	/* Remember last head position. */
	disk->d_last_offset = bp->bio_offset + bp->bio_length;
	/* Update loads. */
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		dp->load = (dp->d_consumer->index * LOAD_SCALE +
		    dp->load * 7) / 8;
	}
	g_io_request(cbp, cp);
}

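/*
 * SPLIT balance algorithm: requests larger than sc_slice are divided into
 * sector-aligned slices that are issued to the active disks in parallel;
 * smaller requests fall back to round-robin.
 */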
static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	off_t left, mod, offset, slice;
	u_char *data;
	u_int ndisks;

	if (bp->bio_length <= sc->sc_slice) {
		g_mirror_request_round_robin(sc, bp);
		return;
	}
	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
	slice = bp->bio_length / ndisks;
	mod = slice % sc->sc_provider->sectorsize;
	if (mod != 0)
		slice += sc->sc_provider->sectorsize - mod;
	/*
	 * Allocate all bios before sending any request, so we can return
	 * ENOMEM in a nice and clean way.
	 */
	left = bp->bio_length;
	offset = bp->bio_offset;
	data = bp->bio_data;
	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			while ((cbp = bioq_takefirst(&queue)) != NULL)
				g_destroy_bio(cbp);
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_mirror_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
		cbp->bio_offset = offset;
		cbp->bio_data = data;
		cbp->bio_length = MIN(left, slice);
		left -= cbp->bio_length;
		if (left == 0)
			break;
		offset += cbp->bio_length;
		data += cbp->bio_length;
	}
	while ((cbp = bioq_takefirst(&queue)) != NULL) {
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		disk->d_consumer->index++;
		g_io_request(cbp, disk->d_consumer);
	}
}

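/*
 * Dispatch a regular request from the provider.  Reads go to one or more
 * components according to the balance algorithm; writes and deletes are
 * cloned to every component that is ACTIVE, or SYNCHRONIZING and already
 * synchronized past the request's offset.
 */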
1661static void
1662g_mirror_register_request(struct bio *bp)
1663{
1664	struct g_mirror_softc *sc;
1665
1666	sc = bp->bio_to->private;
1667	switch (bp->bio_cmd) {
1668	case BIO_READ:
1669		switch (sc->sc_balance) {
1670		case G_MIRROR_BALANCE_LOAD:
1671			g_mirror_request_load(sc, bp);
1672			break;
1673		case G_MIRROR_BALANCE_PREFER:
1674			g_mirror_request_prefer(sc, bp);
1675			break;
1676		case G_MIRROR_BALANCE_ROUND_ROBIN:
1677			g_mirror_request_round_robin(sc, bp);
1678			break;
1679		case G_MIRROR_BALANCE_SPLIT:
1680			g_mirror_request_split(sc, bp);
1681			break;
1682		}
1683		return;
1684	case BIO_WRITE:
1685	case BIO_DELETE:
1686	    {
1687		struct g_mirror_disk *disk;
1688		struct g_mirror_disk_sync *sync;
1689		struct bio_queue_head queue;
1690		struct g_consumer *cp;
1691		struct bio *cbp;
1692
1693		/*
1694		 * Delay the request if it is colliding with a synchronization
1695		 * request.
1696		 */
1697		if (g_mirror_sync_collision(sc, bp)) {
1698			g_mirror_regular_delay(sc, bp);
1699			return;
1700		}
1701
1702		if (sc->sc_idle)
1703			g_mirror_unidle(sc);
1704		else
1705			sc->sc_last_write = time_uptime;
1706
1707		/*
1708		 * Allocate all bios before sending any request, so we can
1709		 * return ENOMEM in nice and clean way.
1710		 */
1711		bioq_init(&queue);
1712		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1713			sync = &disk->d_sync;
1714			switch (disk->d_state) {
1715			case G_MIRROR_DISK_STATE_ACTIVE:
1716				break;
1717			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
1718				if (bp->bio_offset >= sync->ds_offset)
1719					continue;
1720				break;
1721			default:
1722				continue;
1723			}
1724			if (bp->bio_cmd == BIO_DELETE &&
1725			    (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) == 0)
1726				continue;
1727			cbp = g_clone_bio(bp);
1728			if (cbp == NULL) {
1729				while ((cbp = bioq_takefirst(&queue)) != NULL)
1730					g_destroy_bio(cbp);
1731				if (bp->bio_error == 0)
1732					bp->bio_error = ENOMEM;
1733				g_io_deliver(bp, bp->bio_error);
1734				return;
1735			}
1736			bioq_insert_tail(&queue, cbp);
1737			cbp->bio_done = g_mirror_done;
1738			cp = disk->d_consumer;
1739			cbp->bio_caller1 = cp;
1740			cbp->bio_to = cp->provider;
1741			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1742			    ("Consumer %s not opened (r%dw%de%d).",
1743			    cp->provider->name, cp->acr, cp->acw, cp->ace));
1744		}
1745		if (bioq_first(&queue) == NULL) {
1746			g_io_deliver(bp, EOPNOTSUPP);
1747			return;
1748		}
1749		while ((cbp = bioq_takefirst(&queue)) != NULL) {
1750			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1751			cp = cbp->bio_caller1;
1752			cbp->bio_caller1 = NULL;
1753			cp->index++;
1754			sc->sc_writes++;
1755			g_io_request(cbp, cp);
1756		}
1757		/*
1758		 * Put request onto inflight queue, so we can check if new
1759		 * synchronization requests don't collide with it.
1760		 */
1761		bioq_insert_tail(&sc->sc_inflight, bp);
1762		/*
1763		 * Bump syncid on first write.
1764		 */
1765		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) {
1766			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
1767			g_mirror_bump_syncid(sc);
1768		}
1769		return;
1770	    }
1771	default:
1772		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
1773		    bp->bio_cmd, sc->sc_name));
1774		break;
1775	}
1776}
1777
1778static int
1779g_mirror_can_destroy(struct g_mirror_softc *sc)
1780{
1781	struct g_geom *gp;
1782	struct g_consumer *cp;
1783
1784	g_topology_assert();
1785	gp = sc->sc_geom;
1786	if (gp->softc == NULL)
1787		return (1);
1788	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_TASTING) != 0)
1789		return (0);
1790	LIST_FOREACH(cp, &gp->consumer, consumer) {
1791		if (g_mirror_is_busy(sc, cp))
1792			return (0);
1793	}
1794	gp = sc->sc_sync.ds_geom;
1795	LIST_FOREACH(cp, &gp->consumer, consumer) {
1796		if (g_mirror_is_busy(sc, cp))
1797			return (0);
1798	}
1799	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
1800	    sc->sc_name);
1801	return (1);
1802}
1803
1804static int
1805g_mirror_try_destroy(struct g_mirror_softc *sc)
1806{
1807
1808	if (sc->sc_rootmount != NULL) {
1809		G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
1810		    sc->sc_rootmount);
1811		root_mount_rel(sc->sc_rootmount);
1812		sc->sc_rootmount = NULL;
1813	}
1814	g_topology_lock();
1815	if (!g_mirror_can_destroy(sc)) {
1816		g_topology_unlock();
1817		return (0);
1818	}
1819	sc->sc_geom->softc = NULL;
1820	sc->sc_sync.ds_geom->softc = NULL;
1821	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) {
1822		g_topology_unlock();
1823		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
1824		    &sc->sc_worker);
1825		/* Unlock sc_lock here, as it can be destroyed after wakeup. */
1826		sx_xunlock(&sc->sc_lock);
1827		wakeup(&sc->sc_worker);
1828		sc->sc_worker = NULL;
1829	} else {
1830		g_topology_unlock();
1831		g_mirror_destroy_device(sc);
1832	}
1833	return (1);
1834}
1835
1836/*
1837 * Worker thread.
1838 */
1839static void
1840g_mirror_worker(void *arg)
1841{
1842	struct g_mirror_softc *sc;
1843	struct g_mirror_event *ep;
1844	struct bio *bp;
1845	int timeout;
1846
1847	sc = arg;
1848	thread_lock(curthread);
1849	sched_prio(curthread, PRIBIO);
1850	thread_unlock(curthread);
1851
1852	sx_xlock(&sc->sc_lock);
1853	for (;;) {
1854		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
1855		/*
1856		 * First take a look at events.
1857		 * This is important to handle events before any I/O requests.
1858		 */
1859		ep = g_mirror_event_get(sc);
1860		if (ep != NULL) {
1861			g_mirror_event_remove(sc, ep);
1862			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
1863				/* Update only device status. */
1864				G_MIRROR_DEBUG(3,
1865				    "Running event for device %s.",
1866				    sc->sc_name);
1867				ep->e_error = 0;
1868				g_mirror_update_device(sc, true);
1869			} else {
1870				/* Update disk status. */
1871				G_MIRROR_DEBUG(3, "Running event for disk %s.",
1872				     g_mirror_get_diskname(ep->e_disk));
1873				ep->e_error = g_mirror_update_disk(ep->e_disk,
1874				    ep->e_state);
1875				if (ep->e_error == 0)
1876					g_mirror_update_device(sc, false);
1877			}
1878			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
1879				KASSERT(ep->e_error == 0,
1880				    ("Error cannot be handled."));
1881				g_mirror_event_free(ep);
1882			} else {
1883				ep->e_flags |= G_MIRROR_EVENT_DONE;
1884				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
1885				    ep);
1886				mtx_lock(&sc->sc_events_mtx);
1887				wakeup(ep);
1888				mtx_unlock(&sc->sc_events_mtx);
1889			}
1890			if ((sc->sc_flags &
1891			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
1892				if (g_mirror_try_destroy(sc)) {
1893					curthread->td_pflags &= ~TDP_GEOM;
1894					G_MIRROR_DEBUG(1, "Thread exiting.");
1895					kproc_exit(0);
1896				}
1897			}
1898			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
1899			continue;
1900		}
1901		/*
1902		 * Check if we can mark array as CLEAN and if we can't take
1903		 * how much seconds should we wait.
1904		 */
1905		timeout = g_mirror_idle(sc, -1);
1906		/*
1907		 * Now I/O requests.
1908		 */
1909		/* Get first request from the queue. */
1910		mtx_lock(&sc->sc_queue_mtx);
1911		bp = bioq_takefirst(&sc->sc_queue);
1912		if (bp == NULL) {
1913			if ((sc->sc_flags &
1914			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
1915				mtx_unlock(&sc->sc_queue_mtx);
1916				if (g_mirror_try_destroy(sc)) {
1917					curthread->td_pflags &= ~TDP_GEOM;
1918					G_MIRROR_DEBUG(1, "Thread exiting.");
1919					kproc_exit(0);
1920				}
1921				mtx_lock(&sc->sc_queue_mtx);
1922				if (bioq_first(&sc->sc_queue) != NULL) {
1923					mtx_unlock(&sc->sc_queue_mtx);
1924					continue;
1925				}
1926			}
1927			sx_xunlock(&sc->sc_lock);
1928			/*
1929			 * XXX: We can miss an event here, because an event
1930			 *      can be added without the sx device lock and
1931			 *      without the queue mutex. Maybe I should just
1932			 *      stop using a dedicated mutex for event
1933			 *      synchronization and stick with the queue lock?
1934			 *      The event will hang here until the next I/O
1935			 *      request or the next event is received.
1936			 */
1937			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1",
1938			    timeout * hz);
1939			sx_xlock(&sc->sc_lock);
1940			G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
1941			continue;
1942		}
1943		mtx_unlock(&sc->sc_queue_mtx);
1944
1945		if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
1946		    (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
1947			g_mirror_sync_request(bp);	/* READ */
1948		} else if (bp->bio_to != sc->sc_provider) {
1949			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0)
1950				g_mirror_regular_request(bp);
1951			else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
1952				g_mirror_sync_request(bp);	/* WRITE */
1953			else {
1954				KASSERT(0,
1955				    ("Invalid request cflags=0x%hx to=%s.",
1956				    bp->bio_cflags, bp->bio_to->name));
1957			}
1958		} else {
1959			g_mirror_register_request(bp);
1960		}
1961		G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
1962	}
1963}
1964
1965static void
1966g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
1967{
1968
1969	sx_assert(&sc->sc_lock, SX_LOCKED);
1970
1971	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
1972		return;
1973	if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
1974		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
1975		    g_mirror_get_diskname(disk), sc->sc_name);
1976		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
1977	} else if (sc->sc_idle &&
1978	    (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
1979		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
1980		    g_mirror_get_diskname(disk), sc->sc_name);
1981		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
1982	}
1983}
1984
1985static void
1986g_mirror_sync_start(struct g_mirror_disk *disk)
1987{
1988	struct g_mirror_softc *sc;
1989	struct g_consumer *cp;
1990	struct bio *bp;
1991	int error, i;
1992
1993	g_topology_assert_not();
1994	sc = disk->d_softc;
1995	sx_assert(&sc->sc_lock, SX_LOCKED);
1996
1997	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
1998	    ("Disk %s is not marked for synchronization.",
1999	    g_mirror_get_diskname(disk)));
2000	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2001	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
2002	    sc->sc_state));
2003
2004	sx_xunlock(&sc->sc_lock);
2005	g_topology_lock();
2006	cp = g_new_consumer(sc->sc_sync.ds_geom);
2007	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
2008	error = g_attach(cp, sc->sc_provider);
2009	KASSERT(error == 0,
2010	    ("Cannot attach to %s (error=%d).", sc->sc_name, error));
2011	error = g_access(cp, 1, 0, 0);
2012	KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
2013	g_topology_unlock();
2014	sx_xlock(&sc->sc_lock);
2015
2016	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
2017	    g_mirror_get_diskname(disk));
2018	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0)
2019		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
2020	KASSERT(disk->d_sync.ds_consumer == NULL,
2021	    ("Sync consumer already exists (device=%s, disk=%s).",
2022	    sc->sc_name, g_mirror_get_diskname(disk)));
2023
2024	disk->d_sync.ds_consumer = cp;
2025	disk->d_sync.ds_consumer->private = disk;
2026	disk->d_sync.ds_consumer->index = 0;
2027
2028	/*
2029	 * Allocate memory for synchronization bios and initialize them.
2030	 */
2031	disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs,
2032	    M_MIRROR, M_WAITOK);
2033	for (i = 0; i < g_mirror_syncreqs; i++) {
2034		bp = g_alloc_bio();
2035		disk->d_sync.ds_bios[i] = bp;
2036		bp->bio_parent = NULL;
2037		bp->bio_cmd = BIO_READ;
2038		bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
2039		bp->bio_cflags = 0;
2040		bp->bio_offset = disk->d_sync.ds_offset;
2041		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
2042		disk->d_sync.ds_offset += bp->bio_length;
2043		bp->bio_done = g_mirror_sync_done;
2044		bp->bio_from = disk->d_sync.ds_consumer;
2045		bp->bio_to = sc->sc_provider;
2046		bp->bio_caller1 = (void *)(uintptr_t)i;
2047	}
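	/*
	 * Illustration: with the default of two sync requests and a
	 * typical MAXPHYS of 128 kB, the loop above prepares two reads
	 * covering [ds_offset, ds_offset + 128k) and
	 * [ds_offset + 128k, ds_offset + 256k), each clamped to the
	 * end of the media.
	 */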
2048
2049	/* Increase the number of disks in SYNCHRONIZING state. */
2050	sc->sc_sync.ds_ndisks++;
2051	/* Set the number of in-flight synchronization requests. */
2052	disk->d_sync.ds_inflight = g_mirror_syncreqs;
2053
2054	/*
2055	 * Fire off first synchronization requests.
2056	 */
2057	for (i = 0; i < g_mirror_syncreqs; i++) {
2058		bp = disk->d_sync.ds_bios[i];
2059		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
2060		disk->d_sync.ds_consumer->index++;
2061		/*
2062		 * Delay the request if it is colliding with a regular request.
2063		 */
2064		if (g_mirror_regular_collision(sc, bp))
2065			g_mirror_sync_delay(sc, bp);
2066		else
2067			g_io_request(bp, disk->d_sync.ds_consumer);
2068	}
2069}
2070
2071/*
2072 * Stop synchronization process.
2073 * type: 0 - synchronization finished
2074 *       1 - synchronization stopped
2075 */
2076static void
2077g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
2078{
2079	struct g_mirror_softc *sc;
2080	struct g_consumer *cp;
2081
2082	g_topology_assert_not();
2083	sc = disk->d_softc;
2084	sx_assert(&sc->sc_lock, SX_LOCKED);
2085
2086	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2087	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2088	    g_mirror_disk_state2str(disk->d_state)));
2089	if (disk->d_sync.ds_consumer == NULL)
2090		return;
2091
2092	if (type == 0) {
2093		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
2094		    sc->sc_name, g_mirror_get_diskname(disk));
2095	} else /* if (type == 1) */ {
2096		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
2097		    sc->sc_name, g_mirror_get_diskname(disk));
2098	}
2099	free(disk->d_sync.ds_bios, M_MIRROR);
2100	disk->d_sync.ds_bios = NULL;
2101	cp = disk->d_sync.ds_consumer;
2102	disk->d_sync.ds_consumer = NULL;
2103	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2104	sc->sc_sync.ds_ndisks--;
2105	sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
2106	g_topology_lock();
2107	g_mirror_kill_consumer(sc, cp);
2108	g_topology_unlock();
2109	sx_xlock(&sc->sc_lock);
2110}
2111
2112static void
2113g_mirror_launch_provider(struct g_mirror_softc *sc)
2114{
2115	struct g_mirror_disk *disk;
2116	struct g_provider *pp, *dp;
2117
2118	sx_assert(&sc->sc_lock, SX_LOCKED);
2119
2120	g_topology_lock();
2121	pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name);
2122	pp->flags |= G_PF_DIRECT_RECEIVE;
2123	pp->mediasize = sc->sc_mediasize;
2124	pp->sectorsize = sc->sc_sectorsize;
2125	pp->stripesize = 0;
2126	pp->stripeoffset = 0;
2127
2128	/* Splitting of unmapped BIOs could work, but isn't implemented now. */
2129	if (sc->sc_balance != G_MIRROR_BALANCE_SPLIT)
2130		pp->flags |= G_PF_ACCEPT_UNMAPPED;
2131
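	/*
	 * Inherit stripe geometry from the component with the largest
	 * stripesize, and withdraw unmapped-BIO support if any component
	 * underneath lacks it.
	 */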
2132	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2133		if (disk->d_consumer && disk->d_consumer->provider) {
2134			dp = disk->d_consumer->provider;
2135			if (dp->stripesize > pp->stripesize) {
2136				pp->stripesize = dp->stripesize;
2137				pp->stripeoffset = dp->stripeoffset;
2138			}
2139			/* A provider underneath us doesn't support unmapped I/O. */
2140			if ((dp->flags & G_PF_ACCEPT_UNMAPPED) == 0) {
2141				G_MIRROR_DEBUG(0, "Cancelling unmapped "
2142				    "because of %s.", dp->name);
2143				pp->flags &= ~G_PF_ACCEPT_UNMAPPED;
2144			}
2145		}
2146	}
2147	pp->private = sc;
2148	sc->sc_refcnt++;
2149	sc->sc_provider = pp;
2150	g_error_provider(pp, 0);
2151	g_topology_unlock();
2152	G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
2153	    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks);
2154	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2155		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
2156			g_mirror_sync_start(disk);
2157	}
2158}
2159
2160static void
2161g_mirror_destroy_provider(struct g_mirror_softc *sc)
2162{
2163	struct g_mirror_disk *disk;
2164	struct bio *bp;
2165
2166	g_topology_assert_not();
2167	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
2168	    sc->sc_name));
2169
2170	g_topology_lock();
2171	g_error_provider(sc->sc_provider, ENXIO);
2172	mtx_lock(&sc->sc_queue_mtx);
2173	while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
2174		/*
2175		 * Abort any pending I/O that wasn't generated by us.
2176		 * Synchronization requests and requests destined for individual
2177		 * mirror components can be destroyed immediately.
2178		 */
2179		if (bp->bio_to == sc->sc_provider &&
2180		    bp->bio_from->geom != sc->sc_sync.ds_geom) {
2181			g_io_deliver(bp, ENXIO);
2182		} else {
2183			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
2184				free(bp->bio_data, M_MIRROR);
2185			g_destroy_bio(bp);
2186		}
2187	}
2188	mtx_unlock(&sc->sc_queue_mtx);
2189	g_wither_provider(sc->sc_provider, ENXIO);
2190	sc->sc_provider = NULL;
2191	G_MIRROR_DEBUG(0, "Device %s: provider destroyed.", sc->sc_name);
2192	g_topology_unlock();
2193	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2194		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
2195			g_mirror_sync_stop(disk, 1);
2196	}
2197}
2198
2199static void
2200g_mirror_go(void *arg)
2201{
2202	struct g_mirror_softc *sc;
2203
2204	sc = arg;
2205	G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
2206	g_mirror_event_send(sc, 0,
2207	    G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE);
2208}
2209
2210static u_int
2211g_mirror_determine_state(struct g_mirror_disk *disk)
2212{
2213	struct g_mirror_softc *sc;
2214	u_int state;
2215
2216	sc = disk->d_softc;
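	/*
	 * Compare the disk's syncid with the device's:
	 * - equal: the disk is up to date (ACTIVE), is rebuilt from its
	 *   stored offset (SYNCHRONIZING), or is left STALE when
	 *   autosynchronization is disabled;
	 * - older: the disk has to be resynchronized from scratch;
	 * - newer: the mirror was started without its freshest disk,
	 *   so the disk is refused and destroyed below.
	 */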
2217	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
2218		if ((disk->d_flags &
2219		    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0 &&
2220		    (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 ||
2221		     (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0)) {
2222			/* Disk does not need synchronization. */
2223			state = G_MIRROR_DISK_STATE_ACTIVE;
2224		} else {
2225			if ((sc->sc_flags &
2226			     G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2227			    (disk->d_flags &
2228			     G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
2229				/*
2230				 * We can start synchronization from
2231				 * the stored offset.
2232				 */
2233				state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
2234			} else {
2235				state = G_MIRROR_DISK_STATE_STALE;
2236			}
2237		}
2238	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
2239		/*
2240		 * Reset all synchronization data for this disk,
2241		 * because even if it was synchronized, it was
2242		 * synchronized against disks with a different syncid.
2243		 */
2244		disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
2245		disk->d_sync.ds_offset = 0;
2246		disk->d_sync.ds_offset_done = 0;
2247		disk->d_sync.ds_syncid = sc->sc_syncid;
2248		if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2249		    (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
2250			state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
2251		} else {
2252			state = G_MIRROR_DISK_STATE_STALE;
2253		}
2254	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
2255		/*
2256		 * Not good, NOT GOOD!
2257		 * It means that the mirror was started on stale disks
2258		 * and a fresher disk has just arrived.
2259		 * If there were writes, the mirror is broken, sorry.
2260		 * I think the best choice here is to leave this disk
2261		 * untouched and inform the user loudly.
2262		 */
2263		G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
2264		    "disk (%s) arrived! It will not be connected to the "
2265		    "running device.", sc->sc_name,
2266		    g_mirror_get_diskname(disk));
2267		g_mirror_destroy_disk(disk);
2268		state = G_MIRROR_DISK_STATE_NONE;
2269		/* Return immediately, because disk was destroyed. */
2270		return (state);
2271	}
2272	G_MIRROR_DEBUG(3, "State for %s disk: %s.",
2273	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
2274	return (state);
2275}
2276
2277/*
2278 * Update device state.
2279 */
2280static void
2281g_mirror_update_device(struct g_mirror_softc *sc, bool force)
2282{
2283	struct g_mirror_disk *disk;
2284	u_int state;
2285
2286	sx_assert(&sc->sc_lock, SX_XLOCKED);
2287
2288	switch (sc->sc_state) {
2289	case G_MIRROR_DEVICE_STATE_STARTING:
2290	    {
2291		struct g_mirror_disk *pdisk, *tdisk;
2292		u_int dirty, ndisks, genid, syncid;
2293		bool broken;
2294
2295		KASSERT(sc->sc_provider == NULL,
2296		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
2297		/*
2298		 * Are we ready? We are if all disks are connected, or
2299		 * if we have any disks at all and 'force' is true.
2300		 */
2301		ndisks = g_mirror_ndisks(sc, -1);
2302		if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) {
2303			;
2304		} else if (ndisks == 0) {
2305			/*
2306			 * Disks went down in the starting phase, so
2307			 * destroy the device.
2308			 */
2309			callout_drain(&sc->sc_callout);
2310			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2311			G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2312			    sc->sc_rootmount);
2313			root_mount_rel(sc->sc_rootmount);
2314			sc->sc_rootmount = NULL;
2315			return;
2316		} else {
2317			return;
2318		}
2319
2320		/*
2321		 * Activate all disks with the biggest syncid.
2322		 */
2323		if (force) {
2324			/*
2325			 * If 'force' is true, we have been called due to
2326			 * a timeout, so don't bother canceling it.
2327			 */
2328			ndisks = 0;
2329			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2330				if ((disk->d_flags &
2331				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
2332					ndisks++;
2333				}
2334			}
2335			if (ndisks == 0) {
2336				/* No valid disks found, destroy device. */
2337				sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2338				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
2339				    __LINE__, sc->sc_rootmount);
2340				root_mount_rel(sc->sc_rootmount);
2341				sc->sc_rootmount = NULL;
2342				return;
2343			}
2344		} else {
2345			/* Cancel timeout. */
2346			callout_drain(&sc->sc_callout);
2347		}
2348
2349		/*
2350		 * Find the biggest genid.
2351		 */
2352		genid = 0;
2353		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2354			if (disk->d_genid > genid)
2355				genid = disk->d_genid;
2356		}
2357		sc->sc_genid = genid;
2358		/*
2359		 * Remove all disks without the biggest genid.
2360		 */
2361		broken = false;
2362		LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) {
2363			if (disk->d_genid < genid) {
2364				G_MIRROR_DEBUG(0,
2365				    "Component %s (device %s) broken, skipping.",
2366				    g_mirror_get_diskname(disk), sc->sc_name);
2367				g_mirror_destroy_disk(disk);
2368				/*
2369				 * Bump the syncid in case we discover a healthy
2370				 * replacement disk after starting the mirror.
2371				 */
2372				broken = true;
2373			}
2374		}
2375
2376		/*
2377		 * Find the biggest syncid.
2378		 */
2379		syncid = 0;
2380		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2381			if (disk->d_sync.ds_syncid > syncid)
2382				syncid = disk->d_sync.ds_syncid;
2383		}
2384
2385		/*
2386		 * Here we need to look for dirty disks, and if all disks
2387		 * with the biggest syncid are dirty, we have to choose the
2388		 * one with the biggest priority and rebuild the rest.
2389		 */
2390		/*
2391		 * Find the number of dirty disks with the biggest syncid.
2392		 * Find the number of disks with the biggest syncid.
2393		 * While here, find a disk with the biggest priority.
2394		 */
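		/*
		 * Worked example: disks A (syncid 5, dirty, priority 1),
		 * B (syncid 5, dirty, priority 2) and C (syncid 4) give
		 * ndisks = 2, dirty = 2 and pdisk = B; since dirty equals
		 * ndisks, A's syncid is zeroed below and A is
		 * resynchronized, using B as the master.
		 */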
2395		dirty = ndisks = 0;
2396		pdisk = NULL;
2397		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2398			if (disk->d_sync.ds_syncid != syncid)
2399				continue;
2400			if ((disk->d_flags &
2401			    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
2402				continue;
2403			}
2404			ndisks++;
2405			if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
2406				dirty++;
2407				if (pdisk == NULL ||
2408				    pdisk->d_priority < disk->d_priority) {
2409					pdisk = disk;
2410				}
2411			}
2412		}
2413		if (dirty == 0) {
2414			/* No dirty disks at all, great. */
2415		} else if (dirty == ndisks) {
2416			/*
2417			 * Force synchronization for all dirty disks except one
2418			 * with the biggest priority.
2419			 */
2420			KASSERT(pdisk != NULL, ("pdisk == NULL"));
2421			G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a "
2422			    "master disk for synchronization.",
2423			    g_mirror_get_diskname(pdisk), sc->sc_name);
2424			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2425				if (disk->d_sync.ds_syncid != syncid)
2426					continue;
2427				if ((disk->d_flags &
2428				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
2429					continue;
2430				}
2431				KASSERT((disk->d_flags &
2432				    G_MIRROR_DISK_FLAG_DIRTY) != 0,
2433				    ("Disk %s isn't marked as dirty.",
2434				    g_mirror_get_diskname(disk)));
2435				/* Skip the disk with the biggest priority. */
2436				if (disk == pdisk)
2437					continue;
2438				disk->d_sync.ds_syncid = 0;
2439			}
2440		} else if (dirty < ndisks) {
2441			/*
2442			 * Force synchronization for all dirty disks.
2443			 * We have some non-dirty disks.
2444			 */
2445			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2446				if (disk->d_sync.ds_syncid != syncid)
2447					continue;
2448				if ((disk->d_flags &
2449				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
2450					continue;
2451				}
2452				if ((disk->d_flags &
2453				    G_MIRROR_DISK_FLAG_DIRTY) == 0) {
2454					continue;
2455				}
2456				disk->d_sync.ds_syncid = 0;
2457			}
2458		}
2459
2460		/* Reset hint. */
2461		sc->sc_hint = NULL;
2462		sc->sc_syncid = syncid;
2463		if (force || broken) {
2464			/* Remember to bump syncid on first write. */
2465			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2466		}
2467		state = G_MIRROR_DEVICE_STATE_RUNNING;
2468		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
2469		    sc->sc_name, g_mirror_device_state2str(sc->sc_state),
2470		    g_mirror_device_state2str(state));
2471		sc->sc_state = state;
2472		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2473			state = g_mirror_determine_state(disk);
2474			g_mirror_event_send(disk, state,
2475			    G_MIRROR_EVENT_DONTWAIT);
2476			if (state == G_MIRROR_DISK_STATE_STALE)
2477				sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2478		}
2479		break;
2480	    }
2481	case G_MIRROR_DEVICE_STATE_RUNNING:
2482		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
2483		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2484			/*
2485			 * No active disks or no disks at all,
2486			 * so destroy the device.
2487			 */
2488			if (sc->sc_provider != NULL)
2489				g_mirror_destroy_provider(sc);
2490			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2491			break;
2492		} else if (g_mirror_ndisks(sc,
2493		    G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
2494		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2495			/*
2496			 * We have active disks, so launch the provider if
2497			 * it doesn't exist yet.
2498			 */
2499			if (sc->sc_provider == NULL)
2500				g_mirror_launch_provider(sc);
2501			if (sc->sc_rootmount != NULL) {
2502				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
2503				    __LINE__, sc->sc_rootmount);
2504				root_mount_rel(sc->sc_rootmount);
2505				sc->sc_rootmount = NULL;
2506			}
2507		}
2508		/*
2509		 * Genid should be bumped immediately, so do it here.
2510		 */
2511		if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) {
2512			sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID;
2513			g_mirror_bump_genid(sc);
2514		}
2515		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID_NOW) != 0) {
2516			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID_NOW;
2517			g_mirror_bump_syncid(sc);
2518		}
2519		break;
2520	default:
2521		KASSERT(1 == 0, ("Wrong device state (%s, %s).",
2522		    sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
2523		break;
2524	}
2525}
2526
2527/*
2528 * Update disk state and device state if needed.
2529 */
2530#define	DISK_STATE_CHANGED()	G_MIRROR_DEBUG(1,			\
2531	"Disk %s state changed from %s to %s (device %s).",		\
2532	g_mirror_get_diskname(disk),					\
2533	g_mirror_disk_state2str(disk->d_state),				\
2534	g_mirror_disk_state2str(state), sc->sc_name)
2535static int
2536g_mirror_update_disk(struct g_mirror_disk *disk, u_int state)
2537{
2538	struct g_mirror_softc *sc;
2539
2540	sc = disk->d_softc;
2541	sx_assert(&sc->sc_lock, SX_XLOCKED);
2542
2543again:
2544	G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.",
2545	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state),
2546	    g_mirror_disk_state2str(state));
2547	switch (state) {
2548	case G_MIRROR_DISK_STATE_NEW:
2549		/*
2550		 * Possible scenarios:
2551		 * 1. A new disk arrives.
2552		 */
2553		/* Previous state should be NONE. */
2554		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE,
2555		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2556		    g_mirror_disk_state2str(disk->d_state)));
2557		DISK_STATE_CHANGED();
2558
2559		disk->d_state = state;
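		/*
		 * Insert the disk into sc_disks, keeping the list sorted
		 * by descending d_priority.
		 */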
2560		if (LIST_EMPTY(&sc->sc_disks))
2561			LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next);
2562		else {
2563			struct g_mirror_disk *dp;
2564
2565			LIST_FOREACH(dp, &sc->sc_disks, d_next) {
2566				if (disk->d_priority >= dp->d_priority) {
2567					LIST_INSERT_BEFORE(dp, disk, d_next);
2568					dp = NULL;
2569					break;
2570				}
2571				if (LIST_NEXT(dp, d_next) == NULL)
2572					break;
2573			}
2574			if (dp != NULL)
2575				LIST_INSERT_AFTER(dp, disk, d_next);
2576		}
2577		G_MIRROR_DEBUG(1, "Device %s: provider %s detected.",
2578		    sc->sc_name, g_mirror_get_diskname(disk));
2579		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
2580			break;
2581		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2582		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2583		    g_mirror_device_state2str(sc->sc_state),
2584		    g_mirror_get_diskname(disk),
2585		    g_mirror_disk_state2str(disk->d_state)));
2586		state = g_mirror_determine_state(disk);
2587		if (state != G_MIRROR_DISK_STATE_NONE)
2588			goto again;
2589		break;
2590	case G_MIRROR_DISK_STATE_ACTIVE:
2591		/*
2592		 * Possible scenarios:
2593		 * 1. New disk does not need synchronization.
2594		 * 2. Synchronization process finished successfully.
2595		 */
2596		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2597		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2598		    g_mirror_device_state2str(sc->sc_state),
2599		    g_mirror_get_diskname(disk),
2600		    g_mirror_disk_state2str(disk->d_state)));
2601		/* Previous state should be NEW or SYNCHRONIZING. */
2602		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW ||
2603		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2604		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2605		    g_mirror_disk_state2str(disk->d_state)));
2606		DISK_STATE_CHANGED();
2607
2608		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
2609			disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING;
2610			disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC;
2611			g_mirror_sync_stop(disk, 0);
2612		}
2613		disk->d_state = state;
2614		disk->d_sync.ds_offset = 0;
2615		disk->d_sync.ds_offset_done = 0;
2616		g_mirror_update_idle(sc, disk);
2617		g_mirror_update_metadata(disk);
2618		G_MIRROR_DEBUG(1, "Device %s: provider %s activated.",
2619		    sc->sc_name, g_mirror_get_diskname(disk));
2620		break;
2621	case G_MIRROR_DISK_STATE_STALE:
2622		/*
2623		 * Possible scenarios:
2624		 * 1. Stale disk was connected.
2625		 */
2626		/* Previous state should be NEW. */
2627		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2628		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2629		    g_mirror_disk_state2str(disk->d_state)));
2630		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2631		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2632		    g_mirror_device_state2str(sc->sc_state),
2633		    g_mirror_get_diskname(disk),
2634		    g_mirror_disk_state2str(disk->d_state)));
2635		/*
2636		 * STALE state is only possible if device is marked
2637		 * NOAUTOSYNC.
2638		 */
2639		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
2640		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2641		    g_mirror_device_state2str(sc->sc_state),
2642		    g_mirror_get_diskname(disk),
2643		    g_mirror_disk_state2str(disk->d_state)));
2644		DISK_STATE_CHANGED();
2645
2646		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2647		disk->d_state = state;
2648		g_mirror_update_metadata(disk);
2649		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
2650		    sc->sc_name, g_mirror_get_diskname(disk));
2651		break;
2652	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
2653		/*
2654		 * Possible scenarios:
2655		 * 1. Disk which needs synchronization was connected.
2656		 */
2657		/* Previous state should be NEW. */
2658		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2659		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2660		    g_mirror_disk_state2str(disk->d_state)));
2661		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2662		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2663		    g_mirror_device_state2str(sc->sc_state),
2664		    g_mirror_get_diskname(disk),
2665		    g_mirror_disk_state2str(disk->d_state)));
2666		DISK_STATE_CHANGED();
2667
2668		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
2669			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2670		disk->d_state = state;
2671		if (sc->sc_provider != NULL) {
2672			g_mirror_sync_start(disk);
2673			g_mirror_update_metadata(disk);
2674		}
2675		break;
2676	case G_MIRROR_DISK_STATE_DISCONNECTED:
2677		/*
2678		 * Possible scenarios:
2679		 * 1. Device wasn't running yet, but a disk disappeared.
2680		 * 2. Disk was active and disappeared.
2681		 * 3. Disk disappeared during the synchronization process.
2682		 */
2683		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
2684			/*
2685			 * Previous state should be ACTIVE, STALE or
2686			 * SYNCHRONIZING.
2687			 */
2688			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
2689			    disk->d_state == G_MIRROR_DISK_STATE_STALE ||
2690			    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2691			    ("Wrong disk state (%s, %s).",
2692			    g_mirror_get_diskname(disk),
2693			    g_mirror_disk_state2str(disk->d_state)));
2694		} else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
2695			/* Previous state should be NEW. */
2696			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2697			    ("Wrong disk state (%s, %s).",
2698			    g_mirror_get_diskname(disk),
2699			    g_mirror_disk_state2str(disk->d_state)));
2700			/*
2701			 * Cancel the pending syncid bump if the disk
2702			 * disappeared in the STARTING state.
2703			 */
2704			if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0)
2705				sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
2706#ifdef	INVARIANTS
2707		} else {
2708			KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2709			    sc->sc_name,
2710			    g_mirror_device_state2str(sc->sc_state),
2711			    g_mirror_get_diskname(disk),
2712			    g_mirror_disk_state2str(disk->d_state)));
2713#endif
2714		}
2715		DISK_STATE_CHANGED();
2716		G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.",
2717		    sc->sc_name, g_mirror_get_diskname(disk));
2718
2719		g_mirror_destroy_disk(disk);
2720		break;
2721	case G_MIRROR_DISK_STATE_DESTROY:
2722	    {
2723		int error;
2724
2725		error = g_mirror_clear_metadata(disk);
2726		if (error != 0) {
2727			G_MIRROR_DEBUG(0,
2728			    "Device %s: failed to clear metadata on %s: %d.",
2729			    sc->sc_name, g_mirror_get_diskname(disk), error);
2730			break;
2731		}
2732		DISK_STATE_CHANGED();
2733		G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.",
2734		    sc->sc_name, g_mirror_get_diskname(disk));
2735
2736		g_mirror_destroy_disk(disk);
2737		sc->sc_ndisks--;
2738		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2739			g_mirror_update_metadata(disk);
2740		}
2741		break;
2742	    }
2743	default:
2744		KASSERT(1 == 0, ("Unknown state (%u).", state));
2745		break;
2746	}
2747	return (0);
2748}
2749#undef	DISK_STATE_CHANGED
2750
2751int
2752g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md)
2753{
2754	struct g_provider *pp;
2755	u_char *buf;
2756	int error;
2757
2758	g_topology_assert();
2759
2760	error = g_access(cp, 1, 0, 0);
2761	if (error != 0)
2762		return (error);
2763	pp = cp->provider;
2764	g_topology_unlock();
2765	/* Metadata is stored in the last sector. */
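	/*
	 * For example, on a provider of 1073741824 bytes with 512-byte
	 * sectors this is a single 512-byte read starting at byte
	 * offset 1073741312.
	 */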
2766	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2767	    &error);
2768	g_topology_lock();
2769	g_access(cp, -1, 0, 0);
2770	if (buf == NULL) {
2771		G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).",
2772		    cp->provider->name, error);
2773		return (error);
2774	}
2775
2776	/* Decode metadata. */
2777	error = mirror_metadata_decode(buf, md);
2778	g_free(buf);
2779	if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0)
2780		return (EINVAL);
2781	if (md->md_version > G_MIRROR_VERSION) {
2782		G_MIRROR_DEBUG(0,
2783		    "Kernel module is too old to handle metadata from %s.",
2784		    cp->provider->name);
2785		return (EINVAL);
2786	}
2787	if (error != 0) {
2788		G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
2789		    cp->provider->name);
2790		return (error);
2791	}
2792
2793	return (0);
2794}
2795
2796static int
2797g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp,
2798    struct g_mirror_metadata *md)
2799{
2800
2801	if (g_mirror_id2disk(sc, md->md_did) != NULL) {
2802		G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.",
2803		    pp->name, md->md_did);
2804		return (EEXIST);
2805	}
2806	if (md->md_all != sc->sc_ndisks) {
2807		G_MIRROR_DEBUG(1,
2808		    "Invalid '%s' field on disk %s (device %s), skipping.",
2809		    "md_all", pp->name, sc->sc_name);
2810		return (EINVAL);
2811	}
2812	if (md->md_slice != sc->sc_slice) {
2813		G_MIRROR_DEBUG(1,
2814		    "Invalid '%s' field on disk %s (device %s), skipping.",
2815		    "md_slice", pp->name, sc->sc_name);
2816		return (EINVAL);
2817	}
2818	if (md->md_balance != sc->sc_balance) {
2819		G_MIRROR_DEBUG(1,
2820		    "Invalid '%s' field on disk %s (device %s), skipping.",
2821		    "md_balance", pp->name, sc->sc_name);
2822		return (EINVAL);
2823	}
2824#if 0
2825	if (md->md_mediasize != sc->sc_mediasize) {
2826		G_MIRROR_DEBUG(1,
2827		    "Invalid '%s' field on disk %s (device %s), skipping.",
2828		    "md_mediasize", pp->name, sc->sc_name);
2829		return (EINVAL);
2830	}
2831#endif
2832	if (sc->sc_mediasize > pp->mediasize) {
2833		G_MIRROR_DEBUG(1,
2834		    "Invalid size of disk %s (device %s), skipping.", pp->name,
2835		    sc->sc_name);
2836		return (EINVAL);
2837	}
2838	if (md->md_sectorsize != sc->sc_sectorsize) {
2839		G_MIRROR_DEBUG(1,
2840		    "Invalid '%s' field on disk %s (device %s), skipping.",
2841		    "md_sectorsize", pp->name, sc->sc_name);
2842		return (EINVAL);
2843	}
2844	if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2845		G_MIRROR_DEBUG(1,
2846		    "Invalid sector size of disk %s (device %s), skipping.",
2847		    pp->name, sc->sc_name);
2848		return (EINVAL);
2849	}
2850	if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) {
2851		G_MIRROR_DEBUG(1,
2852		    "Invalid device flags on disk %s (device %s), skipping.",
2853		    pp->name, sc->sc_name);
2854		return (EINVAL);
2855	}
2856	if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) {
2857		G_MIRROR_DEBUG(1,
2858		    "Invalid disk flags on disk %s (device %s), skipping.",
2859		    pp->name, sc->sc_name);
2860		return (EINVAL);
2861	}
2862	return (0);
2863}
2864
2865int
2866g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
2867    struct g_mirror_metadata *md)
2868{
2869	struct g_mirror_disk *disk;
2870	int error;
2871
2872	g_topology_assert_not();
2873	G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name);
2874
2875	error = g_mirror_check_metadata(sc, pp, md);
2876	if (error != 0)
2877		return (error);
2878	if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING &&
2879	    md->md_genid < sc->sc_genid) {
2880		G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.",
2881		    pp->name, sc->sc_name);
2882		return (EINVAL);
2883	}
2884	disk = g_mirror_init_disk(sc, pp, md, &error);
2885	if (disk == NULL)
2886		return (error);
2887	error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW,
2888	    G_MIRROR_EVENT_WAIT);
2889	if (error != 0)
2890		return (error);
2891	if (md->md_version < G_MIRROR_VERSION) {
2892		G_MIRROR_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
2893		    pp->name, md->md_version, G_MIRROR_VERSION);
2894		g_mirror_update_metadata(disk);
2895	}
2896	return (0);
2897}
2898
2899static void
2900g_mirror_destroy_delayed(void *arg, int flag)
2901{
2902	struct g_mirror_softc *sc;
2903	int error;
2904
2905	if (flag == EV_CANCEL) {
2906		G_MIRROR_DEBUG(1, "Destroying canceled.");
2907		return;
2908	}
2909	sc = arg;
2910	g_topology_unlock();
2911	sx_xlock(&sc->sc_lock);
2912	KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0,
2913	    ("DESTROY flag set on %s.", sc->sc_name));
2914	KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0,
2915	    ("DESTROYING flag not set on %s.", sc->sc_name));
2916	G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name);
2917	error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT);
2918	if (error != 0) {
2919		G_MIRROR_DEBUG(0, "Cannot destroy %s (error=%d).",
2920		    sc->sc_name, error);
2921		sx_xunlock(&sc->sc_lock);
2922	}
2923	g_topology_lock();
2924}
2925
2926static int
2927g_mirror_access(struct g_provider *pp, int acr, int acw, int ace)
2928{
2929	struct g_mirror_softc *sc;
2930	int error = 0;
2931
2932	g_topology_assert();
2933	G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
2934	    acw, ace);
2935
2936	sc = pp->private;
2937	KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
2938
2939	g_topology_unlock();
2940	sx_xlock(&sc->sc_lock);
2941	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 ||
2942	    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0 ||
2943	    LIST_EMPTY(&sc->sc_disks)) {
2944		if (acr > 0 || acw > 0 || ace > 0)
2945			error = ENXIO;
2946		goto end;
2947	}
2948	sc->sc_provider_open += acr + acw + ace;
2949	if (pp->acw + acw == 0)
2950		g_mirror_idle(sc, 0);
2951	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0 &&
2952	    sc->sc_provider_open == 0)
2953		g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, sc, NULL);
2954end:
2955	sx_xunlock(&sc->sc_lock);
2956	g_topology_lock();
2957	return (error);
2958}
2959
2960struct g_geom *
2961g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md,
2962    u_int type)
2963{
2964	struct g_mirror_softc *sc;
2965	struct g_geom *gp;
2966	int error, timeout;
2967
2968	g_topology_assert();
2969	G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name,
2970	    md->md_mid);
2971
2972	/* One disk is the minimum. */
2973	if (md->md_all < 1)
2974		return (NULL);
2975	/*
2976	 * Action geom.
2977	 */
2978	gp = g_new_geomf(mp, "%s", md->md_name);
2979	sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO);
2980	gp->start = g_mirror_start;
2981	gp->orphan = g_mirror_orphan;
2982	gp->access = g_mirror_access;
2983	gp->dumpconf = g_mirror_dumpconf;
2984
2985	sc->sc_type = type;
2986	sc->sc_id = md->md_mid;
2987	sc->sc_slice = md->md_slice;
2988	sc->sc_balance = md->md_balance;
2989	sc->sc_mediasize = md->md_mediasize;
2990	sc->sc_sectorsize = md->md_sectorsize;
2991	sc->sc_ndisks = md->md_all;
2992	sc->sc_flags = md->md_mflags;
2993	sc->sc_bump_id = 0;
2994	sc->sc_idle = 1;
2995	sc->sc_last_write = time_uptime;
2996	sc->sc_writes = 0;
2997	sc->sc_refcnt = 1;
2998	sx_init(&sc->sc_lock, "gmirror:lock");
2999	bioq_init(&sc->sc_queue);
3000	mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF);
3001	bioq_init(&sc->sc_regular_delayed);
3002	bioq_init(&sc->sc_inflight);
3003	bioq_init(&sc->sc_sync_delayed);
3004	LIST_INIT(&sc->sc_disks);
3005	TAILQ_INIT(&sc->sc_events);
3006	mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF);
3007	callout_init(&sc->sc_callout, 1);
3008	mtx_init(&sc->sc_done_mtx, "gmirror:done", NULL, MTX_DEF);
3009	sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING;
3010	gp->softc = sc;
3011	sc->sc_geom = gp;
3012	sc->sc_provider = NULL;
3013	sc->sc_provider_open = 0;
3014	/*
3015	 * Synchronization geom.
3016	 */
3017	gp = g_new_geomf(mp, "%s.sync", md->md_name);
3018	gp->softc = sc;
3019	gp->orphan = g_mirror_orphan;
3020	sc->sc_sync.ds_geom = gp;
3021	sc->sc_sync.ds_ndisks = 0;
3022	error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0,
3023	    "g_mirror %s", md->md_name);
3024	if (error != 0) {
3025		G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.",
3026		    sc->sc_name);
3027		g_destroy_geom(sc->sc_sync.ds_geom);
3028		g_destroy_geom(sc->sc_geom);
3029		g_mirror_free_device(sc);
3030		return (NULL);
3031	}
3032
3033	G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).",
3034	    sc->sc_name, sc->sc_ndisks, sc->sc_id);
3035
3036	sc->sc_rootmount = root_mount_hold("GMIRROR");
3037	G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
3038	/*
3039	 * Schedule the startup timeout.
3040	 */
3041	timeout = g_mirror_timeout * hz;
3042	callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc);
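	/*
	 * With the default kern.geom.mirror.timeout of 4, this fires
	 * g_mirror_go() after roughly four seconds and forces the device
	 * to start even if not all of its components have shown up.
	 */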
3043	return (sc->sc_geom);
3044}
3045
3046int
3047g_mirror_destroy(struct g_mirror_softc *sc, int how)
3048{
3049	struct g_mirror_disk *disk;
3050
3051	g_topology_assert_not();
3052	sx_assert(&sc->sc_lock, SX_XLOCKED);
3053
3054	if (sc->sc_provider_open != 0) {
3055		switch (how) {
3056		case G_MIRROR_DESTROY_SOFT:
3057			G_MIRROR_DEBUG(1,
3058			    "Device %s is still open (%d).", sc->sc_name,
3059			    sc->sc_provider_open);
3060			return (EBUSY);
3061		case G_MIRROR_DESTROY_DELAYED:
3062			G_MIRROR_DEBUG(1,
3063			    "Device %s will be destroyed on last close.",
3064			    sc->sc_name);
3065			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
3066				if (disk->d_state ==
3067				    G_MIRROR_DISK_STATE_SYNCHRONIZING) {
3068					g_mirror_sync_stop(disk, 1);
3069				}
3070			}
3071			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROYING;
3072			return (EBUSY);
3073		case G_MIRROR_DESTROY_HARD:
3074			G_MIRROR_DEBUG(1, "Device %s is still open, so it "
3075			    "cannot be cleanly removed.", sc->sc_name);
3076		}
3077	}
3078
3079	g_topology_lock();
3080	if (sc->sc_geom->softc == NULL) {
3081		g_topology_unlock();
3082		return (0);
3083	}
3084	sc->sc_geom->softc = NULL;
3085	sc->sc_sync.ds_geom->softc = NULL;
3086	g_topology_unlock();
3087
3088	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
3089	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT;
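	/*
	 * Handshake with the worker thread: wake it up through the queue
	 * sleep channel, then wait until g_mirror_try_destroy() clears
	 * sc_worker and wakes us, at which point it is safe to tear the
	 * device down.
	 */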
3090	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
3091	sx_xunlock(&sc->sc_lock);
3092	mtx_lock(&sc->sc_queue_mtx);
3093	wakeup(sc);
3094	mtx_unlock(&sc->sc_queue_mtx);
3095	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
3096	while (sc->sc_worker != NULL)
3097		tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5);
3098	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
3099	sx_xlock(&sc->sc_lock);
3100	g_mirror_destroy_device(sc);
3101	return (0);
3102}
3103
3104static void
3105g_mirror_taste_orphan(struct g_consumer *cp)
3106{
3107
3108	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
3109	    cp->provider->name));
3110}
3111
3112static struct g_geom *
3113g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
3114{
3115	struct g_mirror_metadata md;
3116	struct g_mirror_softc *sc;
3117	struct g_consumer *cp;
3118	struct g_geom *gp;
3119	int error;
3120
3121	g_topology_assert();
3122	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
3123	G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);
3124
3125	gp = g_new_geomf(mp, "mirror:taste");
3126	/*
3127	 * This orphan function should never be called.
3128	 */
3129	gp->orphan = g_mirror_taste_orphan;
3130	cp = g_new_consumer(gp);
3131	g_attach(cp, pp);
3132	error = g_mirror_read_metadata(cp, &md);
3133	g_detach(cp);
3134	g_destroy_consumer(cp);
3135	g_destroy_geom(gp);
3136	if (error != 0)
3137		return (NULL);
3138	gp = NULL;
3139
3140	if (md.md_provider[0] != '\0' &&
3141	    !g_compare_names(md.md_provider, pp->name))
3142		return (NULL);
3143	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
3144		return (NULL);
3145	if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
3146		G_MIRROR_DEBUG(0,
3147		    "Device %s: provider %s marked as inactive, skipping.",
3148		    md.md_name, pp->name);
3149		return (NULL);
3150	}
3151	if (g_mirror_debug >= 2)
3152		mirror_metadata_dump(&md);
3153
3154	/*
3155	 * Let's check if the device already exists.
3156	 */
3157	sc = NULL;
3158	LIST_FOREACH(gp, &mp->geom, geom) {
3159		sc = gp->softc;
3160		if (sc == NULL)
3161			continue;
3162		if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC)
3163			continue;
3164		if (sc->sc_sync.ds_geom == gp)
3165			continue;
3166		if (strcmp(md.md_name, sc->sc_name) != 0)
3167			continue;
3168		if (md.md_mid != sc->sc_id) {
3169			G_MIRROR_DEBUG(0, "Device %s already configured.",
3170			    sc->sc_name);
3171			return (NULL);
3172		}
3173		break;
3174	}
3175	if (gp == NULL) {
3176		gp = g_mirror_create(mp, &md, G_MIRROR_TYPE_AUTOMATIC);
3177		if (gp == NULL) {
3178			G_MIRROR_DEBUG(0, "Cannot create device %s.",
3179			    md.md_name);
3180			return (NULL);
3181		}
3182		sc = gp->softc;
3183	}
3184	G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
3185	g_topology_unlock();
3186	sx_xlock(&sc->sc_lock);
3187	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_TASTING;
3188	error = g_mirror_add_disk(sc, pp, &md);
3189	if (error != 0) {
3190		G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
3191		    pp->name, gp->name, error);
3192		if (LIST_EMPTY(&sc->sc_disks)) {
3193			g_cancel_event(sc);
3194			g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD);
3195			g_topology_lock();
3196			return (NULL);
3197		}
3198		gp = NULL;
3199	}
3200	sc->sc_flags &= ~G_MIRROR_DEVICE_FLAG_TASTING;
3201	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
3202		g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD);
3203		g_topology_lock();
3204		return (NULL);
3205	}
3206	sx_xunlock(&sc->sc_lock);
3207	g_topology_lock();
3208	return (gp);
3209}
3210
3211static void
3212g_mirror_resize(struct g_consumer *cp)
3213{
3214	struct g_mirror_disk *disk;
3215
3216	g_topology_assert();
3217	g_trace(G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name);
3218
3219	disk = cp->private;
3220	if (disk == NULL)
3221		return;
3222	g_topology_unlock();
3223	g_mirror_update_metadata(disk);
3224	g_topology_lock();
3225}
3226
3227static int
3228g_mirror_destroy_geom(struct gctl_req *req __unused,
3229    struct g_class *mp __unused, struct g_geom *gp)
3230{
3231	struct g_mirror_softc *sc;
3232	int error;
3233
3234	g_topology_unlock();
3235	sc = gp->softc;
3236	sx_xlock(&sc->sc_lock);
3237	g_cancel_event(sc);
3238	error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT);
3239	if (error != 0)
3240		sx_xunlock(&sc->sc_lock);
3241	g_topology_lock();
3242	return (error);
3243}
3244
3245static void
3246g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
3247    struct g_consumer *cp, struct g_provider *pp)
3248{
3249	struct g_mirror_softc *sc;
3250
3251	g_topology_assert();
3252
3253	sc = gp->softc;
3254	if (sc == NULL)
3255		return;
3256	/* Skip synchronization geom. */
3257	if (gp == sc->sc_sync.ds_geom)
3258		return;
3259	if (pp != NULL) {
3260		/* Nothing here. */
3261	} else if (cp != NULL) {
3262		struct g_mirror_disk *disk;
3263
3264		disk = cp->private;
3265		if (disk == NULL)
3266			return;
3267		g_topology_unlock();
3268		sx_xlock(&sc->sc_lock);
3269		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id);
3270		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
3271			sbuf_printf(sb, "%s<Synchronized>", indent);
3272			if (disk->d_sync.ds_offset == 0)
3273				sbuf_printf(sb, "0%%");
3274			else {
3275				sbuf_printf(sb, "%u%%",
3276				    (u_int)((disk->d_sync.ds_offset * 100) /
3277				    sc->sc_provider->mediasize));
3278			}
3279			sbuf_printf(sb, "</Synchronized>\n");
3280			if (disk->d_sync.ds_offset > 0) {
3281				sbuf_printf(sb, "%s<BytesSynced>%jd"
3282				    "</BytesSynced>\n", indent,
3283				    (intmax_t)disk->d_sync.ds_offset);
3284			}
3285		}
3286		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
3287		    disk->d_sync.ds_syncid);
3288		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent,
3289		    disk->d_genid);
3290		sbuf_printf(sb, "%s<Flags>", indent);
3291		if (disk->d_flags == 0)
3292			sbuf_printf(sb, "NONE");
3293		else {
3294			int first = 1;
3295
3296#define	ADD_FLAG(flag, name)	do {					\
3297	if ((disk->d_flags & (flag)) != 0) {				\
3298		if (!first)						\
3299			sbuf_printf(sb, ", ");				\
3300		else							\
3301			first = 0;					\
3302		sbuf_printf(sb, name);					\
3303	}								\
3304} while (0)
3305			ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY");
3306			ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED");
3307			ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE");
3308			ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING,
3309			    "SYNCHRONIZING");
3310			ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
3311			ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN");
3312#undef	ADD_FLAG
3313		}
3314		sbuf_printf(sb, "</Flags>\n");
3315		sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent,
3316		    disk->d_priority);
3317		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3318		    g_mirror_disk_state2str(disk->d_state));
3319		sx_xunlock(&sc->sc_lock);
3320		g_topology_lock();
3321	} else {
3322		g_topology_unlock();
3323		sx_xlock(&sc->sc_lock);
3324		sbuf_printf(sb, "%s<Type>", indent);
3325		switch (sc->sc_type) {
3326		case G_MIRROR_TYPE_AUTOMATIC:
3327			sbuf_printf(sb, "AUTOMATIC");
3328			break;
3329		case G_MIRROR_TYPE_MANUAL:
3330			sbuf_printf(sb, "MANUAL");
3331			break;
3332		default:
3333			sbuf_printf(sb, "UNKNOWN");
3334			break;
3335		}
3336		sbuf_printf(sb, "</Type>\n");
3337		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
3338		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
3339		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
3340		sbuf_printf(sb, "%s<Flags>", indent);
3341		if (sc->sc_flags == 0)
3342			sbuf_printf(sb, "NONE");
3343		else {
3344			int first = 1;
3345
3346#define	ADD_FLAG(flag, name)	do {					\
3347	if ((sc->sc_flags & (flag)) != 0) {				\
3348		if (!first)						\
3349			sbuf_printf(sb, ", ");				\
3350		else							\
3351			first = 0;					\
3352		sbuf_printf(sb, name);					\
3353	}								\
3354} while (0)
3355			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
3356			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
3357#undef	ADD_FLAG
3358		}
3359		sbuf_printf(sb, "</Flags>\n");
3360		sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent,
3361		    (u_int)sc->sc_slice);
3362		sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent,
3363		    balance_name(sc->sc_balance));
3364		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
3365		    sc->sc_ndisks);
3366		sbuf_printf(sb, "%s<State>", indent);
3367		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
3368			sbuf_printf(sb, "%s", "STARTING");
3369		else if (sc->sc_ndisks ==
3370		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE))
3371			sbuf_printf(sb, "%s", "COMPLETE");
3372		else
3373			sbuf_printf(sb, "%s", "DEGRADED");
3374		sbuf_printf(sb, "</State>\n");
3375		sx_xunlock(&sc->sc_lock);
3376		g_topology_lock();
3377	}
3378}
3379
3380static void
3381g_mirror_shutdown_post_sync(void *arg, int howto)
3382{
3383	struct g_class *mp;
3384	struct g_geom *gp, *gp2;
3385	struct g_mirror_softc *sc;
3386	int error;
3387
3388	if (panicstr != NULL)
3389		return;
3390
3391	mp = arg;
3392	g_topology_lock();
3393	g_mirror_shutdown = 1;
3394	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
3395		if ((sc = gp->softc) == NULL)
3396			continue;
3397		/* Skip synchronization geom. */
3398		if (gp == sc->sc_sync.ds_geom)
3399			continue;
3400		g_topology_unlock();
3401		sx_xlock(&sc->sc_lock);
3402		g_mirror_idle(sc, -1);
3403		g_cancel_event(sc);
3404		error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED);
3405		if (error != 0)
3406			sx_xunlock(&sc->sc_lock);
3407		g_topology_lock();
3408	}
3409	g_topology_unlock();
3410}
3411
3412static void
3413g_mirror_init(struct g_class *mp)
3414{
3415
3416	g_mirror_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
3417	    g_mirror_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
3418	if (g_mirror_post_sync == NULL)
3419		G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event.");
3420}
3421
3422static void
3423g_mirror_fini(struct g_class *mp)
3424{
3425
3426	if (g_mirror_post_sync != NULL)
3427		EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_post_sync);
3428}
3429
3430DECLARE_GEOM_CLASS(g_mirror_class, g_mirror);
3431