/* g_mirror.c revision 142727 */
1/*-
2 * Copyright (c) 2004-2005 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/geom/mirror/g_mirror.c 142727 2005-02-27 23:07:47Z pjd $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/module.h>
34#include <sys/limits.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/bio.h>
38#include <sys/sysctl.h>
39#include <sys/malloc.h>
40#include <sys/eventhandler.h>
41#include <vm/uma.h>
42#include <geom/geom.h>
43#include <sys/proc.h>
44#include <sys/kthread.h>
45#include <sys/sched.h>
46#include <geom/mirror/g_mirror.h>
47
48
/* Malloc type used for all g_mirror allocations (events, disks, sectors). */
static MALLOC_DEFINE(M_MIRROR, "mirror data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, "GEOM_MIRROR stuff");
/* Debug verbosity; higher values enable more G_MIRROR_DEBUG() output. */
u_int g_mirror_debug = 0;
TUNABLE_INT("kern.geom.mirror.debug", &g_mirror_debug);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RW, &g_mirror_debug, 0,
    "Debug level");
/* Seconds to wait for all components before starting a degraded mirror. */
static u_int g_mirror_timeout = 4;
TUNABLE_INT("kern.geom.mirror.timeout", &g_mirror_timeout);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RW, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
/* Seconds of inactivity before components are marked clean. */
static u_int g_mirror_idletime = 5;
TUNABLE_INT("kern.geom.mirror.idletime", &g_mirror_idletime);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RW,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
/* Ratio of regular I/O requests serviced per one synchronization request. */
static u_int g_mirror_reqs_per_sync = 5;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, reqs_per_sync, CTLFLAG_RW,
    &g_mirror_reqs_per_sync, 0,
    "Number of regular I/O requests per synchronization request");
/* Upper bound on synchronization requests issued per second. */
static u_int g_mirror_syncs_per_sec = 1000;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, syncs_per_sec, CTLFLAG_RW,
    &g_mirror_syncs_per_sec, 0,
    "Number of synchronizations requests per second");

/*
 * msleep() wrapper that logs sleep/wakeup at debug level 4 for easier
 * tracing of the worker thread's blocking points.
 */
#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)
79
/* Tag of the shutdown event handler registered by g_mirror_init(). */
static eventhandler_tag g_mirror_ehtag = NULL;

static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static g_taste_t g_mirror_taste;
static void g_mirror_init(struct g_class *mp);
static void g_mirror_fini(struct g_class *mp);

/*
 * GEOM class descriptor; hooks this module into the GEOM framework
 * (configuration requests, tasting of new providers, init/fini).
 */
struct g_class g_mirror_class = {
	.name = G_MIRROR_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_mirror_config,
	.taste = g_mirror_taste,
	.destroy_geom = g_mirror_destroy_geom,
	.init = g_mirror_init,
	.fini = g_mirror_fini
};


static void g_mirror_destroy_provider(struct g_mirror_softc *sc);
static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
static void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force);
static void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type);
105
106
107static const char *
108g_mirror_disk_state2str(int state)
109{
110
111	switch (state) {
112	case G_MIRROR_DISK_STATE_NONE:
113		return ("NONE");
114	case G_MIRROR_DISK_STATE_NEW:
115		return ("NEW");
116	case G_MIRROR_DISK_STATE_ACTIVE:
117		return ("ACTIVE");
118	case G_MIRROR_DISK_STATE_STALE:
119		return ("STALE");
120	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
121		return ("SYNCHRONIZING");
122	case G_MIRROR_DISK_STATE_DISCONNECTED:
123		return ("DISCONNECTED");
124	case G_MIRROR_DISK_STATE_DESTROY:
125		return ("DESTROY");
126	default:
127		return ("INVALID");
128	}
129}
130
131static const char *
132g_mirror_device_state2str(int state)
133{
134
135	switch (state) {
136	case G_MIRROR_DEVICE_STATE_STARTING:
137		return ("STARTING");
138	case G_MIRROR_DEVICE_STATE_RUNNING:
139		return ("RUNNING");
140	default:
141		return ("INVALID");
142	}
143}
144
145static const char *
146g_mirror_get_diskname(struct g_mirror_disk *disk)
147{
148
149	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
150		return ("[unknown]");
151	return (disk->d_name);
152}
153
154/*
155 * --- Events handling functions ---
156 * Events in geom_mirror are used to maintain disks and device status
157 * from one thread to simplify locking.
158 */
/*
 * Release memory allocated for the given event.
 */
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

	free(ep, M_MIRROR);
}
165
/*
 * Queue a state-change event for the worker thread.  'arg' is the softc
 * when G_MIRROR_EVENT_DEVICE is set in 'flags', otherwise a disk.
 * With G_MIRROR_EVENT_DONTWAIT the call returns immediately; otherwise it
 * drops the topology lock and sleeps until the worker marks the event
 * done, then returns the event's error.
 */
int
g_mirror_event_send(void *arg, int state, int flags)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	/* Wake the worker thread, which sleeps on the softc pointer. */
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
		return (0);
	g_topology_assert();
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	/* Drop topology lock while waiting, so the worker can make progress. */
	g_topology_unlock();
	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		/* PDROP: msleep releases sc_events_mtx when it returns. */
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	/* Don't even try to use 'sc' here, because it could be already dead. */
	g_topology_lock();
	error = ep->e_error;
	g_mirror_event_free(ep);
	return (error);
}
210
/*
 * Return the first queued event without removing it, or NULL when the
 * event queue is empty.
 */
static struct g_mirror_event *
g_mirror_event_get(struct g_mirror_softc *sc)
{
	struct g_mirror_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}
221
/*
 * Unlink the given event from the event queue (does not free it).
 */
static void
g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}
230
/*
 * Cancel all pending events targeted at the given disk (device-wide
 * events are left alone).  Fire-and-forget events are freed; waited-on
 * events get ECANCELED and their senders are woken up (the sender frees
 * the event in g_mirror_event_send()).
 */
static void
g_mirror_event_cancel(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep, *tmpep;

	g_topology_assert();

	sc = disk->d_softc;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}
256
257/*
258 * Return the number of disks in given state.
259 * If state is equal to -1, count all connected disks.
260 */
261u_int
262g_mirror_ndisks(struct g_mirror_softc *sc, int state)
263{
264	struct g_mirror_disk *disk;
265	u_int n = 0;
266
267	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
268		if (state == -1 || disk->d_state == state)
269			n++;
270	}
271	return (n);
272}
273
274/*
275 * Find a disk in mirror by its disk ID.
276 */
277static struct g_mirror_disk *
278g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
279{
280	struct g_mirror_disk *disk;
281
282	g_topology_assert();
283
284	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
285		if (disk->d_id == id)
286			return (disk);
287	}
288	return (NULL);
289}
290
/*
 * Count requests on the softc's queue that came from the given consumer
 * (i.e. finished requests not yet processed by the worker thread).
 */
static u_int
g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}
305
306static int
307g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
308{
309
310	if (cp->index > 0) {
311		G_MIRROR_DEBUG(2,
312		    "I/O requests for %s exist, can't destroy it now.",
313		    cp->provider->name);
314		return (1);
315	}
316	if (g_mirror_nrequests(sc, cp) > 0) {
317		G_MIRROR_DEBUG(2,
318		    "I/O requests for %s in queue, can't destroy it now.",
319		    cp->provider->name);
320		return (1);
321	}
322	return (0);
323}
324
/*
 * Deferred-event callback: detach and destroy the given consumer.
 * Posted from g_mirror_kill_consumer() after a retaste event.
 */
static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	cp = arg;
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}
335
/*
 * Close and destroy the given consumer unless it is still busy.  When
 * closing a consumer that was opened for write, destruction is deferred
 * via a GEOM event so it happens after the retaste triggered by
 * g_access() (see the comment below).
 */
static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		/* Closing last writer triggers retaste unless geom is dying. */
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After retaste event was send (inside g_access()), we can send
		 * event to detach and destroy consumer.
		 * A class, which has consumer to the given provider connected
		 * will not receive retaste event for the provider.
		 * This is the way how I ignore retaste events when I close
		 * consumers opened for write: I detach and destroy consumer
		 * after retaste event is sent.
		 */
		g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}
374
/*
 * Create a consumer for the disk, attach it to the given provider and
 * open it r1w1e1.  Returns 0 on success or an errno value; on failure
 * the consumer may be left created/attached - callers clean up via
 * g_mirror_disconnect_consumer().
 */
static int
g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
{
	int error;

	g_topology_assert();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	disk->d_consumer = g_new_consumer(disk->d_softc->sc_geom);
	disk->d_consumer->private = disk;
	/* index counts in-flight requests; starts at zero. */
	disk->d_consumer->index = 0;
	error = g_attach(disk->d_consumer, pp);
	if (error != 0)
		return (error);
	error = g_access(disk->d_consumer, 1, 1, 1);
	if (error != 0) {
		G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}

	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
	return (0);
}
400
401static void
402g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
403{
404
405	g_topology_assert();
406
407	if (cp == NULL)
408		return;
409	if (cp->provider != NULL)
410		g_mirror_kill_consumer(sc, cp);
411	else
412		g_destroy_consumer(cp);
413}
414
415/*
416 * Initialize disk. This means allocate memory, create consumer, attach it
417 * to the provider and open access (r1w1e1) to it.
418 */
/*
 * Initialize disk. This means allocate memory, create consumer, attach it
 * to the provider and open access (r1w1e1) to it.
 * Returns the new disk or NULL; *errorp (if non-NULL) receives 0 or the
 * errno value on failure.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	/* Seed disk state from the on-disk metadata. */
	disk->d_id = md->md_did;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_priority = md->md_priority;
	disk->d_delay.sec = 0;
	disk->d_delay.frac = 0;
	binuptime(&disk->d_last_used);
	disk->d_flags = md->md_dflags;
	/* Non-empty md_provider means the disk is pinned to that provider. */
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_sync.ds_resync = -1;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL) {
		/* Handles both attached and never-attached consumers. */
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		free(disk, M_MIRROR);
	}
	return (NULL);
}
462
/*
 * Remove the disk from the mirror: cancel its pending events, stop
 * synchronization if in progress, close its consumer and free it.
 * Must only be called in a state listed in the switch below.
 */
static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert();

	LIST_REMOVE(disk, d_next);
	g_mirror_event_cancel(disk);
	sc = disk->d_softc;
	/* Drop stale round-robin hint. */
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk, 1);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_STALE:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		free(disk, M_MIRROR);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}
491
/*
 * Tear down the whole mirror: destroy the provider, mark each disk clean
 * and destroy it, flush the event queue (waking up blocked senders with
 * ECANCELED), drain the timeout callout, destroy the synchronization
 * geom and finally wither the mirror geom itself.
 */
static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;
	struct g_consumer *cp, *tmpcp;

	g_topology_assert();

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		/* Clean shutdown: clear DIRTY so next start needs no resync. */
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
		g_mirror_destroy_disk(disk);
	}
	while ((ep = g_mirror_event_get(sc)) != NULL) {
		g_mirror_event_remove(sc, ep);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			/* A sender sleeps on 'ep'; hand it ECANCELED. */
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);
	gp->softc = NULL;

	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
		g_mirror_disconnect_consumer(sc, cp);
	}
	sc->sc_sync.ds_geom->softc = NULL;
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
}
537
/*
 * GEOM orphan method: the underlying provider of one of our consumers
 * went away.  Request disconnection of the affected disk and schedule
 * a syncid bump so the remaining disks are marked newer.
 */
static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	/* private is cleared in g_mirror_kill_consumer(); nothing to do. */
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}
552
553/*
554 * Function should return the next active disk on the list.
555 * It is possible that it will be the same disk as given.
556 * If there are no active disks on list, NULL is returned.
557 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

	/*
	 * Walk the list circularly starting after 'disk': when we fall off
	 * the end (dp == NULL) wrap to the head; stop when we come back to
	 * 'disk' or hit an ACTIVE disk, whichever is first.
	 */
	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	/* Full circle with no ACTIVE disk found (dp == disk here). */
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}
574
/*
 * Return the next ACTIVE disk for round-robin load balancing, advancing
 * the sc_hint cursor.  Returns NULL when no ACTIVE disk exists.
 */
static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_hint == NULL) {
		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
		if (sc->sc_hint == NULL)
			return (NULL);
	}
	disk = sc->sc_hint;
	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
		disk = g_mirror_find_next(sc, disk);
		if (disk == NULL)
			return (NULL);
	}
	/* Advance the hint so the next call picks a different disk. */
	sc->sc_hint = g_mirror_find_next(sc, disk);
	return (disk);
}
594
/*
 * Write the encoded metadata (or a zeroed sector when md == NULL) to the
 * last sector of the disk's provider.  On write failure the disk is
 * disconnected and a genid bump is scheduled.  Drops and re-acquires the
 * topology lock around the actual I/O.
 */
static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert();

	sc = disk->d_softc;
	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	/* Metadata lives in the provider's last sector. */
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
	if (md != NULL)
		mirror_metadata_encode(md, sector);
	g_topology_unlock();
	error = g_write_data(cp, offset, sector, length);
	g_topology_lock();
	free(sector, M_MIRROR);
	if (error != 0) {
		disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_GENID;
		g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
		    G_MIRROR_EVENT_DONTWAIT);
	}
	return (error);
}
630
631static int
632g_mirror_clear_metadata(struct g_mirror_disk *disk)
633{
634	int error;
635
636	g_topology_assert();
637	error = g_mirror_write_metadata(disk, NULL);
638	if (error == 0) {
639		G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
640		    g_mirror_get_diskname(disk));
641	} else {
642		G_MIRROR_DEBUG(0,
643		    "Cannot clear metadata on disk %s (error=%d).",
644		    g_mirror_get_diskname(disk), error);
645	}
646	return (error);
647}
648
/*
 * Populate 'md' from the device's current state.  When 'disk' is NULL,
 * per-disk fields are filled with fresh defaults (new random disk ID);
 * otherwise they are taken from the given disk.
 */
void
g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{

	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
	md->md_version = G_MIRROR_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_mid = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_slice = sc->sc_slice;
	md->md_balance = sc->sc_balance;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
	bzero(md->md_provider, sizeof(md->md_provider));
	if (disk == NULL) {
		/* New disk: generate a fresh identity. */
		md->md_did = arc4random();
		md->md_priority = 0;
		md->md_syncid = 0;
		md->md_dflags = 0;
		md->md_sync_offset = 0;
		md->md_provsize = 0;
	} else {
		md->md_did = disk->d_id;
		md->md_priority = disk->d_priority;
		md->md_syncid = disk->d_sync.ds_syncid;
		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
		/* Only a synchronizing disk has a meaningful sync offset. */
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			md->md_sync_offset = disk->d_sync.ds_offset_done;
		else
			md->md_sync_offset = 0;
		/* Hardcoded disks record their provider name on disk. */
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
			strlcpy(md->md_provider,
			    disk->d_consumer->provider->name,
			    sizeof(md->md_provider));
		}
		md->md_provsize = disk->d_consumer->provider->mediasize;
	}
}
690
691void
692g_mirror_update_metadata(struct g_mirror_disk *disk)
693{
694	struct g_mirror_metadata md;
695	int error;
696
697	g_topology_assert();
698	g_mirror_fill_metadata(disk->d_softc, disk, &md);
699	error = g_mirror_write_metadata(disk, &md);
700	if (error == 0) {
701		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
702		    g_mirror_get_diskname(disk));
703	} else {
704		G_MIRROR_DEBUG(0,
705		    "Cannot update metadata on disk %s (error=%d).",
706		    g_mirror_get_diskname(disk), error);
707	}
708}
709
/*
 * Increment the device's synchronization ID and write it to all ACTIVE
 * and SYNCHRONIZING disks, so disks absent at this point can later be
 * recognized as stale.
 */
static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert();
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_mirror_update_metadata(disk);
		}
	}
}
731
/*
 * Increment the device's generation ID and write it to all ACTIVE and
 * SYNCHRONIZING disks; disks carrying an older genid are considered
 * broken/outdated.
 */
static void
g_mirror_bump_genid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert();
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_mirror_update_metadata(disk);
		}
	}
}
753
/*
 * The mirror has been idle: mark all ACTIVE disks clean (clear DIRTY in
 * their metadata) so a crash won't force a full resynchronization.
 * No-op while the provider is open for writing... actually only when it
 * is NOT open for writing - NOTE(review): the acw == 0 early return
 * looks inverted relative to g_mirror_check_idle(); confirm intent.
 */
static void
g_mirror_idle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_provider == NULL || sc->sc_provider->acw == 0)
		return;
	sc->sc_idle = 1;
	g_topology_lock();
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	g_topology_unlock();
}
773
/*
 * Write activity resumed: mark all ACTIVE disks dirty in their metadata
 * before the writes proceed, so an unclean shutdown is detectable.
 */
static void
g_mirror_unidle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	sc->sc_idle = 0;
	g_topology_lock();
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	g_topology_unlock();
}
791
792/*
793 * Return 1 if we should check if mirror is idling.
794 */
795static int
796g_mirror_check_idle(struct g_mirror_softc *sc)
797{
798	struct g_mirror_disk *disk;
799
800	if (sc->sc_idle)
801		return (0);
802	if (sc->sc_provider != NULL && sc->sc_provider->acw == 0)
803		return (0);
804	/*
805	 * Check if there are no in-flight requests.
806	 */
807	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
808		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
809			continue;
810		if (disk->d_consumer->index > 0)
811			return (0);
812	}
813	return (1);
814}
815
816static __inline int
817bintime_cmp(struct bintime *bt1, struct bintime *bt2)
818{
819
820	if (bt1->sec < bt2->sec)
821		return (-1);
822	else if (bt1->sec > bt2->sec)
823		return (1);
824	if (bt1->frac < bt2->frac)
825		return (-1);
826	else if (bt1->frac > bt2->frac)
827		return (1);
828	return (0);
829}
830
/*
 * Record the service time of the finished request in the disk's d_delay,
 * used by the LOAD balance algorithm to pick the least-loaded disk.
 */
static void
g_mirror_update_delay(struct g_mirror_disk *disk, struct bio *bp)
{

	if (disk->d_softc->sc_balance != G_MIRROR_BALANCE_LOAD)
		return;
	/* d_delay = now - bio_t0 (request start time). */
	binuptime(&disk->d_delay);
	bintime_sub(&disk->d_delay, &bp->bio_t0);
}
840
/*
 * bio_done callback for regular requests: tag the bio as REGULAR and
 * hand it to the worker thread via the softc queue.
 */
static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags |= G_MIRROR_BIO_FLAG_REGULAR;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}
853
/*
 * Process a completed regular (non-sync) child request in the worker
 * thread.  Accounts the completion on the parent bio and delivers the
 * parent once all children are in: a READ parent is re-queued for retry
 * on another disk if any child failed; a WRITE/DELETE parent succeeds if
 * at least one child succeeded (failed children are dropped from the
 * children count), and the failing disk is disconnected.
 */
static void
g_mirror_regular_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	/* One less request in flight on this consumer. */
	bp->bio_from->index--;
	pbp = bp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		/* Consumer already detached from its disk; try to reap it. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
	} else {
		g_mirror_update_delay(disk, bp);
	}

	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (bp->bio_error == 0 && pbp->bio_error == 0) {
		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
		g_destroy_bio(bp);
		if (pbp->bio_children == pbp->bio_inbed) {
			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
			pbp->bio_completed = pbp->bio_length;
			g_io_deliver(pbp, pbp->bio_error);
		}
		return;
	} else if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		G_MIRROR_LOGREQ(0, bp, "Request failed (error=%d).",
		    bp->bio_error);
		if (disk != NULL) {
			/* Disk returned an error; schedule disconnection. */
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
		}
		switch (pbp->bio_cmd) {
		case BIO_DELETE:
		case BIO_WRITE:
			/*
			 * Forget the failed child entirely: success of the
			 * remaining children is enough for the parent.
			 */
			pbp->bio_inbed--;
			pbp->bio_children--;
			break;
		}
	}
	g_destroy_bio(bp);

	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (pbp->bio_children == pbp->bio_inbed) {
			/* Re-queue the read; worker will try another disk. */
			pbp->bio_error = 0;
			mtx_lock(&sc->sc_queue_mtx);
			bioq_disksort(&sc->sc_queue, pbp);
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_inbed < pbp->bio_children) {
			/* Do nothing. */
			break;
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* Some requests succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		}
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
		break;
	}
}
941
/*
 * bio_done callback for synchronization requests: tag the bio as SYNC
 * and hand it to the worker thread via the softc queue.
 */
static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags |= G_MIRROR_BIO_FLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}
955
/*
 * GEOM start method: accept READ/WRITE/DELETE requests for the mirror
 * provider and queue them for the worker thread; reject everything else
 * (including BIO_GETATTR, which deliberately falls into the default
 * case) with EOPNOTSUPP.
 */
static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_GETATTR:
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}
987
988/*
989 * Send one synchronization request.
990 */
/*
 * Send one synchronization request: a READ of up to MAXPHYS bytes from
 * the mirror provider at the disk's current sync offset.  The matching
 * WRITE to the synchronizing disk is issued later by
 * g_mirror_sync_request() when this read completes.
 */
static void
g_mirror_sync_one(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct bio *bp;

	sc = disk->d_softc;
	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Disk %s is not marked for synchronization.",
	    g_mirror_get_diskname(disk)));

	bp = g_new_bio();
	/* Allocation failure is harmless; the request will be retried. */
	if (bp == NULL)
		return;
	bp->bio_parent = NULL;
	bp->bio_cmd = BIO_READ;
	bp->bio_offset = disk->d_sync.ds_offset;
	/* Clamp the last request to the end of the media. */
	bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
	bp->bio_cflags = 0;
	bp->bio_done = g_mirror_sync_done;
	bp->bio_data = disk->d_sync.ds_data;
	if (bp->bio_data == NULL) {
		g_destroy_bio(bp);
		return;
	}
	disk->d_sync.ds_offset += bp->bio_length;
	bp->bio_to = sc->sc_provider;
	G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
	disk->d_sync.ds_consumer->index++;
	g_io_request(bp, disk->d_sync.ds_consumer);
}
1022
/*
 * Process a completed synchronization bio in the worker thread.  A
 * finished READ (from the mirror provider) is turned into a WRITE to
 * the synchronizing disk; a finished WRITE advances ds_offset_done,
 * periodically persists progress to metadata, and activates the disk
 * once the whole media has been copied.  Write errors disconnect the
 * disk and schedule a genid bump.
 */
static void
g_mirror_sync_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;

	bp->bio_from->index--;
	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		/* Disk is gone; reap the orphaned sync consumer. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		g_destroy_bio(bp);
		return;
	}

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			return;
		}
		G_MIRROR_LOGREQ(3, bp,
		    "Synchronization request half-finished.");
		/* Reuse the same bio to write the data to the sync target. */
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
	    {
		struct g_mirror_disk_sync *sync;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		sync = &disk->d_sync;
		sync->ds_offset_done = bp->bio_offset + bp->bio_length;
		g_destroy_bio(bp);
		/* A pending resync restart takes precedence. */
		if (sync->ds_resync != -1)
			break;
		if (sync->ds_offset_done == sc->sc_provider->mediasize) {
			/*
			 * Disk up-to-date, activate it.
			 */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		} else if (sync->ds_offset_done % (MAXPHYS * 100) == 0) {
			/*
			 * Update offset_done on every 100 blocks.
			 * XXX: This should be configurable.
			 */
			g_topology_lock();
			g_mirror_update_metadata(disk);
			g_topology_unlock();
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}
1112
1113static void
1114g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
1115{
1116	struct g_mirror_disk *disk;
1117	struct g_consumer *cp;
1118	struct bio *cbp;
1119
1120	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1121		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
1122			break;
1123	}
1124	if (disk == NULL) {
1125		if (bp->bio_error == 0)
1126			bp->bio_error = ENXIO;
1127		g_io_deliver(bp, bp->bio_error);
1128		return;
1129	}
1130	cbp = g_clone_bio(bp);
1131	if (cbp == NULL) {
1132		if (bp->bio_error == 0)
1133			bp->bio_error = ENOMEM;
1134		g_io_deliver(bp, bp->bio_error);
1135		return;
1136	}
1137	/*
1138	 * Fill in the component buf structure.
1139	 */
1140	cp = disk->d_consumer;
1141	cbp->bio_done = g_mirror_done;
1142	cbp->bio_to = cp->provider;
1143	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1144	KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
1145	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
1146	    cp->acw, cp->ace));
1147	cp->index++;
1148	g_io_request(cbp, cp);
1149}
1150
1151static void
1152g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
1153{
1154	struct g_mirror_disk *disk;
1155	struct g_consumer *cp;
1156	struct bio *cbp;
1157
1158	disk = g_mirror_get_disk(sc);
1159	if (disk == NULL) {
1160		if (bp->bio_error == 0)
1161			bp->bio_error = ENXIO;
1162		g_io_deliver(bp, bp->bio_error);
1163		return;
1164	}
1165	cbp = g_clone_bio(bp);
1166	if (cbp == NULL) {
1167		if (bp->bio_error == 0)
1168			bp->bio_error = ENOMEM;
1169		g_io_deliver(bp, bp->bio_error);
1170		return;
1171	}
1172	/*
1173	 * Fill in the component buf structure.
1174	 */
1175	cp = disk->d_consumer;
1176	cbp->bio_done = g_mirror_done;
1177	cbp->bio_to = cp->provider;
1178	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1179	KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
1180	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
1181	    cp->acw, cp->ace));
1182	cp->index++;
1183	g_io_request(cbp, cp);
1184}
1185
1186static void
1187g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
1188{
1189	struct g_mirror_disk *disk, *dp;
1190	struct g_consumer *cp;
1191	struct bio *cbp;
1192	struct bintime curtime;
1193
1194	binuptime(&curtime);
1195	/*
1196	 * Find a disk which the smallest load.
1197	 */
1198	disk = NULL;
1199	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
1200		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
1201			continue;
1202		/* If disk wasn't used for more than 2 sec, use it. */
1203		if (curtime.sec - dp->d_last_used.sec >= 2) {
1204			disk = dp;
1205			break;
1206		}
1207		if (disk == NULL ||
1208		    bintime_cmp(&dp->d_delay, &disk->d_delay) < 0) {
1209			disk = dp;
1210		}
1211	}
1212	cbp = g_clone_bio(bp);
1213	if (cbp == NULL) {
1214		if (bp->bio_error == 0)
1215			bp->bio_error = ENOMEM;
1216		g_io_deliver(bp, bp->bio_error);
1217		return;
1218	}
1219	/*
1220	 * Fill in the component buf structure.
1221	 */
1222	cp = disk->d_consumer;
1223	cbp->bio_done = g_mirror_done;
1224	cbp->bio_to = cp->provider;
1225	binuptime(&disk->d_last_used);
1226	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1227	KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
1228	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
1229	    cp->acw, cp->ace));
1230	cp->index++;
1231	g_io_request(cbp, cp);
1232}
1233
/*
 * SPLIT balancing: divide a large read into per-component chunks and
 * issue the chunks to different ACTIVE components in parallel.
 */
static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	off_t left, mod, offset, slice;
	u_char *data;
	u_int ndisks;

	/* Requests not bigger than the slice size are not worth splitting. */
	if (bp->bio_length <= sc->sc_slice) {
		g_mirror_request_round_robin(sc, bp);
		return;
	}
	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
	slice = bp->bio_length / ndisks;
	/* Round the slice up to a multiple of the sector size. */
	mod = slice % sc->sc_provider->sectorsize;
	if (mod != 0)
		slice += sc->sc_provider->sectorsize - mod;
	/*
	 * Allocate all bios before sending any request, so we can
	 * return ENOMEM in nice and clean way.
	 */
	left = bp->bio_length;
	offset = bp->bio_offset;
	data = bp->bio_data;
	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			/* Clone failed - destroy the clones queued so far. */
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_mirror_done;
		/* Remember the target disk for the dispatch loop below. */
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
		cbp->bio_offset = offset;
		cbp->bio_data = data;
		/* The last chunk may be shorter than a full slice. */
		cbp->bio_length = MIN(left, slice);
		left -= cbp->bio_length;
		if (left == 0)
			break;
		offset += cbp->bio_length;
		data += cbp->bio_length;
	}
	/* All clones allocated successfully; now actually dispatch them. */
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		disk->d_consumer->index++;
		g_io_request(cbp, disk->d_consumer);
	}
}
1303
/*
 * Dispatch a regular (non-synchronization) request.  Reads go to one or
 * more components according to the configured balance algorithm; writes
 * and deletes are replicated to every usable component.
 */
static void
g_mirror_register_request(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	switch (bp->bio_cmd) {
	case BIO_READ:
		switch (sc->sc_balance) {
		case G_MIRROR_BALANCE_LOAD:
			g_mirror_request_load(sc, bp);
			break;
		case G_MIRROR_BALANCE_PREFER:
			g_mirror_request_prefer(sc, bp);
			break;
		case G_MIRROR_BALANCE_ROUND_ROBIN:
			g_mirror_request_round_robin(sc, bp);
			break;
		case G_MIRROR_BALANCE_SPLIT:
			g_mirror_request_split(sc, bp);
			break;
		}
		return;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		struct g_mirror_disk *disk;
		struct g_mirror_disk_sync *sync;
		struct bio_queue_head queue;
		struct g_consumer *cp;
		struct bio *cbp;

		/* First write after an idle period - mark components dirty. */
		if (sc->sc_idle)
			g_mirror_unidle(sc);
		/*
		 * Allocate all bios before sending any request, so we can
		 * return ENOMEM in nice and clean way.
		 */
		bioq_init(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			sync = &disk->d_sync;
			switch (disk->d_state) {
			case G_MIRROR_DISK_STATE_ACTIVE:
				break;
			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
				/*
				 * Skip writes entirely beyond the current
				 * synchronization read offset; that region is
				 * presumably copied later by the sync pass.
				 */
				if (bp->bio_offset >= sync->ds_offset)
					continue;
				else if (bp->bio_offset + bp->bio_length >
				    sync->ds_offset_done &&
				    (bp->bio_offset < sync->ds_resync ||
				     sync->ds_resync == -1)) {
					/*
					 * The write overlaps the in-flight
					 * sync window; request a resync from
					 * the MAXPHYS-aligned write offset.
					 */
					sync->ds_resync = bp->bio_offset -
					    (bp->bio_offset % MAXPHYS);
				}
				break;
			default:
				continue;
			}
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				/* Roll back the clones queued so far. */
				for (cbp = bioq_first(&queue); cbp != NULL;
				    cbp = bioq_first(&queue)) {
					bioq_remove(&queue, cbp);
					g_destroy_bio(cbp);
				}
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			bioq_insert_tail(&queue, cbp);
			cbp->bio_done = g_mirror_done;
			cp = disk->d_consumer;
			/* Remember the consumer for the dispatch loop. */
			cbp->bio_caller1 = cp;
			cbp->bio_to = cp->provider;
			KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
		}
		/* All clones allocated; dispatch them. */
		for (cbp = bioq_first(&queue); cbp != NULL;
		    cbp = bioq_first(&queue)) {
			bioq_remove(&queue, cbp);
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			cp = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp->index++;
			g_io_request(cbp, cp);
		}
		/*
		 * Bump syncid on first write.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
			g_topology_lock();
			g_mirror_bump_syncid(sc);
			g_topology_unlock();
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}
1409
1410static int
1411g_mirror_can_destroy(struct g_mirror_softc *sc)
1412{
1413	struct g_geom *gp;
1414	struct g_consumer *cp;
1415
1416	g_topology_assert();
1417	gp = sc->sc_geom;
1418	LIST_FOREACH(cp, &gp->consumer, consumer) {
1419		if (g_mirror_is_busy(sc, cp))
1420			return (0);
1421	}
1422	gp = sc->sc_sync.ds_geom;
1423	LIST_FOREACH(cp, &gp->consumer, consumer) {
1424		if (g_mirror_is_busy(sc, cp))
1425			return (0);
1426	}
1427	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
1428	    sc->sc_name);
1429	return (1);
1430}
1431
/*
 * Tear the device down if no consumer keeps it busy anymore.
 * Returns 0 when the device is still busy, 1 when destruction was
 * initiated (in which case the caller must not touch sc again).
 */
static int
g_mirror_try_destroy(struct g_mirror_softc *sc)
{

	g_topology_lock();
	if (!g_mirror_can_destroy(sc)) {
		g_topology_unlock();
		return (0);
	}
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) {
		g_topology_unlock();
		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
		    &sc->sc_worker);
		/*
		 * Another thread is sleeping on sc_worker waiting for the
		 * device to go away; wake it and let it finish the teardown.
		 */
		wakeup(&sc->sc_worker);
		sc->sc_worker = NULL;
	} else {
		/* Nobody is waiting - destroy and free the softc ourselves. */
		g_mirror_destroy_device(sc);
		g_topology_unlock();
		free(sc, M_MIRROR);
	}
	return (1);
}
1454
1455/*
1456 * Worker thread.
1457 */
1458static void
1459g_mirror_worker(void *arg)
1460{
1461	struct g_mirror_softc *sc;
1462	struct g_mirror_disk *disk;
1463	struct g_mirror_disk_sync *sync;
1464	struct g_mirror_event *ep;
1465	struct bio *bp;
1466	u_int nreqs;
1467
1468	sc = arg;
1469	mtx_lock_spin(&sched_lock);
1470	sched_prio(curthread, PRIBIO);
1471	mtx_unlock_spin(&sched_lock);
1472
1473	nreqs = 0;
1474	for (;;) {
1475		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
1476		/*
1477		 * First take a look at events.
1478		 * This is important to handle events before any I/O requests.
1479		 */
1480		ep = g_mirror_event_get(sc);
1481		if (ep != NULL && g_topology_try_lock()) {
1482			g_mirror_event_remove(sc, ep);
1483			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
1484				/* Update only device status. */
1485				G_MIRROR_DEBUG(3,
1486				    "Running event for device %s.",
1487				    sc->sc_name);
1488				ep->e_error = 0;
1489				g_mirror_update_device(sc, 1);
1490			} else {
1491				/* Update disk status. */
1492				G_MIRROR_DEBUG(3, "Running event for disk %s.",
1493				     g_mirror_get_diskname(ep->e_disk));
1494				ep->e_error = g_mirror_update_disk(ep->e_disk,
1495				    ep->e_state);
1496				if (ep->e_error == 0)
1497					g_mirror_update_device(sc, 0);
1498			}
1499			g_topology_unlock();
1500			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
1501				KASSERT(ep->e_error == 0,
1502				    ("Error cannot be handled."));
1503				g_mirror_event_free(ep);
1504			} else {
1505				ep->e_flags |= G_MIRROR_EVENT_DONE;
1506				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
1507				    ep);
1508				mtx_lock(&sc->sc_events_mtx);
1509				wakeup(ep);
1510				mtx_unlock(&sc->sc_events_mtx);
1511			}
1512			if ((sc->sc_flags &
1513			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
1514				if (g_mirror_try_destroy(sc))
1515					kthread_exit(0);
1516			}
1517			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
1518			continue;
1519		}
1520		/*
1521		 * Now I/O requests.
1522		 */
1523		/* Get first request from the queue. */
1524		mtx_lock(&sc->sc_queue_mtx);
1525		bp = bioq_first(&sc->sc_queue);
1526		if (bp == NULL) {
1527			if (ep != NULL) {
1528				/*
1529				 * No I/O requests and topology lock was
1530				 * already held? Try again.
1531				 */
1532				mtx_unlock(&sc->sc_queue_mtx);
1533				continue;
1534			}
1535			if ((sc->sc_flags &
1536			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
1537				mtx_unlock(&sc->sc_queue_mtx);
1538				if (g_mirror_try_destroy(sc))
1539					kthread_exit(0);
1540				mtx_lock(&sc->sc_queue_mtx);
1541			}
1542		}
1543		if (sc->sc_sync.ds_ndisks > 0 &&
1544		    (bp == NULL || nreqs > g_mirror_reqs_per_sync)) {
1545			mtx_unlock(&sc->sc_queue_mtx);
1546			/*
1547			 * It is time for synchronization...
1548			 */
1549			nreqs = 0;
1550			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1551				if (disk->d_state !=
1552				    G_MIRROR_DISK_STATE_SYNCHRONIZING) {
1553					continue;
1554				}
1555				sync = &disk->d_sync;
1556				if (sync->ds_offset >=
1557				    sc->sc_provider->mediasize) {
1558					continue;
1559				}
1560				if (sync->ds_offset > sync->ds_offset_done)
1561					continue;
1562				if (sync->ds_resync != -1) {
1563					sync->ds_offset = sync->ds_resync;
1564					sync->ds_offset_done = sync->ds_resync;
1565					sync->ds_resync = -1;
1566				}
1567				g_mirror_sync_one(disk);
1568			}
1569			G_MIRROR_DEBUG(5, "%s: I'm here 2.", __func__);
1570			goto sleep;
1571		}
1572		if (bp == NULL) {
1573			if (g_mirror_check_idle(sc)) {
1574				u_int idletime;
1575
1576				idletime = g_mirror_idletime;
1577				if (idletime == 0)
1578					idletime = 1;
1579				idletime *= hz;
1580				if (msleep(sc, &sc->sc_queue_mtx, PRIBIO | PDROP,
1581				    "m:w1", idletime) == EWOULDBLOCK) {
1582					G_MIRROR_DEBUG(5, "%s: I'm here 3.",
1583					    __func__);
1584					/*
1585					 * No I/O requests in 'idletime' seconds,
1586					 * so mark components as clean.
1587					 */
1588					g_mirror_idle(sc);
1589				}
1590				G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
1591			} else {
1592				MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP,
1593				    "m:w2", 0);
1594				G_MIRROR_DEBUG(5, "%s: I'm here 5.", __func__);
1595			}
1596			continue;
1597		}
1598		nreqs++;
1599		bioq_remove(&sc->sc_queue, bp);
1600		mtx_unlock(&sc->sc_queue_mtx);
1601
1602		if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0) {
1603			g_mirror_regular_request(bp);
1604		} else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
1605			u_int timeout, sps;
1606
1607			g_mirror_sync_request(bp);
1608sleep:
1609			sps = g_mirror_syncs_per_sec;
1610			if (sps == 0) {
1611				G_MIRROR_DEBUG(5, "%s: I'm here 6.", __func__);
1612				continue;
1613			}
1614			if (ep != NULL) {
1615				/*
1616				 * We have some pending events, don't sleep now.
1617				 */
1618				G_MIRROR_DEBUG(5, "%s: I'm here 7.", __func__);
1619				continue;
1620			}
1621			mtx_lock(&sc->sc_queue_mtx);
1622			if (bioq_first(&sc->sc_queue) != NULL) {
1623				mtx_unlock(&sc->sc_queue_mtx);
1624				G_MIRROR_DEBUG(5, "%s: I'm here 8.", __func__);
1625				continue;
1626			}
1627			timeout = hz / sps;
1628			if (timeout == 0)
1629				timeout = 1;
1630			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w3",
1631			    timeout);
1632		} else {
1633			g_mirror_register_request(bp);
1634		}
1635		G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
1636	}
1637}
1638
1639/*
1640 * Open disk's consumer if needed.
1641 */
1642static void
1643g_mirror_update_access(struct g_mirror_disk *disk)
1644{
1645	struct g_provider *pp;
1646
1647	g_topology_assert();
1648
1649	pp = disk->d_softc->sc_provider;
1650	if (pp == NULL)
1651		return;
1652	if (pp->acw > 0) {
1653		if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
1654			G_MIRROR_DEBUG(1,
1655			    "Disk %s (device %s) marked as dirty.",
1656			    g_mirror_get_diskname(disk),
1657			    disk->d_softc->sc_name);
1658			disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
1659		}
1660	} else if (pp->acw == 0) {
1661		if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
1662			G_MIRROR_DEBUG(1,
1663			    "Disk %s (device %s) marked as clean.",
1664			    g_mirror_get_diskname(disk),
1665			    disk->d_softc->sc_name);
1666			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
1667		}
1668	}
1669}
1670
/*
 * Start the synchronization (rebuild) process for a disk: create a
 * consumer on the device's synchronization geom, attach it to the
 * mirror provider, open it for reading, and allocate the copy buffer.
 */
static void
g_mirror_sync_start(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	int error;

	g_topology_assert();

	sc = disk->d_softc;
	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
	    sc->sc_state));

	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
	    g_mirror_get_diskname(disk));
	/* A component under rebuild is dirty for the whole duration. */
	disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	KASSERT(disk->d_sync.ds_consumer == NULL,
	    ("Sync consumer already exists (device=%s, disk=%s).",
	    sc->sc_name, g_mirror_get_diskname(disk)));
	disk->d_sync.ds_consumer = g_new_consumer(sc->sc_sync.ds_geom);
	disk->d_sync.ds_consumer->private = disk;
	disk->d_sync.ds_consumer->index = 0;
	/* Read the canonical data from the mirror provider itself. */
	error = g_attach(disk->d_sync.ds_consumer, disk->d_softc->sc_provider);
	KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
	    disk->d_softc->sc_name, error));
	/* Open read-only (r1w0e0); 'error' is only examined by the KASSERT. */
	error = g_access(disk->d_sync.ds_consumer, 1, 0, 0);
	KASSERT(error == 0, ("Cannot open %s (error=%d).",
	    disk->d_softc->sc_name, error));
	/* Transfer buffer for the synchronization requests (MAXPHYS bytes). */
	disk->d_sync.ds_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
	sc->sc_sync.ds_ndisks++;
}
1702
1703/*
1704 * Stop synchronization process.
1705 * type: 0 - synchronization finished
1706 *       1 - synchronization stopped
1707 */
1708static void
1709g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
1710{
1711
1712	g_topology_assert();
1713	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
1714	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
1715	    g_mirror_disk_state2str(disk->d_state)));
1716	if (disk->d_sync.ds_consumer == NULL)
1717		return;
1718
1719	if (type == 0) {
1720		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
1721		    disk->d_softc->sc_name, g_mirror_get_diskname(disk));
1722	} else /* if (type == 1) */ {
1723		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
1724		    disk->d_softc->sc_name, g_mirror_get_diskname(disk));
1725	}
1726	g_mirror_kill_consumer(disk->d_softc, disk->d_sync.ds_consumer);
1727	free(disk->d_sync.ds_data, M_MIRROR);
1728	disk->d_sync.ds_consumer = NULL;
1729	disk->d_softc->sc_sync.ds_ndisks--;
1730	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
1731}
1732
1733static void
1734g_mirror_launch_provider(struct g_mirror_softc *sc)
1735{
1736	struct g_mirror_disk *disk;
1737	struct g_provider *pp;
1738
1739	g_topology_assert();
1740
1741	pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name);
1742	pp->mediasize = sc->sc_mediasize;
1743	pp->sectorsize = sc->sc_sectorsize;
1744	sc->sc_provider = pp;
1745	g_error_provider(pp, 0);
1746	G_MIRROR_DEBUG(0, "Device %s: provider %s launched.", sc->sc_name,
1747	    pp->name);
1748	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1749		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
1750			g_mirror_sync_start(disk);
1751	}
1752}
1753
/*
 * Withdraw the mirror provider: fail all queued requests with ENXIO,
 * orphan the provider and stop any in-progress synchronization (which
 * reads from this provider).
 */
static void
g_mirror_destroy_provider(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct bio *bp;

	g_topology_assert();
	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
	    sc->sc_name));

	/* Make the provider refuse any new requests. */
	g_error_provider(sc->sc_provider, ENXIO);
	/* Fail everything still queued for the worker thread. */
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
		bioq_remove(&sc->sc_queue, bp);
		g_io_deliver(bp, ENXIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
	    sc->sc_provider->name);
	sc->sc_provider->flags |= G_PF_WITHER;
	g_orphan_provider(sc->sc_provider, ENXIO);
	sc->sc_provider = NULL;
	/* Synchronization consumers are attached to the provider; stop them. */
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			g_mirror_sync_stop(disk, 1);
	}
}
1781
1782static void
1783g_mirror_go(void *arg)
1784{
1785	struct g_mirror_softc *sc;
1786
1787	sc = arg;
1788	G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
1789	g_mirror_event_send(sc, 0,
1790	    G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE);
1791}
1792
/*
 * Decide which state a connecting disk should enter, by comparing the
 * disk's on-disk syncid with the device's current syncid.  May destroy
 * the disk and return G_MIRROR_DISK_STATE_NONE when the disk is fresher
 * than the running device.
 */
static u_int
g_mirror_determine_state(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	u_int state;

	sc = disk->d_softc;
	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
		if ((disk->d_flags &
		    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
			/* Disk does not need synchronization. */
			state = G_MIRROR_DISK_STATE_ACTIVE;
		} else {
			if ((sc->sc_flags &
			     G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0  ||
			    (disk->d_flags &
			     G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
				/*
				 * We can start synchronization from
				 * the stored offset.
				 */
				state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
			} else {
				/* Autosync disabled and not forced. */
				state = G_MIRROR_DISK_STATE_STALE;
			}
		}
	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
		/*
		 * Reset all synchronization data for this disk,
		 * because if it even was synchronized, it was
		 * synchronized to disks with different syncid.
		 */
		disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
		disk->d_sync.ds_offset = 0;
		disk->d_sync.ds_offset_done = 0;
		disk->d_sync.ds_syncid = sc->sc_syncid;
		if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
		    (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
			state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
		} else {
			state = G_MIRROR_DISK_STATE_STALE;
		}
	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
		/*
		 * Not good: the mirror was started from stale disks and a
		 * fresher disk has just arrived.  If writes happened in the
		 * meantime, the data cannot be reconciled safely, so the
		 * best choice here is to leave this disk untouched and
		 * inform the user loudly.
		 */
		G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
		    "disk (%s) arrives!! It will not be connected to the "
		    "running device.", sc->sc_name,
		    g_mirror_get_diskname(disk));
		g_mirror_destroy_disk(disk);
		state = G_MIRROR_DISK_STATE_NONE;
		/* Return immediately, because disk was destroyed. */
		return (state);
	}
	G_MIRROR_DEBUG(3, "State for %s disk: %s.",
	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
	return (state);
}
1857
1858/*
1859 * Update device state.
1860 */
1861static void
1862g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force)
1863{
1864	struct g_mirror_disk *disk;
1865	u_int state;
1866
1867	g_topology_assert();
1868
1869	switch (sc->sc_state) {
1870	case G_MIRROR_DEVICE_STATE_STARTING:
1871	    {
1872		struct g_mirror_disk *pdisk, *tdisk;
1873		u_int dirty, ndisks, genid, syncid;
1874
1875		KASSERT(sc->sc_provider == NULL,
1876		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
1877		/*
1878		 * Are we ready? We are, if all disks are connected or
1879		 * if we have any disks and 'force' is true.
1880		 */
1881		if ((force && g_mirror_ndisks(sc, -1) > 0) ||
1882		    sc->sc_ndisks == g_mirror_ndisks(sc, -1)) {
1883			;
1884		} else if (g_mirror_ndisks(sc, -1) == 0) {
1885			/*
1886			 * Disks went down in starting phase, so destroy
1887			 * device.
1888			 */
1889			callout_drain(&sc->sc_callout);
1890			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
1891			return;
1892		} else {
1893			return;
1894		}
1895
1896		/*
1897		 * Activate all disks with the biggest syncid.
1898		 */
1899		if (force) {
1900			/*
1901			 * If 'force' is true, we have been called due to
1902			 * timeout, so don't bother canceling timeout.
1903			 */
1904			ndisks = 0;
1905			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1906				if ((disk->d_flags &
1907				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
1908					ndisks++;
1909				}
1910			}
1911			if (ndisks == 0) {
1912				/* No valid disks found, destroy device. */
1913				sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
1914				return;
1915			}
1916		} else {
1917			/* Cancel timeout. */
1918			callout_drain(&sc->sc_callout);
1919		}
1920
1921		/*
1922		 * Find the biggest genid.
1923		 */
1924		genid = 0;
1925		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1926			if (disk->d_genid > genid)
1927				genid = disk->d_genid;
1928		}
1929		sc->sc_genid = genid;
1930		/*
1931		 * Remove all disks without the biggest genid.
1932		 */
1933		LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) {
1934			if (disk->d_genid < genid) {
1935				G_MIRROR_DEBUG(0,
1936				    "Component %s (device %s) broken, skipping.",
1937				    g_mirror_get_diskname(disk), sc->sc_name);
1938				g_mirror_destroy_disk(disk);
1939			}
1940		}
1941
1942		/*
1943		 * Find the biggest syncid.
1944		 */
1945		syncid = 0;
1946		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1947			if (disk->d_sync.ds_syncid > syncid)
1948				syncid = disk->d_sync.ds_syncid;
1949		}
1950
1951		/*
1952		 * Here we need to look for dirty disks and if all disks
1953		 * with the biggest syncid are dirty, we have to choose
1954		 * one with the biggest priority and rebuild the rest.
1955		 */
1956		/*
1957		 * Find the number of dirty disks with the biggest syncid.
1958		 * Find the number of disks with the biggest syncid.
1959		 * While here, find a disk with the biggest priority.
1960		 */
1961		dirty = ndisks = 0;
1962		pdisk = NULL;
1963		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1964			if (disk->d_sync.ds_syncid != syncid)
1965				continue;
1966			if ((disk->d_flags &
1967			    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
1968				continue;
1969			}
1970			ndisks++;
1971			if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
1972				dirty++;
1973				if (pdisk == NULL ||
1974				    pdisk->d_priority < disk->d_priority) {
1975					pdisk = disk;
1976				}
1977			}
1978		}
1979		if (dirty == 0) {
1980			/* No dirty disks at all, great. */
1981		} else if (dirty == ndisks) {
1982			/*
1983			 * Force synchronization for all dirty disks except one
1984			 * with the biggest priority.
1985			 */
1986			KASSERT(pdisk != NULL, ("pdisk == NULL"));
1987			G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a "
1988			    "master disk for synchronization.",
1989			    g_mirror_get_diskname(pdisk), sc->sc_name);
1990			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1991				if (disk->d_sync.ds_syncid != syncid)
1992					continue;
1993				if ((disk->d_flags &
1994				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
1995					continue;
1996				}
1997				KASSERT((disk->d_flags &
1998				    G_MIRROR_DISK_FLAG_DIRTY) != 0,
1999				    ("Disk %s isn't marked as dirty.",
2000				    g_mirror_get_diskname(disk)));
2001				/* Skip the disk with the biggest priority. */
2002				if (disk == pdisk)
2003					continue;
2004				disk->d_sync.ds_syncid = 0;
2005			}
2006		} else if (dirty < ndisks) {
2007			/*
2008			 * Force synchronization for all dirty disks.
2009			 * We have some non-dirty disks.
2010			 */
2011			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2012				if (disk->d_sync.ds_syncid != syncid)
2013					continue;
2014				if ((disk->d_flags &
2015				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
2016					continue;
2017				}
2018				if ((disk->d_flags &
2019				    G_MIRROR_DISK_FLAG_DIRTY) == 0) {
2020					continue;
2021				}
2022				disk->d_sync.ds_syncid = 0;
2023			}
2024		}
2025
2026		/* Reset hint. */
2027		sc->sc_hint = NULL;
2028		sc->sc_syncid = syncid;
2029		if (force) {
2030			/* Remember to bump syncid on first write. */
2031			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2032		}
2033		state = G_MIRROR_DEVICE_STATE_RUNNING;
2034		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
2035		    sc->sc_name, g_mirror_device_state2str(sc->sc_state),
2036		    g_mirror_device_state2str(state));
2037		sc->sc_state = state;
2038		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2039			state = g_mirror_determine_state(disk);
2040			g_mirror_event_send(disk, state,
2041			    G_MIRROR_EVENT_DONTWAIT);
2042			if (state == G_MIRROR_DISK_STATE_STALE)
2043				sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2044		}
2045		wakeup(&g_mirror_class);
2046		break;
2047	    }
2048	case G_MIRROR_DEVICE_STATE_RUNNING:
2049		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
2050		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2051			/*
2052			 * No active disks or no disks at all,
2053			 * so destroy device.
2054			 */
2055			if (sc->sc_provider != NULL)
2056				g_mirror_destroy_provider(sc);
2057			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2058			break;
2059		} else if (g_mirror_ndisks(sc,
2060		    G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
2061		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2062			/*
2063			 * We have active disks, launch provider if it doesn't
2064			 * exist.
2065			 */
2066			if (sc->sc_provider == NULL)
2067				g_mirror_launch_provider(sc);
2068		}
2069		/*
2070		 * Genid should be bumped immediately, so do it here.
2071		 */
2072		if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) {
2073			sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID;
2074			g_mirror_bump_genid(sc);
2075		}
2076		break;
2077	default:
2078		KASSERT(1 == 0, ("Wrong device state (%s, %s).",
2079		    sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
2080		break;
2081	}
2082}
2083
2084/*
2085 * Update disk state and device state if needed.
2086 */
2087#define	DISK_STATE_CHANGED()	G_MIRROR_DEBUG(1,			\
2088	"Disk %s state changed from %s to %s (device %s).",		\
2089	g_mirror_get_diskname(disk),					\
2090	g_mirror_disk_state2str(disk->d_state),				\
2091	g_mirror_disk_state2str(state), sc->sc_name)
2092static int
g_mirror_update_disk(struct g_mirror_disk *disk, u_int state)
{
	struct g_mirror_softc *sc;

	/*
	 * Per-component state machine: move 'disk' into the requested
	 * 'state', asserting that the transition is one we expect.
	 * Returns 0 on success or an errno value (only the DESTROY case
	 * can fail).  Runs with the topology lock held.
	 */
	g_topology_assert();

	sc = disk->d_softc;
again:
	G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.",
	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state),
	    g_mirror_disk_state2str(state));
	switch (state) {
	case G_MIRROR_DISK_STATE_NEW:
		/*
		 * Possible scenarios:
		 * 1. A new disk arrives.
		 */
		/* Previous state should be NONE. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		disk->d_state = state;
		/* Insert so the disk list stays ordered by d_priority. */
		if (LIST_EMPTY(&sc->sc_disks))
			LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next);
		else {
			struct g_mirror_disk *dp;

			LIST_FOREACH(dp, &sc->sc_disks, d_next) {
				if (disk->d_priority >= dp->d_priority) {
					LIST_INSERT_BEFORE(dp, disk, d_next);
					/* NULL 'dp' marks "already inserted". */
					dp = NULL;
					break;
				}
				/* Remember the tail for INSERT_AFTER below. */
				if (LIST_NEXT(dp, d_next) == NULL)
					break;
			}
			if (dp != NULL)
				LIST_INSERT_AFTER(dp, disk, d_next);
		}
		G_MIRROR_DEBUG(0, "Device %s: provider %s detected.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		/* While STARTING, disks stay NEW until the device decides. */
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
			break;
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		/*
		 * On a running device, determine the disk's real state
		 * immediately and loop to apply the new transition.
		 */
		state = g_mirror_determine_state(disk);
		if (state != G_MIRROR_DISK_STATE_NONE)
			goto again;
		break;
	case G_MIRROR_DISK_STATE_ACTIVE:
		/*
		 * Possible scenarios:
		 * 1. A new disk does not need synchronization.
		 * 2. Synchronization process finished successfully.
		 */
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		/* Previous state should be NEW or SYNCHRONIZING. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		else if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			/* Synchronization is complete; stop the sync I/O. */
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING;
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC;
			g_mirror_sync_stop(disk, 0);
		}
		disk->d_state = state;
		disk->d_sync.ds_offset = 0;
		disk->d_sync.ds_offset_done = 0;
		g_mirror_update_access(disk);
		g_mirror_update_metadata(disk);
		G_MIRROR_DEBUG(0, "Device %s: provider %s activated.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		break;
	case G_MIRROR_DISK_STATE_STALE:
		/*
		 * Possible scenarios:
		 * 1. Stale disk was connected.
		 */
		/* Previous state should be NEW. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		/*
		 * STALE state is only possible if device is marked
		 * NOAUTOSYNC (otherwise the disk would go straight to
		 * SYNCHRONIZING).
		 */
		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		disk->d_state = state;
		g_mirror_update_metadata(disk);
		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		break;
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		/*
		 * Possible scenarios:
		 * 1. Disk which needs synchronization was connected.
		 */
		/* Previous state should be NEW. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		disk->d_state = state;
		/* Sync can start only once the mirror provider exists. */
		if (sc->sc_provider != NULL) {
			g_mirror_sync_start(disk);
			g_mirror_update_metadata(disk);
		}
		break;
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		/*
		 * Possible scenarios:
		 * 1. Device wasn't running yet, but disk disappeared.
		 * 2. Disk was active and disappeared.
		 * 3. Disk disappeared during synchronization process.
		 */
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
			/*
			 * Previous state should be ACTIVE, STALE or
			 * SYNCHRONIZING.
			 */
			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
			    disk->d_state == G_MIRROR_DISK_STATE_STALE ||
			    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
			    ("Wrong disk state (%s, %s).",
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
		} else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
			/* Previous state should be NEW. */
			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
			    ("Wrong disk state (%s, %s).",
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
			/*
			 * Reset bumping syncid if disk disappeared in STARTING
			 * state.
			 */
			if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0)
				sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
#ifdef	INVARIANTS
		} else {
			/* Any other device state here is a programming bug. */
			KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
			    sc->sc_name,
			    g_mirror_device_state2str(sc->sc_state),
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
#endif
		}
		DISK_STATE_CHANGED();
		G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.",
		    sc->sc_name, g_mirror_get_diskname(disk));

		g_mirror_destroy_disk(disk);
		break;
	case G_MIRROR_DISK_STATE_DESTROY:
	    {
		int error;

		/* Wipe on-disk metadata before forgetting the component. */
		error = g_mirror_clear_metadata(disk);
		if (error != 0)
			return (error);
		DISK_STATE_CHANGED();
		G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.",
		    sc->sc_name, g_mirror_get_diskname(disk));

		g_mirror_destroy_disk(disk);
		sc->sc_ndisks--;
		/* Remaining disks must record the new component count. */
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			g_mirror_update_metadata(disk);
		}
		break;
	    }
	default:
		KASSERT(1 == 0, ("Unknown state (%u).", state));
		break;
	}
	return (0);
}
#undef	DISK_STATE_CHANGED
2305
2306int
2307g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md)
2308{
2309	struct g_provider *pp;
2310	u_char *buf;
2311	int error;
2312
2313	g_topology_assert();
2314
2315	error = g_access(cp, 1, 0, 0);
2316	if (error != 0)
2317		return (error);
2318	pp = cp->provider;
2319	g_topology_unlock();
2320	/* Metadata are stored on last sector. */
2321	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2322	    &error);
2323	g_topology_lock();
2324	g_access(cp, -1, 0, 0);
2325	if (error != 0) {
2326		G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).",
2327		    cp->provider->name, error);
2328		if (buf != NULL)
2329			g_free(buf);
2330		return (error);
2331	}
2332
2333	/* Decode metadata. */
2334	error = mirror_metadata_decode(buf, md);
2335	g_free(buf);
2336	if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0)
2337		return (EINVAL);
2338	if (md->md_version > G_MIRROR_VERSION) {
2339		G_MIRROR_DEBUG(0,
2340		    "Kernel module is too old to handle metadata from %s.",
2341		    cp->provider->name);
2342		return (EINVAL);
2343	}
2344	if (error != 0) {
2345		G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
2346		    cp->provider->name);
2347		return (error);
2348	}
2349
2350	return (0);
2351}
2352
2353static int
2354g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp,
2355    struct g_mirror_metadata *md)
2356{
2357
2358	if (g_mirror_id2disk(sc, md->md_did) != NULL) {
2359		G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.",
2360		    pp->name, md->md_did);
2361		return (EEXIST);
2362	}
2363	if (md->md_all != sc->sc_ndisks) {
2364		G_MIRROR_DEBUG(1,
2365		    "Invalid '%s' field on disk %s (device %s), skipping.",
2366		    "md_all", pp->name, sc->sc_name);
2367		return (EINVAL);
2368	}
2369	if (md->md_slice != sc->sc_slice) {
2370		G_MIRROR_DEBUG(1,
2371		    "Invalid '%s' field on disk %s (device %s), skipping.",
2372		    "md_slice", pp->name, sc->sc_name);
2373		return (EINVAL);
2374	}
2375	if (md->md_balance != sc->sc_balance) {
2376		G_MIRROR_DEBUG(1,
2377		    "Invalid '%s' field on disk %s (device %s), skipping.",
2378		    "md_balance", pp->name, sc->sc_name);
2379		return (EINVAL);
2380	}
2381	if (md->md_mediasize != sc->sc_mediasize) {
2382		G_MIRROR_DEBUG(1,
2383		    "Invalid '%s' field on disk %s (device %s), skipping.",
2384		    "md_mediasize", pp->name, sc->sc_name);
2385		return (EINVAL);
2386	}
2387	if (sc->sc_mediasize > pp->mediasize) {
2388		G_MIRROR_DEBUG(1,
2389		    "Invalid size of disk %s (device %s), skipping.", pp->name,
2390		    sc->sc_name);
2391		return (EINVAL);
2392	}
2393	if (md->md_sectorsize != sc->sc_sectorsize) {
2394		G_MIRROR_DEBUG(1,
2395		    "Invalid '%s' field on disk %s (device %s), skipping.",
2396		    "md_sectorsize", pp->name, sc->sc_name);
2397		return (EINVAL);
2398	}
2399	if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2400		G_MIRROR_DEBUG(1,
2401		    "Invalid sector size of disk %s (device %s), skipping.",
2402		    pp->name, sc->sc_name);
2403		return (EINVAL);
2404	}
2405	if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) {
2406		G_MIRROR_DEBUG(1,
2407		    "Invalid device flags on disk %s (device %s), skipping.",
2408		    pp->name, sc->sc_name);
2409		return (EINVAL);
2410	}
2411	if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) {
2412		G_MIRROR_DEBUG(1,
2413		    "Invalid disk flags on disk %s (device %s), skipping.",
2414		    pp->name, sc->sc_name);
2415		return (EINVAL);
2416	}
2417	return (0);
2418}
2419
2420int
2421g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
2422    struct g_mirror_metadata *md)
2423{
2424	struct g_mirror_disk *disk;
2425	int error;
2426
2427	g_topology_assert();
2428	G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name);
2429
2430	error = g_mirror_check_metadata(sc, pp, md);
2431	if (error != 0)
2432		return (error);
2433	if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING &&
2434	    md->md_genid < sc->sc_genid) {
2435		G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.",
2436		    pp->name, sc->sc_name);
2437		return (EINVAL);
2438	}
2439	disk = g_mirror_init_disk(sc, pp, md, &error);
2440	if (disk == NULL)
2441		return (error);
2442	error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW,
2443	    G_MIRROR_EVENT_WAIT);
2444	if (error != 0)
2445		return (error);
2446	if (md->md_version < G_MIRROR_VERSION) {
2447		G_MIRROR_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
2448		    pp->name, md->md_version, G_MIRROR_VERSION);
2449		g_mirror_update_metadata(disk);
2450	}
2451	return (0);
2452}
2453
2454static int
2455g_mirror_access(struct g_provider *pp, int acr, int acw, int ace)
2456{
2457	struct g_mirror_softc *sc;
2458	struct g_mirror_disk *disk;
2459	int dcr, dcw, dce;
2460
2461	g_topology_assert();
2462	G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
2463	    acw, ace);
2464
2465	dcr = pp->acr + acr;
2466	dcw = pp->acw + acw;
2467	dce = pp->ace + ace;
2468
2469	sc = pp->geom->softc;
2470	if (sc == NULL || LIST_EMPTY(&sc->sc_disks) ||
2471	    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
2472		if (acr <= 0 && acw <= 0 && ace <= 0)
2473			return (0);
2474		else
2475			return (ENXIO);
2476	}
2477	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2478		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
2479			continue;
2480		/*
2481		 * Mark disk as dirty on open and unmark on close.
2482		 */
2483		if (pp->acw == 0 && dcw > 0) {
2484			G_MIRROR_DEBUG(1,
2485			    "Disk %s (device %s) marked as dirty.",
2486			    g_mirror_get_diskname(disk), sc->sc_name);
2487			disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
2488			g_mirror_update_metadata(disk);
2489		} else if (pp->acw > 0 && dcw == 0) {
2490			G_MIRROR_DEBUG(1,
2491			    "Disk %s (device %s) marked as clean.",
2492			    g_mirror_get_diskname(disk), sc->sc_name);
2493			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2494			g_mirror_update_metadata(disk);
2495		}
2496	}
2497	return (0);
2498}
2499
/*
 * Create a new mirror device from metadata 'md': the action geom, the
 * synchronization geom, the softc and the worker thread.  Returns the
 * action geom, or NULL on failure.  Called with the topology lock held.
 */
static struct g_geom *
g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_geom *gp;
	int error, timeout;

	g_topology_assert();
	G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name,
	    md->md_mid);

	/* One disk is minimum. */
	if (md->md_all < 1)
		return (NULL);
	/*
	 * Action geom.
	 */
	gp = g_new_geomf(mp, "%s", md->md_name);
	sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO);
	gp->start = g_mirror_start;
	gp->orphan = g_mirror_orphan;
	gp->access = g_mirror_access;
	gp->dumpconf = g_mirror_dumpconf;

	/* Populate the softc from the on-disk metadata. */
	sc->sc_id = md->md_mid;
	sc->sc_slice = md->md_slice;
	sc->sc_balance = md->md_balance;
	sc->sc_mediasize = md->md_mediasize;
	sc->sc_sectorsize = md->md_sectorsize;
	sc->sc_ndisks = md->md_all;
	sc->sc_flags = md->md_mflags;
	sc->sc_bump_id = 0;
	sc->sc_idle = 0;
	bioq_init(&sc->sc_queue);
	mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF);
	LIST_INIT(&sc->sc_disks);
	TAILQ_INIT(&sc->sc_events);
	mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF);
	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
	sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING;
	gp->softc = sc;
	sc->sc_geom = gp;
	sc->sc_provider = NULL;
	/*
	 * Synchronization geom.
	 */
	gp = g_new_geomf(mp, "%s.sync", md->md_name);
	gp->softc = sc;
	gp->orphan = g_mirror_orphan;
	sc->sc_sync.ds_geom = gp;
	sc->sc_sync.ds_ndisks = 0;
	error = kthread_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0,
	    "g_mirror %s", md->md_name);
	if (error != 0) {
		G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.",
		    sc->sc_name);
		/* Tear down everything created above, in reverse order. */
		g_destroy_geom(sc->sc_sync.ds_geom);
		mtx_destroy(&sc->sc_events_mtx);
		mtx_destroy(&sc->sc_queue_mtx);
		g_destroy_geom(sc->sc_geom);
		free(sc, M_MIRROR);
		return (NULL);
	}

	G_MIRROR_DEBUG(0, "Device %s created (id=%u).", sc->sc_name, sc->sc_id);

	/*
	 * Run timeout: give the remaining components a limited time to
	 * appear before the device is forced out of STARTING.
	 */
	timeout = g_mirror_timeout * hz;
	callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc);
	return (sc->sc_geom);
}
2573
/*
 * Destroy the mirror device 'sc'.  With 'force' the device is removed
 * even while its provider is still open; otherwise EBUSY is returned.
 * The actual teardown is delegated to the worker thread: we set the
 * DESTROY/WAIT flags, wake the worker, and sleep until it exits.
 * Called with the topology lock held; the lock is dropped while waiting.
 */
int
g_mirror_destroy(struct g_mirror_softc *sc, boolean_t force)
{
	struct g_provider *pp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);
	pp = sc->sc_provider;
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_MIRROR_DEBUG(1, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		} else {
			G_MIRROR_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT;
	g_topology_unlock();
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	/* Wake the worker under its queue lock so it notices the flags. */
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
	/* Poll until the worker clears sc_worker on its way out. */
	while (sc->sc_worker != NULL)
		tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5);
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
	g_topology_lock();
	g_mirror_destroy_device(sc);
	free(sc, M_MIRROR);
	return (0);
}
2612
2613static void
2614g_mirror_taste_orphan(struct g_consumer *cp)
2615{
2616
2617	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2618	    cp->provider->name));
2619}
2620
/*
 * GEOM taste method: probe provider 'pp' for mirror metadata and, when
 * found, attach it to an existing device or create a new one.  Returns
 * the device geom on success, NULL otherwise.
 */
static struct g_geom *
g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_mirror_metadata md;
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);

	/* Temporary geom/consumer used only to read the metadata. */
	gp = g_new_geomf(mp, "mirror:taste");
	/*
	 * This orphan function should be never called.
	 */
	gp->orphan = g_mirror_taste_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_mirror_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	/* Hardcoded provider name in metadata must match, when present. */
	if (md.md_provider[0] != '\0' && strcmp(md.md_provider, pp->name) != 0)
		return (NULL);
	/* Recorded provider size must match, when present. */
	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
		return (NULL);
	if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
		G_MIRROR_DEBUG(0,
		    "Device %s: provider %s marked as inactive, skipping.",
		    md.md_name, pp->name);
		return (NULL);
	}
	if (g_mirror_debug >= 2)
		mirror_metadata_dump(&md);

	/*
	 * Let's check if device already exists.
	 */
	sc = NULL;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		/* Skip synchronization geoms; only action geoms count. */
		if (sc->sc_sync.ds_geom == gp)
			continue;
		if (strcmp(md.md_name, sc->sc_name) != 0)
			continue;
		/* Same name but different device id: name collision. */
		if (md.md_mid != sc->sc_id) {
			G_MIRROR_DEBUG(0, "Device %s already configured.",
			    sc->sc_name);
			return (NULL);
		}
		break;
	}
	if (gp == NULL) {
		gp = g_mirror_create(mp, &md);
		if (gp == NULL) {
			G_MIRROR_DEBUG(0, "Cannot create device %s.",
			    md.md_name);
			return (NULL);
		}
		sc = gp->softc;
	}
	G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
	error = g_mirror_add_disk(sc, pp, &md);
	if (error != 0) {
		G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
		    pp->name, gp->name, error);
		/* Don't keep a freshly created device with no disks. */
		if (LIST_EMPTY(&sc->sc_disks))
			g_mirror_destroy(sc, 1);
		return (NULL);
	}
	return (gp);
}
2701
2702static int
2703g_mirror_destroy_geom(struct gctl_req *req __unused,
2704    struct g_class *mp __unused, struct g_geom *gp)
2705{
2706
2707	return (g_mirror_destroy(gp->softc, 0));
2708}
2709
/*
 * GEOM dumpconf method: emit XML describing the device (geom level) or
 * a single component (consumer level) into 'sb'.  The provider level
 * has nothing to add beyond the generic GEOM output.
 */
static void
g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_mirror_softc *sc;

	g_topology_assert();

	sc = gp->softc;
	if (sc == NULL)
		return;
	/* Skip synchronization geom. */
	if (gp == sc->sc_sync.ds_geom)
		return;
	if (pp != NULL) {
		/* Nothing here. */
	} else if (cp != NULL) {
		struct g_mirror_disk *disk;

		/* Per-component (consumer) section. */
		disk = cp->private;
		if (disk == NULL)
			return;
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			/* Sync progress as a percentage of provider size. */
			sbuf_printf(sb, "%s<Synchronized>", indent);
			if (disk->d_sync.ds_offset_done == 0)
				sbuf_printf(sb, "0%%");
			else {
				sbuf_printf(sb, "%u%%",
				    (u_int)((disk->d_sync.ds_offset_done * 100) /
				    sc->sc_provider->mediasize));
			}
			sbuf_printf(sb, "</Synchronized>\n");
		}
		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
		    disk->d_sync.ds_syncid);
		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent,
		    disk->d_genid);
		sbuf_printf(sb, "%s<Flags>", indent);
		if (disk->d_flags == 0)
			sbuf_printf(sb, "NONE");
		else {
			int first = 1;

/* Emit 'name' for each flag set, comma-separating after the first. */
#define	ADD_FLAG(flag, name)	do {					\
	if ((disk->d_flags & (flag)) != 0) {				\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
			ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY");
			ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED");
			ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE");
			ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING,
			    "SYNCHRONIZING");
			ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
#undef	ADD_FLAG
		}
		sbuf_printf(sb, "</Flags>\n");
		sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent,
		    disk->d_priority);
		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
		    g_mirror_disk_state2str(disk->d_state));
	} else {
		/* Device-level (geom) section. */
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
		sbuf_printf(sb, "%s<Flags>", indent);
		if (sc->sc_flags == 0)
			sbuf_printf(sb, "NONE");
		else {
			int first = 1;

/* Same flag-emitting helper, but for device flags. */
#define	ADD_FLAG(flag, name)	do {					\
	if ((sc->sc_flags & (flag)) != 0) {				\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
#undef	ADD_FLAG
		}
		sbuf_printf(sb, "</Flags>\n");
		sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent,
		    (u_int)sc->sc_slice);
		sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent,
		    balance_name(sc->sc_balance));
		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
		    sc->sc_ndisks);
		/* COMPLETE only when every component is ACTIVE. */
		sbuf_printf(sb, "%s<State>", indent);
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
			sbuf_printf(sb, "%s", "STARTING");
		else if (sc->sc_ndisks ==
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE))
			sbuf_printf(sb, "%s", "COMPLETE");
		else
			sbuf_printf(sb, "%s", "DEGRADED");
		sbuf_printf(sb, "</State>\n");
	}
}
2816
2817static void
2818g_mirror_shutdown(void *arg, int howto)
2819{
2820	struct g_class *mp;
2821	struct g_geom *gp, *gp2;
2822
2823	mp = arg;
2824	DROP_GIANT();
2825	g_topology_lock();
2826	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
2827		if (gp->softc == NULL)
2828			continue;
2829		g_mirror_destroy(gp->softc, 1);
2830	}
2831	g_topology_unlock();
2832	PICKUP_GIANT();
2833#if 0
2834	tsleep(&gp, PRIBIO, "m:shutdown", hz * 20);
2835#endif
2836}
2837
2838static void
2839g_mirror_init(struct g_class *mp)
2840{
2841
2842	g_mirror_ehtag = EVENTHANDLER_REGISTER(shutdown_post_sync,
2843	    g_mirror_shutdown, mp, SHUTDOWN_PRI_FIRST);
2844	if (g_mirror_ehtag == NULL)
2845		G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event.");
2846}
2847
2848static void
2849g_mirror_fini(struct g_class *mp)
2850{
2851
2852	if (g_mirror_ehtag == NULL)
2853		return;
2854	EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_ehtag);
2855}
2856
2857static int
2858g_mirror_can_go(void)
2859{
2860	struct g_mirror_softc *sc;
2861	struct g_geom *gp;
2862	struct g_provider *pp;
2863	int can_go;
2864
2865	DROP_GIANT();
2866	can_go = 1;
2867	g_topology_lock();
2868	LIST_FOREACH(gp, &g_mirror_class.geom, geom) {
2869		sc = gp->softc;
2870		if (sc == NULL) {
2871			can_go = 0;
2872			break;
2873		}
2874		pp = sc->sc_provider;
2875		if (pp == NULL || pp->error != 0) {
2876			can_go = 0;
2877			break;
2878		}
2879	}
2880	g_topology_unlock();
2881	PICKUP_GIANT();
2882	return (can_go);
2883}
2884
2885static void
2886g_mirror_rootwait(void)
2887{
2888
2889	/*
2890	 * HACK: Wait for GEOM, because g_mirror_rootwait() can be called,
2891	 * HACK: before we get providers for tasting.
2892	 */
2893	tsleep(&g_mirror_class, PRIBIO, "mroot", hz * 3);
2894	/*
2895	 * Wait for mirrors in degraded state.
2896	 */
2897	for (;;) {
2898		if (g_mirror_can_go())
2899			break;
2900		tsleep(&g_mirror_class, PRIBIO, "mroot", hz);
2901	}
2902}
2903
/* Block root mount at the RAID stage until mirrors are usable. */
SYSINIT(g_mirror_root, SI_SUB_RAID, SI_ORDER_FIRST, g_mirror_rootwait, NULL)

/* Register the gmirror GEOM class with the kernel. */
DECLARE_GEOM_CLASS(g_mirror_class, g_mirror);
2907