g_mirror.c revision 145305
11573Srgrimes/*-
21573Srgrimes * Copyright (c) 2004-2005 Pawel Jakub Dawidek <pjd@FreeBSD.org>
31573Srgrimes * All rights reserved.
41573Srgrimes *
51573Srgrimes * Redistribution and use in source and binary forms, with or without
61573Srgrimes * modification, are permitted provided that the following conditions
71573Srgrimes * are met:
81573Srgrimes * 1. Redistributions of source code must retain the above copyright
91573Srgrimes *    notice, this list of conditions and the following disclaimer.
101573Srgrimes * 2. Redistributions in binary form must reproduce the above copyright
111573Srgrimes *    notice, this list of conditions and the following disclaimer in the
121573Srgrimes *    documentation and/or other materials provided with the distribution.
131573Srgrimes *
141573Srgrimes * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
151573Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
161573Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
171573Srgrimes * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
181573Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
191573Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
201573Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
211573Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
221573Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
231573Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
241573Srgrimes * SUCH DAMAGE.
251573Srgrimes */
261573Srgrimes
271573Srgrimes#include <sys/cdefs.h>
281573Srgrimes__FBSDID("$FreeBSD: head/sys/geom/mirror/g_mirror.c 145305 2005-04-19 21:47:25Z pjd $");
291573Srgrimes
301573Srgrimes#include <sys/param.h>
311573Srgrimes#include <sys/systm.h>
3250476Speter#include <sys/kernel.h>
331573Srgrimes#include <sys/module.h>
3426826Ssteve#include <sys/limits.h>
351573Srgrimes#include <sys/lock.h>
361573Srgrimes#include <sys/mutex.h>
371573Srgrimes#include <sys/bio.h>
381573Srgrimes#include <sys/sysctl.h>
391573Srgrimes#include <sys/malloc.h>
4059460Sphantom#include <sys/eventhandler.h>
4159460Sphantom#include <vm/uma.h>
421573Srgrimes#include <geom/geom.h>
4384306Sru#include <sys/proc.h>
441573Srgrimes#include <sys/kthread.h>
4548835Ssimokawa#include <sys/sched.h>
461573Srgrimes#include <geom/mirror/g_mirror.h>
471573Srgrimes
481573Srgrimes
49108028Srustatic MALLOC_DEFINE(M_MIRROR, "mirror data", "GEOM_MIRROR Data");
501573Srgrimes
511573SrgrimesSYSCTL_DECL(_kern_geom);
5217345SbdeSYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, "GEOM_MIRROR stuff");
531573Srgrimesu_int g_mirror_debug = 0;
541573SrgrimesTUNABLE_INT("kern.geom.mirror.debug", &g_mirror_debug);
551573SrgrimesSYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RW, &g_mirror_debug, 0,
5617345Sbde    "Debug level");
5717345Sbdestatic u_int g_mirror_timeout = 4;
581573SrgrimesTUNABLE_INT("kern.geom.mirror.timeout", &g_mirror_timeout);
591573SrgrimesSYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RW, &g_mirror_timeout,
601573Srgrimes    0, "Time to wait on all mirror components");
611573Srgrimesstatic u_int g_mirror_idletime = 5;
621573SrgrimesTUNABLE_INT("kern.geom.mirror.idletime", &g_mirror_idletime);
631573SrgrimesSYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RW,
641573Srgrimes    &g_mirror_idletime, 0, "Mark components as clean when idling");
651573Srgrimesstatic u_int g_mirror_reqs_per_sync = 5;
661573SrgrimesSYSCTL_UINT(_kern_geom_mirror, OID_AUTO, reqs_per_sync, CTLFLAG_RW,
6717345Sbde    &g_mirror_reqs_per_sync, 0,
681573Srgrimes    "Number of regular I/O requests per synchronization request");
691573Srgrimesstatic u_int g_mirror_syncs_per_sec = 1000;
701573SrgrimesSYSCTL_UINT(_kern_geom_mirror, OID_AUTO, syncs_per_sec, CTLFLAG_RW,
711573Srgrimes    &g_mirror_syncs_per_sec, 0,
721573Srgrimes    "Number of synchronizations requests per second");
731573Srgrimes
741573Srgrimes#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
751573Srgrimes	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
761573Srgrimes	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
77108087Sru	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
781573Srgrimes} while (0)
791573Srgrimes
801573Srgrimesstatic eventhandler_tag g_mirror_ehtag = NULL;
81108087Sru
821573Srgrimesstatic int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp,
831573Srgrimes    struct g_geom *gp);
841573Srgrimesstatic g_taste_t g_mirror_taste;
851573Srgrimesstatic void g_mirror_init(struct g_class *mp);
861573Srgrimesstatic void g_mirror_fini(struct g_class *mp);
871573Srgrimes
881573Srgrimesstruct g_class g_mirror_class = {
891573Srgrimes	.name = G_MIRROR_CLASS_NAME,
9081629Syar	.version = G_VERSION,
911573Srgrimes	.ctlreq = g_mirror_config,
921573Srgrimes	.taste = g_mirror_taste,
931573Srgrimes	.destroy_geom = g_mirror_destroy_geom,
941573Srgrimes	.init = g_mirror_init,
951573Srgrimes	.fini = g_mirror_fini
961573Srgrimes};
971573Srgrimes
981573Srgrimes
991573Srgrimesstatic void g_mirror_destroy_provider(struct g_mirror_softc *sc);
1001573Srgrimesstatic int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
1011573Srgrimesstatic void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force);
1021573Srgrimesstatic void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
1031573Srgrimes    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
1041573Srgrimesstatic void g_mirror_sync_stop(struct g_mirror_disk *disk, int type);
1051573Srgrimes
1061573Srgrimes
1071573Srgrimesstatic const char *
10817906Swoschg_mirror_disk_state2str(int state)
109108028Sru{
11017906Swosch
111108028Sru	switch (state) {
11217906Swosch	case G_MIRROR_DISK_STATE_NONE:
1131573Srgrimes		return ("NONE");
1141573Srgrimes	case G_MIRROR_DISK_STATE_NEW:
1151573Srgrimes		return ("NEW");
1161573Srgrimes	case G_MIRROR_DISK_STATE_ACTIVE:
1171573Srgrimes		return ("ACTIVE");
1181573Srgrimes	case G_MIRROR_DISK_STATE_STALE:
1191573Srgrimes		return ("STALE");
1201573Srgrimes	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
1211573Srgrimes		return ("SYNCHRONIZING");
1221573Srgrimes	case G_MIRROR_DISK_STATE_DISCONNECTED:
123		return ("DISCONNECTED");
124	case G_MIRROR_DISK_STATE_DESTROY:
125		return ("DESTROY");
126	default:
127		return ("INVALID");
128	}
129}
130
131static const char *
132g_mirror_device_state2str(int state)
133{
134
135	switch (state) {
136	case G_MIRROR_DEVICE_STATE_STARTING:
137		return ("STARTING");
138	case G_MIRROR_DEVICE_STATE_RUNNING:
139		return ("RUNNING");
140	default:
141		return ("INVALID");
142	}
143}
144
145static const char *
146g_mirror_get_diskname(struct g_mirror_disk *disk)
147{
148
149	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
150		return ("[unknown]");
151	return (disk->d_name);
152}
153
154/*
155 * --- Events handling functions ---
156 * Events in geom_mirror are used to maintain disks and device status
157 * from one thread to simplify locking.
158 */
159static void
160g_mirror_event_free(struct g_mirror_event *ep)
161{
162
163	free(ep, M_MIRROR);
164}
165
166int
167g_mirror_event_send(void *arg, int state, int flags)
168{
169	struct g_mirror_softc *sc;
170	struct g_mirror_disk *disk;
171	struct g_mirror_event *ep;
172	int error;
173
174	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
175	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
176	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
177		disk = NULL;
178		sc = arg;
179	} else {
180		disk = arg;
181		sc = disk->d_softc;
182	}
183	ep->e_disk = disk;
184	ep->e_state = state;
185	ep->e_flags = flags;
186	ep->e_error = 0;
187	mtx_lock(&sc->sc_events_mtx);
188	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
189	mtx_unlock(&sc->sc_events_mtx);
190	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
191	mtx_lock(&sc->sc_queue_mtx);
192	wakeup(sc);
193	mtx_unlock(&sc->sc_queue_mtx);
194	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
195		return (0);
196	g_topology_assert();
197	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
198	g_topology_unlock();
199	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
200		mtx_lock(&sc->sc_events_mtx);
201		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
202		    hz * 5);
203	}
204	/* Don't even try to use 'sc' here, because it could be already dead. */
205	g_topology_lock();
206	error = ep->e_error;
207	g_mirror_event_free(ep);
208	return (error);
209}
210
211static struct g_mirror_event *
212g_mirror_event_get(struct g_mirror_softc *sc)
213{
214	struct g_mirror_event *ep;
215
216	mtx_lock(&sc->sc_events_mtx);
217	ep = TAILQ_FIRST(&sc->sc_events);
218	mtx_unlock(&sc->sc_events_mtx);
219	return (ep);
220}
221
222static void
223g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
224{
225
226	mtx_lock(&sc->sc_events_mtx);
227	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
228	mtx_unlock(&sc->sc_events_mtx);
229}
230
231static void
232g_mirror_event_cancel(struct g_mirror_disk *disk)
233{
234	struct g_mirror_softc *sc;
235	struct g_mirror_event *ep, *tmpep;
236
237	g_topology_assert();
238
239	sc = disk->d_softc;
240	mtx_lock(&sc->sc_events_mtx);
241	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
242		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
243			continue;
244		if (ep->e_disk != disk)
245			continue;
246		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
247		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
248			g_mirror_event_free(ep);
249		else {
250			ep->e_error = ECANCELED;
251			wakeup(ep);
252		}
253	}
254	mtx_unlock(&sc->sc_events_mtx);
255}
256
257/*
258 * Return the number of disks in given state.
259 * If state is equal to -1, count all connected disks.
260 */
261u_int
262g_mirror_ndisks(struct g_mirror_softc *sc, int state)
263{
264	struct g_mirror_disk *disk;
265	u_int n = 0;
266
267	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
268		if (state == -1 || disk->d_state == state)
269			n++;
270	}
271	return (n);
272}
273
274/*
275 * Find a disk in mirror by its disk ID.
276 */
277static struct g_mirror_disk *
278g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
279{
280	struct g_mirror_disk *disk;
281
282	g_topology_assert();
283
284	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
285		if (disk->d_id == id)
286			return (disk);
287	}
288	return (NULL);
289}
290
291static u_int
292g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
293{
294	struct bio *bp;
295	u_int nreqs = 0;
296
297	mtx_lock(&sc->sc_queue_mtx);
298	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
299		if (bp->bio_from == cp)
300			nreqs++;
301	}
302	mtx_unlock(&sc->sc_queue_mtx);
303	return (nreqs);
304}
305
306static int
307g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
308{
309
310	if (cp->index > 0) {
311		G_MIRROR_DEBUG(2,
312		    "I/O requests for %s exist, can't destroy it now.",
313		    cp->provider->name);
314		return (1);
315	}
316	if (g_mirror_nrequests(sc, cp) > 0) {
317		G_MIRROR_DEBUG(2,
318		    "I/O requests for %s in queue, can't destroy it now.",
319		    cp->provider->name);
320		return (1);
321	}
322	return (0);
323}
324
325static void
326g_mirror_destroy_consumer(void *arg, int flags __unused)
327{
328	struct g_consumer *cp;
329
330	cp = arg;
331	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
332	g_detach(cp);
333	g_destroy_consumer(cp);
334}
335
336static void
337g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
338{
339	struct g_provider *pp;
340	int retaste_wait;
341
342	g_topology_assert();
343
344	cp->private = NULL;
345	if (g_mirror_is_busy(sc, cp))
346		return;
347	pp = cp->provider;
348	retaste_wait = 0;
349	if (cp->acw == 1) {
350		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
351			retaste_wait = 1;
352	}
353	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
354	    -cp->acw, -cp->ace, 0);
355	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
356		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
357	if (retaste_wait) {
358		/*
359		 * After retaste event was send (inside g_access()), we can send
360		 * event to detach and destroy consumer.
361		 * A class, which has consumer to the given provider connected
362		 * will not receive retaste event for the provider.
363		 * This is the way how I ignore retaste events when I close
364		 * consumers opened for write: I detach and destroy consumer
365		 * after retaste event is sent.
366		 */
367		g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
368		return;
369	}
370	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
371	g_detach(cp);
372	g_destroy_consumer(cp);
373}
374
375static int
376g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
377{
378	struct g_consumer *cp;
379	int error;
380
381	g_topology_assert();
382	KASSERT(disk->d_consumer == NULL,
383	    ("Disk already connected (device %s).", disk->d_softc->sc_name));
384
385	cp = g_new_consumer(disk->d_softc->sc_geom);
386	error = g_attach(cp, pp);
387	if (error != 0) {
388		g_destroy_consumer(cp);
389		return (error);
390	}
391	error = g_access(cp, 1, 1, 1);
392	if (error != 0) {
393		g_detach(cp);
394		g_destroy_consumer(cp);
395		G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
396		    pp->name, error);
397		return (error);
398	}
399	disk->d_consumer = cp;
400	disk->d_consumer->private = disk;
401	disk->d_consumer->index = 0;
402
403	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
404	return (0);
405}
406
407static void
408g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
409{
410
411	g_topology_assert();
412
413	if (cp == NULL)
414		return;
415	if (cp->provider != NULL)
416		g_mirror_kill_consumer(sc, cp);
417	else
418		g_destroy_consumer(cp);
419}
420
421/*
422 * Initialize disk. This means allocate memory, create consumer, attach it
423 * to the provider and open access (r1w1e1) to it.
424 */
425static struct g_mirror_disk *
426g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
427    struct g_mirror_metadata *md, int *errorp)
428{
429	struct g_mirror_disk *disk;
430	int error;
431
432	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
433	if (disk == NULL) {
434		error = ENOMEM;
435		goto fail;
436	}
437	disk->d_softc = sc;
438	error = g_mirror_connect_disk(disk, pp);
439	if (error != 0)
440		goto fail;
441	disk->d_id = md->md_did;
442	disk->d_state = G_MIRROR_DISK_STATE_NONE;
443	disk->d_priority = md->md_priority;
444	disk->d_delay.sec = 0;
445	disk->d_delay.frac = 0;
446	binuptime(&disk->d_last_used);
447	disk->d_flags = md->md_dflags;
448	if (md->md_provider[0] != '\0')
449		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
450	disk->d_sync.ds_consumer = NULL;
451	disk->d_sync.ds_offset = md->md_sync_offset;
452	disk->d_sync.ds_offset_done = md->md_sync_offset;
453	disk->d_sync.ds_resync = -1;
454	disk->d_genid = md->md_genid;
455	disk->d_sync.ds_syncid = md->md_syncid;
456	if (errorp != NULL)
457		*errorp = 0;
458	return (disk);
459fail:
460	if (errorp != NULL)
461		*errorp = error;
462	if (disk != NULL)
463		free(disk, M_MIRROR);
464	return (NULL);
465}
466
467static void
468g_mirror_destroy_disk(struct g_mirror_disk *disk)
469{
470	struct g_mirror_softc *sc;
471
472	g_topology_assert();
473
474	LIST_REMOVE(disk, d_next);
475	g_mirror_event_cancel(disk);
476	sc = disk->d_softc;
477	if (sc->sc_hint == disk)
478		sc->sc_hint = NULL;
479	switch (disk->d_state) {
480	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
481		g_mirror_sync_stop(disk, 1);
482		/* FALLTHROUGH */
483	case G_MIRROR_DISK_STATE_NEW:
484	case G_MIRROR_DISK_STATE_STALE:
485	case G_MIRROR_DISK_STATE_ACTIVE:
486		g_mirror_disconnect_consumer(sc, disk->d_consumer);
487		free(disk, M_MIRROR);
488		break;
489	default:
490		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
491		    g_mirror_get_diskname(disk),
492		    g_mirror_disk_state2str(disk->d_state)));
493	}
494}
495
496static void
497g_mirror_destroy_device(struct g_mirror_softc *sc)
498{
499	struct g_mirror_disk *disk;
500	struct g_mirror_event *ep;
501	struct g_geom *gp;
502	struct g_consumer *cp, *tmpcp;
503
504	g_topology_assert();
505
506	gp = sc->sc_geom;
507	if (sc->sc_provider != NULL)
508		g_mirror_destroy_provider(sc);
509	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
510	    disk = LIST_FIRST(&sc->sc_disks)) {
511		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
512		g_mirror_update_metadata(disk);
513		g_mirror_destroy_disk(disk);
514	}
515	while ((ep = g_mirror_event_get(sc)) != NULL) {
516		g_mirror_event_remove(sc, ep);
517		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
518			g_mirror_event_free(ep);
519		else {
520			ep->e_error = ECANCELED;
521			ep->e_flags |= G_MIRROR_EVENT_DONE;
522			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
523			mtx_lock(&sc->sc_events_mtx);
524			wakeup(ep);
525			mtx_unlock(&sc->sc_events_mtx);
526		}
527	}
528	callout_drain(&sc->sc_callout);
529	gp->softc = NULL;
530
531	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
532		g_mirror_disconnect_consumer(sc, cp);
533	}
534	sc->sc_sync.ds_geom->softc = NULL;
535	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
536	mtx_destroy(&sc->sc_queue_mtx);
537	mtx_destroy(&sc->sc_events_mtx);
538	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
539	g_wither_geom(gp, ENXIO);
540}
541
542static void
543g_mirror_orphan(struct g_consumer *cp)
544{
545	struct g_mirror_disk *disk;
546
547	g_topology_assert();
548
549	disk = cp->private;
550	if (disk == NULL)
551		return;
552	disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
553	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
554	    G_MIRROR_EVENT_DONTWAIT);
555}
556
557/*
558 * Function should return the next active disk on the list.
559 * It is possible that it will be the same disk as given.
560 * If there are no active disks on list, NULL is returned.
561 */
562static __inline struct g_mirror_disk *
563g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
564{
565	struct g_mirror_disk *dp;
566
567	for (dp = LIST_NEXT(disk, d_next); dp != disk;
568	    dp = LIST_NEXT(dp, d_next)) {
569		if (dp == NULL)
570			dp = LIST_FIRST(&sc->sc_disks);
571		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
572			break;
573	}
574	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
575		return (NULL);
576	return (dp);
577}
578
579static struct g_mirror_disk *
580g_mirror_get_disk(struct g_mirror_softc *sc)
581{
582	struct g_mirror_disk *disk;
583
584	if (sc->sc_hint == NULL) {
585		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
586		if (sc->sc_hint == NULL)
587			return (NULL);
588	}
589	disk = sc->sc_hint;
590	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
591		disk = g_mirror_find_next(sc, disk);
592		if (disk == NULL)
593			return (NULL);
594	}
595	sc->sc_hint = g_mirror_find_next(sc, disk);
596	return (disk);
597}
598
599static int
600g_mirror_write_metadata(struct g_mirror_disk *disk,
601    struct g_mirror_metadata *md)
602{
603	struct g_mirror_softc *sc;
604	struct g_consumer *cp;
605	off_t offset, length;
606	u_char *sector;
607	int error = 0;
608
609	g_topology_assert();
610
611	sc = disk->d_softc;
612	cp = disk->d_consumer;
613	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
614	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
615	KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
616	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
617	    cp->acw, cp->ace));
618	length = cp->provider->sectorsize;
619	offset = cp->provider->mediasize - length;
620	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
621	if (md != NULL)
622		mirror_metadata_encode(md, sector);
623	g_topology_unlock();
624	error = g_write_data(cp, offset, sector, length);
625	g_topology_lock();
626	free(sector, M_MIRROR);
627	if (error != 0) {
628		disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_GENID;
629		g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
630		    G_MIRROR_EVENT_DONTWAIT);
631	}
632	return (error);
633}
634
635static int
636g_mirror_clear_metadata(struct g_mirror_disk *disk)
637{
638	int error;
639
640	g_topology_assert();
641	error = g_mirror_write_metadata(disk, NULL);
642	if (error == 0) {
643		G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
644		    g_mirror_get_diskname(disk));
645	} else {
646		G_MIRROR_DEBUG(0,
647		    "Cannot clear metadata on disk %s (error=%d).",
648		    g_mirror_get_diskname(disk), error);
649	}
650	return (error);
651}
652
653void
654g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
655    struct g_mirror_metadata *md)
656{
657
658	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
659	md->md_version = G_MIRROR_VERSION;
660	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
661	md->md_mid = sc->sc_id;
662	md->md_all = sc->sc_ndisks;
663	md->md_slice = sc->sc_slice;
664	md->md_balance = sc->sc_balance;
665	md->md_genid = sc->sc_genid;
666	md->md_mediasize = sc->sc_mediasize;
667	md->md_sectorsize = sc->sc_sectorsize;
668	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
669	bzero(md->md_provider, sizeof(md->md_provider));
670	if (disk == NULL) {
671		md->md_did = arc4random();
672		md->md_priority = 0;
673		md->md_syncid = 0;
674		md->md_dflags = 0;
675		md->md_sync_offset = 0;
676		md->md_provsize = 0;
677	} else {
678		md->md_did = disk->d_id;
679		md->md_priority = disk->d_priority;
680		md->md_syncid = disk->d_sync.ds_syncid;
681		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
682		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
683			md->md_sync_offset = disk->d_sync.ds_offset_done;
684		else
685			md->md_sync_offset = 0;
686		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
687			strlcpy(md->md_provider,
688			    disk->d_consumer->provider->name,
689			    sizeof(md->md_provider));
690		}
691		md->md_provsize = disk->d_consumer->provider->mediasize;
692	}
693}
694
695void
696g_mirror_update_metadata(struct g_mirror_disk *disk)
697{
698	struct g_mirror_metadata md;
699	int error;
700
701	g_topology_assert();
702	g_mirror_fill_metadata(disk->d_softc, disk, &md);
703	error = g_mirror_write_metadata(disk, &md);
704	if (error == 0) {
705		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
706		    g_mirror_get_diskname(disk));
707	} else {
708		G_MIRROR_DEBUG(0,
709		    "Cannot update metadata on disk %s (error=%d).",
710		    g_mirror_get_diskname(disk), error);
711	}
712}
713
714static void
715g_mirror_bump_syncid(struct g_mirror_softc *sc)
716{
717	struct g_mirror_disk *disk;
718
719	g_topology_assert();
720	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
721	    ("%s called with no active disks (device=%s).", __func__,
722	    sc->sc_name));
723
724	sc->sc_syncid++;
725	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
726	    sc->sc_syncid);
727	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
728		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
729		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
730			disk->d_sync.ds_syncid = sc->sc_syncid;
731			g_mirror_update_metadata(disk);
732		}
733	}
734}
735
736static void
737g_mirror_bump_genid(struct g_mirror_softc *sc)
738{
739	struct g_mirror_disk *disk;
740
741	g_topology_assert();
742	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
743	    ("%s called with no active disks (device=%s).", __func__,
744	    sc->sc_name));
745
746	sc->sc_genid++;
747	G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
748	    sc->sc_genid);
749	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
750		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
751		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
752			disk->d_genid = sc->sc_genid;
753			g_mirror_update_metadata(disk);
754		}
755	}
756}
757
758static void
759g_mirror_idle(struct g_mirror_softc *sc)
760{
761	struct g_mirror_disk *disk;
762
763	if (sc->sc_provider == NULL || sc->sc_provider->acw == 0)
764		return;
765	sc->sc_idle = 1;
766	g_topology_lock();
767	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
768		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
769			continue;
770		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
771		    g_mirror_get_diskname(disk), sc->sc_name);
772		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
773		g_mirror_update_metadata(disk);
774	}
775	g_topology_unlock();
776}
777
778static void
779g_mirror_unidle(struct g_mirror_softc *sc)
780{
781	struct g_mirror_disk *disk;
782
783	sc->sc_idle = 0;
784	g_topology_lock();
785	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
786		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
787			continue;
788		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
789		    g_mirror_get_diskname(disk), sc->sc_name);
790		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
791		g_mirror_update_metadata(disk);
792	}
793	g_topology_unlock();
794}
795
796/*
797 * Return 1 if we should check if mirror is idling.
798 */
799static int
800g_mirror_check_idle(struct g_mirror_softc *sc)
801{
802	struct g_mirror_disk *disk;
803
804	if (sc->sc_idle)
805		return (0);
806	if (sc->sc_provider != NULL && sc->sc_provider->acw == 0)
807		return (0);
808	/*
809	 * Check if there are no in-flight requests.
810	 */
811	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
812		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
813			continue;
814		if (disk->d_consumer->index > 0)
815			return (0);
816	}
817	return (1);
818}
819
820static __inline int
821bintime_cmp(struct bintime *bt1, struct bintime *bt2)
822{
823
824	if (bt1->sec < bt2->sec)
825		return (-1);
826	else if (bt1->sec > bt2->sec)
827		return (1);
828	if (bt1->frac < bt2->frac)
829		return (-1);
830	else if (bt1->frac > bt2->frac)
831		return (1);
832	return (0);
833}
834
835static void
836g_mirror_update_delay(struct g_mirror_disk *disk, struct bio *bp)
837{
838
839	if (disk->d_softc->sc_balance != G_MIRROR_BALANCE_LOAD)
840		return;
841	binuptime(&disk->d_delay);
842	bintime_sub(&disk->d_delay, &bp->bio_t0);
843}
844
845static void
846g_mirror_done(struct bio *bp)
847{
848	struct g_mirror_softc *sc;
849
850	sc = bp->bio_from->geom->softc;
851	bp->bio_cflags |= G_MIRROR_BIO_FLAG_REGULAR;
852	mtx_lock(&sc->sc_queue_mtx);
853	bioq_disksort(&sc->sc_queue, bp);
854	wakeup(sc);
855	mtx_unlock(&sc->sc_queue_mtx);
856}
857
858static void
859g_mirror_regular_request(struct bio *bp)
860{
861	struct g_mirror_softc *sc;
862	struct g_mirror_disk *disk;
863	struct bio *pbp;
864
865	g_topology_assert_not();
866
867	bp->bio_from->index--;
868	pbp = bp->bio_parent;
869	sc = pbp->bio_to->geom->softc;
870	disk = bp->bio_from->private;
871	if (disk == NULL) {
872		g_topology_lock();
873		g_mirror_kill_consumer(sc, bp->bio_from);
874		g_topology_unlock();
875	} else {
876		g_mirror_update_delay(disk, bp);
877	}
878
879	pbp->bio_inbed++;
880	KASSERT(pbp->bio_inbed <= pbp->bio_children,
881	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
882	    pbp->bio_children));
883	if (bp->bio_error == 0 && pbp->bio_error == 0) {
884		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
885		g_destroy_bio(bp);
886		if (pbp->bio_children == pbp->bio_inbed) {
887			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
888			pbp->bio_completed = pbp->bio_length;
889			g_io_deliver(pbp, pbp->bio_error);
890		}
891		return;
892	} else if (bp->bio_error != 0) {
893		if (pbp->bio_error == 0)
894			pbp->bio_error = bp->bio_error;
895		G_MIRROR_LOGREQ(0, bp, "Request failed (error=%d).",
896		    bp->bio_error);
897		if (disk != NULL) {
898			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
899			g_mirror_event_send(disk,
900			    G_MIRROR_DISK_STATE_DISCONNECTED,
901			    G_MIRROR_EVENT_DONTWAIT);
902		}
903		switch (pbp->bio_cmd) {
904		case BIO_DELETE:
905		case BIO_WRITE:
906			pbp->bio_inbed--;
907			pbp->bio_children--;
908			break;
909		}
910	}
911	g_destroy_bio(bp);
912
913	switch (pbp->bio_cmd) {
914	case BIO_READ:
915		if (pbp->bio_children == pbp->bio_inbed) {
916			pbp->bio_error = 0;
917			mtx_lock(&sc->sc_queue_mtx);
918			bioq_disksort(&sc->sc_queue, pbp);
919			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
920			wakeup(sc);
921			mtx_unlock(&sc->sc_queue_mtx);
922		}
923		break;
924	case BIO_DELETE:
925	case BIO_WRITE:
926		if (pbp->bio_children == 0) {
927			/*
928			 * All requests failed.
929			 */
930		} else if (pbp->bio_inbed < pbp->bio_children) {
931			/* Do nothing. */
932			break;
933		} else if (pbp->bio_children == pbp->bio_inbed) {
934			/* Some requests succeeded. */
935			pbp->bio_error = 0;
936			pbp->bio_completed = pbp->bio_length;
937		}
938		g_io_deliver(pbp, pbp->bio_error);
939		break;
940	default:
941		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
942		break;
943	}
944}
945
946static void
947g_mirror_sync_done(struct bio *bp)
948{
949	struct g_mirror_softc *sc;
950
951	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
952	sc = bp->bio_from->geom->softc;
953	bp->bio_cflags |= G_MIRROR_BIO_FLAG_SYNC;
954	mtx_lock(&sc->sc_queue_mtx);
955	bioq_disksort(&sc->sc_queue, bp);
956	wakeup(sc);
957	mtx_unlock(&sc->sc_queue_mtx);
958}
959
960static void
961g_mirror_start(struct bio *bp)
962{
963	struct g_mirror_softc *sc;
964
965	sc = bp->bio_to->geom->softc;
966	/*
967	 * If sc == NULL or there are no valid disks, provider's error
968	 * should be set and g_mirror_start() should not be called at all.
969	 */
970	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
971	    ("Provider's error should be set (error=%d)(mirror=%s).",
972	    bp->bio_to->error, bp->bio_to->name));
973	G_MIRROR_LOGREQ(3, bp, "Request received.");
974
975	switch (bp->bio_cmd) {
976	case BIO_READ:
977	case BIO_WRITE:
978	case BIO_DELETE:
979		break;
980	case BIO_GETATTR:
981	default:
982		g_io_deliver(bp, EOPNOTSUPP);
983		return;
984	}
985	mtx_lock(&sc->sc_queue_mtx);
986	bioq_disksort(&sc->sc_queue, bp);
987	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
988	wakeup(sc);
989	mtx_unlock(&sc->sc_queue_mtx);
990}
991
992/*
993 * Send one synchronization request.
994 */
995static void
996g_mirror_sync_one(struct g_mirror_disk *disk)
997{
998	struct g_mirror_softc *sc;
999	struct bio *bp;
1000
1001	sc = disk->d_softc;
1002	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
1003	    ("Disk %s is not marked for synchronization.",
1004	    g_mirror_get_diskname(disk)));
1005
1006	bp = g_new_bio();
1007	if (bp == NULL)
1008		return;
1009	bp->bio_parent = NULL;
1010	bp->bio_cmd = BIO_READ;
1011	bp->bio_offset = disk->d_sync.ds_offset;
1012	bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
1013	bp->bio_cflags = 0;
1014	bp->bio_done = g_mirror_sync_done;
1015	bp->bio_data = disk->d_sync.ds_data;
1016	if (bp->bio_data == NULL) {
1017		g_destroy_bio(bp);
1018		return;
1019	}
1020	disk->d_sync.ds_offset += bp->bio_length;
1021	bp->bio_to = sc->sc_provider;
1022	G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
1023	disk->d_sync.ds_consumer->index++;
1024	g_io_request(bp, disk->d_sync.ds_consumer);
1025}
1026
1027static void
1028g_mirror_sync_request(struct bio *bp)
1029{
1030	struct g_mirror_softc *sc;
1031	struct g_mirror_disk *disk;
1032
1033	bp->bio_from->index--;
1034	sc = bp->bio_from->geom->softc;
1035	disk = bp->bio_from->private;
1036	if (disk == NULL) {
1037		g_topology_lock();
1038		g_mirror_kill_consumer(sc, bp->bio_from);
1039		g_topology_unlock();
1040		g_destroy_bio(bp);
1041		return;
1042	}
1043
1044	/*
1045	 * Synchronization request.
1046	 */
1047	switch (bp->bio_cmd) {
1048	case BIO_READ:
1049	    {
1050		struct g_consumer *cp;
1051
1052		if (bp->bio_error != 0) {
1053			G_MIRROR_LOGREQ(0, bp,
1054			    "Synchronization request failed (error=%d).",
1055			    bp->bio_error);
1056			g_destroy_bio(bp);
1057			return;
1058		}
1059		G_MIRROR_LOGREQ(3, bp,
1060		    "Synchronization request half-finished.");
1061		bp->bio_cmd = BIO_WRITE;
1062		bp->bio_cflags = 0;
1063		cp = disk->d_consumer;
1064		KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
1065		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1066		    cp->acr, cp->acw, cp->ace));
1067		cp->index++;
1068		g_io_request(bp, cp);
1069		return;
1070	    }
1071	case BIO_WRITE:
1072	    {
1073		struct g_mirror_disk_sync *sync;
1074
1075		if (bp->bio_error != 0) {
1076			G_MIRROR_LOGREQ(0, bp,
1077			    "Synchronization request failed (error=%d).",
1078			    bp->bio_error);
1079			g_destroy_bio(bp);
1080			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
1081			g_mirror_event_send(disk,
1082			    G_MIRROR_DISK_STATE_DISCONNECTED,
1083			    G_MIRROR_EVENT_DONTWAIT);
1084			return;
1085		}
1086		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
1087		sync = &disk->d_sync;
1088		sync->ds_offset_done = bp->bio_offset + bp->bio_length;
1089		g_destroy_bio(bp);
1090		if (sync->ds_resync != -1)
1091			break;
1092		if (sync->ds_offset_done == sc->sc_provider->mediasize) {
1093			/*
1094			 * Disk up-to-date, activate it.
1095			 */
1096			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
1097			    G_MIRROR_EVENT_DONTWAIT);
1098			return;
1099		} else if (sync->ds_offset_done % (MAXPHYS * 100) == 0) {
1100			/*
1101			 * Update offset_done on every 100 blocks.
1102			 * XXX: This should be configurable.
1103			 */
1104			g_topology_lock();
1105			g_mirror_update_metadata(disk);
1106			g_topology_unlock();
1107		}
1108		return;
1109	    }
1110	default:
1111		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
1112		    bp->bio_cmd, sc->sc_name));
1113		break;
1114	}
1115}
1116
1117static void
1118g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
1119{
1120	struct g_mirror_disk *disk;
1121	struct g_consumer *cp;
1122	struct bio *cbp;
1123
1124	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1125		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
1126			break;
1127	}
1128	if (disk == NULL) {
1129		if (bp->bio_error == 0)
1130			bp->bio_error = ENXIO;
1131		g_io_deliver(bp, bp->bio_error);
1132		return;
1133	}
1134	cbp = g_clone_bio(bp);
1135	if (cbp == NULL) {
1136		if (bp->bio_error == 0)
1137			bp->bio_error = ENOMEM;
1138		g_io_deliver(bp, bp->bio_error);
1139		return;
1140	}
1141	/*
1142	 * Fill in the component buf structure.
1143	 */
1144	cp = disk->d_consumer;
1145	cbp->bio_done = g_mirror_done;
1146	cbp->bio_to = cp->provider;
1147	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1148	KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
1149	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
1150	    cp->acw, cp->ace));
1151	cp->index++;
1152	g_io_request(cbp, cp);
1153}
1154
1155static void
1156g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
1157{
1158	struct g_mirror_disk *disk;
1159	struct g_consumer *cp;
1160	struct bio *cbp;
1161
1162	disk = g_mirror_get_disk(sc);
1163	if (disk == NULL) {
1164		if (bp->bio_error == 0)
1165			bp->bio_error = ENXIO;
1166		g_io_deliver(bp, bp->bio_error);
1167		return;
1168	}
1169	cbp = g_clone_bio(bp);
1170	if (cbp == NULL) {
1171		if (bp->bio_error == 0)
1172			bp->bio_error = ENOMEM;
1173		g_io_deliver(bp, bp->bio_error);
1174		return;
1175	}
1176	/*
1177	 * Fill in the component buf structure.
1178	 */
1179	cp = disk->d_consumer;
1180	cbp->bio_done = g_mirror_done;
1181	cbp->bio_to = cp->provider;
1182	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1183	KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
1184	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
1185	    cp->acw, cp->ace));
1186	cp->index++;
1187	g_io_request(cbp, cp);
1188}
1189
1190static void
1191g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
1192{
1193	struct g_mirror_disk *disk, *dp;
1194	struct g_consumer *cp;
1195	struct bio *cbp;
1196	struct bintime curtime;
1197
1198	binuptime(&curtime);
1199	/*
1200	 * Find a disk which the smallest load.
1201	 */
1202	disk = NULL;
1203	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
1204		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
1205			continue;
1206		/* If disk wasn't used for more than 2 sec, use it. */
1207		if (curtime.sec - dp->d_last_used.sec >= 2) {
1208			disk = dp;
1209			break;
1210		}
1211		if (disk == NULL ||
1212		    bintime_cmp(&dp->d_delay, &disk->d_delay) < 0) {
1213			disk = dp;
1214		}
1215	}
1216	cbp = g_clone_bio(bp);
1217	if (cbp == NULL) {
1218		if (bp->bio_error == 0)
1219			bp->bio_error = ENOMEM;
1220		g_io_deliver(bp, bp->bio_error);
1221		return;
1222	}
1223	/*
1224	 * Fill in the component buf structure.
1225	 */
1226	cp = disk->d_consumer;
1227	cbp->bio_done = g_mirror_done;
1228	cbp->bio_to = cp->provider;
1229	binuptime(&disk->d_last_used);
1230	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1231	KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
1232	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
1233	    cp->acw, cp->ace));
1234	cp->index++;
1235	g_io_request(cbp, cp);
1236}
1237
1238static void
1239g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
1240{
1241	struct bio_queue_head queue;
1242	struct g_mirror_disk *disk;
1243	struct g_consumer *cp;
1244	struct bio *cbp;
1245	off_t left, mod, offset, slice;
1246	u_char *data;
1247	u_int ndisks;
1248
1249	if (bp->bio_length <= sc->sc_slice) {
1250		g_mirror_request_round_robin(sc, bp);
1251		return;
1252	}
1253	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
1254	slice = bp->bio_length / ndisks;
1255	mod = slice % sc->sc_provider->sectorsize;
1256	if (mod != 0)
1257		slice += sc->sc_provider->sectorsize - mod;
1258	/*
1259	 * Allocate all bios before sending any request, so we can
1260	 * return ENOMEM in nice and clean way.
1261	 */
1262	left = bp->bio_length;
1263	offset = bp->bio_offset;
1264	data = bp->bio_data;
1265	bioq_init(&queue);
1266	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1267		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
1268			continue;
1269		cbp = g_clone_bio(bp);
1270		if (cbp == NULL) {
1271			for (cbp = bioq_first(&queue); cbp != NULL;
1272			    cbp = bioq_first(&queue)) {
1273				bioq_remove(&queue, cbp);
1274				g_destroy_bio(cbp);
1275			}
1276			if (bp->bio_error == 0)
1277				bp->bio_error = ENOMEM;
1278			g_io_deliver(bp, bp->bio_error);
1279			return;
1280		}
1281		bioq_insert_tail(&queue, cbp);
1282		cbp->bio_done = g_mirror_done;
1283		cbp->bio_caller1 = disk;
1284		cbp->bio_to = disk->d_consumer->provider;
1285		cbp->bio_offset = offset;
1286		cbp->bio_data = data;
1287		cbp->bio_length = MIN(left, slice);
1288		left -= cbp->bio_length;
1289		if (left == 0)
1290			break;
1291		offset += cbp->bio_length;
1292		data += cbp->bio_length;
1293	}
1294	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
1295		bioq_remove(&queue, cbp);
1296		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1297		disk = cbp->bio_caller1;
1298		cbp->bio_caller1 = NULL;
1299		cp = disk->d_consumer;
1300		KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
1301		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1302		    cp->acr, cp->acw, cp->ace));
1303		disk->d_consumer->index++;
1304		g_io_request(cbp, disk->d_consumer);
1305	}
1306}
1307
1308static void
1309g_mirror_register_request(struct bio *bp)
1310{
1311	struct g_mirror_softc *sc;
1312
1313	sc = bp->bio_to->geom->softc;
1314	switch (bp->bio_cmd) {
1315	case BIO_READ:
1316		switch (sc->sc_balance) {
1317		case G_MIRROR_BALANCE_LOAD:
1318			g_mirror_request_load(sc, bp);
1319			break;
1320		case G_MIRROR_BALANCE_PREFER:
1321			g_mirror_request_prefer(sc, bp);
1322			break;
1323		case G_MIRROR_BALANCE_ROUND_ROBIN:
1324			g_mirror_request_round_robin(sc, bp);
1325			break;
1326		case G_MIRROR_BALANCE_SPLIT:
1327			g_mirror_request_split(sc, bp);
1328			break;
1329		}
1330		return;
1331	case BIO_WRITE:
1332	case BIO_DELETE:
1333	    {
1334		struct g_mirror_disk *disk;
1335		struct g_mirror_disk_sync *sync;
1336		struct bio_queue_head queue;
1337		struct g_consumer *cp;
1338		struct bio *cbp;
1339
1340		if (sc->sc_idle)
1341			g_mirror_unidle(sc);
1342		/*
1343		 * Allocate all bios before sending any request, so we can
1344		 * return ENOMEM in nice and clean way.
1345		 */
1346		bioq_init(&queue);
1347		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1348			sync = &disk->d_sync;
1349			switch (disk->d_state) {
1350			case G_MIRROR_DISK_STATE_ACTIVE:
1351				break;
1352			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
1353				if (bp->bio_offset >= sync->ds_offset)
1354					continue;
1355				else if (bp->bio_offset + bp->bio_length >
1356				    sync->ds_offset_done &&
1357				    (bp->bio_offset < sync->ds_resync ||
1358				     sync->ds_resync == -1)) {
1359					sync->ds_resync = bp->bio_offset -
1360					    (bp->bio_offset % MAXPHYS);
1361				}
1362				break;
1363			default:
1364				continue;
1365			}
1366			cbp = g_clone_bio(bp);
1367			if (cbp == NULL) {
1368				for (cbp = bioq_first(&queue); cbp != NULL;
1369				    cbp = bioq_first(&queue)) {
1370					bioq_remove(&queue, cbp);
1371					g_destroy_bio(cbp);
1372				}
1373				if (bp->bio_error == 0)
1374					bp->bio_error = ENOMEM;
1375				g_io_deliver(bp, bp->bio_error);
1376				return;
1377			}
1378			bioq_insert_tail(&queue, cbp);
1379			cbp->bio_done = g_mirror_done;
1380			cp = disk->d_consumer;
1381			cbp->bio_caller1 = cp;
1382			cbp->bio_to = cp->provider;
1383			KASSERT(cp->acr == 1 && cp->acw == 1 && cp->ace == 1,
1384			    ("Consumer %s not opened (r%dw%de%d).",
1385			    cp->provider->name, cp->acr, cp->acw, cp->ace));
1386		}
1387		for (cbp = bioq_first(&queue); cbp != NULL;
1388		    cbp = bioq_first(&queue)) {
1389			bioq_remove(&queue, cbp);
1390			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1391			cp = cbp->bio_caller1;
1392			cbp->bio_caller1 = NULL;
1393			cp->index++;
1394			g_io_request(cbp, cp);
1395		}
1396		/*
1397		 * Bump syncid on first write.
1398		 */
1399		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) {
1400			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
1401			g_topology_lock();
1402			g_mirror_bump_syncid(sc);
1403			g_topology_unlock();
1404		}
1405		return;
1406	    }
1407	default:
1408		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
1409		    bp->bio_cmd, sc->sc_name));
1410		break;
1411	}
1412}
1413
1414static int
1415g_mirror_can_destroy(struct g_mirror_softc *sc)
1416{
1417	struct g_geom *gp;
1418	struct g_consumer *cp;
1419
1420	g_topology_assert();
1421	gp = sc->sc_geom;
1422	LIST_FOREACH(cp, &gp->consumer, consumer) {
1423		if (g_mirror_is_busy(sc, cp))
1424			return (0);
1425	}
1426	gp = sc->sc_sync.ds_geom;
1427	LIST_FOREACH(cp, &gp->consumer, consumer) {
1428		if (g_mirror_is_busy(sc, cp))
1429			return (0);
1430	}
1431	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
1432	    sc->sc_name);
1433	return (1);
1434}
1435
1436static int
1437g_mirror_try_destroy(struct g_mirror_softc *sc)
1438{
1439
1440	g_topology_lock();
1441	if (!g_mirror_can_destroy(sc)) {
1442		g_topology_unlock();
1443		return (0);
1444	}
1445	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) {
1446		g_topology_unlock();
1447		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
1448		    &sc->sc_worker);
1449		wakeup(&sc->sc_worker);
1450		sc->sc_worker = NULL;
1451	} else {
1452		g_mirror_destroy_device(sc);
1453		g_topology_unlock();
1454		free(sc, M_MIRROR);
1455	}
1456	return (1);
1457}
1458
1459/*
1460 * Worker thread.
1461 */
1462static void
1463g_mirror_worker(void *arg)
1464{
1465	struct g_mirror_softc *sc;
1466	struct g_mirror_disk *disk;
1467	struct g_mirror_disk_sync *sync;
1468	struct g_mirror_event *ep;
1469	struct bio *bp;
1470	u_int nreqs;
1471
1472	sc = arg;
1473	mtx_lock_spin(&sched_lock);
1474	sched_prio(curthread, PRIBIO);
1475	mtx_unlock_spin(&sched_lock);
1476
1477	nreqs = 0;
1478	for (;;) {
1479		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
1480		/*
1481		 * First take a look at events.
1482		 * This is important to handle events before any I/O requests.
1483		 */
1484		ep = g_mirror_event_get(sc);
1485		if (ep != NULL && g_topology_try_lock()) {
1486			g_mirror_event_remove(sc, ep);
1487			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
1488				/* Update only device status. */
1489				G_MIRROR_DEBUG(3,
1490				    "Running event for device %s.",
1491				    sc->sc_name);
1492				ep->e_error = 0;
1493				g_mirror_update_device(sc, 1);
1494			} else {
1495				/* Update disk status. */
1496				G_MIRROR_DEBUG(3, "Running event for disk %s.",
1497				     g_mirror_get_diskname(ep->e_disk));
1498				ep->e_error = g_mirror_update_disk(ep->e_disk,
1499				    ep->e_state);
1500				if (ep->e_error == 0)
1501					g_mirror_update_device(sc, 0);
1502			}
1503			g_topology_unlock();
1504			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
1505				KASSERT(ep->e_error == 0,
1506				    ("Error cannot be handled."));
1507				g_mirror_event_free(ep);
1508			} else {
1509				ep->e_flags |= G_MIRROR_EVENT_DONE;
1510				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
1511				    ep);
1512				mtx_lock(&sc->sc_events_mtx);
1513				wakeup(ep);
1514				mtx_unlock(&sc->sc_events_mtx);
1515			}
1516			if ((sc->sc_flags &
1517			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
1518				if (g_mirror_try_destroy(sc))
1519					kthread_exit(0);
1520			}
1521			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
1522			continue;
1523		}
1524		/*
1525		 * Now I/O requests.
1526		 */
1527		/* Get first request from the queue. */
1528		mtx_lock(&sc->sc_queue_mtx);
1529		bp = bioq_first(&sc->sc_queue);
1530		if (bp == NULL) {
1531			if (ep != NULL) {
1532				/*
1533				 * No I/O requests and topology lock was
1534				 * already held? Try again.
1535				 */
1536				mtx_unlock(&sc->sc_queue_mtx);
1537				continue;
1538			}
1539			if ((sc->sc_flags &
1540			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
1541				mtx_unlock(&sc->sc_queue_mtx);
1542				if (g_mirror_try_destroy(sc))
1543					kthread_exit(0);
1544				mtx_lock(&sc->sc_queue_mtx);
1545			}
1546		}
1547		if (sc->sc_sync.ds_ndisks > 0 &&
1548		    (bp == NULL || nreqs > g_mirror_reqs_per_sync)) {
1549			mtx_unlock(&sc->sc_queue_mtx);
1550			/*
1551			 * It is time for synchronization...
1552			 */
1553			nreqs = 0;
1554			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1555				if (disk->d_state !=
1556				    G_MIRROR_DISK_STATE_SYNCHRONIZING) {
1557					continue;
1558				}
1559				sync = &disk->d_sync;
1560				if (sync->ds_offset >=
1561				    sc->sc_provider->mediasize) {
1562					continue;
1563				}
1564				if (sync->ds_offset > sync->ds_offset_done)
1565					continue;
1566				if (sync->ds_resync != -1) {
1567					sync->ds_offset = sync->ds_resync;
1568					sync->ds_offset_done = sync->ds_resync;
1569					sync->ds_resync = -1;
1570				}
1571				g_mirror_sync_one(disk);
1572			}
1573			G_MIRROR_DEBUG(5, "%s: I'm here 2.", __func__);
1574			goto sleep;
1575		}
1576		if (bp == NULL) {
1577			if (g_mirror_check_idle(sc)) {
1578				u_int idletime;
1579
1580				idletime = g_mirror_idletime;
1581				if (idletime == 0)
1582					idletime = 1;
1583				idletime *= hz;
1584				if (msleep(sc, &sc->sc_queue_mtx, PRIBIO | PDROP,
1585				    "m:w1", idletime) == EWOULDBLOCK) {
1586					G_MIRROR_DEBUG(5, "%s: I'm here 3.",
1587					    __func__);
1588					/*
1589					 * No I/O requests in 'idletime' seconds,
1590					 * so mark components as clean.
1591					 */
1592					g_mirror_idle(sc);
1593				}
1594				G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
1595			} else {
1596				MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP,
1597				    "m:w2", 0);
1598				G_MIRROR_DEBUG(5, "%s: I'm here 5.", __func__);
1599			}
1600			continue;
1601		}
1602		nreqs++;
1603		bioq_remove(&sc->sc_queue, bp);
1604		mtx_unlock(&sc->sc_queue_mtx);
1605
1606		if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0) {
1607			g_mirror_regular_request(bp);
1608		} else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
1609			u_int timeout, sps;
1610
1611			g_mirror_sync_request(bp);
1612sleep:
1613			sps = g_mirror_syncs_per_sec;
1614			if (sps == 0) {
1615				G_MIRROR_DEBUG(5, "%s: I'm here 6.", __func__);
1616				continue;
1617			}
1618			if (ep != NULL) {
1619				/*
1620				 * We have some pending events, don't sleep now.
1621				 */
1622				G_MIRROR_DEBUG(5, "%s: I'm here 7.", __func__);
1623				continue;
1624			}
1625			mtx_lock(&sc->sc_queue_mtx);
1626			if (bioq_first(&sc->sc_queue) != NULL) {
1627				mtx_unlock(&sc->sc_queue_mtx);
1628				G_MIRROR_DEBUG(5, "%s: I'm here 8.", __func__);
1629				continue;
1630			}
1631			timeout = hz / sps;
1632			if (timeout == 0)
1633				timeout = 1;
1634			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w3",
1635			    timeout);
1636		} else {
1637			g_mirror_register_request(bp);
1638		}
1639		G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
1640	}
1641}
1642
1643/*
1644 * Open disk's consumer if needed.
1645 */
1646static void
1647g_mirror_update_access(struct g_mirror_disk *disk)
1648{
1649	struct g_provider *pp;
1650
1651	g_topology_assert();
1652
1653	pp = disk->d_softc->sc_provider;
1654	if (pp == NULL)
1655		return;
1656	if (pp->acw > 0) {
1657		if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
1658			G_MIRROR_DEBUG(1,
1659			    "Disk %s (device %s) marked as dirty.",
1660			    g_mirror_get_diskname(disk),
1661			    disk->d_softc->sc_name);
1662			disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
1663		}
1664	} else if (pp->acw == 0) {
1665		if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
1666			G_MIRROR_DEBUG(1,
1667			    "Disk %s (device %s) marked as clean.",
1668			    g_mirror_get_diskname(disk),
1669			    disk->d_softc->sc_name);
1670			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
1671		}
1672	}
1673}
1674
1675static void
1676g_mirror_sync_start(struct g_mirror_disk *disk)
1677{
1678	struct g_mirror_softc *sc;
1679	int error;
1680
1681	g_topology_assert();
1682
1683	sc = disk->d_softc;
1684	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
1685	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
1686	    sc->sc_state));
1687
1688	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
1689	    g_mirror_get_diskname(disk));
1690	disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
1691	KASSERT(disk->d_sync.ds_consumer == NULL,
1692	    ("Sync consumer already exists (device=%s, disk=%s).",
1693	    sc->sc_name, g_mirror_get_diskname(disk)));
1694	disk->d_sync.ds_consumer = g_new_consumer(sc->sc_sync.ds_geom);
1695	disk->d_sync.ds_consumer->private = disk;
1696	disk->d_sync.ds_consumer->index = 0;
1697	error = g_attach(disk->d_sync.ds_consumer, disk->d_softc->sc_provider);
1698	KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
1699	    disk->d_softc->sc_name, error));
1700	error = g_access(disk->d_sync.ds_consumer, 1, 0, 0);
1701	KASSERT(error == 0, ("Cannot open %s (error=%d).",
1702	    disk->d_softc->sc_name, error));
1703	disk->d_sync.ds_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
1704	sc->sc_sync.ds_ndisks++;
1705}
1706
1707/*
1708 * Stop synchronization process.
1709 * type: 0 - synchronization finished
1710 *       1 - synchronization stopped
1711 */
1712static void
1713g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
1714{
1715
1716	g_topology_assert();
1717	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
1718	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
1719	    g_mirror_disk_state2str(disk->d_state)));
1720	if (disk->d_sync.ds_consumer == NULL)
1721		return;
1722
1723	if (type == 0) {
1724		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
1725		    disk->d_softc->sc_name, g_mirror_get_diskname(disk));
1726	} else /* if (type == 1) */ {
1727		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
1728		    disk->d_softc->sc_name, g_mirror_get_diskname(disk));
1729	}
1730	g_mirror_kill_consumer(disk->d_softc, disk->d_sync.ds_consumer);
1731	free(disk->d_sync.ds_data, M_MIRROR);
1732	disk->d_sync.ds_consumer = NULL;
1733	disk->d_softc->sc_sync.ds_ndisks--;
1734	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
1735}
1736
1737static void
1738g_mirror_launch_provider(struct g_mirror_softc *sc)
1739{
1740	struct g_mirror_disk *disk;
1741	struct g_provider *pp;
1742
1743	g_topology_assert();
1744
1745	pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name);
1746	pp->mediasize = sc->sc_mediasize;
1747	pp->sectorsize = sc->sc_sectorsize;
1748	sc->sc_provider = pp;
1749	g_error_provider(pp, 0);
1750	G_MIRROR_DEBUG(0, "Device %s: provider %s launched.", sc->sc_name,
1751	    pp->name);
1752	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1753		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
1754			g_mirror_sync_start(disk);
1755	}
1756}
1757
1758static void
1759g_mirror_destroy_provider(struct g_mirror_softc *sc)
1760{
1761	struct g_mirror_disk *disk;
1762	struct bio *bp;
1763
1764	g_topology_assert();
1765	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
1766	    sc->sc_name));
1767
1768	g_error_provider(sc->sc_provider, ENXIO);
1769	mtx_lock(&sc->sc_queue_mtx);
1770	while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
1771		bioq_remove(&sc->sc_queue, bp);
1772		g_io_deliver(bp, ENXIO);
1773	}
1774	mtx_unlock(&sc->sc_queue_mtx);
1775	G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
1776	    sc->sc_provider->name);
1777	sc->sc_provider->flags |= G_PF_WITHER;
1778	g_orphan_provider(sc->sc_provider, ENXIO);
1779	sc->sc_provider = NULL;
1780	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1781		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
1782			g_mirror_sync_stop(disk, 1);
1783	}
1784}
1785
1786static void
1787g_mirror_go(void *arg)
1788{
1789	struct g_mirror_softc *sc;
1790
1791	sc = arg;
1792	G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
1793	g_mirror_event_send(sc, 0,
1794	    G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE);
1795}
1796
1797static u_int
1798g_mirror_determine_state(struct g_mirror_disk *disk)
1799{
1800	struct g_mirror_softc *sc;
1801	u_int state;
1802
1803	sc = disk->d_softc;
1804	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
1805		if ((disk->d_flags &
1806		    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
1807			/* Disk does not need synchronization. */
1808			state = G_MIRROR_DISK_STATE_ACTIVE;
1809		} else {
1810			if ((sc->sc_flags &
1811			     G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0  ||
1812			    (disk->d_flags &
1813			     G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
1814				/*
1815				 * We can start synchronization from
1816				 * the stored offset.
1817				 */
1818				state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
1819			} else {
1820				state = G_MIRROR_DISK_STATE_STALE;
1821			}
1822		}
1823	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
1824		/*
1825		 * Reset all synchronization data for this disk,
1826		 * because if it even was synchronized, it was
1827		 * synchronized to disks with different syncid.
1828		 */
1829		disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
1830		disk->d_sync.ds_offset = 0;
1831		disk->d_sync.ds_offset_done = 0;
1832		disk->d_sync.ds_syncid = sc->sc_syncid;
1833		if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
1834		    (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
1835			state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
1836		} else {
1837			state = G_MIRROR_DISK_STATE_STALE;
1838		}
1839	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
1840		/*
1841		 * Not good, NOT GOOD!
1842		 * It means that mirror was started on stale disks
1843		 * and more fresh disk just arrive.
1844		 * If there were writes, mirror is fucked up, sorry.
1845		 * I think the best choice here is don't touch
1846		 * this disk and inform the user laudly.
1847		 */
1848		G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
1849		    "disk (%s) arrives!! It will not be connected to the "
1850		    "running device.", sc->sc_name,
1851		    g_mirror_get_diskname(disk));
1852		g_mirror_destroy_disk(disk);
1853		state = G_MIRROR_DISK_STATE_NONE;
1854		/* Return immediately, because disk was destroyed. */
1855		return (state);
1856	}
1857	G_MIRROR_DEBUG(3, "State for %s disk: %s.",
1858	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
1859	return (state);
1860}
1861
1862/*
1863 * Update device state.
1864 */
1865static void
1866g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force)
1867{
1868	struct g_mirror_disk *disk;
1869	u_int state;
1870
1871	g_topology_assert();
1872
1873	switch (sc->sc_state) {
1874	case G_MIRROR_DEVICE_STATE_STARTING:
1875	    {
1876		struct g_mirror_disk *pdisk, *tdisk;
1877		u_int dirty, ndisks, genid, syncid;
1878
1879		KASSERT(sc->sc_provider == NULL,
1880		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
1881		/*
1882		 * Are we ready? We are, if all disks are connected or
1883		 * if we have any disks and 'force' is true.
1884		 */
1885		if ((force && g_mirror_ndisks(sc, -1) > 0) ||
1886		    sc->sc_ndisks == g_mirror_ndisks(sc, -1)) {
1887			;
1888		} else if (g_mirror_ndisks(sc, -1) == 0) {
1889			/*
1890			 * Disks went down during the starting phase, so
1891			 * destroy the device.
1892			 */
1893			root_mount_rel(sc->sc_rootmount);
1894			sc->sc_rootmount = NULL;
1895			callout_drain(&sc->sc_callout);
1896			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
1897			return;
1898		} else {
1899			return;
1900		}
1901
1902		root_mount_rel(sc->sc_rootmount);
1903		sc->sc_rootmount = NULL;
1904
1905		/*
1906		 * Activate all disks with the biggest syncid.
1907		 */
1908		if (force) {
1909			/*
1910			 * If 'force' is true, we have been called due to
1911			 * timeout, so don't bother canceling timeout.
1912			 */
1913			ndisks = 0;
1914			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1915				if ((disk->d_flags &
1916				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
1917					ndisks++;
1918				}
1919			}
1920			if (ndisks == 0) {
1921				/* No valid disks found, destroy device. */
1922				sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
1923				return;
1924			}
1925		} else {
1926			/* Cancel timeout. */
1927			callout_drain(&sc->sc_callout);
1928		}
1929
1930		/*
1931		 * Find the biggest genid.
1932		 */
1933		genid = 0;
1934		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1935			if (disk->d_genid > genid)
1936				genid = disk->d_genid;
1937		}
1938		sc->sc_genid = genid;
1939		/*
1940		 * Remove all disks without the biggest genid.
1941		 */
1942		LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) {
1943			if (disk->d_genid < genid) {
1944				G_MIRROR_DEBUG(0,
1945				    "Component %s (device %s) broken, skipping.",
1946				    g_mirror_get_diskname(disk), sc->sc_name);
1947				g_mirror_destroy_disk(disk);
1948			}
1949		}
1950
1951		/*
1952		 * Find the biggest syncid.
1953		 */
1954		syncid = 0;
1955		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1956			if (disk->d_sync.ds_syncid > syncid)
1957				syncid = disk->d_sync.ds_syncid;
1958		}
1959
1960		/*
1961		 * Look for dirty disks: if all disks with the biggest
1962		 * syncid are dirty, choose the one with the biggest
1963		 * priority and rebuild the rest.
1964		 */
1965		/*
1966		 * Find the number of dirty disks with the biggest syncid.
1967		 * Find the number of disks with the biggest syncid.
1968		 * While here, find a disk with the biggest priority.
1969		 */
1970		dirty = ndisks = 0;
1971		pdisk = NULL;
1972		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1973			if (disk->d_sync.ds_syncid != syncid)
1974				continue;
1975			if ((disk->d_flags &
1976			    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
1977				continue;
1978			}
1979			ndisks++;
1980			if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
1981				dirty++;
1982				if (pdisk == NULL ||
1983				    pdisk->d_priority < disk->d_priority) {
1984					pdisk = disk;
1985				}
1986			}
1987		}
1988		if (dirty == 0) {
1989			/* No dirty disks at all, great. */
1990		} else if (dirty == ndisks) {
1991			/*
1992			 * Force synchronization for all dirty disks except one
1993			 * with the biggest priority.
1994			 */
1995			KASSERT(pdisk != NULL, ("pdisk == NULL"));
1996			G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a "
1997			    "master disk for synchronization.",
1998			    g_mirror_get_diskname(pdisk), sc->sc_name);
1999			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2000				if (disk->d_sync.ds_syncid != syncid)
2001					continue;
2002				if ((disk->d_flags &
2003				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
2004					continue;
2005				}
2006				KASSERT((disk->d_flags &
2007				    G_MIRROR_DISK_FLAG_DIRTY) != 0,
2008				    ("Disk %s isn't marked as dirty.",
2009				    g_mirror_get_diskname(disk)));
2010				/* Skip the disk with the biggest priority. */
2011				if (disk == pdisk)
2012					continue;
2013				disk->d_sync.ds_syncid = 0;
2014			}
2015		} else if (dirty < ndisks) {
2016			/*
2017			 * Force synchronization for all dirty disks.
2018			 * We have some non-dirty disks.
2019			 */
2020			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2021				if (disk->d_sync.ds_syncid != syncid)
2022					continue;
2023				if ((disk->d_flags &
2024				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
2025					continue;
2026				}
2027				if ((disk->d_flags &
2028				    G_MIRROR_DISK_FLAG_DIRTY) == 0) {
2029					continue;
2030				}
2031				disk->d_sync.ds_syncid = 0;
2032			}
2033		}
2034
2035		/* Reset hint. */
2036		sc->sc_hint = NULL;
2037		sc->sc_syncid = syncid;
2038		if (force) {
2039			/* Remember to bump syncid on first write. */
2040			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2041		}
2042		state = G_MIRROR_DEVICE_STATE_RUNNING;
2043		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
2044		    sc->sc_name, g_mirror_device_state2str(sc->sc_state),
2045		    g_mirror_device_state2str(state));
2046		sc->sc_state = state;
2047		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2048			state = g_mirror_determine_state(disk);
2049			g_mirror_event_send(disk, state,
2050			    G_MIRROR_EVENT_DONTWAIT);
2051			if (state == G_MIRROR_DISK_STATE_STALE)
2052				sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2053		}
2054		wakeup(&g_mirror_class);
2055		break;
2056	    }
2057	case G_MIRROR_DEVICE_STATE_RUNNING:
2058		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
2059		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2060			/*
2061			 * No active disks or no disks at all,
2062			 * so destroy device.
2063			 */
2064			if (sc->sc_provider != NULL)
2065				g_mirror_destroy_provider(sc);
2066			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2067			break;
2068		} else if (g_mirror_ndisks(sc,
2069		    G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
2070		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2071			/*
2072			 * We have active disks, launch provider if it doesn't
2073			 * exist.
2074			 */
2075			if (sc->sc_provider == NULL)
2076				g_mirror_launch_provider(sc);
2077		}
2078		/*
2079		 * Genid should be bumped immediately, so do it here.
2080		 */
2081		if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) {
2082			sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID;
2083			g_mirror_bump_genid(sc);
2084		}
2085		break;
2086	default:
2087		KASSERT(1 == 0, ("Wrong device state (%s, %s).",
2088		    sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
2089		break;
2090	}
2091}
2092
2093/*
2094 * Update disk state and device state if needed.
2095 */
2096#define	DISK_STATE_CHANGED()	G_MIRROR_DEBUG(1,			\
2097	"Disk %s state changed from %s to %s (device %s).",		\
2098	g_mirror_get_diskname(disk),					\
2099	g_mirror_disk_state2str(disk->d_state),				\
2100	g_mirror_disk_state2str(state), sc->sc_name)
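/*
 * Handle a disk state-change event.  The requested state encodes the
 * scenario (new disk, synchronization finished, stale disk, disk lost,
 * metadata wipe requested); most cases assert the allowed previous
 * states, then update the disk, its metadata and, where needed, the
 * device itself.
 */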
2101static int
2102g_mirror_update_disk(struct g_mirror_disk *disk, u_int state)
2103{
2104	struct g_mirror_softc *sc;
2105
2106	g_topology_assert();
2107
2108	sc = disk->d_softc;
2109again:
2110	G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.",
2111	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state),
2112	    g_mirror_disk_state2str(state));
2113	switch (state) {
2114	case G_MIRROR_DISK_STATE_NEW:
2115		/*
2116		 * Possible scenarios:
2117		 * 1. New disk arrives.
2118		 */
2119		/* Previous state should be NONE. */
2120		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE,
2121		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2122		    g_mirror_disk_state2str(disk->d_state)));
2123		DISK_STATE_CHANGED();
2124
2125		disk->d_state = state;
2126		if (LIST_EMPTY(&sc->sc_disks))
2127			LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next);
2128		else {
2129			struct g_mirror_disk *dp;
2130
2131			LIST_FOREACH(dp, &sc->sc_disks, d_next) {
2132				if (disk->d_priority >= dp->d_priority) {
2133					LIST_INSERT_BEFORE(dp, disk, d_next);
2134					dp = NULL;
2135					break;
2136				}
2137				if (LIST_NEXT(dp, d_next) == NULL)
2138					break;
2139			}
2140			if (dp != NULL)
2141				LIST_INSERT_AFTER(dp, disk, d_next);
2142		}
2143		G_MIRROR_DEBUG(0, "Device %s: provider %s detected.",
2144		    sc->sc_name, g_mirror_get_diskname(disk));
2145		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
2146			break;
2147		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2148		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2149		    g_mirror_device_state2str(sc->sc_state),
2150		    g_mirror_get_diskname(disk),
2151		    g_mirror_disk_state2str(disk->d_state)));
2152		state = g_mirror_determine_state(disk);
2153		if (state != G_MIRROR_DISK_STATE_NONE)
2154			goto again;
2155		break;
2156	case G_MIRROR_DISK_STATE_ACTIVE:
2157		/*
2158		 * Possible scenarios:
2159		 * 1. New disk does not need synchronization.
2160		 * 2. Synchronization process finished successfully.
2161		 */
2162		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2163		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2164		    g_mirror_device_state2str(sc->sc_state),
2165		    g_mirror_get_diskname(disk),
2166		    g_mirror_disk_state2str(disk->d_state)));
2167		/* Previous state should be NEW or SYNCHRONIZING. */
2168		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW ||
2169		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2170		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2171		    g_mirror_disk_state2str(disk->d_state)));
2172		DISK_STATE_CHANGED();
2173
2174		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
2175			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2176		else if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
2177			disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING;
2178			disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC;
2179			g_mirror_sync_stop(disk, 0);
2180		}
2181		disk->d_state = state;
2182		disk->d_sync.ds_offset = 0;
2183		disk->d_sync.ds_offset_done = 0;
2184		g_mirror_update_access(disk);
2185		g_mirror_update_metadata(disk);
2186		G_MIRROR_DEBUG(0, "Device %s: provider %s activated.",
2187		    sc->sc_name, g_mirror_get_diskname(disk));
2188		break;
2189	case G_MIRROR_DISK_STATE_STALE:
2190		/*
2191		 * Possible scenarios:
2192		 * 1. Stale disk was connected.
2193		 */
2194		/* Previous state should be NEW. */
2195		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2196		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2197		    g_mirror_disk_state2str(disk->d_state)));
2198		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2199		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2200		    g_mirror_device_state2str(sc->sc_state),
2201		    g_mirror_get_diskname(disk),
2202		    g_mirror_disk_state2str(disk->d_state)));
2203		/*
2204		 * STALE state is only possible if device is marked
2205		 * NOAUTOSYNC.
2206		 */
2207		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
2208		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2209		    g_mirror_device_state2str(sc->sc_state),
2210		    g_mirror_get_diskname(disk),
2211		    g_mirror_disk_state2str(disk->d_state)));
2212		DISK_STATE_CHANGED();
2213
2214		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2215		disk->d_state = state;
2216		g_mirror_update_metadata(disk);
2217		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
2218		    sc->sc_name, g_mirror_get_diskname(disk));
2219		break;
2220	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
2221		/*
2222		 * Possible scenarios:
2223		 * 1. Disk which needs synchronization was connected.
2224		 */
2225		/* Previous state should be NEW. */
2226		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2227		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2228		    g_mirror_disk_state2str(disk->d_state)));
2229		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2230		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2231		    g_mirror_device_state2str(sc->sc_state),
2232		    g_mirror_get_diskname(disk),
2233		    g_mirror_disk_state2str(disk->d_state)));
2234		DISK_STATE_CHANGED();
2235
2236		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
2237			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2238		disk->d_state = state;
2239		if (sc->sc_provider != NULL) {
2240			g_mirror_sync_start(disk);
2241			g_mirror_update_metadata(disk);
2242		}
2243		break;
2244	case G_MIRROR_DISK_STATE_DISCONNECTED:
2245		/*
2246		 * Possible scenarios:
2247		 * 1. Device wasn't running yet, but a disk disappeared.
2248		 * 2. Disk was active and disappeared.
2249		 * 3. Disk disappeared during the synchronization process.
2250		 */
2251		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
2252			/*
2253			 * Previous state should be ACTIVE, STALE or
2254			 * SYNCHRONIZING.
2255			 */
2256			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
2257			    disk->d_state == G_MIRROR_DISK_STATE_STALE ||
2258			    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2259			    ("Wrong disk state (%s, %s).",
2260			    g_mirror_get_diskname(disk),
2261			    g_mirror_disk_state2str(disk->d_state)));
2262		} else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
2263			/* Previous state should be NEW. */
2264			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2265			    ("Wrong disk state (%s, %s).",
2266			    g_mirror_get_diskname(disk),
2267			    g_mirror_disk_state2str(disk->d_state)));
2268			/*
2269			 * Reset bumping syncid if disk disappeared in STARTING
2270			 * state.
2271			 */
2272			if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0)
2273				sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
2274#ifdef	INVARIANTS
2275		} else {
2276			KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2277			    sc->sc_name,
2278			    g_mirror_device_state2str(sc->sc_state),
2279			    g_mirror_get_diskname(disk),
2280			    g_mirror_disk_state2str(disk->d_state)));
2281#endif
2282		}
2283		DISK_STATE_CHANGED();
2284		G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.",
2285		    sc->sc_name, g_mirror_get_diskname(disk));
2286
2287		g_mirror_destroy_disk(disk);
2288		break;
2289	case G_MIRROR_DISK_STATE_DESTROY:
2290	    {
2291		int error;
2292
2293		error = g_mirror_clear_metadata(disk);
2294		if (error != 0)
2295			return (error);
2296		DISK_STATE_CHANGED();
2297		G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.",
2298		    sc->sc_name, g_mirror_get_diskname(disk));
2299
2300		g_mirror_destroy_disk(disk);
2301		sc->sc_ndisks--;
2302		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2303			g_mirror_update_metadata(disk);
2304		}
2305		break;
2306	    }
2307	default:
2308		KASSERT(1 == 0, ("Unknown state (%u).", state));
2309		break;
2310	}
2311	return (0);
2312}
2313#undef	DISK_STATE_CHANGED
2314
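/*
 * Read and decode mirror metadata from the last sector of the consumer's
 * provider.  The consumer is opened for reading around the I/O and the
 * topology lock is dropped while g_read_data() runs; bad magic, a newer
 * on-disk version or an MD5 mismatch is rejected.  A typical caller
 * attaches a temporary consumer first, roughly as g_mirror_taste() does:
 *
 *	cp = g_new_consumer(gp);
 *	g_attach(cp, pp);
 *	error = g_mirror_read_metadata(cp, &md);
 *	g_detach(cp);
 *	g_destroy_consumer(cp);
 */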
2315int
2316g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md)
2317{
2318	struct g_provider *pp;
2319	u_char *buf;
2320	int error;
2321
2322	g_topology_assert();
2323
2324	error = g_access(cp, 1, 0, 0);
2325	if (error != 0)
2326		return (error);
2327	pp = cp->provider;
2328	g_topology_unlock();
2329	/* Metadata is stored in the last sector. */
2330	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2331	    &error);
2332	g_topology_lock();
2333	g_access(cp, -1, 0, 0);
2334	if (error != 0) {
2335		G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).",
2336		    cp->provider->name, error);
2337		if (buf != NULL)
2338			g_free(buf);
2339		return (error);
2340	}
2341
2342	/* Decode metadata. */
2343	error = mirror_metadata_decode(buf, md);
2344	g_free(buf);
2345	if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0)
2346		return (EINVAL);
2347	if (md->md_version > G_MIRROR_VERSION) {
2348		G_MIRROR_DEBUG(0,
2349		    "Kernel module is too old to handle metadata from %s.",
2350		    cp->provider->name);
2351		return (EINVAL);
2352	}
2353	if (error != 0) {
2354		G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
2355		    cp->provider->name);
2356		return (error);
2357	}
2358
2359	return (0);
2360}
2361
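/*
 * Sanity-check metadata read from a component against the configured
 * device: reject duplicate disk ids and any mismatch in md_all,
 * md_slice, md_balance, md_mediasize or md_sectorsize, as well as
 * providers that are too small, have an incompatible sector size or
 * carry unknown device/disk flags.
 */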
2362static int
2363g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp,
2364    struct g_mirror_metadata *md)
2365{
2366
2367	if (g_mirror_id2disk(sc, md->md_did) != NULL) {
2368		G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.",
2369		    pp->name, md->md_did);
2370		return (EEXIST);
2371	}
2372	if (md->md_all != sc->sc_ndisks) {
2373		G_MIRROR_DEBUG(1,
2374		    "Invalid '%s' field on disk %s (device %s), skipping.",
2375		    "md_all", pp->name, sc->sc_name);
2376		return (EINVAL);
2377	}
2378	if (md->md_slice != sc->sc_slice) {
2379		G_MIRROR_DEBUG(1,
2380		    "Invalid '%s' field on disk %s (device %s), skipping.",
2381		    "md_slice", pp->name, sc->sc_name);
2382		return (EINVAL);
2383	}
2384	if (md->md_balance != sc->sc_balance) {
2385		G_MIRROR_DEBUG(1,
2386		    "Invalid '%s' field on disk %s (device %s), skipping.",
2387		    "md_balance", pp->name, sc->sc_name);
2388		return (EINVAL);
2389	}
2390	if (md->md_mediasize != sc->sc_mediasize) {
2391		G_MIRROR_DEBUG(1,
2392		    "Invalid '%s' field on disk %s (device %s), skipping.",
2393		    "md_mediasize", pp->name, sc->sc_name);
2394		return (EINVAL);
2395	}
2396	if (sc->sc_mediasize > pp->mediasize) {
2397		G_MIRROR_DEBUG(1,
2398		    "Invalid size of disk %s (device %s), skipping.", pp->name,
2399		    sc->sc_name);
2400		return (EINVAL);
2401	}
2402	if (md->md_sectorsize != sc->sc_sectorsize) {
2403		G_MIRROR_DEBUG(1,
2404		    "Invalid '%s' field on disk %s (device %s), skipping.",
2405		    "md_sectorsize", pp->name, sc->sc_name);
2406		return (EINVAL);
2407	}
2408	if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2409		G_MIRROR_DEBUG(1,
2410		    "Invalid sector size of disk %s (device %s), skipping.",
2411		    pp->name, sc->sc_name);
2412		return (EINVAL);
2413	}
2414	if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) {
2415		G_MIRROR_DEBUG(1,
2416		    "Invalid device flags on disk %s (device %s), skipping.",
2417		    pp->name, sc->sc_name);
2418		return (EINVAL);
2419	}
2420	if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) {
2421		G_MIRROR_DEBUG(1,
2422		    "Invalid disk flags on disk %s (device %s), skipping.",
2423		    pp->name, sc->sc_name);
2424		return (EINVAL);
2425	}
2426	return (0);
2427}
2428
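/*
 * Attach a tasted provider to the mirror: validate its metadata, refuse
 * components with an outdated genid on a running device, initialize the
 * disk and queue a NEW state event for it (waiting for the result).
 * Metadata written in an older format is upgraded on the spot.
 */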
2429int
2430g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
2431    struct g_mirror_metadata *md)
2432{
2433	struct g_mirror_disk *disk;
2434	int error;
2435
2436	g_topology_assert();
2437	G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name);
2438
2439	error = g_mirror_check_metadata(sc, pp, md);
2440	if (error != 0)
2441		return (error);
2442	if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING &&
2443	    md->md_genid < sc->sc_genid) {
2444		G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.",
2445		    pp->name, sc->sc_name);
2446		return (EINVAL);
2447	}
2448	disk = g_mirror_init_disk(sc, pp, md, &error);
2449	if (disk == NULL)
2450		return (error);
2451	error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW,
2452	    G_MIRROR_EVENT_WAIT);
2453	if (error != 0)
2454		return (error);
2455	if (md->md_version < G_MIRROR_VERSION) {
2456		G_MIRROR_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
2457		    pp->name, md->md_version, G_MIRROR_VERSION);
2458		g_mirror_update_metadata(disk);
2459	}
2460	return (0);
2461}
2462
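/*
 * GEOM access method for the mirror provider.  Besides refusing new
 * opens on a dying or empty device, it marks every ACTIVE disk dirty
 * when the first writer opens the provider and clean again when the
 * last writer closes it, updating the on-disk metadata each time.
 */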
2463static int
2464g_mirror_access(struct g_provider *pp, int acr, int acw, int ace)
2465{
2466	struct g_mirror_softc *sc;
2467	struct g_mirror_disk *disk;
2468	int dcr, dcw, dce;
2469
2470	g_topology_assert();
2471	G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
2472	    acw, ace);
2473
2474	dcr = pp->acr + acr;
2475	dcw = pp->acw + acw;
2476	dce = pp->ace + ace;
2477
2478	sc = pp->geom->softc;
2479	if (sc == NULL || LIST_EMPTY(&sc->sc_disks) ||
2480	    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
2481		if (acr <= 0 && acw <= 0 && ace <= 0)
2482			return (0);
2483		else
2484			return (ENXIO);
2485	}
2486	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2487		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
2488			continue;
2489		/*
2490		 * Mark disk as dirty on open and unmark on close.
2491		 */
2492		if (pp->acw == 0 && dcw > 0) {
2493			G_MIRROR_DEBUG(1,
2494			    "Disk %s (device %s) marked as dirty.",
2495			    g_mirror_get_diskname(disk), sc->sc_name);
2496			disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
2497			g_mirror_update_metadata(disk);
2498		} else if (pp->acw > 0 && dcw == 0) {
2499			G_MIRROR_DEBUG(1,
2500			    "Disk %s (device %s) marked as clean.",
2501			    g_mirror_get_diskname(disk), sc->sc_name);
2502			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2503			g_mirror_update_metadata(disk);
2504		}
2505	}
2506	return (0);
2507}
2508
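/*
 * Create the mirror device from tasted metadata: an "action" geom that
 * handles regular I/O, a ".sync" geom used for synchronization
 * consumers, the g_mirror_worker kernel thread and the startup timeout
 * that calls g_mirror_go() if some components never show up.
 */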
2509static struct g_geom *
2510g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md)
2511{
2512	struct g_mirror_softc *sc;
2513	struct g_geom *gp;
2514	int error, timeout;
2515
2516	g_topology_assert();
2517	G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name,
2518	    md->md_mid);
2519
2520	/* One disk is minimum. */
2521	if (md->md_all < 1)
2522		return (NULL);
2523	/*
2524	 * Action geom.
2525	 */
2526	gp = g_new_geomf(mp, "%s", md->md_name);
2527	sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO);
2528	gp->start = g_mirror_start;
2529	gp->orphan = g_mirror_orphan;
2530	gp->access = g_mirror_access;
2531	gp->dumpconf = g_mirror_dumpconf;
2532
2533	sc->sc_id = md->md_mid;
2534	sc->sc_slice = md->md_slice;
2535	sc->sc_balance = md->md_balance;
2536	sc->sc_mediasize = md->md_mediasize;
2537	sc->sc_sectorsize = md->md_sectorsize;
2538	sc->sc_ndisks = md->md_all;
2539	sc->sc_flags = md->md_mflags;
2540	sc->sc_bump_id = 0;
2541	sc->sc_idle = 0;
2542	bioq_init(&sc->sc_queue);
2543	mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF);
2544	LIST_INIT(&sc->sc_disks);
2545	TAILQ_INIT(&sc->sc_events);
2546	mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF);
2547	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
2548	sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING;
2549	gp->softc = sc;
2550	sc->sc_geom = gp;
2551	sc->sc_provider = NULL;
2552	/*
2553	 * Synchronization geom.
2554	 */
2555	gp = g_new_geomf(mp, "%s.sync", md->md_name);
2556	gp->softc = sc;
2557	gp->orphan = g_mirror_orphan;
2558	sc->sc_sync.ds_geom = gp;
2559	sc->sc_sync.ds_ndisks = 0;
2560	error = kthread_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0,
2561	    "g_mirror %s", md->md_name);
2562	if (error != 0) {
2563		G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.",
2564		    sc->sc_name);
2565		g_destroy_geom(sc->sc_sync.ds_geom);
2566		mtx_destroy(&sc->sc_events_mtx);
2567		mtx_destroy(&sc->sc_queue_mtx);
2568		g_destroy_geom(sc->sc_geom);
2569		free(sc, M_MIRROR);
2570		return (NULL);
2571	}
2572
2573	G_MIRROR_DEBUG(0, "Device %s created (id=%u).", sc->sc_name, sc->sc_id);
2574
2575	sc->sc_rootmount = root_mount_hold("GMIRROR");
2576	/*
2577	 * Run timeout.
2578	 */
2579	timeout = g_mirror_timeout * hz;
2580	callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc);
2581	return (sc->sc_geom);
2582}
2583
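/*
 * Tear the device down.  Unless 'force' is set, an open provider makes
 * this fail with EBUSY; with 'force' the destruction proceeds anyway.
 * The DESTROY and WAIT flags are set, the worker thread is woken up and
 * waited for, and only then are the geoms and the softc freed.  The
 * shutdown handler below uses the forced form:
 *
 *	g_mirror_destroy(gp->softc, 1);
 */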
2584int
2585g_mirror_destroy(struct g_mirror_softc *sc, boolean_t force)
2586{
2587	struct g_provider *pp;
2588
2589	g_topology_assert();
2590
2591	if (sc == NULL)
2592		return (ENXIO);
2593	pp = sc->sc_provider;
2594	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
2595		if (force) {
2596			G_MIRROR_DEBUG(1, "Device %s is still open, so it "
2597			    "can't be definitely removed.", pp->name);
2598			    "cannot be removed cleanly.", pp->name);
2599			G_MIRROR_DEBUG(1,
2600			    "Device %s is still open (r%dw%de%d).", pp->name,
2601			    pp->acr, pp->acw, pp->ace);
2602			return (EBUSY);
2603		}
2604	}
2605
2606	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2607	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT;
2608	g_topology_unlock();
2609	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
2610	mtx_lock(&sc->sc_queue_mtx);
2611	wakeup(sc);
2612	mtx_unlock(&sc->sc_queue_mtx);
2613	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
2614	while (sc->sc_worker != NULL)
2615		tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5);
2616	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
2617	g_topology_lock();
2618	g_mirror_destroy_device(sc);
2619	free(sc, M_MIRROR);
2620	return (0);
2621}
2622
2623static void
2624g_mirror_taste_orphan(struct g_consumer *cp)
2625{
2626
2627	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2628	    cp->provider->name));
2629}
2630
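/*
 * Taste method: read metadata from the offered provider through a
 * throw-away geom/consumer pair, skip providers whose hardcoded name or
 * size does not match or that are marked inactive, then either find the
 * existing device with that name (rejecting an id mismatch) or create a
 * new one, and finally add the disk to it.
 */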
2631static struct g_geom *
2632g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
2633{
2634	struct g_mirror_metadata md;
2635	struct g_mirror_softc *sc;
2636	struct g_consumer *cp;
2637	struct g_geom *gp;
2638	int error;
2639
2640	g_topology_assert();
2641	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
2642	G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);
2643
2644	gp = g_new_geomf(mp, "mirror:taste");
2645	/*
2646	 * This orphan function should never be called.
2647	 */
2648	gp->orphan = g_mirror_taste_orphan;
2649	cp = g_new_consumer(gp);
2650	g_attach(cp, pp);
2651	error = g_mirror_read_metadata(cp, &md);
2652	g_detach(cp);
2653	g_destroy_consumer(cp);
2654	g_destroy_geom(gp);
2655	if (error != 0)
2656		return (NULL);
2657	gp = NULL;
2658
2659	if (md.md_provider[0] != '\0' && strcmp(md.md_provider, pp->name) != 0)
2660		return (NULL);
2661	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
2662		return (NULL);
2663	if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
2664		G_MIRROR_DEBUG(0,
2665		    "Device %s: provider %s marked as inactive, skipping.",
2666		    md.md_name, pp->name);
2667		return (NULL);
2668	}
2669	if (g_mirror_debug >= 2)
2670		mirror_metadata_dump(&md);
2671
2672	/*
2673	 * Check whether the device already exists.
2674	 */
2675	sc = NULL;
2676	LIST_FOREACH(gp, &mp->geom, geom) {
2677		sc = gp->softc;
2678		if (sc == NULL)
2679			continue;
2680		if (sc->sc_sync.ds_geom == gp)
2681			continue;
2682		if (strcmp(md.md_name, sc->sc_name) != 0)
2683			continue;
2684		if (md.md_mid != sc->sc_id) {
2685			G_MIRROR_DEBUG(0, "Device %s already configured.",
2686			    sc->sc_name);
2687			return (NULL);
2688		}
2689		break;
2690	}
2691	if (gp == NULL) {
2692		gp = g_mirror_create(mp, &md);
2693		if (gp == NULL) {
2694			G_MIRROR_DEBUG(0, "Cannot create device %s.",
2695			    md.md_name);
2696			return (NULL);
2697		}
2698		sc = gp->softc;
2699	}
2700	G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
2701	error = g_mirror_add_disk(sc, pp, &md);
2702	if (error != 0) {
2703		G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
2704		    pp->name, gp->name, error);
2705		if (LIST_EMPTY(&sc->sc_disks))
2706			g_mirror_destroy(sc, 1);
2707		return (NULL);
2708	}
2709	return (gp);
2710}
2711
2712static int
2713g_mirror_destroy_geom(struct gctl_req *req __unused,
2714    struct g_class *mp __unused, struct g_geom *gp)
2715{
2716
2717	return (g_mirror_destroy(gp->softc, 0));
2718}
2719
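/*
 * dumpconf method: emit the XML fragments for the GEOM configuration
 * tree.  Per consumer it reports the component's id, synchronization
 * progress, syncid/genid, flags, priority and state; per geom it
 * reports the device id, syncid/genid, flags, slice, balance algorithm,
 * component count and overall state (STARTING, COMPLETE or DEGRADED).
 */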
2720static void
2721g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2722    struct g_consumer *cp, struct g_provider *pp)
2723{
2724	struct g_mirror_softc *sc;
2725
2726	g_topology_assert();
2727
2728	sc = gp->softc;
2729	if (sc == NULL)
2730		return;
2731	/* Skip synchronization geom. */
2732	if (gp == sc->sc_sync.ds_geom)
2733		return;
2734	if (pp != NULL) {
2735		/* Nothing here. */
2736	} else if (cp != NULL) {
2737		struct g_mirror_disk *disk;
2738
2739		disk = cp->private;
2740		if (disk == NULL)
2741			return;
2742		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id);
2743		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
2744			sbuf_printf(sb, "%s<Synchronized>", indent);
2745			if (disk->d_sync.ds_offset_done == 0)
2746				sbuf_printf(sb, "0%%");
2747			else {
2748				sbuf_printf(sb, "%u%%",
2749				    (u_int)((disk->d_sync.ds_offset_done * 100) /
2750				    sc->sc_provider->mediasize));
2751			}
2752			sbuf_printf(sb, "</Synchronized>\n");
2753		}
2754		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
2755		    disk->d_sync.ds_syncid);
2756		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent,
2757		    disk->d_genid);
2758		sbuf_printf(sb, "%s<Flags>", indent);
2759		if (disk->d_flags == 0)
2760			sbuf_printf(sb, "NONE");
2761		else {
2762			int first = 1;
2763
2764#define	ADD_FLAG(flag, name)	do {					\
2765	if ((disk->d_flags & (flag)) != 0) {				\
2766		if (!first)						\
2767			sbuf_printf(sb, ", ");				\
2768		else							\
2769			first = 0;					\
2770		sbuf_printf(sb, name);					\
2771	}								\
2772} while (0)
2773			ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY");
2774			ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED");
2775			ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE");
2776			ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING,
2777			    "SYNCHRONIZING");
2778			ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
2779#undef	ADD_FLAG
2780		}
2781		sbuf_printf(sb, "</Flags>\n");
2782		sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent,
2783		    disk->d_priority);
2784		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
2785		    g_mirror_disk_state2str(disk->d_state));
2786	} else {
2787		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
2788		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
2789		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
2790		sbuf_printf(sb, "%s<Flags>", indent);
2791		if (sc->sc_flags == 0)
2792			sbuf_printf(sb, "NONE");
2793		else {
2794			int first = 1;
2795
2796#define	ADD_FLAG(flag, name)	do {					\
2797	if ((sc->sc_flags & (flag)) != 0) {				\
2798		if (!first)						\
2799			sbuf_printf(sb, ", ");				\
2800		else							\
2801			first = 0;					\
2802		sbuf_printf(sb, name);					\
2803	}								\
2804} while (0)
2805			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
2806#undef	ADD_FLAG
2807		}
2808		sbuf_printf(sb, "</Flags>\n");
2809		sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent,
2810		    (u_int)sc->sc_slice);
2811		sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent,
2812		    balance_name(sc->sc_balance));
2813		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
2814		    sc->sc_ndisks);
2815		sbuf_printf(sb, "%s<State>", indent);
2816		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
2817			sbuf_printf(sb, "%s", "STARTING");
2818		else if (sc->sc_ndisks ==
2819		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE))
2820			sbuf_printf(sb, "%s", "COMPLETE");
2821		else
2822			sbuf_printf(sb, "%s", "DEGRADED");
2823		sbuf_printf(sb, "</State>\n");
2824	}
2825}
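
/*
 * For illustration only, the device-level fragment produced above looks
 * roughly like this (the values are made up):
 *
 *	<ID>123456</ID>
 *	<SyncID>1</SyncID>
 *	<GenID>0</GenID>
 *	<Flags>NONE</Flags>
 *	<Slice>4096</Slice>
 *	<Balance>round-robin</Balance>
 *	<Components>2</Components>
 *	<State>COMPLETE</State>
 */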
2826
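/*
 * shutdown_post_sync event handler registered in g_mirror_init(): it
 * forcibly destroys every mirror device in the class while the system
 * is going down.
 */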
2827static void
2828g_mirror_shutdown(void *arg, int howto)
2829{
2830	struct g_class *mp;
2831	struct g_geom *gp, *gp2;
2832
2833	mp = arg;
2834	DROP_GIANT();
2835	g_topology_lock();
2836	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
2837		if (gp->softc == NULL)
2838			continue;
2839		g_mirror_destroy(gp->softc, 1);
2840	}
2841	g_topology_unlock();
2842	PICKUP_GIANT();
2843#if 0
2844	tsleep(&gp, PRIBIO, "m:shutdown", hz * 20);
2845#endif
2846}
2847
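/*
 * Class init/fini: register and deregister the shutdown event handler
 * used above.
 */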
2848static void
2849g_mirror_init(struct g_class *mp)
2850{
2851
2852	g_mirror_ehtag = EVENTHANDLER_REGISTER(shutdown_post_sync,
2853	    g_mirror_shutdown, mp, SHUTDOWN_PRI_FIRST);
2854	if (g_mirror_ehtag == NULL)
2855		G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event.");
2856}
2857
2858static void
2859g_mirror_fini(struct g_class *mp)
2860{
2861
2862	if (g_mirror_ehtag == NULL)
2863		return;
2864	EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_ehtag);
2865}
2866
2867DECLARE_GEOM_CLASS(g_mirror_class, g_mirror);
2868