/* g_mirror.c, FreeBSD revision 134344 */
1/*-
2 * Copyright (c) 2004 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/geom/mirror/g_mirror.c 134344 2004-08-26 12:42:47Z pjd $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/module.h>
34#include <sys/limits.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/bio.h>
38#include <sys/sysctl.h>
39#include <sys/malloc.h>
40#include <sys/bitstring.h>
41#include <vm/uma.h>
42#include <machine/atomic.h>
43#include <geom/geom.h>
44#include <sys/proc.h>
45#include <sys/kthread.h>
46#include <geom/mirror/g_mirror.h>
47
48
static MALLOC_DEFINE(M_MIRROR, "mirror data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, "GEOM_MIRROR stuff");
/* Verbosity of G_MIRROR_DEBUG()/G_MIRROR_LOGREQ() messages (levels 0-4 used). */
u_int g_mirror_debug = 0;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RW, &g_mirror_debug, 0,
    "Debug level");
/* Seconds to wait for all mirror components to appear. */
static u_int g_mirror_timeout = 8;
TUNABLE_INT("kern.geom.mirror.timeout", &g_mirror_timeout);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RW, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
/* Ratio of regular I/O requests to synchronization requests. */
static u_int g_mirror_reqs_per_sync = 5;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, reqs_per_sync, CTLFLAG_RW,
    &g_mirror_reqs_per_sync, 0,
    "Number of regular I/O requests per synchronization request");
/* Upper bound on synchronization requests issued per second. */
static u_int g_mirror_syncs_per_sec = 100;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, syncs_per_sec, CTLFLAG_RW,
    &g_mirror_syncs_per_sec, 0,
    "Number of synchronizations requests per second");

/*
 * msleep() wrapper which logs (at debug level 4) when a thread goes to
 * sleep on 'ident' and when it is woken up again.
 */
#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)
74
75
/* g_class methods implemented later in this file. */
static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static g_taste_t g_mirror_taste;

/* GEOM class descriptor for gmirror; registered by the geom framework. */
struct g_class g_mirror_class = {
	.name = G_MIRROR_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_mirror_config,
	.taste = g_mirror_taste,
	.destroy_geom = g_mirror_destroy_geom
};

/* Forward declarations for helpers used before their definitions. */
static void g_mirror_destroy_provider(struct g_mirror_softc *sc);
static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
static void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force);
static void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type);
95
96
97static const char *
98g_mirror_disk_state2str(int state)
99{
100
101	switch (state) {
102	case G_MIRROR_DISK_STATE_NONE:
103		return ("NONE");
104	case G_MIRROR_DISK_STATE_NEW:
105		return ("NEW");
106	case G_MIRROR_DISK_STATE_ACTIVE:
107		return ("ACTIVE");
108	case G_MIRROR_DISK_STATE_STALE:
109		return ("STALE");
110	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
111		return ("SYNCHRONIZING");
112	case G_MIRROR_DISK_STATE_DISCONNECTED:
113		return ("DISCONNECTED");
114	case G_MIRROR_DISK_STATE_DESTROY:
115		return ("DESTROY");
116	default:
117		return ("INVALID");
118	}
119}
120
121static const char *
122g_mirror_device_state2str(int state)
123{
124
125	switch (state) {
126	case G_MIRROR_DEVICE_STATE_STARTING:
127		return ("STARTING");
128	case G_MIRROR_DEVICE_STATE_RUNNING:
129		return ("RUNNING");
130	default:
131		return ("INVALID");
132	}
133}
134
135static const char *
136g_mirror_get_diskname(struct g_mirror_disk *disk)
137{
138
139	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
140		return ("[unknown]");
141	return (disk->d_name);
142}
143
/*
 * --- Event handling functions ---
 * Events in geom_mirror are used to maintain disk and device state
 * from a single thread, which simplifies locking.
 */
/*
 * Release the memory allocated for an event structure.
 */
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

	free(ep, M_MIRROR);
}
155
/*
 * Allocate a new event, queue it for the worker thread and wake the
 * worker up.  'arg' is a disk pointer, or a softc pointer when
 * G_MIRROR_EVENT_DEVICE is set in 'flags'.  Unless
 * G_MIRROR_EVENT_DONTWAIT is given, drop the topology lock and sleep
 * until the worker marks the event G_MIRROR_EVENT_DONE, then return
 * the event's error code.
 */
int
g_mirror_event_send(void *arg, int state, int flags)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	/* The worker thread sleeps on 'sc' under sc_queue_mtx. */
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
		return (0);
	g_topology_assert();
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	g_topology_unlock();
	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		/* PDROP releases sc_events_mtx; 5s timeout re-checks DONE. */
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	/* Don't even try to use 'sc' here, because it could be already dead. */
	g_topology_lock();
	error = ep->e_error;
	g_mirror_event_free(ep);
	return (error);
}
200
201static struct g_mirror_event *
202g_mirror_event_get(struct g_mirror_softc *sc)
203{
204	struct g_mirror_event *ep;
205
206	mtx_lock(&sc->sc_events_mtx);
207	ep = TAILQ_FIRST(&sc->sc_events);
208	if (ep != NULL)
209		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
210	mtx_unlock(&sc->sc_events_mtx);
211	return (ep);
212}
213
214static void
215g_mirror_event_cancel(struct g_mirror_disk *disk)
216{
217	struct g_mirror_softc *sc;
218	struct g_mirror_event *ep, *tmpep;
219
220	g_topology_assert();
221
222	sc = disk->d_softc;
223	mtx_lock(&sc->sc_events_mtx);
224	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
225		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
226			continue;
227		if (ep->e_disk != disk)
228			continue;
229		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
230		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
231			g_mirror_event_free(ep);
232		else {
233			ep->e_error = ECANCELED;
234			wakeup(ep);
235		}
236	}
237	mtx_unlock(&sc->sc_events_mtx);
238}
239
240/*
241 * Return the number of disks in given state.
242 * If state is equal to -1, count all connected disks.
243 */
244u_int
245g_mirror_ndisks(struct g_mirror_softc *sc, int state)
246{
247	struct g_mirror_disk *disk;
248	u_int n = 0;
249
250	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
251		if (state == -1 || disk->d_state == state)
252			n++;
253	}
254	return (n);
255}
256
257/*
258 * Find a disk in mirror by its disk ID.
259 */
260static struct g_mirror_disk *
261g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
262{
263	struct g_mirror_disk *disk;
264
265	g_topology_assert();
266
267	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
268		if (disk->d_id == id)
269			return (disk);
270	}
271	return (NULL);
272}
273
274static u_int
275g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
276{
277	struct bio *bp;
278	u_int nreqs = 0;
279
280	mtx_lock(&sc->sc_queue_mtx);
281	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
282		if (bp->bio_from == cp)
283			nreqs++;
284	}
285	mtx_unlock(&sc->sc_queue_mtx);
286	return (nreqs);
287}
288
289static int
290g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
291{
292
293	if (cp->nstart != cp->nend) {
294		G_MIRROR_DEBUG(2,
295		    "I/O requests for %s exist, can't destroy it now.",
296		    cp->provider->name);
297		return (1);
298	}
299	if (g_mirror_nrequests(sc, cp) > 0) {
300		G_MIRROR_DEBUG(2,
301		    "I/O requests for %s in queue, can't destroy it now.",
302		    cp->provider->name);
303		return (1);
304	}
305	return (0);
306}
307
/*
 * Detach and destroy consumer 'cp' unless it is still busy.  In the
 * busy case only its private pointer is cleared (so completions know
 * the disk is gone) and destruction is left for a later call.
 */
static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	G_MIRROR_DEBUG(2, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}
321
322static int
323g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
324{
325	int error;
326
327	g_topology_assert();
328	KASSERT(disk->d_consumer == NULL,
329	    ("Disk already connected (device %s).", disk->d_softc->sc_name));
330
331	disk->d_consumer = g_new_consumer(disk->d_softc->sc_geom);
332	disk->d_consumer->private = disk;
333	error = g_attach(disk->d_consumer, pp);
334	if (error != 0)
335		return (error);
336	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
337	return (0);
338}
339
/*
 * Close and destroy consumer 'cp' (NULL is accepted and ignored).  An
 * attached consumer first drops whatever access counts it holds; a
 * never-attached one is simply destroyed.
 */
static void
g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL) {
		G_MIRROR_DEBUG(2, "Disk %s disconnected.", cp->provider->name);
		if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) {
			G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d",
			    cp->provider->name, -cp->acr, -cp->acw, -cp->ace,
			    0);
			/* Drop every access count the consumer holds. */
			g_access(cp, -cp->acr, -cp->acw, -cp->ace);
		}
		g_mirror_kill_consumer(sc, cp);
	} else {
		/* Never attached; nothing to close. */
		g_destroy_consumer(cp);
	}
}
361
/*
 * Initialize disk. This means allocate memory, create consumer, attach it
 * to the provider and open access (r1w1e1) to it.
 * NOTE(review): no access is actually opened in this function; only the
 * consumer is created and attached -- confirm whether the sentence above
 * is stale.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	/* Seed the in-core disk state from the on-disk metadata. */
	disk->d_id = md->md_did;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_priority = md->md_priority;
	disk->d_delay.sec = 0;
	disk->d_delay.frac = 0;
	binuptime(&disk->d_last_used);
	disk->d_flags = md->md_dflags;
	/* A stored provider name marks the disk as hardcoded. */
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL) {
		/* g_mirror_disconnect_consumer() tolerates a NULL consumer. */
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		free(disk, M_MIRROR);
	}
	return (NULL);
}
407
/*
 * Remove 'disk' from its mirror and free all resources it holds.
 * Synchronization is stopped first if it is in progress.  Any state
 * other than NEW/STALE/ACTIVE/SYNCHRONIZING is a logic error.
 */
static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert();

	LIST_REMOVE(disk, d_next);
	/* Drop queued events which still reference this disk. */
	g_mirror_event_cancel(disk);
	sc = disk->d_softc;
	/* The round-robin hint must not point at a freed disk. */
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk, 1);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_STALE:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		free(disk, M_MIRROR);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}
436
/*
 * Tear the whole device down: destroy the provider, every disk, all
 * pending events, the synchronization geom and finally the mirror geom
 * itself.
 */
static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;
	struct g_consumer *cp, *tmpcp;

	g_topology_assert();

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		g_mirror_destroy_disk(disk);
	}
	/* Cancel all remaining events; wake any threads waiting on them. */
	while ((ep = g_mirror_event_get(sc)) != NULL) {
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			/* DONE lets the waiter in g_mirror_event_send() exit. */
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);
	gp->softc = NULL;

	/* Release the consumers used for synchronization. */
	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
		g_mirror_disconnect_consumer(sc, cp);
	}
	sc->sc_sync.ds_geom->softc = NULL;
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
}
479
/*
 * Orphan method: the provider under one of our consumers went away.
 * Request a syncid bump on the first write and disconnect the disk.
 */
static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	/* Already being torn down by g_mirror_kill_consumer(). */
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_syncid = G_MIRROR_BUMP_ON_FIRST_WRITE;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}
494
/*
 * Spoil method: the provider was opened for writing behind our back.
 * Bump the syncid immediately and disconnect the disk.
 */
static void
g_mirror_spoiled(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	/* Already being torn down by g_mirror_kill_consumer(). */
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_syncid = G_MIRROR_BUMP_IMMEDIATELY;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}
509
/*
 * Function should return the next active disk on the list.
 * It is possible that it will be the same disk as given.
 * If there are no active disks on list, NULL is returned.
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

	/*
	 * Walk the list circularly, starting just after 'disk', until an
	 * ACTIVE disk is found or we come all the way back to 'disk'.
	 */
	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		/* Fell off the tail: wrap around to the list head. */
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	/* We may have stopped back at 'disk' itself, which need not be active. */
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}
531
532static struct g_mirror_disk *
533g_mirror_get_disk(struct g_mirror_softc *sc)
534{
535	struct g_mirror_disk *disk;
536
537	if (sc->sc_hint == NULL) {
538		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
539		if (sc->sc_hint == NULL)
540			return (NULL);
541	}
542	disk = sc->sc_hint;
543	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
544		disk = g_mirror_find_next(sc, disk);
545		if (disk == NULL)
546			return (NULL);
547	}
548	sc->sc_hint = g_mirror_find_next(sc, disk);
549	return (disk);
550}
551
/*
 * Write metadata 'md' into the last sector of the disk's provider; a
 * NULL 'md' writes an all-zero sector (used to clear metadata).  The
 * consumer is opened for writing (r0w1e1) around the write unless the
 * disk is marked dirty, in which case it is expected to be open
 * already.  On write failure the disk is scheduled for disconnection
 * with an immediate syncid bump.
 */
static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int close = 0, error = 0;

	g_topology_assert();

	sc = disk->d_softc;
	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	/* Metadata lives in the provider's last sector. */
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
	/*
	 * Open consumer if it wasn't opened and remember to close it.
	 */
	if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
		error = g_access(cp, 0, 1, 1);
		G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d",
		    cp->provider->name, 0, 1, 1, error);
		if (error == 0)
			close = 1;
#ifdef	INVARIANTS
	/* The else arm exists only to assert the consumer is already open. */
	} else {
		KASSERT(cp->acw > 0 && cp->ace > 0,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
#endif
	}
	if (error == 0) {
		if (md != NULL)
			mirror_metadata_encode(md, sector);
		/* Drop the topology lock around the actual I/O. */
		g_topology_unlock();
		error = g_write_data(cp, offset, sector, length);
		g_topology_lock();
	}
	free(sector, M_MIRROR);
	if (close) {
		g_access(cp, 0, -1, -1);
		G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d",
		    cp->provider->name, 0, -1, -1, 0);
	}
	if (error != 0) {
		/* Metadata write failed; drop the disk from the mirror. */
		disk->d_softc->sc_bump_syncid = G_MIRROR_BUMP_IMMEDIATELY;
		g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
		    G_MIRROR_EVENT_DONTWAIT);
	}
	return (error);
}
607
608static int
609g_mirror_clear_metadata(struct g_mirror_disk *disk)
610{
611	int error;
612
613	g_topology_assert();
614	error = g_mirror_write_metadata(disk, NULL);
615	if (error == 0) {
616		G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
617		    g_mirror_get_diskname(disk));
618	} else {
619		G_MIRROR_DEBUG(0,
620		    "Cannot clear metadata on disk %s (error=%d).",
621		    g_mirror_get_diskname(disk), error);
622	}
623	return (error);
624}
625
/*
 * Fill 'md' with the device-wide settings of 'sc' plus the per-disk
 * fields of 'disk'.  When disk == NULL a fresh identity is generated:
 * random disk ID and zeroed per-disk state.
 */
void
g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{

	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
	md->md_version = G_MIRROR_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_mid = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_slice = sc->sc_slice;
	md->md_balance = sc->sc_balance;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
	bzero(md->md_provider, sizeof(md->md_provider));
	if (disk == NULL) {
		/* New disk: random identity, clean per-disk state. */
		md->md_did = arc4random();
		md->md_priority = 0;
		md->md_syncid = 0;
		md->md_dflags = 0;
		md->md_sync_offset = 0;
	} else {
		md->md_did = disk->d_id;
		md->md_priority = disk->d_priority;
		md->md_syncid = disk->d_sync.ds_syncid;
		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
		/* Persist sync progress only while actually synchronizing. */
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			md->md_sync_offset = disk->d_sync.ds_offset_done;
		else
			md->md_sync_offset = 0;
		/* Hardcoded disks remember their provider name on-disk. */
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
			strlcpy(md->md_provider,
			    disk->d_consumer->provider->name,
			    sizeof(md->md_provider));
		}
	}
}
664
665void
666g_mirror_update_metadata(struct g_mirror_disk *disk)
667{
668	struct g_mirror_metadata md;
669	int error;
670
671	g_topology_assert();
672	g_mirror_fill_metadata(disk->d_softc, disk, &md);
673	error = g_mirror_write_metadata(disk, &md);
674	if (error == 0) {
675		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
676		    g_mirror_get_diskname(disk));
677	} else {
678		G_MIRROR_DEBUG(0,
679		    "Cannot update metadata on disk %s (error=%d).",
680		    g_mirror_get_diskname(disk), error);
681	}
682}
683
/*
 * Increase the device's synchronization ID and store the new value in
 * the metadata of every ACTIVE and SYNCHRONIZING disk -- presumably so
 * that disks left holding an older syncid can later be recognized as
 * out of date.  Must only be called with at least one active disk.
 */
static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert();
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			/* Persist the new syncid immediately. */
			g_mirror_update_metadata(disk);
		}
	}
}
705
706static __inline int
707bintime_cmp(struct bintime *bt1, struct bintime *bt2)
708{
709
710	if (bt1->sec < bt2->sec)
711		return (-1);
712	else if (bt1->sec > bt2->sec)
713		return (1);
714	if (bt1->frac < bt2->frac)
715		return (-1);
716	else if (bt1->frac > bt2->frac)
717		return (1);
718	return (0);
719}
720
721static void
722g_mirror_update_delay(struct g_mirror_disk *disk, struct bio *bp)
723{
724
725	if (disk->d_softc->sc_balance != G_MIRROR_BALANCE_LOAD)
726		return;
727	binuptime(&disk->d_delay);
728	bintime_sub(&disk->d_delay, &bp->bio_t0);
729}
730
/*
 * Completion callback for regular (mirrored) I/O: tag the bio as a
 * regular request and hand it to the worker thread via the queue.
 */
static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags |= G_MIRROR_BIO_FLAG_REGULAR;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	/* Wake the worker thread sleeping on 'sc'. */
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}
743
/*
 * Handle completion of one component request 'bp', a clone of parent
 * 'pbp'.  The parent finishes when bio_inbed reaches bio_children.  A
 * failed component disconnects its disk; for writes/deletes the failed
 * child is also removed from the accounting, so the parent can still
 * succeed if any component wrote the data.  A read whose (sole)
 * children all failed is queued back to the worker with its error
 * cleared, to be dispatched again.
 */
static void
g_mirror_regular_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	pbp = bp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		/* Disk is gone; finish tearing down the orphaned consumer. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
	} else {
		/* Record service time for the LOAD balance algorithm. */
		g_mirror_update_delay(disk, bp);
	}

	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (bp->bio_error == 0 && pbp->bio_error == 0) {
		/* Success: deliver the parent once all children are in. */
		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
		g_destroy_bio(bp);
		if (pbp->bio_children == pbp->bio_inbed) {
			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
			pbp->bio_completed = pbp->bio_length;
			g_io_deliver(pbp, pbp->bio_error);
		}
		return;
	} else if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		G_MIRROR_LOGREQ(0, bp, "Request failed (error=%d).",
		    bp->bio_error);
		if (disk != NULL) {
			/* A failing component is dropped from the mirror. */
			sc->sc_bump_syncid = G_MIRROR_BUMP_IMMEDIATELY;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
		}
		switch (pbp->bio_cmd) {
		case BIO_DELETE:
		case BIO_WRITE:
			/*
			 * Forget the failed child entirely, so the
			 * remaining children alone decide the outcome.
			 */
			pbp->bio_inbed--;
			pbp->bio_children--;
			break;
		}
	}
	g_destroy_bio(bp);

	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (pbp->bio_children == pbp->bio_inbed) {
			/*
			 * The read failed; clear the error and hand the
			 * parent back to the worker, which dispatches it
			 * again (on another disk, since this one was
			 * disconnected above).
			 */
			pbp->bio_error = 0;
			mtx_lock(&sc->sc_queue_mtx);
			bioq_disksort(&sc->sc_queue, pbp);
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_inbed < pbp->bio_children) {
			/* Do nothing. */
			break;
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* Some requests succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		}
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
		break;
	}
}
830
/*
 * Completion callback for synchronization I/O: tag the bio as a sync
 * request and hand it to the worker thread via the queue.
 */
static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags |= G_MIRROR_BIO_FLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	/* Wake the worker thread sleeping on 'sc'. */
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}
844
/*
 * g_start method: accept READ, WRITE and DELETE requests from upper
 * layers and queue them for the worker thread; everything else
 * (including BIO_GETATTR) is rejected with EOPNOTSUPP.
 */
static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_GETATTR:
		/* FALLTHROUGH */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	/* Defer the request to the worker thread. */
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}
876
/*
 * Send one synchronization request: read the next chunk (at most
 * G_MIRROR_SYNC_BLOCK_SIZE) of the mirror provider; the completion
 * path (g_mirror_sync_request()) rewrites it to the syncing disk.
 */
static void
g_mirror_sync_one(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct bio *bp;

	sc = disk->d_softc;
	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Disk %s is not marked for synchronization.",
	    g_mirror_get_diskname(disk)));

	bp = g_new_bio();
	/* Allocation failure silently skips this round of syncing. */
	if (bp == NULL)
		return;
	bp->bio_parent = NULL;
	bp->bio_cmd = BIO_READ;
	bp->bio_offset = disk->d_sync.ds_offset;
	/* Don't read past the end of the media. */
	bp->bio_length = MIN(G_MIRROR_SYNC_BLOCK_SIZE,
	    sc->sc_mediasize - bp->bio_offset);
	bp->bio_cflags = 0;
	bp->bio_done = g_mirror_sync_done;
	bp->bio_data = disk->d_sync.ds_data;
	if (bp->bio_data == NULL) {
		/* No sync buffer allocated -- confirm when this can happen. */
		g_destroy_bio(bp);
		return;
	}
	disk->d_sync.ds_offset += bp->bio_length;
	bp->bio_to = sc->sc_provider;
	G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
	g_io_request(bp, disk->d_sync.ds_consumer);
}
911
/*
 * Handle a completed synchronization bio.  A successful READ of mirror
 * data is converted into a WRITE to the disk being synchronized; a
 * successful WRITE advances ds_offset_done, checkpoints it into the
 * metadata every 100 blocks, and activates the disk once the whole
 * media has been copied.  A failed WRITE disconnects the disk.
 */
static void
g_mirror_sync_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;

	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		/* Disk is gone; finish destroying the orphaned consumer. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		g_destroy_bio(bp);
		return;
	}

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;

		if (bp->bio_error != 0) {
			/* Read error: drop this request (no disconnect). */
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			return;
		}
		/* Reuse the bio to write the data to the syncing disk. */
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		cp = disk->d_consumer;
		KASSERT(cp->acr == 0 && cp->acw == 1 && cp->ace == 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
		if (bp->bio_error != 0) {
			/* Write error: the syncing disk is dropped. */
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			sc->sc_bump_syncid = G_MIRROR_BUMP_IMMEDIATELY;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		disk->d_sync.ds_offset_done = bp->bio_offset + bp->bio_length;
		g_destroy_bio(bp);
		if (disk->d_sync.ds_offset_done == sc->sc_provider->mediasize) {
			/*
			 * Disk up-to-date, activate it.
			 */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		} else if ((disk->d_sync.ds_offset_done %
		    (G_MIRROR_SYNC_BLOCK_SIZE * 100)) == 0) {
			/*
			 * Update offset_done on every 100 blocks.
			 * XXX: This should be configurable.
			 */
			g_topology_lock();
			g_mirror_update_metadata(disk);
			g_topology_unlock();
		}
		return;
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}
992
993static void
994g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
995{
996	struct g_mirror_disk *disk;
997	struct g_consumer *cp;
998	struct bio *cbp;
999
1000	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1001		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
1002			break;
1003	}
1004	if (disk == NULL) {
1005		if (bp->bio_error == 0)
1006			bp->bio_error = ENXIO;
1007		g_io_deliver(bp, bp->bio_error);
1008		return;
1009	}
1010	cbp = g_clone_bio(bp);
1011	if (cbp == NULL) {
1012		if (bp->bio_error == 0)
1013			bp->bio_error = ENOMEM;
1014		g_io_deliver(bp, bp->bio_error);
1015		return;
1016	}
1017	/*
1018	 * Fill in the component buf structure.
1019	 */
1020	cp = disk->d_consumer;
1021	cbp->bio_done = g_mirror_done;
1022	cbp->bio_to = cp->provider;
1023	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1024	KASSERT(cp->acr > 0 && cp->ace > 0,
1025	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
1026	    cp->acw, cp->ace));
1027	g_io_request(cbp, cp);
1028}
1029
1030static void
1031g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
1032{
1033	struct g_mirror_disk *disk;
1034	struct g_consumer *cp;
1035	struct bio *cbp;
1036
1037	disk = g_mirror_get_disk(sc);
1038	if (disk == NULL) {
1039		if (bp->bio_error == 0)
1040			bp->bio_error = ENXIO;
1041		g_io_deliver(bp, bp->bio_error);
1042		return;
1043	}
1044	cbp = g_clone_bio(bp);
1045	if (cbp == NULL) {
1046		if (bp->bio_error == 0)
1047			bp->bio_error = ENOMEM;
1048		g_io_deliver(bp, bp->bio_error);
1049		return;
1050	}
1051	/*
1052	 * Fill in the component buf structure.
1053	 */
1054	cp = disk->d_consumer;
1055	cbp->bio_done = g_mirror_done;
1056	cbp->bio_to = cp->provider;
1057	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1058	KASSERT(cp->acr > 0 && cp->ace > 0,
1059	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
1060	    cp->acw, cp->ace));
1061	g_io_request(cbp, cp);
1062}
1063
1064static void
1065g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
1066{
1067	struct g_mirror_disk *disk, *dp;
1068	struct g_consumer *cp;
1069	struct bio *cbp;
1070	struct bintime curtime;
1071
1072	binuptime(&curtime);
1073	/*
1074	 * Find a disk which the smallest load.
1075	 */
1076	disk = NULL;
1077	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
1078		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
1079			continue;
1080		/* If disk wasn't used for more than 2 sec, use it. */
1081		if (curtime.sec - dp->d_last_used.sec >= 2) {
1082			disk = dp;
1083			break;
1084		}
1085		if (disk == NULL ||
1086		    bintime_cmp(&dp->d_delay, &disk->d_delay) < 0) {
1087			disk = dp;
1088		}
1089	}
1090	cbp = g_clone_bio(bp);
1091	if (cbp == NULL) {
1092		if (bp->bio_error == 0)
1093			bp->bio_error = ENOMEM;
1094		g_io_deliver(bp, bp->bio_error);
1095		return;
1096	}
1097	/*
1098	 * Fill in the component buf structure.
1099	 */
1100	cp = disk->d_consumer;
1101	cbp->bio_done = g_mirror_done;
1102	cbp->bio_to = cp->provider;
1103	binuptime(&disk->d_last_used);
1104	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
1105	KASSERT(cp->acr > 0 && cp->ace > 0,
1106	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
1107	    cp->acw, cp->ace));
1108	g_io_request(cbp, cp);
1109}
1110
/*
 * SPLIT balance: divide one large read among all ACTIVE disks.
 * Requests no longer than sc_slice are handed to round-robin instead.
 * All clones are allocated before any is sent, so ENOMEM can be
 * reported without partial dispatch.
 */
static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	off_t left, mod, offset, slice;
	u_char *data;
	u_int ndisks;

	/* Small requests aren't worth splitting. */
	if (bp->bio_length <= sc->sc_slice) {
		g_mirror_request_round_robin(sc, bp);
		return;
	}
	/* NOTE(review): assumes ndisks > 0 -- confirm callers guarantee it. */
	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
	slice = bp->bio_length / ndisks;
	/* Round the slice up to a whole number of sectors. */
	mod = slice % sc->sc_provider->sectorsize;
	if (mod != 0)
		slice += sc->sc_provider->sectorsize - mod;
	/*
	 * Allocate all bios before sending any request, so we can
	 * return ENOMEM in nice and clean way.
	 */
	left = bp->bio_length;
	offset = bp->bio_offset;
	data = bp->bio_data;
	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			/* Undo: free every clone allocated so far. */
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_mirror_done;
		/* Remember the target disk until dispatch time. */
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
		cbp->bio_offset = offset;
		cbp->bio_data = data;
		cbp->bio_length = MIN(left, slice);
		left -= cbp->bio_length;
		if (left == 0)
			break;
		offset += cbp->bio_length;
		data += cbp->bio_length;
	}
	/* Everything is allocated; now actually send the pieces. */
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr > 0 && cp->ace > 0,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(cbp, disk->d_consumer);
	}
}
1179
/*
 * Entry point for regular I/O requests: reads go to one disk picked by
 * the configured balance algorithm; writes and deletes are cloned to
 * every disk that must receive them.
 */
static void
g_mirror_register_request(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	switch (bp->bio_cmd) {
	case BIO_READ:
		switch (sc->sc_balance) {
		case G_MIRROR_BALANCE_LOAD:
			g_mirror_request_load(sc, bp);
			break;
		case G_MIRROR_BALANCE_PREFER:
			g_mirror_request_prefer(sc, bp);
			break;
		case G_MIRROR_BALANCE_ROUND_ROBIN:
			g_mirror_request_round_robin(sc, bp);
			break;
		case G_MIRROR_BALANCE_SPLIT:
			g_mirror_request_split(sc, bp);
			break;
		}
		return;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		struct g_mirror_disk *disk;
		struct bio_queue_head queue;
		struct g_consumer *cp;
		struct bio *cbp;

		/*
		 * Allocate all bios before sending any request, so we can
		 * return ENOMEM in nice and clean way.
		 */
		bioq_init(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			/*
			 * Writes go to ACTIVE disks, and to SYNCHRONIZING
			 * disks only within the already-synchronized range.
			 * Note: 'continue' here continues the LIST_FOREACH.
			 */
			switch (disk->d_state) {
			case G_MIRROR_DISK_STATE_ACTIVE:
				break;
			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
				if (bp->bio_offset >= disk->d_sync.ds_offset)
					continue;
				break;
			default:
				continue;
			}
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				/* Out of memory: destroy clones made so far. */
				for (cbp = bioq_first(&queue); cbp != NULL;
				    cbp = bioq_first(&queue)) {
					bioq_remove(&queue, cbp);
					g_destroy_bio(cbp);
				}
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			bioq_insert_tail(&queue, cbp);
		}
		/*
		 * Second pass over the same disks in the same order pairs
		 * each clone with its disk and sends it.
		 */
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			switch (disk->d_state) {
			case G_MIRROR_DISK_STATE_ACTIVE:
				break;
			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
				if (bp->bio_offset >= disk->d_sync.ds_offset)
					continue;
				break;
			default:
				continue;
			}
			cbp = bioq_first(&queue);
			KASSERT(cbp != NULL, ("NULL cbp! (device %s).",
			    sc->sc_name));
			bioq_remove(&queue, cbp);
			cp = disk->d_consumer;
			cbp->bio_done = g_mirror_done;
			cbp->bio_to = cp->provider;
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			KASSERT(cp->acw > 0 && cp->ace > 0,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
			g_io_request(cbp, cp);
		}
		/*
		 * Bump syncid on first write.
		 */
		if (sc->sc_bump_syncid == G_MIRROR_BUMP_ON_FIRST_WRITE) {
			sc->sc_bump_syncid = 0;
			g_topology_lock();
			g_mirror_bump_syncid(sc);
			g_topology_unlock();
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}
1282
1283static int
1284g_mirror_can_destroy(struct g_mirror_softc *sc)
1285{
1286	struct g_geom *gp;
1287	struct g_consumer *cp;
1288
1289	g_topology_assert();
1290	gp = sc->sc_geom;
1291	LIST_FOREACH(cp, &gp->consumer, consumer) {
1292		if (g_mirror_is_busy(sc, cp))
1293			return (0);
1294	}
1295	gp = sc->sc_sync.ds_geom;
1296	LIST_FOREACH(cp, &gp->consumer, consumer) {
1297		if (g_mirror_is_busy(sc, cp))
1298			return (0);
1299	}
1300	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
1301	    sc->sc_name);
1302	return (1);
1303}
1304
1305static int
1306g_mirror_try_destroy(struct g_mirror_softc *sc)
1307{
1308
1309	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) {
1310		g_topology_lock();
1311		if (!g_mirror_can_destroy(sc)) {
1312			g_topology_unlock();
1313			return (0);
1314		}
1315		g_topology_unlock();
1316		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
1317		    &sc->sc_worker);
1318		wakeup(&sc->sc_worker);
1319		sc->sc_worker = NULL;
1320	} else {
1321		g_topology_lock();
1322		if (!g_mirror_can_destroy(sc)) {
1323			g_topology_unlock();
1324			return (0);
1325		}
1326		g_mirror_destroy_device(sc);
1327		g_topology_unlock();
1328		free(sc, M_MIRROR);
1329	}
1330	return (1);
1331}
1332
/*
 * Worker thread.
 * Services, in priority order: state-change events, regular/sync I/O
 * requests from sc_queue, and periodic synchronization work.
 */
static void
g_mirror_worker(void *arg)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct bio *bp;
	u_int nreqs;	/* regular requests served since last sync pass */

	sc = arg;
	curthread->td_base_pri = PRIBIO;

	nreqs = 0;
	for (;;) {
		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
		/*
		 * First take a look at events.
		 * This is important to handle events before any I/O requests.
		 */
		ep = g_mirror_event_get(sc);
		if (ep != NULL) {
			g_topology_lock();
			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
				/* Update only device status. */
				G_MIRROR_DEBUG(3,
				    "Running event for device %s.",
				    sc->sc_name);
				ep->e_error = 0;
				g_mirror_update_device(sc, 1);
			} else {
				/* Update disk status. */
				G_MIRROR_DEBUG(3, "Running event for disk %s.",
				     g_mirror_get_diskname(ep->e_disk));
				ep->e_error = g_mirror_update_disk(ep->e_disk,
				    ep->e_state);
				if (ep->e_error == 0)
					g_mirror_update_device(sc, 0);
			}
			g_topology_unlock();
			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
				KASSERT(ep->e_error == 0,
				    ("Error cannot be handled."));
				g_mirror_event_free(ep);
			} else {
				/* The sender waits; mark done and wake it. */
				ep->e_flags |= G_MIRROR_EVENT_DONE;
				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
				    ep);
				mtx_lock(&sc->sc_events_mtx);
				wakeup(ep);
				mtx_unlock(&sc->sc_events_mtx);
			}
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				if (g_mirror_try_destroy(sc))
					kthread_exit(0);
			}
			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
			continue;
		}
		/*
		 * Now I/O requests.
		 */
		/* Get first request from the queue. */
		mtx_lock(&sc->sc_queue_mtx);
		bp = bioq_first(&sc->sc_queue);
		if (bp == NULL) {
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				mtx_unlock(&sc->sc_queue_mtx);
				if (g_mirror_try_destroy(sc))
					kthread_exit(0);
				mtx_lock(&sc->sc_queue_mtx);
			}
		}
		/*
		 * Run a synchronization pass when disks are being rebuilt
		 * and either the queue is empty or regular I/O has had its
		 * share (nreqs exceeded g_mirror_reqs_per_sync).
		 */
		if (sc->sc_sync.ds_ndisks > 0 &&
		    (bp == NULL || nreqs > g_mirror_reqs_per_sync)) {
			mtx_unlock(&sc->sc_queue_mtx);
			/*
			 * It is time for synchronization...
			 */
			nreqs = 0;
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_state !=
				    G_MIRROR_DISK_STATE_SYNCHRONIZING) {
					continue;
				}
				/* Already synchronized to the end? */
				if (disk->d_sync.ds_offset >=
				    sc->sc_provider->mediasize) {
					continue;
				}
				/* Previous sync request still in flight? */
				if (disk->d_sync.ds_offset >
				    disk->d_sync.ds_offset_done) {
					continue;
				}
				g_mirror_sync_one(disk);
			}
			G_MIRROR_DEBUG(5, "%s: I'm here 2.", __func__);
			goto sleep;
		}
		if (bp == NULL) {
			/* Nothing to do; sleep until a request arrives. */
			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1", 0);
			G_MIRROR_DEBUG(5, "%s: I'm here 3.", __func__);
			continue;
		}
		nreqs++;
		bioq_remove(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0) {
			g_mirror_regular_request(bp);
		} else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
			u_int timeout, sps;

			g_mirror_sync_request(bp);
sleep:
			/*
			 * Throttle synchronization to g_mirror_syncs_per_sec
			 * requests per second (0 disables the delay).
			 */
			sps = atomic_load_acq_int(&g_mirror_syncs_per_sec);
			if (sps == 0) {
				G_MIRROR_DEBUG(5, "%s: I'm here 5.", __func__);
				continue;
			}
			mtx_lock(&sc->sc_queue_mtx);
			if (bioq_first(&sc->sc_queue) != NULL) {
				/* Regular I/O is pending; don't delay it. */
				mtx_unlock(&sc->sc_queue_mtx);
				G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
				continue;
			}
			timeout = hz / sps;
			if (timeout == 0)
				timeout = 1;
			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w2",
			    timeout);
		} else {
			g_mirror_register_request(bp);
		}
		G_MIRROR_DEBUG(5, "%s: I'm here 6.", __func__);
	}
}
1473
/*
 * Open disk's consumer if needed.
 * Brings the consumer's access counts in line with the mirror provider's
 * (plus one extra "exclusive" bit while the provider is open at all), and
 * maintains the disk's DIRTY flag on write-open/write-close transitions.
 */
static void
g_mirror_update_access(struct g_mirror_disk *disk)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	int acr, acw, ace, cpw, error;

	g_topology_assert();

	cp = disk->d_consumer;
	pp = disk->d_softc->sc_provider;
	if (pp == NULL) {
		/* No provider: drop whatever access we currently hold. */
		acr = -cp->acr;
		acw = -cp->acw;
		ace = -cp->ace;
	} else {
		/* Deltas needed to match the provider's access counts. */
		acr = pp->acr - cp->acr;
		acw = pp->acw - cp->acw;
		ace = pp->ace - cp->ace;
		/* Grab an extra "exclusive" bit. */
		if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)
			ace++;
	}
	if (acr == 0 && acw == 0 && ace == 0)
		return;
	/* Remember the write count before the change for DIRTY tracking. */
	cpw = cp->acw;
	error = g_access(cp, acr, acw, ace);
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", cp->provider->name, acr,
	    acw, ace, error);
	if (error != 0) {
		/* Can't open the component: disconnect it. */
		disk->d_softc->sc_bump_syncid = G_MIRROR_BUMP_ON_FIRST_WRITE;
		g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
		    G_MIRROR_EVENT_DONTWAIT);
		return;
	}
	if (cpw == 0 && cp->acw > 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), disk->d_softc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	} else if (cpw > 0 && cp->acw == 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), disk->d_softc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	}
}
1522
/*
 * Start the synchronization (rebuild) process for one disk: open its
 * regular consumer for write, create and open a dedicated read consumer
 * attached to the mirror provider, and allocate the sync buffer.
 */
static void
g_mirror_sync_start(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	int error;

	g_topology_assert();

	sc = disk->d_softc;
	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
	    sc->sc_state));
	cp = disk->d_consumer;
	KASSERT(cp->acr == 0 && cp->acw == 0 && cp->ace == 0,
	    ("Consumer %s already opened.", cp->provider->name));

	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
	    g_mirror_get_diskname(disk));
	/* Open the component for writing (w1e1). */
	error = g_access(cp, 0, 1, 1);
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", cp->provider->name, 0, 1,
	    1, error);
	if (error != 0) {
		g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
		    G_MIRROR_EVENT_DONTWAIT);
		return;
	}
	disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	KASSERT(disk->d_sync.ds_consumer == NULL,
	    ("Sync consumer already exists (device=%s, disk=%s).",
	    sc->sc_name, g_mirror_get_diskname(disk)));
	/* Read side of the rebuild: a consumer on the mirror provider. */
	disk->d_sync.ds_consumer = g_new_consumer(sc->sc_sync.ds_geom);
	disk->d_sync.ds_consumer->private = disk;
	error = g_attach(disk->d_sync.ds_consumer, disk->d_softc->sc_provider);
	KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
	    disk->d_softc->sc_name, error));
	error = g_access(disk->d_sync.ds_consumer, 1, 0, 0);
	KASSERT(error == 0, ("Cannot open %s (error=%d).",
	    disk->d_softc->sc_name, error));
	disk->d_sync.ds_data = malloc(G_MIRROR_SYNC_BLOCK_SIZE, M_MIRROR,
	    M_WAITOK);
	sc->sc_sync.ds_ndisks++;
}
1566
/*
 * Stop synchronization process.
 * type: 0 - synchronization finished
 *       1 - synchronization stopped
 * Undoes g_mirror_sync_start(): destroys the sync consumer, frees the
 * sync buffer and closes the component's write access.
 */
static void
g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
{
	struct g_consumer *cp;

	g_topology_assert();
	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
	    g_mirror_disk_state2str(disk->d_state)));
	/* Nothing to do when synchronization was never started. */
	if (disk->d_sync.ds_consumer == NULL)
		return;

	if (type == 0) {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
		    disk->d_softc->sc_name, g_mirror_get_diskname(disk));
	} else /* if (type == 1) */ {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
		    disk->d_softc->sc_name, g_mirror_get_diskname(disk));
	}
	cp = disk->d_sync.ds_consumer;
	g_access(cp, -1, 0, 0);
	g_mirror_kill_consumer(disk->d_softc, cp);
	free(disk->d_sync.ds_data, M_MIRROR);
	disk->d_sync.ds_consumer = NULL;
	disk->d_softc->sc_sync.ds_ndisks--;
	/* Drop the write access acquired in g_mirror_sync_start(). */
	cp = disk->d_consumer;
	KASSERT(cp->acr == 0 && cp->acw == 1 && cp->ace == 1,
	    ("Consumer %s not opened.", cp->provider->name));
	g_access(cp, 0, -1, -1);
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", cp->provider->name, 0, -1,
	    -1, 0);
	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
}
1605
1606static void
1607g_mirror_launch_provider(struct g_mirror_softc *sc)
1608{
1609	struct g_mirror_disk *disk;
1610	struct g_provider *pp;
1611
1612	g_topology_assert();
1613
1614	pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name);
1615	pp->mediasize = sc->sc_mediasize;
1616	pp->sectorsize = sc->sc_sectorsize;
1617	sc->sc_provider = pp;
1618	g_error_provider(pp, 0);
1619	G_MIRROR_DEBUG(0, "Device %s: provider %s launched.", sc->sc_name,
1620	    pp->name);
1621	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
1622		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
1623			g_mirror_sync_start(disk);
1624	}
1625}
1626
/*
 * Tear the mirror's provider down: fail it with ENXIO, drain the request
 * queue delivering ENXIO to every pending bio, orphan the provider and
 * stop any in-progress synchronization.
 */
static void
g_mirror_destroy_provider(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct bio *bp;

	g_topology_assert();
	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
	    sc->sc_name));

	g_error_provider(sc->sc_provider, ENXIO);
	/* Fail everything still queued for the worker thread. */
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
		bioq_remove(&sc->sc_queue, bp);
		g_io_deliver(bp, ENXIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
	    sc->sc_provider->name);
	sc->sc_provider->flags |= G_PF_WITHER;
	g_orphan_provider(sc->sc_provider, ENXIO);
	sc->sc_provider = NULL;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			g_mirror_sync_stop(disk, 1);
	}
}
1654
/*
 * Timeout callback: force the device to start even though not all
 * configured disks have appeared, by sending a device event.
 */
static void
g_mirror_go(void *arg)
{
	struct g_mirror_softc *sc;

	sc = arg;
	G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
	g_mirror_event_send(sc, 0,
	    G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE);
}
1665
/*
 * Decide which state a newly arrived disk should enter, based on how its
 * syncid compares with the device's syncid.  May destroy the disk (and
 * return STATE_NONE) when the disk is fresher than the running device.
 */
static u_int
g_mirror_determine_state(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	u_int state;

	sc = disk->d_softc;
	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
		if ((disk->d_flags &
		    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
			/* Disk does not need synchronization. */
			state = G_MIRROR_DISK_STATE_ACTIVE;
		} else {
			if ((sc->sc_flags &
			     G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0  ||
			    (disk->d_flags &
			     G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
				/*
				 * We can start synchronization from
				 * the stored offset.
				 */
				state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
			} else {
				state = G_MIRROR_DISK_STATE_STALE;
			}
		}
	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
		/*
		 * Reset all synchronization data for this disk,
		 * because if it even was synchronized, it was
		 * synchronized to disks with different syncid.
		 */
		disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
		disk->d_sync.ds_offset = 0;
		disk->d_sync.ds_offset_done = 0;
		disk->d_sync.ds_syncid = sc->sc_syncid;
		if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
		    (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
			state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
		} else {
			state = G_MIRROR_DISK_STATE_STALE;
		}
	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
		/*
		 * Not good, NOT GOOD!
		 * It means that the mirror was started on stale disks
		 * and a fresher disk has just arrived.
		 * If there were writes, the mirror's contents may now be
		 * inconsistent.  The best choice here is not to touch
		 * this disk and to inform the user loudly.
		 */
		G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
		    "disk (%s) arrives!! It will not be connected to the "
		    "running device.", sc->sc_name,
		    g_mirror_get_diskname(disk));
		g_mirror_destroy_disk(disk);
		state = G_MIRROR_DISK_STATE_NONE;
		/* Return immediately, because disk was destroyed. */
		return (state);
	}
	G_MIRROR_DEBUG(3, "State for %s disk: %s.",
	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
	return (state);
}
1730
/*
 * Update device state.
 * Drives the device state machine: in STARTING, decides when the set of
 * connected disks is sufficient to go RUNNING (possibly forced by
 * timeout); in RUNNING, creates/destroys the provider as disks come and
 * go, and bumps the syncid when scheduled.
 */
static void
g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force)
{
	struct g_mirror_disk *disk;
	u_int state;

	g_topology_assert();

	switch (sc->sc_state) {
	case G_MIRROR_DEVICE_STATE_STARTING:
	    {
		struct g_mirror_disk *pdisk;
		u_int dirty, ndisks, syncid;

		KASSERT(sc->sc_provider == NULL,
		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
		/*
		 * Are we ready? We are, if all disks are connected or
		 * if we have any disks and 'force' is true.
		 */
		if ((force && g_mirror_ndisks(sc, -1) > 0) ||
		    sc->sc_ndisks == g_mirror_ndisks(sc, -1)) {
			;
		} else if (g_mirror_ndisks(sc, -1) == 0) {
			/*
			 * Disks went down in starting phase, so destroy
			 * device.
			 */
			callout_drain(&sc->sc_callout);
			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
			return;
		} else {
			/* Not all disks connected yet; keep waiting. */
			return;
		}

		/*
		 * Activate all disks with the biggest syncid.
		 */
		if (force) {
			/*
			 * If 'force' is true, we have been called due to
			 * timeout, so don't bother canceling timeout.
			 */
			ndisks = 0;
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
					ndisks++;
				}
			}
			if (ndisks == 0) {
				/* No valid disks found, destroy device. */
				sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
				return;
			}
		} else {
			/* Cancel timeout. */
			callout_drain(&sc->sc_callout);
		}

		/*
		 * Find disk with the biggest syncid.
		 */
		syncid = 0;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_sync.ds_syncid > syncid)
				syncid = disk->d_sync.ds_syncid;
		}

		/*
		 * Here we need to look for dirty disks and if all disks
		 * with the biggest syncid are dirty, we have to choose
		 * one with the biggest priority and rebuild the rest.
		 */
		/*
		 * Find the number of dirty disks with the biggest syncid.
		 * Find the number of disks with the biggest syncid.
		 * While here, find a disk with the biggest priority.
		 */
		dirty = ndisks = 0;
		pdisk = NULL;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_sync.ds_syncid != syncid)
				continue;
			if ((disk->d_flags &
			    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
				continue;
			}
			ndisks++;
			if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
				dirty++;
				if (pdisk == NULL ||
				    pdisk->d_priority < disk->d_priority) {
					pdisk = disk;
				}
			}
		}
		if (dirty == 0) {
			/* No dirty disks at all, great. */
		} else if (dirty == ndisks) {
			/*
			 * Force synchronization for all dirty disks except one
			 * with the biggest priority.
			 */
			KASSERT(pdisk != NULL, ("pdisk == NULL"));
			G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a "
			    "master disk for synchronization.",
			    g_mirror_get_diskname(pdisk), sc->sc_name);
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_sync.ds_syncid != syncid)
					continue;
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
					continue;
				}
				KASSERT((disk->d_flags &
				    G_MIRROR_DISK_FLAG_DIRTY) != 0,
				    ("Disk %s isn't marked as dirty.",
				    g_mirror_get_diskname(disk)));
				/* Skip the disk with the biggest priority. */
				if (disk == pdisk)
					continue;
				/* Zero syncid forces a rebuild below. */
				disk->d_sync.ds_syncid = 0;
			}
		} else if (dirty < ndisks) {
			/*
			 * Force synchronization for all dirty disks.
			 * We have some non-dirty disks.
			 */
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_sync.ds_syncid != syncid)
					continue;
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
					continue;
				}
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_DIRTY) == 0) {
					continue;
				}
				disk->d_sync.ds_syncid = 0;
			}
		}

		/* Reset hint. */
		sc->sc_hint = NULL;
		sc->sc_syncid = syncid;
		if (force) {
			/* Remember to bump syncid on first write. */
			sc->sc_bump_syncid = G_MIRROR_BUMP_ON_FIRST_WRITE;
		}
		state = G_MIRROR_DEVICE_STATE_RUNNING;
		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
		    sc->sc_name, g_mirror_device_state2str(sc->sc_state),
		    g_mirror_device_state2str(state));
		sc->sc_state = state;
		/* Route every disk to its per-disk state. */
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			state = g_mirror_determine_state(disk);
			g_mirror_event_send(disk, state,
			    G_MIRROR_EVENT_DONTWAIT);
			if (state == G_MIRROR_DISK_STATE_STALE) {
				sc->sc_bump_syncid =
				    G_MIRROR_BUMP_ON_FIRST_WRITE;
			}
		}
		break;
	    }
	case G_MIRROR_DEVICE_STATE_RUNNING:
		if (g_mirror_ndisks(sc, -1) == 0) {
			/*
			 * No disks at all, we need to destroy device.
			 */
			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
			break;
		} else if (g_mirror_ndisks(sc,
		    G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
			/*
			 * No active disks, destroy provider.
			 */
			if (sc->sc_provider != NULL)
				g_mirror_destroy_provider(sc);
			break;
		} else if (g_mirror_ndisks(sc,
		    G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
			/*
			 * We have active disks, launch provider if it doesn't
			 * exist.
			 */
			if (sc->sc_provider == NULL)
				g_mirror_launch_provider(sc);
		}
		/*
		 * Bump syncid here, if we need to do it immediately.
		 */
		if (sc->sc_bump_syncid == G_MIRROR_BUMP_IMMEDIATELY) {
			sc->sc_bump_syncid = 0;
			g_mirror_bump_syncid(sc);
		}
		break;
	default:
		KASSERT(1 == 0, ("Wrong device state (%s, %s).",
		    sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
		break;
	}
}
1941
/*
 * Update disk state and device state if needed.
 */
#define	DISK_STATE_CHANGED()	G_MIRROR_DEBUG(1,			\
	"Disk %s state changed from %s to %s (device %s).",		\
	g_mirror_get_diskname(disk),					\
	g_mirror_disk_state2str(disk->d_state),				\
	g_mirror_disk_state2str(state), sc->sc_name)
/*
 * Per-disk state machine.  Each case documents the scenarios that can
 * legally bring the disk into that state, enforced by KASSERTs.
 * Returns 0 on success or an errno value.
 */
static int
g_mirror_update_disk(struct g_mirror_disk *disk, u_int state)
{
	struct g_mirror_softc *sc;

	g_topology_assert();

	sc = disk->d_softc;
again:
	G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.",
	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state),
	    g_mirror_disk_state2str(state));
	switch (state) {
	case G_MIRROR_DISK_STATE_NEW:
		/*
		 * Possible scenarios:
		 * 1. New disk arrive.
		 */
		/* Previous state should be NONE. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		disk->d_state = state;
		/* Keep the disk list sorted by descending priority. */
		if (LIST_EMPTY(&sc->sc_disks))
			LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next);
		else {
			struct g_mirror_disk *dp;

			LIST_FOREACH(dp, &sc->sc_disks, d_next) {
				if (disk->d_priority >= dp->d_priority) {
					LIST_INSERT_BEFORE(dp, disk, d_next);
					/* NULL marks "already inserted". */
					dp = NULL;
					break;
				}
				if (LIST_NEXT(dp, d_next) == NULL)
					break;
			}
			if (dp != NULL)
				LIST_INSERT_AFTER(dp, disk, d_next);
		}
		G_MIRROR_DEBUG(0, "Device %s: provider %s detected.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
			break;
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		/* Device is already running; route the disk onwards. */
		state = g_mirror_determine_state(disk);
		if (state != G_MIRROR_DISK_STATE_NONE)
			goto again;
		break;
	case G_MIRROR_DISK_STATE_ACTIVE:
		/*
		 * Possible scenarios:
		 * 1. New disk does not need synchronization.
		 * 2. Synchronization process finished successfully.
		 */
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		/* Previous state should be NEW or SYNCHRONIZING. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		else if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING;
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC;
			g_mirror_sync_stop(disk, 0);
		}
		disk->d_state = state;
		disk->d_sync.ds_offset = 0;
		disk->d_sync.ds_offset_done = 0;
		g_mirror_update_access(disk);
		g_mirror_update_metadata(disk);
		G_MIRROR_DEBUG(0, "Device %s: provider %s activated.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		break;
	case G_MIRROR_DISK_STATE_STALE:
		/*
		 * Possible scenarios:
		 * 1. Stale disk was connected.
		 */
		/* Previous state should be NEW. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		/*
		 * STALE state is only possible if device is marked
		 * NOAUTOSYNC.
		 */
		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		disk->d_state = state;
		g_mirror_update_metadata(disk);
		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		break;
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		/*
		 * Possible scenarios:
		 * 1. Disk which needs synchronization was connected.
		 */
		/* Previous state should be NEW. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		disk->d_state = state;
		/* Synchronization needs the provider; defer otherwise. */
		if (sc->sc_provider != NULL) {
			g_mirror_sync_start(disk);
			g_mirror_update_metadata(disk);
		}
		break;
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		/*
		 * Possible scenarios:
		 * 1. Device wasn't running yet, but disk disappear.
		 * 2. Disk was active and disappear.
		 * 3. Disk disappear during synchronization process.
		 */
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
			/*
			 * Previous state should be ACTIVE, STALE or
			 * SYNCHRONIZING.
			 */
			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
			    disk->d_state == G_MIRROR_DISK_STATE_STALE ||
			    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
			    ("Wrong disk state (%s, %s).",
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
		} else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
			/* Previous state should be NEW. */
			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
			    ("Wrong disk state (%s, %s).",
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
			/*
			 * Reset bumping syncid if disk disappeared in STARTING
			 * state.
			 */
			if (sc->sc_bump_syncid == G_MIRROR_BUMP_ON_FIRST_WRITE)
				sc->sc_bump_syncid = 0;
#ifdef	INVARIANTS
		} else {
			KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
			    sc->sc_name,
			    g_mirror_device_state2str(sc->sc_state),
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
#endif
		}
		DISK_STATE_CHANGED();
		G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.",
		    sc->sc_name, g_mirror_get_diskname(disk));

		g_mirror_destroy_disk(disk);
		break;
	case G_MIRROR_DISK_STATE_DESTROY:
	    {
		int error;

		error = g_mirror_clear_metadata(disk);
		if (error != 0)
			return (error);
		DISK_STATE_CHANGED();
		G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.",
		    sc->sc_name, g_mirror_get_diskname(disk));

		g_mirror_destroy_disk(disk);
		sc->sc_ndisks--;
		/* Tell the remaining disks about the new disk count. */
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			g_mirror_update_metadata(disk);
		}
		break;
	    }
	default:
		KASSERT(1 == 0, ("Unknown state (%u).", state));
		break;
	}
	return (0);
}
#undef	DISK_STATE_CHANGED
2163
/*
 * Read and decode the gmirror metadata stored in the last sector of the
 * provider attached to 'cp'.  On success the decoded metadata is left in
 * '*md' and 0 is returned; otherwise an errno value is returned.
 * Called (and returns) with the topology lock held.
 */
static int
g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	/* Open the consumer read-only for the duration of the I/O. */
	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	/* g_read_data() performs sleeping I/O; drop the topology lock. */
	g_topology_unlock();
	/* Metadata are stored on last sector. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	if (buf == NULL) {
		g_access(cp, -1, 0, 0);
		return (error);
	}
	if (error != 0) {
		g_access(cp, -1, 0, 0);
		g_free(buf);
		return (error);
	}
	/* Releasing an access count we hold must not fail. */
	error = g_access(cp, -1, 0, 0);
	KASSERT(error == 0, ("Cannot decrease access count for %s.", pp->name));

	/* Decode metadata. */
	error = mirror_metadata_decode(buf, md);
	g_free(buf);
	/*
	 * Magic is checked first: a non-matching magic just means the
	 * provider is not ours (quietly return EINVAL), while a decode
	 * error with a matching magic indicates corrupted metadata and is
	 * worth a debug message.  NOTE(review): this ordering assumes
	 * mirror_metadata_decode() fills md_magic even when its hash check
	 * fails — confirm against the decoder.
	 */
	if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0)
		return (EINVAL);
	if (error != 0) {
		G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
		    cp->provider->name);
		return (error);
	}

	return (0);
}
2207
2208static int
2209g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp,
2210    struct g_mirror_metadata *md)
2211{
2212
2213	if (g_mirror_id2disk(sc, md->md_did) != NULL) {
2214		G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.",
2215		    pp->name, md->md_did);
2216		return (EEXIST);
2217	}
2218	if (md->md_all != sc->sc_ndisks) {
2219		G_MIRROR_DEBUG(1,
2220		    "Invalid '%s' field on disk %s (device %s), skipping.",
2221		    "md_all", pp->name, sc->sc_name);
2222		return (EINVAL);
2223	}
2224	if (md->md_slice != sc->sc_slice) {
2225		G_MIRROR_DEBUG(1,
2226		    "Invalid '%s' field on disk %s (device %s), skipping.",
2227		    "md_slice", pp->name, sc->sc_name);
2228		return (EINVAL);
2229	}
2230	if (md->md_balance != sc->sc_balance) {
2231		G_MIRROR_DEBUG(1,
2232		    "Invalid '%s' field on disk %s (device %s), skipping.",
2233		    "md_balance", pp->name, sc->sc_name);
2234		return (EINVAL);
2235	}
2236	if (md->md_mediasize != sc->sc_mediasize) {
2237		G_MIRROR_DEBUG(1,
2238		    "Invalid '%s' field on disk %s (device %s), skipping.",
2239		    "md_mediasize", pp->name, sc->sc_name);
2240		return (EINVAL);
2241	}
2242	if (sc->sc_mediasize > pp->mediasize) {
2243		G_MIRROR_DEBUG(1,
2244		    "Invalid size of disk %s (device %s), skipping.", pp->name,
2245		    sc->sc_name);
2246		return (EINVAL);
2247	}
2248	if (md->md_sectorsize != sc->sc_sectorsize) {
2249		G_MIRROR_DEBUG(1,
2250		    "Invalid '%s' field on disk %s (device %s), skipping.",
2251		    "md_sectorsize", pp->name, sc->sc_name);
2252		return (EINVAL);
2253	}
2254	if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2255		G_MIRROR_DEBUG(1,
2256		    "Invalid sector size of disk %s (device %s), skipping.",
2257		    pp->name, sc->sc_name);
2258		return (EINVAL);
2259	}
2260	if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) {
2261		G_MIRROR_DEBUG(1,
2262		    "Invalid device flags on disk %s (device %s), skipping.",
2263		    pp->name, sc->sc_name);
2264		return (EINVAL);
2265	}
2266	if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) {
2267		G_MIRROR_DEBUG(1,
2268		    "Invalid disk flags on disk %s (device %s), skipping.",
2269		    pp->name, sc->sc_name);
2270		return (EINVAL);
2271	}
2272	return (0);
2273}
2274
2275static int
2276g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
2277    struct g_mirror_metadata *md)
2278{
2279	struct g_mirror_disk *disk;
2280	int error;
2281
2282	g_topology_assert();
2283	G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name);
2284
2285	error = g_mirror_check_metadata(sc, pp, md);
2286	if (error != 0)
2287		return (error);
2288	disk = g_mirror_init_disk(sc, pp, md, &error);
2289	if (disk == NULL)
2290		return (error);
2291	error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW,
2292	    G_MIRROR_EVENT_WAIT);
2293	return (error);
2294}
2295
/*
 * GEOM access method for the mirror provider.  Propagates the access
 * count deltas (acr/acw/ace) to every ACTIVE component disk and marks
 * components dirty on first open for writing / clean on last close.
 * Returns 0 if at least one component accepted the change, ENXIO if
 * none did (or the device has no disks and access is being acquired).
 */
static int
g_mirror_access(struct g_provider *pp, int acr, int acw, int ace)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	int dcr, dcw, dce, err, error;

	g_topology_assert();
	G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
	    acw, ace);

	/* Access counts on the provider after this request is applied. */
	dcr = pp->acr + acr;
	dcw = pp->acw + acw;
	dce = pp->ace + ace;

	/* On first open, grab an extra "exclusive" bit */
	if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
		ace++;
	/* ... and let go of it on last close */
	if (dcr == 0 && dcw == 0 && dce == 0)
		ace--;

	sc = pp->geom->softc;
	/* Without components we can only satisfy a pure release. */
	if (sc == NULL || LIST_EMPTY(&sc->sc_disks)) {
		if (acr <= 0 && acw <= 0 && ace <= 0)
			return (0);
		else
			return (ENXIO);
	}
	error = ENXIO;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		err = g_access(disk->d_consumer, acr, acw, ace);
		G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d",
		    g_mirror_get_diskname(disk), acr, acw, ace, err);
		if (err == 0) {
			/*
			 * Mark disk as dirty on open and unmark on close.
			 */
			if (pp->acw == 0 && dcw > 0) {
				G_MIRROR_DEBUG(1,
				    "Disk %s (device %s) marked as dirty.",
				    g_mirror_get_diskname(disk), sc->sc_name);
				disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
				g_mirror_update_metadata(disk);
			} else if (pp->acw > 0 && dcw == 0) {
				G_MIRROR_DEBUG(1,
				    "Disk %s (device %s) marked as clean.",
				    g_mirror_get_diskname(disk), sc->sc_name);
				disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
				g_mirror_update_metadata(disk);
			}
			/* One component accepted the request; overall success. */
			error = 0;
		} else {
			/*
			 * The component refused the access change: disconnect
			 * it and remember to bump the syncid on first write.
			 */
			sc->sc_bump_syncid = G_MIRROR_BUMP_ON_FIRST_WRITE;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
		}
	}
	return (error);
}
2359
/*
 * Create a new mirror device from 'md': allocate the softc, the main
 * ("action") geom and the synchronization geom, start the per-device
 * worker thread and arm the startup timeout.  Returns the action geom
 * or NULL on failure.  Called with the topology lock held.
 */
static struct g_geom *
g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_geom *gp;
	int error, timeout;

	g_topology_assert();
	G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name,
	    md->md_mid);

	/* One disk is minimum. */
	if (md->md_all < 1)
		return (NULL);
	/*
	 * Action geom.
	 */
	gp = g_new_geomf(mp, "%s", md->md_name);
	sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO);
	gp->start = g_mirror_start;
	gp->spoiled = g_mirror_spoiled;
	gp->orphan = g_mirror_orphan;
	gp->access = g_mirror_access;
	gp->dumpconf = g_mirror_dumpconf;

	/* Copy the device configuration out of the metadata. */
	sc->sc_id = md->md_mid;
	sc->sc_slice = md->md_slice;
	sc->sc_balance = md->md_balance;
	sc->sc_mediasize = md->md_mediasize;
	sc->sc_sectorsize = md->md_sectorsize;
	sc->sc_ndisks = md->md_all;
	sc->sc_flags = md->md_mflags;
	sc->sc_bump_syncid = 0;
	bioq_init(&sc->sc_queue);
	mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF);
	LIST_INIT(&sc->sc_disks);
	TAILQ_INIT(&sc->sc_events);
	mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF);
	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
	sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING;
	gp->softc = sc;
	sc->sc_geom = gp;
	sc->sc_provider = NULL;
	/*
	 * Synchronization geom.
	 */
	gp = g_new_geomf(mp, "%s.sync", md->md_name);
	gp->softc = sc;
	gp->orphan = g_mirror_orphan;
	sc->sc_sync.ds_geom = gp;
	sc->sc_sync.ds_ndisks = 0;
	error = kthread_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0,
	    "g_mirror %s", md->md_name);
	if (error != 0) {
		/* Roll back everything allocated above. */
		G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.",
		    sc->sc_name);
		g_destroy_geom(sc->sc_sync.ds_geom);
		mtx_destroy(&sc->sc_events_mtx);
		mtx_destroy(&sc->sc_queue_mtx);
		g_destroy_geom(sc->sc_geom);
		free(sc, M_MIRROR);
		return (NULL);
	}

	G_MIRROR_DEBUG(0, "Device %s created (id=%u).", sc->sc_name, sc->sc_id);

	/*
	 * Run timeout.
	 * After g_mirror_timeout seconds g_mirror_go() fires; presumably it
	 * lets the device leave STARTING even if not all components showed
	 * up — confirm against the worker/g_mirror_go implementation.
	 */
	timeout = atomic_load_acq_int(&g_mirror_timeout);
	callout_reset(&sc->sc_callout, timeout * hz, g_mirror_go, sc);
	return (sc->sc_geom);
}
2433
/*
 * Tear down the mirror device 'sc'.  With force == 0 the call fails
 * with EBUSY while the provider is still open; with force != 0 a
 * warning is logged and destruction proceeds anyway.  Drops and
 * reacquires the topology lock while waiting for the worker thread to
 * exit.  Returns 0 on success or an errno value.
 */
int
g_mirror_destroy(struct g_mirror_softc *sc, boolean_t force)
{
	struct g_provider *pp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);
	pp = sc->sc_provider;
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_MIRROR_DEBUG(0, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		} else {
			G_MIRROR_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	/*
	 * Tell the worker thread to destroy the device and exit, then
	 * wake it up; it sleeps on 'sc' under sc_queue_mtx.
	 */
	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT;
	g_topology_unlock();
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
	/* Poll until the worker clears sc_worker on its way out. */
	while (sc->sc_worker != NULL)
		tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5);
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
	g_topology_lock();
	g_mirror_destroy_device(sc);
	free(sc, M_MIRROR);
	return (0);
}
2472
/*
 * Orphan method for the throw-away geom used while tasting.  The
 * consumer only exists for the duration of one synchronous metadata
 * read with the topology lock held, so this must never be called.
 */
static void
g_mirror_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}
2480
/*
 * GEOM taste method: examine provider 'pp' for gmirror metadata and,
 * when found, connect it as a component — creating the mirror device
 * first if this is the first component seen.  Returns the mirror's
 * geom, or NULL when the provider is not (or cannot become) part of a
 * mirror.  Called with the topology lock held.
 */
static struct g_geom *
g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_mirror_metadata md;
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);
	/* Skip providers with 0 sectorsize. */
	if (pp->sectorsize == 0)
		return (NULL);

	/*
	 * Create a throw-away geom/consumer pair just to read the
	 * metadata from the provider's last sector, then dismantle it.
	 */
	gp = g_new_geomf(mp, "mirror:taste");
	/*
	 * This orphan function should be never called.
	 */
	gp->orphan = g_mirror_taste_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_mirror_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	/* A newer on-disk format than this driver understands. */
	if (md.md_version > G_MIRROR_VERSION) {
		printf("geom_mirror.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	/* Hardcoded provider name in metadata must match this provider. */
	if (md.md_provider[0] != '\0' && strcmp(md.md_provider, pp->name) != 0)
		return (NULL);
	if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
		G_MIRROR_DEBUG(0,
		    "Device %s: provider %s marked as inactive, skipping.",
		    md.md_name, pp->name);
		return (NULL);
	}
	if (g_mirror_debug >= 2)
		mirror_metadata_dump(&md);

	/*
	 * Let's check if device already exists.
	 */
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		/* Skip synchronization geoms; match on the action geom. */
		if (sc->sc_sync.ds_geom == gp)
			continue;
		if (strcmp(md.md_name, sc->sc_name) != 0)
			continue;
		/* Same name but different device id: refuse the component. */
		if (md.md_mid != sc->sc_id) {
			G_MIRROR_DEBUG(0, "Device %s already configured.",
			    sc->sc_name);
			return (NULL);
		}
		break;
	}
	if (gp == NULL) {
		/* First component seen for this device: create it. */
		gp = g_mirror_create(mp, &md);
		if (gp == NULL) {
			G_MIRROR_DEBUG(0, "Cannot create device %s.",
			    md.md_name);
			return (NULL);
		}
		sc = gp->softc;
	}
	G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
	error = g_mirror_add_disk(sc, pp, &md);
	if (error != 0) {
		G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
		    pp->name, gp->name, error);
		/* Don't leave behind a device with no components. */
		if (LIST_EMPTY(&sc->sc_disks))
			g_mirror_destroy(sc, 1);
		return (NULL);
	}
	return (gp);
}
2566
2567static int
2568g_mirror_destroy_geom(struct gctl_req *req __unused,
2569    struct g_class *mp __unused, struct g_geom *gp)
2570{
2571
2572	return (g_mirror_destroy(gp->softc, 0));
2573}
2574
2575static void
2576g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2577    struct g_consumer *cp, struct g_provider *pp)
2578{
2579	struct g_mirror_softc *sc;
2580
2581	g_topology_assert();
2582
2583	sc = gp->softc;
2584	if (sc == NULL)
2585		return;
2586	/* Skip synchronization geom. */
2587	if (gp == sc->sc_sync.ds_geom)
2588		return;
2589	if (pp != NULL) {
2590		/* Nothing here. */
2591	} else if (cp != NULL) {
2592		struct g_mirror_disk *disk;
2593
2594		disk = cp->private;
2595		if (disk == NULL)
2596			return;
2597		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id);
2598		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
2599			sbuf_printf(sb, "%s<Synchronized>", indent);
2600			if (disk->d_sync.ds_offset_done == 0)
2601				sbuf_printf(sb, "0%%");
2602			else {
2603				sbuf_printf(sb, "%u%%",
2604				    (u_int)((disk->d_sync.ds_offset_done * 100) /
2605				    sc->sc_provider->mediasize));
2606			}
2607			sbuf_printf(sb, "</Synchronized>\n");
2608		}
2609		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
2610		    disk->d_sync.ds_syncid);
2611		sbuf_printf(sb, "%s<Flags>", indent);
2612		if (disk->d_flags == 0)
2613			sbuf_printf(sb, "NONE");
2614		else {
2615			int first = 1;
2616
2617#define	ADD_FLAG(flag, name)	do {					\
2618	if ((disk->d_flags & (flag)) != 0) {				\
2619		if (!first)						\
2620			sbuf_printf(sb, ", ");				\
2621		else							\
2622			first = 0;					\
2623		sbuf_printf(sb, name);					\
2624	}								\
2625} while (0)
2626			ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY");
2627			ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED");
2628			ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE");
2629			ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING,
2630			    "SYNCHRONIZING");
2631			ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
2632#undef	ADD_FLAG
2633		}
2634		sbuf_printf(sb, "</Flags>\n");
2635		sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent,
2636		    disk->d_priority);
2637		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
2638		    g_mirror_disk_state2str(disk->d_state));
2639	} else {
2640		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
2641		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
2642		sbuf_printf(sb, "%s<Flags>", indent);
2643		if (sc->sc_flags == 0)
2644			sbuf_printf(sb, "NONE");
2645		else {
2646			int first = 1;
2647
2648#define	ADD_FLAG(flag, name)	do {					\
2649	if ((sc->sc_flags & (flag)) != 0) {				\
2650		if (!first)						\
2651			sbuf_printf(sb, ", ");				\
2652		else							\
2653			first = 0;					\
2654		sbuf_printf(sb, name);					\
2655	}								\
2656} while (0)
2657			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
2658#undef	ADD_FLAG
2659		}
2660		sbuf_printf(sb, "</Flags>\n");
2661		sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent,
2662		    (u_int)sc->sc_slice);
2663		sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent,
2664		    balance_name(sc->sc_balance));
2665		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
2666		    sc->sc_ndisks);
2667	}
2668}
2669
2670DECLARE_GEOM_CLASS(g_mirror_class, g_mirror);
2671