/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2009-2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Pawel Jakub Dawidek
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

FEATURE(geom_gate, "GEOM Gate module");

static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0,
    "GEOM_GATE configuration");
static int g_gate_debug = 0;
TUNABLE_INT("kern.geom.gate.debug", &g_gate_debug);
SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
    "Debug level");
static u_int g_gate_maxunits = 256;
TUNABLE_INT("kern.geom.gate.maxunits", &g_gate_maxunits);
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
    &g_gate_maxunits, 0, "Maximum number of ggate devices");

struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};

static struct g_gate_softc **g_gate_units;
static u_int g_gate_nunits;
static struct mtx g_gate_units_lock;

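/*
 * Tear down a gate device.  Called with the topology and units locks held;
 * returns EBUSY if the provider is still open and force is false.  Pending
 * requests on both queues are failed with ENXIO and we sleep until every
 * other reference to the softc has been dropped.
 */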
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_units_lock, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_units_lock);
		return (EBUSY);
	}
	mtx_unlock(&g_gate_units_lock);
	mtx_lock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
		sc->sc_flags |= G_GATE_FLAG_DESTROY;
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	gp = pp->geom;
	pp->flags |= G_PF_WITHER;
	g_orphan_provider(pp, ENXIO);
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_first(&sc->sc_inqueue)) != NULL) {
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	while ((bp = bioq_first(&sc->sc_outqueue)) != NULL) {
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	/* One reference is ours. */
	sc->sc_ref--;
	while (sc->sc_ref > 0)
		msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	g_topology_lock();
	if ((cp = sc->sc_readcons) != NULL) {
		sc->sc_readcons = NULL;
		(void)g_access(cp, -1, 0, 0);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}

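/*
 * Deny new openings once the device is marked for destruction and deny
 * read openings of write-only devices.  Closings always succeed.
 */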
static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_gate_softc *sc;

	if (dr <= 0 && dw <= 0 && de <= 0)
		return (0);
	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		return (ENXIO);
	/* XXX: Hack to allow read-only mounts. */
#if 0
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
		return (EPERM);
#endif
	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
		return (EPERM);
	return (0);
}

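/*
 * Queue a request for the userland daemon, failing it with ENOMEM when
 * the incoming queue is already full.
 */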
static void
g_gate_queue_io(struct bio *bp)
{
	struct g_gate_softc *sc;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	mtx_lock(&sc->sc_queue_mtx);

	if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
		mtx_unlock(&sc->sc_queue_mtx);
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, ENOMEM);
		return;
	}

	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;
	sc->sc_queue_count++;

	bioq_insert_tail(&sc->sc_inqueue, bp);
	wakeup(sc);

	mtx_unlock(&sc->sc_queue_mtx);
}

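/*
 * Completion routine for reads sent directly to the read provider.  On
 * success the parent request is finished immediately; on error it is
 * retried through the userland daemon.
 */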
static void
g_gate_done(struct bio *cbp)
{
	struct bio *pbp;

	pbp = cbp->bio_parent;
	if (cbp->bio_error == 0) {
		pbp->bio_completed = cbp->bio_completed;
		g_destroy_bio(cbp);
		pbp->bio_inbed++;
		g_io_deliver(pbp, 0);
	} else {
		/* The direct read failed; pass the request to the daemon. */
		g_destroy_bio(cbp);
		pbp->bio_children--;
		g_gate_queue_io(pbp);
	}
}

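/*
 * I/O entry point.  Reads go directly to the read provider when one is
 * configured; everything else (including reads without a read provider)
 * is queued for the userland daemon.
 */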
static void
g_gate_start(struct bio *pbp)
{
	struct g_gate_softc *sc;

	sc = pbp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(pbp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, pbp, "Request received.");
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (sc->sc_readcons != NULL) {
			struct bio *cbp;

			cbp = g_clone_bio(pbp);
			if (cbp == NULL) {
				g_io_deliver(pbp, ENOMEM);
				return;
			}
			cbp->bio_done = g_gate_done;
			cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
			cbp->bio_to = sc->sc_readcons->provider;
			g_io_request(cbp, sc->sc_readcons);
			return;
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
	case BIO_FLUSH:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(pbp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, pbp, "Ignoring request.");
		g_io_deliver(pbp, EOPNOTSUPP);
		return;
	}

	g_gate_queue_io(pbp);
}

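/*
 * Find a device by unit number, or by provider name when unit is
 * G_GATE_NAME_GIVEN, and acquire a reference on it.
 */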
static struct g_gate_softc *
g_gate_hold(int unit, const char *name)
{
	struct g_gate_softc *sc = NULL;

	mtx_lock(&g_gate_units_lock);
	if (unit >= 0 && unit < g_gate_maxunits)
		sc = g_gate_units[unit];
	else if (unit == G_GATE_NAME_GIVEN) {
		KASSERT(name != NULL, ("name is NULL"));
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				continue;
			if (strcmp(name,
			    g_gate_units[unit]->sc_provider->name) != 0) {
				continue;
			}
			sc = g_gate_units[unit];
			break;
		}
	}
	if (sc != NULL)
		sc->sc_ref++;
	mtx_unlock(&g_gate_units_lock);
	return (sc);
}

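/*
 * Drop a reference and wake up g_gate_destroy() once the last reference
 * to a dying device goes away.
 */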
static void
g_gate_release(struct g_gate_softc *sc)
{

	g_topology_assert_not();
	mtx_lock(&g_gate_units_lock);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		wakeup(&sc->sc_ref);
	mtx_unlock(&g_gate_units_lock);
}

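/*
 * Return a free unit number: the requested one if it is available, or
 * the lowest free one when the requested unit is negative.  Called with
 * the units lock held.
 */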
static int
g_gate_getunit(int unit, int *errorp)
{

	mtx_assert(&g_gate_units_lock, MA_OWNED);
	if (unit >= 0) {
		if (unit >= g_gate_maxunits)
			*errorp = EINVAL;
		else if (g_gate_units[unit] == NULL)
			return (unit);
		else
			*errorp = EEXIST;
	} else {
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				return (unit);
		}
		*errorp = ENFILE;
	}
	return (-1);
}

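/*
 * Watchdog callout.  Runs every sc_timeout seconds and fails queued
 * requests that have been waiting for five seconds or more with EIO.
 */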
static void
g_gate_guard(void *arg)
{
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	g_gate_hold(sc->sc_unit, NULL);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}

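/*
 * The read provider has gone away; drop our consumer of it.
 */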
static void
g_gate_orphan(struct g_consumer *cp)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;

	g_topology_assert();
	gp = cp->geom;
	sc = gp->softc;
	if (sc == NULL)
		return;
	KASSERT(cp == sc->sc_readcons, ("cp=%p sc_readcons=%p", cp,
	    sc->sc_readcons));
	sc->sc_readcons = NULL;
	G_GATE_DEBUG(1, "Destroying read consumer on provider %s orphan.",
	    cp->provider->name);
	(void)g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
}

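/*
 * Export per-device state into the GEOM configuration XML.
 */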
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	sc = g_gate_hold(sc->sc_unit, NULL);
	if (sc == NULL)
		return;
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	if (sc->sc_readcons != NULL) {
		sbuf_printf(sb, "%s<read_offset>%jd</read_offset>\n",
		    indent, (intmax_t)sc->sc_readoffset);
		sbuf_printf(sb, "%s<read_provider>%s</read_provider>\n",
		    indent, sc->sc_readcons->provider->name);
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}

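/*
 * Handle G_GATE_CMD_CREATE: validate the request, allocate a unit and,
 * when a read provider was given, attach to it, then create and announce
 * the new provider.
 */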
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp, *ropp;
	struct g_consumer *cp;
	char name[NAME_MAX];
	int error = 0, unit;

	if (ggio->gctl_mediasize <= 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if (ggio->gctl_sectorsize <= 0) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if (!powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
	    ggio->gctl_unit != G_GATE_NAME_GIVEN &&
	    ggio->gctl_unit < 0) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
	    ggio->gctl_name[0] == '\0') {
		G_GATE_DEBUG(1, "No device name.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
	sc->sc_seq = 1;
	bioq_init(&sc->sc_inqueue);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);

	mtx_lock(&g_gate_units_lock);
	sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
	if (sc->sc_unit < 0)
		goto fail1;
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
		snprintf(name, sizeof(name), "%s", ggio->gctl_name);
	else {
		snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
		    sc->sc_unit);
	}
	/* Check for name collision. */
	for (unit = 0; unit < g_gate_maxunits; unit++) {
		if (g_gate_units[unit] == NULL)
			continue;
		if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
			continue;
		error = EEXIST;
		goto fail1;
	}
	sc->sc_name = name;
	g_gate_units[sc->sc_unit] = sc;
	g_gate_nunits++;
	mtx_unlock(&g_gate_units_lock);

	g_topology_lock();

	if (ggio->gctl_readprov[0] == '\0') {
		ropp = NULL;
	} else {
		ropp = g_provider_by_name(ggio->gctl_readprov);
		if (ropp == NULL) {
			G_GATE_DEBUG(1, "Provider %s doesn't exist.",
			    ggio->gctl_readprov);
			error = EINVAL;
			goto fail2;
		}
		if ((ggio->gctl_readoffset % ggio->gctl_sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			error = EINVAL;
			goto fail2;
		}
		if (ggio->gctl_mediasize + ggio->gctl_readoffset >
		    ropp->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			error = EINVAL;
			goto fail2;
		}
	}

	gp = g_new_geomf(&g_gate_class, "%s", name);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->orphan = g_gate_orphan;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;

	if (ropp != NULL) {
		cp = g_new_consumer(gp);
		error = g_attach(cp, ropp);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to attach to %s.", ropp->name);
			goto fail3;
		}
		error = g_access(cp, 1, 0, 0);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to access %s.", ropp->name);
			g_detach(cp);
			goto fail3;
		}
		sc->sc_readcons = cp;
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	ggio->gctl_unit = sc->sc_unit;

	pp = g_new_providerf(gp, "%s", name);
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);

	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	sc->sc_name = sc->sc_provider->name;
	mtx_unlock(&g_gate_units_lock);
	G_GATE_DEBUG(1, "Device %s created.", gp->name);

	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
fail3:
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
fail2:
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
fail1:
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	free(sc, M_GATE);
	return (error);
}

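/*
 * Handle G_GATE_CMD_MODIFY: update the info string, the read provider
 * and/or the read offset of an existing device.  Changing the media
 * size is not implemented yet.
 */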
static int
g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	int error;

	if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) {
		if (ggio->gctl_mediasize <= 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_mediasize % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		/* TODO */
		return (EOPNOTSUPP);
	}

	if ((ggio->gctl_modify & GG_MODIFY_INFO) != 0)
		(void)strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));

	cp = NULL;

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		g_topology_lock();
		if (sc->sc_readcons != NULL) {
			cp = sc->sc_readcons;
			sc->sc_readcons = NULL;
			(void)g_access(cp, -1, 0, 0);
			g_detach(cp);
			g_destroy_consumer(cp);
			cp = NULL;
		}
		if (ggio->gctl_readprov[0] != '\0') {
			pp = g_provider_by_name(ggio->gctl_readprov);
			if (pp == NULL) {
				g_topology_unlock();
				G_GATE_DEBUG(1, "Provider %s doesn't exist.",
				    ggio->gctl_readprov);
				return (EINVAL);
			}
			cp = g_new_consumer(sc->sc_provider->geom);
			error = g_attach(cp, pp);
			if (error != 0) {
				G_GATE_DEBUG(1, "Unable to attach to %s.",
				    pp->name);
			} else {
				error = g_access(cp, 1, 0, 0);
				if (error != 0) {
					G_GATE_DEBUG(1, "Unable to access %s.",
					    pp->name);
					g_detach(cp);
				}
			}
			if (error != 0) {
				g_destroy_consumer(cp);
				g_topology_unlock();
				return (error);
			}
		}
	} else {
		cp = sc->sc_readcons;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READOFFSET) != 0) {
		if (cp == NULL) {
			G_GATE_DEBUG(1, "No read provider.");
			error = EINVAL;
			goto fail;
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_readoffset % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			error = EINVAL;
			goto fail;
		}
		if (pp->mediasize + ggio->gctl_readoffset >
		    cp->provider->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			error = EINVAL;
			goto fail;
		}
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		sc->sc_readcons = cp;
		g_topology_unlock();
	}

	return (0);
fail:
	/*
	 * Undo a freshly attached read consumer and drop the topology
	 * lock, which is still held when GG_MODIFY_READPROV was requested.
	 */
	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		if (cp != NULL) {
			(void)g_access(cp, -1, 0, 0);
			g_detach(cp);
			g_destroy_consumer(cp);
		}
		g_topology_unlock();
	}
	return (error);
}

#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION) {			\
		printf("Version mismatch %d != %d.\n",			\
		    (ggio)->gctl_version, G_GATE_VERSION);		\
		return (EINVAL);					\
	}								\
} while (0)
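
/*
 * Control-device ioctl handler; implements the protocol spoken by the
 * userland ggatec(8), ggated(8) and ggatel(8) utilities.
 */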
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		error = g_gate_create(ggio);
		/*
		 * Reset the TDP_GEOM flag.  Events are certainly pending,
		 * because we have just created a new provider which other
		 * classes want to taste, but we cannot service any I/O
		 * requests until we return to userland.
		 */
		td->td_pflags &= ~TDP_GEOM;
		return (error);
	    }
	case G_GATE_CMD_MODIFY:
	    {
		struct g_gate_ctl_modify *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = g_gate_modify(sc, ggio);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_units_lock);
		error = g_gate_destroy(sc, ggio->gctl_force);
		g_topology_unlock();
		if (error != 0)
			g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_CANCEL:
	    {
		struct g_gate_ctl_cancel *ggio = (void *)addr;
		struct bio *tbp, *lbp;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		lbp = NULL;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
			if (ggio->gctl_seq == 0 ||
			    ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
				G_GATE_LOGREQ(1, bp, "Request canceled.");
				bioq_remove(&sc->sc_outqueue, bp);
				/*
				 * Be sure to put requests back onto the
				 * incoming queue in the proper order.
				 */
				if (lbp == NULL)
					bioq_insert_head(&sc->sc_inqueue, bp);
				else {
					TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
					    lbp, bp, bio_queue);
				}
				lbp = bp;
				/*
				 * If only one request was canceled, leave now.
				 */
				if (ggio->gctl_seq != 0)
					break;
			}
		}
		if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
			ggio->gctl_unit = sc->sc_unit;
		mtx_unlock(&sc->sc_queue_mtx);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = 0;
		for (;;) {
			mtx_lock(&sc->sc_queue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				ggio->gctl_error = ECANCELED;
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			if (msleep(sc, &sc->sc_queue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				ggio->gctl_error = ECANCELED;
				goto start_end;
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		if (bp->bio_cmd == BIO_WRITE &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_queue_mtx);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			goto start_end;
		}
		bioq_remove(&sc->sc_inqueue, bp);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;

		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_DELETE:
		case BIO_FLUSH:
			break;
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				mtx_lock(&sc->sc_queue_mtx);
				bioq_remove(&sc->sc_outqueue, bp);
				bioq_insert_head(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			break;
		}
start_end:
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENOENT);
		error = 0;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			sc->sc_queue_count--;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp == NULL) {
			/*
			 * The request was probably canceled.
			 */
			goto done_end;
		}
		if (ggio->gctl_error == EAGAIN) {
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			mtx_lock(&sc->sc_queue_mtx);
			sc->sc_queue_count++;
			bioq_insert_head(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
				case BIO_FLUSH:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
done_end:
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}

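/*
 * Create the control device node used by the userland utilities.
 */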
static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}

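/*
 * Module event handler; unloading is refused while any device exists.
 */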
static int
g_gate_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
		g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]),
		    M_GATE, M_WAITOK | M_ZERO);
		g_gate_nunits = 0;
		g_gate_device();
		break;
	case MOD_UNLOAD:
		mtx_lock(&g_gate_units_lock);
		if (g_gate_nunits > 0) {
			mtx_unlock(&g_gate_units_lock);
			error = EBUSY;
			break;
		}
		mtx_unlock(&g_gate_units_lock);
		mtx_destroy(&g_gate_units_lock);
		if (status_dev != NULL)
			destroy_dev(status_dev);
		free(g_gate_units, M_GATE);
		break;
	default:
		return (EOPNOTSUPP);
	}

	return (error);
}

static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);