/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/geom/gate/g_gate.c 201145 2009-12-28 22:56:30Z antoine $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0, "GEOM_GATE stuff");
static u_int g_gate_debug = 0;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
    "Debug level");

struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};

static LIST_HEAD(, g_gate_softc) g_gate_list =
    LIST_HEAD_INITIALIZER(g_gate_list);
static struct mtx g_gate_list_mtx;

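/*
 * Tear down a gate device.  The caller must hold both the topology lock
 * and g_gate_list_mtx; the list lock is always dropped before return.
 * Pending requests on both queues are completed with ENXIO, and the
 * softc is freed once every other reference has been released.
 */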
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct g_provider *pp;
	struct g_geom *gp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_list_mtx, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_list_mtx);
		return (EBUSY);
	}
	mtx_unlock(&g_gate_list_mtx);
	mtx_lock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
		sc->sc_flags |= G_GATE_FLAG_DESTROY;
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	gp = pp->geom;
	pp->flags |= G_PF_WITHER;
	g_orphan_provider(pp, ENXIO);
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_queue_mtx);
	for (;;) {
		bp = bioq_first(&sc->sc_inqueue);
		if (bp == NULL)
			break;
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	for (;;) {
		bp = bioq_first(&sc->sc_outqueue);
		if (bp == NULL)
			break;
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_topology_unlock();
	mtx_lock(&g_gate_list_mtx);
	/* One reference is ours. */
	sc->sc_ref--;
	while (sc->sc_ref > 0)
		msleep(&sc->sc_ref, &g_gate_list_mtx, 0, "gg:destroy", 0);
	LIST_REMOVE(sc, sc_next);
	mtx_unlock(&g_gate_list_mtx);
	mtx_destroy(&sc->sc_queue_mtx);
	g_topology_lock();
	G_GATE_DEBUG(0, "Device %s destroyed.", gp->name);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}

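/*
 * GEOM access method.  Deny new readers on write-only devices; the
 * read-only check is disabled here (see the #if 0 below) and enforced
 * per-request in g_gate_start() instead, so read-only mounts still work.
 */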
static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_gate_softc *sc;

	if (dr <= 0 && dw <= 0 && de <= 0)
		return (0);
	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		return (ENXIO);
	/* XXX: Hack to allow read-only mounts. */
#if 0
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
		return (EPERM);
#endif
	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
		return (EPERM);
	return (0);
}

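/*
 * GEOM start method.  Each accepted request is tagged with a sequence
 * number (stashed in bio_driver1) and queued on sc_inqueue, where the
 * userland daemon picks it up via G_GATE_CMD_START.
 */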
static void
g_gate_start(struct bio *bp)
{
	struct g_gate_softc *sc;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(bp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, bp, "Ignoring request.");
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	mtx_lock(&sc->sc_queue_mtx);
	if (sc->sc_queue_count > sc->sc_queue_size) {
		mtx_unlock(&sc->sc_queue_mtx);
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, EIO);
		return;
	}

	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;
	sc->sc_queue_count++;

	bioq_insert_tail(&sc->sc_inqueue, bp);
	wakeup(sc);

	mtx_unlock(&sc->sc_queue_mtx);
}

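/*
 * Look up the softc for the given unit and take a reference on it.
 * Returns NULL if no such unit exists.
 */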
static struct g_gate_softc *
g_gate_hold(u_int unit)
{
	struct g_gate_softc *sc;

	mtx_lock(&g_gate_list_mtx);
	LIST_FOREACH(sc, &g_gate_list, sc_next) {
		if (sc->sc_unit == unit)
			break;
	}
	if (sc != NULL)
		sc->sc_ref++;
	mtx_unlock(&g_gate_list_mtx);
	return (sc);
}

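/*
 * Drop a reference obtained with g_gate_hold().  The last reference on
 * a dying device wakes up g_gate_destroy(), which sleeps on sc_ref.
 */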
static void
g_gate_release(struct g_gate_softc *sc)
{

	g_topology_assert_not();
	mtx_lock(&g_gate_list_mtx);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		wakeup(&sc->sc_ref);
	mtx_unlock(&g_gate_list_mtx);
}

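/*
 * Validate a caller-supplied unit number, or pick the first free one
 * when the caller passed -1.  Returns -1 if the requested unit is
 * already taken or no free unit (up to 666) could be found.
 */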
static int
g_gate_getunit(int unit)
{
	struct g_gate_softc *sc;

	mtx_assert(&g_gate_list_mtx, MA_OWNED);
	if (unit >= 0) {
		LIST_FOREACH(sc, &g_gate_list, sc_next) {
			if (sc->sc_unit == unit)
				return (-1);
		}
	} else {
		unit = 0;
once_again:
		LIST_FOREACH(sc, &g_gate_list, sc_next) {
			if (sc->sc_unit == unit) {
				if (++unit > 666)
					return (-1);
				goto once_again;
			}
		}
	}
	return (unit);
}

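/*
 * Watchdog callout.  Completes requests that have been sitting on
 * either queue for five seconds or more with EIO, then re-arms itself
 * every sc_timeout seconds until the device is destroyed.
 */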
static void
g_gate_guard(void *arg)
{
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	g_gate_hold(sc->sc_unit);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}

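/*
 * GEOM dumpconf method: emit the per-device XML fragment that shows up
 * under this geom's node in the kern.geom.confxml output.
 */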
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	g_gate_hold(sc->sc_unit);
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}

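/*
 * Create a new gate device: validate the ioctl arguments, allocate and
 * link the softc, create the GEOM geom and provider, and arm the
 * watchdog callout if a timeout was requested.
 */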
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;

	if (ggio->gctl_mediasize == 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	/*
	 * Reject a zero or negative sector size up front; the modulo
	 * below would otherwise divide by zero.
	 */
	if (ggio->gctl_sectorsize <= 0 || !powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit < -1) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
	sc->sc_seq = 0;
	bioq_init(&sc->sc_inqueue);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
	mtx_lock(&g_gate_list_mtx);
	ggio->gctl_unit = g_gate_getunit(ggio->gctl_unit);
	if (ggio->gctl_unit == -1) {
		mtx_unlock(&g_gate_list_mtx);
		mtx_destroy(&sc->sc_queue_mtx);
		free(sc, M_GATE);
		return (EBUSY);
	}
	sc->sc_unit = ggio->gctl_unit;
	LIST_INSERT_HEAD(&g_gate_list, sc, sc_next);
	mtx_unlock(&g_gate_list_mtx);

	g_topology_lock();
	gp = g_new_geomf(&g_gate_class, "%s%d", G_GATE_PROVIDER_NAME,
	    sc->sc_unit);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;
	pp = g_new_providerf(gp, "%s%d", G_GATE_PROVIDER_NAME, sc->sc_unit);
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();

	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
}

#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION) {			\
		printf("Version mismatch %d != %d.\n",			\
		    (ggio)->gctl_version, G_GATE_VERSION);		\
		return (EINVAL);					\
	}								\
} while (0)

static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
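	/* Create a new gate device from the parameters passed by userland. */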
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		error = g_gate_create(ggio);
		/*
		 * Reset the TDP_GEOM flag.
		 * There are pending events for sure, because we just created
		 * a new provider and other classes want to taste it, but we
		 * cannot answer I/O requests until we return from here.
		 */
		td->td_pflags &= ~TDP_GEOM;
		return (error);
	    }
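	/* Destroy an existing device, optionally even while it is open. */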
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_list_mtx);
		error = g_gate_destroy(sc, ggio->gctl_force);
		g_topology_unlock();
		if (error != 0)
			g_gate_release(sc);
		return (error);
	    }
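	/*
	 * Cancel requests the daemon has already fetched but not completed:
	 * move them from the outgoing queue back to the incoming one so
	 * they will be handed out again.
	 */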
	case G_GATE_CMD_CANCEL:
	    {
		struct g_gate_ctl_cancel *ggio = (void *)addr;
		struct bio *tbp, *lbp;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENXIO);
		lbp = NULL;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
			if (ggio->gctl_seq == 0 ||
			    ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
				G_GATE_LOGREQ(1, bp, "Request canceled.");
				bioq_remove(&sc->sc_outqueue, bp);
				/*
				 * Be sure to put requests back onto incoming
				 * queue in the proper order.
				 */
				if (lbp == NULL)
					bioq_insert_head(&sc->sc_inqueue, bp);
				else {
					TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
					    lbp, bp, bio_queue);
				}
				lbp = bp;
				/*
				 * If only one request was canceled, leave now.
				 */
				if (ggio->gctl_seq != 0)
					break;
			}
		}
		mtx_unlock(&sc->sc_queue_mtx);
		g_gate_release(sc);
		return (error);
	    }
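	/*
	 * Hand the next pending request to the daemon.  For writes and
	 * deletes the payload is copied out to the daemon's buffer; the
	 * request then waits on the outgoing queue for G_GATE_CMD_DONE.
	 */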
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENXIO);
		error = 0;
		for (;;) {
			mtx_lock(&sc->sc_queue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				ggio->gctl_error = ECANCELED;
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			if (msleep(sc, &sc->sc_queue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				ggio->gctl_error = ECANCELED;
				goto start_end;
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_queue_mtx);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			goto start_end;
		}
		bioq_remove(&sc->sc_inqueue, bp);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;

		switch (bp->bio_cmd) {
		case BIO_READ:
			break;
		case BIO_DELETE:
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				mtx_lock(&sc->sc_queue_mtx);
				bioq_remove(&sc->sc_outqueue, bp);
				bioq_insert_head(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			break;
		}
start_end:
		g_gate_release(sc);
		return (error);
	    }
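	/*
	 * Complete a request the daemon has finished.  EAGAIN from the
	 * daemon requeues the request instead of completing it; for
	 * successful reads the data is copied in from the daemon's buffer.
	 */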
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENOENT);
		error = 0;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			sc->sc_queue_count--;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			goto done_end;
		}
		if (ggio->gctl_error == EAGAIN) {
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			mtx_lock(&sc->sc_queue_mtx);
			sc->sc_queue_count++;
			bioq_insert_head(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
done_end:
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}

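/*
 * Create the control device node (G_GATE_CTL_NAME under /dev) that the
 * userland ggate tools open to issue the ioctls above.
 */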
static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}

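/*
 * Module event handler: set up the list lock and control device on load,
 * and refuse to unload while any gate device still exists.
 */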
static int
g_gate_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&g_gate_list_mtx, "gg_list_lock", NULL, MTX_DEF);
		g_gate_device();
		break;
	case MOD_UNLOAD:
		mtx_lock(&g_gate_list_mtx);
		if (!LIST_EMPTY(&g_gate_list)) {
			mtx_unlock(&g_gate_list_mtx);
			error = EBUSY;
			break;
		}
		mtx_unlock(&g_gate_list_mtx);
		mtx_destroy(&g_gate_list_mtx);
		if (status_dev != NULL)
			destroy_dev(status_dev);
		break;
	default:
		return (EOPNOTSUPP);
	}

	return (error);
}

static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);