/* g_gate.c, revision 141972 */
/*-
 * Copyright (c) 2004 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
25128760Spjd * 26128760Spjd * $FreeBSD: head/sys/geom/gate/g_gate.c 141972 2005-02-16 16:12:28Z pjd $ 27128760Spjd */ 28128760Spjd 29128760Spjd#include <sys/param.h> 30128760Spjd#include <sys/systm.h> 31128760Spjd#include <sys/bio.h> 32128760Spjd#include <sys/conf.h> 33128760Spjd#include <sys/kernel.h> 34128760Spjd#include <sys/kthread.h> 35128760Spjd#include <sys/fcntl.h> 36128760Spjd#include <sys/linker.h> 37128760Spjd#include <sys/lock.h> 38128760Spjd#include <sys/malloc.h> 39128760Spjd#include <sys/mutex.h> 40128760Spjd#include <sys/proc.h> 41128760Spjd#include <sys/limits.h> 42128760Spjd#include <sys/queue.h> 43128760Spjd#include <sys/sysctl.h> 44128760Spjd#include <sys/signalvar.h> 45128760Spjd#include <sys/time.h> 46128760Spjd#include <machine/atomic.h> 47128760Spjd 48128760Spjd#include <geom/geom.h> 49128760Spjd#include <geom/gate/g_gate.h> 50128760Spjd 51128760Spjdstatic MALLOC_DEFINE(M_GATE, "gg data", "GEOM Gate Data"); 52128760Spjd 53128760SpjdSYSCTL_DECL(_kern_geom); 54128760SpjdSYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0, "GEOM_GATE stuff"); 55128889Spjdstatic u_int g_gate_debug = 0; 56128760SpjdSYSCTL_UINT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0, 57128760Spjd "Debug level"); 58128760Spjd 59128760Spjdstatic int g_gate_destroy_geom(struct gctl_req *, struct g_class *, 60128760Spjd struct g_geom *); 61128760Spjdstruct g_class g_gate_class = { 62128760Spjd .name = G_GATE_CLASS_NAME, 63133318Sphk .version = G_VERSION, 64128760Spjd .destroy_geom = g_gate_destroy_geom 65128760Spjd}; 66128760Spjd 67130585Sphkstatic struct cdev *status_dev; 68128760Spjdstatic d_ioctl_t g_gate_ioctl; 69128760Spjdstatic struct cdevsw g_gate_cdevsw = { 70128760Spjd .d_version = D_VERSION, 71128760Spjd .d_ioctl = g_gate_ioctl, 72128760Spjd .d_name = G_GATE_CTL_NAME 73128760Spjd}; 74128760Spjd 75128760Spjd 76128760Spjdstatic LIST_HEAD(, g_gate_softc) g_gate_list = 77128760Spjd LIST_HEAD_INITIALIZER(&g_gate_list); 78128760Spjdstatic struct mtx 
g_gate_list_mtx; 79128760Spjd 80128760Spjd 81128760Spjdstatic void 82128760Spjdg_gate_wither(struct g_gate_softc *sc) 83128760Spjd{ 84128760Spjd 85128835Spjd atomic_set_32(&sc->sc_flags, G_GATE_FLAG_DESTROY); 86128760Spjd} 87128760Spjd 88128760Spjdstatic int 89128760Spjdg_gate_destroy(struct g_gate_softc *sc, boolean_t force) 90128760Spjd{ 91128760Spjd struct g_provider *pp; 92128760Spjd struct bio *bp; 93128760Spjd 94128760Spjd g_topology_assert(); 95128760Spjd mtx_assert(&g_gate_list_mtx, MA_OWNED); 96128760Spjd pp = sc->sc_provider; 97128760Spjd if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 98128760Spjd mtx_unlock(&g_gate_list_mtx); 99128760Spjd return (EBUSY); 100128760Spjd } 101128760Spjd if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) { 102128760Spjd g_gate_wither(sc); 103128760Spjd LIST_REMOVE(sc, sc_next); 104128760Spjd } 105128760Spjd mtx_unlock(&g_gate_list_mtx); 106141561Spjd mtx_lock(&sc->sc_queue_mtx); 107128760Spjd wakeup(sc); 108141561Spjd mtx_unlock(&sc->sc_queue_mtx); 109128760Spjd if (sc->sc_ref > 0) { 110128760Spjd G_GATE_DEBUG(1, "Cannot destroy %s yet.", sc->sc_name); 111128760Spjd return (0); 112128760Spjd } 113128760Spjd callout_drain(&sc->sc_callout); 114141561Spjd mtx_lock(&sc->sc_queue_mtx); 115128760Spjd for (;;) { 116128760Spjd bp = bioq_first(&sc->sc_inqueue); 117128760Spjd if (bp != NULL) { 118128760Spjd bioq_remove(&sc->sc_inqueue, bp); 119141561Spjd sc->sc_queue_count--; 120128760Spjd G_GATE_LOGREQ(1, bp, "Request canceled."); 121128760Spjd g_io_deliver(bp, ENXIO); 122128760Spjd } else { 123128760Spjd break; 124128760Spjd } 125128760Spjd } 126128760Spjd for (;;) { 127128760Spjd bp = bioq_first(&sc->sc_outqueue); 128128760Spjd if (bp != NULL) { 129128760Spjd bioq_remove(&sc->sc_outqueue, bp); 130141561Spjd sc->sc_queue_count--; 131128760Spjd G_GATE_LOGREQ(1, bp, "Request canceled."); 132128760Spjd g_io_deliver(bp, ENXIO); 133128760Spjd } else { 134128760Spjd break; 135128760Spjd } 136128760Spjd } 137141561Spjd 
mtx_destroy(&sc->sc_queue_mtx); 138128760Spjd G_GATE_DEBUG(0, "Device %s destroyed.", sc->sc_name); 139128760Spjd pp->geom->softc = NULL; 140128760Spjd g_wither_geom(pp->geom, ENXIO); 141128760Spjd sc->sc_provider = NULL; 142128760Spjd free(sc, M_GATE); 143128760Spjd return (0); 144128760Spjd} 145128760Spjd 146128760Spjdstatic void 147128760Spjdg_gate_destroy_it(void *arg, int flag __unused) 148128760Spjd{ 149128760Spjd struct g_gate_softc *sc; 150128760Spjd 151128760Spjd g_topology_assert(); 152128760Spjd sc = arg; 153128760Spjd mtx_lock(&g_gate_list_mtx); 154128760Spjd g_gate_destroy(sc, 1); 155128760Spjd} 156128760Spjd 157128760Spjdstatic int 158128760Spjdg_gate_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp) 159128760Spjd{ 160128760Spjd 161128760Spjd g_topology_assert(); 162128760Spjd mtx_lock(&g_gate_list_mtx); 163128760Spjd return (g_gate_destroy(gp->softc, 0)); 164128760Spjd} 165128760Spjd 166128760Spjdstatic int 167128760Spjdg_gate_access(struct g_provider *pp, int dr, int dw, int de) 168128760Spjd{ 169128760Spjd struct g_gate_softc *sc; 170128760Spjd 171128760Spjd if (dr <= 0 && dw <= 0 && de <= 0) 172128760Spjd return (0); 173128760Spjd sc = pp->geom->softc; 174128760Spjd if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) 175128760Spjd return (ENXIO); 176131188Spjd /* XXX: Hack to allow read-only mounts. 
*/ 177131188Spjd#if 0 178128760Spjd if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0) 179128760Spjd return (EPERM); 180131188Spjd#endif 181128760Spjd if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0) 182128760Spjd return (EPERM); 183128760Spjd return (0); 184128760Spjd} 185128760Spjd 186128760Spjdstatic void 187128760Spjdg_gate_start(struct bio *bp) 188128760Spjd{ 189128760Spjd struct g_gate_softc *sc; 190128760Spjd 191128760Spjd sc = bp->bio_to->geom->softc; 192128760Spjd if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) { 193128760Spjd g_io_deliver(bp, ENXIO); 194128760Spjd return; 195128760Spjd } 196128760Spjd G_GATE_LOGREQ(2, bp, "Request received."); 197128760Spjd switch (bp->bio_cmd) { 198128760Spjd case BIO_READ: 199131188Spjd break; 200128760Spjd case BIO_DELETE: 201128760Spjd case BIO_WRITE: 202131188Spjd /* XXX: Hack to allow read-only mounts. */ 203131188Spjd if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) { 204131188Spjd g_io_deliver(bp, EPERM); 205131188Spjd return; 206131188Spjd } 207128760Spjd break; 208128760Spjd case BIO_GETATTR: 209128760Spjd default: 210128760Spjd G_GATE_LOGREQ(2, bp, "Ignoring request."); 211128760Spjd g_io_deliver(bp, EOPNOTSUPP); 212128760Spjd return; 213128760Spjd } 214128760Spjd 215141561Spjd mtx_lock(&sc->sc_queue_mtx); 216141561Spjd if (sc->sc_queue_count > sc->sc_queue_size) { 217141742Spjd mtx_unlock(&sc->sc_queue_mtx); 218128760Spjd G_GATE_LOGREQ(1, bp, "Queue full, request canceled."); 219128760Spjd g_io_deliver(bp, EIO); 220128760Spjd return; 221128760Spjd } 222141561Spjd 223128760Spjd bp->bio_driver1 = (void *)sc->sc_seq; 224128760Spjd sc->sc_seq++; 225141561Spjd sc->sc_queue_count++; 226128760Spjd 227141312Spjd bioq_insert_tail(&sc->sc_inqueue, bp); 228128957Spjd wakeup(sc); 229141561Spjd 230141561Spjd mtx_unlock(&sc->sc_queue_mtx); 231128760Spjd} 232128760Spjd 233128760Spjdstatic struct g_gate_softc * 234128760Spjdg_gate_find(u_int unit) 235128760Spjd{ 236128760Spjd struct g_gate_softc 
*sc; 237128760Spjd 238128760Spjd mtx_assert(&g_gate_list_mtx, MA_OWNED); 239128760Spjd LIST_FOREACH(sc, &g_gate_list, sc_next) { 240128760Spjd if (sc->sc_unit == unit) 241128760Spjd break; 242128760Spjd } 243128760Spjd return (sc); 244128760Spjd} 245128760Spjd 246128760Spjdstatic struct g_gate_softc * 247128760Spjdg_gate_hold(u_int unit) 248128760Spjd{ 249128760Spjd struct g_gate_softc *sc; 250128760Spjd 251128760Spjd mtx_lock(&g_gate_list_mtx); 252128760Spjd sc = g_gate_find(unit); 253128760Spjd if (sc != NULL) { 254128760Spjd if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) 255128760Spjd sc = NULL; 256128760Spjd else 257128760Spjd sc->sc_ref++; 258128760Spjd } 259128760Spjd mtx_unlock(&g_gate_list_mtx); 260128760Spjd return (sc); 261128760Spjd} 262128760Spjd 263128760Spjdstatic void 264128760Spjdg_gate_release(struct g_gate_softc *sc) 265128760Spjd{ 266128760Spjd 267128760Spjd g_topology_assert_not(); 268128760Spjd mtx_lock(&g_gate_list_mtx); 269128760Spjd sc->sc_ref--; 270128760Spjd KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name)); 271128760Spjd if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) { 272128760Spjd mtx_unlock(&g_gate_list_mtx); 273128760Spjd g_waitfor_event(g_gate_destroy_it, sc, M_WAITOK, NULL); 274128760Spjd } else { 275128760Spjd mtx_unlock(&g_gate_list_mtx); 276128760Spjd } 277128760Spjd} 278128760Spjd 279128760Spjdstatic int 280128760Spjdg_gate_getunit(int unit) 281128760Spjd{ 282128760Spjd struct g_gate_softc *sc; 283128760Spjd 284128760Spjd mtx_assert(&g_gate_list_mtx, MA_OWNED); 285128760Spjd if (unit >= 0) { 286128760Spjd LIST_FOREACH(sc, &g_gate_list, sc_next) { 287128760Spjd if (sc->sc_unit == unit) 288128760Spjd return (-1); 289128760Spjd } 290128760Spjd } else { 291128760Spjd unit = 0; 292128760Spjdonce_again: 293128760Spjd LIST_FOREACH(sc, &g_gate_list, sc_next) { 294128760Spjd if (sc->sc_unit == unit) { 295128760Spjd if (++unit > 666) 296128760Spjd return (-1); 297128760Spjd goto once_again; 
298128760Spjd } 299128760Spjd } 300128760Spjd } 301128760Spjd return (unit); 302128760Spjd} 303128760Spjd 304128760Spjdstatic void 305128760Spjdg_gate_guard(void *arg) 306128760Spjd{ 307128760Spjd struct g_gate_softc *sc; 308128760Spjd struct bintime curtime; 309128760Spjd struct bio *bp, *bp2; 310128760Spjd 311128760Spjd sc = arg; 312128760Spjd binuptime(&curtime); 313128760Spjd g_gate_hold(sc->sc_unit); 314141561Spjd mtx_lock(&sc->sc_queue_mtx); 315128760Spjd TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) { 316128760Spjd if (curtime.sec - bp->bio_t0.sec < 5) 317128760Spjd continue; 318128760Spjd bioq_remove(&sc->sc_inqueue, bp); 319141561Spjd sc->sc_queue_count--; 320128760Spjd G_GATE_LOGREQ(1, bp, "Request timeout."); 321128760Spjd g_io_deliver(bp, EIO); 322128760Spjd } 323128760Spjd TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) { 324128760Spjd if (curtime.sec - bp->bio_t0.sec < 5) 325128760Spjd continue; 326128760Spjd bioq_remove(&sc->sc_outqueue, bp); 327141561Spjd sc->sc_queue_count--; 328128760Spjd G_GATE_LOGREQ(1, bp, "Request timeout."); 329128760Spjd g_io_deliver(bp, EIO); 330128760Spjd } 331141561Spjd mtx_unlock(&sc->sc_queue_mtx); 332128760Spjd if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) { 333128760Spjd callout_reset(&sc->sc_callout, sc->sc_timeout * hz, 334128760Spjd g_gate_guard, sc); 335128760Spjd } 336128760Spjd g_gate_release(sc); 337128760Spjd} 338128760Spjd 339128760Spjdstatic void 340128760Spjdg_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 341128760Spjd struct g_consumer *cp, struct g_provider *pp) 342128760Spjd{ 343128760Spjd struct g_gate_softc *sc; 344128760Spjd 345128760Spjd sc = gp->softc; 346128760Spjd if (sc == NULL || pp != NULL || cp != NULL) 347128760Spjd return; 348128760Spjd g_gate_hold(sc->sc_unit); 349128760Spjd if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) { 350128760Spjd sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only"); 351128760Spjd } else if 
((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) { 352128760Spjd sbuf_printf(sb, "%s<access>%s</access>\n", indent, 353128760Spjd "write-only"); 354128760Spjd } else { 355128760Spjd sbuf_printf(sb, "%s<access>%s</access>\n", indent, 356128760Spjd "read-write"); 357128760Spjd } 358128760Spjd sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout); 359128760Spjd sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info); 360128760Spjd sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent, 361128760Spjd sc->sc_queue_count); 362128760Spjd sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent, 363128760Spjd sc->sc_queue_size); 364128760Spjd sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref); 365130836Spjd g_topology_unlock(); 366128760Spjd g_gate_release(sc); 367130836Spjd g_topology_lock(); 368128760Spjd} 369128760Spjd 370128760Spjdstatic int 371128760Spjdg_gate_create(struct g_gate_ctl_create *ggio) 372128760Spjd{ 373128760Spjd struct g_gate_softc *sc; 374128760Spjd struct g_geom *gp; 375128760Spjd struct g_provider *pp; 376128760Spjd 377128760Spjd if (ggio->gctl_mediasize == 0) { 378128760Spjd G_GATE_DEBUG(1, "Invalid media size."); 379128760Spjd return (EINVAL); 380128760Spjd } 381128760Spjd if (ggio->gctl_sectorsize > 0 && !powerof2(ggio->gctl_sectorsize)) { 382128760Spjd G_GATE_DEBUG(1, "Invalid sector size."); 383128760Spjd return (EINVAL); 384128760Spjd } 385141312Spjd if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) { 386141312Spjd G_GATE_DEBUG(1, "Invalid media size."); 387141312Spjd return (EINVAL); 388141312Spjd } 389128760Spjd if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 && 390128760Spjd (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) { 391128760Spjd G_GATE_DEBUG(1, "Invalid flags."); 392128760Spjd return (EINVAL); 393128760Spjd } 394128760Spjd if (ggio->gctl_unit < -1) { 395128760Spjd G_GATE_DEBUG(1, "Invalid unit number."); 396128760Spjd return (EINVAL); 397128760Spjd } 398128760Spjd 399128760Spjd sc = 
malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO); 400128760Spjd sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS); 401128760Spjd strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info)); 402128760Spjd sc->sc_seq = 0; 403128760Spjd bioq_init(&sc->sc_inqueue); 404128760Spjd bioq_init(&sc->sc_outqueue); 405141561Spjd mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF); 406128760Spjd sc->sc_queue_count = 0; 407128760Spjd sc->sc_queue_size = ggio->gctl_maxcount; 408128760Spjd if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE) 409128760Spjd sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE; 410128760Spjd sc->sc_timeout = ggio->gctl_timeout; 411128881Spjd callout_init(&sc->sc_callout, CALLOUT_MPSAFE); 412128760Spjd mtx_lock(&g_gate_list_mtx); 413128881Spjd ggio->gctl_unit = g_gate_getunit(ggio->gctl_unit); 414128881Spjd if (ggio->gctl_unit == -1) { 415136056Spjd mtx_unlock(&g_gate_list_mtx); 416141561Spjd mtx_destroy(&sc->sc_queue_mtx); 417128881Spjd free(sc, M_GATE); 418128881Spjd return (EBUSY); 419128881Spjd } 420128760Spjd sc->sc_unit = ggio->gctl_unit; 421128760Spjd LIST_INSERT_HEAD(&g_gate_list, sc, sc_next); 422128760Spjd mtx_unlock(&g_gate_list_mtx); 423128760Spjd 424128760Spjd g_topology_lock(); 425128760Spjd gp = g_new_geomf(&g_gate_class, "%s%d", G_GATE_PROVIDER_NAME, 426128760Spjd sc->sc_unit); 427128760Spjd gp->start = g_gate_start; 428128760Spjd gp->access = g_gate_access; 429128760Spjd gp->dumpconf = g_gate_dumpconf; 430128760Spjd gp->softc = sc; 431128760Spjd pp = g_new_providerf(gp, "%s%d", G_GATE_PROVIDER_NAME, sc->sc_unit); 432128760Spjd pp->mediasize = ggio->gctl_mediasize; 433128760Spjd pp->sectorsize = ggio->gctl_sectorsize; 434128760Spjd sc->sc_provider = pp; 435128760Spjd g_error_provider(pp, 0); 436128760Spjd g_topology_unlock(); 437128760Spjd 438128760Spjd if (sc->sc_timeout > 0) { 439128760Spjd callout_reset(&sc->sc_callout, sc->sc_timeout * hz, 440128760Spjd g_gate_guard, sc); 441128760Spjd } 442128760Spjd return (0); 443128760Spjd} 444128760Spjd 
445128760Spjd#define G_GATE_CHECK_VERSION(ggio) do { \ 446128760Spjd if ((ggio)->gctl_version != G_GATE_VERSION) \ 447128760Spjd return (EINVAL); \ 448128760Spjd} while (0) 449128760Spjdstatic int 450130585Sphkg_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 451128760Spjd{ 452128760Spjd struct g_gate_softc *sc; 453128760Spjd struct bio *bp; 454128760Spjd int error = 0; 455128760Spjd 456128760Spjd G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr, 457128760Spjd flags, td); 458128760Spjd 459128760Spjd switch (cmd) { 460128760Spjd case G_GATE_CMD_CREATE: 461128760Spjd { 462128760Spjd struct g_gate_ctl_create *ggio = (void *)addr; 463128760Spjd 464128760Spjd G_GATE_CHECK_VERSION(ggio); 465138014Spjd error = g_gate_create(ggio); 466141972Spjd /* 467141972Spjd * Reset TDP_GEOM flag. 468141972Spjd * There are pending events for sure, because we just created 469141972Spjd * new provider and other classes want to taste it, but we 470141972Spjd * cannot answer on I/O requests until we're here. 
471141972Spjd */ 472141972Spjd td->td_pflags &= ~TDP_GEOM; 473138014Spjd return (error); 474128760Spjd } 475128760Spjd case G_GATE_CMD_DESTROY: 476128760Spjd { 477128760Spjd struct g_gate_ctl_destroy *ggio = (void *)addr; 478128760Spjd 479128760Spjd G_GATE_CHECK_VERSION(ggio); 480128760Spjd sc = g_gate_hold(ggio->gctl_unit); 481128760Spjd if (sc == NULL) 482128760Spjd return (ENXIO); 483128760Spjd g_topology_lock(); 484128760Spjd mtx_lock(&g_gate_list_mtx); 485128760Spjd error = g_gate_destroy(sc, ggio->gctl_force); 486128760Spjd if (error == 0) 487128760Spjd g_gate_wither(sc); 488128760Spjd g_topology_unlock(); 489128760Spjd g_gate_release(sc); 490128760Spjd return (error); 491128760Spjd } 492128760Spjd case G_GATE_CMD_START: 493128760Spjd { 494128760Spjd struct g_gate_ctl_io *ggio = (void *)addr; 495128760Spjd 496128760Spjd G_GATE_CHECK_VERSION(ggio); 497141561Spjd sc = g_gate_find(ggio->gctl_unit); 498128760Spjd if (sc == NULL) 499128760Spjd return (ENXIO); 500128760Spjd for (;;) { 501141561Spjd mtx_lock(&sc->sc_queue_mtx); 502128760Spjd bp = bioq_first(&sc->sc_inqueue); 503128760Spjd if (bp != NULL) 504128760Spjd break; 505141561Spjd if (msleep(sc, &sc->sc_queue_mtx, 506128760Spjd PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) { 507128760Spjd ggio->gctl_error = ECANCELED; 508128760Spjd return (0); 509128760Spjd } 510128760Spjd if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) { 511128760Spjd ggio->gctl_error = ECANCELED; 512128760Spjd return (0); 513128760Spjd } 514128760Spjd } 515128881Spjd ggio->gctl_cmd = bp->bio_cmd; 516128760Spjd if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) && 517128760Spjd bp->bio_length > ggio->gctl_length) { 518141561Spjd mtx_unlock(&sc->sc_queue_mtx); 519128760Spjd ggio->gctl_length = bp->bio_length; 520128760Spjd ggio->gctl_error = ENOMEM; 521128760Spjd return (0); 522128760Spjd } 523128760Spjd bioq_remove(&sc->sc_inqueue, bp); 524141561Spjd bioq_insert_tail(&sc->sc_outqueue, bp); 525141561Spjd mtx_unlock(&sc->sc_queue_mtx); 
526141561Spjd 527128835Spjd ggio->gctl_seq = (uintptr_t)bp->bio_driver1; 528128760Spjd ggio->gctl_offset = bp->bio_offset; 529128760Spjd ggio->gctl_length = bp->bio_length; 530128760Spjd switch (bp->bio_cmd) { 531128760Spjd case BIO_READ: 532128760Spjd break; 533128760Spjd case BIO_DELETE: 534128760Spjd case BIO_WRITE: 535128760Spjd error = copyout(bp->bio_data, ggio->gctl_data, 536128760Spjd bp->bio_length); 537128760Spjd if (error != 0) { 538141561Spjd mtx_lock(&sc->sc_queue_mtx); 539141561Spjd bioq_remove(&sc->sc_outqueue, bp); 540141312Spjd bioq_insert_head(&sc->sc_inqueue, bp); 541141561Spjd mtx_unlock(&sc->sc_queue_mtx); 542128760Spjd return (error); 543128760Spjd } 544128760Spjd break; 545128760Spjd } 546128760Spjd return (0); 547128760Spjd } 548128760Spjd case G_GATE_CMD_DONE: 549128760Spjd { 550128760Spjd struct g_gate_ctl_io *ggio = (void *)addr; 551128760Spjd 552128760Spjd G_GATE_CHECK_VERSION(ggio); 553141561Spjd sc = g_gate_find(ggio->gctl_unit); 554128760Spjd if (sc == NULL) 555128760Spjd return (ENOENT); 556141561Spjd mtx_lock(&sc->sc_queue_mtx); 557128760Spjd TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) { 558128835Spjd if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1) 559128760Spjd break; 560128760Spjd } 561128760Spjd if (bp != NULL) { 562128760Spjd bioq_remove(&sc->sc_outqueue, bp); 563141561Spjd sc->sc_queue_count--; 564128760Spjd } 565141561Spjd mtx_unlock(&sc->sc_queue_mtx); 566128760Spjd if (bp == NULL) { 567128760Spjd /* 568128760Spjd * Request was probably canceled. 
569128760Spjd */ 570128760Spjd return (0); 571128760Spjd } 572128760Spjd if (ggio->gctl_error == EAGAIN) { 573128760Spjd bp->bio_error = 0; 574128760Spjd G_GATE_LOGREQ(1, bp, "Request desisted."); 575141561Spjd mtx_lock(&sc->sc_queue_mtx); 576141561Spjd sc->sc_queue_count++; 577141312Spjd bioq_insert_head(&sc->sc_inqueue, bp); 578128957Spjd wakeup(sc); 579141561Spjd mtx_unlock(&sc->sc_queue_mtx); 580128760Spjd } else { 581128760Spjd bp->bio_error = ggio->gctl_error; 582128760Spjd if (bp->bio_error == 0) { 583128760Spjd bp->bio_completed = bp->bio_length; 584128760Spjd switch (bp->bio_cmd) { 585128760Spjd case BIO_READ: 586128760Spjd error = copyin(ggio->gctl_data, 587128760Spjd bp->bio_data, bp->bio_length); 588128760Spjd if (error != 0) 589128760Spjd bp->bio_error = error; 590128760Spjd break; 591128760Spjd case BIO_DELETE: 592128760Spjd case BIO_WRITE: 593128760Spjd break; 594128760Spjd } 595128760Spjd } 596128760Spjd G_GATE_LOGREQ(2, bp, "Request done."); 597128760Spjd g_io_deliver(bp, bp->bio_error); 598128760Spjd } 599128760Spjd return (error); 600128760Spjd } 601128760Spjd } 602128760Spjd return (ENOIOCTL); 603128760Spjd} 604128760Spjd 605128760Spjdstatic void 606131411Spjdg_gate_device(void) 607128760Spjd{ 608128760Spjd 609128760Spjd status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600, 610128760Spjd G_GATE_CTL_NAME); 611128760Spjd} 612128760Spjd 613128760Spjdstatic int 614128760Spjdg_gate_modevent(module_t mod, int type, void *data) 615128760Spjd{ 616128760Spjd int error = 0; 617128760Spjd 618128760Spjd switch (type) { 619128760Spjd case MOD_LOAD: 620128760Spjd mtx_init(&g_gate_list_mtx, "gg_list_lock", NULL, MTX_DEF); 621131411Spjd g_gate_device(); 622128760Spjd break; 623128760Spjd case MOD_UNLOAD: 624128760Spjd mtx_lock(&g_gate_list_mtx); 625128760Spjd if (!LIST_EMPTY(&g_gate_list)) { 626128760Spjd mtx_unlock(&g_gate_list_mtx); 627128760Spjd error = EBUSY; 628128760Spjd break; 629128760Spjd } 630128760Spjd mtx_unlock(&g_gate_list_mtx); 
631128760Spjd mtx_destroy(&g_gate_list_mtx); 632128760Spjd if (status_dev != 0) 633128760Spjd destroy_dev(status_dev); 634128760Spjd break; 635128760Spjd default: 636132199Sphk return (EOPNOTSUPP); 637128760Spjd break; 638128760Spjd } 639128760Spjd 640128760Spjd return (error); 641128760Spjd} 642128760Spjdstatic moduledata_t g_gate_module = { 643128760Spjd G_GATE_MOD_NAME, 644128760Spjd g_gate_modevent, 645128760Spjd NULL 646128760Spjd}; 647128760SpjdDECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE); 648128760SpjdDECLARE_GEOM_CLASS(g_gate_class, g_gate); 649