/* ugen.c revision 1.136 */
1/* $NetBSD: ugen.c,v 1.136 2017/10/25 08:12:39 maya Exp $ */ 2 3/* 4 * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Lennart Augustsson (lennart@augustsson.net) at 9 * Carlstedt Research & Technology. 10 * 11 * Copyright (c) 2006 BBN Technologies Corp. All rights reserved. 12 * Effort sponsored in part by the Defense Advanced Research Projects 13 * Agency (DARPA) and the Department of the Interior National Business 14 * Center under agreement number NBCHC050166. 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 1. Redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer. 21 * 2. Redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.136 2017/10/25 08:12:39 maya Exp $");

#ifdef _KERNEL_OPT
#include "opt_compat_netbsd.h"
#include "opt_usb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/conf.h>
#include <sys/tty.h>
#include <sys/file.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

/* Debug printf wrappers: compiled out unless UGEN_DEBUG is defined. */
#ifdef UGEN_DEBUG
#define DPRINTF(x)	if (ugendebug) printf x
#define DPRINTFN(n,x)	if (ugendebug>(n)) printf x
int	ugendebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define	UGEN_CHUNK	128	/* chunk size for read */
#define	UGEN_IBSIZE	1020	/* buffer size */
#define	UGEN_BBSIZE	1024

#define	UGEN_NISOREQS	4	/* number of outstanding xfer requests */
#define	UGEN_NISORFRMS	8	/* number of transactions per req */
#define	UGEN_NISOFRAMES	(UGEN_NISORFRMS * UGEN_NISOREQS)

#define	UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
#define	UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */

/*
 * One outstanding isochronous request: the xfer, its DMA buffer, and the
 * per-frame sizes handed to usbd_setup_isoc_xfer().
 */
struct isoreq {
	struct ugen_endpoint *sce;	/* back pointer to owning endpoint */
	struct usbd_xfer *xfer;
	void *dmabuf;
	uint16_t sizes[UGEN_NISORFRMS];
};

/*
 * Per-endpoint, per-direction state.  The ibuf/fill/limit/cur pointers
 * implement a circular buffer for isochronous input and for bulk
 * read-ahead/write-behind mode.
 */
struct ugen_endpoint {
	struct ugen_softc *sc;
	usb_endpoint_descriptor_t *edesc;
	struct usbd_interface *iface;
	int state;
#define	UGEN_ASLP	0x02	/* waiting for data */
#define	UGEN_SHORT_OK	0x04	/* short xfers are OK */
#define	UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
#define	UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
#define	UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
	struct usbd_pipe *pipeh;
	struct clist q;		/* input queue for interrupt endpoints */
	u_char *ibuf;		/* start of buffer (circular for isoc) */
	u_char *fill;		/* location for input (isoc) */
	u_char *limit;		/* end of circular buffer (isoc) */
	u_char *cur;		/* current read location (isoc) */
	uint32_t timeout;
	uint32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
	uint32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
	uint32_t ra_wb_used;	/* how much is in buffer */
	uint32_t ra_wb_xferlen; /* current xfer length for RA/WB */
	struct usbd_xfer *ra_wb_xfer;
	struct isoreq isoreqs[UGEN_NISOREQS];
	/* Keep these last; we don't overwrite them in ugen_set_config() */
#define UGEN_ENDPOINT_NONZERO_CRUFT	offsetof(struct ugen_endpoint, rsel)
	struct selinfo rsel;
	kcondvar_t cv;
};

/* Per-device (per attached USB device) softc. */
struct ugen_softc {
	device_t sc_dev;		/* base device */
	struct usbd_device *sc_udev;

	kmutex_t sc_lock;
	kcondvar_t sc_detach_cv;

	char sc_is_open[USB_MAX_ENDPOINTS];
	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
#define OUT 0
#define IN  1

	int sc_refcnt;			/* in-progress I/O operations */
	char sc_buffer[UGEN_BBSIZE];	/* bounce buffer for bulk/intr I/O */
	u_char sc_dying;
};

dev_type_open(ugenopen);
dev_type_close(ugenclose);
dev_type_read(ugenread);
dev_type_write(ugenwrite);
dev_type_ioctl(ugenioctl);
dev_type_poll(ugenpoll);
dev_type_kqfilter(ugenkqfilter);

const struct cdevsw ugen_cdevsw = {
	.d_open = ugenopen,
	.d_close = ugenclose,
	.d_read = ugenread,
	.d_write = ugenwrite,
	.d_ioctl = ugenioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = ugenpoll,
	.d_mmap = nommap,
	.d_kqfilter = ugenkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};

Static void ugenintr(struct usbd_xfer *, void *,
		     usbd_status);
Static void ugen_isoc_rintr(struct usbd_xfer *, void *,
			    usbd_status);
Static void ugen_bulkra_intr(struct usbd_xfer *, void *,
			     usbd_status);
Static void ugen_bulkwb_intr(struct usbd_xfer *, void *,
			     usbd_status);
Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
			 void *, int, struct lwp *);
Static int ugen_set_config(struct ugen_softc *, int);
Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *,
					       int, int *);
Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
Static int ugen_get_alt_index(struct ugen_softc *, int);
Static void ugen_clear_endpoints(struct ugen_softc *);

/* Minor number encodes unit in the high nibble, endpoint in the low. */
#define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n) (minor(n) & 0xf)
#define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))

int	ugen_match(device_t, cfdata_t, void *);
void	ugen_attach(device_t, device_t, void *);
int	ugen_detach(device_t, int);
int	ugen_activate(device_t, enum devact);
extern struct cfdriver ugen_cd;
CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach,
    ugen_detach, ugen_activate);

/* toggle to control attach priority.
   -1 means "let autoconf decide" */
int ugen_override = -1;

/*
 * Autoconfiguration match.  Claim the device at highest priority when
 * forced (by ugen_override or bit 0 of the cfdata flags); otherwise
 * match generically only if the bus asks for a generic driver.
 */
int
ugen_match(device_t parent, cfdata_t match, void *aux)
{
	struct usb_attach_arg *uaa = aux;
	int override;

	if (ugen_override != -1)
		override = ugen_override;
	else
		override = match->cf_flags & 1;

	if (override)
		return UMATCH_HIGHEST;
	else if (uaa->uaa_usegeneric)
		return UMATCH_GENERIC;
	else
		return UMATCH_NONE;
}

/*
 * Attach: initialise locks and per-endpoint selinfo/condvars, then select
 * configuration index 0 and build the endpoint table for it.  On any
 * failure the device is left marked dying (open will return ENXIO).
 */
void
ugen_attach(device_t parent, device_t self, void *aux)
{
	struct ugen_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	struct usbd_device *udev;
	char *devinfop;
	usbd_status err;
	int i, dir, conf;

	aprint_naive("\n");
	aprint_normal("\n");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	cv_init(&sc->sc_detach_cv, "ugendet");

	devinfop = usbd_devinfo_alloc(uaa->uaa_device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev = self;
	sc->sc_udev = udev = uaa->uaa_device;

	/*
	 * selinfo and cv live for the whole lifetime of the softc;
	 * ugen_set_config() deliberately preserves them (see
	 * UGEN_ENDPOINT_NONZERO_CRUFT).
	 */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce;

			sce = &sc->sc_endpoints[i][dir];
			selinit(&sce->rsel);
			cv_init(&sce->cv, "ugensce");
		}
	}

	/* First set configuration index 0, the default one for ugen. */
	err = usbd_set_config_index(udev, 0, 0);
	if (err) {
		aprint_error_dev(self,
		    "setting configuration index 0 failed\n");
		sc->sc_dying = 1;
		return;
	}
	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;

	/* Set up all the local state for this configuration. */
	err = ugen_set_config(sc, conf);
	if (err) {
		aprint_error_dev(self, "setting configuration %d failed\n",
		    conf);
		sc->sc_dying = 1;
		return;
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;
}

/*
 * Reset every per-endpoint record to zero up to (but not including) the
 * selinfo/cv members, which must survive reconfiguration.
 */
Static void
ugen_clear_endpoints(struct ugen_softc *sc)
{

	/* Clear out the old info, but leave the selinfo and cv initialised. */
	for (int i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (int dir = OUT; dir <= IN; dir++) {
			struct ugen_endpoint *sce = &sc->sc_endpoints[i][dir];
			memset(sce, 0, UGEN_ENDPOINT_NONZERO_CRUFT);
		}
	}
}

/*
 * Switch the device to configuration `configno' and rebuild the endpoint
 * table from its interface/endpoint descriptors.  Fails with USBD_IN_USE
 * if any non-control endpoint is currently open.
 */
Static int
ugen_set_config(struct ugen_softc *sc, int configno)
{
	struct usbd_device *dev = sc->sc_udev;
	usb_config_descriptor_t *cdesc;
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt;
	int ifaceno, endptno, endpt;
	usbd_status err;
	int dir;

	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
		    device_xname(sc->sc_dev), configno, sc));

	/*
	 * We start at 1, not 0, because we don't care whether the
	 * control endpoint is open or not. It is always present.
	 */
	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
		if (sc->sc_is_open[endptno]) {
			DPRINTFN(1,
			     ("ugen_set_config: %s - endpoint %d is open\n",
			      device_xname(sc->sc_dev), endptno));
			return USBD_IN_USE;
		}

	/* Avoid setting the current value.
	 */
	cdesc = usbd_get_config_descriptor(dev);
	if (!cdesc || cdesc->bConfigurationValue != configno) {
		err = usbd_set_config_no(dev, configno, 1);
		if (err)
			return err;
	}

	err = usbd_interface_count(dev, &niface);
	if (err)
		return err;

	ugen_clear_endpoints(sc);

	/* Record sc/edesc/iface for every endpoint of every interface. */
	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
		err = usbd_device2interface_handle(dev, ifaceno, &iface);
		if (err)
			return err;
		err = usbd_endpoint_count(iface, &nendpt);
		if (err)
			return err;
		for (endptno = 0; endptno < nendpt; endptno++) {
			ed = usbd_interface2endpoint_descriptor(iface,endptno);
			KASSERT(ed != NULL);
			endpt = ed->bEndpointAddress;
			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
				    "(%d,%d), sce=%p\n",
				    endptno, endpt, UE_GET_ADDR(endpt),
				    UE_GET_DIR(endpt), sce));
			sce->sc = sc;
			sce->edesc = ed;
			sce->iface = iface;
		}
	}
	return USBD_NORMAL_COMPLETION;
}

/*
 * Character-device open.  The minor number selects unit and endpoint;
 * for non-control endpoints, open the pipe(s) for the direction(s)
 * requested by `flag' and allocate the per-type buffering (clist for
 * interrupt-in, circular buffer plus a ring of xfers for isochronous-in).
 */
int
ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	struct usbd_xfer *xfer;
	int i, j;

	sc = device_lookup_private(&ugen_cd, unit);
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
		     flag, mode, unit, endpt));

	/* The control endpoint allows multiple opens. */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return 0;
	}

	if (sc->sc_is_open[endpt])
		return EBUSY;

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce->edesc == NULL)
				return ENXIO;
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
			     sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return EIO;
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			sce->ibuf = kmem_alloc(isize, KM_SLEEP);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
				     endpt, isize));
			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1) {
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return ENOMEM;
			}
			/* Periodic transfers feed sce->q via ugenintr(). */
			err = usbd_open_pipe_intr(sce->iface,
			    edesc->bEndpointAddress,
			    USBD_SHORT_XFER_OK, &sce->pipeh, sce,
			    sce->ibuf, isize, ugenintr,
			    USBD_DEFAULT_INTERVAL);
			if (err) {
				clfree(&sce->q);
				kmem_free(sce->ibuf, isize);
				sce->ibuf = NULL;
				return EIO;
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return EIO;
			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
			/*
			 * Use request size for non-RA/WB transfers
			 * as the default.
			 */
			sce->ra_wb_reqsize = UGEN_BBSIZE;
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return EINVAL;
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return EINVAL;
			sce->ibuf = kmem_alloc(isize * UGEN_NISOFRAMES,
				KM_SLEEP);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
				     endpt, isize));
			err = usbd_open_pipe(sce->iface,
				  edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
				sce->ibuf = NULL;
				return EIO;
			}
			/* Queue a ring of requests to keep input flowing. */
			for (i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				err = usbd_create_xfer(sce->pipeh,
				    isize * UGEN_NISORFRMS, 0, UGEN_NISORFRMS,
				    &xfer);
				if (err)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				sce->isoreqs[i].dmabuf = usbd_get_buffer(xfer);
				for (j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer(xfer, &sce->isoreqs[i],
				    sce->isoreqs[i].sizes, UGEN_NISORFRMS, 0,
				    ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			while (--i >= 0) /* implicit buffer free */
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			usbd_close_pipe(sce->pipeh);
			sce->pipeh = NULL;
			kmem_free(sce->ibuf, isize * UGEN_NISOFRAMES);
			sce->ibuf = NULL;
			return ENOMEM;
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return EINVAL;
		}
	}
	sc->sc_is_open[endpt] = 1;
	return 0;
}

/*
 * Character-device close.  Abort and close the pipe(s) opened for the
 * direction(s) in `flag', destroy any RA/WB or isochronous xfers, and
 * release the endpoint's buffering.
 */
int
ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	int dir;
	int i;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
		     flag, mode, UGENUNIT(dev), endpt));

#ifdef DIAGNOSTIC
	if (!sc->sc_is_open[endpt]) {
		printf("ugenclose: not open\n");
		return EINVAL;
	}
#endif

	if (endpt == USB_CONTROL_ENDPOINT) {
		DPRINTFN(5, ("ugenclose: close control\n"));
		sc->sc_is_open[endpt] = 0;
		return 0;
	}

	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		if (sce->pipeh == NULL)
			continue;
		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
			     endpt, dir, sce));

		usbd_abort_pipe(sce->pipeh);

		int isize = UGETW(sce->edesc->wMaxPacketSize);
		/* msize = size of sce->ibuf to free; 0 means none. */
		int msize = 0;

		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			ndflush(&sce->q, sce->q.c_cc);
			clfree(&sce->q);
			msize = isize;
			break;
		case UE_ISOCHRONOUS:
			for (i = 0; i < UGEN_NISOREQS; ++i)
				usbd_destroy_xfer(sce->isoreqs[i].xfer);
			msize = isize * UGEN_NISOFRAMES;
			break;
		case UE_BULK:
			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB)) {
				usbd_destroy_xfer(sce->ra_wb_xfer);
				msize = sce->ra_wb_bufsize;
			}
			break;
		default:
			break;
		}
		usbd_close_pipe(sce->pipeh);
		sce->pipeh = NULL;
		if (sce->ibuf != NULL) {
			kmem_free(sce->ibuf, msize);
			sce->ibuf = NULL;
		}
	}
	sc->sc_is_open[endpt] = 0;

	return 0;
}

/*
 * Read worker for ugenread().  Behavior depends on endpoint type:
 * interrupt-in drains the clist fed by ugenintr(); bulk either streams
 * from the read-ahead ring (UGEN_BULK_RA) or performs synchronous bulk
 * transfers through sc_buffer; isochronous drains the circular buffer
 * fed by ugen_isoc_rintr().
 */
Static int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	uint32_t n, tn;
	struct usbd_xfer *xfer;
	usbd_status err;
	int error = 0;

	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		mutex_enter(&sc->sc_lock);
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			/* "ugenri" */
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}
		/*
		 * NOTE(review): the queue is drained below after dropping
		 * sc_lock; presumably serialized by the one-open-per-endpoint
		 * rule against other readers -- confirm.
		 */
		mutex_exit(&sc->sc_lock);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(sc->sc_buffer))
				n = sizeof(sc->sc_buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, sc->sc_buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		if (sce->state & UGEN_BULK_RA) {
			/* Read-ahead mode: consume the circular buffer. */
			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for ugen_bulkra_intr() to fill. */
				while (sce->ra_wb_used == 0) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenread: sleep on %p\n",
						  sce));
					/* "ugenrb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenread: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data to the process. */
				while (uio->uio_resid > 0
				       && sce->ra_wb_used > 0) {
					n = min(uio->uio_resid,
						sce->ra_wb_used);
					/* Don't read past the wrap point. */
					n = min(n, sce->limit - sce->cur);
					error = uiomove(sce->cur, n, uio);
					if (error)
						break;
					sce->cur += n;
					sce->ra_wb_used -= n;
					if (sce->cur == sce->limit)
						sce->cur = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was full, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = (sce->limit - sce->ibuf)
					    - sce->ra_wb_used;
					usbd_setup_xfer(xfer, sce, NULL,
					    min(n, sce->ra_wb_xferlen),
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkra_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try
						 * again at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain synchronous bulk read through the bounce buffer. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
		    0, &xfer);
		if (error)
			return error;
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			tn = n;
			err = usbd_bulk_transfer(xfer, sce->pipeh,
			    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
			    sce->timeout, sc->sc_buffer, &tn);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(sc->sc_buffer, tn, uio);
			/* A short transfer ends the read. */
			if (error || tn < n)
				break;
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		mutex_enter(&sc->sc_lock);
		/* Wait until ugen_isoc_rintr() has produced data. */
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			sce->state |= UGEN_ASLP;
			/* "ugenri" */
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = cv_timedwait_sig(&sce->cv, &sc->sc_lock,
			    mstohz(sce->timeout));
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				sce->state &= ~UGEN_ASLP;
				break;
			}
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			/* Copy up to the wrap point in one go. */
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process.
			 */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		mutex_exit(&sc->sc_lock);
		break;


	default:
		return ENXIO;
	}
	return error;
}

/*
 * Read entry point.  Holds a reference on the softc for the duration of
 * the I/O so ugen_detach() can wait for us via sc_detach_cv.
 */
int
ugenread(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	sc->sc_refcnt++;
	mutex_exit(&sc->sc_lock);

	error = ugen_do_read(sc, endpt, uio, flag);

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt < 0)
		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
	mutex_exit(&sc->sc_lock);

	return error;
}

/*
 * Write worker for ugenwrite().  Bulk endpoints either stage data into
 * the write-behind circular buffer (UGEN_BULK_WB, drained by
 * ugen_bulkwb_intr()) or perform synchronous bulk transfers through
 * sc_buffer; interrupt-out writes one max-packet at a time.
 */
Static int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
	int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	uint32_t n;
	int error = 0;
	uint32_t tn;
	char *dbuf;
	struct usbd_xfer *xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));

	if (endpt == USB_CONTROL_ENDPOINT)
		return ENODEV;

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return EIO;
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return EIO;
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		if (sce->state & UGEN_BULK_WB) {
			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
				     uio->uio_resid, sce->ra_wb_used));
			xfer = sce->ra_wb_xfer;

			mutex_enter(&sc->sc_lock);
			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
			    flag & IO_NDELAY) {
				mutex_exit(&sc->sc_lock);
				return EWOULDBLOCK;
			}
			while (uio->uio_resid > 0 && !error) {
				/* Wait for ugen_bulkwb_intr() to drain. */
				while (sce->ra_wb_used ==
				       sce->limit - sce->ibuf) {
					sce->state |= UGEN_ASLP;
					DPRINTFN(5,
						 ("ugenwrite: sleep on %p\n",
						  sce));
					/* "ugenwb" */
					error = cv_timedwait_sig(&sce->cv,
					    &sc->sc_lock, mstohz(sce->timeout));
					DPRINTFN(5,
						 ("ugenwrite: woke, error=%d\n",
						  error));
					if (sc->sc_dying)
						error = EIO;
					if (error) {
						sce->state &= ~UGEN_ASLP;
						break;
					}
				}

				/* Copy data from the process. */
				while (uio->uio_resid > 0 &&
				    sce->ra_wb_used < sce->limit - sce->ibuf) {
					n = min(uio->uio_resid,
						(sce->limit - sce->ibuf)
						 - sce->ra_wb_used);
					/* Don't write past the wrap point. */
					n = min(n, sce->limit - sce->fill);
					error = uiomove(sce->fill, n, uio);
					if (error)
						break;
					sce->fill += n;
					sce->ra_wb_used += n;
					if (sce->fill == sce->limit)
						sce->fill = sce->ibuf;
				}

				/*
				 * If the transfers stopped because the
				 * buffer was empty, restart them.
				 */
				if (sce->state & UGEN_RA_WB_STOP &&
				    sce->ra_wb_used > 0) {
					/*
					 * Linearise (possibly wrapped) data
					 * into the xfer's DMA buffer.
					 */
					dbuf = (char *)usbd_get_buffer(xfer);
					n = min(sce->ra_wb_used,
						sce->ra_wb_xferlen);
					tn = min(n, sce->limit - sce->cur);
					memcpy(dbuf, sce->cur, tn);
					dbuf += tn;
					if (n - tn > 0)
						memcpy(dbuf, sce->ibuf,
						       n - tn);
					usbd_setup_xfer(xfer, sce, NULL, n,
					    0, USBD_NO_TIMEOUT,
					    ugen_bulkwb_intr);
					sce->state &= ~UGEN_RA_WB_STOP;
					err = usbd_transfer(xfer);
					if (err != USBD_IN_PROGRESS)
						/*
						 * The transfer has not been
						 * queued.  Setting STOP
						 * will make us try again
						 * at the next read.
						 */
						sce->state |= UGEN_RA_WB_STOP;
				}
			}
			mutex_exit(&sc->sc_lock);
			break;
		}
		/* Plain synchronous bulk write through the bounce buffer. */
		error = usbd_create_xfer(sce->pipeh, UGEN_BBSIZE,
		    sce->state & UGEN_SHORT_OK ? USBD_SHORT_XFER_OK : 0,
		    0, &xfer);
		if (error)
			return error;
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	case UE_INTERRUPT:
		error = usbd_create_xfer(sce->pipeh,
		    UGETW(sce->edesc->wMaxPacketSize), 0, 0, &xfer);
		if (error)
			return error;
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
			    uio->uio_resid)) != 0) {
			error = uiomove(sc->sc_buffer, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
			    sce->timeout, sc->sc_buffer, &n);
			if (err) {
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_destroy_xfer(xfer);
		break;
	default:
		return ENXIO;
	}
	return error;
}

/*
 * Write entry point.  Holds a reference on the softc for the duration of
 * the I/O so ugen_detach() can wait for us via sc_detach_cv.
 */
int
ugenwrite(dev_t dev, struct uio *uio, int flag)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	sc->sc_refcnt++;
	mutex_exit(&sc->sc_lock);

	error = ugen_do_write(sc, endpt, uio, flag);

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt < 0)
		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
	mutex_exit(&sc->sc_lock);

	return error;
}

/* Autoconfiguration deactivate hook: just mark the device dying. */
int
ugen_activate(device_t self, enum devact act)
{
	struct ugen_softc *sc = device_private(self);

	switch
	    (act) {
	case DVACT_DEACTIVATE:
		sc->sc_dying = 1;
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

/*
 * Detach: mark the device dying, abort all open pipes to kick sleeping
 * I/O, wait for in-flight operations (sc_refcnt) to drain, revoke the
 * device vnodes, and tear down per-endpoint and per-softc resources.
 */
int
ugen_detach(device_t self, int flags)
{
	struct ugen_softc *sc = device_private(self);
	struct ugen_endpoint *sce;
	int i, dir;
	int maj, mn;

	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));

	sc->sc_dying = 1;
	pmf_device_deregister(self);
	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			if (sce->pipeh)
				usbd_abort_pipe(sce->pipeh);
		}
	}

	mutex_enter(&sc->sc_lock);
	if (--sc->sc_refcnt >= 0) {
		/*
		 * Wake everyone.
		 * NOTE(review): only the IN-direction cv is signalled here;
		 * writers sleeping on the OUT cv presumably rely on
		 * usbd_abort_pipe() above to wake them -- confirm.
		 */
		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
			cv_signal(&sc->sc_endpoints[i][IN].cv);
		/* Wait for processes to go away. */
		usb_detach_wait(sc->sc_dev, &sc->sc_detach_cv, &sc->sc_lock);
	}
	mutex_exit(&sc->sc_lock);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ugen_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);

	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
		for (dir = OUT; dir <= IN; dir++) {
			sce = &sc->sc_endpoints[i][dir];
			seldestroy(&sce->rsel);
			cv_destroy(&sce->cv);
		}
	}

	cv_destroy(&sc->sc_detach_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}

/*
 * Interrupt-in pipe completion callback: append the received bytes to
 * the endpoint's clist and wake any sleeping reader / poller.
 */
Static void
ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count;
	u_char *ibuf;

	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugenintr: status=%d\n", status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
	ibuf = sce->ibuf;

	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
		     xfer, status, count));
	DPRINTFN(5, (" data = %02x %02x %02x\n",
		     ibuf[0], ibuf[1], ibuf[2]));

	/* Overflow is silently dropped (b_to_q result deliberately ignored). */
	(void)b_to_q(ibuf, count, &sce->q);

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}

/*
 * Isochronous-in completion callback: copy each frame into the circular
 * buffer (discarding the oldest data on overflow), requeue the request,
 * and wake any sleeping reader / poller.
 */
Static void
ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr,
		usbd_status status)
{
	struct isoreq *req = addr;
	struct ugen_endpoint *sce = req->sce;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	int i, isize;

	/* Return if we are aborting.
*/ 1123 if (status == USBD_CANCELLED) 1124 return; 1125 1126 usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL); 1127 DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n", 1128 (long)(req - sce->isoreqs), count)); 1129 1130 /* throw away oldest input if the buffer is full */ 1131 if(sce->fill < sce->cur && sce->cur <= sce->fill + count) { 1132 sce->cur += count; 1133 if(sce->cur >= sce->limit) 1134 sce->cur = sce->ibuf + (sce->limit - sce->cur); 1135 DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n", 1136 count)); 1137 } 1138 1139 isize = UGETW(sce->edesc->wMaxPacketSize); 1140 for (i = 0; i < UGEN_NISORFRMS; i++) { 1141 uint32_t actlen = req->sizes[i]; 1142 char const *tbuf = (char const *)req->dmabuf + isize * i; 1143 1144 /* copy data to buffer */ 1145 while (actlen > 0) { 1146 n = min(actlen, sce->limit - sce->fill); 1147 memcpy(sce->fill, tbuf, n); 1148 1149 tbuf += n; 1150 actlen -= n; 1151 sce->fill += n; 1152 if(sce->fill == sce->limit) 1153 sce->fill = sce->ibuf; 1154 } 1155 1156 /* setup size for next transfer */ 1157 req->sizes[i] = isize; 1158 } 1159 1160 usbd_setup_isoc_xfer(xfer, req, req->sizes, UGEN_NISORFRMS, 0, 1161 ugen_isoc_rintr); 1162 (void)usbd_transfer(xfer); 1163 1164 mutex_enter(&sc->sc_lock); 1165 if (sce->state & UGEN_ASLP) { 1166 sce->state &= ~UGEN_ASLP; 1167 DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce)); 1168 cv_signal(&sce->cv); 1169 } 1170 mutex_exit(&sc->sc_lock); 1171 selnotify(&sce->rsel, 0, 0); 1172} 1173 1174Static void 1175ugen_bulkra_intr(struct usbd_xfer *xfer, void *addr, 1176 usbd_status status) 1177{ 1178 struct ugen_endpoint *sce = addr; 1179 struct ugen_softc *sc = sce->sc; 1180 uint32_t count, n; 1181 char const *tbuf; 1182 usbd_status err; 1183 1184 /* Return if we are aborting. 
 */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
		/* Stop read-ahead on error. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used += count;

	/* Copy data to buffer, wrapping at the end of the circular area. */
	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
	n = min(count, sce->limit - sce->fill);
	memcpy(sce->fill, tbuf, n);
	tbuf += n;
	count -= n;
	sce->fill += n;
	if (sce->fill == sce->limit)
		sce->fill = sce->ibuf;
	if (count > 0) {
		/* Second piece of a wrapped copy, starting at ibuf. */
		memcpy(sce->fill, tbuf, count);
		sce->fill += count;
	}

	/* Set up the next request if necessary (free space remaining). */
	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
	if (n > 0) {
		usbd_setup_xfer(xfer, sce, NULL, min(n, sce->ra_wb_xferlen), 0,
		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkra_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next read.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}

/*
 * Completion handler for bulk write-behind transfers: advance the
 * consumer pointer and, if buffered data is pending, queue the next
 * write.
 */
Static void
ugen_bulkwb_intr(struct usbd_xfer *xfer, void *addr,
		 usbd_status status)
{
	struct ugen_endpoint *sce = addr;
	struct ugen_softc *sc = sce->sc;
	uint32_t count, n;
	char *tbuf;
	usbd_status err;

	/* Return if we are aborting.
 */
	if (status == USBD_CANCELLED)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
		/* Stop write-behind on error. */
		sce->state |= UGEN_RA_WB_STOP;
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sce->pipeh);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);

	/* Keep track of how much is in the buffer. */
	sce->ra_wb_used -= count;

	/* Update buffer pointers (consumer side of the circular buffer). */
	sce->cur += count;
	if (sce->cur >= sce->limit)
		sce->cur = sce->ibuf + (sce->cur - sce->limit);

	/* Set up next request if necessary. */
	if (sce->ra_wb_used > 0) {
		/* copy data from buffer, possibly in two wrapped pieces */
		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
		n = min(count, sce->limit - sce->cur);
		memcpy(tbuf, sce->cur, n);
		tbuf += n;
		if (count - n > 0)
			memcpy(tbuf, sce->ibuf, count - n);

		usbd_setup_xfer(xfer, sce, NULL, count, 0, USBD_NO_TIMEOUT,
		    ugen_bulkwb_intr);
		err = usbd_transfer(xfer);
		if (err != USBD_IN_PROGRESS) {
			printf("usbd_bulkwb_intr: error=%d\n", err);
			/*
			 * The transfer has not been queued.  Setting STOP
			 * will make us try again at the next write.
			 */
			sce->state |= UGEN_RA_WB_STOP;
		}
	}
	else
		sce->state |= UGEN_RA_WB_STOP;

	mutex_enter(&sc->sc_lock);
	if (sce->state & UGEN_ASLP) {
		sce->state &= ~UGEN_ASLP;
		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
		cv_signal(&sce->cv);
	}
	mutex_exit(&sc->sc_lock);
	selnotify(&sce->rsel, 0, 0);
}

/*
 * Select alternate setting 'altno' of interface 'ifaceidx' and
 * rebuild the endpoint table for the new setting.  Returns a
 * usbd_status (USBD_INVAL for a bad interface index).
 */
Static usbd_status
ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
{
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	usbd_status err;
	struct ugen_endpoint *sce;
	uint8_t niface, nendpt, endptno, endpt;
	int dir;

	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));

	err = usbd_interface_count(sc->sc_udev, &niface);
	if (err)
		return err;
	if (ifaceidx < 0 || ifaceidx >= niface)
		return USBD_INVAL;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return err;
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	/* change setting */
	err = usbd_set_interface(iface, altno);
	if (err)
		return err;

	/* Re-query: the endpoint count may differ in the new setting. */
	err = usbd_endpoint_count(iface, &nendpt);
	if (err)
		return err;

	ugen_clear_endpoints(sc);

	/* Re-populate the endpoint table from the new descriptors. */
	for (endptno = 0; endptno < nendpt; endptno++) {
		ed = usbd_interface2endpoint_descriptor(iface,endptno);
		KASSERT(ed != NULL);
		endpt = ed->bEndpointAddress;
		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
		sce->sc = sc;
		sce->edesc = ed;
		sce->iface = iface;
	}
	return 0;
}

/* Retrieve a complete descriptor for a certain device and index.
 */
Static usb_config_descriptor_t *
ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
{
	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
	int len;
	usbd_status err;

	if (index == USB_CURRENT_CONFIG_INDEX) {
		/* Copy the cached descriptor of the current config. */
		tdesc = usbd_get_config_descriptor(sc->sc_udev);
		len = UGETW(tdesc->wTotalLength);
		if (lenp)
			*lenp = len;
		cdesc = kmem_alloc(len, KM_SLEEP);
		memcpy(cdesc, tdesc, len);
		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
	} else {
		/* Fetch the header first to learn the full length. */
		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
		if (err)
			return 0;
		len = UGETW(cdescr.wTotalLength);
		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
		if (lenp)
			*lenp = len;
		cdesc = kmem_alloc(len, KM_SLEEP);
		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
		if (err) {
			kmem_free(cdesc, len);
			return 0;
		}
	}
	/* Caller frees the result with kmem_free(cdesc, *lenp). */
	return cdesc;
}

/* Current alternate-setting index of an interface, or -1 on error. */
Static int
ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
{
	struct usbd_interface *iface;
	usbd_status err;

	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
	if (err)
		return -1;
	return usbd_get_interface_altindex(iface);
}

/*
 * Handler for all ugen ioctls; 'endpt' is the endpoint minor the
 * ioctl arrived on.  Returns an errno value.
 */
Static int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      void *addr, int flag, struct lwp *l)
{
	struct ugen_endpoint *sce;
	usbd_status err;
	struct usbd_interface *iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	uint8_t conf, alt;
	int cdesclen;
	int error;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (sc->sc_dying)
		return EIO;

	switch (cmd) {
	case FIONBIO:
		/* All handled in
		   the upper FS layer. */
		return 0;
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return EINVAL;
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return EINVAL;
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return 0;
	case USB_SET_TIMEOUT:
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL
		    /* XXX this shouldn't happen, but the distinction between
		       input and output pipes isn't clear enough.
		       || sce->pipeh == NULL */
		    )
			return EINVAL;
		sce->timeout = *(int *)addr;
		return 0;
	case USB_SET_BULK_RA:
		if (endpt == USB_CONTROL_ENDPOINT)
			return EINVAL;
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return EINVAL;
		edesc = sce->edesc;
		/* Read-ahead only makes sense on a bulk endpoint. */
		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
			return EINVAL;

		if (*(int *)addr) {
			/* Only turn RA on if it's currently off. */
			if (sce->state & UGEN_BULK_RA)
				return 0;

			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
				/* shouldn't happen */
				return EINVAL;
			error = usbd_create_xfer(sce->pipeh,
			    sce->ra_wb_reqsize, 0, 0, &sce->ra_wb_xfer);
			if (error)
				return error;
			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
			sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP);
			sce->fill = sce->cur = sce->ibuf;
			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
			sce->ra_wb_used = 0;
			sce->state |= UGEN_BULK_RA;
			sce->state &= ~UGEN_RA_WB_STOP;
			/* Now start reading.
*/ 1485 usbd_setup_xfer(sce->ra_wb_xfer, sce, NULL, 1486 min(sce->ra_wb_xferlen, sce->ra_wb_bufsize), 1487 0, USBD_NO_TIMEOUT, ugen_bulkra_intr); 1488 err = usbd_transfer(sce->ra_wb_xfer); 1489 if (err != USBD_IN_PROGRESS) { 1490 sce->state &= ~UGEN_BULK_RA; 1491 kmem_free(sce->ibuf, sce->ra_wb_bufsize); 1492 sce->ibuf = NULL; 1493 usbd_destroy_xfer(sce->ra_wb_xfer); 1494 return EIO; 1495 } 1496 } else { 1497 /* Only turn RA off if it's currently on. */ 1498 if (!(sce->state & UGEN_BULK_RA)) 1499 return 0; 1500 1501 sce->state &= ~UGEN_BULK_RA; 1502 usbd_abort_pipe(sce->pipeh); 1503 usbd_destroy_xfer(sce->ra_wb_xfer); 1504 /* 1505 * XXX Discard whatever's in the buffer, but we 1506 * should keep it around and drain the buffer 1507 * instead. 1508 */ 1509 kmem_free(sce->ibuf, sce->ra_wb_bufsize); 1510 sce->ibuf = NULL; 1511 } 1512 return 0; 1513 case USB_SET_BULK_WB: 1514 if (endpt == USB_CONTROL_ENDPOINT) 1515 return EINVAL; 1516 sce = &sc->sc_endpoints[endpt][OUT]; 1517 if (sce == NULL || sce->pipeh == NULL) 1518 return EINVAL; 1519 edesc = sce->edesc; 1520 if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK) 1521 return EINVAL; 1522 1523 if (*(int *)addr) { 1524 /* Only turn WB on if it's currently off. */ 1525 if (sce->state & UGEN_BULK_WB) 1526 return 0; 1527 1528 if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0) 1529 /* shouldn't happen */ 1530 return EINVAL; 1531 error = usbd_create_xfer(sce->pipeh, sce->ra_wb_reqsize, 1532 0, 0, &sce->ra_wb_xfer); 1533 sce->ra_wb_xferlen = sce->ra_wb_reqsize; 1534 sce->ibuf = kmem_alloc(sce->ra_wb_bufsize, KM_SLEEP); 1535 sce->fill = sce->cur = sce->ibuf; 1536 sce->limit = sce->ibuf + sce->ra_wb_bufsize; 1537 sce->ra_wb_used = 0; 1538 sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP; 1539 } else { 1540 /* Only turn WB off if it's currently on. 
			 */
			if (!(sce->state & UGEN_BULK_WB))
				return 0;

			sce->state &= ~UGEN_BULK_WB;
			/*
			 * XXX Discard whatever's in the buffer, but we
			 * should keep it around and keep writing to
			 * drain the buffer instead.
			 */
			usbd_abort_pipe(sce->pipeh);
			usbd_destroy_xfer(sce->ra_wb_xfer);
			kmem_free(sce->ibuf, sce->ra_wb_bufsize);
			sce->ibuf = NULL;
		}
		return 0;
	case USB_SET_BULK_RA_OPT:
	case USB_SET_BULK_WB_OPT:
	{
		struct usb_bulk_ra_wb_opt *opt;

		if (endpt == USB_CONTROL_ENDPOINT)
			return EINVAL;
		opt = (struct usb_bulk_ra_wb_opt *)addr;
		/* RA options apply to the IN side, WB to the OUT side. */
		if (cmd == USB_SET_BULK_RA_OPT)
			sce = &sc->sc_endpoints[endpt][IN];
		else
			sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL || sce->pipeh == NULL)
			return EINVAL;
		/* Sanity-bound the requested buffer and request sizes. */
		if (opt->ra_wb_buffer_size < 1 ||
		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
		    opt->ra_wb_request_size < 1 ||
		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
			return EINVAL;
		/*
		 * XXX These changes do not take effect until the
		 * next time RA/WB mode is enabled but they ought to
		 * take effect immediately.
		 */
		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
		sce->ra_wb_reqsize = opt->ra_wb_request_size;
		return 0;
	}
	default:
		break;
	}

	/* The remaining commands are valid only on the control endpoint. */
	if (endpt != USB_CONTROL_ENDPOINT)
		return EINVAL;

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return EIO;
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return EPERM;
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return EBUSY;
		default:
			return EIO;
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
		    ai->uai_interface_index, &iface);
		if (err)
			return EINVAL;
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return EIO;
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return EPERM;
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
		    ai->uai_interface_index, &iface);
		if (err)
			return EINVAL;
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return EINVAL;
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			kmem_free(cdesc, cdesclen);
			return EINVAL;
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
		    *usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		/* Only the fixed-size header is copied out here. */
		cd->ucd_desc = *cdesc;
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = ugen_get_cdesc(sc, id->uid_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			kmem_free(cdesc, cdesclen);
			return EINVAL;
		}
		id->uid_desc = *idesc;
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
		    alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			kmem_free(cdesc, cdesclen);
			return EINVAL;
		}
		ed->ued_desc = *edesc;
		kmem_free(cdesc, cdesclen);
		break;
	case USB_GET_FULL_DESC:
	{
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;

		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &cdesclen);
		if (cdesc == NULL)
			return EINVAL;
		/* Copy out at most ufd_size bytes of the full descriptor. */
		len = cdesclen;
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (void
		    *)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;
		uio.uio_vmspace = l->l_proc->p_vmspace;
		error = uiomove((void *)cdesc, len, &uio);
		kmem_free(cdesc, cdesclen);
		return error;
	}
	case USB_GET_STRING_DESC: {
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
		    si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return EINVAL;
		break;
	}
	case USB_DO_REQUEST:
	{
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		usbd_status xerr;

		error = 0;

		if (!(flag & FWRITE))
			return EPERM;
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return EINVAL;

		/* Bound the data stage length. */
		if (len < 0 || len > 32767)
			return EINVAL;
		if (len != 0) {
			/* Set up a uio for the request's data stage. */
			iov.iov_base = (void *)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_rw =
			    ur->ucr_request.bmRequestType & UT_READ ?
			    UIO_READ : UIO_WRITE;
			uio.uio_vmspace = l->l_proc->p_vmspace;
			ptr = kmem_alloc(len, KM_SLEEP);
			if (uio.uio_rw == UIO_WRITE) {
				/* Copy the out-data in from userland. */
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
		    ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (xerr) {
			error = EIO;
			goto ret;
		}
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				/* Copy out only what the device returned. */
				size_t alen = min(len, ur->ucr_actlen);
				error = uiomove(ptr, alen, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			kmem_free(ptr, len);
		return error;
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
		    (struct usb_device_info *)addr, 0);
		break;
#ifdef COMPAT_30
	case USB_GET_DEVICEINFO_OLD:
		usbd_fill_deviceinfo_old(sc->sc_udev,
		    (struct usb_device_info_old *)addr, 0);

		break;
#endif
	default:
		return EINVAL;
	}
	return 0;
}

/*
 * ioctl entry point: look up the softc, hold a reference across the
 * call so detach can wait for us, and dispatch to ugen_do_ioctl.
 */
int
ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	int endpt = UGENENDPOINT(dev);
	struct ugen_softc *sc;
	int error;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	sc->sc_refcnt++;
	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
	if (--sc->sc_refcnt < 0)
		usb_detach_broadcast(sc->sc_dev, &sc->sc_detach_cv);
	return error;
}

/*
 * poll entry point for endpoint nodes (not the control endpoint).
 */
int
ugenpoll(dev_t dev, int events, struct lwp *l)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce_in, *sce_out;
	int revents = 0;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (sc->sc_dying)
		return POLLHUP;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	sce_in =
	    &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
	if (sce_in == NULL && sce_out == NULL)
		return POLLERR;
#ifdef DIAGNOSTIC
	if (!sce_in->edesc && !sce_out->edesc) {
		printf("ugenpoll: no edesc\n");
		return POLLERR;
	}
	/* It's possible to have only one pipe open. */
	if (!sce_in->pipeh && !sce_out->pipeh) {
		printf("ugenpoll: no pipe\n");
		return POLLERR;
	}
#endif

	mutex_enter(&sc->sc_lock);
	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			/* Readable iff the interrupt clist has data. */
			if (sce_in->q.c_cc > 0)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_ISOCHRONOUS:
			/* Readable iff the circular buffer is non-empty. */
			if (sce_in->cur != sce_in->fill)
				revents |= events & (POLLIN | POLLRDNORM);
			else
				selrecord(l, &sce_in->rsel);
			break;
		case UE_BULK:
			if (sce_in->state & UGEN_BULK_RA) {
				/* Readable iff read-ahead buffered data. */
				if (sce_in->ra_wb_used > 0)
					revents |= events &
					    (POLLIN | POLLRDNORM);
				else
					selrecord(l, &sce_in->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLIN | POLLRDNORM);
			break;
		default:
			break;
		}
	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX unimplemented */
			break;
		case UE_BULK:
			if (sce_out->state & UGEN_BULK_WB) {
				/* Writable while the WB buffer has room. */
				if (sce_out->ra_wb_used <
				    sce_out->limit - sce_out->ibuf)
					revents |= events &
					    (POLLOUT | POLLWRNORM);
				else
					selrecord(l, &sce_out->rsel);
				break;
			}
			/*
			 * We have no easy way of determining if a read will
			 * yield any data or a write will happen.
			 * Pretend they will.
			 */
			revents |= events & (POLLOUT | POLLWRNORM);
			break;
		default:
			break;
		}

	mutex_exit(&sc->sc_lock);

	return revents;
}

/* Remove a knote from an endpoint's selinfo klist, under the softc lock. */
static void
filt_ugenrdetach(struct knote *kn)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	struct ugen_softc *sc = sce->sc;

	mutex_enter(&sc->sc_lock);
	SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
	mutex_exit(&sc->sc_lock);
}

/* kqueue read filter for interrupt endpoints: data queued in the clist? */
static int
filt_ugenread_intr(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	struct ugen_softc *sc = sce->sc;

	if (sc->sc_dying)
		return 0;

	kn->kn_data = sce->q.c_cc;
	return kn->kn_data > 0;
}

/* kqueue read filter for isochronous endpoints: bytes in the ring buffer? */
static int
filt_ugenread_isoc(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	struct ugen_softc *sc = sce->sc;

	if (sc->sc_dying)
		return 0;

	if (sce->cur == sce->fill)
		return 0;

	/* Account for wraparound of the circular buffer. */
	if (sce->cur < sce->fill)
		kn->kn_data = sce->fill - sce->cur;
	else
		kn->kn_data = (sce->limit - sce->cur) +
		    (sce->fill - sce->ibuf);

	return 1;
}

/* kqueue read filter for bulk endpoints. */
static int
filt_ugenread_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	struct ugen_softc *sc = sce->sc;

	if (sc->sc_dying)
		return 0;

	if (!(sce->state & UGEN_BULK_RA))
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		return filt_seltrue(kn, hint);

	if (sce->ra_wb_used == 0)
		return 0;

	kn->kn_data = sce->ra_wb_used;

	return 1;
}

/* kqueue write filter for bulk endpoints: room left in the WB buffer? */
static int
filt_ugenwrite_bulk(struct knote *kn, long hint)
{
	struct ugen_endpoint *sce = kn->kn_hook;
	struct ugen_softc *sc = sce->sc;

	if (sc->sc_dying)
		return 0;

	if (!(sce->state & UGEN_BULK_WB))
		/*
		 * We have no easy way of determining if a read will
		 * yield any data or a write will happen.
		 * So, emulate "seltrue".
		 */
		return filt_seltrue(kn, hint);

	/* Buffer completely full: not writable. */
	if (sce->ra_wb_used == sce->limit - sce->ibuf)
		return 0;

	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;

	return 1;
}

static const struct filterops ugenread_intr_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_intr,
};

static const struct filterops ugenread_isoc_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_isoc,
};

static const struct filterops ugenread_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenread_bulk,
};

static const struct filterops ugenwrite_bulk_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_ugenrdetach,
	.f_event = filt_ugenwrite_bulk,
};

/*
 * kqueue attach entry point: pick the filterops matching the
 * endpoint's transfer type and hook the knote onto its klist.
 */
int
ugenkqfilter(dev_t dev, struct knote *kn)
{
	struct ugen_softc *sc;
	struct ugen_endpoint *sce;
	struct klist *klist;

	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
	if (sc == NULL || sc->sc_dying)
		return ENXIO;

	if (UGENENDPOINT(dev) == USB_CONTROL_ENDPOINT)
		return ENODEV;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
		if (sce == NULL)
			return
			    EINVAL;

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			kn->kn_fop = &ugenread_intr_filtops;
			break;
		case UE_ISOCHRONOUS:
			kn->kn_fop = &ugenread_isoc_filtops;
			break;
		case UE_BULK:
			kn->kn_fop = &ugenread_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	case EVFILT_WRITE:
		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
		if (sce == NULL)
			return EINVAL;

		klist = &sce->rsel.sel_klist;
		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
		case UE_ISOCHRONOUS:
			/* XXX poll doesn't support this */
			return EINVAL;

		case UE_BULK:
			kn->kn_fop = &ugenwrite_bulk_filtops;
			break;
		default:
			return EINVAL;
		}
		break;

	default:
		return EINVAL;
	}

	kn->kn_hook = sce;

	/* Insert under the softc lock, matching filt_ugenrdetach. */
	mutex_enter(&sc->sc_lock);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	return 0;
}