usb_transfer.c (213435) usb_transfer.c (215649)
1/* $FreeBSD: head/sys/dev/usb/usb_transfer.c 213435 2010-10-04 23:18:05Z hselasky $ */
1/* $FreeBSD: head/sys/dev/usb/usb_transfer.c 215649 2010-11-22 01:11:28Z weongyo $ */
2/*-
3 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/stdint.h>
28#include <sys/stddef.h>
29#include <sys/param.h>
30#include <sys/queue.h>
31#include <sys/types.h>
32#include <sys/systm.h>
33#include <sys/kernel.h>
34#include <sys/bus.h>
35#include <sys/linker_set.h>
36#include <sys/module.h>
37#include <sys/lock.h>
38#include <sys/mutex.h>
39#include <sys/condvar.h>
40#include <sys/sysctl.h>
41#include <sys/sx.h>
42#include <sys/unistd.h>
43#include <sys/callout.h>
44#include <sys/malloc.h>
45#include <sys/priv.h>
46
47#include <dev/usb/usb.h>
48#include <dev/usb/usbdi.h>
49#include <dev/usb/usbdi_util.h>
50
51#define USB_DEBUG_VAR usb_debug
52
53#include <dev/usb/usb_core.h>
54#include <dev/usb/usb_busdma.h>
55#include <dev/usb/usb_process.h>
56#include <dev/usb/usb_transfer.h>
57#include <dev/usb/usb_device.h>
58#include <dev/usb/usb_debug.h>
59#include <dev/usb/usb_util.h>
60
61#include <dev/usb/usb_controller.h>
62#include <dev/usb/usb_bus.h>
63#include <dev/usb/usb_pf.h>
63
64struct usb_std_packet_size {
65 struct {
66 uint16_t min; /* inclusive */
67 uint16_t max; /* inclusive */
68 } range;
69
70 uint16_t fixed[4];
71};
72
73static usb_callback_t usb_request_callback;
74
75static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
76
77 /* This transfer is used for generic control endpoint transfers */
78
79 [0] = {
80 .type = UE_CONTROL,
81 .endpoint = 0x00, /* Control endpoint */
82 .direction = UE_DIR_ANY,
83 .bufsize = USB_EP0_BUFSIZE, /* bytes */
84 .flags = {.proxy_buffer = 1,},
85 .callback = &usb_request_callback,
86 .usb_mode = USB_MODE_DUAL, /* both modes */
87 },
88
89 /* This transfer is used for generic clear stall only */
90
91 [1] = {
92 .type = UE_CONTROL,
93 .endpoint = 0x00, /* Control pipe */
94 .direction = UE_DIR_ANY,
95 .bufsize = sizeof(struct usb_device_request),
96 .callback = &usb_do_clear_stall_callback,
97 .timeout = 1000, /* 1 second */
98 .interval = 50, /* 50ms */
99 .usb_mode = USB_MODE_HOST,
100 },
101};
102
103/* function prototypes */
104
105static void usbd_update_max_frame_size(struct usb_xfer *);
106static void usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
107static void usbd_control_transfer_init(struct usb_xfer *);
108static int usbd_setup_ctrl_transfer(struct usb_xfer *);
109static void usb_callback_proc(struct usb_proc_msg *);
110static void usbd_callback_ss_done_defer(struct usb_xfer *);
111static void usbd_callback_wrapper(struct usb_xfer_queue *);
112static void usbd_transfer_start_cb(void *);
113static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *);
114static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
115 uint8_t type, enum usb_dev_speed speed);
116
117/*------------------------------------------------------------------------*
118 * usb_request_callback
119 *------------------------------------------------------------------------*/
120static void
121usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
122{
123 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
124 usb_handle_request_callback(xfer, error);
125 else
126 usbd_do_request_callback(xfer, error);
127}
128
129/*------------------------------------------------------------------------*
130 * usbd_update_max_frame_size
131 *
132 * This function updates the maximum frame size, taking into account
133 * that high speed USB can transfer multiple consecutive packets per frame.
134 *------------------------------------------------------------------------*/
135static void
136usbd_update_max_frame_size(struct usb_xfer *xfer)
137{
138 /* compute maximum frame size */
139 /* this computation should not overflow 16-bit */
140 /* max = 15 * 1024 */
141
142 xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
143}
144
145/*------------------------------------------------------------------------*
146 * usbd_get_dma_delay
147 *
148 * The following function is called when we need to
149 * synchronize with DMA hardware.
150 *
151 * Returns:
152 * 0: no DMA delay required
153 * Else: milliseconds of DMA delay
154 *------------------------------------------------------------------------*/
155usb_timeout_t
156usbd_get_dma_delay(struct usb_device *udev)
157{
158 struct usb_bus_methods *mtod;
159 uint32_t temp;
160
161 mtod = udev->bus->methods;
162 temp = 0;
163
164 if (mtod->get_dma_delay) {
165 (mtod->get_dma_delay) (udev, &temp);
166 /*
167		 * Round up and convert to milliseconds. Note that we
168		 * divide by 1024 instead of 1000 to save a true division.
169 */
170 temp += 0x3FF;
171 temp /= 0x400;
172 }
173 return (temp);
174}
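
/*
 * A minimal sketch of a controller "get_dma_delay" method feeding the
 * conversion above; the name "foo_get_dma_delay" and the figure of 188
 * microseconds are illustrative assumptions, not taken from this file.
 */
#if 0
static void
foo_get_dma_delay(struct usb_device *udev, uint32_t *pdelay)
{
	*pdelay = 188;			/* microseconds */
}
/* usbd_get_dma_delay() would then return (188 + 0x3FF) / 0x400 = 1 ms. */
#endif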
175
176/*------------------------------------------------------------------------*
177 * usbd_transfer_setup_sub_malloc
178 *
179 * This function will allocate one or more DMA'able memory chunks
180 * according to the "size", "align" and "count" arguments. Afterwards
181 * "ppc" points to a linear array of USB page caches.
182 *
183 * Returns:
184 * 0: Success
185 * Else: Failure
186 *------------------------------------------------------------------------*/
187#if USB_HAVE_BUSDMA
188uint8_t
189usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
190 struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
191 usb_size_t count)
192{
193 struct usb_page_cache *pc;
194 struct usb_page *pg;
195 void *buf;
196 usb_size_t n_dma_pc;
197 usb_size_t n_obj;
198 usb_size_t x;
199 usb_size_t y;
200 usb_size_t r;
201 usb_size_t z;
202
203 USB_ASSERT(align > 1, ("Invalid alignment, 0x%08x\n",
204 align));
205 USB_ASSERT(size > 0, ("Invalid size = 0\n"));
206
207 if (count == 0) {
208 return (0); /* nothing to allocate */
209 }
210 /*
211 * Make sure that the size is aligned properly.
212 */
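	/*
	 * The expression below rounds "size" up to the nearest multiple
	 * of "align"; for example size = 5 and align = 4 gives
	 * -((-5) & (-4)) = -(-8) = 8.
	 */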
213 size = -((-size) & (-align));
214
215 /*
216 * Try multi-allocation chunks to reduce the number of DMA
217	 * allocations, since DMA allocations are slow.
218 */
219 if (size >= PAGE_SIZE) {
220 n_dma_pc = count;
221 n_obj = 1;
222 } else {
223 /* compute number of objects per page */
224 n_obj = (PAGE_SIZE / size);
225 /*
226 * Compute number of DMA chunks, rounded up
227 * to nearest one:
228 */
229 n_dma_pc = ((count + n_obj - 1) / n_obj);
230 }
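	/*
	 * For example, with PAGE_SIZE = 4096 and size = 64 this packs
	 * n_obj = 64 objects into each DMA chunk, so count = 100 objects
	 * need n_dma_pc = 2 chunk allocations.
	 */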
231
232 if (parm->buf == NULL) {
233		/* this is the sizing pass - just account for the allocation */
234 parm->dma_page_ptr += n_dma_pc;
235 parm->dma_page_cache_ptr += n_dma_pc;
236 parm->dma_page_ptr += count;
237 parm->xfer_page_cache_ptr += count;
238 return (0);
239 }
240 for (x = 0; x != n_dma_pc; x++) {
241 /* need to initialize the page cache */
242 parm->dma_page_cache_ptr[x].tag_parent =
243 &parm->curr_xfer->xroot->dma_parent_tag;
244 }
245 for (x = 0; x != count; x++) {
246 /* need to initialize the page cache */
247 parm->xfer_page_cache_ptr[x].tag_parent =
248 &parm->curr_xfer->xroot->dma_parent_tag;
249 }
250
251 if (ppc) {
252 *ppc = parm->xfer_page_cache_ptr;
253 }
254 r = count; /* set remainder count */
255 z = n_obj * size; /* set allocation size */
256 pc = parm->xfer_page_cache_ptr;
257 pg = parm->dma_page_ptr;
258
259 for (x = 0; x != n_dma_pc; x++) {
260
261 if (r < n_obj) {
262 /* compute last remainder */
263 z = r * size;
264 n_obj = r;
265 }
266 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
267 pg, z, align)) {
268 return (1); /* failure */
269 }
270 /* Set beginning of current buffer */
271 buf = parm->dma_page_cache_ptr->buffer;
272 /* Make room for one DMA page cache and one page */
273 parm->dma_page_cache_ptr++;
274 pg++;
275
276 for (y = 0; (y != n_obj); y++, r--, pc++, pg++) {
277
278 /* Load sub-chunk into DMA */
279 if (usb_pc_dmamap_create(pc, size)) {
280 return (1); /* failure */
281 }
282 pc->buffer = USB_ADD_BYTES(buf, y * size);
283 pc->page_start = pg;
284
285 mtx_lock(pc->tag_parent->mtx);
286 if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
287 mtx_unlock(pc->tag_parent->mtx);
288 return (1); /* failure */
289 }
290 mtx_unlock(pc->tag_parent->mtx);
291 }
292 }
293
294 parm->xfer_page_cache_ptr = pc;
295 parm->dma_page_ptr = pg;
296 return (0);
297}
298#endif
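
/*
 * Sketch of how a controller driver's "xfer_setup" method might call
 * usbd_transfer_setup_sub_malloc(); "FOO_TD_SIZE", "FOO_TD_ALIGN" and
 * "ntd" are hypothetical placeholders for the hardware descriptor
 * geometry and are not defined in this file.
 */
#if 0
	struct usb_page_cache *pc;

	if (usbd_transfer_setup_sub_malloc(parm, &pc,
	    FOO_TD_SIZE, FOO_TD_ALIGN, ntd)) {
		parm->err = USB_ERR_NOMEM;
		return;
	}
	/* "pc[0 .. ntd-1]" now describe the DMA'able descriptor memory */
#endif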
299
300/*------------------------------------------------------------------------*
301 * usbd_transfer_setup_sub - transfer setup subroutine
302 *
303 * This function must be called from the "xfer_setup" callback of the
304 * USB Host or Device controller driver when setting up a USB
305 * transfer. This function will set up the correct packet sizes,
306 * buffer sizes, flags and other fields that are stored in the
307 * "usb_xfer" structure.
308 *------------------------------------------------------------------------*/
309void
310usbd_transfer_setup_sub(struct usb_setup_params *parm)
311{
312 enum {
313 REQ_SIZE = 8,
314 MIN_PKT = 8,
315 };
316 struct usb_xfer *xfer = parm->curr_xfer;
317 const struct usb_config *setup = parm->curr_setup;
318 struct usb_endpoint_ss_comp_descriptor *ecomp;
319 struct usb_endpoint_descriptor *edesc;
320 struct usb_std_packet_size std_size;
321 usb_frcount_t n_frlengths;
322 usb_frcount_t n_frbuffers;
323 usb_frcount_t x;
324 uint8_t type;
325 uint8_t zmps;
326
327 /*
328 * Sanity check. The following parameters must be initialized before
329 * calling this function.
330 */
331 if ((parm->hc_max_packet_size == 0) ||
332 (parm->hc_max_packet_count == 0) ||
333 (parm->hc_max_frame_size == 0)) {
334 parm->err = USB_ERR_INVAL;
335 goto done;
336 }
337 edesc = xfer->endpoint->edesc;
338 ecomp = xfer->endpoint->ecomp;
339
340 type = (edesc->bmAttributes & UE_XFERTYPE);
341
342 xfer->flags = setup->flags;
343 xfer->nframes = setup->frames;
344 xfer->timeout = setup->timeout;
345 xfer->callback = setup->callback;
346 xfer->interval = setup->interval;
347 xfer->endpointno = edesc->bEndpointAddress;
348 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
349 xfer->max_packet_count = 1;
350 /* make a shadow copy: */
351 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
352
353 parm->bufsize = setup->bufsize;
354
355 switch (parm->speed) {
356 case USB_SPEED_HIGH:
357 switch (type) {
358 case UE_ISOCHRONOUS:
359 case UE_INTERRUPT:
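			/*
			 * USB 2.0: bits [12:11] of wMaxPacketSize encode
			 * the number of additional transactions per
			 * microframe for high-bandwidth endpoints.
			 */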
360 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
361
362 /* check for invalid max packet count */
363 if (xfer->max_packet_count > 3)
364 xfer->max_packet_count = 3;
365 break;
366 default:
367 break;
368 }
369 xfer->max_packet_size &= 0x7FF;
370 break;
371 case USB_SPEED_SUPER:
372 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
373
374 if (ecomp != NULL)
375 xfer->max_packet_count += ecomp->bMaxBurst;
376
377 if ((xfer->max_packet_count == 0) ||
378 (xfer->max_packet_count > 16))
379 xfer->max_packet_count = 16;
380
381 switch (type) {
382 case UE_CONTROL:
383 xfer->max_packet_count = 1;
384 break;
385 case UE_ISOCHRONOUS:
386 if (ecomp != NULL) {
387 uint8_t mult;
388
389 mult = (ecomp->bmAttributes & 3) + 1;
390 if (mult > 3)
391 mult = 3;
392
393 xfer->max_packet_count *= mult;
394 }
395 break;
396 default:
397 break;
398 }
399 xfer->max_packet_size &= 0x7FF;
400 break;
401 default:
402 break;
403 }
404 /* range check "max_packet_count" */
405
406 if (xfer->max_packet_count > parm->hc_max_packet_count) {
407 xfer->max_packet_count = parm->hc_max_packet_count;
408 }
409 /* filter "wMaxPacketSize" according to HC capabilities */
410
411 if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
412 (xfer->max_packet_size == 0)) {
413 xfer->max_packet_size = parm->hc_max_packet_size;
414 }
415 /* filter "wMaxPacketSize" according to standard sizes */
416
417 usbd_get_std_packet_size(&std_size, type, parm->speed);
418
419 if (std_size.range.min || std_size.range.max) {
420
421 if (xfer->max_packet_size < std_size.range.min) {
422 xfer->max_packet_size = std_size.range.min;
423 }
424 if (xfer->max_packet_size > std_size.range.max) {
425 xfer->max_packet_size = std_size.range.max;
426 }
427 } else {
428
429 if (xfer->max_packet_size >= std_size.fixed[3]) {
430 xfer->max_packet_size = std_size.fixed[3];
431 } else if (xfer->max_packet_size >= std_size.fixed[2]) {
432 xfer->max_packet_size = std_size.fixed[2];
433 } else if (xfer->max_packet_size >= std_size.fixed[1]) {
434 xfer->max_packet_size = std_size.fixed[1];
435 } else {
436 /* only one possibility left */
437 xfer->max_packet_size = std_size.fixed[0];
438 }
439 }
440
441 /* compute "max_frame_size" */
442
443 usbd_update_max_frame_size(xfer);
444
445 /* check interrupt interval and transfer pre-delay */
446
447 if (type == UE_ISOCHRONOUS) {
448
449 uint16_t frame_limit;
450
451 xfer->interval = 0; /* not used, must be zero */
452 xfer->flags_int.isochronous_xfr = 1; /* set flag */
453
454 if (xfer->timeout == 0) {
455 /*
456 * set a default timeout in
457 * case something goes wrong!
458 */
459 xfer->timeout = 1000 / 4;
460 }
461 switch (parm->speed) {
462 case USB_SPEED_LOW:
463 case USB_SPEED_FULL:
464 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
465 xfer->fps_shift = 0;
466 break;
467 default:
468 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
469 xfer->fps_shift = edesc->bInterval;
470 if (xfer->fps_shift > 0)
471 xfer->fps_shift--;
472 if (xfer->fps_shift > 3)
473 xfer->fps_shift = 3;
474 break;
475 }
476
477 if (xfer->nframes > frame_limit) {
478 /*
479 * this is not going to work
480			 * across different hardware
481 */
482 parm->err = USB_ERR_INVAL;
483 goto done;
484 }
485 if (xfer->nframes == 0) {
486 /*
487 * this is not a valid value
488 */
489 parm->err = USB_ERR_ZERO_NFRAMES;
490 goto done;
491 }
492 } else {
493
494 /*
495		 * If a value is specified, use that; else check the
496 * endpoint descriptor!
497 */
498 if (type == UE_INTERRUPT) {
499
500 uint32_t temp;
501
502 if (xfer->interval == 0) {
503
504 xfer->interval = edesc->bInterval;
505
506 switch (parm->speed) {
507 case USB_SPEED_LOW:
508 case USB_SPEED_FULL:
509 break;
510 default:
511 /* 125us -> 1ms */
512 if (xfer->interval < 4)
513 xfer->interval = 1;
514 else if (xfer->interval > 16)
515 xfer->interval = (1 << (16 - 4));
516 else
517 xfer->interval =
518 (1 << (xfer->interval - 4));
519 break;
520 }
521 }
522
523 if (xfer->interval == 0) {
524 /*
525 * One millisecond is the smallest
526 * interval we support:
527 */
528 xfer->interval = 1;
529 }
530
531 xfer->fps_shift = 0;
532 temp = 1;
533
534 while ((temp != 0) && (temp < xfer->interval)) {
535 xfer->fps_shift++;
536 temp *= 2;
537 }
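			/*
			 * "fps_shift" is now the base-2 logarithm, rounded
			 * up, of the polling interval in milliseconds; for
			 * high and super speed it is converted to 125us
			 * units below by adding 3.
			 */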
538
539 switch (parm->speed) {
540 case USB_SPEED_LOW:
541 case USB_SPEED_FULL:
542 break;
543 default:
544 xfer->fps_shift += 3;
545 break;
546 }
547 }
548 }
549
550 /*
551 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
552	 * to be equal to zero when setting up USB transfers, since
553	 * that would lead to a lot of extra code in the USB kernel.
554 */
555
556 if ((xfer->max_frame_size == 0) ||
557 (xfer->max_packet_size == 0)) {
558
559 zmps = 1;
560
561 if ((parm->bufsize <= MIN_PKT) &&
562 (type != UE_CONTROL) &&
563 (type != UE_BULK)) {
564
565 /* workaround */
566 xfer->max_packet_size = MIN_PKT;
567 xfer->max_packet_count = 1;
568 parm->bufsize = 0; /* automatic setup length */
569 usbd_update_max_frame_size(xfer);
570
571 } else {
572 parm->err = USB_ERR_ZERO_MAXP;
573 goto done;
574 }
575
576 } else {
577 zmps = 0;
578 }
579
580 /*
581 * check if we should setup a default
582 * length:
583 */
584
585 if (parm->bufsize == 0) {
586
587 parm->bufsize = xfer->max_frame_size;
588
589 if (type == UE_ISOCHRONOUS) {
590 parm->bufsize *= xfer->nframes;
591 }
592 }
593 /*
594 * check if we are about to setup a proxy
595 * type of buffer:
596 */
597
598 if (xfer->flags.proxy_buffer) {
599
600 /* round bufsize up */
601
602 parm->bufsize += (xfer->max_frame_size - 1);
603
604 if (parm->bufsize < xfer->max_frame_size) {
605 /* length wrapped around */
606 parm->err = USB_ERR_INVAL;
607 goto done;
608 }
609 /* subtract remainder */
610
611 parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
612
613 /* add length of USB device request structure, if any */
614
615 if (type == UE_CONTROL) {
616 parm->bufsize += REQ_SIZE; /* SETUP message */
617 }
618 }
619 xfer->max_data_length = parm->bufsize;
620
621 /* Setup "n_frlengths" and "n_frbuffers" */
622
623 if (type == UE_ISOCHRONOUS) {
624 n_frlengths = xfer->nframes;
625 n_frbuffers = 1;
626 } else {
627
628 if (type == UE_CONTROL) {
629 xfer->flags_int.control_xfr = 1;
630 if (xfer->nframes == 0) {
631 if (parm->bufsize <= REQ_SIZE) {
632 /*
633 * there will never be any data
634 * stage
635 */
636 xfer->nframes = 1;
637 } else {
638 xfer->nframes = 2;
639 }
640 }
641 } else {
642 if (xfer->nframes == 0) {
643 xfer->nframes = 1;
644 }
645 }
646
647 n_frlengths = xfer->nframes;
648 n_frbuffers = xfer->nframes;
649 }
650
651 /*
652 * check if we have room for the
653 * USB device request structure:
654 */
655
656 if (type == UE_CONTROL) {
657
658 if (xfer->max_data_length < REQ_SIZE) {
659 /* length wrapped around or too small bufsize */
660 parm->err = USB_ERR_INVAL;
661 goto done;
662 }
663 xfer->max_data_length -= REQ_SIZE;
664 }
665 /* setup "frlengths" */
666 xfer->frlengths = parm->xfer_length_ptr;
667 parm->xfer_length_ptr += n_frlengths;
668
669 /* setup "frbuffers" */
670 xfer->frbuffers = parm->xfer_page_cache_ptr;
671 parm->xfer_page_cache_ptr += n_frbuffers;
672
673 /* initialize max frame count */
674 xfer->max_frame_count = xfer->nframes;
675
676 /*
677 * check if we need to setup
678 * a local buffer:
679 */
680
681 if (!xfer->flags.ext_buffer) {
682
683 /* align data */
684 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
685
686 if (parm->buf) {
687
688 xfer->local_buffer =
689 USB_ADD_BYTES(parm->buf, parm->size[0]);
690
691 usbd_xfer_set_frame_offset(xfer, 0, 0);
692
693 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
694 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
695 }
696 }
697 parm->size[0] += parm->bufsize;
698
699 /* align data again */
700 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
701 }
702 /*
703 * Compute maximum buffer size
704 */
705
706 if (parm->bufsize_max < parm->bufsize) {
707 parm->bufsize_max = parm->bufsize;
708 }
709#if USB_HAVE_BUSDMA
710 if (xfer->flags_int.bdma_enable) {
711 /*
712 * Setup "dma_page_ptr".
713 *
714 * Proof for formula below:
715 *
716	 * Assume there are three USB frames having lengths "a", "b" and
717	 * "c". These USB frames will need at most "z"
718 * "usb_page" structures. "z" is given by:
719 *
720 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
721 * ((c / USB_PAGE_SIZE) + 2);
722 *
723 * Constraining "a", "b" and "c" like this:
724 *
725 * (a + b + c) <= parm->bufsize
726 *
727 * We know that:
728 *
729 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
730 *
731 * Here is the general formula:
732 */
733 xfer->dma_page_ptr = parm->dma_page_ptr;
734 parm->dma_page_ptr += (2 * n_frbuffers);
735 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
736 }
737#endif
738 if (zmps) {
739 /* correct maximum data length */
740 xfer->max_data_length = 0;
741 }
742 /* subtract USB frame remainder from "hc_max_frame_size" */
743
744 xfer->max_hc_frame_size =
745 (parm->hc_max_frame_size -
746 (parm->hc_max_frame_size % xfer->max_frame_size));
747
748 if (xfer->max_hc_frame_size == 0) {
749 parm->err = USB_ERR_INVAL;
750 goto done;
751 }
752
753 /* initialize frame buffers */
754
755 if (parm->buf) {
756 for (x = 0; x != n_frbuffers; x++) {
757 xfer->frbuffers[x].tag_parent =
758 &xfer->xroot->dma_parent_tag;
759#if USB_HAVE_BUSDMA
760 if (xfer->flags_int.bdma_enable &&
761 (parm->bufsize_max > 0)) {
762
763 if (usb_pc_dmamap_create(
764 xfer->frbuffers + x,
765 parm->bufsize_max)) {
766 parm->err = USB_ERR_NOMEM;
767 goto done;
768 }
769 }
770#endif
771 }
772 }
773done:
774 if (parm->err) {
775 /*
776 * Set some dummy values so that we avoid division by zero:
777 */
778 xfer->max_hc_frame_size = 1;
779 xfer->max_frame_size = 1;
780 xfer->max_packet_size = 1;
781 xfer->max_data_length = 0;
782 xfer->nframes = 0;
783 xfer->max_frame_count = 0;
784 }
785}
786
787/*------------------------------------------------------------------------*
788 * usbd_transfer_setup - setup an array of USB transfers
789 *
790 * NOTE: You must always call "usbd_transfer_unsetup" after calling
791 * "usbd_transfer_setup" if success was returned.
792 *
793 * The idea is that the USB device driver should pre-allocate all its
794 * transfers by one call to this function.
795 *
796 * Return values:
797 * 0: Success
798 * Else: Failure
799 *------------------------------------------------------------------------*/
800usb_error_t
801usbd_transfer_setup(struct usb_device *udev,
802 const uint8_t *ifaces, struct usb_xfer **ppxfer,
803 const struct usb_config *setup_start, uint16_t n_setup,
804 void *priv_sc, struct mtx *xfer_mtx)
805{
806 struct usb_xfer dummy;
807 struct usb_setup_params parm;
808 const struct usb_config *setup_end = setup_start + n_setup;
809 const struct usb_config *setup;
810 struct usb_endpoint *ep;
811 struct usb_xfer_root *info;
812 struct usb_xfer *xfer;
813 void *buf = NULL;
814 uint16_t n;
815 uint16_t refcount;
816
817 parm.err = 0;
818 refcount = 0;
819 info = NULL;
820
821 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
822 "usbd_transfer_setup can sleep!");
823
824 /* do some checking first */
825
826 if (n_setup == 0) {
827 DPRINTFN(6, "setup array has zero length!\n");
828 return (USB_ERR_INVAL);
829 }
830	if (ifaces == NULL) {
831 DPRINTFN(6, "ifaces array is NULL!\n");
832 return (USB_ERR_INVAL);
833 }
834 if (xfer_mtx == NULL) {
835 DPRINTFN(6, "using global lock\n");
836 xfer_mtx = &Giant;
837 }
838 /* sanity checks */
839 for (setup = setup_start, n = 0;
840 setup != setup_end; setup++, n++) {
841 if (setup->bufsize == (usb_frlength_t)-1) {
842 parm.err = USB_ERR_BAD_BUFSIZE;
843 DPRINTF("invalid bufsize\n");
844 }
845 if (setup->callback == NULL) {
846 parm.err = USB_ERR_NO_CALLBACK;
847 DPRINTF("no callback\n");
848 }
849 ppxfer[n] = NULL;
850 }
851
852 if (parm.err) {
853 goto done;
854 }
855 bzero(&parm, sizeof(parm));
856
857 parm.udev = udev;
858 parm.speed = usbd_get_speed(udev);
859 parm.hc_max_packet_count = 1;
860
861 if (parm.speed >= USB_SPEED_MAX) {
862 parm.err = USB_ERR_INVAL;
863 goto done;
864 }
865 /* setup all transfers */
866
867 while (1) {
868
869 if (buf) {
870 /*
871 * Initialize the "usb_xfer_root" structure,
872 * which is common for all our USB transfers.
873 */
874 info = USB_ADD_BYTES(buf, 0);
875
876 info->memory_base = buf;
877 info->memory_size = parm.size[0];
878
879#if USB_HAVE_BUSDMA
880 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm.size[4]);
881 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm.size[5]);
882#endif
883 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm.size[5]);
884 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm.size[2]);
885
886 cv_init(&info->cv_drain, "WDRAIN");
887
888 info->xfer_mtx = xfer_mtx;
889#if USB_HAVE_BUSDMA
890 usb_dma_tag_setup(&info->dma_parent_tag,
891 parm.dma_tag_p, udev->bus->dma_parent_tag[0].tag,
892 xfer_mtx, &usb_bdma_done_event, 32, parm.dma_tag_max);
893#endif
894
895 info->bus = udev->bus;
896 info->udev = udev;
897
898 TAILQ_INIT(&info->done_q.head);
899 info->done_q.command = &usbd_callback_wrapper;
900#if USB_HAVE_BUSDMA
901 TAILQ_INIT(&info->dma_q.head);
902 info->dma_q.command = &usb_bdma_work_loop;
903#endif
904 info->done_m[0].hdr.pm_callback = &usb_callback_proc;
905 info->done_m[0].xroot = info;
906 info->done_m[1].hdr.pm_callback = &usb_callback_proc;
907 info->done_m[1].xroot = info;
908
909 /*
910 * In device side mode control endpoint
911 * requests need to run from a separate
912 * context, else there is a chance of
913 * deadlock!
914 */
915 if (setup_start == usb_control_ep_cfg)
916 info->done_p =
917 &udev->bus->control_xfer_proc;
918 else if (xfer_mtx == &Giant)
919 info->done_p =
920 &udev->bus->giant_callback_proc;
921 else
922 info->done_p =
923 &udev->bus->non_giant_callback_proc;
924 }
925 /* reset sizes */
926
927 parm.size[0] = 0;
928 parm.buf = buf;
929 parm.size[0] += sizeof(info[0]);
930
931 for (setup = setup_start, n = 0;
932 setup != setup_end; setup++, n++) {
933
934 /* skip USB transfers without callbacks: */
935 if (setup->callback == NULL) {
936 continue;
937 }
938 /* see if there is a matching endpoint */
939 ep = usbd_get_endpoint(udev,
940 ifaces[setup->if_index], setup);
941
942 if ((ep == NULL) || (ep->methods == NULL)) {
943 if (setup->flags.no_pipe_ok)
944 continue;
945 if ((setup->usb_mode != USB_MODE_DUAL) &&
946 (setup->usb_mode != udev->flags.usb_mode))
947 continue;
948 parm.err = USB_ERR_NO_PIPE;
949 goto done;
950 }
951
952 /* align data properly */
953 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
954
955 /* store current setup pointer */
956 parm.curr_setup = setup;
957
958 if (buf) {
959 /*
960 * Common initialization of the
961 * "usb_xfer" structure.
962 */
963 xfer = USB_ADD_BYTES(buf, parm.size[0]);
964 xfer->address = udev->address;
965 xfer->priv_sc = priv_sc;
966 xfer->xroot = info;
967
968 usb_callout_init_mtx(&xfer->timeout_handle,
969 &udev->bus->bus_mtx, 0);
970 } else {
971 /*
972				 * Set up a dummy xfer, since we are
973				 * writing to the "usb_xfer"
974 * structure pointed to by "xfer"
975 * before we have allocated any
976 * memory:
977 */
978 xfer = &dummy;
979 bzero(&dummy, sizeof(dummy));
980 refcount++;
981 }
982
983 /* set transfer endpoint pointer */
984 xfer->endpoint = ep;
985
986 parm.size[0] += sizeof(xfer[0]);
987 parm.methods = xfer->endpoint->methods;
988 parm.curr_xfer = xfer;
989
990 /*
991 * Call the Host or Device controller transfer
992 * setup routine:
993 */
994 (udev->bus->methods->xfer_setup) (&parm);
995
996 /* check for error */
997 if (parm.err)
998 goto done;
999
1000 if (buf) {
1001 /*
1002 * Increment the endpoint refcount. This
1003 * basically prevents setting a new
1004 * configuration and alternate setting
1005 * when USB transfers are in use on
1006 * the given interface. Search the USB
1007 * code for "endpoint->refcount_alloc" if you
1008 * want more information.
1009 */
1010 USB_BUS_LOCK(info->bus);
1011 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1012 parm.err = USB_ERR_INVAL;
1013
1014 xfer->endpoint->refcount_alloc++;
1015
1016 if (xfer->endpoint->refcount_alloc == 0)
1017 panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1018 USB_BUS_UNLOCK(info->bus);
1019
1020 /*
1021 * Whenever we set ppxfer[] then we
1022 * also need to increment the
1023 * "setup_refcount":
1024 */
1025 info->setup_refcount++;
1026
1027 /*
1028 * Transfer is successfully setup and
1029 * can be used:
1030 */
1031 ppxfer[n] = xfer;
1032 }
1033
1034 /* check for error */
1035 if (parm.err)
1036 goto done;
1037 }
1038
1039 if (buf || parm.err) {
1040 goto done;
1041 }
1042 if (refcount == 0) {
1043 /* no transfers - nothing to do ! */
1044 goto done;
1045 }
1046 /* align data properly */
1047 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1048
1049 /* store offset temporarily */
1050 parm.size[1] = parm.size[0];
1051
1052 /*
1053 * The number of DMA tags required depends on
1054 * the number of endpoints. The current estimate
1055 * for maximum number of DMA tags per endpoint
1056 * is two.
1057 */
1058 parm.dma_tag_max += 2 * MIN(n_setup, USB_EP_MAX);
1059
1060 /*
1061 * DMA tags for QH, TD, Data and more.
1062 */
1063 parm.dma_tag_max += 8;
1064
1065 parm.dma_tag_p += parm.dma_tag_max;
1066
1067 parm.size[0] += ((uint8_t *)parm.dma_tag_p) -
1068 ((uint8_t *)0);
1069
1070 /* align data properly */
1071 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1072
1073 /* store offset temporarily */
1074 parm.size[3] = parm.size[0];
1075
1076 parm.size[0] += ((uint8_t *)parm.dma_page_ptr) -
1077 ((uint8_t *)0);
1078
1079 /* align data properly */
1080 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1081
1082 /* store offset temporarily */
1083 parm.size[4] = parm.size[0];
1084
1085 parm.size[0] += ((uint8_t *)parm.dma_page_cache_ptr) -
1086 ((uint8_t *)0);
1087
1088 /* store end offset temporarily */
1089 parm.size[5] = parm.size[0];
1090
1091 parm.size[0] += ((uint8_t *)parm.xfer_page_cache_ptr) -
1092 ((uint8_t *)0);
1093
1094 /* store end offset temporarily */
1095
1096 parm.size[2] = parm.size[0];
1097
1098 /* align data properly */
1099 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1100
1101 parm.size[6] = parm.size[0];
1102
1103 parm.size[0] += ((uint8_t *)parm.xfer_length_ptr) -
1104 ((uint8_t *)0);
1105
1106 /* align data properly */
1107 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1108
1109 /* allocate zeroed memory */
1110 buf = malloc(parm.size[0], M_USB, M_WAITOK | M_ZERO);
1111
1112 if (buf == NULL) {
1113 parm.err = USB_ERR_NOMEM;
1114 DPRINTFN(0, "cannot allocate memory block for "
1115 "configuration (%d bytes)\n",
1116 parm.size[0]);
1117 goto done;
1118 }
1119 parm.dma_tag_p = USB_ADD_BYTES(buf, parm.size[1]);
1120 parm.dma_page_ptr = USB_ADD_BYTES(buf, parm.size[3]);
1121 parm.dma_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[4]);
1122 parm.xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[5]);
1123 parm.xfer_length_ptr = USB_ADD_BYTES(buf, parm.size[6]);
1124 }
1125
1126done:
1127 if (buf) {
1128 if (info->setup_refcount == 0) {
1129 /*
1130 * "usbd_transfer_unsetup_sub" will unlock
1131 * the bus mutex before returning !
1132 */
1133 USB_BUS_LOCK(info->bus);
1134
1135 /* something went wrong */
1136 usbd_transfer_unsetup_sub(info, 0);
1137 }
1138 }
1139 if (parm.err) {
1140 usbd_transfer_unsetup(ppxfer, n_setup);
1141 }
1142 return (parm.err);
1143}
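
/*
 * A minimal usage sketch for usbd_transfer_setup(), assuming a
 * hypothetical driver with a softc "struct foo_softc" that holds the
 * driver mutex and the transfer array. All "foo_" and "FOO_" names are
 * illustrative only and do not exist in the USB stack.
 */
#if 0
enum { FOO_BULK_RD, FOO_N_TRANSFER };

static usb_callback_t foo_bulk_read_callback;

struct foo_softc {
	struct mtx sc_mtx;
	struct usb_xfer *sc_xfer[FOO_N_TRANSFER];
};

static const struct usb_config foo_config[FOO_N_TRANSFER] = {
	[FOO_BULK_RD] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = 512,		/* bytes */
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = &foo_bulk_read_callback,
	},
};

static int
foo_setup_transfers(struct foo_softc *sc, struct usb_device *udev,
    uint8_t iface_index)
{
	usb_error_t error;

	error = usbd_transfer_setup(udev, &iface_index, sc->sc_xfer,
	    foo_config, FOO_N_TRANSFER, sc, &sc->sc_mtx);
	if (error) {
		DPRINTF("could not allocate USB transfers: %s\n",
		    usbd_errstr(error));
		return (ENXIO);
	}
	return (0);
}
#endif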
1144
1145/*------------------------------------------------------------------------*
1146 * usbd_transfer_unsetup_sub - factored out code
1147 *------------------------------------------------------------------------*/
1148static void
1149usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1150{
1151 struct usb_page_cache *pc;
1152
1153 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1154
1155 /* wait for any outstanding DMA operations */
1156
1157 if (needs_delay) {
1158 usb_timeout_t temp;
1159 temp = usbd_get_dma_delay(info->udev);
1160 if (temp != 0) {
1161 usb_pause_mtx(&info->bus->bus_mtx,
1162 USB_MS_TO_TICKS(temp));
1163 }
1164 }
1165
1166 /* make sure that our done messages are not queued anywhere */
1167 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1168
1169 USB_BUS_UNLOCK(info->bus);
1170
1171#if USB_HAVE_BUSDMA
1172 /* free DMA'able memory, if any */
1173 pc = info->dma_page_cache_start;
1174 while (pc != info->dma_page_cache_end) {
1175 usb_pc_free_mem(pc);
1176 pc++;
1177 }
1178
1179 /* free DMA maps in all "xfer->frbuffers" */
1180 pc = info->xfer_page_cache_start;
1181 while (pc != info->xfer_page_cache_end) {
1182 usb_pc_dmamap_destroy(pc);
1183 pc++;
1184 }
1185
1186 /* free all DMA tags */
1187 usb_dma_tag_unsetup(&info->dma_parent_tag);
1188#endif
1189
1190 cv_destroy(&info->cv_drain);
1191
1192 /*
1193	 * free the "memory_base" last, since the "info" structure is
1194	 * contained within the "memory_base"!
1195 */
1196 free(info->memory_base, M_USB);
1197}
1198
1199/*------------------------------------------------------------------------*
1200 * usbd_transfer_unsetup - unsetup/free an array of USB transfers
1201 *
1202 * NOTE: All USB transfers in progress will get called back passing
1203 * the error code "USB_ERR_CANCELLED" before this function
1204 * returns.
1205 *------------------------------------------------------------------------*/
1206void
1207usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1208{
1209 struct usb_xfer *xfer;
1210 struct usb_xfer_root *info;
1211 uint8_t needs_delay = 0;
1212
1213 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1214 "usbd_transfer_unsetup can sleep!");
1215
1216 while (n_setup--) {
1217 xfer = pxfer[n_setup];
1218
1219 if (xfer == NULL)
1220 continue;
1221
1222 info = xfer->xroot;
1223
1224 USB_XFER_LOCK(xfer);
1225 USB_BUS_LOCK(info->bus);
1226
1227 /*
1228 * HINT: when you start/stop a transfer, it might be a
1229 * good idea to directly use the "pxfer[]" structure:
1230 *
1231 * usbd_transfer_start(sc->pxfer[0]);
1232 * usbd_transfer_stop(sc->pxfer[0]);
1233 *
1234		 * That way, if other parts of your code, still running
1235		 * under the same lock ("xfer_mtx"), try to start or stop
1236		 * the transfer after it has been cleared here, the
1237		 * usbd_transfer_start and usbd_transfer_stop functions
1238		 * will simply return when they detect a NULL pointer argument.
1239 *
1240 * To avoid any races we clear the "pxfer[]" pointer
1241 * while holding the private mutex of the driver:
1242 */
1243 pxfer[n_setup] = NULL;
1244
1245 USB_BUS_UNLOCK(info->bus);
1246 USB_XFER_UNLOCK(xfer);
1247
1248 usbd_transfer_drain(xfer);
1249
1250#if USB_HAVE_BUSDMA
1251 if (xfer->flags_int.bdma_enable)
1252 needs_delay = 1;
1253#endif
1254 /*
1255 * NOTE: default endpoint does not have an
1256 * interface, even if endpoint->iface_index == 0
1257 */
1258 USB_BUS_LOCK(info->bus);
1259 xfer->endpoint->refcount_alloc--;
1260 USB_BUS_UNLOCK(info->bus);
1261
1262 usb_callout_drain(&xfer->timeout_handle);
1263
1264 USB_BUS_LOCK(info->bus);
1265
1266 USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1267 "reference count\n"));
1268
1269 info->setup_refcount--;
1270
1271 if (info->setup_refcount == 0) {
1272 usbd_transfer_unsetup_sub(info,
1273 needs_delay);
1274 } else {
1275 USB_BUS_UNLOCK(info->bus);
1276 }
1277 }
1278}
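
/*
 * Typical detach-time teardown, following the hint above; the "foo_"
 * and "FOO_" names refer to the hypothetical driver sketched earlier.
 * Stopping under the driver lock first is optional, because
 * usbd_transfer_unsetup() stops and drains the transfers itself.
 */
#if 0
	mtx_lock(&sc->sc_mtx);
	usbd_transfer_stop(sc->sc_xfer[FOO_BULK_RD]);
	mtx_unlock(&sc->sc_mtx);

	usbd_transfer_unsetup(sc->sc_xfer, FOO_N_TRANSFER);
#endif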
1279
1280/*------------------------------------------------------------------------*
1281 * usbd_control_transfer_init - factored out code
1282 *
1283 * In USB Device Mode we have to wait for the SETUP packet which
1284 * contains the "struct usb_device_request" structure, before we can
1285 * transfer any data. In USB Host Mode we already have the SETUP
1286 * packet at the moment the USB transfer is started. This leads us to
1287 * having to set up the USB transfer at two different points in
1288 * time. This function just contains factored out control transfer
1289 * initialisation code, so that we don't duplicate the code.
1290 *------------------------------------------------------------------------*/
1291static void
1292usbd_control_transfer_init(struct usb_xfer *xfer)
1293{
1294 struct usb_device_request req;
1295
1296 /* copy out the USB request header */
1297
1298 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1299
1300 /* setup remainder */
1301
1302 xfer->flags_int.control_rem = UGETW(req.wLength);
1303
1304 /* copy direction to endpoint variable */
1305
1306 xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1307 xfer->endpointno |=
1308 (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1309}
1310
1311/*------------------------------------------------------------------------*
1312 * usbd_setup_ctrl_transfer
1313 *
1314 * This function handles initialisation of control transfers. Control
1315 * transfers are special in that they can both transmit
1316 * and receive data.
1317 *
1318 * Return values:
1319 * 0: Success
1320 * Else: Failure
1321 *------------------------------------------------------------------------*/
1322static int
1323usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1324{
1325 usb_frlength_t len;
1326
1327 /* Check for control endpoint stall */
1328 if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1329 /* the control transfer is no longer active */
1330 xfer->flags_int.control_stall = 1;
1331 xfer->flags_int.control_act = 0;
1332 } else {
1333 /* don't stall control transfer by default */
1334 xfer->flags_int.control_stall = 0;
1335 }
1336
1337 /* Check for invalid number of frames */
1338 if (xfer->nframes > 2) {
1339 /*
1340 * If you need to split a control transfer, you
1341 * have to do one part at a time. Only with
1342			 * non-control transfers can you do multiple
1343			 * parts at a time.
1344 */
1345 DPRINTFN(0, "Too many frames: %u\n",
1346 (unsigned int)xfer->nframes);
1347 goto error;
1348 }
1349
1350 /*
1351 * Check if there is a control
1352 * transfer in progress:
1353 */
1354 if (xfer->flags_int.control_act) {
1355
1356 if (xfer->flags_int.control_hdr) {
1357
1358 /* clear send header flag */
1359
1360 xfer->flags_int.control_hdr = 0;
1361
1362 /* setup control transfer */
1363 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1364 usbd_control_transfer_init(xfer);
1365 }
1366 }
1367 /* get data length */
1368
1369 len = xfer->sumlen;
1370
1371 } else {
1372
1373 /* the size of the SETUP structure is hardcoded ! */
1374
1375 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1376 DPRINTFN(0, "Wrong framelength %u != %zu\n",
1377 xfer->frlengths[0], sizeof(struct
1378 usb_device_request));
1379 goto error;
1380 }
1381 /* check USB mode */
1382 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1383
1384 /* check number of frames */
1385 if (xfer->nframes != 1) {
1386 /*
1387 * We need to receive the setup
1388 * message first so that we know the
1389 * data direction!
1390 */
1391 DPRINTF("Misconfigured transfer\n");
1392 goto error;
1393 }
1394 /*
1395 * Set a dummy "control_rem" value. This
1396 * variable will be overwritten later by a
1397 * call to "usbd_control_transfer_init()" !
1398 */
1399 xfer->flags_int.control_rem = 0xFFFF;
1400 } else {
1401
1402 /* setup "endpoint" and "control_rem" */
1403
1404 usbd_control_transfer_init(xfer);
1405 }
1406
1407 /* set transfer-header flag */
1408
1409 xfer->flags_int.control_hdr = 1;
1410
1411 /* get data length */
1412
1413 len = (xfer->sumlen - sizeof(struct usb_device_request));
1414 }
1415
1416 /* check if there is a length mismatch */
1417
1418 if (len > xfer->flags_int.control_rem) {
1419 DPRINTFN(0, "Length (%d) greater than "
1420 "remaining length (%d)\n", len,
1421 xfer->flags_int.control_rem);
1422 goto error;
1423 }
1424 /* check if we are doing a short transfer */
1425
1426 if (xfer->flags.force_short_xfer) {
1427 xfer->flags_int.control_rem = 0;
1428 } else {
1429 if ((len != xfer->max_data_length) &&
1430 (len != xfer->flags_int.control_rem) &&
1431 (xfer->nframes != 1)) {
1432 DPRINTFN(0, "Short control transfer without "
1433 "force_short_xfer set\n");
1434 goto error;
1435 }
1436 xfer->flags_int.control_rem -= len;
1437 }
1438
1439 /* the status part is executed when "control_act" is 0 */
1440
1441 if ((xfer->flags_int.control_rem > 0) ||
1442 (xfer->flags.manual_status)) {
1443 /* don't execute the STATUS stage yet */
1444 xfer->flags_int.control_act = 1;
1445
1446 /* sanity check */
1447 if ((!xfer->flags_int.control_hdr) &&
1448 (xfer->nframes == 1)) {
1449 /*
1450 * This is not a valid operation!
1451 */
1452 DPRINTFN(0, "Invalid parameter "
1453 "combination\n");
1454 goto error;
1455 }
1456 } else {
1457 /* time to execute the STATUS stage */
1458 xfer->flags_int.control_act = 0;
1459 }
1460 return (0); /* success */
1461
1462error:
1463 return (1); /* failure */
1464}
1465
1466/*------------------------------------------------------------------------*
1467 * usbd_transfer_submit - start USB hardware for the given transfer
1468 *
1469 * This function should only be called from the USB callback.
1470 *------------------------------------------------------------------------*/
1471void
1472usbd_transfer_submit(struct usb_xfer *xfer)
1473{
1474 struct usb_xfer_root *info;
1475 struct usb_bus *bus;
1476 usb_frcount_t x;
1477
1478 info = xfer->xroot;
1479 bus = info->bus;
1480
1481 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1482 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1483 "read" : "write");
1484
1485#ifdef USB_DEBUG
1486 if (USB_DEBUG_VAR > 0) {
1487 USB_BUS_LOCK(bus);
1488
1489 usb_dump_endpoint(xfer->endpoint);
1490
1491 USB_BUS_UNLOCK(bus);
1492 }
1493#endif
1494
1495 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1496 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1497
1498 /* Only open the USB transfer once! */
1499 if (!xfer->flags_int.open) {
1500 xfer->flags_int.open = 1;
1501
1502 DPRINTF("open\n");
1503
1504 USB_BUS_LOCK(bus);
1505 (xfer->endpoint->methods->open) (xfer);
1506 USB_BUS_UNLOCK(bus);
1507 }
1508 /* set "transferring" flag */
1509 xfer->flags_int.transferring = 1;
1510
1511#if USB_HAVE_POWERD
1512 /* increment power reference */
1513 usbd_transfer_power_ref(xfer, 1);
1514#endif
1515 /*
1516 * Check if the transfer is waiting on a queue, most
1517 * frequently the "done_q":
1518 */
1519 if (xfer->wait_queue) {
1520 USB_BUS_LOCK(bus);
1521 usbd_transfer_dequeue(xfer);
1522 USB_BUS_UNLOCK(bus);
1523 }
1524 /* clear "did_dma_delay" flag */
1525 xfer->flags_int.did_dma_delay = 0;
1526
1527 /* clear "did_close" flag */
1528 xfer->flags_int.did_close = 0;
1529
1530#if USB_HAVE_BUSDMA
1531 /* clear "bdma_setup" flag */
1532 xfer->flags_int.bdma_setup = 0;
1533#endif
1534 /* by default we cannot cancel any USB transfer immediately */
1535 xfer->flags_int.can_cancel_immed = 0;
1536
1537 /* clear lengths and frame counts by default */
1538 xfer->sumlen = 0;
1539 xfer->actlen = 0;
1540 xfer->aframes = 0;
1541
1542 /* clear any previous errors */
1543 xfer->error = 0;
1544
1545 /* Check if the device is still alive */
1546 if (info->udev->state < USB_STATE_POWERED) {
1547 USB_BUS_LOCK(bus);
1548 /*
1549 * Must return cancelled error code else
1550 * device drivers can hang.
1551 */
1552 usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1553 USB_BUS_UNLOCK(bus);
1554 return;
1555 }
1556
1557 /* sanity check */
1558 if (xfer->nframes == 0) {
1559 if (xfer->flags.stall_pipe) {
1560 /*
1561 * Special case - want to stall without transferring
1562 * any data:
1563 */
1564 DPRINTF("xfer=%p nframes=0: stall "
1565 "or clear stall!\n", xfer);
1566 USB_BUS_LOCK(bus);
1567 xfer->flags_int.can_cancel_immed = 1;
1568 /* start the transfer */
1569 usb_command_wrapper(&xfer->endpoint->endpoint_q, xfer);
1570 USB_BUS_UNLOCK(bus);
1571 return;
1572 }
1573 USB_BUS_LOCK(bus);
1574 usbd_transfer_done(xfer, USB_ERR_INVAL);
1575 USB_BUS_UNLOCK(bus);
1576 return;
1577 }
1578 /* compute total transfer length */
1579
1580 for (x = 0; x != xfer->nframes; x++) {
1581 xfer->sumlen += xfer->frlengths[x];
1582 if (xfer->sumlen < xfer->frlengths[x]) {
1583 /* length wrapped around */
1584 USB_BUS_LOCK(bus);
1585 usbd_transfer_done(xfer, USB_ERR_INVAL);
1586 USB_BUS_UNLOCK(bus);
1587 return;
1588 }
1589 }
1590
1591 /* clear some internal flags */
1592
1593 xfer->flags_int.short_xfer_ok = 0;
1594 xfer->flags_int.short_frames_ok = 0;
1595
1596 /* check if this is a control transfer */
1597
1598 if (xfer->flags_int.control_xfr) {
1599
1600 if (usbd_setup_ctrl_transfer(xfer)) {
1601 USB_BUS_LOCK(bus);
1602 usbd_transfer_done(xfer, USB_ERR_STALLED);
1603 USB_BUS_UNLOCK(bus);
1604 return;
1605 }
1606 }
1607 /*
1608 * Setup filtered version of some transfer flags,
1609 * in case of data read direction
1610 */
1611 if (USB_GET_DATA_ISREAD(xfer)) {
1612
1613 if (xfer->flags.short_frames_ok) {
1614 xfer->flags_int.short_xfer_ok = 1;
1615 xfer->flags_int.short_frames_ok = 1;
1616 } else if (xfer->flags.short_xfer_ok) {
1617 xfer->flags_int.short_xfer_ok = 1;
1618
1619 /* check for control transfer */
1620 if (xfer->flags_int.control_xfr) {
1621 /*
1622 * 1) Control transfers do not support
1623 * reception of multiple short USB
1624 * frames in host mode and device side
1625 * mode, with exception of:
1626 *
1627 * 2) Due to sometimes buggy device
1628 * side firmware we need to do a
1629 * STATUS stage in case of short
1630 * control transfers in USB host mode.
1631 * The STATUS stage then becomes the
1632 * "alt_next" to the DATA stage.
1633 */
1634 xfer->flags_int.short_frames_ok = 1;
1635 }
1636 }
1637 }
1638 /*
1639 * Check if BUS-DMA support is enabled and try to load virtual
1640 * buffers into DMA, if any:
1641 */
1642#if USB_HAVE_BUSDMA
1643 if (xfer->flags_int.bdma_enable) {
1644 /* insert the USB transfer last in the BUS-DMA queue */
1645 usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1646 return;
1647 }
1648#endif
1649 /*
1650 * Enter the USB transfer into the Host Controller or
1651 * Device Controller schedule:
1652 */
1653 usbd_pipe_enter(xfer);
1654}
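
/*
 * Sketch of the canonical driver callback from which
 * usbd_transfer_submit() is issued; the callback name and the data
 * handling belong to the hypothetical driver sketched earlier.
 */
#if 0
static void
foo_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/* hand "actlen" bytes from frame 0 to the driver here */
		/* FALLTHROUGH */
	case USB_ST_SETUP:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		break;
	default:			/* error */
		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall and restart */
			usbd_xfer_set_stall(xfer);
			usbd_xfer_set_frame_len(xfer, 0,
			    usbd_xfer_max_len(xfer));
			usbd_transfer_submit(xfer);
		}
		break;
	}
}
#endif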
1655
1656/*------------------------------------------------------------------------*
1657 * usbd_pipe_enter - factored out code
1658 *------------------------------------------------------------------------*/
1659void
1660usbd_pipe_enter(struct usb_xfer *xfer)
1661{
1662 struct usb_endpoint *ep;
1663
1664 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1665
1666 USB_BUS_LOCK(xfer->xroot->bus);
1667
1668 ep = xfer->endpoint;
1669
1670 DPRINTF("enter\n");
1671
1672 /* enter the transfer */
1673 (ep->methods->enter) (xfer);
1674
1675 xfer->flags_int.can_cancel_immed = 1;
1676
1677 /* check for transfer error */
1678 if (xfer->error) {
1679 /* some error has happened */
1680 usbd_transfer_done(xfer, 0);
1681 USB_BUS_UNLOCK(xfer->xroot->bus);
1682 return;
1683 }
1684
1685 /* start the transfer */
1686 usb_command_wrapper(&ep->endpoint_q, xfer);
1687 USB_BUS_UNLOCK(xfer->xroot->bus);
1688}
1689
1690/*------------------------------------------------------------------------*
1691 * usbd_transfer_start - start a USB transfer
1692 *
1693 * NOTE: Calling this function more than one time will only
1694 * result in a single transfer start, until the USB transfer
1695 * completes.
1696 *------------------------------------------------------------------------*/
1697void
1698usbd_transfer_start(struct usb_xfer *xfer)
1699{
1700 if (xfer == NULL) {
1701 /* transfer is gone */
1702 return;
1703 }
1704 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1705
1706 /* mark the USB transfer started */
1707
1708 if (!xfer->flags_int.started) {
1709 /* lock the BUS lock to avoid races updating flags_int */
1710 USB_BUS_LOCK(xfer->xroot->bus);
1711 xfer->flags_int.started = 1;
1712 USB_BUS_UNLOCK(xfer->xroot->bus);
1713 }
1714 /* check if the USB transfer callback is already transferring */
1715
1716 if (xfer->flags_int.transferring) {
1717 return;
1718 }
1719 USB_BUS_LOCK(xfer->xroot->bus);
1720 /* call the USB transfer callback */
1721 usbd_callback_ss_done_defer(xfer);
1722 USB_BUS_UNLOCK(xfer->xroot->bus);
1723}
1724
1725/*------------------------------------------------------------------------*
1726 * usbd_transfer_stop - stop a USB transfer
1727 *
1728 * NOTE: Calling this function more than one time will only
1729 * result in a single transfer stop.
1730 * NOTE: When this function returns it is not safe to free nor
1731 * reuse any DMA buffers. See "usbd_transfer_drain()".
1732 *------------------------------------------------------------------------*/
1733void
1734usbd_transfer_stop(struct usb_xfer *xfer)
1735{
1736 struct usb_endpoint *ep;
1737
1738 if (xfer == NULL) {
1739 /* transfer is gone */
1740 return;
1741 }
1742 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1743
1744 /* check if the USB transfer was ever opened */
1745
1746 if (!xfer->flags_int.open) {
1747 if (xfer->flags_int.started) {
1748 /* nothing to do except clearing the "started" flag */
1749 /* lock the BUS lock to avoid races updating flags_int */
1750 USB_BUS_LOCK(xfer->xroot->bus);
1751 xfer->flags_int.started = 0;
1752 USB_BUS_UNLOCK(xfer->xroot->bus);
1753 }
1754 return;
1755 }
1756 /* try to stop the current USB transfer */
1757
1758 USB_BUS_LOCK(xfer->xroot->bus);
1759 /* override any previous error */
1760 xfer->error = USB_ERR_CANCELLED;
1761
1762 /*
1763	 * Clear "open" and "started" while both the private and the bus
1764	 * lock are held, so that we don't get a race updating "flags_int".
1765 */
1766 xfer->flags_int.open = 0;
1767 xfer->flags_int.started = 0;
1768
1769 /*
1770 * Check if we can cancel the USB transfer immediately.
1771 */
1772 if (xfer->flags_int.transferring) {
1773 if (xfer->flags_int.can_cancel_immed &&
1774 (!xfer->flags_int.did_close)) {
1775 DPRINTF("close\n");
1776 /*
1777 * The following will lead to an USB_ERR_CANCELLED
1778 * error code being passed to the USB callback.
1779 */
1780 (xfer->endpoint->methods->close) (xfer);
1781 /* only close once */
1782 xfer->flags_int.did_close = 1;
1783 } else {
1784 /* need to wait for the next done callback */
1785 }
1786 } else {
1787 DPRINTF("close\n");
1788
1789 /* close here and now */
1790 (xfer->endpoint->methods->close) (xfer);
1791
1792 /*
1793 * Any additional DMA delay is done by
1794 * "usbd_transfer_unsetup()".
1795 */
1796
1797 /*
1798 * Special case. Check if we need to restart a blocked
1799 * endpoint.
1800 */
1801 ep = xfer->endpoint;
1802
1803 /*
1804 * If the current USB transfer is completing we need
1805 * to start the next one:
1806 */
1807 if (ep->endpoint_q.curr == xfer) {
1808 usb_command_wrapper(&ep->endpoint_q, NULL);
1809 }
1810 }
1811
1812 USB_BUS_UNLOCK(xfer->xroot->bus);
1813}
1814
1815/*------------------------------------------------------------------------*
1816 * usbd_transfer_pending
1817 *
1818 * This function will check if a USB transfer is pending, which is a
1819 * little bit complicated!
1820 * Return values:
1821 * 0: Not pending
1822 * 1: Pending: The USB transfer will receive a callback in the future.
1823 *------------------------------------------------------------------------*/
1824uint8_t
1825usbd_transfer_pending(struct usb_xfer *xfer)
1826{
1827 struct usb_xfer_root *info;
1828 struct usb_xfer_queue *pq;
1829
1830 if (xfer == NULL) {
1831 /* transfer is gone */
1832 return (0);
1833 }
1834 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1835
1836 if (xfer->flags_int.transferring) {
1837 /* trivial case */
1838 return (1);
1839 }
1840 USB_BUS_LOCK(xfer->xroot->bus);
1841 if (xfer->wait_queue) {
1842 /* we are waiting on a queue somewhere */
1843 USB_BUS_UNLOCK(xfer->xroot->bus);
1844 return (1);
1845 }
1846 info = xfer->xroot;
1847 pq = &info->done_q;
1848
1849 if (pq->curr == xfer) {
1850 /* we are currently scheduled for callback */
1851 USB_BUS_UNLOCK(xfer->xroot->bus);
1852 return (1);
1853 }
1854 /* we are not pending */
1855 USB_BUS_UNLOCK(xfer->xroot->bus);
1856 return (0);
1857}
1858
1859/*------------------------------------------------------------------------*
1860 * usbd_transfer_drain
1861 *
1862 * This function will stop the USB transfer and wait for any
1863 * additional BUS-DMA and HW-DMA operations to complete. Buffers that
1864 * are loaded into DMA can safely be freed or reused after this
1865 * function has returned.
1866 *------------------------------------------------------------------------*/
1867void
1868usbd_transfer_drain(struct usb_xfer *xfer)
1869{
1870 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1871 "usbd_transfer_drain can sleep!");
1872
1873 if (xfer == NULL) {
1874 /* transfer is gone */
1875 return;
1876 }
1877 if (xfer->xroot->xfer_mtx != &Giant) {
1878 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
1879 }
1880 USB_XFER_LOCK(xfer);
1881
1882 usbd_transfer_stop(xfer);
1883
1884 while (usbd_transfer_pending(xfer) ||
1885 xfer->flags_int.doing_callback) {
1886
1887 /*
1888		 * The callback is allowed to drop its
1889 * transfer mutex. In that case checking only
1890 * "usbd_transfer_pending()" is not enough to tell if
1891 * the USB transfer is fully drained. We also need to
1892 * check the internal "doing_callback" flag.
1893 */
1894 xfer->flags_int.draining = 1;
1895
1896 /*
1897 * Wait until the current outstanding USB
1898 * transfer is complete !
1899 */
1900 cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
1901 }
1902 USB_XFER_UNLOCK(xfer);
1903}
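
/*
 * Sketch: a driver that loaded an externally allocated buffer with
 * usbd_xfer_set_frame_data() must drain before freeing that buffer;
 * "sc_ext_buffer" and the M_DEVBUF usage are illustrative assumptions.
 */
#if 0
	usbd_transfer_drain(sc->sc_xfer[FOO_BULK_RD]);
	free(sc->sc_ext_buffer, M_DEVBUF);
#endif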
1904
1905struct usb_page_cache *
1906usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
1907{
1908 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1909
1910 return (&xfer->frbuffers[frindex]);
1911}
1912
1913/*------------------------------------------------------------------------*
1914 * usbd_xfer_get_fps_shift
1915 *
1916 * The following function is only useful for isochronous transfers. It
1917 * returns how many times the frame execution rate has been shifted
1918 * down.
1919 *
1920 * Return value:
1921 * Success: 0..3
1922 * Failure: 0
1923 *------------------------------------------------------------------------*/
1924uint8_t
1925usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
1926{
1927 return (xfer->fps_shift);
1928}
1929
1930usb_frlength_t
1931usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
1932{
1933 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1934
1935 return (xfer->frlengths[frindex]);
1936}
1937
1938/*------------------------------------------------------------------------*
1939 * usbd_xfer_set_frame_data
1940 *
1941 * This function sets the pointer of the buffer that should be
1942 * loaded directly into DMA for the given USB frame. Passing "ptr"
1943 * equal to NULL while the corresponding "frlength" is greater
1944 * than zero gives undefined results!
1945 *------------------------------------------------------------------------*/
1946void
1947usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
1948 void *ptr, usb_frlength_t len)
1949{
1950 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1951
1952 /* set virtual address to load and length */
1953 xfer->frbuffers[frindex].buffer = ptr;
1954 usbd_xfer_set_frame_len(xfer, frindex, len);
1955}
1956
1957void
1958usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
1959 void **ptr, int *len)
1960{
1961 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1962
1963 if (ptr != NULL)
1964 *ptr = xfer->frbuffers[frindex].buffer;
1965 if (len != NULL)
1966 *len = xfer->frlengths[frindex];
1967}
1968
1969void
1970usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
1971 int *nframes)
1972{
1973 if (actlen != NULL)
1974 *actlen = xfer->actlen;
1975 if (sumlen != NULL)
1976 *sumlen = xfer->sumlen;
1977 if (aframes != NULL)
1978 *aframes = xfer->aframes;
1979 if (nframes != NULL)
1980 *nframes = xfer->nframes;
1981}
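/*
 * A small callback sketch showing how the accessors above are
 * typically combined; "mydrv_rx_callback" and the single-frame read
 * transfer it assumes are hypothetical:
 */
#if 0
static void
mydrv_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	void *buf;
	int actlen;
	int len;

	/* how many bytes were actually transferred */
	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	/* virtual address and programmed length of frame zero */
	usbd_xfer_frame_data(xfer, 0, &buf, &len);

	DPRINTF("got %d of %d bytes\n", actlen, len);
}
#endif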
1982
1983/*------------------------------------------------------------------------*
1984 * usbd_xfer_set_frame_offset
1985 *
1986 * This function sets the frame data buffer offset relative to the beginning
1987 * of the USB DMA buffer allocated for this USB transfer.
1988 *------------------------------------------------------------------------*/
1989void
1990usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
1991 usb_frcount_t frindex)
1992{
1993 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
1994 "when the USB buffer is external\n"));
1995 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1996
1997 /* set virtual address to load */
1998 xfer->frbuffers[frindex].buffer =
1999 USB_ADD_BYTES(xfer->local_buffer, offset);
2000}
2001
2002void
2003usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2004{
2005 xfer->interval = i;
2006}
2007
2008void
2009usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2010{
2011 xfer->timeout = t;
2012}
2013
2014void
2015usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2016{
2017 xfer->nframes = n;
2018}
2019
2020usb_frcount_t
2021usbd_xfer_max_frames(struct usb_xfer *xfer)
2022{
2023 return (xfer->max_frame_count);
2024}
2025
2026usb_frlength_t
2027usbd_xfer_max_len(struct usb_xfer *xfer)
2028{
2029 return (xfer->max_data_length);
2030}
2031
2032usb_frlength_t
2033usbd_xfer_max_framelen(struct usb_xfer *xfer)
2034{
2035 return (xfer->max_frame_size);
2036}
2037
2038void
2039usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2040 usb_frlength_t len)
2041{
2042 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2043
2044 xfer->frlengths[frindex] = len;
2045}
2046
2047/*------------------------------------------------------------------------*
2048 * usb_callback_proc - factored out code
2049 *
2050 * This function performs USB callbacks.
2051 *------------------------------------------------------------------------*/
2052static void
2053usb_callback_proc(struct usb_proc_msg *_pm)
2054{
2055 struct usb_done_msg *pm = (void *)_pm;
2056 struct usb_xfer_root *info = pm->xroot;
2057
2058 /* Change locking order */
2059 USB_BUS_UNLOCK(info->bus);
2060
2061 /*
2062 * We exploit the fact that the mutex is the same for all
2063 * callbacks that will be called from this thread:
2064 */
2065 mtx_lock(info->xfer_mtx);
2066 USB_BUS_LOCK(info->bus);
2067
2068 /* Continue where we lost track */
2069 usb_command_wrapper(&info->done_q,
2070 info->done_q.curr);
2071
2072 mtx_unlock(info->xfer_mtx);
2073}
2074
2075/*------------------------------------------------------------------------*
2076 * usbd_callback_ss_done_defer
2077 *
2078 * This function will defer the start, stop and done callback to the
2079 * correct thread.
2080 *------------------------------------------------------------------------*/
2081static void
2082usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2083{
2084 struct usb_xfer_root *info = xfer->xroot;
2085 struct usb_xfer_queue *pq = &info->done_q;
2086
2087 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2088
2089 if (pq->curr != xfer) {
2090 usbd_transfer_enqueue(pq, xfer);
2091 }
2092 if (!pq->recurse_1) {
2093
2094 /*
2095 * We have to postpone the callback because we would
2096 * otherwise get a Lock Order Reversal (LOR) if we
2097 * tried to proceed!
2098 */
2099 if (usb_proc_msignal(info->done_p,
2100 &info->done_m[0], &info->done_m[1])) {
2101 /* ignore */
2102 }
2103 } else {
2104 /* clear second recurse flag */
2105 pq->recurse_2 = 0;
2106 }
2107 return;
2108
2109}
2110
2111/*------------------------------------------------------------------------*
2112 * usbd_callback_wrapper
2113 *
2114 * This is a wrapper for USB callbacks. This wrapper does some
2115 * auto-magic things like figuring out if we can call the callback
2116 * directly from the current context or if we need to wake up the
2117 * interrupt process.
2118 *------------------------------------------------------------------------*/
2119static void
2120usbd_callback_wrapper(struct usb_xfer_queue *pq)
2121{
2122 struct usb_xfer *xfer = pq->curr;
2123 struct usb_xfer_root *info = xfer->xroot;
2124
2125 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2126 if (!mtx_owned(info->xfer_mtx)) {
2127 /*
2128 * Cases that end up here:
2129 *
2130 * 5) HW interrupt done callback or other source.
2131 */
2132 DPRINTFN(3, "case 5\n");
2133
2134 /*
2135 * We have to postpone the callback because we would
2136 * otherwise get a Lock Order Reversal (LOR) if we
2137 * tried to proceed!
2138 */
2139 if (usb_proc_msignal(info->done_p,
2140 &info->done_m[0], &info->done_m[1])) {
2141 /* ignore */
2142 }
2143 return;
2144 }
2145 /*
2146 * Cases that end up here:
2147 *
2148 * 1) We are starting a transfer
2149 * 2) We are prematurely calling back a transfer
2150 * 3) We are stopping a transfer
2151 * 4) We are doing an ordinary callback
2152 */
2153 DPRINTFN(3, "case 1-4\n");
2154 /* get next USB transfer in the queue */
2155 info->done_q.curr = NULL;
2156
2157 /* set flag in case of drain */
2158 xfer->flags_int.doing_callback = 1;
2159
2160 USB_BUS_UNLOCK(info->bus);
2161 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2162
2163 /* set correct USB state for callback */
2164 if (!xfer->flags_int.transferring) {
2165 xfer->usb_state = USB_ST_SETUP;
2166 if (!xfer->flags_int.started) {
2167 /* we got stopped before we even got started */
2168 USB_BUS_LOCK(info->bus);
2169 goto done;
2170 }
2171 } else {
2172
2173 if (usbd_callback_wrapper_sub(xfer)) {
2174 /* the callback has been deferred */
2175 USB_BUS_LOCK(info->bus);
2176 goto done;
2177 }
2178#if USB_HAVE_POWERD
2179 /* decrement power reference */
2180 usbd_transfer_power_ref(xfer, -1);
2181#endif
2182 xfer->flags_int.transferring = 0;
2183
2184 if (xfer->error) {
2185 xfer->usb_state = USB_ST_ERROR;
2186 } else {
2187 /* set transferred state */
2188 xfer->usb_state = USB_ST_TRANSFERRED;
2189#if USB_HAVE_BUSDMA
2190 /* sync DMA memory, if any */
2191 if (xfer->flags_int.bdma_enable &&
2192 (!xfer->flags_int.bdma_no_post_sync)) {
2193 usb_bdma_post_sync(xfer);
2194 }
2195#endif
2196 }
2197 }
2198
64
65struct usb_std_packet_size {
66 struct {
67 uint16_t min; /* inclusive */
68 uint16_t max; /* inclusive */
69 } range;
70
71 uint16_t fixed[4];
72};
73
74static usb_callback_t usb_request_callback;
75
76static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
77
78 /* This transfer is used for generic control endpoint transfers */
79
80 [0] = {
81 .type = UE_CONTROL,
82 .endpoint = 0x00, /* Control endpoint */
83 .direction = UE_DIR_ANY,
84 .bufsize = USB_EP0_BUFSIZE, /* bytes */
85 .flags = {.proxy_buffer = 1,},
86 .callback = &usb_request_callback,
87 .usb_mode = USB_MODE_DUAL, /* both modes */
88 },
89
90 /* This transfer is used for generic clear stall only */
91
92 [1] = {
93 .type = UE_CONTROL,
94 .endpoint = 0x00, /* Control pipe */
95 .direction = UE_DIR_ANY,
96 .bufsize = sizeof(struct usb_device_request),
97 .callback = &usb_do_clear_stall_callback,
98 .timeout = 1000, /* 1 second */
99 .interval = 50, /* 50ms */
100 .usb_mode = USB_MODE_HOST,
101 },
102};
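/*
 * For comparison, a peripheral driver declares its own "usb_config"
 * array the same way. A minimal sketch (endpoint selection, buffer
 * size and callback name are made-up values for illustration):
 */
#if 0
static const struct usb_config mydrv_config[1] = {
	[0] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,	/* any bulk endpoint */
		.direction = UE_DIR_IN,
		.bufsize = 512,			/* bytes */
		.flags = {.short_xfer_ok = 1,},
		.callback = &mydrv_rx_callback,
	},
};
#endif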
103
104/* function prototypes */
105
106static void usbd_update_max_frame_size(struct usb_xfer *);
107static void usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
108static void usbd_control_transfer_init(struct usb_xfer *);
109static int usbd_setup_ctrl_transfer(struct usb_xfer *);
110static void usb_callback_proc(struct usb_proc_msg *);
111static void usbd_callback_ss_done_defer(struct usb_xfer *);
112static void usbd_callback_wrapper(struct usb_xfer_queue *);
113static void usbd_transfer_start_cb(void *);
114static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *);
115static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
116 uint8_t type, enum usb_dev_speed speed);
117
118/*------------------------------------------------------------------------*
119 * usb_request_callback
120 *------------------------------------------------------------------------*/
121static void
122usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
123{
124 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
125 usb_handle_request_callback(xfer, error);
126 else
127 usbd_do_request_callback(xfer, error);
128}
129
130/*------------------------------------------------------------------------*
131 * usbd_update_max_frame_size
132 *
133 * This function updates the maximum frame size, since high speed USB
134 * can transfer multiple consecutive packets.
135 *------------------------------------------------------------------------*/
136static void
137usbd_update_max_frame_size(struct usb_xfer *xfer)
138{
139 /* compute maximum frame size */
140 /* this computation should not overflow 16-bit */
141 /* max = 15 * 1024 */
142
143 xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
144}
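/*
 * Worked example (illustrative numbers): a high-speed isochronous
 * endpoint reporting wMaxPacketSize = 0x1400 decodes to a packet size
 * of 0x400 (1024) bytes plus two additional transaction opportunities
 * per microframe, so max_packet_count becomes 3 and the product above
 * gives max_frame_size = 3 * 1024 = 3072 bytes.
 */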
145
146/*------------------------------------------------------------------------*
147 * usbd_get_dma_delay
148 *
149 * The following function is called when we need to
150 * synchronize with DMA hardware.
151 *
152 * Returns:
153 * 0: no DMA delay required
154 * Else: milliseconds of DMA delay
155 *------------------------------------------------------------------------*/
156usb_timeout_t
157usbd_get_dma_delay(struct usb_device *udev)
158{
159 struct usb_bus_methods *mtod;
160 uint32_t temp;
161
162 mtod = udev->bus->methods;
163 temp = 0;
164
165 if (mtod->get_dma_delay) {
166 (mtod->get_dma_delay) (udev, &temp);
167 /*
168 * Round up and convert to milliseconds. Note that we use
169 * 1024 milliseconds per second to save a division.
170 */
171 temp += 0x3FF;
172 temp /= 0x400;
173 }
174 return (temp);
175}
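/*
 * Worked example (illustrative value): if the controller method
 * reports a delay of 1500, the rounding above computes
 * (1500 + 0x3FF) / 0x400 = 2523 / 1024 = 2, i.e. the value is rounded
 * up using a divisor of 1024 instead of 1000 so the divide can be
 * done with a shift.
 */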
176
177/*------------------------------------------------------------------------*
178 * usbd_transfer_setup_sub_malloc
179 *
180 * This function will allocate one or more DMA'able memory chunks
181 * according to the "size", "align" and "count" arguments. "ppc" is
182 * set to point at a linear array of USB page caches afterwards.
183 *
184 * Returns:
185 * 0: Success
186 * Else: Failure
187 *------------------------------------------------------------------------*/
188#if USB_HAVE_BUSDMA
189uint8_t
190usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
191 struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
192 usb_size_t count)
193{
194 struct usb_page_cache *pc;
195 struct usb_page *pg;
196 void *buf;
197 usb_size_t n_dma_pc;
198 usb_size_t n_obj;
199 usb_size_t x;
200 usb_size_t y;
201 usb_size_t r;
202 usb_size_t z;
203
204 USB_ASSERT(align > 1, ("Invalid alignment, 0x%08x\n",
205 align));
206 USB_ASSERT(size > 0, ("Invalid size = 0\n"));
207
208 if (count == 0) {
209 return (0); /* nothing to allocate */
210 }
211 /*
212 * Make sure that the size is aligned properly.
213 */
214 size = -((-size) & (-align));
215
216 /*
217 * Try multi-allocation chunks to reduce the number of DMA
218 * allocations, since DMA allocations are slow.
219 */
220 if (size >= PAGE_SIZE) {
221 n_dma_pc = count;
222 n_obj = 1;
223 } else {
224 /* compute number of objects per page */
225 n_obj = (PAGE_SIZE / size);
226 /*
227 * Compute number of DMA chunks, rounded up
228 * to nearest one:
229 */
230 n_dma_pc = ((count + n_obj - 1) / n_obj);
231 }
232
233 if (parm->buf == NULL) {
234 /* for the future */
235 parm->dma_page_ptr += n_dma_pc;
236 parm->dma_page_cache_ptr += n_dma_pc;
237 parm->dma_page_ptr += count;
238 parm->xfer_page_cache_ptr += count;
239 return (0);
240 }
241 for (x = 0; x != n_dma_pc; x++) {
242 /* need to initialize the page cache */
243 parm->dma_page_cache_ptr[x].tag_parent =
244 &parm->curr_xfer->xroot->dma_parent_tag;
245 }
246 for (x = 0; x != count; x++) {
247 /* need to initialize the page cache */
248 parm->xfer_page_cache_ptr[x].tag_parent =
249 &parm->curr_xfer->xroot->dma_parent_tag;
250 }
251
252 if (ppc) {
253 *ppc = parm->xfer_page_cache_ptr;
254 }
255 r = count; /* set remainder count */
256 z = n_obj * size; /* set allocation size */
257 pc = parm->xfer_page_cache_ptr;
258 pg = parm->dma_page_ptr;
259
260 for (x = 0; x != n_dma_pc; x++) {
261
262 if (r < n_obj) {
263 /* compute last remainder */
264 z = r * size;
265 n_obj = r;
266 }
267 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
268 pg, z, align)) {
269 return (1); /* failure */
270 }
271 /* Set beginning of current buffer */
272 buf = parm->dma_page_cache_ptr->buffer;
273 /* Make room for one DMA page cache and one page */
274 parm->dma_page_cache_ptr++;
275 pg++;
276
277 for (y = 0; (y != n_obj); y++, r--, pc++, pg++) {
278
279 /* Load sub-chunk into DMA */
280 if (usb_pc_dmamap_create(pc, size)) {
281 return (1); /* failure */
282 }
283 pc->buffer = USB_ADD_BYTES(buf, y * size);
284 pc->page_start = pg;
285
286 mtx_lock(pc->tag_parent->mtx);
287 if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
288 mtx_unlock(pc->tag_parent->mtx);
289 return (1); /* failure */
290 }
291 mtx_unlock(pc->tag_parent->mtx);
292 }
293 }
294
295 parm->xfer_page_cache_ptr = pc;
296 parm->dma_page_ptr = pg;
297 return (0);
298}
299#endif
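/*
 * Worked example for the rounding and chunking above (illustrative
 * numbers): with size = 24 and align = 32 the expression
 * "-((-size) & (-align))" yields 32, rounding the object size up to
 * the next multiple of the alignment. With PAGE_SIZE = 4096 this
 * gives n_obj = 4096 / 32 = 128 objects per DMA chunk, so a request
 * for count = 300 objects needs n_dma_pc = (300 + 127) / 128 = 3
 * DMA chunks.
 */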
300
301/*------------------------------------------------------------------------*
302 * usbd_transfer_setup_sub - transfer setup subroutine
303 *
304 * This function must be called from the "xfer_setup" callback of the
305 * USB Host or Device controller driver when setting up a USB
306 * transfer. This function will setup correct packet sizes, buffer
307 * sizes, flags and more, which are stored in the "usb_xfer"
308 * structure.
309 *------------------------------------------------------------------------*/
310void
311usbd_transfer_setup_sub(struct usb_setup_params *parm)
312{
313 enum {
314 REQ_SIZE = 8,
315 MIN_PKT = 8,
316 };
317 struct usb_xfer *xfer = parm->curr_xfer;
318 const struct usb_config *setup = parm->curr_setup;
319 struct usb_endpoint_ss_comp_descriptor *ecomp;
320 struct usb_endpoint_descriptor *edesc;
321 struct usb_std_packet_size std_size;
322 usb_frcount_t n_frlengths;
323 usb_frcount_t n_frbuffers;
324 usb_frcount_t x;
325 uint8_t type;
326 uint8_t zmps;
327
328 /*
329 * Sanity check. The following parameters must be initialized before
330 * calling this function.
331 */
332 if ((parm->hc_max_packet_size == 0) ||
333 (parm->hc_max_packet_count == 0) ||
334 (parm->hc_max_frame_size == 0)) {
335 parm->err = USB_ERR_INVAL;
336 goto done;
337 }
338 edesc = xfer->endpoint->edesc;
339 ecomp = xfer->endpoint->ecomp;
340
341 type = (edesc->bmAttributes & UE_XFERTYPE);
342
343 xfer->flags = setup->flags;
344 xfer->nframes = setup->frames;
345 xfer->timeout = setup->timeout;
346 xfer->callback = setup->callback;
347 xfer->interval = setup->interval;
348 xfer->endpointno = edesc->bEndpointAddress;
349 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
350 xfer->max_packet_count = 1;
351 /* make a shadow copy: */
352 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
353
354 parm->bufsize = setup->bufsize;
355
356 switch (parm->speed) {
357 case USB_SPEED_HIGH:
358 switch (type) {
359 case UE_ISOCHRONOUS:
360 case UE_INTERRUPT:
361 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
362
363 /* check for invalid max packet count */
364 if (xfer->max_packet_count > 3)
365 xfer->max_packet_count = 3;
366 break;
367 default:
368 break;
369 }
370 xfer->max_packet_size &= 0x7FF;
371 break;
372 case USB_SPEED_SUPER:
373 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
374
375 if (ecomp != NULL)
376 xfer->max_packet_count += ecomp->bMaxBurst;
377
378 if ((xfer->max_packet_count == 0) ||
379 (xfer->max_packet_count > 16))
380 xfer->max_packet_count = 16;
381
382 switch (type) {
383 case UE_CONTROL:
384 xfer->max_packet_count = 1;
385 break;
386 case UE_ISOCHRONOUS:
387 if (ecomp != NULL) {
388 uint8_t mult;
389
390 mult = (ecomp->bmAttributes & 3) + 1;
391 if (mult > 3)
392 mult = 3;
393
394 xfer->max_packet_count *= mult;
395 }
396 break;
397 default:
398 break;
399 }
400 xfer->max_packet_size &= 0x7FF;
401 break;
402 default:
403 break;
404 }
405 /* range check "max_packet_count" */
406
407 if (xfer->max_packet_count > parm->hc_max_packet_count) {
408 xfer->max_packet_count = parm->hc_max_packet_count;
409 }
410 /* filter "wMaxPacketSize" according to HC capabilities */
411
412 if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
413 (xfer->max_packet_size == 0)) {
414 xfer->max_packet_size = parm->hc_max_packet_size;
415 }
416 /* filter "wMaxPacketSize" according to standard sizes */
417
418 usbd_get_std_packet_size(&std_size, type, parm->speed);
419
420 if (std_size.range.min || std_size.range.max) {
421
422 if (xfer->max_packet_size < std_size.range.min) {
423 xfer->max_packet_size = std_size.range.min;
424 }
425 if (xfer->max_packet_size > std_size.range.max) {
426 xfer->max_packet_size = std_size.range.max;
427 }
428 } else {
429
430 if (xfer->max_packet_size >= std_size.fixed[3]) {
431 xfer->max_packet_size = std_size.fixed[3];
432 } else if (xfer->max_packet_size >= std_size.fixed[2]) {
433 xfer->max_packet_size = std_size.fixed[2];
434 } else if (xfer->max_packet_size >= std_size.fixed[1]) {
435 xfer->max_packet_size = std_size.fixed[1];
436 } else {
437 /* only one possibility left */
438 xfer->max_packet_size = std_size.fixed[0];
439 }
440 }
441
442 /* compute "max_frame_size" */
443
444 usbd_update_max_frame_size(xfer);
445
446 /* check interrupt interval and transfer pre-delay */
447
448 if (type == UE_ISOCHRONOUS) {
449
450 uint16_t frame_limit;
451
452 xfer->interval = 0; /* not used, must be zero */
453 xfer->flags_int.isochronous_xfr = 1; /* set flag */
454
455 if (xfer->timeout == 0) {
456 /*
457 * set a default timeout in
458 * case something goes wrong!
459 */
460 xfer->timeout = 1000 / 4;
461 }
462 switch (parm->speed) {
463 case USB_SPEED_LOW:
464 case USB_SPEED_FULL:
465 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
466 xfer->fps_shift = 0;
467 break;
468 default:
469 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
470 xfer->fps_shift = edesc->bInterval;
471 if (xfer->fps_shift > 0)
472 xfer->fps_shift--;
473 if (xfer->fps_shift > 3)
474 xfer->fps_shift = 3;
475 break;
476 }
477
478 if (xfer->nframes > frame_limit) {
479 /*
480 * this is not going to work
481 * across all hardware
482 */
483 parm->err = USB_ERR_INVAL;
484 goto done;
485 }
486 if (xfer->nframes == 0) {
487 /*
488 * this is not a valid value
489 */
490 parm->err = USB_ERR_ZERO_NFRAMES;
491 goto done;
492 }
493 } else {
494
495 /*
496 * If a value is specified, use it; else check the
497 * endpoint descriptor!
498 */
499 if (type == UE_INTERRUPT) {
500
501 uint32_t temp;
502
503 if (xfer->interval == 0) {
504
505 xfer->interval = edesc->bInterval;
506
507 switch (parm->speed) {
508 case USB_SPEED_LOW:
509 case USB_SPEED_FULL:
510 break;
511 default:
512 /* 125us -> 1ms */
513 if (xfer->interval < 4)
514 xfer->interval = 1;
515 else if (xfer->interval > 16)
516 xfer->interval = (1 << (16 - 4));
517 else
518 xfer->interval =
519 (1 << (xfer->interval - 4));
520 break;
521 }
522 }
523
524 if (xfer->interval == 0) {
525 /*
526 * One millisecond is the smallest
527 * interval we support:
528 */
529 xfer->interval = 1;
530 }
531
532 xfer->fps_shift = 0;
533 temp = 1;
534
535 while ((temp != 0) && (temp < xfer->interval)) {
536 xfer->fps_shift++;
537 temp *= 2;
538 }
539
540 switch (parm->speed) {
541 case USB_SPEED_LOW:
542 case USB_SPEED_FULL:
543 break;
544 default:
545 xfer->fps_shift += 3;
546 break;
547 }
548 }
549 }
550
551 /*
552 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
553 * to be equal to zero when setting up USB transfers, since
554 * that would lead to a lot of extra code in the USB kernel.
555 */
556
557 if ((xfer->max_frame_size == 0) ||
558 (xfer->max_packet_size == 0)) {
559
560 zmps = 1;
561
562 if ((parm->bufsize <= MIN_PKT) &&
563 (type != UE_CONTROL) &&
564 (type != UE_BULK)) {
565
566 /* workaround */
567 xfer->max_packet_size = MIN_PKT;
568 xfer->max_packet_count = 1;
569 parm->bufsize = 0; /* automatic setup length */
570 usbd_update_max_frame_size(xfer);
571
572 } else {
573 parm->err = USB_ERR_ZERO_MAXP;
574 goto done;
575 }
576
577 } else {
578 zmps = 0;
579 }
580
581 /*
582 * check if we should setup a default
583 * length:
584 */
585
586 if (parm->bufsize == 0) {
587
588 parm->bufsize = xfer->max_frame_size;
589
590 if (type == UE_ISOCHRONOUS) {
591 parm->bufsize *= xfer->nframes;
592 }
593 }
594 /*
595 * check if we are about to setup a proxy
596 * type of buffer:
597 */
598
599 if (xfer->flags.proxy_buffer) {
600
601 /* round bufsize up */
602
603 parm->bufsize += (xfer->max_frame_size - 1);
604
605 if (parm->bufsize < xfer->max_frame_size) {
606 /* length wrapped around */
607 parm->err = USB_ERR_INVAL;
608 goto done;
609 }
610 /* subtract remainder */
611
612 parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
613
614 /* add length of USB device request structure, if any */
615
616 if (type == UE_CONTROL) {
617 parm->bufsize += REQ_SIZE; /* SETUP message */
618 }
619 }
620 xfer->max_data_length = parm->bufsize;
621
622 /* Setup "n_frlengths" and "n_frbuffers" */
623
624 if (type == UE_ISOCHRONOUS) {
625 n_frlengths = xfer->nframes;
626 n_frbuffers = 1;
627 } else {
628
629 if (type == UE_CONTROL) {
630 xfer->flags_int.control_xfr = 1;
631 if (xfer->nframes == 0) {
632 if (parm->bufsize <= REQ_SIZE) {
633 /*
634 * there will never be any data
635 * stage
636 */
637 xfer->nframes = 1;
638 } else {
639 xfer->nframes = 2;
640 }
641 }
642 } else {
643 if (xfer->nframes == 0) {
644 xfer->nframes = 1;
645 }
646 }
647
648 n_frlengths = xfer->nframes;
649 n_frbuffers = xfer->nframes;
650 }
651
652 /*
653 * check if we have room for the
654 * USB device request structure:
655 */
656
657 if (type == UE_CONTROL) {
658
659 if (xfer->max_data_length < REQ_SIZE) {
660 /* length wrapped around or too small bufsize */
661 parm->err = USB_ERR_INVAL;
662 goto done;
663 }
664 xfer->max_data_length -= REQ_SIZE;
665 }
666 /* setup "frlengths" */
667 xfer->frlengths = parm->xfer_length_ptr;
668 parm->xfer_length_ptr += n_frlengths;
669
670 /* setup "frbuffers" */
671 xfer->frbuffers = parm->xfer_page_cache_ptr;
672 parm->xfer_page_cache_ptr += n_frbuffers;
673
674 /* initialize max frame count */
675 xfer->max_frame_count = xfer->nframes;
676
677 /*
678 * check if we need to setup
679 * a local buffer:
680 */
681
682 if (!xfer->flags.ext_buffer) {
683
684 /* align data */
685 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
686
687 if (parm->buf) {
688
689 xfer->local_buffer =
690 USB_ADD_BYTES(parm->buf, parm->size[0]);
691
692 usbd_xfer_set_frame_offset(xfer, 0, 0);
693
694 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
695 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
696 }
697 }
698 parm->size[0] += parm->bufsize;
699
700 /* align data again */
701 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
702 }
703 /*
704 * Compute maximum buffer size
705 */
706
707 if (parm->bufsize_max < parm->bufsize) {
708 parm->bufsize_max = parm->bufsize;
709 }
710#if USB_HAVE_BUSDMA
711 if (xfer->flags_int.bdma_enable) {
712 /*
713 * Setup "dma_page_ptr".
714 *
715 * Proof for formula below:
716 *
717 * Assume there are three USB frames having length "a", "b" and
718 * "c". These USB frames will at maximum need "z"
719 * "usb_page" structures. "z" is given by:
720 *
721 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
722 * ((c / USB_PAGE_SIZE) + 2);
723 *
724 * Constraining "a", "b" and "c" like this:
725 *
726 * (a + b + c) <= parm->bufsize
727 *
728 * We know that:
729 *
730 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
731 *
732 * Here is the general formula:
733 */
734 xfer->dma_page_ptr = parm->dma_page_ptr;
735 parm->dma_page_ptr += (2 * n_frbuffers);
736 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
737 }
738#endif
739 if (zmps) {
740 /* correct maximum data length */
741 xfer->max_data_length = 0;
742 }
743 /* subtract USB frame remainder from "hc_max_frame_size" */
744
745 xfer->max_hc_frame_size =
746 (parm->hc_max_frame_size -
747 (parm->hc_max_frame_size % xfer->max_frame_size));
748
749 if (xfer->max_hc_frame_size == 0) {
750 parm->err = USB_ERR_INVAL;
751 goto done;
752 }
753
754 /* initialize frame buffers */
755
756 if (parm->buf) {
757 for (x = 0; x != n_frbuffers; x++) {
758 xfer->frbuffers[x].tag_parent =
759 &xfer->xroot->dma_parent_tag;
760#if USB_HAVE_BUSDMA
761 if (xfer->flags_int.bdma_enable &&
762 (parm->bufsize_max > 0)) {
763
764 if (usb_pc_dmamap_create(
765 xfer->frbuffers + x,
766 parm->bufsize_max)) {
767 parm->err = USB_ERR_NOMEM;
768 goto done;
769 }
770 }
771#endif
772 }
773 }
774done:
775 if (parm->err) {
776 /*
777 * Set some dummy values so that we avoid division by zero:
778 */
779 xfer->max_hc_frame_size = 1;
780 xfer->max_frame_size = 1;
781 xfer->max_packet_size = 1;
782 xfer->max_data_length = 0;
783 xfer->nframes = 0;
784 xfer->max_frame_count = 0;
785 }
786}
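/*
 * Worked example of the proxy buffer sizing above (illustrative
 * numbers): with max_frame_size = 64 and a requested bufsize of 100,
 * the code rounds 100 + 63 = 163 down to the nearest multiple of 64,
 * giving a 128 byte buffer. For a control endpoint REQ_SIZE = 8 extra
 * bytes are reserved for the SETUP message, so bufsize becomes 136
 * while max_data_length ends up as 136 - 8 = 128 bytes for the data
 * stage.
 */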
787
788/*------------------------------------------------------------------------*
789 * usbd_transfer_setup - setup an array of USB transfers
790 *
791 * NOTE: You must always call "usbd_transfer_unsetup" after calling
792 * "usbd_transfer_setup" if success was returned.
793 *
794 * The idea is that the USB device driver should pre-allocate all its
795 * transfers by one call to this function.
796 *
797 * Return values:
798 * 0: Success
799 * Else: Failure
800 *------------------------------------------------------------------------*/
801usb_error_t
802usbd_transfer_setup(struct usb_device *udev,
803 const uint8_t *ifaces, struct usb_xfer **ppxfer,
804 const struct usb_config *setup_start, uint16_t n_setup,
805 void *priv_sc, struct mtx *xfer_mtx)
806{
807 struct usb_xfer dummy;
808 struct usb_setup_params parm;
809 const struct usb_config *setup_end = setup_start + n_setup;
810 const struct usb_config *setup;
811 struct usb_endpoint *ep;
812 struct usb_xfer_root *info;
813 struct usb_xfer *xfer;
814 void *buf = NULL;
815 uint16_t n;
816 uint16_t refcount;
817
818 parm.err = 0;
819 refcount = 0;
820 info = NULL;
821
822 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
823 "usbd_transfer_setup can sleep!");
824
825 /* do some checking first */
826
827 if (n_setup == 0) {
828 DPRINTFN(6, "setup array has zero length!\n");
829 return (USB_ERR_INVAL);
830 }
831 if (ifaces == 0) {
832 DPRINTFN(6, "ifaces array is NULL!\n");
833 return (USB_ERR_INVAL);
834 }
835 if (xfer_mtx == NULL) {
836 DPRINTFN(6, "using global lock\n");
837 xfer_mtx = &Giant;
838 }
839 /* sanity checks */
840 for (setup = setup_start, n = 0;
841 setup != setup_end; setup++, n++) {
842 if (setup->bufsize == (usb_frlength_t)-1) {
843 parm.err = USB_ERR_BAD_BUFSIZE;
844 DPRINTF("invalid bufsize\n");
845 }
846 if (setup->callback == NULL) {
847 parm.err = USB_ERR_NO_CALLBACK;
848 DPRINTF("no callback\n");
849 }
850 ppxfer[n] = NULL;
851 }
852
853 if (parm.err) {
854 goto done;
855 }
856 bzero(&parm, sizeof(parm));
857
858 parm.udev = udev;
859 parm.speed = usbd_get_speed(udev);
860 parm.hc_max_packet_count = 1;
861
862 if (parm.speed >= USB_SPEED_MAX) {
863 parm.err = USB_ERR_INVAL;
864 goto done;
865 }
866 /* setup all transfers */
867
868 while (1) {
869
870 if (buf) {
871 /*
872 * Initialize the "usb_xfer_root" structure,
873 * which is common for all our USB transfers.
874 */
875 info = USB_ADD_BYTES(buf, 0);
876
877 info->memory_base = buf;
878 info->memory_size = parm.size[0];
879
880#if USB_HAVE_BUSDMA
881 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm.size[4]);
882 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm.size[5]);
883#endif
884 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm.size[5]);
885 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm.size[2]);
886
887 cv_init(&info->cv_drain, "WDRAIN");
888
889 info->xfer_mtx = xfer_mtx;
890#if USB_HAVE_BUSDMA
891 usb_dma_tag_setup(&info->dma_parent_tag,
892 parm.dma_tag_p, udev->bus->dma_parent_tag[0].tag,
893 xfer_mtx, &usb_bdma_done_event, 32, parm.dma_tag_max);
894#endif
895
896 info->bus = udev->bus;
897 info->udev = udev;
898
899 TAILQ_INIT(&info->done_q.head);
900 info->done_q.command = &usbd_callback_wrapper;
901#if USB_HAVE_BUSDMA
902 TAILQ_INIT(&info->dma_q.head);
903 info->dma_q.command = &usb_bdma_work_loop;
904#endif
905 info->done_m[0].hdr.pm_callback = &usb_callback_proc;
906 info->done_m[0].xroot = info;
907 info->done_m[1].hdr.pm_callback = &usb_callback_proc;
908 info->done_m[1].xroot = info;
909
910 /*
911 * In device side mode control endpoint
912 * requests need to run from a separate
913 * context, else there is a chance of
914 * deadlock!
915 */
916 if (setup_start == usb_control_ep_cfg)
917 info->done_p =
918 &udev->bus->control_xfer_proc;
919 else if (xfer_mtx == &Giant)
920 info->done_p =
921 &udev->bus->giant_callback_proc;
922 else
923 info->done_p =
924 &udev->bus->non_giant_callback_proc;
925 }
926 /* reset sizes */
927
928 parm.size[0] = 0;
929 parm.buf = buf;
930 parm.size[0] += sizeof(info[0]);
931
932 for (setup = setup_start, n = 0;
933 setup != setup_end; setup++, n++) {
934
935 /* skip USB transfers without callbacks: */
936 if (setup->callback == NULL) {
937 continue;
938 }
939 /* see if there is a matching endpoint */
940 ep = usbd_get_endpoint(udev,
941 ifaces[setup->if_index], setup);
942
943 if ((ep == NULL) || (ep->methods == NULL)) {
944 if (setup->flags.no_pipe_ok)
945 continue;
946 if ((setup->usb_mode != USB_MODE_DUAL) &&
947 (setup->usb_mode != udev->flags.usb_mode))
948 continue;
949 parm.err = USB_ERR_NO_PIPE;
950 goto done;
951 }
952
953 /* align data properly */
954 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
955
956 /* store current setup pointer */
957 parm.curr_setup = setup;
958
959 if (buf) {
960 /*
961 * Common initialization of the
962 * "usb_xfer" structure.
963 */
964 xfer = USB_ADD_BYTES(buf, parm.size[0]);
965 xfer->address = udev->address;
966 xfer->priv_sc = priv_sc;
967 xfer->xroot = info;
968
969 usb_callout_init_mtx(&xfer->timeout_handle,
970 &udev->bus->bus_mtx, 0);
971 } else {
972 /*
973 * Setup a dummy xfer, since we are
974 * writing to the "usb_xfer"
975 * structure pointed to by "xfer"
976 * before we have allocated any
977 * memory:
978 */
979 xfer = &dummy;
980 bzero(&dummy, sizeof(dummy));
981 refcount++;
982 }
983
984 /* set transfer endpoint pointer */
985 xfer->endpoint = ep;
986
987 parm.size[0] += sizeof(xfer[0]);
988 parm.methods = xfer->endpoint->methods;
989 parm.curr_xfer = xfer;
990
991 /*
992 * Call the Host or Device controller transfer
993 * setup routine:
994 */
995 (udev->bus->methods->xfer_setup) (&parm);
996
997 /* check for error */
998 if (parm.err)
999 goto done;
1000
1001 if (buf) {
1002 /*
1003 * Increment the endpoint refcount. This
1004 * basically prevents setting a new
1005 * configuration and alternate setting
1006 * when USB transfers are in use on
1007 * the given interface. Search the USB
1008 * code for "endpoint->refcount_alloc" if you
1009 * want more information.
1010 */
1011 USB_BUS_LOCK(info->bus);
1012 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1013 parm.err = USB_ERR_INVAL;
1014
1015 xfer->endpoint->refcount_alloc++;
1016
1017 if (xfer->endpoint->refcount_alloc == 0)
1018 panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1019 USB_BUS_UNLOCK(info->bus);
1020
1021 /*
1022 * Whenever we set ppxfer[] then we
1023 * also need to increment the
1024 * "setup_refcount":
1025 */
1026 info->setup_refcount++;
1027
1028 /*
1029 * Transfer is successfully setup and
1030 * can be used:
1031 */
1032 ppxfer[n] = xfer;
1033 }
1034
1035 /* check for error */
1036 if (parm.err)
1037 goto done;
1038 }
1039
1040 if (buf || parm.err) {
1041 goto done;
1042 }
1043 if (refcount == 0) {
1044 /* no transfers - nothing to do ! */
1045 goto done;
1046 }
1047 /* align data properly */
1048 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1049
1050 /* store offset temporarily */
1051 parm.size[1] = parm.size[0];
1052
1053 /*
1054 * The number of DMA tags required depends on
1055 * the number of endpoints. The current estimate
1056 * for maximum number of DMA tags per endpoint
1057 * is two.
1058 */
1059 parm.dma_tag_max += 2 * MIN(n_setup, USB_EP_MAX);
1060
1061 /*
1062 * DMA tags for QH, TD, Data and more.
1063 */
1064 parm.dma_tag_max += 8;
1065
1066 parm.dma_tag_p += parm.dma_tag_max;
1067
1068 parm.size[0] += ((uint8_t *)parm.dma_tag_p) -
1069 ((uint8_t *)0);
1070
1071 /* align data properly */
1072 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1073
1074 /* store offset temporarily */
1075 parm.size[3] = parm.size[0];
1076
1077 parm.size[0] += ((uint8_t *)parm.dma_page_ptr) -
1078 ((uint8_t *)0);
1079
1080 /* align data properly */
1081 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1082
1083 /* store offset temporarily */
1084 parm.size[4] = parm.size[0];
1085
1086 parm.size[0] += ((uint8_t *)parm.dma_page_cache_ptr) -
1087 ((uint8_t *)0);
1088
1089 /* store end offset temporarily */
1090 parm.size[5] = parm.size[0];
1091
1092 parm.size[0] += ((uint8_t *)parm.xfer_page_cache_ptr) -
1093 ((uint8_t *)0);
1094
1095 /* store end offset temporarily */
1096
1097 parm.size[2] = parm.size[0];
1098
1099 /* align data properly */
1100 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1101
1102 parm.size[6] = parm.size[0];
1103
1104 parm.size[0] += ((uint8_t *)parm.xfer_length_ptr) -
1105 ((uint8_t *)0);
1106
1107 /* align data properly */
1108 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1109
1110 /* allocate zeroed memory */
1111 buf = malloc(parm.size[0], M_USB, M_WAITOK | M_ZERO);
1112
1113 if (buf == NULL) {
1114 parm.err = USB_ERR_NOMEM;
1115 DPRINTFN(0, "cannot allocate memory block for "
1116 "configuration (%d bytes)\n",
1117 parm.size[0]);
1118 goto done;
1119 }
1120 parm.dma_tag_p = USB_ADD_BYTES(buf, parm.size[1]);
1121 parm.dma_page_ptr = USB_ADD_BYTES(buf, parm.size[3]);
1122 parm.dma_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[4]);
1123 parm.xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[5]);
1124 parm.xfer_length_ptr = USB_ADD_BYTES(buf, parm.size[6]);
1125 }
1126
1127done:
1128 if (buf) {
1129 if (info->setup_refcount == 0) {
1130 /*
1131 * "usbd_transfer_unsetup_sub" will unlock
1132 * the bus mutex before returning !
1133 */
1134 USB_BUS_LOCK(info->bus);
1135
1136 /* something went wrong */
1137 usbd_transfer_unsetup_sub(info, 0);
1138 }
1139 }
1140 if (parm.err) {
1141 usbd_transfer_unsetup(ppxfer, n_setup);
1142 }
1143 return (parm.err);
1144}
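/*
 * A minimal attach/detach sketch (the softc "struct mydrv_softc", its
 * fields and the "mydrv_config" array sketched earlier are
 * hypothetical; error handling is abbreviated):
 */
#if 0
static int
mydrv_setup_xfers(struct mydrv_softc *sc, struct usb_device *udev)
{
	const uint8_t iface_index = 0;	/* first interface */
	usb_error_t err;

	mtx_init(&sc->sc_mtx, "mydrv lock", NULL, MTX_DEF);

	err = usbd_transfer_setup(udev, &iface_index, sc->sc_xfer,
	    mydrv_config, 1, sc, &sc->sc_mtx);
	if (err) {
		DPRINTF("transfer setup failed, err=%d\n", (int)err);
		return (ENXIO);
	}
	return (0);
}

static void
mydrv_unsetup_xfers(struct mydrv_softc *sc)
{
	/* cancels, drains and frees every transfer in the array */
	usbd_transfer_unsetup(sc->sc_xfer, 1);
	mtx_destroy(&sc->sc_mtx);
}
#endif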
1145
1146/*------------------------------------------------------------------------*
1147 * usbd_transfer_unsetup_sub - factored out code
1148 *------------------------------------------------------------------------*/
1149static void
1150usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1151{
1152 struct usb_page_cache *pc;
1153
1154 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1155
1156 /* wait for any outstanding DMA operations */
1157
1158 if (needs_delay) {
1159 usb_timeout_t temp;
1160 temp = usbd_get_dma_delay(info->udev);
1161 if (temp != 0) {
1162 usb_pause_mtx(&info->bus->bus_mtx,
1163 USB_MS_TO_TICKS(temp));
1164 }
1165 }
1166
1167 /* make sure that our done messages are not queued anywhere */
1168 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1169
1170 USB_BUS_UNLOCK(info->bus);
1171
1172#if USB_HAVE_BUSDMA
1173 /* free DMA'able memory, if any */
1174 pc = info->dma_page_cache_start;
1175 while (pc != info->dma_page_cache_end) {
1176 usb_pc_free_mem(pc);
1177 pc++;
1178 }
1179
1180 /* free DMA maps in all "xfer->frbuffers" */
1181 pc = info->xfer_page_cache_start;
1182 while (pc != info->xfer_page_cache_end) {
1183 usb_pc_dmamap_destroy(pc);
1184 pc++;
1185 }
1186
1187 /* free all DMA tags */
1188 usb_dma_tag_unsetup(&info->dma_parent_tag);
1189#endif
1190
1191 cv_destroy(&info->cv_drain);
1192
1193 /*
1194 * free the "memory_base" last, since the "info" structure is
1195 * contained within the "memory_base"!
1196 */
1197 free(info->memory_base, M_USB);
1198}
1199
1200/*------------------------------------------------------------------------*
1201 * usbd_transfer_unsetup - unsetup/free an array of USB transfers
1202 *
1203 * NOTE: All USB transfers in progress will get called back passing
1204 * the error code "USB_ERR_CANCELLED" before this function
1205 * returns.
1206 *------------------------------------------------------------------------*/
1207void
1208usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1209{
1210 struct usb_xfer *xfer;
1211 struct usb_xfer_root *info;
1212 uint8_t needs_delay = 0;
1213
1214 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1215 "usbd_transfer_unsetup can sleep!");
1216
1217 while (n_setup--) {
1218 xfer = pxfer[n_setup];
1219
1220 if (xfer == NULL)
1221 continue;
1222
1223 info = xfer->xroot;
1224
1225 USB_XFER_LOCK(xfer);
1226 USB_BUS_LOCK(info->bus);
1227
1228 /*
1229 * HINT: when you start/stop a transfer, it might be a
1230 * good idea to directly use the "pxfer[]" structure:
1231 *
1232 * usbd_transfer_start(sc->pxfer[0]);
1233 * usbd_transfer_stop(sc->pxfer[0]);
1234 *
1235 * That way, if your code has many parts that will not
1236 * stop running under the same lock, in other words
1237 * "xfer_mtx", the usbd_transfer_start and
1238 * usbd_transfer_stop functions will simply return
1239 * when they detect a NULL pointer argument.
1240 *
1241 * To avoid any races we clear the "pxfer[]" pointer
1242 * while holding the private mutex of the driver:
1243 */
1244 pxfer[n_setup] = NULL;
1245
1246 USB_BUS_UNLOCK(info->bus);
1247 USB_XFER_UNLOCK(xfer);
1248
1249 usbd_transfer_drain(xfer);
1250
1251#if USB_HAVE_BUSDMA
1252 if (xfer->flags_int.bdma_enable)
1253 needs_delay = 1;
1254#endif
1255 /*
1256 * NOTE: default endpoint does not have an
1257 * interface, even if endpoint->iface_index == 0
1258 */
1259 USB_BUS_LOCK(info->bus);
1260 xfer->endpoint->refcount_alloc--;
1261 USB_BUS_UNLOCK(info->bus);
1262
1263 usb_callout_drain(&xfer->timeout_handle);
1264
1265 USB_BUS_LOCK(info->bus);
1266
1267 USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1268 "reference count\n"));
1269
1270 info->setup_refcount--;
1271
1272 if (info->setup_refcount == 0) {
1273 usbd_transfer_unsetup_sub(info,
1274 needs_delay);
1275 } else {
1276 USB_BUS_UNLOCK(info->bus);
1277 }
1278 }
1279}
1280
1281/*------------------------------------------------------------------------*
1282 * usbd_control_transfer_init - factored out code
1283 *
1284 * In USB Device Mode we have to wait for the SETUP packet which
1285 * contains the "struct usb_device_request" structure, before we can
1286 * transfer any data. In USB Host Mode we already have the SETUP
1287 * packet at the moment the USB transfer is started. This leads us to
1288 * having to setup the USB transfer at two different places in
1289 * time. This function just contains factored out control transfer
1290 * initialisation code, so that we don't duplicate the code.
1291 *------------------------------------------------------------------------*/
1292static void
1293usbd_control_transfer_init(struct usb_xfer *xfer)
1294{
1295 struct usb_device_request req;
1296
1297 /* copy out the USB request header */
1298
1299 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1300
1301 /* setup remainder */
1302
1303 xfer->flags_int.control_rem = UGETW(req.wLength);
1304
1305 /* copy direction to endpoint variable */
1306
1307 xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1308 xfer->endpointno |=
1309 (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1310}
1311
1312/*------------------------------------------------------------------------*
1313 * usbd_setup_ctrl_transfer
1314 *
1315 * This function handles initialisation of control transfers. Control
1316 * transfers are special in that regard that they can both transmit
1317 * and receive data.
1318 *
1319 * Return values:
1320 * 0: Success
1321 * Else: Failure
1322 *------------------------------------------------------------------------*/
1323static int
1324usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1325{
1326 usb_frlength_t len;
1327
1328 /* Check for control endpoint stall */
1329 if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1330 /* the control transfer is no longer active */
1331 xfer->flags_int.control_stall = 1;
1332 xfer->flags_int.control_act = 0;
1333 } else {
1334 /* don't stall control transfer by default */
1335 xfer->flags_int.control_stall = 0;
1336 }
1337
1338 /* Check for invalid number of frames */
1339 if (xfer->nframes > 2) {
1340 /*
1341 * If you need to split a control transfer, you
1342 * have to do one part at a time. Only with
1343 * non-control transfers can you do multiple
1344 * parts at a time.
1345 */
1346 DPRINTFN(0, "Too many frames: %u\n",
1347 (unsigned int)xfer->nframes);
1348 goto error;
1349 }
1350
1351 /*
1352 * Check if there is a control
1353 * transfer in progress:
1354 */
1355 if (xfer->flags_int.control_act) {
1356
1357 if (xfer->flags_int.control_hdr) {
1358
1359 /* clear send header flag */
1360
1361 xfer->flags_int.control_hdr = 0;
1362
1363 /* setup control transfer */
1364 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1365 usbd_control_transfer_init(xfer);
1366 }
1367 }
1368 /* get data length */
1369
1370 len = xfer->sumlen;
1371
1372 } else {
1373
1374 /* the size of the SETUP structure is hardcoded ! */
1375
1376 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1377 DPRINTFN(0, "Wrong framelength %u != %zu\n",
1378 xfer->frlengths[0], sizeof(struct
1379 usb_device_request));
1380 goto error;
1381 }
1382 /* check USB mode */
1383 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1384
1385 /* check number of frames */
1386 if (xfer->nframes != 1) {
1387 /*
1388 * We need to receive the setup
1389 * message first so that we know the
1390 * data direction!
1391 */
1392 DPRINTF("Misconfigured transfer\n");
1393 goto error;
1394 }
1395 /*
1396 * Set a dummy "control_rem" value. This
1397 * variable will be overwritten later by a
1398 * call to "usbd_control_transfer_init()" !
1399 */
1400 xfer->flags_int.control_rem = 0xFFFF;
1401 } else {
1402
1403 /* setup "endpoint" and "control_rem" */
1404
1405 usbd_control_transfer_init(xfer);
1406 }
1407
1408 /* set transfer-header flag */
1409
1410 xfer->flags_int.control_hdr = 1;
1411
1412 /* get data length */
1413
1414 len = (xfer->sumlen - sizeof(struct usb_device_request));
1415 }
1416
1417 /* check if there is a length mismatch */
1418
1419 if (len > xfer->flags_int.control_rem) {
1420 DPRINTFN(0, "Length (%d) greater than "
1421 "remaining length (%d)\n", len,
1422 xfer->flags_int.control_rem);
1423 goto error;
1424 }
1425 /* check if we are doing a short transfer */
1426
1427 if (xfer->flags.force_short_xfer) {
1428 xfer->flags_int.control_rem = 0;
1429 } else {
1430 if ((len != xfer->max_data_length) &&
1431 (len != xfer->flags_int.control_rem) &&
1432 (xfer->nframes != 1)) {
1433 DPRINTFN(0, "Short control transfer without "
1434 "force_short_xfer set\n");
1435 goto error;
1436 }
1437 xfer->flags_int.control_rem -= len;
1438 }
1439
1440 /* the status part is executed when "control_act" is 0 */
1441
1442 if ((xfer->flags_int.control_rem > 0) ||
1443 (xfer->flags.manual_status)) {
1444 /* don't execute the STATUS stage yet */
1445 xfer->flags_int.control_act = 1;
1446
1447 /* sanity check */
1448 if ((!xfer->flags_int.control_hdr) &&
1449 (xfer->nframes == 1)) {
1450 /*
1451 * This is not a valid operation!
1452 */
1453 DPRINTFN(0, "Invalid parameter "
1454 "combination\n");
1455 goto error;
1456 }
1457 } else {
1458 /* time to execute the STATUS stage */
1459 xfer->flags_int.control_act = 0;
1460 }
1461 return (0); /* success */
1462
1463error:
1464 return (1); /* failure */
1465}
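/*
 * Frame layout sketch for a host-mode, write-direction control
 * transfer (the helper name and its arguments are hypothetical):
 * frame 0 always carries the 8-byte SETUP structure; frame 1, when
 * present, carries the data stage.
 */
#if 0
static void
mydrv_load_ctrl_frames(struct usb_xfer *xfer,
    const struct usb_device_request *req, const void *data,
    uint16_t datalen)
{
	/* frame 0: copy in the SETUP packet */
	usbd_copy_in(usbd_xfer_get_frame(xfer, 0), 0, req, sizeof(*req));
	usbd_xfer_set_frame_len(xfer, 0, sizeof(*req));

	/* frame 1: optional OUT data stage */
	if (datalen != 0) {
		usbd_copy_in(usbd_xfer_get_frame(xfer, 1), 0, data, datalen);
		usbd_xfer_set_frame_len(xfer, 1, datalen);
	}
	usbd_xfer_set_frames(xfer, (datalen != 0) ? 2 : 1);
}
#endif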
1466
1467/*------------------------------------------------------------------------*
1468 * usbd_transfer_submit - start USB hardware for the given transfer
1469 *
1470 * This function should only be called from the USB callback.
1471 *------------------------------------------------------------------------*/
1472void
1473usbd_transfer_submit(struct usb_xfer *xfer)
1474{
1475 struct usb_xfer_root *info;
1476 struct usb_bus *bus;
1477 usb_frcount_t x;
1478
1479 info = xfer->xroot;
1480 bus = info->bus;
1481
1482 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1483 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1484 "read" : "write");
1485
1486#ifdef USB_DEBUG
1487 if (USB_DEBUG_VAR > 0) {
1488 USB_BUS_LOCK(bus);
1489
1490 usb_dump_endpoint(xfer->endpoint);
1491
1492 USB_BUS_UNLOCK(bus);
1493 }
1494#endif
1495
1496 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1497 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1498
1499 /* Only open the USB transfer once! */
1500 if (!xfer->flags_int.open) {
1501 xfer->flags_int.open = 1;
1502
1503 DPRINTF("open\n");
1504
1505 USB_BUS_LOCK(bus);
1506 (xfer->endpoint->methods->open) (xfer);
1507 USB_BUS_UNLOCK(bus);
1508 }
1509 /* set "transferring" flag */
1510 xfer->flags_int.transferring = 1;
1511
1512#if USB_HAVE_POWERD
1513 /* increment power reference */
1514 usbd_transfer_power_ref(xfer, 1);
1515#endif
1516 /*
1517 * Check if the transfer is waiting on a queue, most
1518 * frequently the "done_q":
1519 */
1520 if (xfer->wait_queue) {
1521 USB_BUS_LOCK(bus);
1522 usbd_transfer_dequeue(xfer);
1523 USB_BUS_UNLOCK(bus);
1524 }
1525 /* clear "did_dma_delay" flag */
1526 xfer->flags_int.did_dma_delay = 0;
1527
1528 /* clear "did_close" flag */
1529 xfer->flags_int.did_close = 0;
1530
1531#if USB_HAVE_BUSDMA
1532 /* clear "bdma_setup" flag */
1533 xfer->flags_int.bdma_setup = 0;
1534#endif
1535 /* by default we cannot cancel any USB transfer immediately */
1536 xfer->flags_int.can_cancel_immed = 0;
1537
1538 /* clear lengths and frame counts by default */
1539 xfer->sumlen = 0;
1540 xfer->actlen = 0;
1541 xfer->aframes = 0;
1542
1543 /* clear any previous errors */
1544 xfer->error = 0;
1545
1546 /* Check if the device is still alive */
1547 if (info->udev->state < USB_STATE_POWERED) {
1548 USB_BUS_LOCK(bus);
1549 /*
1550 * Must return the cancelled error code, else
1551 * device drivers can hang.
1552 */
1553 usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1554 USB_BUS_UNLOCK(bus);
1555 return;
1556 }
1557
1558 /* sanity check */
1559 if (xfer->nframes == 0) {
1560 if (xfer->flags.stall_pipe) {
1561 /*
1562 * Special case - want to stall without transferring
1563 * any data:
1564 */
1565 DPRINTF("xfer=%p nframes=0: stall "
1566 "or clear stall!\n", xfer);
1567 USB_BUS_LOCK(bus);
1568 xfer->flags_int.can_cancel_immed = 1;
1569 /* start the transfer */
1570 usb_command_wrapper(&xfer->endpoint->endpoint_q, xfer);
1571 USB_BUS_UNLOCK(bus);
1572 return;
1573 }
1574 USB_BUS_LOCK(bus);
1575 usbd_transfer_done(xfer, USB_ERR_INVAL);
1576 USB_BUS_UNLOCK(bus);
1577 return;
1578 }
1579 /* compute total transfer length */
1580
1581 for (x = 0; x != xfer->nframes; x++) {
1582 xfer->sumlen += xfer->frlengths[x];
1583 if (xfer->sumlen < xfer->frlengths[x]) {
1584 /* length wrapped around */
1585 USB_BUS_LOCK(bus);
1586 usbd_transfer_done(xfer, USB_ERR_INVAL);
1587 USB_BUS_UNLOCK(bus);
1588 return;
1589 }
1590 }
1591
1592 /* clear some internal flags */
1593
1594 xfer->flags_int.short_xfer_ok = 0;
1595 xfer->flags_int.short_frames_ok = 0;
1596
1597 /* check if this is a control transfer */
1598
1599 if (xfer->flags_int.control_xfr) {
1600
1601 if (usbd_setup_ctrl_transfer(xfer)) {
1602 USB_BUS_LOCK(bus);
1603 usbd_transfer_done(xfer, USB_ERR_STALLED);
1604 USB_BUS_UNLOCK(bus);
1605 return;
1606 }
1607 }
1608 /*
1609 * Setup filtered version of some transfer flags,
1610 * in case of data read direction
1611 */
1612 if (USB_GET_DATA_ISREAD(xfer)) {
1613
1614 if (xfer->flags.short_frames_ok) {
1615 xfer->flags_int.short_xfer_ok = 1;
1616 xfer->flags_int.short_frames_ok = 1;
1617 } else if (xfer->flags.short_xfer_ok) {
1618 xfer->flags_int.short_xfer_ok = 1;
1619
1620 /* check for control transfer */
1621 if (xfer->flags_int.control_xfr) {
1622 /*
1623 * 1) Control transfers do not support
1624 * reception of multiple short USB
1625 * frames in host mode and device side
1626 * mode, with exception of:
1627 *
1628 * 2) Due to sometimes buggy device
1629 * side firmware we need to do a
1630 * STATUS stage in case of short
1631 * control transfers in USB host mode.
1632 * The STATUS stage then becomes the
1633 * "alt_next" to the DATA stage.
1634 */
1635 xfer->flags_int.short_frames_ok = 1;
1636 }
1637 }
1638 }
1639 /*
1640 * Check if BUS-DMA support is enabled and try to load virtual
1641 * buffers into DMA, if any:
1642 */
1643#if USB_HAVE_BUSDMA
1644 if (xfer->flags_int.bdma_enable) {
1645 /* insert the USB transfer last in the BUS-DMA queue */
1646 usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1647 return;
1648 }
1649#endif
1650 /*
1651 * Enter the USB transfer into the Host Controller or
1652 * Device Controller schedule:
1653 */
1654 usbd_pipe_enter(xfer);
1655}
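/*
 * The callback pattern that normally drives usbd_transfer_submit(),
 * sketched for a bulk-read pipe. The driver names are hypothetical;
 * USB_GET_STATE(), usbd_xfer_set_stall() and the USB_ST_* states are
 * the usual usbdi interfaces:
 */
#if 0
static void
mydrv_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/* process "actlen" bytes of received data here */
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		/* program the next read using the full buffer */
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		break;
	default:			/* error */
		if (error != USB_ERR_CANCELLED) {
			/* clear the stall and try again */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
#endif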
1656
1657/*------------------------------------------------------------------------*
1658 * usbd_pipe_enter - factored out code
1659 *------------------------------------------------------------------------*/
1660void
1661usbd_pipe_enter(struct usb_xfer *xfer)
1662{
1663 struct usb_endpoint *ep;
1664
1665 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1666
1667 USB_BUS_LOCK(xfer->xroot->bus);
1668
1669 ep = xfer->endpoint;
1670
1671 DPRINTF("enter\n");
1672
1673 /* enter the transfer */
1674 (ep->methods->enter) (xfer);
1675
1676 xfer->flags_int.can_cancel_immed = 1;
1677
1678 /* check for transfer error */
1679 if (xfer->error) {
1680 /* some error has happened */
1681 usbd_transfer_done(xfer, 0);
1682 USB_BUS_UNLOCK(xfer->xroot->bus);
1683 return;
1684 }
1685
1686 /* start the transfer */
1687 usb_command_wrapper(&ep->endpoint_q, xfer);
1688 USB_BUS_UNLOCK(xfer->xroot->bus);
1689}
1690
1691/*------------------------------------------------------------------------*
1692 * usbd_transfer_start - start a USB transfer
1693 *
1694 * NOTE: Calling this function more than once will only
1695 * result in a single transfer start, until the USB transfer
1696 * completes.
1697 *------------------------------------------------------------------------*/
1698void
1699usbd_transfer_start(struct usb_xfer *xfer)
1700{
1701 if (xfer == NULL) {
1702 /* transfer is gone */
1703 return;
1704 }
1705 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1706
1707 /* mark the USB transfer started */
1708
1709 if (!xfer->flags_int.started) {
1710 /* lock the BUS lock to avoid races updating flags_int */
1711 USB_BUS_LOCK(xfer->xroot->bus);
1712 xfer->flags_int.started = 1;
1713 USB_BUS_UNLOCK(xfer->xroot->bus);
1714 }
1715 /* check if the USB transfer callback is already transferring */
1716
1717 if (xfer->flags_int.transferring) {
1718 return;
1719 }
1720 USB_BUS_LOCK(xfer->xroot->bus);
1721 /* call the USB transfer callback */
1722 usbd_callback_ss_done_defer(xfer);
1723 USB_BUS_UNLOCK(xfer->xroot->bus);
1724}
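/*
 * A short call-site sketch (reusing the hypothetical "sc" from the
 * earlier examples): the driver mutex passed to usbd_transfer_setup()
 * must be held when starting or stopping a transfer.
 */
#if 0
static void
mydrv_start_read(struct mydrv_softc *sc)
{
	mtx_lock(&sc->sc_mtx);
	usbd_transfer_start(sc->sc_xfer[0]);
	mtx_unlock(&sc->sc_mtx);
}
#endif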
1725
1726/*------------------------------------------------------------------------*
1727 * usbd_transfer_stop - stop a USB transfer
1728 *
1729 * NOTE: Calling this function more than once will only
1730 * result in a single transfer stop.
1731 * NOTE: When this function returns it is not safe to free nor
1732 * reuse any DMA buffers. See "usbd_transfer_drain()".
1733 *------------------------------------------------------------------------*/
1734void
1735usbd_transfer_stop(struct usb_xfer *xfer)
1736{
1737 struct usb_endpoint *ep;
1738
1739 if (xfer == NULL) {
1740 /* transfer is gone */
1741 return;
1742 }
1743 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1744
1745 /* check if the USB transfer was ever opened */
1746
1747 if (!xfer->flags_int.open) {
1748 if (xfer->flags_int.started) {
1749 /* nothing to do except clearing the "started" flag */
1750 /* lock the BUS lock to avoid races updating flags_int */
1751 USB_BUS_LOCK(xfer->xroot->bus);
1752 xfer->flags_int.started = 0;
1753 USB_BUS_UNLOCK(xfer->xroot->bus);
1754 }
1755 return;
1756 }
1757 /* try to stop the current USB transfer */
1758
1759 USB_BUS_LOCK(xfer->xroot->bus);
1760 /* override any previous error */
1761 xfer->error = USB_ERR_CANCELLED;
1762
1763 /*
1764 * Clear "open" and "started" while both the private and USB locks
1765 * are locked so that we don't get a race updating "flags_int"
1766 */
1767 xfer->flags_int.open = 0;
1768 xfer->flags_int.started = 0;
1769
1770 /*
1771 * Check if we can cancel the USB transfer immediately.
1772 */
1773 if (xfer->flags_int.transferring) {
1774 if (xfer->flags_int.can_cancel_immed &&
1775 (!xfer->flags_int.did_close)) {
1776 DPRINTF("close\n");
1777 /*
1778 * The following will lead to an USB_ERR_CANCELLED
1779 * error code being passed to the USB callback.
1780 */
1781 (xfer->endpoint->methods->close) (xfer);
1782 /* only close once */
1783 xfer->flags_int.did_close = 1;
1784 } else {
1785 /* need to wait for the next done callback */
1786 }
1787 } else {
1788 DPRINTF("close\n");
1789
1790 /* close here and now */
1791 (xfer->endpoint->methods->close) (xfer);
1792
1793 /*
1794 * Any additional DMA delay is done by
1795 * "usbd_transfer_unsetup()".
1796 */
1797
1798 /*
1799 * Special case. Check if we need to restart a blocked
1800 * endpoint.
1801 */
1802 ep = xfer->endpoint;
1803
1804 /*
1805 * If the current USB transfer is completing we need
1806 * to start the next one:
1807 */
1808 if (ep->endpoint_q.curr == xfer) {
1809 usb_command_wrapper(&ep->endpoint_q, NULL);
1810 }
1811 }
1812
1813 USB_BUS_UNLOCK(xfer->xroot->bus);
1814}
1815
1816/*------------------------------------------------------------------------*
1817 * usbd_transfer_pending
1818 *
1819 * This function will check whether a USB transfer is pending, which is a
1820 * little bit complicated!
1821 * Return values:
1822 * 0: Not pending
1823 * 1: Pending: The USB transfer will receive a callback in the future.
1824 *------------------------------------------------------------------------*/
1825uint8_t
1826usbd_transfer_pending(struct usb_xfer *xfer)
1827{
1828 struct usb_xfer_root *info;
1829 struct usb_xfer_queue *pq;
1830
1831 if (xfer == NULL) {
1832 /* transfer is gone */
1833 return (0);
1834 }
1835 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1836
1837 if (xfer->flags_int.transferring) {
1838 /* trivial case */
1839 return (1);
1840 }
1841 USB_BUS_LOCK(xfer->xroot->bus);
1842 if (xfer->wait_queue) {
1843 /* we are waiting on a queue somewhere */
1844 USB_BUS_UNLOCK(xfer->xroot->bus);
1845 return (1);
1846 }
1847 info = xfer->xroot;
1848 pq = &info->done_q;
1849
1850 if (pq->curr == xfer) {
1851 /* we are currently scheduled for callback */
1852 USB_BUS_UNLOCK(xfer->xroot->bus);
1853 return (1);
1854 }
1855 /* we are not pending */
1856 USB_BUS_UNLOCK(xfer->xroot->bus);
1857 return (0);
1858}
1859
1860/*------------------------------------------------------------------------*
1861 * usbd_transfer_drain
1862 *
1863 * This function will stop the USB transfer and wait for any
1864 * additional BUS-DMA and HW-DMA operations to complete. Buffers that
1865 * are loaded into DMA can safely be freed or reused after this
1866 * function has returned.
1867 *------------------------------------------------------------------------*/
1868void
1869usbd_transfer_drain(struct usb_xfer *xfer)
1870{
1871 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1872 "usbd_transfer_drain can sleep!");
1873
1874 if (xfer == NULL) {
1875 /* transfer is gone */
1876 return;
1877 }
1878 if (xfer->xroot->xfer_mtx != &Giant) {
1879 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
1880 }
1881 USB_XFER_LOCK(xfer);
1882
1883 usbd_transfer_stop(xfer);
1884
1885 while (usbd_transfer_pending(xfer) ||
1886 xfer->flags_int.doing_callback) {
1887
1888 /*
1889 * The callback is allowed to drop its
1890 * transfer mutex. In that case checking only
1891 * "usbd_transfer_pending()" is not enough to tell if
1892 * the USB transfer is fully drained. We also need to
1893 * check the internal "doing_callback" flag.
1894 */
1895 xfer->flags_int.draining = 1;
1896
1897 /*
1898 * Wait until the current outstanding USB
1899 * transfer is complete !
1900 */
1901 cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
1902 }
1903 USB_XFER_UNLOCK(xfer);
1904}
1905
1906struct usb_page_cache *
1907usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
1908{
1909 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1910
1911 return (&xfer->frbuffers[frindex]);
1912}
1913
1914/*------------------------------------------------------------------------*
1915 * usbd_xfer_get_fps_shift
1916 *
1917 * The following function is only useful for isochronous transfers. It
1918 * returns how many times the frame execution rate has been shifted
1919 * down.
1920 *
1921 * Return value:
1922 * Success: 0..3
1923 * Failure: 0
1924 *------------------------------------------------------------------------*/
1925uint8_t
1926usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
1927{
1928 return (xfer->fps_shift);
1929}
1930
1931usb_frlength_t
1932usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
1933{
1934 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1935
1936 return (xfer->frlengths[frindex]);
1937}
1938
1939/*------------------------------------------------------------------------*
1940 * usbd_xfer_set_frame_data
1941 *
1942  * This function sets the pointer of the buffer that should be
1943  * loaded directly into DMA for the given USB frame. Passing "ptr"
1944 * equal to NULL while the corresponding "frlength" is greater
1945 * than zero gives undefined results!
1946 *------------------------------------------------------------------------*/
1947void
1948usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
1949 void *ptr, usb_frlength_t len)
1950{
1951 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1952
1953 /* set virtual address to load and length */
1954 xfer->frbuffers[frindex].buffer = ptr;
1955 usbd_xfer_set_frame_len(xfer, frindex, len);
1956}
1957
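
/*
 * A usage sketch for usbd_xfer_set_frame_data(): the USB_ST_SETUP case
 * of a transfer callback pointing frame zero at a driver supplied
 * buffer. This assumes the transfer was configured with the
 * "ext_buffer" flag; "sc_rx_buf" and "sc_rx_len" are hypothetical. See
 * the fuller callback sketch following usbd_xfer_status() below.
 */
	case USB_ST_SETUP:
		usbd_xfer_set_frame_data(xfer, 0, sc->sc_rx_buf, sc->sc_rx_len);
		usbd_xfer_set_frames(xfer, 1);
		usbd_transfer_submit(xfer);
		break;
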
1958void
1959usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
1960 void **ptr, int *len)
1961{
1962 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1963
1964 if (ptr != NULL)
1965 *ptr = xfer->frbuffers[frindex].buffer;
1966 if (len != NULL)
1967 *len = xfer->frlengths[frindex];
1968}
1969
1970void
1971usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
1972 int *nframes)
1973{
1974 if (actlen != NULL)
1975 *actlen = xfer->actlen;
1976 if (sumlen != NULL)
1977 *sumlen = xfer->sumlen;
1978 if (aframes != NULL)
1979 *aframes = xfer->aframes;
1980 if (nframes != NULL)
1981 *nframes = xfer->nframes;
1982}
1983
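
/*
 * A typical bulk read callback sketch built around usbd_xfer_status()
 * and USB_GET_STATE(). The "xxx_softc" and "sc_rx_buf" names are
 * hypothetical; "sc_rx_buf" is assumed to hold at least
 * usbd_xfer_max_len() bytes. The error path follows the common
 * "set stall and retry" pattern.
 */
static void
xxx_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct xxx_softc *sc = usbd_xfer_softc(xfer);
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/* copy out the "actlen" bytes that were received */
		usbd_copy_out(usbd_xfer_get_frame(xfer, 0), 0,
		    sc->sc_rx_buf, actlen);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		break;
	default:			/* error */
		if (error != USB_ERR_CANCELLED) {
			/* try to clear any endpoint stall and restart */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
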
1984/*------------------------------------------------------------------------*
1985 * usbd_xfer_set_frame_offset
1986 *
1987 * This function sets the frame data buffer offset relative to the beginning
1988 * of the USB DMA buffer allocated for this USB transfer.
1989 *------------------------------------------------------------------------*/
1990void
1991usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
1992 usb_frcount_t frindex)
1993{
1994 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
1995 "when the USB buffer is external\n"));
1996 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1997
1998 /* set virtual address to load */
1999 xfer->frbuffers[frindex].buffer =
2000 USB_ADD_BYTES(xfer->local_buffer, offset);
2001}
2002
2003void
2004usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2005{
2006 xfer->interval = i;
2007}
2008
2009void
2010usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2011{
2012 xfer->timeout = t;
2013}
2014
2015void
2016usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2017{
2018 xfer->nframes = n;
2019}
2020
2021usb_frcount_t
2022usbd_xfer_max_frames(struct usb_xfer *xfer)
2023{
2024 return (xfer->max_frame_count);
2025}
2026
2027usb_frlength_t
2028usbd_xfer_max_len(struct usb_xfer *xfer)
2029{
2030 return (xfer->max_data_length);
2031}
2032
2033usb_frlength_t
2034usbd_xfer_max_framelen(struct usb_xfer *xfer)
2035{
2036 return (xfer->max_frame_size);
2037}
2038
2039void
2040usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2041 usb_frlength_t len)
2042{
2043 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2044
2045 xfer->frlengths[frindex] = len;
2046}
2047
2048/*------------------------------------------------------------------------*
2049 * usb_callback_proc - factored out code
2050 *
2051 * This function performs USB callbacks.
2052 *------------------------------------------------------------------------*/
2053static void
2054usb_callback_proc(struct usb_proc_msg *_pm)
2055{
2056 struct usb_done_msg *pm = (void *)_pm;
2057 struct usb_xfer_root *info = pm->xroot;
2058
2059 /* Change locking order */
2060 USB_BUS_UNLOCK(info->bus);
2061
2062 /*
2063 * We exploit the fact that the mutex is the same for all
2064 * callbacks that will be called from this thread:
2065 */
2066 mtx_lock(info->xfer_mtx);
2067 USB_BUS_LOCK(info->bus);
2068
2069 /* Continue where we lost track */
2070 usb_command_wrapper(&info->done_q,
2071 info->done_q.curr);
2072
2073 mtx_unlock(info->xfer_mtx);
2074}
2075
2076/*------------------------------------------------------------------------*
2077 * usbd_callback_ss_done_defer
2078 *
2079 * This function will defer the start, stop and done callback to the
2080 * correct thread.
2081 *------------------------------------------------------------------------*/
2082static void
2083usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2084{
2085 struct usb_xfer_root *info = xfer->xroot;
2086 struct usb_xfer_queue *pq = &info->done_q;
2087
2088 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2089
2090 if (pq->curr != xfer) {
2091 usbd_transfer_enqueue(pq, xfer);
2092 }
2093 if (!pq->recurse_1) {
2094
2095 /*
2096 * We have to postpone the callback due to the fact we
2097 * will have a Lock Order Reversal, LOR, if we try to
2098 * proceed !
2099 */
2100 if (usb_proc_msignal(info->done_p,
2101 &info->done_m[0], &info->done_m[1])) {
2102 /* ignore */
2103 }
2104 } else {
2105 /* clear second recurse flag */
2106 pq->recurse_2 = 0;
2107 }
2108 return;
2109
2110}
2111
2112/*------------------------------------------------------------------------*
2113 * usbd_callback_wrapper
2114 *
2115 * This is a wrapper for USB callbacks. This wrapper does some
2116 * auto-magic things like figuring out if we can call the callback
2117 * directly from the current context or if we need to wakeup the
2118 * interrupt process.
2119 *------------------------------------------------------------------------*/
2120static void
2121usbd_callback_wrapper(struct usb_xfer_queue *pq)
2122{
2123 struct usb_xfer *xfer = pq->curr;
2124 struct usb_xfer_root *info = xfer->xroot;
2125
2126 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2127 if (!mtx_owned(info->xfer_mtx)) {
2128 /*
2129 * Cases that end up here:
2130 *
2131 * 5) HW interrupt done callback or other source.
2132 */
2133 DPRINTFN(3, "case 5\n");
2134
2135 /*
2136 * We have to postpone the callback due to the fact we
2137 * will have a Lock Order Reversal, LOR, if we try to
2138 * proceed !
2139 */
2140 if (usb_proc_msignal(info->done_p,
2141 &info->done_m[0], &info->done_m[1])) {
2142 /* ignore */
2143 }
2144 return;
2145 }
2146 /*
2147 * Cases that end up here:
2148 *
2149 * 1) We are starting a transfer
2150 * 2) We are prematurely calling back a transfer
2151 * 3) We are stopping a transfer
2152 * 4) We are doing an ordinary callback
2153 */
2154 DPRINTFN(3, "case 1-4\n");
2155 /* get next USB transfer in the queue */
2156 info->done_q.curr = NULL;
2157
2158 /* set flag in case of drain */
2159 xfer->flags_int.doing_callback = 1;
2160
2161 USB_BUS_UNLOCK(info->bus);
2162 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2163
2164 /* set correct USB state for callback */
2165 if (!xfer->flags_int.transferring) {
2166 xfer->usb_state = USB_ST_SETUP;
2167 if (!xfer->flags_int.started) {
2168 /* we got stopped before we even got started */
2169 USB_BUS_LOCK(info->bus);
2170 goto done;
2171 }
2172 } else {
2173
2174 if (usbd_callback_wrapper_sub(xfer)) {
2175 /* the callback has been deferred */
2176 USB_BUS_LOCK(info->bus);
2177 goto done;
2178 }
2179#if USB_HAVE_POWERD
2180 /* decrement power reference */
2181 usbd_transfer_power_ref(xfer, -1);
2182#endif
2183 xfer->flags_int.transferring = 0;
2184
2185 if (xfer->error) {
2186 xfer->usb_state = USB_ST_ERROR;
2187 } else {
2188 /* set transferred state */
2189 xfer->usb_state = USB_ST_TRANSFERRED;
2190#if USB_HAVE_BUSDMA
2191 /* sync DMA memory, if any */
2192 if (xfer->flags_int.bdma_enable &&
2193 (!xfer->flags_int.bdma_no_post_sync)) {
2194 usb_bdma_post_sync(xfer);
2195 }
2196#endif
2197 }
2198 }
2199
2200 if (xfer->usb_state != USB_ST_SETUP)
2201 usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2202
2199 /* call processing routine */
2200 (xfer->callback) (xfer, xfer->error);
2201
2202 /* pickup the USB mutex again */
2203 USB_BUS_LOCK(info->bus);
2204
2205 /*
2206 	 * Check if we got started after we got cancelled, but
2207 * before we managed to do the callback.
2208 */
2209 if ((!xfer->flags_int.open) &&
2210 (xfer->flags_int.started) &&
2211 (xfer->usb_state == USB_ST_ERROR)) {
2212 /* clear flag in case of drain */
2213 xfer->flags_int.doing_callback = 0;
2214 		/* try to loop, but not recursively */
2215 usb_command_wrapper(&info->done_q, xfer);
2216 return;
2217 }
2218
2219done:
2220 /* clear flag in case of drain */
2221 xfer->flags_int.doing_callback = 0;
2222
2223 /*
2224 * Check if we are draining.
2225 */
2226 if (xfer->flags_int.draining &&
2227 (!xfer->flags_int.transferring)) {
2228 /* "usbd_transfer_drain()" is waiting for end of transfer */
2229 xfer->flags_int.draining = 0;
2230 cv_broadcast(&info->cv_drain);
2231 }
2232
2233 /* do the next callback, if any */
2234 usb_command_wrapper(&info->done_q,
2235 info->done_q.curr);
2236}
2237
2238/*------------------------------------------------------------------------*
2239 * usb_dma_delay_done_cb
2240 *
2241  * This function is called when the DMA delay has been executed, and
2242  * will make sure that the callback is called to complete the USB
2243  * transfer. This code path is usually only used when there is an USB
2244 * error like USB_ERR_CANCELLED.
2245 *------------------------------------------------------------------------*/
2246void
2247usb_dma_delay_done_cb(struct usb_xfer *xfer)
2248{
2249 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2250
2251 DPRINTFN(3, "Completed %p\n", xfer);
2252
2253 /* queue callback for execution, again */
2254 usbd_transfer_done(xfer, 0);
2255}
2256
2257/*------------------------------------------------------------------------*
2258 * usbd_transfer_dequeue
2259 *
2260 * - This function is used to remove an USB transfer from a USB
2261 * transfer queue.
2262 *
2263 * - This function can be called multiple times in a row.
2264 *------------------------------------------------------------------------*/
2265void
2266usbd_transfer_dequeue(struct usb_xfer *xfer)
2267{
2268 struct usb_xfer_queue *pq;
2269
2270 pq = xfer->wait_queue;
2271 if (pq) {
2272 TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2273 xfer->wait_queue = NULL;
2274 }
2275}
2276
2277/*------------------------------------------------------------------------*
2278 * usbd_transfer_enqueue
2279 *
2280  * - This function is used to insert an USB transfer into a USB
2281 * transfer queue.
2282 *
2283 * - This function can be called multiple times in a row.
2284 *------------------------------------------------------------------------*/
2285void
2286usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2287{
2288 /*
2289 * Insert the USB transfer into the queue, if it is not
2290 * already on a USB transfer queue:
2291 */
2292 if (xfer->wait_queue == NULL) {
2293 xfer->wait_queue = pq;
2294 TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2295 }
2296}
2297
2298/*------------------------------------------------------------------------*
2299 * usbd_transfer_done
2300 *
2301 * - This function is used to remove an USB transfer from the busdma,
2302 * pipe or interrupt queue.
2303 *
2304 * - This function is used to queue the USB transfer on the done
2305 * queue.
2306 *
2307 * - This function is used to stop any USB transfer timeouts.
2308 *------------------------------------------------------------------------*/
2309void
2310usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2311{
2312 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2313
2314 DPRINTF("err=%s\n", usbd_errstr(error));
2315
2316 /*
2317 * If we are not transferring then just return.
2318 * This can happen during transfer cancel.
2319 */
2320 if (!xfer->flags_int.transferring) {
2321 DPRINTF("not transferring\n");
2322 /* end of control transfer, if any */
2323 xfer->flags_int.control_act = 0;
2324 return;
2325 }
2326 /* only set transfer error if not already set */
2327 if (!xfer->error) {
2328 xfer->error = error;
2329 }
2330 /* stop any callouts */
2331 usb_callout_stop(&xfer->timeout_handle);
2332
2333 /*
2334 * If we are waiting on a queue, just remove the USB transfer
2335 * from the queue, if any. We should have the required locks
2336 * locked to do the remove when this function is called.
2337 */
2338 usbd_transfer_dequeue(xfer);
2339
2340#if USB_HAVE_BUSDMA
2341 if (mtx_owned(xfer->xroot->xfer_mtx)) {
2342 struct usb_xfer_queue *pq;
2343
2344 /*
2345 * If the private USB lock is not locked, then we assume
2346 * that the BUS-DMA load stage has been passed:
2347 */
2348 pq = &xfer->xroot->dma_q;
2349
2350 if (pq->curr == xfer) {
2351 /* start the next BUS-DMA load, if any */
2352 usb_command_wrapper(pq, NULL);
2353 }
2354 }
2355#endif
2356 /* keep some statistics */
2357 if (xfer->error) {
2358 xfer->xroot->bus->stats_err.uds_requests
2359 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2360 } else {
2361 xfer->xroot->bus->stats_ok.uds_requests
2362 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2363 }
2364
2365 /* call the USB transfer callback */
2366 usbd_callback_ss_done_defer(xfer);
2367}
2368
2369/*------------------------------------------------------------------------*
2370 * usbd_transfer_start_cb
2371 *
2372 * This function is called to start the USB transfer when
2373 * "xfer->interval" is greater than zero, and and the endpoint type is
2374 * BULK or CONTROL.
2375 *------------------------------------------------------------------------*/
2376static void
2377usbd_transfer_start_cb(void *arg)
2378{
2379 struct usb_xfer *xfer = arg;
2380 struct usb_endpoint *ep = xfer->endpoint;
2381
2382 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2383
2384 DPRINTF("start\n");
2385
2203 /* call processing routine */
2204 (xfer->callback) (xfer, xfer->error);
2205
2206 /* pickup the USB mutex again */
2207 USB_BUS_LOCK(info->bus);
2208
2209 /*
2210 	 * Check if we got started after we got cancelled, but
2211 * before we managed to do the callback.
2212 */
2213 if ((!xfer->flags_int.open) &&
2214 (xfer->flags_int.started) &&
2215 (xfer->usb_state == USB_ST_ERROR)) {
2216 /* clear flag in case of drain */
2217 xfer->flags_int.doing_callback = 0;
2218 		/* try to loop, but not recursively */
2219 usb_command_wrapper(&info->done_q, xfer);
2220 return;
2221 }
2222
2223done:
2224 /* clear flag in case of drain */
2225 xfer->flags_int.doing_callback = 0;
2226
2227 /*
2228 * Check if we are draining.
2229 */
2230 if (xfer->flags_int.draining &&
2231 (!xfer->flags_int.transferring)) {
2232 /* "usbd_transfer_drain()" is waiting for end of transfer */
2233 xfer->flags_int.draining = 0;
2234 cv_broadcast(&info->cv_drain);
2235 }
2236
2237 /* do the next callback, if any */
2238 usb_command_wrapper(&info->done_q,
2239 info->done_q.curr);
2240}
2241
2242/*------------------------------------------------------------------------*
2243 * usb_dma_delay_done_cb
2244 *
2245  * This function is called when the DMA delay has been executed, and
2246  * will make sure that the callback is called to complete the USB
2247  * transfer. This code path is usually only used when there is an USB
2248 * error like USB_ERR_CANCELLED.
2249 *------------------------------------------------------------------------*/
2250void
2251usb_dma_delay_done_cb(struct usb_xfer *xfer)
2252{
2253 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2254
2255 DPRINTFN(3, "Completed %p\n", xfer);
2256
2257 /* queue callback for execution, again */
2258 usbd_transfer_done(xfer, 0);
2259}
2260
2261/*------------------------------------------------------------------------*
2262 * usbd_transfer_dequeue
2263 *
2264 * - This function is used to remove an USB transfer from a USB
2265 * transfer queue.
2266 *
2267 * - This function can be called multiple times in a row.
2268 *------------------------------------------------------------------------*/
2269void
2270usbd_transfer_dequeue(struct usb_xfer *xfer)
2271{
2272 struct usb_xfer_queue *pq;
2273
2274 pq = xfer->wait_queue;
2275 if (pq) {
2276 TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2277 xfer->wait_queue = NULL;
2278 }
2279}
2280
2281/*------------------------------------------------------------------------*
2282 * usbd_transfer_enqueue
2283 *
2284  * - This function is used to insert an USB transfer into a USB
2285 * transfer queue.
2286 *
2287 * - This function can be called multiple times in a row.
2288 *------------------------------------------------------------------------*/
2289void
2290usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2291{
2292 /*
2293 * Insert the USB transfer into the queue, if it is not
2294 * already on a USB transfer queue:
2295 */
2296 if (xfer->wait_queue == NULL) {
2297 xfer->wait_queue = pq;
2298 TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2299 }
2300}
2301
2302/*------------------------------------------------------------------------*
2303 * usbd_transfer_done
2304 *
2305 * - This function is used to remove an USB transfer from the busdma,
2306 * pipe or interrupt queue.
2307 *
2308 * - This function is used to queue the USB transfer on the done
2309 * queue.
2310 *
2311 * - This function is used to stop any USB transfer timeouts.
2312 *------------------------------------------------------------------------*/
2313void
2314usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2315{
2316 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2317
2318 DPRINTF("err=%s\n", usbd_errstr(error));
2319
2320 /*
2321 * If we are not transferring then just return.
2322 * This can happen during transfer cancel.
2323 */
2324 if (!xfer->flags_int.transferring) {
2325 DPRINTF("not transferring\n");
2326 /* end of control transfer, if any */
2327 xfer->flags_int.control_act = 0;
2328 return;
2329 }
2330 /* only set transfer error if not already set */
2331 if (!xfer->error) {
2332 xfer->error = error;
2333 }
2334 /* stop any callouts */
2335 usb_callout_stop(&xfer->timeout_handle);
2336
2337 /*
2338 * If we are waiting on a queue, just remove the USB transfer
2339 * from the queue, if any. We should have the required locks
2340 * locked to do the remove when this function is called.
2341 */
2342 usbd_transfer_dequeue(xfer);
2343
2344#if USB_HAVE_BUSDMA
2345 if (mtx_owned(xfer->xroot->xfer_mtx)) {
2346 struct usb_xfer_queue *pq;
2347
2348 /*
2349 * If the private USB lock is not locked, then we assume
2350 * that the BUS-DMA load stage has been passed:
2351 */
2352 pq = &xfer->xroot->dma_q;
2353
2354 if (pq->curr == xfer) {
2355 /* start the next BUS-DMA load, if any */
2356 usb_command_wrapper(pq, NULL);
2357 }
2358 }
2359#endif
2360 /* keep some statistics */
2361 if (xfer->error) {
2362 xfer->xroot->bus->stats_err.uds_requests
2363 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2364 } else {
2365 xfer->xroot->bus->stats_ok.uds_requests
2366 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2367 }
2368
2369 /* call the USB transfer callback */
2370 usbd_callback_ss_done_defer(xfer);
2371}
2372
2373/*------------------------------------------------------------------------*
2374 * usbd_transfer_start_cb
2375 *
2376 * This function is called to start the USB transfer when
2377 * "xfer->interval" is greater than zero, and and the endpoint type is
2378 * BULK or CONTROL.
2379 *------------------------------------------------------------------------*/
2380static void
2381usbd_transfer_start_cb(void *arg)
2382{
2383 struct usb_xfer *xfer = arg;
2384 struct usb_endpoint *ep = xfer->endpoint;
2385
2386 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2387
2388 DPRINTF("start\n");
2389
2390 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2391
2386 /* start the transfer */
2387 (ep->methods->start) (xfer);
2388
2389 xfer->flags_int.can_cancel_immed = 1;
2390
2391 /* check for error */
2392 if (xfer->error) {
2393 /* some error has happened */
2394 usbd_transfer_done(xfer, 0);
2395 }
2396}
2397
2398/*------------------------------------------------------------------------*
2399 * usbd_xfer_set_stall
2400 *
2401 * This function is used to set the stall flag outside the
2402 * callback. This function is NULL safe.
2403 *------------------------------------------------------------------------*/
2404void
2405usbd_xfer_set_stall(struct usb_xfer *xfer)
2406{
2407 if (xfer == NULL) {
2408 /* tearing down */
2409 return;
2410 }
2411 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2412
2413 /* avoid any races by locking the USB mutex */
2414 USB_BUS_LOCK(xfer->xroot->bus);
2415 xfer->flags.stall_pipe = 1;
2416 USB_BUS_UNLOCK(xfer->xroot->bus);
2417}
2418
2419int
2420usbd_xfer_is_stalled(struct usb_xfer *xfer)
2421{
2422 return (xfer->endpoint->is_stalled);
2423}
2424
2425/*------------------------------------------------------------------------*
2426 * usbd_transfer_clear_stall
2427 *
2428 * This function is used to clear the stall flag outside the
2429 * callback. This function is NULL safe.
2430 *------------------------------------------------------------------------*/
2431void
2432usbd_transfer_clear_stall(struct usb_xfer *xfer)
2433{
2434 if (xfer == NULL) {
2435 /* tearing down */
2436 return;
2437 }
2438 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2439
2440 /* avoid any races by locking the USB mutex */
2441 USB_BUS_LOCK(xfer->xroot->bus);
2442
2443 xfer->flags.stall_pipe = 0;
2444
2445 USB_BUS_UNLOCK(xfer->xroot->bus);
2446}
2447
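
/*
 * A usage sketch for usbd_xfer_set_stall() outside the callback: a
 * hypothetical driver forcing a clear-stall before restarting its
 * write transfer. usbd_transfer_clear_stall() would undo such a
 * pending request. The "sc_*" and "XXX_*" names are made up.
 */
static void
xxx_restart_write(struct xxx_softc *sc)
{
	mtx_lock(&sc->sc_mtx);
	/* force a clear-stall before the next submission */
	usbd_xfer_set_stall(sc->sc_xfer[XXX_BULK_DT_WR]);
	usbd_transfer_start(sc->sc_xfer[XXX_BULK_DT_WR]);
	mtx_unlock(&sc->sc_mtx);
}
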
2448/*------------------------------------------------------------------------*
2449 * usbd_pipe_start
2450 *
2451 * This function is used to add an USB transfer to the pipe transfer list.
2452 *------------------------------------------------------------------------*/
2453void
2454usbd_pipe_start(struct usb_xfer_queue *pq)
2455{
2456 struct usb_endpoint *ep;
2457 struct usb_xfer *xfer;
2458 uint8_t type;
2459
2460 xfer = pq->curr;
2461 ep = xfer->endpoint;
2462
2463 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2464
2465 /*
2466 * If the endpoint is already stalled we do nothing !
2467 */
2468 if (ep->is_stalled) {
2469 return;
2470 }
2471 /*
2472 * Check if we are supposed to stall the endpoint:
2473 */
2474 if (xfer->flags.stall_pipe) {
2475 struct usb_device *udev;
2476 struct usb_xfer_root *info;
2477
2478 /* clear stall command */
2479 xfer->flags.stall_pipe = 0;
2480
2481 /* get pointer to USB device */
2482 info = xfer->xroot;
2483 udev = info->udev;
2484
2485 /*
2486 * Only stall BULK and INTERRUPT endpoints.
2487 */
2488 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2489 if ((type == UE_BULK) ||
2490 (type == UE_INTERRUPT)) {
2491 uint8_t did_stall;
2492
2493 did_stall = 1;
2494
2495 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2496 (udev->bus->methods->set_stall) (
2497 udev, NULL, ep, &did_stall);
2498 } else if (udev->ctrl_xfer[1]) {
2499 info = udev->ctrl_xfer[1]->xroot;
2500 usb_proc_msignal(
2501 &info->bus->non_giant_callback_proc,
2502 &udev->cs_msg[0], &udev->cs_msg[1]);
2503 } else {
2504 /* should not happen */
2505 DPRINTFN(0, "No stall handler\n");
2506 }
2507 /*
2508 * Check if we should stall. Some USB hardware
2509 * handles set- and clear-stall in hardware.
2510 */
2511 if (did_stall) {
2512 /*
2513 * The transfer will be continued when
2514 * the clear-stall control endpoint
2515 * message is received.
2516 */
2517 ep->is_stalled = 1;
2518 return;
2519 }
2520 } else if (type == UE_ISOCHRONOUS) {
2521
2522 /*
2523 * Make sure any FIFO overflow or other FIFO
2524 * error conditions go away by resetting the
2525 * endpoint FIFO through the clear stall
2526 * method.
2527 */
2528 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2529 (udev->bus->methods->clear_stall) (udev, ep);
2530 }
2531 }
2532 }
2533 /* Set or clear stall complete - special case */
2534 if (xfer->nframes == 0) {
2535 /* we are complete */
2536 xfer->aframes = 0;
2537 usbd_transfer_done(xfer, 0);
2538 return;
2539 }
2540 /*
2541 * Handled cases:
2542 *
2543 * 1) Start the first transfer queued.
2544 *
2545 * 2) Re-start the current USB transfer.
2546 */
2547 /*
2548 * Check if there should be any
2549 * pre transfer start delay:
2550 */
2551 if (xfer->interval > 0) {
2552 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2553 if ((type == UE_BULK) ||
2554 (type == UE_CONTROL)) {
2555 usbd_transfer_timeout_ms(xfer,
2556 &usbd_transfer_start_cb,
2557 xfer->interval);
2558 return;
2559 }
2560 }
2561 DPRINTF("start\n");
2562
2392 /* start the transfer */
2393 (ep->methods->start) (xfer);
2394
2395 xfer->flags_int.can_cancel_immed = 1;
2396
2397 /* check for error */
2398 if (xfer->error) {
2399 /* some error has happened */
2400 usbd_transfer_done(xfer, 0);
2401 }
2402}
2403
2404/*------------------------------------------------------------------------*
2405 * usbd_xfer_set_stall
2406 *
2407 * This function is used to set the stall flag outside the
2408 * callback. This function is NULL safe.
2409 *------------------------------------------------------------------------*/
2410void
2411usbd_xfer_set_stall(struct usb_xfer *xfer)
2412{
2413 if (xfer == NULL) {
2414 /* tearing down */
2415 return;
2416 }
2417 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2418
2419 /* avoid any races by locking the USB mutex */
2420 USB_BUS_LOCK(xfer->xroot->bus);
2421 xfer->flags.stall_pipe = 1;
2422 USB_BUS_UNLOCK(xfer->xroot->bus);
2423}
2424
2425int
2426usbd_xfer_is_stalled(struct usb_xfer *xfer)
2427{
2428 return (xfer->endpoint->is_stalled);
2429}
2430
2431/*------------------------------------------------------------------------*
2432 * usbd_transfer_clear_stall
2433 *
2434 * This function is used to clear the stall flag outside the
2435 * callback. This function is NULL safe.
2436 *------------------------------------------------------------------------*/
2437void
2438usbd_transfer_clear_stall(struct usb_xfer *xfer)
2439{
2440 if (xfer == NULL) {
2441 /* tearing down */
2442 return;
2443 }
2444 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2445
2446 /* avoid any races by locking the USB mutex */
2447 USB_BUS_LOCK(xfer->xroot->bus);
2448
2449 xfer->flags.stall_pipe = 0;
2450
2451 USB_BUS_UNLOCK(xfer->xroot->bus);
2452}
2453
2454/*------------------------------------------------------------------------*
2455 * usbd_pipe_start
2456 *
2457 * This function is used to add an USB transfer to the pipe transfer list.
2458 *------------------------------------------------------------------------*/
2459void
2460usbd_pipe_start(struct usb_xfer_queue *pq)
2461{
2462 struct usb_endpoint *ep;
2463 struct usb_xfer *xfer;
2464 uint8_t type;
2465
2466 xfer = pq->curr;
2467 ep = xfer->endpoint;
2468
2469 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2470
2471 /*
2472 * If the endpoint is already stalled we do nothing !
2473 */
2474 if (ep->is_stalled) {
2475 return;
2476 }
2477 /*
2478 * Check if we are supposed to stall the endpoint:
2479 */
2480 if (xfer->flags.stall_pipe) {
2481 struct usb_device *udev;
2482 struct usb_xfer_root *info;
2483
2484 /* clear stall command */
2485 xfer->flags.stall_pipe = 0;
2486
2487 /* get pointer to USB device */
2488 info = xfer->xroot;
2489 udev = info->udev;
2490
2491 /*
2492 * Only stall BULK and INTERRUPT endpoints.
2493 */
2494 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2495 if ((type == UE_BULK) ||
2496 (type == UE_INTERRUPT)) {
2497 uint8_t did_stall;
2498
2499 did_stall = 1;
2500
2501 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2502 (udev->bus->methods->set_stall) (
2503 udev, NULL, ep, &did_stall);
2504 } else if (udev->ctrl_xfer[1]) {
2505 info = udev->ctrl_xfer[1]->xroot;
2506 usb_proc_msignal(
2507 &info->bus->non_giant_callback_proc,
2508 &udev->cs_msg[0], &udev->cs_msg[1]);
2509 } else {
2510 /* should not happen */
2511 DPRINTFN(0, "No stall handler\n");
2512 }
2513 /*
2514 * Check if we should stall. Some USB hardware
2515 * handles set- and clear-stall in hardware.
2516 */
2517 if (did_stall) {
2518 /*
2519 * The transfer will be continued when
2520 * the clear-stall control endpoint
2521 * message is received.
2522 */
2523 ep->is_stalled = 1;
2524 return;
2525 }
2526 } else if (type == UE_ISOCHRONOUS) {
2527
2528 /*
2529 * Make sure any FIFO overflow or other FIFO
2530 * error conditions go away by resetting the
2531 * endpoint FIFO through the clear stall
2532 * method.
2533 */
2534 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2535 (udev->bus->methods->clear_stall) (udev, ep);
2536 }
2537 }
2538 }
2539 /* Set or clear stall complete - special case */
2540 if (xfer->nframes == 0) {
2541 /* we are complete */
2542 xfer->aframes = 0;
2543 usbd_transfer_done(xfer, 0);
2544 return;
2545 }
2546 /*
2547 * Handled cases:
2548 *
2549 * 1) Start the first transfer queued.
2550 *
2551 * 2) Re-start the current USB transfer.
2552 */
2553 /*
2554 * Check if there should be any
2555 * pre transfer start delay:
2556 */
2557 if (xfer->interval > 0) {
2558 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2559 if ((type == UE_BULK) ||
2560 (type == UE_CONTROL)) {
2561 usbd_transfer_timeout_ms(xfer,
2562 &usbd_transfer_start_cb,
2563 xfer->interval);
2564 return;
2565 }
2566 }
2567 DPRINTF("start\n");
2568
2569 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2570
2563 /* start USB transfer */
2564 (ep->methods->start) (xfer);
2565
2566 xfer->flags_int.can_cancel_immed = 1;
2567
2568 /* check for error */
2569 if (xfer->error) {
2570 /* some error has happened */
2571 usbd_transfer_done(xfer, 0);
2572 }
2573}
2574
2575/*------------------------------------------------------------------------*
2576 * usbd_transfer_timeout_ms
2577 *
2578  * This function is used to set up a timeout on the given USB
2579  * transfer. The callback given by
2580 * "cb" will get called after "ms" milliseconds.
2581 *------------------------------------------------------------------------*/
2582void
2583usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2584 void (*cb) (void *arg), usb_timeout_t ms)
2585{
2586 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2587
2588 /* defer delay */
2589 usb_callout_reset(&xfer->timeout_handle,
2590 USB_MS_TO_TICKS(ms), cb, xfer);
2591}
2592
2593/*------------------------------------------------------------------------*
2594 * usbd_callback_wrapper_sub
2595 *
2596 * - This function will update variables in an USB transfer after
2597  *   the USB transfer is complete.
2598 *
2599 * - This function is used to start the next USB transfer on the
2600 * ep transfer queue, if any.
2601 *
2602 * NOTE: In some special cases the USB transfer will not be removed from
2603 * the pipe queue, but remain first. To enforce USB transfer removal call
2604 * this function passing the error code "USB_ERR_CANCELLED".
2605 *
2606 * Return values:
2607 * 0: Success.
2608 * Else: The callback has been deferred.
2609 *------------------------------------------------------------------------*/
2610static uint8_t
2611usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2612{
2613 struct usb_endpoint *ep;
2614 struct usb_bus *bus;
2615 usb_frcount_t x;
2616
2617 bus = xfer->xroot->bus;
2618
2619 if ((!xfer->flags_int.open) &&
2620 (!xfer->flags_int.did_close)) {
2621 DPRINTF("close\n");
2622 USB_BUS_LOCK(bus);
2623 (xfer->endpoint->methods->close) (xfer);
2624 USB_BUS_UNLOCK(bus);
2625 /* only close once */
2626 xfer->flags_int.did_close = 1;
2627 return (1); /* wait for new callback */
2628 }
2629 /*
2630 * If we have a non-hardware induced error we
2631 * need to do the DMA delay!
2632 */
2633 if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2634 (xfer->error == USB_ERR_CANCELLED ||
2635 xfer->error == USB_ERR_TIMEOUT ||
2636 bus->methods->start_dma_delay != NULL)) {
2637
2638 usb_timeout_t temp;
2639
2640 /* only delay once */
2641 xfer->flags_int.did_dma_delay = 1;
2642
2643 /* we can not cancel this delay */
2644 xfer->flags_int.can_cancel_immed = 0;
2645
2646 temp = usbd_get_dma_delay(xfer->xroot->udev);
2647
2648 DPRINTFN(3, "DMA delay, %u ms, "
2649 "on %p\n", temp, xfer);
2650
2651 if (temp != 0) {
2652 USB_BUS_LOCK(bus);
2653 /*
2654 * Some hardware solutions have dedicated
2655 * events when it is safe to free DMA'ed
2656 * memory. For the other hardware platforms we
2657 * use a static delay.
2658 */
2659 if (bus->methods->start_dma_delay != NULL) {
2660 (bus->methods->start_dma_delay) (xfer);
2661 } else {
2662 usbd_transfer_timeout_ms(xfer,
2663 (void *)&usb_dma_delay_done_cb, temp);
2664 }
2665 USB_BUS_UNLOCK(bus);
2666 return (1); /* wait for new callback */
2667 }
2668 }
2669 /* check actual number of frames */
2670 if (xfer->aframes > xfer->nframes) {
2671 if (xfer->error == 0) {
2672 panic("%s: actual number of frames, %d, is "
2673 "greater than initial number of frames, %d\n",
2674 __FUNCTION__, xfer->aframes, xfer->nframes);
2675 } else {
2676 /* just set some valid value */
2677 xfer->aframes = xfer->nframes;
2678 }
2679 }
2680 /* compute actual length */
2681 xfer->actlen = 0;
2682
2683 for (x = 0; x != xfer->aframes; x++) {
2684 xfer->actlen += xfer->frlengths[x];
2685 }
2686
2687 /*
2688 * Frames that were not transferred get zero actual length in
2689 * case the USB device driver does not check the actual number
2690 * of frames transferred, "xfer->aframes":
2691 */
2692 for (; x < xfer->nframes; x++) {
2693 usbd_xfer_set_frame_len(xfer, x, 0);
2694 }
2695
2696 /* check actual length */
2697 if (xfer->actlen > xfer->sumlen) {
2698 if (xfer->error == 0) {
2699 panic("%s: actual length, %d, is greater than "
2700 "initial length, %d\n",
2701 __FUNCTION__, xfer->actlen, xfer->sumlen);
2702 } else {
2703 /* just set some valid value */
2704 xfer->actlen = xfer->sumlen;
2705 }
2706 }
2707 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2708 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2709 xfer->aframes, xfer->nframes);
2710
2711 if (xfer->error) {
2712 /* end of control transfer, if any */
2713 xfer->flags_int.control_act = 0;
2714
2715 /* check if we should block the execution queue */
2716 if ((xfer->error != USB_ERR_CANCELLED) &&
2717 (xfer->flags.pipe_bof)) {
2718 DPRINTFN(2, "xfer=%p: Block On Failure "
2719 "on endpoint=%p\n", xfer, xfer->endpoint);
2720 goto done;
2721 }
2722 } else {
2723 /* check for short transfers */
2724 if (xfer->actlen < xfer->sumlen) {
2725
2726 /* end of control transfer, if any */
2727 xfer->flags_int.control_act = 0;
2728
2729 if (!xfer->flags_int.short_xfer_ok) {
2730 xfer->error = USB_ERR_SHORT_XFER;
2731 if (xfer->flags.pipe_bof) {
2732 DPRINTFN(2, "xfer=%p: Block On Failure on "
2733 "Short Transfer on endpoint %p.\n",
2734 xfer, xfer->endpoint);
2735 goto done;
2736 }
2737 }
2738 } else {
2739 /*
2740 * Check if we are in the middle of a
2741 * control transfer:
2742 */
2743 if (xfer->flags_int.control_act) {
2744 DPRINTFN(5, "xfer=%p: Control transfer "
2745 "active on endpoint=%p\n", xfer, xfer->endpoint);
2746 goto done;
2747 }
2748 }
2749 }
2750
2751 ep = xfer->endpoint;
2752
2753 /*
2754 * If the current USB transfer is completing we need to start the
2755 * next one:
2756 */
2757 USB_BUS_LOCK(bus);
2758 if (ep->endpoint_q.curr == xfer) {
2759 usb_command_wrapper(&ep->endpoint_q, NULL);
2760
2761 if (ep->endpoint_q.curr || TAILQ_FIRST(&ep->endpoint_q.head)) {
2762 /* there is another USB transfer waiting */
2763 } else {
2764 /* this is the last USB transfer */
2765 /* clear isochronous sync flag */
2766 xfer->endpoint->is_synced = 0;
2767 }
2768 }
2769 USB_BUS_UNLOCK(bus);
2770done:
2771 return (0);
2772}
2773
2774/*------------------------------------------------------------------------*
2775 * usb_command_wrapper
2776 *
2777  * This function is used to execute commands non-recursively on an USB
2778 * transfer.
2779 *------------------------------------------------------------------------*/
2780void
2781usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2782{
2783 if (xfer) {
2784 /*
2785 * If the transfer is not already processing,
2786 * queue it!
2787 */
2788 if (pq->curr != xfer) {
2789 usbd_transfer_enqueue(pq, xfer);
2790 if (pq->curr != NULL) {
2791 /* something is already processing */
2792 DPRINTFN(6, "busy %p\n", pq->curr);
2793 return;
2794 }
2795 }
2796 } else {
2797 /* Get next element in queue */
2798 pq->curr = NULL;
2799 }
2800
2801 if (!pq->recurse_1) {
2802
2803 do {
2804
2805 /* set both recurse flags */
2806 pq->recurse_1 = 1;
2807 pq->recurse_2 = 1;
2808
2809 if (pq->curr == NULL) {
2810 xfer = TAILQ_FIRST(&pq->head);
2811 if (xfer) {
2812 TAILQ_REMOVE(&pq->head, xfer,
2813 wait_entry);
2814 xfer->wait_queue = NULL;
2815 pq->curr = xfer;
2816 } else {
2817 break;
2818 }
2819 }
2820 DPRINTFN(6, "cb %p (enter)\n", pq->curr);
2821 (pq->command) (pq);
2822 DPRINTFN(6, "cb %p (leave)\n", pq->curr);
2823
2824 } while (!pq->recurse_2);
2825
2826 /* clear first recurse flag */
2827 pq->recurse_1 = 0;
2828
2829 } else {
2830 /* clear second recurse flag */
2831 pq->recurse_2 = 0;
2832 }
2833}
2834
2835/*------------------------------------------------------------------------*
2836 * usbd_ctrl_transfer_setup
2837 *
2838 * This function is used to setup the default USB control endpoint
2839 * transfer.
2840 *------------------------------------------------------------------------*/
2841void
2842usbd_ctrl_transfer_setup(struct usb_device *udev)
2843{
2844 struct usb_xfer *xfer;
2845 uint8_t no_resetup;
2846 uint8_t iface_index;
2847
2848 /* check for root HUB */
2849 if (udev->parent_hub == NULL)
2850 return;
2851repeat:
2852
2853 xfer = udev->ctrl_xfer[0];
2854 if (xfer) {
2855 USB_XFER_LOCK(xfer);
2856 no_resetup =
2857 ((xfer->address == udev->address) &&
2858 (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
2859 udev->ddesc.bMaxPacketSize));
2860 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2861 if (no_resetup) {
2862 /*
2863 * NOTE: checking "xfer->address" and
2864 * starting the USB transfer must be
2865 * atomic!
2866 */
2867 usbd_transfer_start(xfer);
2868 }
2869 }
2870 USB_XFER_UNLOCK(xfer);
2871 } else {
2872 no_resetup = 0;
2873 }
2874
2875 if (no_resetup) {
2876 /*
2877 		 * All parameters are exactly the same as before.
2878 * Just return.
2879 */
2880 return;
2881 }
2882 /*
2883 * Update wMaxPacketSize for the default control endpoint:
2884 */
2885 udev->ctrl_ep_desc.wMaxPacketSize[0] =
2886 udev->ddesc.bMaxPacketSize;
2887
2888 /*
2889 * Unsetup any existing USB transfer:
2890 */
2891 usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
2892
2893 /*
2894 * Try to setup a new USB transfer for the
2895 * default control endpoint:
2896 */
2897 iface_index = 0;
2898 if (usbd_transfer_setup(udev, &iface_index,
2899 udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
2900 &udev->device_mtx)) {
2901 DPRINTFN(0, "could not setup default "
2902 "USB transfer\n");
2903 } else {
2904 goto repeat;
2905 }
2906}
2907
2908/*------------------------------------------------------------------------*
2909  * usbd_clear_stall_locked - factored out code
2910 *
2911 * NOTE: the intention of this function is not to reset the hardware
2912 * data toggle.
2913 *------------------------------------------------------------------------*/
2914void
2915usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
2916{
2917 USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
2918
2919 /* check that we have a valid case */
2920 if (udev->flags.usb_mode == USB_MODE_HOST &&
2921 udev->parent_hub != NULL &&
2922 udev->bus->methods->clear_stall != NULL &&
2923 ep->methods != NULL) {
2924 (udev->bus->methods->clear_stall) (udev, ep);
2925 }
2926}
2927
2928/*------------------------------------------------------------------------*
2929 * usbd_clear_data_toggle - factored out code
2930 *
2931 * NOTE: the intention of this function is not to reset the hardware
2932 * data toggle on the USB device side.
2933 *------------------------------------------------------------------------*/
2934void
2935usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
2936{
2937 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
2938
2939 USB_BUS_LOCK(udev->bus);
2940 ep->toggle_next = 0;
2941 /* some hardware needs a callback to clear the data toggle */
2942 usbd_clear_stall_locked(udev, ep);
2943 USB_BUS_UNLOCK(udev->bus);
2944}
2945
2946/*------------------------------------------------------------------------*
2947 * usbd_clear_stall_callback - factored out clear stall callback
2948 *
2949 * Input parameters:
2950 * xfer1: Clear Stall Control Transfer
2951 * xfer2: Stalled USB Transfer
2952 *
2953 * This function is NULL safe.
2954 *
2955 * Return values:
2956 * 0: In progress
2957 * Else: Finished
2958 *
2959 * Clear stall config example:
2960 *
2961 * static const struct usb_config my_clearstall = {
2962 * .type = UE_CONTROL,
2963 * .endpoint = 0,
2964 * .direction = UE_DIR_ANY,
2965 * .interval = 50, //50 milliseconds
2966 * .bufsize = sizeof(struct usb_device_request),
2967 * .timeout = 1000, //1.000 seconds
2968 * .callback = &my_clear_stall_callback, // **
2969 * .usb_mode = USB_MODE_HOST,
2970 * };
2971 *
2972 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
2973 * passing the correct parameters.
2974 *------------------------------------------------------------------------*/
2975uint8_t
2976usbd_clear_stall_callback(struct usb_xfer *xfer1,
2977 struct usb_xfer *xfer2)
2978{
2979 struct usb_device_request req;
2980
2981 if (xfer2 == NULL) {
2982 /* looks like we are tearing down */
2983 DPRINTF("NULL input parameter\n");
2984 return (0);
2985 }
2986 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
2987 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
2988
2989 switch (USB_GET_STATE(xfer1)) {
2990 case USB_ST_SETUP:
2991
2992 /*
2993 * pre-clear the data toggle to DATA0 ("umass.c" and
2994 * "ata-usb.c" depends on this)
2995 */
2996
2997 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
2998
2999 /* setup a clear-stall packet */
3000
3001 req.bmRequestType = UT_WRITE_ENDPOINT;
3002 req.bRequest = UR_CLEAR_FEATURE;
3003 USETW(req.wValue, UF_ENDPOINT_HALT);
3004 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3005 req.wIndex[1] = 0;
3006 USETW(req.wLength, 0);
3007
3008 /*
3009 * "usbd_transfer_setup_sub()" will ensure that
3010 * we have sufficient room in the buffer for
3011 * the request structure!
3012 */
3013
3014 /* copy in the transfer */
3015
3016 usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3017
3018 /* set length */
3019 xfer1->frlengths[0] = sizeof(req);
3020 xfer1->nframes = 1;
3021
3022 usbd_transfer_submit(xfer1);
3023 return (0);
3024
3025 case USB_ST_TRANSFERRED:
3026 break;
3027
3028 default: /* Error */
3029 if (xfer1->error == USB_ERR_CANCELLED) {
3030 return (0);
3031 }
3032 break;
3033 }
3034 return (1); /* Clear Stall Finished */
3035}
3036
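
/*
 * A sketch of the "my_clear_stall_callback" referred to above: a
 * control transfer callback that clears the stall of another, stalled
 * data transfer and restarts it. The softc layout ("my_softc",
 * "sc_bulk_read_xfer", "sc_read_stalled") is hypothetical.
 */
static void
my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct my_softc *sc = usbd_xfer_softc(xfer);
	struct usb_xfer *xfer_other = sc->sc_bulk_read_xfer;

	if (usbd_clear_stall_callback(xfer, xfer_other)) {
		/* the stall is cleared - restart the data transfer */
		sc->sc_read_stalled = 0;
		usbd_transfer_start(xfer_other);
	}
}
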
3037/*------------------------------------------------------------------------*
3038 * usbd_transfer_poll
3039 *
3040 * The following function gets called from the USB keyboard driver and
3041  * UMASS when the system has panicked.
3042 *
3043 * NOTE: It is currently not possible to resume normal operation on
3044 * the USB controller which has been polled, due to clearing of the
3045 * "up_dsleep" and "up_msleep" flags.
3046 *------------------------------------------------------------------------*/
3047void
3048usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3049{
3050 struct usb_xfer *xfer;
3051 struct usb_xfer_root *xroot;
3052 struct usb_device *udev;
3053 struct usb_proc_msg *pm;
3054 uint16_t n;
3055 uint16_t drop_bus;
3056 uint16_t drop_xfer;
3057
3058 for (n = 0; n != max; n++) {
3059 /* Extra checks to avoid panic */
3060 xfer = ppxfer[n];
3061 if (xfer == NULL)
3062 continue; /* no USB transfer */
3063 xroot = xfer->xroot;
3064 if (xroot == NULL)
3065 continue; /* no USB root */
3066 udev = xroot->udev;
3067 if (udev == NULL)
3068 continue; /* no USB device */
3069 if (udev->bus == NULL)
3070 continue; /* no BUS structure */
3071 if (udev->bus->methods == NULL)
3072 continue; /* no BUS methods */
3073 if (udev->bus->methods->xfer_poll == NULL)
3074 continue; /* no poll method */
3075
3076 /* make sure that the BUS mutex is not locked */
3077 drop_bus = 0;
3078 while (mtx_owned(&xroot->udev->bus->bus_mtx)) {
3079 mtx_unlock(&xroot->udev->bus->bus_mtx);
3080 drop_bus++;
3081 }
3082
3083 /* make sure that the transfer mutex is not locked */
3084 drop_xfer = 0;
3085 while (mtx_owned(xroot->xfer_mtx)) {
3086 mtx_unlock(xroot->xfer_mtx);
3087 drop_xfer++;
3088 }
3089
3090 		/* Make sure cv_signal() and cv_broadcast() are not called */
3091 udev->bus->control_xfer_proc.up_msleep = 0;
3092 udev->bus->explore_proc.up_msleep = 0;
3093 udev->bus->giant_callback_proc.up_msleep = 0;
3094 udev->bus->non_giant_callback_proc.up_msleep = 0;
3095
3096 /* poll USB hardware */
3097 (udev->bus->methods->xfer_poll) (udev->bus);
3098
3099 USB_BUS_LOCK(xroot->bus);
3100
3101 /* check for clear stall */
3102 if (udev->ctrl_xfer[1] != NULL) {
3103
3104 /* poll clear stall start */
3105 pm = &udev->cs_msg[0].hdr;
3106 (pm->pm_callback) (pm);
3107 /* poll clear stall done thread */
3108 pm = &udev->ctrl_xfer[1]->
3109 xroot->done_m[0].hdr;
3110 (pm->pm_callback) (pm);
3111 }
3112
3113 /* poll done thread */
3114 pm = &xroot->done_m[0].hdr;
3115 (pm->pm_callback) (pm);
3116
3117 USB_BUS_UNLOCK(xroot->bus);
3118
3119 /* restore transfer mutex */
3120 while (drop_xfer--)
3121 mtx_lock(xroot->xfer_mtx);
3122
3123 /* restore BUS mutex */
3124 while (drop_bus--)
3125 mtx_lock(&xroot->udev->bus->bus_mtx);
3126 }
3127}
3128
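
/*
 * A usage sketch for usbd_transfer_poll(), e.g. from a console or
 * keyboard driver poll routine running after a panic. The softc and
 * "XXX_N_TRANSFER" names are hypothetical.
 */
static void
xxx_poll(struct xxx_softc *sc)
{
	usbd_transfer_poll(sc->sc_xfer, XXX_N_TRANSFER);
}
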
3129static void
3130usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3131 uint8_t type, enum usb_dev_speed speed)
3132{
3133 static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3134 [USB_SPEED_LOW] = 8,
3135 [USB_SPEED_FULL] = 64,
3136 [USB_SPEED_HIGH] = 1024,
3137 [USB_SPEED_VARIABLE] = 1024,
3138 [USB_SPEED_SUPER] = 1024,
3139 };
3140
3141 static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3142 [USB_SPEED_LOW] = 0, /* invalid */
3143 [USB_SPEED_FULL] = 1023,
3144 [USB_SPEED_HIGH] = 1024,
3145 [USB_SPEED_VARIABLE] = 3584,
3146 [USB_SPEED_SUPER] = 1024,
3147 };
3148
3149 static const uint16_t control_min[USB_SPEED_MAX] = {
3150 [USB_SPEED_LOW] = 8,
3151 [USB_SPEED_FULL] = 8,
3152 [USB_SPEED_HIGH] = 64,
3153 [USB_SPEED_VARIABLE] = 512,
3154 [USB_SPEED_SUPER] = 512,
3155 };
3156
3157 static const uint16_t bulk_min[USB_SPEED_MAX] = {
3158 [USB_SPEED_LOW] = 8,
3159 [USB_SPEED_FULL] = 8,
3160 [USB_SPEED_HIGH] = 512,
3161 [USB_SPEED_VARIABLE] = 512,
3162 [USB_SPEED_SUPER] = 1024,
3163 };
3164
3165 uint16_t temp;
3166
3167 memset(ptr, 0, sizeof(*ptr));
3168
3169 switch (type) {
3170 case UE_INTERRUPT:
3171 ptr->range.max = intr_range_max[speed];
3172 break;
3173 case UE_ISOCHRONOUS:
3174 ptr->range.max = isoc_range_max[speed];
3175 break;
3176 default:
3177 if (type == UE_BULK)
3178 temp = bulk_min[speed];
3179 else /* UE_CONTROL */
3180 temp = control_min[speed];
3181
3182 /* default is fixed */
3183 ptr->fixed[0] = temp;
3184 ptr->fixed[1] = temp;
3185 ptr->fixed[2] = temp;
3186 ptr->fixed[3] = temp;
3187
3188 if (speed == USB_SPEED_FULL) {
3189 /* multiple sizes */
3190 ptr->fixed[1] = 16;
3191 ptr->fixed[2] = 32;
3192 ptr->fixed[3] = 64;
3193 }
3194 if ((speed == USB_SPEED_VARIABLE) &&
3195 (type == UE_BULK)) {
3196 /* multiple sizes */
3197 ptr->fixed[2] = 1024;
3198 ptr->fixed[3] = 1536;
3199 }
3200 break;
3201 }
3202}
3203
3204void *
3205usbd_xfer_softc(struct usb_xfer *xfer)
3206{
3207 return (xfer->priv_sc);
3208}
3209
3210void *
3211usbd_xfer_get_priv(struct usb_xfer *xfer)
3212{
3213 return (xfer->priv_fifo);
3214}
3215
3216void
3217usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3218{
3219 xfer->priv_fifo = ptr;
3220}
3221
3222uint8_t
3223usbd_xfer_state(struct usb_xfer *xfer)
3224{
3225 return (xfer->usb_state);
3226}
3227
3228void
3229usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3230{
3231 switch (flag) {
3232 case USB_FORCE_SHORT_XFER:
3233 xfer->flags.force_short_xfer = 1;
3234 break;
3235 case USB_SHORT_XFER_OK:
3236 xfer->flags.short_xfer_ok = 1;
3237 break;
3238 case USB_MULTI_SHORT_OK:
3239 xfer->flags.short_frames_ok = 1;
3240 break;
3241 case USB_MANUAL_STATUS:
3242 xfer->flags.manual_status = 1;
3243 break;
3244 }
3245}
3246
3247void
3248usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3249{
3250 switch (flag) {
3251 case USB_FORCE_SHORT_XFER:
3252 xfer->flags.force_short_xfer = 0;
3253 break;
3254 case USB_SHORT_XFER_OK:
3255 xfer->flags.short_xfer_ok = 0;
3256 break;
3257 case USB_MULTI_SHORT_OK:
3258 xfer->flags.short_frames_ok = 0;
3259 break;
3260 case USB_MANUAL_STATUS:
3261 xfer->flags.manual_status = 0;
3262 break;
3263 }
3264}
3265
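
/*
 * A usage sketch for the flag helpers above: toggling short-transfer
 * acceptance on a bulk IN transfer, typically done from the
 * USB_ST_SETUP case before the next submission. "sc" and
 * "accept_short_reads" are hypothetical.
 */
	if (accept_short_reads)
		usbd_xfer_set_flag(sc->sc_xfer[0], USB_SHORT_XFER_OK);
	else
		usbd_xfer_clr_flag(sc->sc_xfer[0], USB_SHORT_XFER_OK);
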
3266/*
3267  * The following function returns the time, in milliseconds, at which
3268  * the isochronous transfer was completed by the hardware. The returned
3269  * value wraps around at 65536 milliseconds.
3270 */
3271uint16_t
3272usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3273{
3274 return (xfer->isoc_time_complete);
3275}
2571 /* start USB transfer */
2572 (ep->methods->start) (xfer);
2573
2574 xfer->flags_int.can_cancel_immed = 1;
2575
2576 /* check for error */
2577 if (xfer->error) {
2578 /* some error has happened */
2579 usbd_transfer_done(xfer, 0);
2580 }
2581}
2582
2583/*------------------------------------------------------------------------*
2584 * usbd_transfer_timeout_ms
2585 *
2586  * This function is used to set up a timeout on the given USB
2587  * transfer. The callback given by
2588 * "cb" will get called after "ms" milliseconds.
2589 *------------------------------------------------------------------------*/
2590void
2591usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2592 void (*cb) (void *arg), usb_timeout_t ms)
2593{
2594 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2595
2596 /* defer delay */
2597 usb_callout_reset(&xfer->timeout_handle,
2598 USB_MS_TO_TICKS(ms), cb, xfer);
2599}
2600
2601/*------------------------------------------------------------------------*
2602 * usbd_callback_wrapper_sub
2603 *
2604 * - This function will update variables in an USB transfer after
2605  *   the USB transfer is complete.
2606 *
2607 * - This function is used to start the next USB transfer on the
2608 * ep transfer queue, if any.
2609 *
2610 * NOTE: In some special cases the USB transfer will not be removed from
2611 * the pipe queue, but remain first. To enforce USB transfer removal call
2612 * this function passing the error code "USB_ERR_CANCELLED".
2613 *
2614 * Return values:
2615 * 0: Success.
2616 * Else: The callback has been deferred.
2617 *------------------------------------------------------------------------*/
2618static uint8_t
2619usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2620{
2621 struct usb_endpoint *ep;
2622 struct usb_bus *bus;
2623 usb_frcount_t x;
2624
2625 bus = xfer->xroot->bus;
2626
2627 if ((!xfer->flags_int.open) &&
2628 (!xfer->flags_int.did_close)) {
2629 DPRINTF("close\n");
2630 USB_BUS_LOCK(bus);
2631 (xfer->endpoint->methods->close) (xfer);
2632 USB_BUS_UNLOCK(bus);
2633 /* only close once */
2634 xfer->flags_int.did_close = 1;
2635 return (1); /* wait for new callback */
2636 }
2637 /*
2638 * If we have a non-hardware induced error we
2639 * need to do the DMA delay!
2640 */
2641 if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2642 (xfer->error == USB_ERR_CANCELLED ||
2643 xfer->error == USB_ERR_TIMEOUT ||
2644 bus->methods->start_dma_delay != NULL)) {
2645
2646 usb_timeout_t temp;
2647
2648 /* only delay once */
2649 xfer->flags_int.did_dma_delay = 1;
2650
2651 /* we can not cancel this delay */
2652 xfer->flags_int.can_cancel_immed = 0;
2653
2654 temp = usbd_get_dma_delay(xfer->xroot->udev);
2655
2656 DPRINTFN(3, "DMA delay, %u ms, "
2657 "on %p\n", temp, xfer);
2658
2659 if (temp != 0) {
2660 USB_BUS_LOCK(bus);
2661 /*
2662 * Some hardware solutions have dedicated
2663 * events when it is safe to free DMA'ed
2664 * memory. For the other hardware platforms we
2665 * use a static delay.
2666 */
2667 if (bus->methods->start_dma_delay != NULL) {
2668 (bus->methods->start_dma_delay) (xfer);
2669 } else {
2670 usbd_transfer_timeout_ms(xfer,
2671 (void *)&usb_dma_delay_done_cb, temp);
2672 }
2673 USB_BUS_UNLOCK(bus);
2674 return (1); /* wait for new callback */
2675 }
2676 }
2677 /* check actual number of frames */
2678 if (xfer->aframes > xfer->nframes) {
2679 if (xfer->error == 0) {
2680 panic("%s: actual number of frames, %d, is "
2681 "greater than initial number of frames, %d\n",
2682 __FUNCTION__, xfer->aframes, xfer->nframes);
2683 } else {
2684 /* just set some valid value */
2685 xfer->aframes = xfer->nframes;
2686 }
2687 }
2688 /* compute actual length */
2689 xfer->actlen = 0;
2690
2691 for (x = 0; x != xfer->aframes; x++) {
2692 xfer->actlen += xfer->frlengths[x];
2693 }
2694
2695 /*
2696 * Frames that were not transferred get zero actual length in
2697 * case the USB device driver does not check the actual number
2698 * of frames transferred, "xfer->aframes":
2699 */
2700 for (; x < xfer->nframes; x++) {
2701 usbd_xfer_set_frame_len(xfer, x, 0);
2702 }
2703
2704 /* check actual length */
2705 if (xfer->actlen > xfer->sumlen) {
2706 if (xfer->error == 0) {
2707 panic("%s: actual length, %d, is greater than "
2708 "initial length, %d\n",
2709 __FUNCTION__, xfer->actlen, xfer->sumlen);
2710 } else {
2711 /* just set some valid value */
2712 xfer->actlen = xfer->sumlen;
2713 }
2714 }
2715 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2716 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2717 xfer->aframes, xfer->nframes);
2718
2719 if (xfer->error) {
2720 /* end of control transfer, if any */
2721 xfer->flags_int.control_act = 0;
2722
2723 /* check if we should block the execution queue */
2724 if ((xfer->error != USB_ERR_CANCELLED) &&
2725 (xfer->flags.pipe_bof)) {
2726 DPRINTFN(2, "xfer=%p: Block On Failure "
2727 "on endpoint=%p\n", xfer, xfer->endpoint);
2728 goto done;
2729 }
2730 } else {
2731 /* check for short transfers */
2732 if (xfer->actlen < xfer->sumlen) {
2733
2734 /* end of control transfer, if any */
2735 xfer->flags_int.control_act = 0;
2736
2737 if (!xfer->flags_int.short_xfer_ok) {
2738 xfer->error = USB_ERR_SHORT_XFER;
2739 if (xfer->flags.pipe_bof) {
2740 DPRINTFN(2, "xfer=%p: Block On Failure on "
2741 "Short Transfer on endpoint %p.\n",
2742 xfer, xfer->endpoint);
2743 goto done;
2744 }
2745 }
2746 } else {
2747 /*
2748 * Check if we are in the middle of a
2749 * control transfer:
2750 */
2751 if (xfer->flags_int.control_act) {
2752 DPRINTFN(5, "xfer=%p: Control transfer "
2753 "active on endpoint=%p\n", xfer, xfer->endpoint);
2754 goto done;
2755 }
2756 }
2757 }
2758
2759 ep = xfer->endpoint;
2760
2761 /*
2762 * If the current USB transfer is completing we need to start the
2763 * next one:
2764 */
2765 USB_BUS_LOCK(bus);
2766 if (ep->endpoint_q.curr == xfer) {
2767 usb_command_wrapper(&ep->endpoint_q, NULL);
2768
2769 if (ep->endpoint_q.curr || TAILQ_FIRST(&ep->endpoint_q.head)) {
2770 /* there is another USB transfer waiting */
2771 } else {
2772 /* this is the last USB transfer */
2773 /* clear isochronous sync flag */
2774 xfer->endpoint->is_synced = 0;
2775 }
2776 }
2777 USB_BUS_UNLOCK(bus);
2778done:
2779 return (0);
2780}
2781
2782/*------------------------------------------------------------------------*
2783 * usb_command_wrapper
2784 *
2785  * This function is used to execute commands non-recursively on an USB
2786 * transfer.
2787 *------------------------------------------------------------------------*/
2788void
2789usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2790{
2791 if (xfer) {
2792 /*
2793 * If the transfer is not already processing,
2794 * queue it!
2795 */
2796 if (pq->curr != xfer) {
2797 usbd_transfer_enqueue(pq, xfer);
2798 if (pq->curr != NULL) {
2799 /* something is already processing */
2800 DPRINTFN(6, "busy %p\n", pq->curr);
2801 return;
2802 }
2803 }
2804 } else {
2805 /* Get next element in queue */
2806 pq->curr = NULL;
2807 }
2808
2809 if (!pq->recurse_1) {
2810
2811 do {
2812
2813 /* set both recurse flags */
2814 pq->recurse_1 = 1;
2815 pq->recurse_2 = 1;
2816
2817 if (pq->curr == NULL) {
2818 xfer = TAILQ_FIRST(&pq->head);
2819 if (xfer) {
2820 TAILQ_REMOVE(&pq->head, xfer,
2821 wait_entry);
2822 xfer->wait_queue = NULL;
2823 pq->curr = xfer;
2824 } else {
2825 break;
2826 }
2827 }
2828 DPRINTFN(6, "cb %p (enter)\n", pq->curr);
2829 (pq->command) (pq);
2830 DPRINTFN(6, "cb %p (leave)\n", pq->curr);
2831
2832 } while (!pq->recurse_2);
2833
2834 /* clear first recurse flag */
2835 pq->recurse_1 = 0;
2836
2837 } else {
2838 /* clear second recurse flag */
2839 pq->recurse_2 = 0;
2840 }
2841}
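/*
 * Illustrative usage of "usb_command_wrapper()", based on the calls made
 * elsewhere in this file (not a new API): passing a non-NULL transfer
 * enqueues it and starts processing if the queue is idle; passing NULL
 * drops the current element and advances to the next one:
 *
 *	usb_command_wrapper(&ep->endpoint_q, xfer);	-- queue "xfer"
 *	usb_command_wrapper(&ep->endpoint_q, NULL);	-- advance the queue
 */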
2842
2843/*------------------------------------------------------------------------*
2844 * usbd_ctrl_transfer_setup
2845 *
2846 * This function is used to setup the default USB control endpoint
2847 * transfer.
2848 *------------------------------------------------------------------------*/
2849void
2850usbd_ctrl_transfer_setup(struct usb_device *udev)
2851{
2852 struct usb_xfer *xfer;
2853 uint8_t no_resetup;
2854 uint8_t iface_index;
2855
2856 /* check for root HUB */
2857 if (udev->parent_hub == NULL)
2858 return;
2859repeat:
2860
2861 xfer = udev->ctrl_xfer[0];
2862 if (xfer) {
2863 USB_XFER_LOCK(xfer);
2864 no_resetup =
2865 ((xfer->address == udev->address) &&
2866 (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
2867 udev->ddesc.bMaxPacketSize));
2868 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2869 if (no_resetup) {
2870 /*
2871 * NOTE: checking "xfer->address" and
2872 * starting the USB transfer must be
2873 * atomic!
2874 */
2875 usbd_transfer_start(xfer);
2876 }
2877 }
2878 USB_XFER_UNLOCK(xfer);
2879 } else {
2880 no_resetup = 0;
2881 }
2882
2883 if (no_resetup) {
2884 /*
2885		 * All parameters are exactly the same as before.
2886 * Just return.
2887 */
2888 return;
2889 }
2890 /*
2891 * Update wMaxPacketSize for the default control endpoint:
2892 */
2893 udev->ctrl_ep_desc.wMaxPacketSize[0] =
2894 udev->ddesc.bMaxPacketSize;
2895
2896 /*
2897 * Unsetup any existing USB transfer:
2898 */
2899 usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
2900
2901 /*
2902 * Try to setup a new USB transfer for the
2903 * default control endpoint:
2904 */
2905 iface_index = 0;
2906 if (usbd_transfer_setup(udev, &iface_index,
2907 udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
2908 &udev->device_mtx)) {
2909 DPRINTFN(0, "could not setup default "
2910 "USB transfer\n");
2911 } else {
2912 goto repeat;
2913 }
2914}
2915
2916/*------------------------------------------------------------------------*
2917 * usbd_clear_stall_locked - factored out code
2918 *
2919 * NOTE: this function only invokes the USB controller's "clear_stall"
2920 * bus method, if any; it does not reset the device side data toggle.
2921 *------------------------------------------------------------------------*/
2922void
2923usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
2924{
2925 USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
2926
2927 /* check that we have a valid case */
2928 if (udev->flags.usb_mode == USB_MODE_HOST &&
2929 udev->parent_hub != NULL &&
2930 udev->bus->methods->clear_stall != NULL &&
2931 ep->methods != NULL) {
2932 (udev->bus->methods->clear_stall) (udev, ep);
2933 }
2934}
2935
2936/*------------------------------------------------------------------------*
2937 * usbd_clear_data_toggle - factored out code
2938 *
2939 * NOTE: the intention of this function is not to reset the hardware
2940 * data toggle on the USB device side.
2941 *------------------------------------------------------------------------*/
2942void
2943usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
2944{
2945 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
2946
2947 USB_BUS_LOCK(udev->bus);
2948 ep->toggle_next = 0;
2949 /* some hardware needs a callback to clear the data toggle */
2950 usbd_clear_stall_locked(udev, ep);
2951 USB_BUS_UNLOCK(udev->bus);
2952}
2953
2954/*------------------------------------------------------------------------*
2955 * usbd_clear_stall_callback - factored out clear stall callback
2956 *
2957 * Input parameters:
2958 * xfer1: Clear Stall Control Transfer
2959 * xfer2: Stalled USB Transfer
2960 *
2961 * This function is NULL safe.
2962 *
2963 * Return values:
2964 * 0: In progress
2965 * Else: Finished
2966 *
2967 * Clear stall config example:
2968 *
2969 * static const struct usb_config my_clearstall = {
2970 * .type = UE_CONTROL,
2971 * .endpoint = 0,
2972 * .direction = UE_DIR_ANY,
2973 * .interval = 50, //50 milliseconds
2974 * .bufsize = sizeof(struct usb_device_request),
2975 * .timeout = 1000, //1.000 seconds
2976 * .callback = &my_clear_stall_callback, // **
2977 * .usb_mode = USB_MODE_HOST,
2978 * };
2979 *
2980 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
2981 * passing the correct parameters.
2982 *------------------------------------------------------------------------*/
2983uint8_t
2984usbd_clear_stall_callback(struct usb_xfer *xfer1,
2985 struct usb_xfer *xfer2)
2986{
2987 struct usb_device_request req;
2988
2989 if (xfer2 == NULL) {
2990 /* looks like we are tearing down */
2991 DPRINTF("NULL input parameter\n");
2992 return (0);
2993 }
2994 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
2995 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
2996
2997 switch (USB_GET_STATE(xfer1)) {
2998 case USB_ST_SETUP:
2999
3000 /*
3001 * pre-clear the data toggle to DATA0 ("umass.c" and
3002 * "ata-usb.c" depends on this)
3003 */
3004
3005 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3006
3007 /* setup a clear-stall packet */
3008
3009 req.bmRequestType = UT_WRITE_ENDPOINT;
3010 req.bRequest = UR_CLEAR_FEATURE;
3011 USETW(req.wValue, UF_ENDPOINT_HALT);
3012 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3013 req.wIndex[1] = 0;
3014 USETW(req.wLength, 0);
3015
3016 /*
3017 * "usbd_transfer_setup_sub()" will ensure that
3018 * we have sufficient room in the buffer for
3019 * the request structure!
3020 */
3021
3022 /* copy in the transfer */
3023
3024 usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3025
3026 /* set length */
3027 xfer1->frlengths[0] = sizeof(req);
3028 xfer1->nframes = 1;
3029
3030 usbd_transfer_submit(xfer1);
3031 return (0);
3032
3033 case USB_ST_TRANSFERRED:
3034 break;
3035
3036 default: /* Error */
3037 if (xfer1->error == USB_ERR_CANCELLED) {
3038 return (0);
3039 }
3040 break;
3041 }
3042 return (1); /* Clear Stall Finished */
3043}
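/*------------------------------------------------------------------------*
 * Example clear stall callback sketch (hypothetical driver code, for
 * illustration only; "my_softc", "sc_stalled_xfer" and "sc_flag_stall"
 * are assumed driver members and are not defined in this file):
 *
 * static void
 * my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 * {
 *	struct my_softc *sc = usbd_xfer_softc(xfer);
 *	struct usb_xfer *xfer_other = sc->sc_stalled_xfer;
 *
 *	if (usbd_clear_stall_callback(xfer, xfer_other)) {
 *		// non-zero return value: clear stall has finished
 *		DPRINTF("stall cleared\n");
 *		sc->sc_flag_stall = 0;
 *		usbd_transfer_start(xfer_other);
 *	}
 *	// zero return value: still in progress, or cancelled
 * }
 *------------------------------------------------------------------------*/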
3044
3045/*------------------------------------------------------------------------*
3046 * usbd_transfer_poll
3047 *
3048 * The following function gets called from the USB keyboard driver and
3049 * UMASS when the system has panicked; a usage sketch follows the function.
3050 *
3051 * NOTE: It is currently not possible to resume normal operation on
3052 * the USB controller which has been polled, due to clearing of the
3053 * "up_dsleep" and "up_msleep" flags.
3054 *------------------------------------------------------------------------*/
3055void
3056usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3057{
3058 struct usb_xfer *xfer;
3059 struct usb_xfer_root *xroot;
3060 struct usb_device *udev;
3061 struct usb_proc_msg *pm;
3062 uint16_t n;
3063 uint16_t drop_bus;
3064 uint16_t drop_xfer;
3065
3066 for (n = 0; n != max; n++) {
3067 /* Extra checks to avoid panic */
3068 xfer = ppxfer[n];
3069 if (xfer == NULL)
3070 continue; /* no USB transfer */
3071 xroot = xfer->xroot;
3072 if (xroot == NULL)
3073 continue; /* no USB root */
3074 udev = xroot->udev;
3075 if (udev == NULL)
3076 continue; /* no USB device */
3077 if (udev->bus == NULL)
3078 continue; /* no BUS structure */
3079 if (udev->bus->methods == NULL)
3080 continue; /* no BUS methods */
3081 if (udev->bus->methods->xfer_poll == NULL)
3082 continue; /* no poll method */
3083
3084 /* make sure that the BUS mutex is not locked */
3085 drop_bus = 0;
3086 while (mtx_owned(&xroot->udev->bus->bus_mtx)) {
3087 mtx_unlock(&xroot->udev->bus->bus_mtx);
3088 drop_bus++;
3089 }
3090
3091 /* make sure that the transfer mutex is not locked */
3092 drop_xfer = 0;
3093 while (mtx_owned(xroot->xfer_mtx)) {
3094 mtx_unlock(xroot->xfer_mtx);
3095 drop_xfer++;
3096 }
3097
3098		/* Make sure cv_signal() and cv_broadcast() are not called */
3099 udev->bus->control_xfer_proc.up_msleep = 0;
3100 udev->bus->explore_proc.up_msleep = 0;
3101 udev->bus->giant_callback_proc.up_msleep = 0;
3102 udev->bus->non_giant_callback_proc.up_msleep = 0;
3103
3104 /* poll USB hardware */
3105 (udev->bus->methods->xfer_poll) (udev->bus);
3106
3107 USB_BUS_LOCK(xroot->bus);
3108
3109 /* check for clear stall */
3110 if (udev->ctrl_xfer[1] != NULL) {
3111
3112 /* poll clear stall start */
3113 pm = &udev->cs_msg[0].hdr;
3114 (pm->pm_callback) (pm);
3115 /* poll clear stall done thread */
3116 pm = &udev->ctrl_xfer[1]->
3117 xroot->done_m[0].hdr;
3118 (pm->pm_callback) (pm);
3119 }
3120
3121 /* poll done thread */
3122 pm = &xroot->done_m[0].hdr;
3123 (pm->pm_callback) (pm);
3124
3125 USB_BUS_UNLOCK(xroot->bus);
3126
3127 /* restore transfer mutex */
3128 while (drop_xfer--)
3129 mtx_lock(xroot->xfer_mtx);
3130
3131 /* restore BUS mutex */
3132 while (drop_bus--)
3133 mtx_lock(&xroot->udev->bus->bus_mtx);
3134 }
3135}
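/*
 * Illustrative panic-time call from a driver (hypothetical names; the
 * "sc_xfer" array and "MY_N_TRANSFER" count belong to the driver, not
 * to this file):
 *
 *	usbd_transfer_poll(sc->sc_xfer, MY_N_TRANSFER);
 */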
3136
3137static void
3138usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3139 uint8_t type, enum usb_dev_speed speed)
3140{
3141 static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3142 [USB_SPEED_LOW] = 8,
3143 [USB_SPEED_FULL] = 64,
3144 [USB_SPEED_HIGH] = 1024,
3145 [USB_SPEED_VARIABLE] = 1024,
3146 [USB_SPEED_SUPER] = 1024,
3147 };
3148
3149 static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3150 [USB_SPEED_LOW] = 0, /* invalid */
3151 [USB_SPEED_FULL] = 1023,
3152 [USB_SPEED_HIGH] = 1024,
3153 [USB_SPEED_VARIABLE] = 3584,
3154 [USB_SPEED_SUPER] = 1024,
3155 };
3156
3157 static const uint16_t control_min[USB_SPEED_MAX] = {
3158 [USB_SPEED_LOW] = 8,
3159 [USB_SPEED_FULL] = 8,
3160 [USB_SPEED_HIGH] = 64,
3161 [USB_SPEED_VARIABLE] = 512,
3162 [USB_SPEED_SUPER] = 512,
3163 };
3164
3165 static const uint16_t bulk_min[USB_SPEED_MAX] = {
3166 [USB_SPEED_LOW] = 8,
3167 [USB_SPEED_FULL] = 8,
3168 [USB_SPEED_HIGH] = 512,
3169 [USB_SPEED_VARIABLE] = 512,
3170 [USB_SPEED_SUPER] = 1024,
3171 };
3172
3173 uint16_t temp;
3174
3175 memset(ptr, 0, sizeof(*ptr));
3176
3177 switch (type) {
3178 case UE_INTERRUPT:
3179 ptr->range.max = intr_range_max[speed];
3180 break;
3181 case UE_ISOCHRONOUS:
3182 ptr->range.max = isoc_range_max[speed];
3183 break;
3184 default:
3185 if (type == UE_BULK)
3186 temp = bulk_min[speed];
3187 else /* UE_CONTROL */
3188 temp = control_min[speed];
3189
3190 /* default is fixed */
3191 ptr->fixed[0] = temp;
3192 ptr->fixed[1] = temp;
3193 ptr->fixed[2] = temp;
3194 ptr->fixed[3] = temp;
3195
3196 if (speed == USB_SPEED_FULL) {
3197 /* multiple sizes */
3198 ptr->fixed[1] = 16;
3199 ptr->fixed[2] = 32;
3200 ptr->fixed[3] = 64;
3201 }
3202 if ((speed == USB_SPEED_VARIABLE) &&
3203 (type == UE_BULK)) {
3204 /* multiple sizes */
3205 ptr->fixed[2] = 1024;
3206 ptr->fixed[3] = 1536;
3207 }
3208 break;
3209 }
3210}
3211
3212void *
3213usbd_xfer_softc(struct usb_xfer *xfer)
3214{
3215 return (xfer->priv_sc);
3216}
3217
3218void *
3219usbd_xfer_get_priv(struct usb_xfer *xfer)
3220{
3221 return (xfer->priv_fifo);
3222}
3223
3224void
3225usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3226{
3227 xfer->priv_fifo = ptr;
3228}
3229
3230uint8_t
3231usbd_xfer_state(struct usb_xfer *xfer)
3232{
3233 return (xfer->usb_state);
3234}
3235
3236void
3237usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3238{
3239 switch (flag) {
3240 case USB_FORCE_SHORT_XFER:
3241 xfer->flags.force_short_xfer = 1;
3242 break;
3243 case USB_SHORT_XFER_OK:
3244 xfer->flags.short_xfer_ok = 1;
3245 break;
3246 case USB_MULTI_SHORT_OK:
3247 xfer->flags.short_frames_ok = 1;
3248 break;
3249 case USB_MANUAL_STATUS:
3250 xfer->flags.manual_status = 1;
3251 break;
3252 }
3253}
3254
3255void
3256usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3257{
3258 switch (flag) {
3259 case USB_FORCE_SHORT_XFER:
3260 xfer->flags.force_short_xfer = 0;
3261 break;
3262 case USB_SHORT_XFER_OK:
3263 xfer->flags.short_xfer_ok = 0;
3264 break;
3265 case USB_MULTI_SHORT_OK:
3266 xfer->flags.short_frames_ok = 0;
3267 break;
3268 case USB_MANUAL_STATUS:
3269 xfer->flags.manual_status = 0;
3270 break;
3271 }
3272}
3273
3274/*
3275 * The following function returns the time, in milliseconds, at which the
3276 * isochronous transfer was completed by the hardware. The returned value
3277 * wraps around at 65536 milliseconds; see the example after this function.
3278 */
3279uint16_t
3280usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3281{
3282 return (xfer->isoc_time_complete);
3283}
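/*
 * Example of the wrap-around arithmetic (illustrative; "prev_stamp" is
 * an assumed driver variable): because the timestamp is an unsigned
 * 16-bit value, the time elapsed between two completions that are less
 * than 65536 milliseconds apart can be computed directly:
 *
 *	uint16_t delta = usbd_xfer_get_timestamp(xfer) - prev_stamp;
 */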