usb_transfer.c (old revision 246616) vs. usb_transfer.c (new revision 259218)
1/* $FreeBSD: head/sys/dev/usb/usb_transfer.c 246616 2013-02-10 10:56:13Z hselasky $ */
1/* $FreeBSD: head/sys/dev/usb/usb_transfer.c 259218 2013-12-11 13:20:32Z hselasky $ */
2/*-
3 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#ifdef USB_GLOBAL_INCLUDE_FILE
28#include USB_GLOBAL_INCLUDE_FILE
29#else
30#include <sys/stdint.h>
31#include <sys/stddef.h>
32#include <sys/param.h>
33#include <sys/queue.h>
34#include <sys/types.h>
35#include <sys/systm.h>
36#include <sys/kernel.h>
37#include <sys/bus.h>
38#include <sys/module.h>
39#include <sys/lock.h>
40#include <sys/mutex.h>
41#include <sys/condvar.h>
42#include <sys/sysctl.h>
43#include <sys/sx.h>
44#include <sys/unistd.h>
45#include <sys/callout.h>
46#include <sys/malloc.h>
47#include <sys/priv.h>
48#include <sys/proc.h>
49
50#include <dev/usb/usb.h>
51#include <dev/usb/usbdi.h>
52#include <dev/usb/usbdi_util.h>
53
54#define USB_DEBUG_VAR usb_debug
55
56#include <dev/usb/usb_core.h>
57#include <dev/usb/usb_busdma.h>
58#include <dev/usb/usb_process.h>
59#include <dev/usb/usb_transfer.h>
60#include <dev/usb/usb_device.h>
61#include <dev/usb/usb_debug.h>
62#include <dev/usb/usb_util.h>
63
64#include <dev/usb/usb_controller.h>
65#include <dev/usb/usb_bus.h>
66#include <dev/usb/usb_pf.h>
67#endif /* USB_GLOBAL_INCLUDE_FILE */
68
69struct usb_std_packet_size {
70 struct {
71 uint16_t min; /* inclusive */
72 uint16_t max; /* inclusive */
73 } range;
74
75 uint16_t fixed[4];
76};
77
78static usb_callback_t usb_request_callback;
79
80static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
81
82 /* This transfer is used for generic control endpoint transfers */
83
84 [0] = {
85 .type = UE_CONTROL,
86 .endpoint = 0x00, /* Control endpoint */
87 .direction = UE_DIR_ANY,
88 .bufsize = USB_EP0_BUFSIZE, /* bytes */
89 .flags = {.proxy_buffer = 1,},
90 .callback = &usb_request_callback,
91 .usb_mode = USB_MODE_DUAL, /* both modes */
92 },
93
94 /* This transfer is used for generic clear stall only */
95
96 [1] = {
97 .type = UE_CONTROL,
98 .endpoint = 0x00, /* Control pipe */
99 .direction = UE_DIR_ANY,
100 .bufsize = sizeof(struct usb_device_request),
101 .callback = &usb_do_clear_stall_callback,
102 .timeout = 1000, /* 1 second */
103 .interval = 50, /* 50ms */
104 .usb_mode = USB_MODE_HOST,
105 },
106};
107
108/* function prototypes */
109
110static void usbd_update_max_frame_size(struct usb_xfer *);
111static void usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
112static void usbd_control_transfer_init(struct usb_xfer *);
113static int usbd_setup_ctrl_transfer(struct usb_xfer *);
114static void usb_callback_proc(struct usb_proc_msg *);
115static void usbd_callback_ss_done_defer(struct usb_xfer *);
116static void usbd_callback_wrapper(struct usb_xfer_queue *);
117static void usbd_transfer_start_cb(void *);
118static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *);
119static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
120 uint8_t type, enum usb_dev_speed speed);
121
122/*------------------------------------------------------------------------*
123 * usb_request_callback
124 *------------------------------------------------------------------------*/
125static void
126usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
127{
128 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
129 usb_handle_request_callback(xfer, error);
130 else
131 usbd_do_request_callback(xfer, error);
132}
133
134/*------------------------------------------------------------------------*
135 * usbd_update_max_frame_size
136 *
 137 * This function updates the maximum frame size, because high speed
 138 * USB can transfer multiple consecutive packets per frame.
139 *------------------------------------------------------------------------*/
140static void
141usbd_update_max_frame_size(struct usb_xfer *xfer)
142{
143 /* compute maximum frame size */
144 /* this computation should not overflow 16-bit */
145 /* max = 15 * 1024 */
146
147 xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
148}
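/*
 * Worked example: a high-speed isochronous or interrupt endpoint
 * reporting wMaxPacketSize = 0x1400 yields max_packet_size = 1024 and
 * max_packet_count = 3 (see "usbd_transfer_setup_sub()" below), so
 * the maximum frame size computed above becomes 3 * 1024 = 3072
 * bytes per microframe.
 */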
149
150/*------------------------------------------------------------------------*
151 * usbd_get_dma_delay
152 *
153 * The following function is called when we need to
154 * synchronize with DMA hardware.
155 *
156 * Returns:
157 * 0: no DMA delay required
158 * Else: milliseconds of DMA delay
159 *------------------------------------------------------------------------*/
160usb_timeout_t
161usbd_get_dma_delay(struct usb_device *udev)
162{
163 struct usb_bus_methods *mtod;
163 const struct usb_bus_methods *mtod;
164 uint32_t temp;
165
166 mtod = udev->bus->methods;
167 temp = 0;
168
169 if (mtod->get_dma_delay) {
170 (mtod->get_dma_delay) (udev, &temp);
171 /*
172 * Round up and convert to milliseconds. Note that we use
 173		 * 1024 milliseconds per second to save a division.
174 */
175 temp += 0x3FF;
176 temp /= 0x400;
177 }
178 return (temp);
179}
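/*
 * Example: if the controller's "get_dma_delay" method reports 1500
 * (assumed here to be a value in microseconds), the round-up above
 * gives (1500 + 1023) / 1024 = 2, i.e. roughly a two millisecond
 * delay before DMA'able memory is freed or reused (see
 * "usbd_transfer_unsetup_sub()").
 */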
180
181/*------------------------------------------------------------------------*
182 * usbd_transfer_setup_sub_malloc
183 *
184 * This function will allocate one or more DMA'able memory chunks
 185 * according to the "size", "align" and "count" arguments. On return
 186 * "ppc" points to a linear array of USB page caches.
187 *
188 * If the "align" argument is equal to "1" a non-contiguous allocation
189 * can happen. Else if the "align" argument is greater than "1", the
190 * allocation will always be contiguous in memory.
191 *
192 * Returns:
193 * 0: Success
194 * Else: Failure
195 *------------------------------------------------------------------------*/
196#if USB_HAVE_BUSDMA
197uint8_t
198usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
199 struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
200 usb_size_t count)
201{
202 struct usb_page_cache *pc;
203 struct usb_page *pg;
204 void *buf;
205 usb_size_t n_dma_pc;
206 usb_size_t n_dma_pg;
207 usb_size_t n_obj;
208 usb_size_t x;
209 usb_size_t y;
210 usb_size_t r;
211 usb_size_t z;
212
213 USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
214 align));
215 USB_ASSERT(size > 0, ("Invalid size = 0\n"));
216
217 if (count == 0) {
218 return (0); /* nothing to allocate */
219 }
220 /*
221 * Make sure that the size is aligned properly.
222 */
223 size = -((-size) & (-align));
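	/*
	 * The expression above rounds "size" up to a multiple of
	 * "align" using a negate-and-mask trick, assuming "align" is a
	 * power of two. For example, size = 100 and align = 64 gives
	 * -((-100) & (-64)) = -(-128) = 128.
	 */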
224
225 /*
226 * Try multi-allocation chunks to reduce the number of DMA
 227	 * allocations, because DMA allocations are slow.
228 */
229 if (align == 1) {
230 /* special case - non-cached multi page DMA memory */
231 n_dma_pc = count;
232 n_dma_pg = (2 + (size / USB_PAGE_SIZE));
233 n_obj = 1;
234 } else if (size >= USB_PAGE_SIZE) {
235 n_dma_pc = count;
236 n_dma_pg = 1;
237 n_obj = 1;
238 } else {
239 /* compute number of objects per page */
240 n_obj = (USB_PAGE_SIZE / size);
241 /*
242 * Compute number of DMA chunks, rounded up
243 * to nearest one:
244 */
245 n_dma_pc = ((count + n_obj - 1) / n_obj);
246 n_dma_pg = 1;
247 }
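	/*
	 * Example of the chunking above: with an aligned object size of
	 * 64 bytes and a 4096 byte USB_PAGE_SIZE, n_obj = 64 objects
	 * fit into one DMA chunk, so count = 100 buffers need only
	 * n_dma_pc = 2 DMA allocations instead of 100.
	 */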
248
249 /*
250 * DMA memory is allocated once, but mapped twice. That's why
251 * there is one list for auto-free and another list for
252 * non-auto-free which only holds the mapping and not the
253 * allocation.
254 */
255 if (parm->buf == NULL) {
256 /* reserve memory (auto-free) */
257 parm->dma_page_ptr += n_dma_pc * n_dma_pg;
258 parm->dma_page_cache_ptr += n_dma_pc;
259
260 /* reserve memory (no-auto-free) */
261 parm->dma_page_ptr += count * n_dma_pg;
262 parm->xfer_page_cache_ptr += count;
263 return (0);
264 }
265 for (x = 0; x != n_dma_pc; x++) {
266 /* need to initialize the page cache */
267 parm->dma_page_cache_ptr[x].tag_parent =
268 &parm->curr_xfer->xroot->dma_parent_tag;
269 }
270 for (x = 0; x != count; x++) {
271 /* need to initialize the page cache */
272 parm->xfer_page_cache_ptr[x].tag_parent =
273 &parm->curr_xfer->xroot->dma_parent_tag;
274 }
275
276 if (ppc) {
277 *ppc = parm->xfer_page_cache_ptr;
278 }
279 r = count; /* set remainder count */
280 z = n_obj * size; /* set allocation size */
281 pc = parm->xfer_page_cache_ptr;
282 pg = parm->dma_page_ptr;
283
284 for (x = 0; x != n_dma_pc; x++) {
285
286 if (r < n_obj) {
287 /* compute last remainder */
288 z = r * size;
289 n_obj = r;
290 }
291 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
292 pg, z, align)) {
293 return (1); /* failure */
294 }
295 /* Set beginning of current buffer */
296 buf = parm->dma_page_cache_ptr->buffer;
297 /* Make room for one DMA page cache and one page */
298 parm->dma_page_cache_ptr++;
299 pg += n_dma_pg;
300
301 for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {
302
303 /* Load sub-chunk into DMA */
304 if (usb_pc_dmamap_create(pc, size)) {
305 return (1); /* failure */
306 }
307 pc->buffer = USB_ADD_BYTES(buf, y * size);
308 pc->page_start = pg;
309
310 mtx_lock(pc->tag_parent->mtx);
311 if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
312 mtx_unlock(pc->tag_parent->mtx);
313 return (1); /* failure */
314 }
315 mtx_unlock(pc->tag_parent->mtx);
316 }
317 }
318
319 parm->xfer_page_cache_ptr = pc;
320 parm->dma_page_ptr = pg;
321 return (0);
322}
323#endif
324
325/*------------------------------------------------------------------------*
326 * usbd_transfer_setup_sub - transfer setup subroutine
327 *
328 * This function must be called from the "xfer_setup" callback of the
 329 * USB Host or Device controller driver when setting up a USB
 330 * transfer. This function will set up the correct packet sizes, buffer
 331 * sizes, flags and more, which are stored in the "usb_xfer"
332 * structure.
333 *------------------------------------------------------------------------*/
334void
335usbd_transfer_setup_sub(struct usb_setup_params *parm)
336{
337 enum {
338 REQ_SIZE = 8,
339 MIN_PKT = 8,
340 };
341 struct usb_xfer *xfer = parm->curr_xfer;
342 const struct usb_config *setup = parm->curr_setup;
343 struct usb_endpoint_ss_comp_descriptor *ecomp;
344 struct usb_endpoint_descriptor *edesc;
345 struct usb_std_packet_size std_size;
346 usb_frcount_t n_frlengths;
347 usb_frcount_t n_frbuffers;
348 usb_frcount_t x;
349 uint8_t type;
350 uint8_t zmps;
351
352 /*
353 * Sanity check. The following parameters must be initialized before
354 * calling this function.
355 */
356 if ((parm->hc_max_packet_size == 0) ||
357 (parm->hc_max_packet_count == 0) ||
358 (parm->hc_max_frame_size == 0)) {
359 parm->err = USB_ERR_INVAL;
360 goto done;
361 }
362 edesc = xfer->endpoint->edesc;
363 ecomp = xfer->endpoint->ecomp;
364
365 type = (edesc->bmAttributes & UE_XFERTYPE);
366
367 xfer->flags = setup->flags;
368 xfer->nframes = setup->frames;
369 xfer->timeout = setup->timeout;
370 xfer->callback = setup->callback;
371 xfer->interval = setup->interval;
372 xfer->endpointno = edesc->bEndpointAddress;
373 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
374 xfer->max_packet_count = 1;
375 /* make a shadow copy: */
376 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
377
378 parm->bufsize = setup->bufsize;
379
380 switch (parm->speed) {
381 case USB_SPEED_HIGH:
382 switch (type) {
383 case UE_ISOCHRONOUS:
384 case UE_INTERRUPT:
385 xfer->max_packet_count +=
386 (xfer->max_packet_size >> 11) & 3;
387
388 /* check for invalid max packet count */
389 if (xfer->max_packet_count > 3)
390 xfer->max_packet_count = 3;
391 break;
392 default:
393 break;
394 }
395 xfer->max_packet_size &= 0x7FF;
396 break;
397 case USB_SPEED_SUPER:
398 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
399
400 if (ecomp != NULL)
401 xfer->max_packet_count += ecomp->bMaxBurst;
402
403 if ((xfer->max_packet_count == 0) ||
404 (xfer->max_packet_count > 16))
405 xfer->max_packet_count = 16;
406
407 switch (type) {
408 case UE_CONTROL:
409 xfer->max_packet_count = 1;
410 break;
411 case UE_ISOCHRONOUS:
412 if (ecomp != NULL) {
413 uint8_t mult;
414
415 mult = UE_GET_SS_ISO_MULT(
416 ecomp->bmAttributes) + 1;
417 if (mult > 3)
418 mult = 3;
419
420 xfer->max_packet_count *= mult;
421 }
422 break;
423 default:
424 break;
425 }
426 xfer->max_packet_size &= 0x7FF;
427 break;
428 default:
429 break;
430 }
431 /* range check "max_packet_count" */
432
433 if (xfer->max_packet_count > parm->hc_max_packet_count) {
434 xfer->max_packet_count = parm->hc_max_packet_count;
435 }
436 /* filter "wMaxPacketSize" according to HC capabilities */
437
438 if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
439 (xfer->max_packet_size == 0)) {
440 xfer->max_packet_size = parm->hc_max_packet_size;
441 }
442 /* filter "wMaxPacketSize" according to standard sizes */
443
444 usbd_get_std_packet_size(&std_size, type, parm->speed);
445
446 if (std_size.range.min || std_size.range.max) {
447
448 if (xfer->max_packet_size < std_size.range.min) {
449 xfer->max_packet_size = std_size.range.min;
450 }
451 if (xfer->max_packet_size > std_size.range.max) {
452 xfer->max_packet_size = std_size.range.max;
453 }
454 } else {
455
456 if (xfer->max_packet_size >= std_size.fixed[3]) {
457 xfer->max_packet_size = std_size.fixed[3];
458 } else if (xfer->max_packet_size >= std_size.fixed[2]) {
459 xfer->max_packet_size = std_size.fixed[2];
460 } else if (xfer->max_packet_size >= std_size.fixed[1]) {
461 xfer->max_packet_size = std_size.fixed[1];
462 } else {
463 /* only one possibility left */
464 xfer->max_packet_size = std_size.fixed[0];
465 }
466 }
467
468 /* compute "max_frame_size" */
469
470 usbd_update_max_frame_size(xfer);
471
472 /* check interrupt interval and transfer pre-delay */
473
474 if (type == UE_ISOCHRONOUS) {
475
476 uint16_t frame_limit;
477
478 xfer->interval = 0; /* not used, must be zero */
479 xfer->flags_int.isochronous_xfr = 1; /* set flag */
480
481 if (xfer->timeout == 0) {
482 /*
483 * set a default timeout in
484 * case something goes wrong!
485 */
486 xfer->timeout = 1000 / 4;
487 }
488 switch (parm->speed) {
489 case USB_SPEED_LOW:
490 case USB_SPEED_FULL:
491 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
492 xfer->fps_shift = 0;
493 break;
494 default:
495 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
496 xfer->fps_shift = edesc->bInterval;
497 if (xfer->fps_shift > 0)
498 xfer->fps_shift--;
499 if (xfer->fps_shift > 3)
500 xfer->fps_shift = 3;
501 if (xfer->flags.pre_scale_frames != 0)
502 xfer->nframes <<= (3 - xfer->fps_shift);
503 break;
504 }
505
506 if (xfer->nframes > frame_limit) {
507 /*
508 * this is not going to work
509 * cross hardware
510 */
511 parm->err = USB_ERR_INVAL;
512 goto done;
513 }
514 if (xfer->nframes == 0) {
515 /*
516 * this is not a valid value
517 */
518 parm->err = USB_ERR_ZERO_NFRAMES;
519 goto done;
520 }
521 } else {
522
523 /*
524 * If a value is specified use that else check the
525 * endpoint descriptor!
526 */
527 if (type == UE_INTERRUPT) {
528
529 uint32_t temp;
530
531 if (xfer->interval == 0) {
532
533 xfer->interval = edesc->bInterval;
534
535 switch (parm->speed) {
536 case USB_SPEED_LOW:
537 case USB_SPEED_FULL:
538 break;
539 default:
540 /* 125us -> 1ms */
541 if (xfer->interval < 4)
542 xfer->interval = 1;
543 else if (xfer->interval > 16)
544 xfer->interval = (1 << (16 - 4));
545 else
546 xfer->interval =
547 (1 << (xfer->interval - 4));
548 break;
549 }
550 }
551
552 if (xfer->interval == 0) {
553 /*
554 * One millisecond is the smallest
555 * interval we support:
556 */
557 xfer->interval = 1;
558 }
559
560 xfer->fps_shift = 0;
561 temp = 1;
562
563 while ((temp != 0) && (temp < xfer->interval)) {
564 xfer->fps_shift++;
565 temp *= 2;
566 }
567
568 switch (parm->speed) {
569 case USB_SPEED_LOW:
570 case USB_SPEED_FULL:
571 break;
572 default:
573 xfer->fps_shift += 3;
574 break;
575 }
576 }
577 }
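	/*
	 * Example: an interrupt endpoint on a high-speed device with
	 * bInterval = 7 and no interval override in the configuration
	 * gets interval = 1 << (7 - 4) = 8 milliseconds above. The loop
	 * then computes fps_shift = 3, which the high-speed case
	 * adjusts to 6, i.e. 2^6 = 64 microframes = 8 ms.
	 */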
578
579 /*
580 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
 581	 * to be equal to zero when setting up USB transfers, because
 582	 * that would lead to a lot of extra code in the USB kernel.
583 */
584
585 if ((xfer->max_frame_size == 0) ||
586 (xfer->max_packet_size == 0)) {
587
588 zmps = 1;
589
590 if ((parm->bufsize <= MIN_PKT) &&
591 (type != UE_CONTROL) &&
592 (type != UE_BULK)) {
593
594 /* workaround */
595 xfer->max_packet_size = MIN_PKT;
596 xfer->max_packet_count = 1;
597 parm->bufsize = 0; /* automatic setup length */
598 usbd_update_max_frame_size(xfer);
599
600 } else {
601 parm->err = USB_ERR_ZERO_MAXP;
602 goto done;
603 }
604
605 } else {
606 zmps = 0;
607 }
608
609 /*
610 * check if we should setup a default
611 * length:
612 */
613
614 if (parm->bufsize == 0) {
615
616 parm->bufsize = xfer->max_frame_size;
617
618 if (type == UE_ISOCHRONOUS) {
619 parm->bufsize *= xfer->nframes;
620 }
621 }
622 /*
623 * check if we are about to setup a proxy
624 * type of buffer:
625 */
626
627 if (xfer->flags.proxy_buffer) {
628
629 /* round bufsize up */
630
631 parm->bufsize += (xfer->max_frame_size - 1);
632
633 if (parm->bufsize < xfer->max_frame_size) {
634 /* length wrapped around */
635 parm->err = USB_ERR_INVAL;
636 goto done;
637 }
638 /* subtract remainder */
639
640 parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
641
642 /* add length of USB device request structure, if any */
643
644 if (type == UE_CONTROL) {
645 parm->bufsize += REQ_SIZE; /* SETUP message */
646 }
647 }
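	/*
	 * Proxy buffer example: a requested bufsize of 1000 bytes with
	 * a max_frame_size of 512 is rounded up to 1024 bytes above,
	 * and a control endpoint additionally reserves REQ_SIZE (8)
	 * bytes for the SETUP message, giving a 1032 byte proxy buffer.
	 */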
648 xfer->max_data_length = parm->bufsize;
649
650 /* Setup "n_frlengths" and "n_frbuffers" */
651
652 if (type == UE_ISOCHRONOUS) {
653 n_frlengths = xfer->nframes;
654 n_frbuffers = 1;
655 } else {
656
657 if (type == UE_CONTROL) {
658 xfer->flags_int.control_xfr = 1;
659 if (xfer->nframes == 0) {
660 if (parm->bufsize <= REQ_SIZE) {
661 /*
662 * there will never be any data
663 * stage
664 */
665 xfer->nframes = 1;
666 } else {
667 xfer->nframes = 2;
668 }
669 }
670 } else {
671 if (xfer->nframes == 0) {
672 xfer->nframes = 1;
673 }
674 }
675
676 n_frlengths = xfer->nframes;
677 n_frbuffers = xfer->nframes;
678 }
679
680 /*
681 * check if we have room for the
682 * USB device request structure:
683 */
684
685 if (type == UE_CONTROL) {
686
687 if (xfer->max_data_length < REQ_SIZE) {
688 /* length wrapped around or too small bufsize */
689 parm->err = USB_ERR_INVAL;
690 goto done;
691 }
692 xfer->max_data_length -= REQ_SIZE;
693 }
694 /*
695 * Setup "frlengths" and shadow "frlengths" for keeping the
696 * initial frame lengths when a USB transfer is complete. This
697 * information is useful when computing isochronous offsets.
698 */
699 xfer->frlengths = parm->xfer_length_ptr;
700 parm->xfer_length_ptr += 2 * n_frlengths;
701
702 /* setup "frbuffers" */
703 xfer->frbuffers = parm->xfer_page_cache_ptr;
704 parm->xfer_page_cache_ptr += n_frbuffers;
705
706 /* initialize max frame count */
707 xfer->max_frame_count = xfer->nframes;
708
709 /*
710 * check if we need to setup
711 * a local buffer:
712 */
713
714 if (!xfer->flags.ext_buffer) {
715#if USB_HAVE_BUSDMA
716 struct usb_page_search page_info;
717 struct usb_page_cache *pc;
718
719 if (usbd_transfer_setup_sub_malloc(parm,
720 &pc, parm->bufsize, 1, 1)) {
721 parm->err = USB_ERR_NOMEM;
722 } else if (parm->buf != NULL) {
723
724 usbd_get_page(pc, 0, &page_info);
725
726 xfer->local_buffer = page_info.buffer;
727
728 usbd_xfer_set_frame_offset(xfer, 0, 0);
729
730 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
731 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
732 }
733 }
734#else
735 /* align data */
736 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
737
738 if (parm->buf != NULL) {
739 xfer->local_buffer =
740 USB_ADD_BYTES(parm->buf, parm->size[0]);
741
742 usbd_xfer_set_frame_offset(xfer, 0, 0);
743
744 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
745 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
746 }
747 }
748 parm->size[0] += parm->bufsize;
749
750 /* align data again */
751 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
752#endif
753 }
754 /*
755 * Compute maximum buffer size
756 */
757
758 if (parm->bufsize_max < parm->bufsize) {
759 parm->bufsize_max = parm->bufsize;
760 }
761#if USB_HAVE_BUSDMA
762 if (xfer->flags_int.bdma_enable) {
763 /*
764 * Setup "dma_page_ptr".
765 *
766 * Proof for formula below:
767 *
768 * Assume there are three USB frames having length "a", "b" and
769 * "c". These USB frames will at maximum need "z"
770 * "usb_page" structures. "z" is given by:
771 *
772 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
773 * ((c / USB_PAGE_SIZE) + 2);
774 *
775 * Constraining "a", "b" and "c" like this:
776 *
777 * (a + b + c) <= parm->bufsize
778 *
779 * We know that:
780 *
781 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
782 *
783 * Here is the general formula:
784 */
785 xfer->dma_page_ptr = parm->dma_page_ptr;
786 parm->dma_page_ptr += (2 * n_frbuffers);
787 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
788 }
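	/*
	 * Example of the reservation above: with n_frbuffers = 2,
	 * parm->bufsize = 8192 and a 4096 byte USB_PAGE_SIZE, a total
	 * of 2 * 2 + 8192 / 4096 = 6 "usb_page" structures is reserved,
	 * which stays within the worst case bound derived above.
	 */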
789#endif
790 if (zmps) {
791 /* correct maximum data length */
792 xfer->max_data_length = 0;
793 }
794 /* subtract USB frame remainder from "hc_max_frame_size" */
795
796 xfer->max_hc_frame_size =
797 (parm->hc_max_frame_size -
798 (parm->hc_max_frame_size % xfer->max_frame_size));
799
800 if (xfer->max_hc_frame_size == 0) {
801 parm->err = USB_ERR_INVAL;
802 goto done;
803 }
804
805 /* initialize frame buffers */
806
807 if (parm->buf) {
808 for (x = 0; x != n_frbuffers; x++) {
809 xfer->frbuffers[x].tag_parent =
810 &xfer->xroot->dma_parent_tag;
811#if USB_HAVE_BUSDMA
812 if (xfer->flags_int.bdma_enable &&
813 (parm->bufsize_max > 0)) {
814
815 if (usb_pc_dmamap_create(
816 xfer->frbuffers + x,
817 parm->bufsize_max)) {
818 parm->err = USB_ERR_NOMEM;
819 goto done;
820 }
821 }
822#endif
823 }
824 }
825done:
826 if (parm->err) {
827 /*
828 * Set some dummy values so that we avoid division by zero:
829 */
830 xfer->max_hc_frame_size = 1;
831 xfer->max_frame_size = 1;
832 xfer->max_packet_size = 1;
833 xfer->max_data_length = 0;
834 xfer->nframes = 0;
835 xfer->max_frame_count = 0;
836 }
837}
838
839/*------------------------------------------------------------------------*
840 * usbd_transfer_setup - setup an array of USB transfers
841 *
842 * NOTE: You must always call "usbd_transfer_unsetup" after calling
843 * "usbd_transfer_setup" if success was returned.
844 *
845 * The idea is that the USB device driver should pre-allocate all its
846 * transfers by one call to this function.
847 *
848 * Return values:
849 * 0: Success
850 * Else: Failure
851 *------------------------------------------------------------------------*/
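/*
 * Minimal usage sketch (hypothetical "xxx" driver, for illustration
 * only): a driver typically declares a constant "usb_config" array
 * and sets up all of its transfers with one call:
 *
 *	static const struct usb_config xxx_config[1] = {
 *		[0] = {
 *			.type = UE_BULK,
 *			.endpoint = UE_ADDR_ANY,
 *			.direction = UE_DIR_IN,
 *			.bufsize = 512,
 *			.flags = {.pipe_bof = 1, .short_xfer_ok = 1,},
 *			.callback = &xxx_read_callback,
 *		},
 *	};
 *
 *	error = usbd_transfer_setup(udev, &iface_index, sc->sc_xfer,
 *	    xxx_config, 1, sc, &sc->sc_mtx);
 *
 * Here "xxx", "sc" and "iface_index" are placeholders. On success the
 * transfers must eventually be released again by a matching call to
 * "usbd_transfer_unsetup()".
 */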
852usb_error_t
853usbd_transfer_setup(struct usb_device *udev,
854 const uint8_t *ifaces, struct usb_xfer **ppxfer,
855 const struct usb_config *setup_start, uint16_t n_setup,
856 void *priv_sc, struct mtx *xfer_mtx)
857{
858 const struct usb_config *setup_end = setup_start + n_setup;
859 const struct usb_config *setup;
860 struct usb_setup_params *parm;
861 struct usb_endpoint *ep;
862 struct usb_xfer_root *info;
863 struct usb_xfer *xfer;
864 void *buf = NULL;
865 usb_error_t error = 0;
866 uint16_t n;
867 uint16_t refcount;
868 uint8_t do_unlock;
869
870 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
871 "usbd_transfer_setup can sleep!");
872
873 /* do some checking first */
874
875 if (n_setup == 0) {
876 DPRINTFN(6, "setup array has zero length!\n");
877 return (USB_ERR_INVAL);
878 }
879 if (ifaces == 0) {
880 DPRINTFN(6, "ifaces array is NULL!\n");
881 return (USB_ERR_INVAL);
882 }
883 if (xfer_mtx == NULL) {
884 DPRINTFN(6, "using global lock\n");
885 xfer_mtx = &Giant;
886 }
887
888 /* more sanity checks */
889
890 for (setup = setup_start, n = 0;
891 setup != setup_end; setup++, n++) {
892 if (setup->bufsize == (usb_frlength_t)-1) {
893 error = USB_ERR_BAD_BUFSIZE;
894 DPRINTF("invalid bufsize\n");
895 }
896 if (setup->callback == NULL) {
897 error = USB_ERR_NO_CALLBACK;
898 DPRINTF("no callback\n");
899 }
900 ppxfer[n] = NULL;
901 }
902
903 if (error)
904 return (error);
905
906 /* Protect scratch area */
907 do_unlock = usbd_enum_lock(udev);
908
909 refcount = 0;
910 info = NULL;
911
912 parm = &udev->scratch.xfer_setup[0].parm;
913 memset(parm, 0, sizeof(*parm));
914
915 parm->udev = udev;
916 parm->speed = usbd_get_speed(udev);
917 parm->hc_max_packet_count = 1;
918
919 if (parm->speed >= USB_SPEED_MAX) {
920 parm->err = USB_ERR_INVAL;
921 goto done;
922 }
923 /* setup all transfers */
924
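	/*
	 * The loop below runs twice. On the first pass "buf" is NULL
	 * and only the required object sizes and offsets are
	 * accumulated in "parm->size[]". Then one memory block is
	 * allocated and the second pass performs the actual
	 * initialisation, after which the loop exits through "done".
	 */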
925 while (1) {
926
927 if (buf) {
928 /*
929 * Initialize the "usb_xfer_root" structure,
930 * which is common for all our USB transfers.
931 */
932 info = USB_ADD_BYTES(buf, 0);
933
934 info->memory_base = buf;
935 info->memory_size = parm->size[0];
936
937#if USB_HAVE_BUSDMA
938 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]);
939 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]);
940#endif
941 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]);
942 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]);
943
944 cv_init(&info->cv_drain, "WDRAIN");
945
946 info->xfer_mtx = xfer_mtx;
947#if USB_HAVE_BUSDMA
948 usb_dma_tag_setup(&info->dma_parent_tag,
949 parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag,
950 xfer_mtx, &usb_bdma_done_event, 32, parm->dma_tag_max);
951#endif
952
953 info->bus = udev->bus;
954 info->udev = udev;
955
956 TAILQ_INIT(&info->done_q.head);
957 info->done_q.command = &usbd_callback_wrapper;
958#if USB_HAVE_BUSDMA
959 TAILQ_INIT(&info->dma_q.head);
960 info->dma_q.command = &usb_bdma_work_loop;
961#endif
962 info->done_m[0].hdr.pm_callback = &usb_callback_proc;
963 info->done_m[0].xroot = info;
964 info->done_m[1].hdr.pm_callback = &usb_callback_proc;
965 info->done_m[1].xroot = info;
966
967 /*
968 * In device side mode control endpoint
969 * requests need to run from a separate
970 * context, else there is a chance of
971 * deadlock!
972 */
973 if (setup_start == usb_control_ep_cfg)
974 info->done_p =
975 USB_BUS_CONTROL_XFER_PROC(udev->bus);
976 else if (xfer_mtx == &Giant)
977 info->done_p =
978 USB_BUS_GIANT_PROC(udev->bus);
979 else
980 info->done_p =
981 USB_BUS_NON_GIANT_PROC(udev->bus);
982 }
983 /* reset sizes */
984
985 parm->size[0] = 0;
986 parm->buf = buf;
987 parm->size[0] += sizeof(info[0]);
988
989 for (setup = setup_start, n = 0;
990 setup != setup_end; setup++, n++) {
991
992 /* skip USB transfers without callbacks: */
993 if (setup->callback == NULL) {
994 continue;
995 }
996 /* see if there is a matching endpoint */
997 ep = usbd_get_endpoint(udev,
998 ifaces[setup->if_index], setup);
999
1000 /*
1001 * Check that the USB PIPE is valid and that
1002 * the endpoint mode is proper.
1003 *
1004 * Make sure we don't allocate a streams
1005 * transfer when such a combination is not
1006 * valid.
1007 */
1008 if ((ep == NULL) || (ep->methods == NULL) ||
1009 ((ep->ep_mode != USB_EP_MODE_STREAMS) &&
1010 (ep->ep_mode != USB_EP_MODE_DEFAULT)) ||
1011 (setup->stream_id != 0 &&
1012 (setup->stream_id >= USB_MAX_EP_STREAMS ||
1013 (ep->ep_mode != USB_EP_MODE_STREAMS)))) {
1014 if (setup->flags.no_pipe_ok)
1015 continue;
1016 if ((setup->usb_mode != USB_MODE_DUAL) &&
1017 (setup->usb_mode != udev->flags.usb_mode))
1018 continue;
1019 parm->err = USB_ERR_NO_PIPE;
1020 goto done;
1021 }
1022
1023 /* align data properly */
1024 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1025
1026 /* store current setup pointer */
1027 parm->curr_setup = setup;
1028
1029 if (buf) {
1030 /*
1031 * Common initialization of the
1032 * "usb_xfer" structure.
1033 */
1034 xfer = USB_ADD_BYTES(buf, parm->size[0]);
1035 xfer->address = udev->address;
1036 xfer->priv_sc = priv_sc;
1037 xfer->xroot = info;
1038
1039 usb_callout_init_mtx(&xfer->timeout_handle,
1040 &udev->bus->bus_mtx, 0);
1041 } else {
1042 /*
1043 * Setup a dummy xfer, hence we are
1044 * writing to the "usb_xfer"
1045 * structure pointed to by "xfer"
1046 * before we have allocated any
1047 * memory:
1048 */
1049 xfer = &udev->scratch.xfer_setup[0].dummy;
1050 memset(xfer, 0, sizeof(*xfer));
1051 refcount++;
1052 }
1053
1054 /* set transfer endpoint pointer */
1055 xfer->endpoint = ep;
1056
1057 /* set transfer stream ID */
1058 xfer->stream_id = setup->stream_id;
1059
1060 parm->size[0] += sizeof(xfer[0]);
1061 parm->methods = xfer->endpoint->methods;
1062 parm->curr_xfer = xfer;
1063
1064 /*
1065 * Call the Host or Device controller transfer
1066 * setup routine:
1067 */
1068 (udev->bus->methods->xfer_setup) (parm);
1069
1070 /* check for error */
1071 if (parm->err)
1072 goto done;
1073
1074 if (buf) {
1075 /*
1076 * Increment the endpoint refcount. This
1077 * basically prevents setting a new
1078 * configuration and alternate setting
1079 * when USB transfers are in use on
1080 * the given interface. Search the USB
1081 * code for "endpoint->refcount_alloc" if you
1082 * want more information.
1083 */
1084 USB_BUS_LOCK(info->bus);
1085 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1086 parm->err = USB_ERR_INVAL;
1087
1088 xfer->endpoint->refcount_alloc++;
1089
1090 if (xfer->endpoint->refcount_alloc == 0)
1091 panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1092 USB_BUS_UNLOCK(info->bus);
1093
1094 /*
1095 * Whenever we set ppxfer[] then we
1096 * also need to increment the
1097 * "setup_refcount":
1098 */
1099 info->setup_refcount++;
1100
1101 /*
1102 * Transfer is successfully setup and
1103 * can be used:
1104 */
1105 ppxfer[n] = xfer;
1106 }
1107
1108 /* check for error */
1109 if (parm->err)
1110 goto done;
1111 }
1112
1113 if (buf != NULL || parm->err != 0)
1114 goto done;
1115
1116 /* if no transfers, nothing to do */
1117 if (refcount == 0)
1118 goto done;
1119
1120 /* align data properly */
1121 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1122
1123 /* store offset temporarily */
1124 parm->size[1] = parm->size[0];
1125
1126 /*
1127 * The number of DMA tags required depends on
1128 * the number of endpoints. The current estimate
1129 * for maximum number of DMA tags per endpoint
1130 * is three:
1131 * 1) for loading memory
1132 * 2) for allocating memory
1133 * 3) for fixing memory [UHCI]
1134 */
1135 parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX);
1136
1137 /*
1138 * DMA tags for QH, TD, Data and more.
1139 */
1140 parm->dma_tag_max += 8;
1141
1142 parm->dma_tag_p += parm->dma_tag_max;
1143
1144 parm->size[0] += ((uint8_t *)parm->dma_tag_p) -
1145 ((uint8_t *)0);
1146
1147 /* align data properly */
1148 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1149
1150 /* store offset temporarily */
1151 parm->size[3] = parm->size[0];
1152
1153 parm->size[0] += ((uint8_t *)parm->dma_page_ptr) -
1154 ((uint8_t *)0);
1155
1156 /* align data properly */
1157 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1158
1159 /* store offset temporarily */
1160 parm->size[4] = parm->size[0];
1161
1162 parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) -
1163 ((uint8_t *)0);
1164
1165 /* store end offset temporarily */
1166 parm->size[5] = parm->size[0];
1167
1168 parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) -
1169 ((uint8_t *)0);
1170
1171 /* store end offset temporarily */
1172
1173 parm->size[2] = parm->size[0];
1174
1175 /* align data properly */
1176 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1177
1178 parm->size[6] = parm->size[0];
1179
1180 parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) -
1181 ((uint8_t *)0);
1182
1183 /* align data properly */
1184 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1185
1186 /* allocate zeroed memory */
1187 buf = malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO);
1188
1189 if (buf == NULL) {
1190 parm->err = USB_ERR_NOMEM;
1191 DPRINTFN(0, "cannot allocate memory block for "
1192 "configuration (%d bytes)\n",
1193 parm->size[0]);
1194 goto done;
1195 }
1196 parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]);
1197 parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]);
1198 parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]);
1199 parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]);
1200 parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]);
1201 }
1202
1203done:
1204 if (buf) {
1205 if (info->setup_refcount == 0) {
1206 /*
1207 * "usbd_transfer_unsetup_sub" will unlock
1208 * the bus mutex before returning !
1209 */
1210 USB_BUS_LOCK(info->bus);
1211
1212 /* something went wrong */
1213 usbd_transfer_unsetup_sub(info, 0);
1214 }
1215 }
1216
1217 /* check if any errors happened */
1218 if (parm->err)
1219 usbd_transfer_unsetup(ppxfer, n_setup);
1220
1221 error = parm->err;
1222
1223 if (do_unlock)
1224 usbd_enum_unlock(udev);
1225
1226 return (error);
1227}
1228
1229/*------------------------------------------------------------------------*
1230 * usbd_transfer_unsetup_sub - factored out code
1231 *------------------------------------------------------------------------*/
1232static void
1233usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1234{
1235#if USB_HAVE_BUSDMA
1236 struct usb_page_cache *pc;
1237#endif
1238
1239 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1240
1241 /* wait for any outstanding DMA operations */
1242
1243 if (needs_delay) {
1244 usb_timeout_t temp;
1245 temp = usbd_get_dma_delay(info->udev);
1246 if (temp != 0) {
1247 usb_pause_mtx(&info->bus->bus_mtx,
1248 USB_MS_TO_TICKS(temp));
1249 }
1250 }
1251
1252 /* make sure that our done messages are not queued anywhere */
1253 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1254
1255 USB_BUS_UNLOCK(info->bus);
1256
1257#if USB_HAVE_BUSDMA
1258 /* free DMA'able memory, if any */
1259 pc = info->dma_page_cache_start;
1260 while (pc != info->dma_page_cache_end) {
1261 usb_pc_free_mem(pc);
1262 pc++;
1263 }
1264
1265 /* free DMA maps in all "xfer->frbuffers" */
1266 pc = info->xfer_page_cache_start;
1267 while (pc != info->xfer_page_cache_end) {
1268 usb_pc_dmamap_destroy(pc);
1269 pc++;
1270 }
1271
1272 /* free all DMA tags */
1273 usb_dma_tag_unsetup(&info->dma_parent_tag);
1274#endif
1275
1276 cv_destroy(&info->cv_drain);
1277
1278 /*
 1279	 * free the "memory_base" last, because the "info" structure is
1280 * contained within the "memory_base"!
1281 */
1282 free(info->memory_base, M_USB);
1283}
1284
1285/*------------------------------------------------------------------------*
1286 * usbd_transfer_unsetup - unsetup/free an array of USB transfers
1287 *
1288 * NOTE: All USB transfers in progress will get called back passing
1289 * the error code "USB_ERR_CANCELLED" before this function
1290 * returns.
1291 *------------------------------------------------------------------------*/
1292void
1293usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1294{
1295 struct usb_xfer *xfer;
1296 struct usb_xfer_root *info;
1297 uint8_t needs_delay = 0;
1298
1299 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1300 "usbd_transfer_unsetup can sleep!");
1301
1302 while (n_setup--) {
1303 xfer = pxfer[n_setup];
1304
1305 if (xfer == NULL)
1306 continue;
1307
1308 info = xfer->xroot;
1309
1310 USB_XFER_LOCK(xfer);
1311 USB_BUS_LOCK(info->bus);
1312
1313 /*
1314 * HINT: when you start/stop a transfer, it might be a
1315 * good idea to directly use the "pxfer[]" structure:
1316 *
1317 * usbd_transfer_start(sc->pxfer[0]);
1318 * usbd_transfer_stop(sc->pxfer[0]);
1319 *
1320 * That way, if your code has many parts that will not
1321 * stop running under the same lock, in other words
1322 * "xfer_mtx", the usbd_transfer_start and
1323 * usbd_transfer_stop functions will simply return
1324 * when they detect a NULL pointer argument.
1325 *
1326 * To avoid any races we clear the "pxfer[]" pointer
1327 * while holding the private mutex of the driver:
1328 */
1329 pxfer[n_setup] = NULL;
1330
1331 USB_BUS_UNLOCK(info->bus);
1332 USB_XFER_UNLOCK(xfer);
1333
1334 usbd_transfer_drain(xfer);
1335
1336#if USB_HAVE_BUSDMA
1337 if (xfer->flags_int.bdma_enable)
1338 needs_delay = 1;
1339#endif
1340 /*
1341 * NOTE: default endpoint does not have an
1342 * interface, even if endpoint->iface_index == 0
1343 */
1344 USB_BUS_LOCK(info->bus);
1345 xfer->endpoint->refcount_alloc--;
1346 USB_BUS_UNLOCK(info->bus);
1347
1348 usb_callout_drain(&xfer->timeout_handle);
1349
1350 USB_BUS_LOCK(info->bus);
1351
1352 USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1353 "reference count\n"));
1354
1355 info->setup_refcount--;
1356
1357 if (info->setup_refcount == 0) {
1358 usbd_transfer_unsetup_sub(info,
1359 needs_delay);
1360 } else {
1361 USB_BUS_UNLOCK(info->bus);
1362 }
1363 }
1364}
1365
1366/*------------------------------------------------------------------------*
1367 * usbd_control_transfer_init - factored out code
1368 *
 1369 * In USB Device Mode we have to wait for the SETUP packet, which
 1370 * contains the "struct usb_device_request" structure, before we can
 1371 * transfer any data. In USB Host Mode we already have the SETUP
 1372 * packet at the moment the USB transfer is started. This leads us to
 1373 * having to set up the USB transfer at two different places in
1374 * time. This function just contains factored out control transfer
1375 * initialisation code, so that we don't duplicate the code.
1376 *------------------------------------------------------------------------*/
1377static void
1378usbd_control_transfer_init(struct usb_xfer *xfer)
1379{
1380 struct usb_device_request req;
1381
1382 /* copy out the USB request header */
1383
1384 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1385
1386 /* setup remainder */
1387
1388 xfer->flags_int.control_rem = UGETW(req.wLength);
1389
1390 /* copy direction to endpoint variable */
1391
1392 xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1393 xfer->endpointno |=
1394 (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1395}
1396
1397/*------------------------------------------------------------------------*
1398 * usbd_setup_ctrl_transfer
1399 *
1400 * This function handles initialisation of control transfers. Control
 1401 * transfers are special in that they can both transmit
1402 * and receive data.
1403 *
1404 * Return values:
1405 * 0: Success
1406 * Else: Failure
1407 *------------------------------------------------------------------------*/
1408static int
1409usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1410{
1411 usb_frlength_t len;
1412
1413 /* Check for control endpoint stall */
1414 if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1415 /* the control transfer is no longer active */
1416 xfer->flags_int.control_stall = 1;
1417 xfer->flags_int.control_act = 0;
1418 } else {
1419 /* don't stall control transfer by default */
1420 xfer->flags_int.control_stall = 0;
1421 }
1422
1423 /* Check for invalid number of frames */
1424 if (xfer->nframes > 2) {
1425 /*
1426 * If you need to split a control transfer, you
1427 * have to do one part at a time. Only with
 1428		 * non-control transfers can you do multiple
 1429		 * parts at a time.
1430 */
1431 DPRINTFN(0, "Too many frames: %u\n",
1432 (unsigned int)xfer->nframes);
1433 goto error;
1434 }
1435
1436 /*
1437 * Check if there is a control
1438 * transfer in progress:
1439 */
1440 if (xfer->flags_int.control_act) {
1441
1442 if (xfer->flags_int.control_hdr) {
1443
1444 /* clear send header flag */
1445
1446 xfer->flags_int.control_hdr = 0;
1447
1448 /* setup control transfer */
1449 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1450 usbd_control_transfer_init(xfer);
1451 }
1452 }
1453 /* get data length */
1454
1455 len = xfer->sumlen;
1456
1457 } else {
1458
1459 /* the size of the SETUP structure is hardcoded ! */
1460
1461 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1462 DPRINTFN(0, "Wrong framelength %u != %zu\n",
1463 xfer->frlengths[0], sizeof(struct
1464 usb_device_request));
1465 goto error;
1466 }
1467 /* check USB mode */
1468 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1469
1470 /* check number of frames */
1471 if (xfer->nframes != 1) {
1472 /*
1473 * We need to receive the setup
1474 * message first so that we know the
1475 * data direction!
1476 */
1477 DPRINTF("Misconfigured transfer\n");
1478 goto error;
1479 }
1480 /*
1481 * Set a dummy "control_rem" value. This
1482 * variable will be overwritten later by a
1483 * call to "usbd_control_transfer_init()" !
1484 */
1485 xfer->flags_int.control_rem = 0xFFFF;
1486 } else {
1487
1488 /* setup "endpoint" and "control_rem" */
1489
1490 usbd_control_transfer_init(xfer);
1491 }
1492
1493 /* set transfer-header flag */
1494
1495 xfer->flags_int.control_hdr = 1;
1496
1497 /* get data length */
1498
1499 len = (xfer->sumlen - sizeof(struct usb_device_request));
1500 }
1501
1502 /* check if there is a length mismatch */
1503
1504 if (len > xfer->flags_int.control_rem) {
1505 DPRINTFN(0, "Length (%d) greater than "
1506 "remaining length (%d)\n", len,
1507 xfer->flags_int.control_rem);
1508 goto error;
1509 }
1510 /* check if we are doing a short transfer */
1511
1512 if (xfer->flags.force_short_xfer) {
1513 xfer->flags_int.control_rem = 0;
1514 } else {
1515 if ((len != xfer->max_data_length) &&
1516 (len != xfer->flags_int.control_rem) &&
1517 (xfer->nframes != 1)) {
1518 DPRINTFN(0, "Short control transfer without "
1519 "force_short_xfer set\n");
1520 goto error;
1521 }
1522 xfer->flags_int.control_rem -= len;
1523 }
1524
1525 /* the status part is executed when "control_act" is 0 */
1526
1527 if ((xfer->flags_int.control_rem > 0) ||
1528 (xfer->flags.manual_status)) {
1529 /* don't execute the STATUS stage yet */
1530 xfer->flags_int.control_act = 1;
1531
1532 /* sanity check */
1533 if ((!xfer->flags_int.control_hdr) &&
1534 (xfer->nframes == 1)) {
1535 /*
1536 * This is not a valid operation!
1537 */
1538 DPRINTFN(0, "Invalid parameter "
1539 "combination\n");
1540 goto error;
1541 }
1542 } else {
1543 /* time to execute the STATUS stage */
1544 xfer->flags_int.control_act = 0;
1545 }
1546 return (0); /* success */
1547
1548error:
1549 return (1); /* failure */
1550}
1551
1552/*------------------------------------------------------------------------*
1553 * usbd_transfer_submit - start USB hardware for the given transfer
1554 *
1555 * This function should only be called from the USB callback.
1556 *------------------------------------------------------------------------*/
1557void
1558usbd_transfer_submit(struct usb_xfer *xfer)
1559{
1560 struct usb_xfer_root *info;
1561 struct usb_bus *bus;
1562 usb_frcount_t x;
1563
1564 info = xfer->xroot;
1565 bus = info->bus;
1566
1567 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1568 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1569 "read" : "write");
1570
1571#ifdef USB_DEBUG
1572 if (USB_DEBUG_VAR > 0) {
1573 USB_BUS_LOCK(bus);
1574
1575 usb_dump_endpoint(xfer->endpoint);
1576
1577 USB_BUS_UNLOCK(bus);
1578 }
1579#endif
1580
1581 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1582 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1583
1584 /* Only open the USB transfer once! */
1585 if (!xfer->flags_int.open) {
1586 xfer->flags_int.open = 1;
1587
1588 DPRINTF("open\n");
1589
1590 USB_BUS_LOCK(bus);
1591 (xfer->endpoint->methods->open) (xfer);
1592 USB_BUS_UNLOCK(bus);
1593 }
1594 /* set "transferring" flag */
1595 xfer->flags_int.transferring = 1;
1596
1597#if USB_HAVE_POWERD
1598 /* increment power reference */
1599 usbd_transfer_power_ref(xfer, 1);
1600#endif
1601 /*
1602 * Check if the transfer is waiting on a queue, most
1603 * frequently the "done_q":
1604 */
1605 if (xfer->wait_queue) {
1606 USB_BUS_LOCK(bus);
1607 usbd_transfer_dequeue(xfer);
1608 USB_BUS_UNLOCK(bus);
1609 }
1610 /* clear "did_dma_delay" flag */
1611 xfer->flags_int.did_dma_delay = 0;
1612
1613 /* clear "did_close" flag */
1614 xfer->flags_int.did_close = 0;
1615
1616#if USB_HAVE_BUSDMA
1617 /* clear "bdma_setup" flag */
1618 xfer->flags_int.bdma_setup = 0;
1619#endif
1620 /* by default we cannot cancel any USB transfer immediately */
1621 xfer->flags_int.can_cancel_immed = 0;
1622
1623 /* clear lengths and frame counts by default */
1624 xfer->sumlen = 0;
1625 xfer->actlen = 0;
1626 xfer->aframes = 0;
1627
1628 /* clear any previous errors */
1629 xfer->error = 0;
1630
1631 /* Check if the device is still alive */
1632 if (info->udev->state < USB_STATE_POWERED) {
1633 USB_BUS_LOCK(bus);
1634 /*
1635 * Must return cancelled error code else
1636 * device drivers can hang.
1637 */
1638 usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1639 USB_BUS_UNLOCK(bus);
1640 return;
1641 }
1642
1643 /* sanity check */
1644 if (xfer->nframes == 0) {
1645 if (xfer->flags.stall_pipe) {
1646 /*
1647 * Special case - want to stall without transferring
1648 * any data:
1649 */
1650 DPRINTF("xfer=%p nframes=0: stall "
1651 "or clear stall!\n", xfer);
1652 USB_BUS_LOCK(bus);
1653 xfer->flags_int.can_cancel_immed = 1;
1654 /* start the transfer */
1655 usb_command_wrapper(&xfer->endpoint->
1656 endpoint_q[xfer->stream_id], xfer);
1657 USB_BUS_UNLOCK(bus);
1658 return;
1659 }
1660 USB_BUS_LOCK(bus);
1661 usbd_transfer_done(xfer, USB_ERR_INVAL);
1662 USB_BUS_UNLOCK(bus);
1663 return;
1664 }
1665 /* compute some variables */
1666
1667 for (x = 0; x != xfer->nframes; x++) {
 1668		/* make a copy of the frlengths[] */
1669 xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1670 /* compute total transfer length */
1671 xfer->sumlen += xfer->frlengths[x];
1672 if (xfer->sumlen < xfer->frlengths[x]) {
1673 /* length wrapped around */
1674 USB_BUS_LOCK(bus);
1675 usbd_transfer_done(xfer, USB_ERR_INVAL);
1676 USB_BUS_UNLOCK(bus);
1677 return;
1678 }
1679 }
1680
1681 /* clear some internal flags */
1682
1683 xfer->flags_int.short_xfer_ok = 0;
1684 xfer->flags_int.short_frames_ok = 0;
1685
1686 /* check if this is a control transfer */
1687
1688 if (xfer->flags_int.control_xfr) {
1689
1690 if (usbd_setup_ctrl_transfer(xfer)) {
1691 USB_BUS_LOCK(bus);
1692 usbd_transfer_done(xfer, USB_ERR_STALLED);
1693 USB_BUS_UNLOCK(bus);
1694 return;
1695 }
1696 }
1697 /*
1698 * Setup filtered version of some transfer flags,
1699 * in case of data read direction
1700 */
1701 if (USB_GET_DATA_ISREAD(xfer)) {
1702
1703 if (xfer->flags.short_frames_ok) {
1704 xfer->flags_int.short_xfer_ok = 1;
1705 xfer->flags_int.short_frames_ok = 1;
1706 } else if (xfer->flags.short_xfer_ok) {
1707 xfer->flags_int.short_xfer_ok = 1;
1708
1709 /* check for control transfer */
1710 if (xfer->flags_int.control_xfr) {
1711 /*
1712 * 1) Control transfers do not support
1713 * reception of multiple short USB
1714 * frames in host mode and device side
 1715			 * mode, with the exception of:
1716 *
1717 * 2) Due to sometimes buggy device
1718 * side firmware we need to do a
1719 * STATUS stage in case of short
1720 * control transfers in USB host mode.
1721 * The STATUS stage then becomes the
1722 * "alt_next" to the DATA stage.
1723 */
1724 xfer->flags_int.short_frames_ok = 1;
1725 }
1726 }
1727 }
1728 /*
1729 * Check if BUS-DMA support is enabled and try to load virtual
1730 * buffers into DMA, if any:
1731 */
1732#if USB_HAVE_BUSDMA
1733 if (xfer->flags_int.bdma_enable) {
1734 /* insert the USB transfer last in the BUS-DMA queue */
1735 usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1736 return;
1737 }
1738#endif
1739 /*
1740 * Enter the USB transfer into the Host Controller or
1741 * Device Controller schedule:
1742 */
1743 usbd_pipe_enter(xfer);
1744}
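/*
 * Illustrative sketch (hypothetical driver callback, not part of this
 * file): "usbd_transfer_submit()" is commonly invoked from the
 * USB_ST_SETUP case of a transfer callback, for example:
 *
 *	static void
 *	xxx_read_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		switch (USB_GET_STATE(xfer)) {
 *		case USB_ST_TRANSFERRED:
 *			... process the received data, then fall through ...
 *		case USB_ST_SETUP:
 *	tr_setup:
 *			usbd_xfer_set_frame_len(xfer, 0,
 *			    usbd_xfer_max_len(xfer));
 *			usbd_transfer_submit(xfer);
 *			break;
 *		default:
 *			if (error != USB_ERR_CANCELLED) {
 *				usbd_xfer_set_stall(xfer);
 *				goto tr_setup;
 *			}
 *			break;
 *		}
 *	}
 */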
1745
1746/*------------------------------------------------------------------------*
1747 * usbd_pipe_enter - factored out code
1748 *------------------------------------------------------------------------*/
1749void
1750usbd_pipe_enter(struct usb_xfer *xfer)
1751{
1752 struct usb_endpoint *ep;
1753
1754 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1755
1756 USB_BUS_LOCK(xfer->xroot->bus);
1757
1758 ep = xfer->endpoint;
1759
1760 DPRINTF("enter\n");
1761
1762 /* the transfer can now be cancelled */
1763 xfer->flags_int.can_cancel_immed = 1;
1764
1765 /* enter the transfer */
1766 (ep->methods->enter) (xfer);
1767
1768 /* check for transfer error */
1769 if (xfer->error) {
1770 /* some error has happened */
1771 usbd_transfer_done(xfer, 0);
1772 USB_BUS_UNLOCK(xfer->xroot->bus);
1773 return;
1774 }
1775
1776 /* start the transfer */
1777 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer);
1778 USB_BUS_UNLOCK(xfer->xroot->bus);
1779}
1780
1781/*------------------------------------------------------------------------*
 1782 * usbd_transfer_start - start a USB transfer
1783 *
1784 * NOTE: Calling this function more than one time will only
1785 * result in a single transfer start, until the USB transfer
1786 * completes.
1787 *------------------------------------------------------------------------*/
1788void
1789usbd_transfer_start(struct usb_xfer *xfer)
1790{
1791 if (xfer == NULL) {
1792 /* transfer is gone */
1793 return;
1794 }
1795 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1796
1797 /* mark the USB transfer started */
1798
1799 if (!xfer->flags_int.started) {
1800 /* lock the BUS lock to avoid races updating flags_int */
1801 USB_BUS_LOCK(xfer->xroot->bus);
1802 xfer->flags_int.started = 1;
1803 USB_BUS_UNLOCK(xfer->xroot->bus);
1804 }
1805 /* check if the USB transfer callback is already transferring */
1806
1807 if (xfer->flags_int.transferring) {
1808 return;
1809 }
1810 USB_BUS_LOCK(xfer->xroot->bus);
1811 /* call the USB transfer callback */
1812 usbd_callback_ss_done_defer(xfer);
1813 USB_BUS_UNLOCK(xfer->xroot->bus);
1814}
1815
1816/*------------------------------------------------------------------------*
 1817 * usbd_transfer_stop - stop a USB transfer
1818 *
1819 * NOTE: Calling this function more than one time will only
1820 * result in a single transfer stop.
1821 * NOTE: When this function returns it is not safe to free nor
1822 * reuse any DMA buffers. See "usbd_transfer_drain()".
1823 *------------------------------------------------------------------------*/
1824void
1825usbd_transfer_stop(struct usb_xfer *xfer)
1826{
1827 struct usb_endpoint *ep;
1828
1829 if (xfer == NULL) {
1830 /* transfer is gone */
1831 return;
1832 }
1833 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1834
1835 /* check if the USB transfer was ever opened */
1836
1837 if (!xfer->flags_int.open) {
1838 if (xfer->flags_int.started) {
1839 /* nothing to do except clearing the "started" flag */
1840 /* lock the BUS lock to avoid races updating flags_int */
1841 USB_BUS_LOCK(xfer->xroot->bus);
1842 xfer->flags_int.started = 0;
1843 USB_BUS_UNLOCK(xfer->xroot->bus);
1844 }
1845 return;
1846 }
1847 /* try to stop the current USB transfer */
1848
1849 USB_BUS_LOCK(xfer->xroot->bus);
1850 /* override any previous error */
1851 xfer->error = USB_ERR_CANCELLED;
1852
1853 /*
1854	 * Clear "open" and "started" while both the private and USB locks
1855	 * are held so that we don't get a race updating "flags_int"
1856 */
1857 xfer->flags_int.open = 0;
1858 xfer->flags_int.started = 0;
1859
1860 /*
1861 * Check if we can cancel the USB transfer immediately.
1862 */
1863 if (xfer->flags_int.transferring) {
1864 if (xfer->flags_int.can_cancel_immed &&
1865 (!xfer->flags_int.did_close)) {
1866 DPRINTF("close\n");
1867 /*
1868 * The following will lead to an USB_ERR_CANCELLED
1869 * error code being passed to the USB callback.
1870 */
1871 (xfer->endpoint->methods->close) (xfer);
1872 /* only close once */
1873 xfer->flags_int.did_close = 1;
1874 } else {
1875 /* need to wait for the next done callback */
1876 }
1877 } else {
1878 DPRINTF("close\n");
1879
1880 /* close here and now */
1881 (xfer->endpoint->methods->close) (xfer);
1882
1883 /*
1884 * Any additional DMA delay is done by
1885 * "usbd_transfer_unsetup()".
1886 */
1887
1888 /*
1889 * Special case. Check if we need to restart a blocked
1890 * endpoint.
1891 */
1892 ep = xfer->endpoint;
1893
1894 /*
1895 * If the current USB transfer is completing we need
1896 * to start the next one:
1897 */
1898 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
1899 usb_command_wrapper(
1900 &ep->endpoint_q[xfer->stream_id], NULL);
1901 }
1902 }
1903
1904 USB_BUS_UNLOCK(xfer->xroot->bus);
1905}
1906
1907/*------------------------------------------------------------------------*
1908 * usbd_transfer_pending
1909 *
1910 * This function will check if an USB transfer is pending which is a
1911 * little bit complicated!
1912 * Return values:
1913 * 0: Not pending
1914 * 1: Pending: The USB transfer will receive a callback in the future.
1915 *------------------------------------------------------------------------*/
1916uint8_t
1917usbd_transfer_pending(struct usb_xfer *xfer)
1918{
1919 struct usb_xfer_root *info;
1920 struct usb_xfer_queue *pq;
1921
1922 if (xfer == NULL) {
1923 /* transfer is gone */
1924 return (0);
1925 }
1926 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1927
1928 if (xfer->flags_int.transferring) {
1929 /* trivial case */
1930 return (1);
1931 }
1932 USB_BUS_LOCK(xfer->xroot->bus);
1933 if (xfer->wait_queue) {
1934 /* we are waiting on a queue somewhere */
1935 USB_BUS_UNLOCK(xfer->xroot->bus);
1936 return (1);
1937 }
1938 info = xfer->xroot;
1939 pq = &info->done_q;
1940
1941 if (pq->curr == xfer) {
1942 /* we are currently scheduled for callback */
1943 USB_BUS_UNLOCK(xfer->xroot->bus);
1944 return (1);
1945 }
1946 /* we are not pending */
1947 USB_BUS_UNLOCK(xfer->xroot->bus);
1948 return (0);
1949}
1950
1951/*------------------------------------------------------------------------*
1952 * usbd_transfer_drain
1953 *
1954 * This function will stop the USB transfer and wait for any
1955 * additional BUS-DMA and HW-DMA operations to complete. Buffers that
1956 * are loaded into DMA can safely be freed or reused after this
1957 * function has returned.
1958 *------------------------------------------------------------------------*/
1959void
1960usbd_transfer_drain(struct usb_xfer *xfer)
1961{
1962 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1963 "usbd_transfer_drain can sleep!");
1964
1965 if (xfer == NULL) {
1966 /* transfer is gone */
1967 return;
1968 }
1969 if (xfer->xroot->xfer_mtx != &Giant) {
1970 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
1971 }
1972 USB_XFER_LOCK(xfer);
1973
1974 usbd_transfer_stop(xfer);
1975
1976 while (usbd_transfer_pending(xfer) ||
1977 xfer->flags_int.doing_callback) {
1978
1979 /*
1980		 * The callback is allowed to drop its
1981 * transfer mutex. In that case checking only
1982 * "usbd_transfer_pending()" is not enough to tell if
1983 * the USB transfer is fully drained. We also need to
1984 * check the internal "doing_callback" flag.
1985 */
1986 xfer->flags_int.draining = 1;
1987
1988 /*
1989 * Wait until the current outstanding USB
1990 * transfer is complete !
1991 */
1992 cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
1993 }
1994 USB_XFER_UNLOCK(xfer);
1995}
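/*
 * A minimal usage sketch, assuming a hypothetical driver softc "sc"
 * whose mutex "sc_mtx" was passed to "usbd_transfer_setup()". Note
 * that "usbd_transfer_drain()" grabs the transfer mutex itself and
 * also calls "usbd_transfer_stop()" internally:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	usbd_transfer_stop(sc->sc_xfer[0]);
 *	mtx_unlock(&sc->sc_mtx);
 *
 *	usbd_transfer_drain(sc->sc_xfer[0]);
 *	// any DMA buffers used by the transfer may now be freed or reused
 */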
1996
1997struct usb_page_cache *
1998usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
1999{
2000 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2001
2002 return (&xfer->frbuffers[frindex]);
2003}
2004
2005void *
2006usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex)
2007{
2008 struct usb_page_search page_info;
2009
2010 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2011
2012 usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info);
2013 return (page_info.buffer);
2014}
2015
2016/*------------------------------------------------------------------------*
2017 * usbd_xfer_get_fps_shift
2018 *
2019 * The following function is only useful for isochronous transfers. It
2020 * returns how many times the frame execution rate has been shifted
2021 * down.
2022 *
2023 * Return value:
2024 * Success: 0..3
2025 * Failure: 0
2026 *------------------------------------------------------------------------*/
2027uint8_t
2028usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2029{
2030 return (xfer->fps_shift);
2031}
2032
2033usb_frlength_t
2034usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2035{
2036 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2037
2038 return (xfer->frlengths[frindex]);
2039}
2040
2041/*------------------------------------------------------------------------*
2042 * usbd_xfer_set_frame_data
2043 *
2044 * This function sets the pointer to the buffer that should be
2045 * loaded directly into DMA for the given USB frame. Passing "ptr"
2046 * equal to NULL while the corresponding "frlength" is greater
2047 * than zero gives undefined results!
2048 *------------------------------------------------------------------------*/
2049void
2050usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2051 void *ptr, usb_frlength_t len)
2052{
2053 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2054
2055 /* set virtual address to load and length */
2056 xfer->frbuffers[frindex].buffer = ptr;
2057 usbd_xfer_set_frame_len(xfer, frindex, len);
2058}
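/*
 * A minimal usage sketch: pointing a frame at a driver owned buffer
 * from the USB_ST_SETUP state of a callback. This assumes the transfer
 * was configured with the "ext_buffer" flag set; "sc", "sc_rx_buf" and
 * MY_BUFSIZE are hypothetical driver fields:
 *
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_frame_data(xfer, 0, sc->sc_rx_buf,
 *		    MY_BUFSIZE);
 *		usbd_xfer_set_frames(xfer, 1);
 *		usbd_transfer_submit(xfer);
 *		break;
 */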
2059
2060void
2061usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2062 void **ptr, int *len)
2063{
2064 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2065
2066 if (ptr != NULL)
2067 *ptr = xfer->frbuffers[frindex].buffer;
2068 if (len != NULL)
2069 *len = xfer->frlengths[frindex];
2070}
2071
2072/*------------------------------------------------------------------------*
2073 * usbd_xfer_old_frame_length
2074 *
2075 * This function returns the frame length of the given frame at the
2076 * time the transfer was submitted. This function can be used to
2077 * compute the starting data pointer of the next isochronous frame
2078 * when an isochronous transfer has completed.
2079 *------------------------------------------------------------------------*/
2080usb_frlength_t
2081usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2082{
2083 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2084
2085 return (xfer->frlengths[frindex + xfer->max_frame_count]);
2086}
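/*
 * A minimal usage sketch: walking the frames of a completed
 * isochronous transfer that uses its internal local buffer. The data
 * of each frame starts at the sum of the originally submitted lengths
 * of the frames before it, even if the hardware returned shorter
 * actual lengths:
 *
 *	buf = usbd_xfer_get_frame_buffer(xfer, 0);
 *	usbd_xfer_status(xfer, NULL, NULL, &aframes, NULL);
 *	offset = 0;
 *	for (n = 0; n != aframes; n++) {
 *		len = usbd_xfer_frame_len(xfer, n);	// actual length
 *		// consume "len" bytes at USB_ADD_BYTES(buf, offset)
 *		offset += usbd_xfer_old_frame_length(xfer, n);
 *	}
 */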
2087
2088void
2089usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2090 int *nframes)
2091{
2092 if (actlen != NULL)
2093 *actlen = xfer->actlen;
2094 if (sumlen != NULL)
2095 *sumlen = xfer->sumlen;
2096 if (aframes != NULL)
2097 *aframes = xfer->aframes;
2098 if (nframes != NULL)
2099 *nframes = xfer->nframes;
2100}
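/*
 * A minimal usage sketch: reading back the completion status from the
 * USB_ST_TRANSFERRED state of a callback. Only the actual length is
 * needed here, so the other output pointers are passed as NULL:
 *
 *	case USB_ST_TRANSFERRED:
 *		usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *		// "actlen" bytes were transferred and can be consumed here
 *		break;
 */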
2101
2102/*------------------------------------------------------------------------*
2103 * usbd_xfer_set_frame_offset
2104 *
2105 * This function sets the frame data buffer offset relative to the beginning
2106 * of the USB DMA buffer allocated for this USB transfer.
2107 *------------------------------------------------------------------------*/
2108void
2109usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2110 usb_frcount_t frindex)
2111{
2112 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2113 "when the USB buffer is external\n"));
2114 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2115
2116 /* set virtual address to load */
2117 xfer->frbuffers[frindex].buffer =
2118 USB_ADD_BYTES(xfer->local_buffer, offset);
2119}
2120
2121void
2122usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2123{
2124 xfer->interval = i;
2125}
2126
2127void
2128usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2129{
2130 xfer->timeout = t;
2131}
2132
2133void
2134usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2135{
2136 xfer->nframes = n;
2137}
2138
2139usb_frcount_t
2140usbd_xfer_max_frames(struct usb_xfer *xfer)
2141{
2142 return (xfer->max_frame_count);
2143}
2144
2145usb_frlength_t
2146usbd_xfer_max_len(struct usb_xfer *xfer)
2147{
2148 return (xfer->max_data_length);
2149}
2150
2151usb_frlength_t
2152usbd_xfer_max_framelen(struct usb_xfer *xfer)
2153{
2154 return (xfer->max_frame_size);
2155}
2156
2157void
2158usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2159 usb_frlength_t len)
2160{
2161 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2162
2163 xfer->frlengths[frindex] = len;
2164}
2165
2166/*------------------------------------------------------------------------*
2167 * usb_callback_proc - factored out code
2168 *
2169 * This function performs USB callbacks.
2170 *------------------------------------------------------------------------*/
2171static void
2172usb_callback_proc(struct usb_proc_msg *_pm)
2173{
2174 struct usb_done_msg *pm = (void *)_pm;
2175 struct usb_xfer_root *info = pm->xroot;
2176
2177 /* Change locking order */
2178 USB_BUS_UNLOCK(info->bus);
2179
2180 /*
2181 * We exploit the fact that the mutex is the same for all
2182 * callbacks that will be called from this thread:
2183 */
2184 mtx_lock(info->xfer_mtx);
2185 USB_BUS_LOCK(info->bus);
2186
2187 /* Continue where we lost track */
2188 usb_command_wrapper(&info->done_q,
2189 info->done_q.curr);
2190
2191 mtx_unlock(info->xfer_mtx);
2192}
2193
2194/*------------------------------------------------------------------------*
2195 * usbd_callback_ss_done_defer
2196 *
2197 * This function will defer the start, stop and done callback to the
2198 * correct thread.
2199 *------------------------------------------------------------------------*/
2200static void
2201usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2202{
2203 struct usb_xfer_root *info = xfer->xroot;
2204 struct usb_xfer_queue *pq = &info->done_q;
2205
2206 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2207
2208 if (pq->curr != xfer) {
2209 usbd_transfer_enqueue(pq, xfer);
2210 }
2211 if (!pq->recurse_1) {
2212
2213 /*
2214 * We have to postpone the callback due to the fact we
2215 * will have a Lock Order Reversal, LOR, if we try to
2216 * proceed !
2217 */
2218 if (usb_proc_msignal(info->done_p,
2219 &info->done_m[0], &info->done_m[1])) {
2220 /* ignore */
2221 }
2222 } else {
2223 /* clear second recurse flag */
2224 pq->recurse_2 = 0;
2225 }
2226 return;
2227
2228}
2229
2230/*------------------------------------------------------------------------*
2231 * usbd_callback_wrapper
2232 *
2233 * This is a wrapper for USB callbacks. This wrapper does some
2234 * auto-magic things like figuring out if we can call the callback
2235 * directly from the current context or if we need to wake up the
2236 * interrupt process.
2237 *------------------------------------------------------------------------*/
2238static void
2239usbd_callback_wrapper(struct usb_xfer_queue *pq)
2240{
2241 struct usb_xfer *xfer = pq->curr;
2242 struct usb_xfer_root *info = xfer->xroot;
2243
2244 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2245 if (!mtx_owned(info->xfer_mtx) && !SCHEDULER_STOPPED()) {
2246 /*
2247 * Cases that end up here:
2248 *
2249 * 5) HW interrupt done callback or other source.
2250 */
2251 DPRINTFN(3, "case 5\n");
2252
2253 /*
2254 * We have to postpone the callback due to the fact we
2255 * will have a Lock Order Reversal, LOR, if we try to
2256 * proceed !
2257 */
2258 if (usb_proc_msignal(info->done_p,
2259 &info->done_m[0], &info->done_m[1])) {
2260 /* ignore */
2261 }
2262 return;
2263 }
2264 /*
2265 * Cases that end up here:
2266 *
2267 * 1) We are starting a transfer
2268 * 2) We are prematurely calling back a transfer
2269 * 3) We are stopping a transfer
2270 * 4) We are doing an ordinary callback
2271 */
2272 DPRINTFN(3, "case 1-4\n");
2273 /* get next USB transfer in the queue */
2274 info->done_q.curr = NULL;
2275
2276 /* set flag in case of drain */
2277 xfer->flags_int.doing_callback = 1;
2278
2279 USB_BUS_UNLOCK(info->bus);
2280 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2281
2282 /* set correct USB state for callback */
2283 if (!xfer->flags_int.transferring) {
2284 xfer->usb_state = USB_ST_SETUP;
2285 if (!xfer->flags_int.started) {
2286 /* we got stopped before we even got started */
2287 USB_BUS_LOCK(info->bus);
2288 goto done;
2289 }
2290 } else {
2291
2292 if (usbd_callback_wrapper_sub(xfer)) {
2293 /* the callback has been deferred */
2294 USB_BUS_LOCK(info->bus);
2295 goto done;
2296 }
2297#if USB_HAVE_POWERD
2298 /* decrement power reference */
2299 usbd_transfer_power_ref(xfer, -1);
2300#endif
2301 xfer->flags_int.transferring = 0;
2302
2303 if (xfer->error) {
2304 xfer->usb_state = USB_ST_ERROR;
2305 } else {
2306 /* set transferred state */
2307 xfer->usb_state = USB_ST_TRANSFERRED;
2308#if USB_HAVE_BUSDMA
2309 /* sync DMA memory, if any */
2310 if (xfer->flags_int.bdma_enable &&
2311 (!xfer->flags_int.bdma_no_post_sync)) {
2312 usb_bdma_post_sync(xfer);
2313 }
2314#endif
2315 }
2316 }
2317
2318#if USB_HAVE_PF
2319 if (xfer->usb_state != USB_ST_SETUP)
2320 usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2321#endif
2322 /* call processing routine */
2323 (xfer->callback) (xfer, xfer->error);
2324
2325 /* pickup the USB mutex again */
2326 USB_BUS_LOCK(info->bus);
2327
2328 /*
2329	 * Check if we got started after we got cancelled, but
2330 * before we managed to do the callback.
2331 */
2332 if ((!xfer->flags_int.open) &&
2333 (xfer->flags_int.started) &&
2334 (xfer->usb_state == USB_ST_ERROR)) {
2335 /* clear flag in case of drain */
2336 xfer->flags_int.doing_callback = 0;
2337		/* try to loop, but not recursively */
2338 usb_command_wrapper(&info->done_q, xfer);
2339 return;
2340 }
2341
2342done:
2343 /* clear flag in case of drain */
2344 xfer->flags_int.doing_callback = 0;
2345
2346 /*
2347 * Check if we are draining.
2348 */
2349 if (xfer->flags_int.draining &&
2350 (!xfer->flags_int.transferring)) {
2351 /* "usbd_transfer_drain()" is waiting for end of transfer */
2352 xfer->flags_int.draining = 0;
2353 cv_broadcast(&info->cv_drain);
2354 }
2355
2356 /* do the next callback, if any */
2357 usb_command_wrapper(&info->done_q,
2358 info->done_q.curr);
2359}
2360
2361/*------------------------------------------------------------------------*
2362 * usb_dma_delay_done_cb
2363 *
2364 * This function is called when the DMA delay has been executed, and
2365 * will make sure that the callback is called to complete the USB
2366 * transfer. This code path is usually only used when there is an USB
2367 * error like USB_ERR_CANCELLED.
2368 *------------------------------------------------------------------------*/
2369void
2370usb_dma_delay_done_cb(struct usb_xfer *xfer)
2371{
2372 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2373
2374 DPRINTFN(3, "Completed %p\n", xfer);
2375
2376 /* queue callback for execution, again */
2377 usbd_transfer_done(xfer, 0);
2378}
2379
2380/*------------------------------------------------------------------------*
2381 * usbd_transfer_dequeue
2382 *
2383 * - This function is used to remove an USB transfer from a USB
2384 * transfer queue.
2385 *
2386 * - This function can be called multiple times in a row.
2387 *------------------------------------------------------------------------*/
2388void
2389usbd_transfer_dequeue(struct usb_xfer *xfer)
2390{
2391 struct usb_xfer_queue *pq;
2392
2393 pq = xfer->wait_queue;
2394 if (pq) {
2395 TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2396 xfer->wait_queue = NULL;
2397 }
2398}
2399
2400/*------------------------------------------------------------------------*
2401 * usbd_transfer_enqueue
2402 *
2403 * - This function is used to insert an USB transfer into a USB
2404 * transfer queue.
2405 *
2406 * - This function can be called multiple times in a row.
2407 *------------------------------------------------------------------------*/
2408void
2409usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2410{
2411 /*
2412 * Insert the USB transfer into the queue, if it is not
2413 * already on a USB transfer queue:
2414 */
2415 if (xfer->wait_queue == NULL) {
2416 xfer->wait_queue = pq;
2417 TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2418 }
2419}
2420
2421/*------------------------------------------------------------------------*
2422 * usbd_transfer_done
2423 *
2424 * - This function is used to remove an USB transfer from the busdma,
2425 * pipe or interrupt queue.
2426 *
2427 * - This function is used to queue the USB transfer on the done
2428 * queue.
2429 *
2430 * - This function is used to stop any USB transfer timeouts.
2431 *------------------------------------------------------------------------*/
2432void
2433usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2434{
2435 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2436
2437 DPRINTF("err=%s\n", usbd_errstr(error));
2438
2439 /*
2440 * If we are not transferring then just return.
2441 * This can happen during transfer cancel.
2442 */
2443 if (!xfer->flags_int.transferring) {
2444 DPRINTF("not transferring\n");
2445 /* end of control transfer, if any */
2446 xfer->flags_int.control_act = 0;
2447 return;
2448 }
2449 /* only set transfer error if not already set */
2450 if (!xfer->error) {
2451 xfer->error = error;
2452 }
2453 /* stop any callouts */
2454 usb_callout_stop(&xfer->timeout_handle);
2455
2456 /*
2457 * If we are waiting on a queue, just remove the USB transfer
2458 * from the queue, if any. We should have the required locks
2459 * locked to do the remove when this function is called.
2460 */
2461 usbd_transfer_dequeue(xfer);
2462
2463#if USB_HAVE_BUSDMA
2464 if (mtx_owned(xfer->xroot->xfer_mtx)) {
2465 struct usb_xfer_queue *pq;
2466
2467 /*
2468 * If the private USB lock is not locked, then we assume
2469 * that the BUS-DMA load stage has been passed:
2470 */
2471 pq = &xfer->xroot->dma_q;
2472
2473 if (pq->curr == xfer) {
2474 /* start the next BUS-DMA load, if any */
2475 usb_command_wrapper(pq, NULL);
2476 }
2477 }
2478#endif
2479 /* keep some statistics */
2480 if (xfer->error) {
2481 xfer->xroot->bus->stats_err.uds_requests
2482 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2483 } else {
2484 xfer->xroot->bus->stats_ok.uds_requests
2485 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2486 }
2487
2488 /* call the USB transfer callback */
2489 usbd_callback_ss_done_defer(xfer);
2490}
2491
2492/*------------------------------------------------------------------------*
2493 * usbd_transfer_start_cb
2494 *
2495 * This function is called to start the USB transfer when
2496 * "xfer->interval" is greater than zero, and the endpoint type is
2497 * BULK or CONTROL.
2498 *------------------------------------------------------------------------*/
2499static void
2500usbd_transfer_start_cb(void *arg)
2501{
2502 struct usb_xfer *xfer = arg;
2503 struct usb_endpoint *ep = xfer->endpoint;
2504
2505 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2506
2507 DPRINTF("start\n");
2508
2509#if USB_HAVE_PF
2510 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2511#endif
2512
2513 /* the transfer can now be cancelled */
2514 xfer->flags_int.can_cancel_immed = 1;
2515
2516 /* start USB transfer, if no error */
2517 if (xfer->error == 0)
2518 (ep->methods->start) (xfer);
2519
2520 /* check for transfer error */
2521 if (xfer->error) {
2522 /* some error has happened */
2523 usbd_transfer_done(xfer, 0);
2524 }
2525}
2526
2527/*------------------------------------------------------------------------*
2528 * usbd_xfer_set_stall
2529 *
2530 * This function is used to set the stall flag outside the
2531 * callback. This function is NULL safe.
2532 *------------------------------------------------------------------------*/
2533void
2534usbd_xfer_set_stall(struct usb_xfer *xfer)
2535{
2536 if (xfer == NULL) {
2537 /* tearing down */
2538 return;
2539 }
2540 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2541
2542 /* avoid any races by locking the USB mutex */
2543 USB_BUS_LOCK(xfer->xroot->bus);
2544 xfer->flags.stall_pipe = 1;
2545 USB_BUS_UNLOCK(xfer->xroot->bus);
2546}
2547
2548int
2549usbd_xfer_is_stalled(struct usb_xfer *xfer)
2550{
2551 return (xfer->endpoint->is_stalled);
2552}
2553
2554/*------------------------------------------------------------------------*
2555 * usbd_transfer_clear_stall
2556 *
2557 * This function is used to clear the stall flag outside the
2558 * callback. This function is NULL safe.
2559 *------------------------------------------------------------------------*/
2560void
2561usbd_transfer_clear_stall(struct usb_xfer *xfer)
2562{
2563 if (xfer == NULL) {
2564 /* tearing down */
2565 return;
2566 }
2567 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2568
2569 /* avoid any races by locking the USB mutex */
2570 USB_BUS_LOCK(xfer->xroot->bus);
2571
2572 xfer->flags.stall_pipe = 0;
2573
2574 USB_BUS_UNLOCK(xfer->xroot->bus);
2575}
2576
2577/*------------------------------------------------------------------------*
2578 * usbd_pipe_start
2579 *
2580 * This function is used to add an USB transfer to the pipe transfer list.
2581 *------------------------------------------------------------------------*/
2582void
2583usbd_pipe_start(struct usb_xfer_queue *pq)
2584{
2585 struct usb_endpoint *ep;
2586 struct usb_xfer *xfer;
2587 uint8_t type;
2588
2589 xfer = pq->curr;
2590 ep = xfer->endpoint;
2591
2592 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2593
2594 /*
2595 * If the endpoint is already stalled we do nothing !
2596 */
2597 if (ep->is_stalled) {
2598 return;
2599 }
2600 /*
2601 * Check if we are supposed to stall the endpoint:
2602 */
2603 if (xfer->flags.stall_pipe) {
2604 struct usb_device *udev;
2605 struct usb_xfer_root *info;
2606
2607 /* clear stall command */
2608 xfer->flags.stall_pipe = 0;
2609
2610 /* get pointer to USB device */
2611 info = xfer->xroot;
2612 udev = info->udev;
2613
2614 /*
2615 * Only stall BULK and INTERRUPT endpoints.
2616 */
2617 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2618 if ((type == UE_BULK) ||
2619 (type == UE_INTERRUPT)) {
2620 uint8_t did_stall;
2621
2622 did_stall = 1;
2623
2624 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2625 (udev->bus->methods->set_stall) (
2626 udev, ep, &did_stall);
2627 } else if (udev->ctrl_xfer[1]) {
2628 info = udev->ctrl_xfer[1]->xroot;
2629 usb_proc_msignal(
2630 USB_BUS_NON_GIANT_PROC(info->bus),
2631 &udev->cs_msg[0], &udev->cs_msg[1]);
2632 } else {
2633 /* should not happen */
2634 DPRINTFN(0, "No stall handler\n");
2635 }
2636 /*
2637 * Check if we should stall. Some USB hardware
2638 * handles set- and clear-stall in hardware.
2639 */
2640 if (did_stall) {
2641 /*
2642 * The transfer will be continued when
2643 * the clear-stall control endpoint
2644 * message is received.
2645 */
2646 ep->is_stalled = 1;
2647 return;
2648 }
2649 } else if (type == UE_ISOCHRONOUS) {
2650
2651 /*
2652 * Make sure any FIFO overflow or other FIFO
2653 * error conditions go away by resetting the
2654 * endpoint FIFO through the clear stall
2655 * method.
2656 */
2657 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2658 (udev->bus->methods->clear_stall) (udev, ep);
2659 }
2660 }
2661 }
2662 /* Set or clear stall complete - special case */
2663 if (xfer->nframes == 0) {
2664 /* we are complete */
2665 xfer->aframes = 0;
2666 usbd_transfer_done(xfer, 0);
2667 return;
2668 }
2669 /*
2670 * Handled cases:
2671 *
2672 * 1) Start the first transfer queued.
2673 *
2674 * 2) Re-start the current USB transfer.
2675 */
2676 /*
2677 * Check if there should be any
2678 * pre transfer start delay:
2679 */
2680 if (xfer->interval > 0) {
2681 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2682 if ((type == UE_BULK) ||
2683 (type == UE_CONTROL)) {
2684 usbd_transfer_timeout_ms(xfer,
2685 &usbd_transfer_start_cb,
2686 xfer->interval);
2687 return;
2688 }
2689 }
2690 DPRINTF("start\n");
2691
2692#if USB_HAVE_PF
2693 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2694#endif
2695 /* the transfer can now be cancelled */
2696 xfer->flags_int.can_cancel_immed = 1;
2697
2698 /* start USB transfer, if no error */
2699 if (xfer->error == 0)
2700 (ep->methods->start) (xfer);
2701
2702 /* check for transfer error */
2703 if (xfer->error) {
2704 /* some error has happened */
2705 usbd_transfer_done(xfer, 0);
2706 }
2707}
2708
2709/*------------------------------------------------------------------------*
2710 * usbd_transfer_timeout_ms
2711 *
2712 * This function is used to setup a timeout on the given USB
2713 * transfer. The callback given by
2714 * "cb" will get called after "ms" milliseconds.
2715 *------------------------------------------------------------------------*/
2716void
2717usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2718 void (*cb) (void *arg), usb_timeout_t ms)
2719{
2720 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2721
2722 /* defer delay */
2723 usb_callout_reset(&xfer->timeout_handle,
2724 USB_MS_TO_TICKS(ms), cb, xfer);
2725}
2726
2727/*------------------------------------------------------------------------*
2728 * usbd_callback_wrapper_sub
2729 *
2730 * - This function will update variables in an USB transfer after
2731 * the USB transfer is complete.
2732 *
2733 * - This function is used to start the next USB transfer on the
2734 * ep transfer queue, if any.
2735 *
2736 * NOTE: In some special cases the USB transfer will not be removed from
2737 * the pipe queue, but remain first. To enforce USB transfer removal call
2738 * this function passing the error code "USB_ERR_CANCELLED".
2739 *
2740 * Return values:
2741 * 0: Success.
2742 * Else: The callback has been deferred.
2743 *------------------------------------------------------------------------*/
2744static uint8_t
2745usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2746{
2747 struct usb_endpoint *ep;
2748 struct usb_bus *bus;
2749 usb_frcount_t x;
2750
2751 bus = xfer->xroot->bus;
2752
2753 if ((!xfer->flags_int.open) &&
2754 (!xfer->flags_int.did_close)) {
2755 DPRINTF("close\n");
2756 USB_BUS_LOCK(bus);
2757 (xfer->endpoint->methods->close) (xfer);
2758 USB_BUS_UNLOCK(bus);
2759 /* only close once */
2760 xfer->flags_int.did_close = 1;
2761 return (1); /* wait for new callback */
2762 }
2763 /*
2764 * If we have a non-hardware induced error we
2765 * need to do the DMA delay!
2766 */
2767 if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2768 (xfer->error == USB_ERR_CANCELLED ||
2769 xfer->error == USB_ERR_TIMEOUT ||
2770 bus->methods->start_dma_delay != NULL)) {
2771
2772 usb_timeout_t temp;
2773
2774 /* only delay once */
2775 xfer->flags_int.did_dma_delay = 1;
2776
2777 /* we can not cancel this delay */
2778 xfer->flags_int.can_cancel_immed = 0;
2779
2780 temp = usbd_get_dma_delay(xfer->xroot->udev);
2781
2782 DPRINTFN(3, "DMA delay, %u ms, "
2783 "on %p\n", temp, xfer);
2784
2785 if (temp != 0) {
2786 USB_BUS_LOCK(bus);
2787 /*
2788 * Some hardware solutions have dedicated
2789 * events when it is safe to free DMA'ed
2790 * memory. For the other hardware platforms we
2791 * use a static delay.
2792 */
2793 if (bus->methods->start_dma_delay != NULL) {
2794 (bus->methods->start_dma_delay) (xfer);
2795 } else {
2796 usbd_transfer_timeout_ms(xfer,
2797 (void (*)(void *))&usb_dma_delay_done_cb,
2798 temp);
2799 }
2800 USB_BUS_UNLOCK(bus);
2801 return (1); /* wait for new callback */
2802 }
2803 }
2804 /* check actual number of frames */
2805 if (xfer->aframes > xfer->nframes) {
2806 if (xfer->error == 0) {
2807 panic("%s: actual number of frames, %d, is "
2808 "greater than initial number of frames, %d\n",
2809 __FUNCTION__, xfer->aframes, xfer->nframes);
2810 } else {
2811 /* just set some valid value */
2812 xfer->aframes = xfer->nframes;
2813 }
2814 }
2815 /* compute actual length */
2816 xfer->actlen = 0;
2817
2818 for (x = 0; x != xfer->aframes; x++) {
2819 xfer->actlen += xfer->frlengths[x];
2820 }
2821
2822 /*
2823 * Frames that were not transferred get zero actual length in
2824 * case the USB device driver does not check the actual number
2825 * of frames transferred, "xfer->aframes":
2826 */
2827 for (; x < xfer->nframes; x++) {
2828 usbd_xfer_set_frame_len(xfer, x, 0);
2829 }
2830
2831 /* check actual length */
2832 if (xfer->actlen > xfer->sumlen) {
2833 if (xfer->error == 0) {
2834 panic("%s: actual length, %d, is greater than "
2835 "initial length, %d\n",
2836 __FUNCTION__, xfer->actlen, xfer->sumlen);
2837 } else {
2838 /* just set some valid value */
2839 xfer->actlen = xfer->sumlen;
2840 }
2841 }
2842 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2843 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2844 xfer->aframes, xfer->nframes);
2845
2846 if (xfer->error) {
2847 /* end of control transfer, if any */
2848 xfer->flags_int.control_act = 0;
2849
2850 /* check if we should block the execution queue */
2851 if ((xfer->error != USB_ERR_CANCELLED) &&
2852 (xfer->flags.pipe_bof)) {
2853 DPRINTFN(2, "xfer=%p: Block On Failure "
2854 "on endpoint=%p\n", xfer, xfer->endpoint);
2855 goto done;
2856 }
2857 } else {
2858 /* check for short transfers */
2859 if (xfer->actlen < xfer->sumlen) {
2860
2861 /* end of control transfer, if any */
2862 xfer->flags_int.control_act = 0;
2863
2864 if (!xfer->flags_int.short_xfer_ok) {
2865 xfer->error = USB_ERR_SHORT_XFER;
2866 if (xfer->flags.pipe_bof) {
2867 DPRINTFN(2, "xfer=%p: Block On Failure on "
2868 "Short Transfer on endpoint %p.\n",
2869 xfer, xfer->endpoint);
2870 goto done;
2871 }
2872 }
2873 } else {
2874 /*
2875 * Check if we are in the middle of a
2876 * control transfer:
2877 */
2878 if (xfer->flags_int.control_act) {
2879 DPRINTFN(5, "xfer=%p: Control transfer "
2880 "active on endpoint=%p\n", xfer, xfer->endpoint);
2881 goto done;
2882 }
2883 }
2884 }
2885
2886 ep = xfer->endpoint;
2887
2888 /*
2889 * If the current USB transfer is completing we need to start the
2890 * next one:
2891 */
2892 USB_BUS_LOCK(bus);
2893 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
2894 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);
2895
2896 if (ep->endpoint_q[xfer->stream_id].curr != NULL ||
2897 TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) {
2898 /* there is another USB transfer waiting */
2899 } else {
2900 /* this is the last USB transfer */
2901 /* clear isochronous sync flag */
2902 xfer->endpoint->is_synced = 0;
2903 }
2904 }
2905 USB_BUS_UNLOCK(bus);
2906done:
2907 return (0);
2908}
2909
2910/*------------------------------------------------------------------------*
2911 * usb_command_wrapper
2912 *
2913 * This function is used to execute commands non-recursively on an USB
2914 * transfer.
2915 *------------------------------------------------------------------------*/
2916void
2917usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2918{
2919 if (xfer) {
2920 /*
2921 * If the transfer is not already processing,
2922 * queue it!
2923 */
2924 if (pq->curr != xfer) {
2925 usbd_transfer_enqueue(pq, xfer);
2926 if (pq->curr != NULL) {
2927 /* something is already processing */
2928 DPRINTFN(6, "busy %p\n", pq->curr);
2929 return;
2930 }
2931 }
2932 } else {
2933 /* Get next element in queue */
2934 pq->curr = NULL;
2935 }
2936
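	/*
	 * "recurse_1" is set while the command loop below is running.
	 * A nested call made from within "pq->command" will not
	 * recurse; instead it clears "recurse_2", which makes the loop
	 * below run one more iteration.
	 */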
2937 if (!pq->recurse_1) {
2938
2939 do {
2940
2941 /* set both recurse flags */
2942 pq->recurse_1 = 1;
2943 pq->recurse_2 = 1;
2944
2945 if (pq->curr == NULL) {
2946 xfer = TAILQ_FIRST(&pq->head);
2947 if (xfer) {
2948 TAILQ_REMOVE(&pq->head, xfer,
2949 wait_entry);
2950 xfer->wait_queue = NULL;
2951 pq->curr = xfer;
2952 } else {
2953 break;
2954 }
2955 }
2956 DPRINTFN(6, "cb %p (enter)\n", pq->curr);
2957 (pq->command) (pq);
2958 DPRINTFN(6, "cb %p (leave)\n", pq->curr);
2959
2960 } while (!pq->recurse_2);
2961
2962 /* clear first recurse flag */
2963 pq->recurse_1 = 0;
2964
2965 } else {
2966 /* clear second recurse flag */
2967 pq->recurse_2 = 0;
2968 }
2969}
2970
2971/*------------------------------------------------------------------------*
2972 * usbd_ctrl_transfer_setup
2973 *
2974 * This function is used to setup the default USB control endpoint
2975 * transfer.
2976 *------------------------------------------------------------------------*/
2977void
2978usbd_ctrl_transfer_setup(struct usb_device *udev)
2979{
2980 struct usb_xfer *xfer;
2981 uint8_t no_resetup;
2982 uint8_t iface_index;
2983
2984 /* check for root HUB */
2985 if (udev->parent_hub == NULL)
2986 return;
2987repeat:
2988
2989 xfer = udev->ctrl_xfer[0];
2990 if (xfer) {
2991 USB_XFER_LOCK(xfer);
2992 no_resetup =
2993 ((xfer->address == udev->address) &&
2994 (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
2995 udev->ddesc.bMaxPacketSize));
2996 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2997 if (no_resetup) {
2998 /*
2999 * NOTE: checking "xfer->address" and
3000 * starting the USB transfer must be
3001 * atomic!
3002 */
3003 usbd_transfer_start(xfer);
3004 }
3005 }
3006 USB_XFER_UNLOCK(xfer);
3007 } else {
3008 no_resetup = 0;
3009 }
3010
3011 if (no_resetup) {
3012 /*
3013		 * All parameters are exactly the same as before.
3014 * Just return.
3015 */
3016 return;
3017 }
3018 /*
3019 * Update wMaxPacketSize for the default control endpoint:
3020 */
3021 udev->ctrl_ep_desc.wMaxPacketSize[0] =
3022 udev->ddesc.bMaxPacketSize;
3023
3024 /*
3025 * Unsetup any existing USB transfer:
3026 */
3027 usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3028
3029 /*
3030 * Reset clear stall error counter.
3031 */
3032 udev->clear_stall_errors = 0;
3033
3034 /*
3035 * Try to setup a new USB transfer for the
3036 * default control endpoint:
3037 */
3038 iface_index = 0;
3039 if (usbd_transfer_setup(udev, &iface_index,
3040 udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3041 &udev->device_mtx)) {
3042 DPRINTFN(0, "could not setup default "
3043 "USB transfer\n");
3044 } else {
3045 goto repeat;
3046 }
3047}
3048
3049/*------------------------------------------------------------------------*
3050 *	usbd_clear_stall_locked - factored out code
3051 *
3052 * NOTE: the intention of this function is not to reset the hardware
3053 * data toggle.
3054 *------------------------------------------------------------------------*/
3055void
3056usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3057{
3058 USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3059
3060 /* check that we have a valid case */
3061 if (udev->flags.usb_mode == USB_MODE_HOST &&
3062 udev->parent_hub != NULL &&
3063 udev->bus->methods->clear_stall != NULL &&
3064 ep->methods != NULL) {
3065 (udev->bus->methods->clear_stall) (udev, ep);
3066 }
3067}
3068
3069/*------------------------------------------------------------------------*
3070 * usbd_clear_data_toggle - factored out code
3071 *
3072 * NOTE: the intention of this function is not to reset the hardware
3073 * data toggle on the USB device side.
3074 *------------------------------------------------------------------------*/
3075void
3076usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3077{
3078 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3079
3080 USB_BUS_LOCK(udev->bus);
3081 ep->toggle_next = 0;
3082 /* some hardware needs a callback to clear the data toggle */
3083 usbd_clear_stall_locked(udev, ep);
3084 USB_BUS_UNLOCK(udev->bus);
3085}
3086
3087/*------------------------------------------------------------------------*
3088 * usbd_clear_stall_callback - factored out clear stall callback
3089 *
3090 * Input parameters:
3091 * xfer1: Clear Stall Control Transfer
3092 * xfer2: Stalled USB Transfer
3093 *
3094 * This function is NULL safe.
3095 *
3096 * Return values:
3097 * 0: In progress
3098 * Else: Finished
3099 *
3100 * Clear stall config example:
3101 *
3102 * static const struct usb_config my_clearstall = {
3103 * .type = UE_CONTROL,
3104 * .endpoint = 0,
3105 * .direction = UE_DIR_ANY,
3106 * .interval = 50, //50 milliseconds
3107 * .bufsize = sizeof(struct usb_device_request),
3108 * .timeout = 1000, //1.000 seconds
3109 * .callback = &my_clear_stall_callback, // **
3110 * .usb_mode = USB_MODE_HOST,
3111 * };
3112 *
3113 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3114 * passing the correct parameters.
3115 *------------------------------------------------------------------------*/
3116uint8_t
3117usbd_clear_stall_callback(struct usb_xfer *xfer1,
3118 struct usb_xfer *xfer2)
3119{
3120 struct usb_device_request req;
3121
3122 if (xfer2 == NULL) {
3123 /* looks like we are tearing down */
3124 DPRINTF("NULL input parameter\n");
3125 return (0);
3126 }
3127 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3128 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3129
3130 switch (USB_GET_STATE(xfer1)) {
3131 case USB_ST_SETUP:
3132
3133 /*
3134 * pre-clear the data toggle to DATA0 ("umass.c" and
3135		 * "ata-usb.c" depend on this)
3136 */
3137
3138 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3139
3140 /* setup a clear-stall packet */
3141
3142 req.bmRequestType = UT_WRITE_ENDPOINT;
3143 req.bRequest = UR_CLEAR_FEATURE;
3144 USETW(req.wValue, UF_ENDPOINT_HALT);
3145 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3146 req.wIndex[1] = 0;
3147 USETW(req.wLength, 0);
3148
3149 /*
3150 * "usbd_transfer_setup_sub()" will ensure that
3151 * we have sufficient room in the buffer for
3152 * the request structure!
3153 */
3154
3155 /* copy in the transfer */
3156
3157 usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3158
3159 /* set length */
3160 xfer1->frlengths[0] = sizeof(req);
3161 xfer1->nframes = 1;
3162
3163 usbd_transfer_submit(xfer1);
3164 return (0);
3165
3166 case USB_ST_TRANSFERRED:
3167 break;
3168
3169 default: /* Error */
3170 if (xfer1->error == USB_ERR_CANCELLED) {
3171 return (0);
3172 }
3173 break;
3174 }
3175 return (1); /* Clear Stall Finished */
3176}
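/*
 * A possible "my_clear_stall_callback" matching the configuration
 * shown above; "my_softc" and "sc_bulk_xfer" are hypothetical driver
 * fields. Once the clear-stall request has finished, the stalled
 * transfer is restarted:
 *
 *	static void
 *	my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		struct my_softc *sc = usbd_xfer_softc(xfer);
 *
 *		if (usbd_clear_stall_callback(xfer, sc->sc_bulk_xfer)) {
 *			// clear-stall finished - resume the data transfer
 *			usbd_transfer_start(sc->sc_bulk_xfer);
 *		}
 *	}
 */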
3177
3178/*------------------------------------------------------------------------*
3179 * usbd_transfer_poll
3180 *
3181 * The following function gets called from the USB keyboard driver and
3182 * UMASS when the system has panicked.
3183 *
3184 * NOTE: It is currently not possible to resume normal operation on
3185 * the USB controller which has been polled, due to clearing of the
3186 * "up_dsleep" and "up_msleep" flags.
3187 *------------------------------------------------------------------------*/
3188void
3189usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3190{
3191 struct usb_xfer *xfer;
3192 struct usb_xfer_root *xroot;
3193 struct usb_device *udev;
3194 struct usb_proc_msg *pm;
3195 uint16_t n;
3196 uint16_t drop_bus;
3197 uint16_t drop_xfer;
3198
3199 for (n = 0; n != max; n++) {
3200 /* Extra checks to avoid panic */
3201 xfer = ppxfer[n];
3202 if (xfer == NULL)
3203 continue; /* no USB transfer */
3204 xroot = xfer->xroot;
3205 if (xroot == NULL)
3206 continue; /* no USB root */
3207 udev = xroot->udev;
3208 if (udev == NULL)
3209 continue; /* no USB device */
3210 if (udev->bus == NULL)
3211 continue; /* no BUS structure */
3212 if (udev->bus->methods == NULL)
3213 continue; /* no BUS methods */
3214 if (udev->bus->methods->xfer_poll == NULL)
3215 continue; /* no poll method */
3216
3217 /* make sure that the BUS mutex is not locked */
3218 drop_bus = 0;
3219 while (mtx_owned(&xroot->udev->bus->bus_mtx) && !SCHEDULER_STOPPED()) {
3220 mtx_unlock(&xroot->udev->bus->bus_mtx);
3221 drop_bus++;
3222 }
3223
3224 /* make sure that the transfer mutex is not locked */
3225 drop_xfer = 0;
3226 while (mtx_owned(xroot->xfer_mtx) && !SCHEDULER_STOPPED()) {
3227 mtx_unlock(xroot->xfer_mtx);
3228 drop_xfer++;
3229 }
3230
3231		/* Make sure cv_signal() and cv_broadcast() are not called */
3232 USB_BUS_CONTROL_XFER_PROC(udev->bus)->up_msleep = 0;
3233 USB_BUS_EXPLORE_PROC(udev->bus)->up_msleep = 0;
3234 USB_BUS_GIANT_PROC(udev->bus)->up_msleep = 0;
3235 USB_BUS_NON_GIANT_PROC(udev->bus)->up_msleep = 0;
3236
3237 /* poll USB hardware */
3238 (udev->bus->methods->xfer_poll) (udev->bus);
3239
3240 USB_BUS_LOCK(xroot->bus);
3241
3242 /* check for clear stall */
3243 if (udev->ctrl_xfer[1] != NULL) {
3244
3245 /* poll clear stall start */
3246 pm = &udev->cs_msg[0].hdr;
3247 (pm->pm_callback) (pm);
3248 /* poll clear stall done thread */
3249 pm = &udev->ctrl_xfer[1]->
3250 xroot->done_m[0].hdr;
3251 (pm->pm_callback) (pm);
3252 }
3253
3254 /* poll done thread */
3255 pm = &xroot->done_m[0].hdr;
3256 (pm->pm_callback) (pm);
3257
3258 USB_BUS_UNLOCK(xroot->bus);
3259
3260 /* restore transfer mutex */
3261 while (drop_xfer--)
3262 mtx_lock(xroot->xfer_mtx);
3263
3264 /* restore BUS mutex */
3265 while (drop_bus--)
3266 mtx_lock(&xroot->udev->bus->bus_mtx);
3267 }
3268}
3269
3270static void
3271usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3272 uint8_t type, enum usb_dev_speed speed)
3273{
3274 static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3275 [USB_SPEED_LOW] = 8,
3276 [USB_SPEED_FULL] = 64,
3277 [USB_SPEED_HIGH] = 1024,
3278 [USB_SPEED_VARIABLE] = 1024,
3279 [USB_SPEED_SUPER] = 1024,
3280 };
3281
3282 static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3283 [USB_SPEED_LOW] = 0, /* invalid */
3284 [USB_SPEED_FULL] = 1023,
3285 [USB_SPEED_HIGH] = 1024,
3286 [USB_SPEED_VARIABLE] = 3584,
3287 [USB_SPEED_SUPER] = 1024,
3288 };
3289
3290 static const uint16_t control_min[USB_SPEED_MAX] = {
3291 [USB_SPEED_LOW] = 8,
3292 [USB_SPEED_FULL] = 8,
3293 [USB_SPEED_HIGH] = 64,
3294 [USB_SPEED_VARIABLE] = 512,
3295 [USB_SPEED_SUPER] = 512,
3296 };
3297
3298 static const uint16_t bulk_min[USB_SPEED_MAX] = {
3299 [USB_SPEED_LOW] = 8,
3300 [USB_SPEED_FULL] = 8,
3301 [USB_SPEED_HIGH] = 512,
3302 [USB_SPEED_VARIABLE] = 512,
3303 [USB_SPEED_SUPER] = 1024,
3304 };
3305
3306 uint16_t temp;
3307
3308 memset(ptr, 0, sizeof(*ptr));
3309
3310 switch (type) {
3311 case UE_INTERRUPT:
3312 ptr->range.max = intr_range_max[speed];
3313 break;
3314 case UE_ISOCHRONOUS:
3315 ptr->range.max = isoc_range_max[speed];
3316 break;
3317 default:
3318 if (type == UE_BULK)
3319 temp = bulk_min[speed];
3320 else /* UE_CONTROL */
3321 temp = control_min[speed];
3322
3323 /* default is fixed */
3324 ptr->fixed[0] = temp;
3325 ptr->fixed[1] = temp;
3326 ptr->fixed[2] = temp;
3327 ptr->fixed[3] = temp;
3328
3329 if (speed == USB_SPEED_FULL) {
3330 /* multiple sizes */
3331 ptr->fixed[1] = 16;
3332 ptr->fixed[2] = 32;
3333 ptr->fixed[3] = 64;
3334 }
3335 if ((speed == USB_SPEED_VARIABLE) &&
3336 (type == UE_BULK)) {
3337 /* multiple sizes */
3338 ptr->fixed[2] = 1024;
3339 ptr->fixed[3] = 1536;
3340 }
3341 break;
3342 }
3343}
3344
3345void *
3346usbd_xfer_softc(struct usb_xfer *xfer)
3347{
3348 return (xfer->priv_sc);
3349}
3350
3351void *
3352usbd_xfer_get_priv(struct usb_xfer *xfer)
3353{
3354 return (xfer->priv_fifo);
3355}
3356
3357void
3358usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3359{
3360 xfer->priv_fifo = ptr;
3361}
3362
3363uint8_t
3364usbd_xfer_state(struct usb_xfer *xfer)
3365{
3366 return (xfer->usb_state);
3367}
3368
3369void
3370usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3371{
3372 switch (flag) {
3373 case USB_FORCE_SHORT_XFER:
3374 xfer->flags.force_short_xfer = 1;
3375 break;
3376 case USB_SHORT_XFER_OK:
3377 xfer->flags.short_xfer_ok = 1;
3378 break;
3379 case USB_MULTI_SHORT_OK:
3380 xfer->flags.short_frames_ok = 1;
3381 break;
3382 case USB_MANUAL_STATUS:
3383 xfer->flags.manual_status = 1;
3384 break;
3385 }
3386}
3387
3388void
3389usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3390{
3391 switch (flag) {
3392 case USB_FORCE_SHORT_XFER:
3393 xfer->flags.force_short_xfer = 0;
3394 break;
3395 case USB_SHORT_XFER_OK:
3396 xfer->flags.short_xfer_ok = 0;
3397 break;
3398 case USB_MULTI_SHORT_OK:
3399 xfer->flags.short_frames_ok = 0;
3400 break;
3401 case USB_MANUAL_STATUS:
3402 xfer->flags.manual_status = 0;
3403 break;
3404 }
3405}
3406
3407/*
3408 * The following function returns, in milliseconds, the time at which
3409 * the isochronous transfer was completed by the hardware. The returned
3410 * value wraps around every 65536 milliseconds.
3411 */
3412uint16_t
3413usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3414{
3415 return (xfer->isoc_time_complete);
3416}
164 uint32_t temp;
165
166 mtod = udev->bus->methods;
167 temp = 0;
168
169 if (mtod->get_dma_delay) {
170 (mtod->get_dma_delay) (udev, &temp);
171 /*
172 * Round up and convert to milliseconds. Note that we use
173		 * 1024 microseconds per millisecond to save a division.
174 */
175 temp += 0x3FF;
176 temp /= 0x400;
177 }
178 return (temp);
179}
180
181/*------------------------------------------------------------------------*
182 * usbd_transfer_setup_sub_malloc
183 *
184 * This function will allocate one or more DMA'able memory chunks
185 * according to "size", "align" and "count" arguments. "ppc" is
186 * set to point to a linear array of USB page caches afterwards.
187 *
188 * If the "align" argument is equal to "1" a non-contiguous allocation
189 * can happen. Otherwise, if the "align" argument is greater than "1",
190 * the allocation will always be contiguous in memory.
191 *
192 * Returns:
193 * 0: Success
194 * Else: Failure
195 *------------------------------------------------------------------------*/
196#if USB_HAVE_BUSDMA
197uint8_t
198usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
199 struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
200 usb_size_t count)
201{
202 struct usb_page_cache *pc;
203 struct usb_page *pg;
204 void *buf;
205 usb_size_t n_dma_pc;
206 usb_size_t n_dma_pg;
207 usb_size_t n_obj;
208 usb_size_t x;
209 usb_size_t y;
210 usb_size_t r;
211 usb_size_t z;
212
213 USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
214 align));
215 USB_ASSERT(size > 0, ("Invalid size = 0\n"));
216
217 if (count == 0) {
218 return (0); /* nothing to allocate */
219 }
220 /*
221	 * Round "size" up to a multiple of "align", which must be a power of two.
222 */
223 size = -((-size) & (-align));
224
225 /*
226 * Try multi-allocation chunks to reduce the number of DMA
227	 * allocations, since DMA allocations are slow.
228 */
229 if (align == 1) {
230 /* special case - non-cached multi page DMA memory */
231 n_dma_pc = count;
232 n_dma_pg = (2 + (size / USB_PAGE_SIZE));
233 n_obj = 1;
234 } else if (size >= USB_PAGE_SIZE) {
235 n_dma_pc = count;
236 n_dma_pg = 1;
237 n_obj = 1;
238 } else {
239 /* compute number of objects per page */
240 n_obj = (USB_PAGE_SIZE / size);
241 /*
242 * Compute number of DMA chunks, rounded up
243 * to nearest one:
244 */
245 n_dma_pc = ((count + n_obj - 1) / n_obj);
246 n_dma_pg = 1;
247 }
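	/*
	 * For example, assuming a 4096 byte USB_PAGE_SIZE, a request for
	 * count=100 buffers of size=64 bytes gives n_obj=64 objects per
	 * chunk and n_dma_pc=2 DMA chunks, where the last chunk only
	 * carries the remaining 36 objects.
	 */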
248
249 /*
250 * DMA memory is allocated once, but mapped twice. That's why
251 * there is one list for auto-free and another list for
252 * non-auto-free which only holds the mapping and not the
253 * allocation.
254 */
255 if (parm->buf == NULL) {
256 /* reserve memory (auto-free) */
257 parm->dma_page_ptr += n_dma_pc * n_dma_pg;
258 parm->dma_page_cache_ptr += n_dma_pc;
259
260 /* reserve memory (no-auto-free) */
261 parm->dma_page_ptr += count * n_dma_pg;
262 parm->xfer_page_cache_ptr += count;
263 return (0);
264 }
265 for (x = 0; x != n_dma_pc; x++) {
266 /* need to initialize the page cache */
267 parm->dma_page_cache_ptr[x].tag_parent =
268 &parm->curr_xfer->xroot->dma_parent_tag;
269 }
270 for (x = 0; x != count; x++) {
271 /* need to initialize the page cache */
272 parm->xfer_page_cache_ptr[x].tag_parent =
273 &parm->curr_xfer->xroot->dma_parent_tag;
274 }
275
276 if (ppc) {
277 *ppc = parm->xfer_page_cache_ptr;
278 }
279 r = count; /* set remainder count */
280 z = n_obj * size; /* set allocation size */
281 pc = parm->xfer_page_cache_ptr;
282 pg = parm->dma_page_ptr;
283
284 for (x = 0; x != n_dma_pc; x++) {
285
286 if (r < n_obj) {
287 /* compute last remainder */
288 z = r * size;
289 n_obj = r;
290 }
291 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
292 pg, z, align)) {
293 return (1); /* failure */
294 }
295 /* Set beginning of current buffer */
296 buf = parm->dma_page_cache_ptr->buffer;
297 /* Make room for one DMA page cache and one page */
298 parm->dma_page_cache_ptr++;
299 pg += n_dma_pg;
300
301 for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {
302
303 /* Load sub-chunk into DMA */
304 if (usb_pc_dmamap_create(pc, size)) {
305 return (1); /* failure */
306 }
307 pc->buffer = USB_ADD_BYTES(buf, y * size);
308 pc->page_start = pg;
309
310 mtx_lock(pc->tag_parent->mtx);
311 if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
312 mtx_unlock(pc->tag_parent->mtx);
313 return (1); /* failure */
314 }
315 mtx_unlock(pc->tag_parent->mtx);
316 }
317 }
318
319 parm->xfer_page_cache_ptr = pc;
320 parm->dma_page_ptr = pg;
321 return (0);
322}
323#endif
324
325/*------------------------------------------------------------------------*
326 * usbd_transfer_setup_sub - transfer setup subroutine
327 *
328 * This function must be called from the "xfer_setup" callback of the
329 * USB Host or Device controller driver when setting up an USB
330 * transfer. This function will setup correct packet sizes, buffer
331 * sizes, flags and more, that are stored in the "usb_xfer"
332 * structure.
333 *------------------------------------------------------------------------*/
334void
335usbd_transfer_setup_sub(struct usb_setup_params *parm)
336{
337 enum {
338 REQ_SIZE = 8,
339 MIN_PKT = 8,
340 };
341 struct usb_xfer *xfer = parm->curr_xfer;
342 const struct usb_config *setup = parm->curr_setup;
343 struct usb_endpoint_ss_comp_descriptor *ecomp;
344 struct usb_endpoint_descriptor *edesc;
345 struct usb_std_packet_size std_size;
346 usb_frcount_t n_frlengths;
347 usb_frcount_t n_frbuffers;
348 usb_frcount_t x;
349 uint8_t type;
350 uint8_t zmps;
351
352 /*
353 * Sanity check. The following parameters must be initialized before
354 * calling this function.
355 */
356 if ((parm->hc_max_packet_size == 0) ||
357 (parm->hc_max_packet_count == 0) ||
358 (parm->hc_max_frame_size == 0)) {
359 parm->err = USB_ERR_INVAL;
360 goto done;
361 }
362 edesc = xfer->endpoint->edesc;
363 ecomp = xfer->endpoint->ecomp;
364
365 type = (edesc->bmAttributes & UE_XFERTYPE);
366
367 xfer->flags = setup->flags;
368 xfer->nframes = setup->frames;
369 xfer->timeout = setup->timeout;
370 xfer->callback = setup->callback;
371 xfer->interval = setup->interval;
372 xfer->endpointno = edesc->bEndpointAddress;
373 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
374 xfer->max_packet_count = 1;
375 /* make a shadow copy: */
376 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
377
378 parm->bufsize = setup->bufsize;
379
380 switch (parm->speed) {
381 case USB_SPEED_HIGH:
382 switch (type) {
383 case UE_ISOCHRONOUS:
384 case UE_INTERRUPT:
385 xfer->max_packet_count +=
386 (xfer->max_packet_size >> 11) & 3;
387
388 /* check for invalid max packet count */
389 if (xfer->max_packet_count > 3)
390 xfer->max_packet_count = 3;
391 break;
392 default:
393 break;
394 }
395 xfer->max_packet_size &= 0x7FF;
396 break;
397 case USB_SPEED_SUPER:
398 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
399
400 if (ecomp != NULL)
401 xfer->max_packet_count += ecomp->bMaxBurst;
402
403 if ((xfer->max_packet_count == 0) ||
404 (xfer->max_packet_count > 16))
405 xfer->max_packet_count = 16;
406
407 switch (type) {
408 case UE_CONTROL:
409 xfer->max_packet_count = 1;
410 break;
411 case UE_ISOCHRONOUS:
412 if (ecomp != NULL) {
413 uint8_t mult;
414
415 mult = UE_GET_SS_ISO_MULT(
416 ecomp->bmAttributes) + 1;
417 if (mult > 3)
418 mult = 3;
419
420 xfer->max_packet_count *= mult;
421 }
422 break;
423 default:
424 break;
425 }
426 xfer->max_packet_size &= 0x7FF;
427 break;
428 default:
429 break;
430 }
431 /* range check "max_packet_count" */
432
433 if (xfer->max_packet_count > parm->hc_max_packet_count) {
434 xfer->max_packet_count = parm->hc_max_packet_count;
435 }
436 /* filter "wMaxPacketSize" according to HC capabilities */
437
438 if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
439 (xfer->max_packet_size == 0)) {
440 xfer->max_packet_size = parm->hc_max_packet_size;
441 }
442 /* filter "wMaxPacketSize" according to standard sizes */
443
444 usbd_get_std_packet_size(&std_size, type, parm->speed);
445
446 if (std_size.range.min || std_size.range.max) {
447
448 if (xfer->max_packet_size < std_size.range.min) {
449 xfer->max_packet_size = std_size.range.min;
450 }
451 if (xfer->max_packet_size > std_size.range.max) {
452 xfer->max_packet_size = std_size.range.max;
453 }
454 } else {
455
456 if (xfer->max_packet_size >= std_size.fixed[3]) {
457 xfer->max_packet_size = std_size.fixed[3];
458 } else if (xfer->max_packet_size >= std_size.fixed[2]) {
459 xfer->max_packet_size = std_size.fixed[2];
460 } else if (xfer->max_packet_size >= std_size.fixed[1]) {
461 xfer->max_packet_size = std_size.fixed[1];
462 } else {
463 /* only one possibility left */
464 xfer->max_packet_size = std_size.fixed[0];
465 }
466 }
467
468 /* compute "max_frame_size" */
469
470 usbd_update_max_frame_size(xfer);
471
472 /* check interrupt interval and transfer pre-delay */
473
474 if (type == UE_ISOCHRONOUS) {
475
476 uint16_t frame_limit;
477
478 xfer->interval = 0; /* not used, must be zero */
479 xfer->flags_int.isochronous_xfr = 1; /* set flag */
480
481 if (xfer->timeout == 0) {
482 /*
483 * set a default timeout in
484 * case something goes wrong!
485 */
486 xfer->timeout = 1000 / 4;
487 }
488 switch (parm->speed) {
489 case USB_SPEED_LOW:
490 case USB_SPEED_FULL:
491 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
492 xfer->fps_shift = 0;
493 break;
494 default:
495 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
496 xfer->fps_shift = edesc->bInterval;
497 if (xfer->fps_shift > 0)
498 xfer->fps_shift--;
499 if (xfer->fps_shift > 3)
500 xfer->fps_shift = 3;
501 if (xfer->flags.pre_scale_frames != 0)
502 xfer->nframes <<= (3 - xfer->fps_shift);
503 break;
504 }
505
506 if (xfer->nframes > frame_limit) {
507 /*
508 * this is not going to work
509			 * across different hardware
510 */
511 parm->err = USB_ERR_INVAL;
512 goto done;
513 }
514 if (xfer->nframes == 0) {
515 /*
516 * this is not a valid value
517 */
518 parm->err = USB_ERR_ZERO_NFRAMES;
519 goto done;
520 }
521 } else {
522
523 /*
524		 * If a value is specified, use that; else check the
525 * endpoint descriptor!
526 */
527 if (type == UE_INTERRUPT) {
528
529 uint32_t temp;
530
531 if (xfer->interval == 0) {
532
533 xfer->interval = edesc->bInterval;
534
535 switch (parm->speed) {
536 case USB_SPEED_LOW:
537 case USB_SPEED_FULL:
538 break;
539 default:
540 /* 125us -> 1ms */
541 if (xfer->interval < 4)
542 xfer->interval = 1;
543 else if (xfer->interval > 16)
544 xfer->interval = (1 << (16 - 4));
545 else
546 xfer->interval =
547 (1 << (xfer->interval - 4));
548 break;
549 }
550 }
551
552 if (xfer->interval == 0) {
553 /*
554 * One millisecond is the smallest
555 * interval we support:
556 */
557 xfer->interval = 1;
558 }
559
560 xfer->fps_shift = 0;
561 temp = 1;
562
563 while ((temp != 0) && (temp < xfer->interval)) {
564 xfer->fps_shift++;
565 temp *= 2;
566 }
567
568 switch (parm->speed) {
569 case USB_SPEED_LOW:
570 case USB_SPEED_FULL:
571 break;
572 default:
573 xfer->fps_shift += 3;
574 break;
575 }
576 }
577 }
578
579 /*
580 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
581	 * to be equal to zero when setting up USB transfers, since
582	 * that would lead to a lot of extra code in the USB kernel.
583 */
584
585 if ((xfer->max_frame_size == 0) ||
586 (xfer->max_packet_size == 0)) {
587
588 zmps = 1;
589
590 if ((parm->bufsize <= MIN_PKT) &&
591 (type != UE_CONTROL) &&
592 (type != UE_BULK)) {
593
594 /* workaround */
595 xfer->max_packet_size = MIN_PKT;
596 xfer->max_packet_count = 1;
597 parm->bufsize = 0; /* automatic setup length */
598 usbd_update_max_frame_size(xfer);
599
600 } else {
601 parm->err = USB_ERR_ZERO_MAXP;
602 goto done;
603 }
604
605 } else {
606 zmps = 0;
607 }
608
609 /*
610 * check if we should setup a default
611 * length:
612 */
613
614 if (parm->bufsize == 0) {
615
616 parm->bufsize = xfer->max_frame_size;
617
618 if (type == UE_ISOCHRONOUS) {
619 parm->bufsize *= xfer->nframes;
620 }
621 }
622 /*
623 * check if we are about to setup a proxy
624 * type of buffer:
625 */
626
627 if (xfer->flags.proxy_buffer) {
628
629 /* round bufsize up */
630
631 parm->bufsize += (xfer->max_frame_size - 1);
632
633 if (parm->bufsize < xfer->max_frame_size) {
634 /* length wrapped around */
635 parm->err = USB_ERR_INVAL;
636 goto done;
637 }
638 /* subtract remainder */
639
640 parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
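		/*
		 * Worked example (illustrative numbers only): a requested
		 * bufsize of 1000 bytes with a max_frame_size of 512 bytes
		 * is rounded up to 1024 bytes by the statements above.
		 */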
641
642 /* add length of USB device request structure, if any */
643
644 if (type == UE_CONTROL) {
645 parm->bufsize += REQ_SIZE; /* SETUP message */
646 }
647 }
648 xfer->max_data_length = parm->bufsize;
649
650 /* Setup "n_frlengths" and "n_frbuffers" */
651
652 if (type == UE_ISOCHRONOUS) {
653 n_frlengths = xfer->nframes;
654 n_frbuffers = 1;
655 } else {
656
657 if (type == UE_CONTROL) {
658 xfer->flags_int.control_xfr = 1;
659 if (xfer->nframes == 0) {
660 if (parm->bufsize <= REQ_SIZE) {
661 /*
662 * there will never be any data
663 * stage
664 */
665 xfer->nframes = 1;
666 } else {
667 xfer->nframes = 2;
668 }
669 }
670 } else {
671 if (xfer->nframes == 0) {
672 xfer->nframes = 1;
673 }
674 }
675
676 n_frlengths = xfer->nframes;
677 n_frbuffers = xfer->nframes;
678 }
679
680 /*
681 * check if we have room for the
682 * USB device request structure:
683 */
684
685 if (type == UE_CONTROL) {
686
687 if (xfer->max_data_length < REQ_SIZE) {
688 /* length wrapped around or bufsize too small */
689 parm->err = USB_ERR_INVAL;
690 goto done;
691 }
692 xfer->max_data_length -= REQ_SIZE;
693 }
694 /*
695 * Setup "frlengths" and shadow "frlengths" for keeping the
696 * initial frame lengths when a USB transfer is complete. This
697 * information is useful when computing isochronous offsets.
698 */
699 xfer->frlengths = parm->xfer_length_ptr;
700 parm->xfer_length_ptr += 2 * n_frlengths;
701
702 /* setup "frbuffers" */
703 xfer->frbuffers = parm->xfer_page_cache_ptr;
704 parm->xfer_page_cache_ptr += n_frbuffers;
705
706 /* initialize max frame count */
707 xfer->max_frame_count = xfer->nframes;
708
709 /*
710 * check if we need to setup
711 * a local buffer:
712 */
713
714 if (!xfer->flags.ext_buffer) {
715#if USB_HAVE_BUSDMA
716 struct usb_page_search page_info;
717 struct usb_page_cache *pc;
718
719 if (usbd_transfer_setup_sub_malloc(parm,
720 &pc, parm->bufsize, 1, 1)) {
721 parm->err = USB_ERR_NOMEM;
722 } else if (parm->buf != NULL) {
723
724 usbd_get_page(pc, 0, &page_info);
725
726 xfer->local_buffer = page_info.buffer;
727
728 usbd_xfer_set_frame_offset(xfer, 0, 0);
729
730 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
731 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
732 }
733 }
734#else
735 /* align data */
736 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
737
738 if (parm->buf != NULL) {
739 xfer->local_buffer =
740 USB_ADD_BYTES(parm->buf, parm->size[0]);
741
742 usbd_xfer_set_frame_offset(xfer, 0, 0);
743
744 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
745 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
746 }
747 }
748 parm->size[0] += parm->bufsize;
749
750 /* align data again */
751 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
752#endif
753 }
754 /*
755 * Compute maximum buffer size
756 */
757
758 if (parm->bufsize_max < parm->bufsize) {
759 parm->bufsize_max = parm->bufsize;
760 }
761#if USB_HAVE_BUSDMA
762 if (xfer->flags_int.bdma_enable) {
763 /*
764 * Setup "dma_page_ptr".
765 *
766 * Proof for formula below:
767 *
768 * Assume there are three USB frames having length "a", "b" and
769 * "c". These USB frames will at maximum need "z"
770 * "usb_page" structures. "z" is given by:
771 *
772 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
773 * ((c / USB_PAGE_SIZE) + 2);
774 *
775 * Constraining "a", "b" and "c" like this:
776 *
777 * (a + b + c) <= parm->bufsize
778 *
779 * We know that:
780 *
781 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
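	 *
	 * Worked example (illustrative numbers only): with three frame
	 * buffers and "parm->bufsize" equal to four USB pages, at most
	 * 4 + (3*2) = 10 "usb_page" structures are reserved by the
	 * statements below.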
782 *
783 * Here is the general formula:
784 */
785 xfer->dma_page_ptr = parm->dma_page_ptr;
786 parm->dma_page_ptr += (2 * n_frbuffers);
787 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
788 }
789#endif
790 if (zmps) {
791 /* correct maximum data length */
792 xfer->max_data_length = 0;
793 }
794 /* subtract USB frame remainder from "hc_max_frame_size" */
795
796 xfer->max_hc_frame_size =
797 (parm->hc_max_frame_size -
798 (parm->hc_max_frame_size % xfer->max_frame_size));
799
800 if (xfer->max_hc_frame_size == 0) {
801 parm->err = USB_ERR_INVAL;
802 goto done;
803 }
804
805 /* initialize frame buffers */
806
807 if (parm->buf) {
808 for (x = 0; x != n_frbuffers; x++) {
809 xfer->frbuffers[x].tag_parent =
810 &xfer->xroot->dma_parent_tag;
811#if USB_HAVE_BUSDMA
812 if (xfer->flags_int.bdma_enable &&
813 (parm->bufsize_max > 0)) {
814
815 if (usb_pc_dmamap_create(
816 xfer->frbuffers + x,
817 parm->bufsize_max)) {
818 parm->err = USB_ERR_NOMEM;
819 goto done;
820 }
821 }
822#endif
823 }
824 }
825done:
826 if (parm->err) {
827 /*
828 * Set some dummy values so that we avoid division by zero:
829 */
830 xfer->max_hc_frame_size = 1;
831 xfer->max_frame_size = 1;
832 xfer->max_packet_size = 1;
833 xfer->max_data_length = 0;
834 xfer->nframes = 0;
835 xfer->max_frame_count = 0;
836 }
837}
838
839/*------------------------------------------------------------------------*
840 * usbd_transfer_setup - setup an array of USB transfers
841 *
842 * NOTE: You must always call "usbd_transfer_unsetup" after calling
843 * "usbd_transfer_setup" if success was returned.
844 *
845 * The idea is that the USB device driver should pre-allocate all its
846 * transfers by one call to this function.
847 *
848 * Return values:
849 * 0: Success
850 * Else: Failure
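 *
 * Minimal usage sketch, for illustration only; "sc", "sc_udev",
 * "sc_mtx", "sc_xfer", "iface_index" and "my_callback" are
 * hypothetical driver-side names, not part of this file:
 *
 *	static const struct usb_config my_config[1] = {
 *		[0] = {
 *			.type = UE_BULK,
 *			.endpoint = UE_ADDR_ANY,
 *			.direction = UE_DIR_IN,
 *			.bufsize = 512,
 *			.callback = &my_callback,
 *		},
 *	};
 *
 *	if (usbd_transfer_setup(sc->sc_udev, &iface_index, sc->sc_xfer,
 *	    my_config, 1, sc, &sc->sc_mtx) == 0) {
 *		... transfers are ready; call "usbd_transfer_unsetup()"
 *		    at detach time ...
 *	}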
851 *------------------------------------------------------------------------*/
852usb_error_t
853usbd_transfer_setup(struct usb_device *udev,
854 const uint8_t *ifaces, struct usb_xfer **ppxfer,
855 const struct usb_config *setup_start, uint16_t n_setup,
856 void *priv_sc, struct mtx *xfer_mtx)
857{
858 const struct usb_config *setup_end = setup_start + n_setup;
859 const struct usb_config *setup;
860 struct usb_setup_params *parm;
861 struct usb_endpoint *ep;
862 struct usb_xfer_root *info;
863 struct usb_xfer *xfer;
864 void *buf = NULL;
865 usb_error_t error = 0;
866 uint16_t n;
867 uint16_t refcount;
868 uint8_t do_unlock;
869
870 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
871 "usbd_transfer_setup can sleep!");
872
873 /* do some checking first */
874
875 if (n_setup == 0) {
876 DPRINTFN(6, "setup array has zero length!\n");
877 return (USB_ERR_INVAL);
878 }
879 if (ifaces == 0) {
880 DPRINTFN(6, "ifaces array is NULL!\n");
881 return (USB_ERR_INVAL);
882 }
883 if (xfer_mtx == NULL) {
884 DPRINTFN(6, "using global lock\n");
885 xfer_mtx = &Giant;
886 }
887
888 /* more sanity checks */
889
890 for (setup = setup_start, n = 0;
891 setup != setup_end; setup++, n++) {
892 if (setup->bufsize == (usb_frlength_t)-1) {
893 error = USB_ERR_BAD_BUFSIZE;
894 DPRINTF("invalid bufsize\n");
895 }
896 if (setup->callback == NULL) {
897 error = USB_ERR_NO_CALLBACK;
898 DPRINTF("no callback\n");
899 }
900 ppxfer[n] = NULL;
901 }
902
903 if (error)
904 return (error);
905
906 /* Protect scratch area */
907 do_unlock = usbd_enum_lock(udev);
908
909 refcount = 0;
910 info = NULL;
911
912 parm = &udev->scratch.xfer_setup[0].parm;
913 memset(parm, 0, sizeof(*parm));
914
915 parm->udev = udev;
916 parm->speed = usbd_get_speed(udev);
917 parm->hc_max_packet_count = 1;
918
919 if (parm->speed >= USB_SPEED_MAX) {
920 parm->err = USB_ERR_INVAL;
921 goto done;
922 }
923 /* setup all transfers */
924
925 while (1) {
926
927 if (buf) {
928 /*
929 * Initialize the "usb_xfer_root" structure,
930 * which is common for all our USB transfers.
931 */
932 info = USB_ADD_BYTES(buf, 0);
933
934 info->memory_base = buf;
935 info->memory_size = parm->size[0];
936
937#if USB_HAVE_BUSDMA
938 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]);
939 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]);
940#endif
941 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]);
942 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]);
943
944 cv_init(&info->cv_drain, "WDRAIN");
945
946 info->xfer_mtx = xfer_mtx;
947#if USB_HAVE_BUSDMA
948 usb_dma_tag_setup(&info->dma_parent_tag,
949 parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag,
950 xfer_mtx, &usb_bdma_done_event, 32, parm->dma_tag_max);
951#endif
952
953 info->bus = udev->bus;
954 info->udev = udev;
955
956 TAILQ_INIT(&info->done_q.head);
957 info->done_q.command = &usbd_callback_wrapper;
958#if USB_HAVE_BUSDMA
959 TAILQ_INIT(&info->dma_q.head);
960 info->dma_q.command = &usb_bdma_work_loop;
961#endif
962 info->done_m[0].hdr.pm_callback = &usb_callback_proc;
963 info->done_m[0].xroot = info;
964 info->done_m[1].hdr.pm_callback = &usb_callback_proc;
965 info->done_m[1].xroot = info;
966
967 /*
968 * In device side mode control endpoint
969 * requests need to run from a separate
970 * context, else there is a chance of
971 * deadlock!
972 */
973 if (setup_start == usb_control_ep_cfg)
974 info->done_p =
975 USB_BUS_CONTROL_XFER_PROC(udev->bus);
976 else if (xfer_mtx == &Giant)
977 info->done_p =
978 USB_BUS_GIANT_PROC(udev->bus);
979 else
980 info->done_p =
981 USB_BUS_NON_GIANT_PROC(udev->bus);
982 }
983 /* reset sizes */
984
985 parm->size[0] = 0;
986 parm->buf = buf;
987 parm->size[0] += sizeof(info[0]);
988
989 for (setup = setup_start, n = 0;
990 setup != setup_end; setup++, n++) {
991
992 /* skip USB transfers without callbacks: */
993 if (setup->callback == NULL) {
994 continue;
995 }
996 /* see if there is a matching endpoint */
997 ep = usbd_get_endpoint(udev,
998 ifaces[setup->if_index], setup);
999
1000 /*
1001 * Check that the USB PIPE is valid and that
1002 * the endpoint mode is proper.
1003 *
1004 * Make sure we don't allocate a streams
1005 * transfer when such a combination is not
1006 * valid.
1007 */
1008 if ((ep == NULL) || (ep->methods == NULL) ||
1009 ((ep->ep_mode != USB_EP_MODE_STREAMS) &&
1010 (ep->ep_mode != USB_EP_MODE_DEFAULT)) ||
1011 (setup->stream_id != 0 &&
1012 (setup->stream_id >= USB_MAX_EP_STREAMS ||
1013 (ep->ep_mode != USB_EP_MODE_STREAMS)))) {
1014 if (setup->flags.no_pipe_ok)
1015 continue;
1016 if ((setup->usb_mode != USB_MODE_DUAL) &&
1017 (setup->usb_mode != udev->flags.usb_mode))
1018 continue;
1019 parm->err = USB_ERR_NO_PIPE;
1020 goto done;
1021 }
1022
1023 /* align data properly */
1024 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1025
1026 /* store current setup pointer */
1027 parm->curr_setup = setup;
1028
1029 if (buf) {
1030 /*
1031 * Common initialization of the
1032 * "usb_xfer" structure.
1033 */
1034 xfer = USB_ADD_BYTES(buf, parm->size[0]);
1035 xfer->address = udev->address;
1036 xfer->priv_sc = priv_sc;
1037 xfer->xroot = info;
1038
1039 usb_callout_init_mtx(&xfer->timeout_handle,
1040 &udev->bus->bus_mtx, 0);
1041 } else {
1042 /*
1043 * Setup a dummy xfer, since we are
1044 * writing to the "usb_xfer"
1045 * structure pointed to by "xfer"
1046 * before we have allocated any
1047 * memory:
1048 */
1049 xfer = &udev->scratch.xfer_setup[0].dummy;
1050 memset(xfer, 0, sizeof(*xfer));
1051 refcount++;
1052 }
1053
1054 /* set transfer endpoint pointer */
1055 xfer->endpoint = ep;
1056
1057 /* set transfer stream ID */
1058 xfer->stream_id = setup->stream_id;
1059
1060 parm->size[0] += sizeof(xfer[0]);
1061 parm->methods = xfer->endpoint->methods;
1062 parm->curr_xfer = xfer;
1063
1064 /*
1065 * Call the Host or Device controller transfer
1066 * setup routine:
1067 */
1068 (udev->bus->methods->xfer_setup) (parm);
1069
1070 /* check for error */
1071 if (parm->err)
1072 goto done;
1073
1074 if (buf) {
1075 /*
1076 * Increment the endpoint refcount. This
1077 * basically prevents setting a new
1078 * configuration and alternate setting
1079 * when USB transfers are in use on
1080 * the given interface. Search the USB
1081 * code for "endpoint->refcount_alloc" if you
1082 * want more information.
1083 */
1084 USB_BUS_LOCK(info->bus);
1085 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1086 parm->err = USB_ERR_INVAL;
1087
1088 xfer->endpoint->refcount_alloc++;
1089
1090 if (xfer->endpoint->refcount_alloc == 0)
1091 panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1092 USB_BUS_UNLOCK(info->bus);
1093
1094 /*
1095 * Whenever we set ppxfer[] then we
1096 * also need to increment the
1097 * "setup_refcount":
1098 */
1099 info->setup_refcount++;
1100
1101 /*
1102 * Transfer is successfully setup and
1103 * can be used:
1104 */
1105 ppxfer[n] = xfer;
1106 }
1107
1108 /* check for error */
1109 if (parm->err)
1110 goto done;
1111 }
1112
1113 if (buf != NULL || parm->err != 0)
1114 goto done;
1115
1116 /* if no transfers, nothing to do */
1117 if (refcount == 0)
1118 goto done;
1119
1120 /* align data properly */
1121 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1122
1123 /* store offset temporarily */
1124 parm->size[1] = parm->size[0];
1125
1126 /*
1127 * The number of DMA tags required depends on
1128 * the number of endpoints. The current estimate
1129 * for maximum number of DMA tags per endpoint
1130 * is three:
1131 * 1) for loading memory
1132 * 2) for allocating memory
1133 * 3) for fixing memory [UHCI]
1134 */
1135 parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX);
1136
1137 /*
1138 * DMA tags for QH, TD, Data and more.
1139 */
1140 parm->dma_tag_max += 8;
1141
1142 parm->dma_tag_p += parm->dma_tag_max;
1143
1144 parm->size[0] += ((uint8_t *)parm->dma_tag_p) -
1145 ((uint8_t *)0);
1146
1147 /* align data properly */
1148 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1149
1150 /* store offset temporarily */
1151 parm->size[3] = parm->size[0];
1152
1153 parm->size[0] += ((uint8_t *)parm->dma_page_ptr) -
1154 ((uint8_t *)0);
1155
1156 /* align data properly */
1157 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1158
1159 /* store offset temporarily */
1160 parm->size[4] = parm->size[0];
1161
1162 parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) -
1163 ((uint8_t *)0);
1164
1165 /* store end offset temporarily */
1166 parm->size[5] = parm->size[0];
1167
1168 parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) -
1169 ((uint8_t *)0);
1170
1171 /* store end offset temporarily */
1172
1173 parm->size[2] = parm->size[0];
1174
1175 /* align data properly */
1176 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1177
1178 parm->size[6] = parm->size[0];
1179
1180 parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) -
1181 ((uint8_t *)0);
1182
1183 /* align data properly */
1184 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1185
1186 /* allocate zeroed memory */
1187 buf = malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO);
1188
1189 if (buf == NULL) {
1190 parm->err = USB_ERR_NOMEM;
1191 DPRINTFN(0, "cannot allocate memory block for "
1192 "configuration (%d bytes)\n",
1193 parm->size[0]);
1194 goto done;
1195 }
1196 parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]);
1197 parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]);
1198 parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]);
1199 parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]);
1200 parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]);
1201 }
1202
1203done:
1204 if (buf) {
1205 if (info->setup_refcount == 0) {
1206 /*
1207 * "usbd_transfer_unsetup_sub" will unlock
1208 * the bus mutex before returning !
1209 */
1210 USB_BUS_LOCK(info->bus);
1211
1212 /* something went wrong */
1213 usbd_transfer_unsetup_sub(info, 0);
1214 }
1215 }
1216
1217 /* check if any errors happened */
1218 if (parm->err)
1219 usbd_transfer_unsetup(ppxfer, n_setup);
1220
1221 error = parm->err;
1222
1223 if (do_unlock)
1224 usbd_enum_unlock(udev);
1225
1226 return (error);
1227}
1228
1229/*------------------------------------------------------------------------*
1230 * usbd_transfer_unsetup_sub - factored out code
1231 *------------------------------------------------------------------------*/
1232static void
1233usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1234{
1235#if USB_HAVE_BUSDMA
1236 struct usb_page_cache *pc;
1237#endif
1238
1239 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1240
1241 /* wait for any outstanding DMA operations */
1242
1243 if (needs_delay) {
1244 usb_timeout_t temp;
1245 temp = usbd_get_dma_delay(info->udev);
1246 if (temp != 0) {
1247 usb_pause_mtx(&info->bus->bus_mtx,
1248 USB_MS_TO_TICKS(temp));
1249 }
1250 }
1251
1252 /* make sure that our done messages are not queued anywhere */
1253 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1254
1255 USB_BUS_UNLOCK(info->bus);
1256
1257#if USB_HAVE_BUSDMA
1258 /* free DMA'able memory, if any */
1259 pc = info->dma_page_cache_start;
1260 while (pc != info->dma_page_cache_end) {
1261 usb_pc_free_mem(pc);
1262 pc++;
1263 }
1264
1265 /* free DMA maps in all "xfer->frbuffers" */
1266 pc = info->xfer_page_cache_start;
1267 while (pc != info->xfer_page_cache_end) {
1268 usb_pc_dmamap_destroy(pc);
1269 pc++;
1270 }
1271
1272 /* free all DMA tags */
1273 usb_dma_tag_unsetup(&info->dma_parent_tag);
1274#endif
1275
1276 cv_destroy(&info->cv_drain);
1277
1278 /*
1279 * free the "memory_base" last, since the "info" structure is
1280 * contained within the "memory_base"!
1281 */
1282 free(info->memory_base, M_USB);
1283}
1284
1285/*------------------------------------------------------------------------*
1286 * usbd_transfer_unsetup - unsetup/free an array of USB transfers
1287 *
1288 * NOTE: All USB transfers in progress will get called back passing
1289 * the error code "USB_ERR_CANCELLED" before this function
1290 * returns.
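 *
 * Teardown sketch matching the setup example above; "sc" and
 * "sc_xfer" are hypothetical driver-side names:
 *
 *	usbd_transfer_unsetup(sc->sc_xfer, 1);
 *
 * This function can sleep and must not be called with the private
 * transfer mutex held, unless that mutex is Giant.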
1291 *------------------------------------------------------------------------*/
1292void
1293usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1294{
1295 struct usb_xfer *xfer;
1296 struct usb_xfer_root *info;
1297 uint8_t needs_delay = 0;
1298
1299 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1300 "usbd_transfer_unsetup can sleep!");
1301
1302 while (n_setup--) {
1303 xfer = pxfer[n_setup];
1304
1305 if (xfer == NULL)
1306 continue;
1307
1308 info = xfer->xroot;
1309
1310 USB_XFER_LOCK(xfer);
1311 USB_BUS_LOCK(info->bus);
1312
1313 /*
1314 * HINT: when you start/stop a transfer, it might be a
1315 * good idea to directly use the "pxfer[]" structure:
1316 *
1317 * usbd_transfer_start(sc->pxfer[0]);
1318 * usbd_transfer_stop(sc->pxfer[0]);
1319 *
1320 * That way, if your code has many parts that will not
1321 * stop running under the same lock, in other words
1322 * "xfer_mtx", the usbd_transfer_start and
1323 * usbd_transfer_stop functions will simply return
1324 * when they detect a NULL pointer argument.
1325 *
1326 * To avoid any races we clear the "pxfer[]" pointer
1327 * while holding the private mutex of the driver:
1328 */
1329 pxfer[n_setup] = NULL;
1330
1331 USB_BUS_UNLOCK(info->bus);
1332 USB_XFER_UNLOCK(xfer);
1333
1334 usbd_transfer_drain(xfer);
1335
1336#if USB_HAVE_BUSDMA
1337 if (xfer->flags_int.bdma_enable)
1338 needs_delay = 1;
1339#endif
1340 /*
1341 * NOTE: default endpoint does not have an
1342 * interface, even if endpoint->iface_index == 0
1343 */
1344 USB_BUS_LOCK(info->bus);
1345 xfer->endpoint->refcount_alloc--;
1346 USB_BUS_UNLOCK(info->bus);
1347
1348 usb_callout_drain(&xfer->timeout_handle);
1349
1350 USB_BUS_LOCK(info->bus);
1351
1352 USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1353 "reference count\n"));
1354
1355 info->setup_refcount--;
1356
1357 if (info->setup_refcount == 0) {
1358 usbd_transfer_unsetup_sub(info,
1359 needs_delay);
1360 } else {
1361 USB_BUS_UNLOCK(info->bus);
1362 }
1363 }
1364}
1365
1366/*------------------------------------------------------------------------*
1367 * usbd_control_transfer_init - factored out code
1368 *
1369 * In USB Device Mode we have to wait for the SETUP packet which
1370 * contains the "struct usb_device_request" structure, before we can
1371 * transfer any data. In USB Host Mode we already have the SETUP
1372 * packet at the moment the USB transfer is started. This leads us to
1373 * having to setup the USB transfer at two different places in
1374 * time. This function just contains factored out control transfer
1375 * initialisation code, so that we don't duplicate the code.
1376 *------------------------------------------------------------------------*/
1377static void
1378usbd_control_transfer_init(struct usb_xfer *xfer)
1379{
1380 struct usb_device_request req;
1381
1382 /* copy out the USB request header */
1383
1384 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1385
1386 /* setup remainder */
1387
1388 xfer->flags_int.control_rem = UGETW(req.wLength);
1389
1390 /* copy direction to endpoint variable */
1391
1392 xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1393 xfer->endpointno |=
1394 (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1395}
1396
1397/*------------------------------------------------------------------------*
1398 * usbd_setup_ctrl_transfer
1399 *
1400 * This function handles initialisation of control transfers. Control
1401 * transfers are special in that they can both transmit
1402 * and receive data.
1403 *
1404 * Return values:
1405 * 0: Success
1406 * Else: Failure
1407 *------------------------------------------------------------------------*/
1408static int
1409usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1410{
1411 usb_frlength_t len;
1412
1413 /* Check for control endpoint stall */
1414 if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1415 /* the control transfer is no longer active */
1416 xfer->flags_int.control_stall = 1;
1417 xfer->flags_int.control_act = 0;
1418 } else {
1419 /* don't stall control transfer by default */
1420 xfer->flags_int.control_stall = 0;
1421 }
1422
1423 /* Check for invalid number of frames */
1424 if (xfer->nframes > 2) {
1425 /*
1426 * If you need to split a control transfer, you
1427 * have to do one part at a time. Only with
1428 * non-control transfers can you do multiple
1429 * parts at a time.
1430 */
1431 DPRINTFN(0, "Too many frames: %u\n",
1432 (unsigned int)xfer->nframes);
1433 goto error;
1434 }
1435
1436 /*
1437 * Check if there is a control
1438 * transfer in progress:
1439 */
1440 if (xfer->flags_int.control_act) {
1441
1442 if (xfer->flags_int.control_hdr) {
1443
1444 /* clear send header flag */
1445
1446 xfer->flags_int.control_hdr = 0;
1447
1448 /* setup control transfer */
1449 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1450 usbd_control_transfer_init(xfer);
1451 }
1452 }
1453 /* get data length */
1454
1455 len = xfer->sumlen;
1456
1457 } else {
1458
1459 /* the size of the SETUP structure is hardcoded ! */
1460
1461 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1462 DPRINTFN(0, "Wrong framelength %u != %zu\n",
1463 xfer->frlengths[0], sizeof(struct
1464 usb_device_request));
1465 goto error;
1466 }
1467 /* check USB mode */
1468 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1469
1470 /* check number of frames */
1471 if (xfer->nframes != 1) {
1472 /*
1473 * We need to receive the setup
1474 * message first so that we know the
1475 * data direction!
1476 */
1477 DPRINTF("Misconfigured transfer\n");
1478 goto error;
1479 }
1480 /*
1481 * Set a dummy "control_rem" value. This
1482 * variable will be overwritten later by a
1483 * call to "usbd_control_transfer_init()" !
1484 */
1485 xfer->flags_int.control_rem = 0xFFFF;
1486 } else {
1487
1488 /* setup "endpoint" and "control_rem" */
1489
1490 usbd_control_transfer_init(xfer);
1491 }
1492
1493 /* set transfer-header flag */
1494
1495 xfer->flags_int.control_hdr = 1;
1496
1497 /* get data length */
1498
1499 len = (xfer->sumlen - sizeof(struct usb_device_request));
1500 }
1501
1502 /* check if there is a length mismatch */
1503
1504 if (len > xfer->flags_int.control_rem) {
1505 DPRINTFN(0, "Length (%d) greater than "
1506 "remaining length (%d)\n", len,
1507 xfer->flags_int.control_rem);
1508 goto error;
1509 }
1510 /* check if we are doing a short transfer */
1511
1512 if (xfer->flags.force_short_xfer) {
1513 xfer->flags_int.control_rem = 0;
1514 } else {
1515 if ((len != xfer->max_data_length) &&
1516 (len != xfer->flags_int.control_rem) &&
1517 (xfer->nframes != 1)) {
1518 DPRINTFN(0, "Short control transfer without "
1519 "force_short_xfer set\n");
1520 goto error;
1521 }
1522 xfer->flags_int.control_rem -= len;
1523 }
1524
1525 /* the status part is executed when "control_act" is 0 */
1526
1527 if ((xfer->flags_int.control_rem > 0) ||
1528 (xfer->flags.manual_status)) {
1529 /* don't execute the STATUS stage yet */
1530 xfer->flags_int.control_act = 1;
1531
1532 /* sanity check */
1533 if ((!xfer->flags_int.control_hdr) &&
1534 (xfer->nframes == 1)) {
1535 /*
1536 * This is not a valid operation!
1537 */
1538 DPRINTFN(0, "Invalid parameter "
1539 "combination\n");
1540 goto error;
1541 }
1542 } else {
1543 /* time to execute the STATUS stage */
1544 xfer->flags_int.control_act = 0;
1545 }
1546 return (0); /* success */
1547
1548error:
1549 return (1); /* failure */
1550}
1551
1552/*------------------------------------------------------------------------*
1553 * usbd_transfer_submit - start USB hardware for the given transfer
1554 *
1555 * This function should only be called from the USB callback.
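 *
 * Minimal callback sketch showing the typical call site; the
 * "my_callback" name is a placeholder, not an API defined here:
 *
 *	static void
 *	my_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		switch (USB_GET_STATE(xfer)) {
 *		case USB_ST_TRANSFERRED:
 *			... process the transferred data ...
 *			... then fall through and re-submit ...
 *		case USB_ST_SETUP:
 *			usbd_xfer_set_frame_len(xfer, 0,
 *			    usbd_xfer_max_len(xfer));
 *			usbd_transfer_submit(xfer);
 *			break;
 *		default:
 *			... handle "error", e.g. stop on cancel ...
 *			break;
 *		}
 *	}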
1556 *------------------------------------------------------------------------*/
1557void
1558usbd_transfer_submit(struct usb_xfer *xfer)
1559{
1560 struct usb_xfer_root *info;
1561 struct usb_bus *bus;
1562 usb_frcount_t x;
1563
1564 info = xfer->xroot;
1565 bus = info->bus;
1566
1567 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1568 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1569 "read" : "write");
1570
1571#ifdef USB_DEBUG
1572 if (USB_DEBUG_VAR > 0) {
1573 USB_BUS_LOCK(bus);
1574
1575 usb_dump_endpoint(xfer->endpoint);
1576
1577 USB_BUS_UNLOCK(bus);
1578 }
1579#endif
1580
1581 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1582 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1583
1584 /* Only open the USB transfer once! */
1585 if (!xfer->flags_int.open) {
1586 xfer->flags_int.open = 1;
1587
1588 DPRINTF("open\n");
1589
1590 USB_BUS_LOCK(bus);
1591 (xfer->endpoint->methods->open) (xfer);
1592 USB_BUS_UNLOCK(bus);
1593 }
1594 /* set "transferring" flag */
1595 xfer->flags_int.transferring = 1;
1596
1597#if USB_HAVE_POWERD
1598 /* increment power reference */
1599 usbd_transfer_power_ref(xfer, 1);
1600#endif
1601 /*
1602 * Check if the transfer is waiting on a queue, most
1603 * frequently the "done_q":
1604 */
1605 if (xfer->wait_queue) {
1606 USB_BUS_LOCK(bus);
1607 usbd_transfer_dequeue(xfer);
1608 USB_BUS_UNLOCK(bus);
1609 }
1610 /* clear "did_dma_delay" flag */
1611 xfer->flags_int.did_dma_delay = 0;
1612
1613 /* clear "did_close" flag */
1614 xfer->flags_int.did_close = 0;
1615
1616#if USB_HAVE_BUSDMA
1617 /* clear "bdma_setup" flag */
1618 xfer->flags_int.bdma_setup = 0;
1619#endif
1620 /* by default we cannot cancel any USB transfer immediately */
1621 xfer->flags_int.can_cancel_immed = 0;
1622
1623 /* clear lengths and frame counts by default */
1624 xfer->sumlen = 0;
1625 xfer->actlen = 0;
1626 xfer->aframes = 0;
1627
1628 /* clear any previous errors */
1629 xfer->error = 0;
1630
1631 /* Check if the device is still alive */
1632 if (info->udev->state < USB_STATE_POWERED) {
1633 USB_BUS_LOCK(bus);
1634 /*
1635 * Must return the cancelled error code, else
1636 * device drivers can hang.
1637 */
1638 usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1639 USB_BUS_UNLOCK(bus);
1640 return;
1641 }
1642
1643 /* sanity check */
1644 if (xfer->nframes == 0) {
1645 if (xfer->flags.stall_pipe) {
1646 /*
1647 * Special case - want to stall without transferring
1648 * any data:
1649 */
1650 DPRINTF("xfer=%p nframes=0: stall "
1651 "or clear stall!\n", xfer);
1652 USB_BUS_LOCK(bus);
1653 xfer->flags_int.can_cancel_immed = 1;
1654 /* start the transfer */
1655 usb_command_wrapper(&xfer->endpoint->
1656 endpoint_q[xfer->stream_id], xfer);
1657 USB_BUS_UNLOCK(bus);
1658 return;
1659 }
1660 USB_BUS_LOCK(bus);
1661 usbd_transfer_done(xfer, USB_ERR_INVAL);
1662 USB_BUS_UNLOCK(bus);
1663 return;
1664 }
1665 /* compute some variables */
1666
1667 for (x = 0; x != xfer->nframes; x++) {
1668 /* make a copy of the frlengths[] */
1669 xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1670 /* compute total transfer length */
1671 xfer->sumlen += xfer->frlengths[x];
1672 if (xfer->sumlen < xfer->frlengths[x]) {
1673 /* length wrapped around */
1674 USB_BUS_LOCK(bus);
1675 usbd_transfer_done(xfer, USB_ERR_INVAL);
1676 USB_BUS_UNLOCK(bus);
1677 return;
1678 }
1679 }
1680
1681 /* clear some internal flags */
1682
1683 xfer->flags_int.short_xfer_ok = 0;
1684 xfer->flags_int.short_frames_ok = 0;
1685
1686 /* check if this is a control transfer */
1687
1688 if (xfer->flags_int.control_xfr) {
1689
1690 if (usbd_setup_ctrl_transfer(xfer)) {
1691 USB_BUS_LOCK(bus);
1692 usbd_transfer_done(xfer, USB_ERR_STALLED);
1693 USB_BUS_UNLOCK(bus);
1694 return;
1695 }
1696 }
1697 /*
1698 * Setup filtered version of some transfer flags,
1699 * in case of data read direction
1700 */
1701 if (USB_GET_DATA_ISREAD(xfer)) {
1702
1703 if (xfer->flags.short_frames_ok) {
1704 xfer->flags_int.short_xfer_ok = 1;
1705 xfer->flags_int.short_frames_ok = 1;
1706 } else if (xfer->flags.short_xfer_ok) {
1707 xfer->flags_int.short_xfer_ok = 1;
1708
1709 /* check for control transfer */
1710 if (xfer->flags_int.control_xfr) {
1711 /*
1712 * 1) Control transfers do not support
1713 * reception of multiple short USB
1714 * frames in host mode and device side
1715 * mode, with exception of:
1716 *
1717 * 2) Due to sometimes buggy device
1718 * side firmware we need to do a
1719 * STATUS stage in case of short
1720 * control transfers in USB host mode.
1721 * The STATUS stage then becomes the
1722 * "alt_next" to the DATA stage.
1723 */
1724 xfer->flags_int.short_frames_ok = 1;
1725 }
1726 }
1727 }
1728 /*
1729 * Check if BUS-DMA support is enabled and try to load virtual
1730 * buffers into DMA, if any:
1731 */
1732#if USB_HAVE_BUSDMA
1733 if (xfer->flags_int.bdma_enable) {
1734 /* insert the USB transfer last in the BUS-DMA queue */
1735 usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1736 return;
1737 }
1738#endif
1739 /*
1740 * Enter the USB transfer into the Host Controller or
1741 * Device Controller schedule:
1742 */
1743 usbd_pipe_enter(xfer);
1744}
1745
1746/*------------------------------------------------------------------------*
1747 * usbd_pipe_enter - factored out code
1748 *------------------------------------------------------------------------*/
1749void
1750usbd_pipe_enter(struct usb_xfer *xfer)
1751{
1752 struct usb_endpoint *ep;
1753
1754 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1755
1756 USB_BUS_LOCK(xfer->xroot->bus);
1757
1758 ep = xfer->endpoint;
1759
1760 DPRINTF("enter\n");
1761
1762 /* the transfer can now be cancelled */
1763 xfer->flags_int.can_cancel_immed = 1;
1764
1765 /* enter the transfer */
1766 (ep->methods->enter) (xfer);
1767
1768 /* check for transfer error */
1769 if (xfer->error) {
1770 /* some error has happened */
1771 usbd_transfer_done(xfer, 0);
1772 USB_BUS_UNLOCK(xfer->xroot->bus);
1773 return;
1774 }
1775
1776 /* start the transfer */
1777 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer);
1778 USB_BUS_UNLOCK(xfer->xroot->bus);
1779}
1780
1781/*------------------------------------------------------------------------*
1782 * usbd_transfer_start - start an USB transfer
1783 *
1784 * NOTE: Calling this function more than one time will only
1785 * result in a single transfer start, until the USB transfer
1786 * completes.
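 *
 * Sketch, with a hypothetical "sc"; the private transfer mutex must
 * be held by the caller:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	usbd_transfer_start(sc->sc_xfer[0]);
 *	mtx_unlock(&sc->sc_mtx);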
1787 *------------------------------------------------------------------------*/
1788void
1789usbd_transfer_start(struct usb_xfer *xfer)
1790{
1791 if (xfer == NULL) {
1792 /* transfer is gone */
1793 return;
1794 }
1795 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1796
1797 /* mark the USB transfer started */
1798
1799 if (!xfer->flags_int.started) {
1800 /* lock the BUS lock to avoid races updating flags_int */
1801 USB_BUS_LOCK(xfer->xroot->bus);
1802 xfer->flags_int.started = 1;
1803 USB_BUS_UNLOCK(xfer->xroot->bus);
1804 }
1805 /* check if the USB transfer callback is already transferring */
1806
1807 if (xfer->flags_int.transferring) {
1808 return;
1809 }
1810 USB_BUS_LOCK(xfer->xroot->bus);
1811 /* call the USB transfer callback */
1812 usbd_callback_ss_done_defer(xfer);
1813 USB_BUS_UNLOCK(xfer->xroot->bus);
1814}
1815
1816/*------------------------------------------------------------------------*
1817 * usbd_transfer_stop - stop an USB transfer
1818 *
1819 * NOTE: Calling this function more than one time will only
1820 * result in a single transfer stop.
1821 * NOTE: When this function returns it is not safe to free nor
1822 * reuse any DMA buffers. See "usbd_transfer_drain()".
1823 *------------------------------------------------------------------------*/
1824void
1825usbd_transfer_stop(struct usb_xfer *xfer)
1826{
1827 struct usb_endpoint *ep;
1828
1829 if (xfer == NULL) {
1830 /* transfer is gone */
1831 return;
1832 }
1833 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1834
1835 /* check if the USB transfer was ever opened */
1836
1837 if (!xfer->flags_int.open) {
1838 if (xfer->flags_int.started) {
1839 /* nothing to do except clearing the "started" flag */
1840 /* lock the BUS lock to avoid races updating flags_int */
1841 USB_BUS_LOCK(xfer->xroot->bus);
1842 xfer->flags_int.started = 0;
1843 USB_BUS_UNLOCK(xfer->xroot->bus);
1844 }
1845 return;
1846 }
1847 /* try to stop the current USB transfer */
1848
1849 USB_BUS_LOCK(xfer->xroot->bus);
1850 /* override any previous error */
1851 xfer->error = USB_ERR_CANCELLED;
1852
1853 /*
1854 * Clear "open" and "started" when both private and USB lock
1855 * is locked so that we don't get a race updating "flags_int"
1856 */
1857 xfer->flags_int.open = 0;
1858 xfer->flags_int.started = 0;
1859
1860 /*
1861 * Check if we can cancel the USB transfer immediately.
1862 */
1863 if (xfer->flags_int.transferring) {
1864 if (xfer->flags_int.can_cancel_immed &&
1865 (!xfer->flags_int.did_close)) {
1866 DPRINTF("close\n");
1867 /*
1868 * The following will lead to an USB_ERR_CANCELLED
1869 * error code being passed to the USB callback.
1870 */
1871 (xfer->endpoint->methods->close) (xfer);
1872 /* only close once */
1873 xfer->flags_int.did_close = 1;
1874 } else {
1875 /* need to wait for the next done callback */
1876 }
1877 } else {
1878 DPRINTF("close\n");
1879
1880 /* close here and now */
1881 (xfer->endpoint->methods->close) (xfer);
1882
1883 /*
1884 * Any additional DMA delay is done by
1885 * "usbd_transfer_unsetup()".
1886 */
1887
1888 /*
1889 * Special case. Check if we need to restart a blocked
1890 * endpoint.
1891 */
1892 ep = xfer->endpoint;
1893
1894 /*
1895 * If the current USB transfer is completing we need
1896 * to start the next one:
1897 */
1898 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
1899 usb_command_wrapper(
1900 &ep->endpoint_q[xfer->stream_id], NULL);
1901 }
1902 }
1903
1904 USB_BUS_UNLOCK(xfer->xroot->bus);
1905}
1906
1907/*------------------------------------------------------------------------*
1908 * usbd_transfer_pending
1909 *
1910 * This function will check if an USB transfer is pending, which is a
1911 * little bit complicated!
1912 * Return values:
1913 * 0: Not pending
1914 * 1: Pending: The USB transfer will receive a callback in the future.
1915 *------------------------------------------------------------------------*/
1916uint8_t
1917usbd_transfer_pending(struct usb_xfer *xfer)
1918{
1919 struct usb_xfer_root *info;
1920 struct usb_xfer_queue *pq;
1921
1922 if (xfer == NULL) {
1923 /* transfer is gone */
1924 return (0);
1925 }
1926 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1927
1928 if (xfer->flags_int.transferring) {
1929 /* trivial case */
1930 return (1);
1931 }
1932 USB_BUS_LOCK(xfer->xroot->bus);
1933 if (xfer->wait_queue) {
1934 /* we are waiting on a queue somewhere */
1935 USB_BUS_UNLOCK(xfer->xroot->bus);
1936 return (1);
1937 }
1938 info = xfer->xroot;
1939 pq = &info->done_q;
1940
1941 if (pq->curr == xfer) {
1942 /* we are currently scheduled for callback */
1943 USB_BUS_UNLOCK(xfer->xroot->bus);
1944 return (1);
1945 }
1946 /* we are not pending */
1947 USB_BUS_UNLOCK(xfer->xroot->bus);
1948 return (0);
1949}
1950
1951/*------------------------------------------------------------------------*
1952 * usbd_transfer_drain
1953 *
1954 * This function will stop the USB transfer and wait for any
1955 * additional BUS-DMA and HW-DMA operations to complete. Buffers that
1956 * are loaded into DMA can safely be freed or reused after this
1957 * function has returned.
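 *
 * Sketch, with a hypothetical "sc"; the private transfer mutex must
 * not be held when calling this function:
 *
 *	usbd_transfer_drain(sc->sc_xfer[0]);
 *	... DMA buffers may now be freed or reused ...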
1958 *------------------------------------------------------------------------*/
1959void
1960usbd_transfer_drain(struct usb_xfer *xfer)
1961{
1962 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1963 "usbd_transfer_drain can sleep!");
1964
1965 if (xfer == NULL) {
1966 /* transfer is gone */
1967 return;
1968 }
1969 if (xfer->xroot->xfer_mtx != &Giant) {
1970 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
1971 }
1972 USB_XFER_LOCK(xfer);
1973
1974 usbd_transfer_stop(xfer);
1975
1976 while (usbd_transfer_pending(xfer) ||
1977 xfer->flags_int.doing_callback) {
1978
1979 /*
1980 * The callback is allowed to drop its
1981 * transfer mutex. In that case checking only
1982 * "usbd_transfer_pending()" is not enough to tell if
1983 * the USB transfer is fully drained. We also need to
1984 * check the internal "doing_callback" flag.
1985 */
1986 xfer->flags_int.draining = 1;
1987
1988 /*
1989 * Wait until the current outstanding USB
1990 * transfer is complete !
1991 */
1992 cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
1993 }
1994 USB_XFER_UNLOCK(xfer);
1995}
1996
1997struct usb_page_cache *
1998usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
1999{
2000 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2001
2002 return (&xfer->frbuffers[frindex]);
2003}
2004
2005void *
2006usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex)
2007{
2008 struct usb_page_search page_info;
2009
2010 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2011
2012 usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info);
2013 return (page_info.buffer);
2014}
2015
2016/*------------------------------------------------------------------------*
2017 * usbd_xfer_get_fps_shift
2018 *
2019 * The following function is only useful for isochronous transfers. It
2020 * returns how many times the frame execution rate has been shifted
2021 * down.
2022 *
2023 * Return value:
2024 * Success: 0..3
2025 * Failure: 0
2026 *------------------------------------------------------------------------*/
2027uint8_t
2028usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2029{
2030 return (xfer->fps_shift);
2031}
2032
2033usb_frlength_t
2034usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2035{
2036 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2037
2038 return (xfer->frlengths[frindex]);
2039}
2040
2041/*------------------------------------------------------------------------*
2042 * usbd_xfer_set_frame_data
2043 *
2044 * This function sets the pointer of the buffer that should
2045 * be loaded directly into DMA for the given USB frame. Passing "ptr"
2046 * equal to NULL while the corresponding "frlength" is greater
2047 * than zero gives undefined results!
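 *
 * Sketch, for illustration only; assumes a transfer set up with the
 * "ext_buffer" flag and an externally allocated buffer "buf" holding
 * "len" bytes:
 *
 *	usbd_xfer_set_frame_data(xfer, 0, buf, len);
 *	usbd_transfer_submit(xfer);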
2048 *------------------------------------------------------------------------*/
2049void
2050usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2051 void *ptr, usb_frlength_t len)
2052{
2053 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2054
2055 /* set virtual address to load and length */
2056 xfer->frbuffers[frindex].buffer = ptr;
2057 usbd_xfer_set_frame_len(xfer, frindex, len);
2058}
2059
2060void
2061usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2062 void **ptr, int *len)
2063{
2064 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2065
2066 if (ptr != NULL)
2067 *ptr = xfer->frbuffers[frindex].buffer;
2068 if (len != NULL)
2069 *len = xfer->frlengths[frindex];
2070}
2071
2072/*------------------------------------------------------------------------*
2073 * usbd_xfer_old_frame_length
2074 *
2075 * This function returns the frame length of the given frame at the
2076 * time the transfer was submitted. This function can be used to
2077 * compute the starting data pointer of the next isochronous frame
2078 * when an isochronous transfer has completed.
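 *
 * Sketch, for illustration only, walking the completed isochronous
 * frames while keeping track of where each frame started; "n" is
 * the frame count, e.g. obtained from "usbd_xfer_status()":
 *
 *	offset = 0;
 *	for (x = 0; x != n; x++) {
 *		... frame "x" holds usbd_xfer_frame_len(xfer, x)
 *		    bytes starting at "offset" ...
 *		offset += usbd_xfer_old_frame_length(xfer, x);
 *	}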
2079 *------------------------------------------------------------------------*/
2080usb_frlength_t
2081usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2082{
2083 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2084
2085 return (xfer->frlengths[frindex + xfer->max_frame_count]);
2086}
2087
2088void
2089usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2090 int *nframes)
2091{
2092 if (actlen != NULL)
2093 *actlen = xfer->actlen;
2094 if (sumlen != NULL)
2095 *sumlen = xfer->sumlen;
2096 if (aframes != NULL)
2097 *aframes = xfer->aframes;
2098 if (nframes != NULL)
2099 *nframes = xfer->nframes;
2100}
2101
2102/*------------------------------------------------------------------------*
2103 * usbd_xfer_set_frame_offset
2104 *
2105 * This function sets the frame data buffer offset relative to the beginning
2106 * of the USB DMA buffer allocated for this USB transfer.
2107 *------------------------------------------------------------------------*/
2108void
2109usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2110 usb_frcount_t frindex)
2111{
2112 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2113 "when the USB buffer is external\n"));
2114 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2115
2116 /* set virtual address to load */
2117 xfer->frbuffers[frindex].buffer =
2118 USB_ADD_BYTES(xfer->local_buffer, offset);
2119}
2120
2121void
2122usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2123{
2124 xfer->interval = i;
2125}
2126
2127void
2128usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2129{
2130 xfer->timeout = t;
2131}
2132
2133void
2134usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2135{
2136 xfer->nframes = n;
2137}
2138
2139usb_frcount_t
2140usbd_xfer_max_frames(struct usb_xfer *xfer)
2141{
2142 return (xfer->max_frame_count);
2143}
2144
2145usb_frlength_t
2146usbd_xfer_max_len(struct usb_xfer *xfer)
2147{
2148 return (xfer->max_data_length);
2149}
2150
2151usb_frlength_t
2152usbd_xfer_max_framelen(struct usb_xfer *xfer)
2153{
2154 return (xfer->max_frame_size);
2155}
2156
2157void
2158usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2159 usb_frlength_t len)
2160{
2161 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2162
2163 xfer->frlengths[frindex] = len;
2164}
2165
2166/*------------------------------------------------------------------------*
2167 * usb_callback_proc - factored out code
2168 *
2169 * This function performs USB callbacks.
2170 *------------------------------------------------------------------------*/
2171static void
2172usb_callback_proc(struct usb_proc_msg *_pm)
2173{
2174 struct usb_done_msg *pm = (void *)_pm;
2175 struct usb_xfer_root *info = pm->xroot;
2176
2177 /* Change locking order */
2178 USB_BUS_UNLOCK(info->bus);
2179
2180 /*
2181 * We exploit the fact that the mutex is the same for all
2182 * callbacks that will be called from this thread:
2183 */
2184 mtx_lock(info->xfer_mtx);
2185 USB_BUS_LOCK(info->bus);
2186
2187 /* Continue where we lost track */
2188 usb_command_wrapper(&info->done_q,
2189 info->done_q.curr);
2190
2191 mtx_unlock(info->xfer_mtx);
2192}
2193
2194/*------------------------------------------------------------------------*
2195 * usbd_callback_ss_done_defer
2196 *
2197 * This function will defer the start, stop and done callback to the
2198 * correct thread.
2199 *------------------------------------------------------------------------*/
2200static void
2201usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2202{
2203 struct usb_xfer_root *info = xfer->xroot;
2204 struct usb_xfer_queue *pq = &info->done_q;
2205
2206 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2207
2208 if (pq->curr != xfer) {
2209 usbd_transfer_enqueue(pq, xfer);
2210 }
2211 if (!pq->recurse_1) {
2212
2213 /*
2214 * We have to postpone the callback because we would
2215 * get a Lock Order Reversal, LOR, if we tried to
2216 * proceed!
2217 */
2218 if (usb_proc_msignal(info->done_p,
2219 &info->done_m[0], &info->done_m[1])) {
2220 /* ignore */
2221 }
2222 } else {
2223 /* clear second recurse flag */
2224 pq->recurse_2 = 0;
2225 }
2226 return;
2227
2228}
2229
2230/*------------------------------------------------------------------------*
2231 * usbd_callback_wrapper
2232 *
2233 * This is a wrapper for USB callbacks. This wrapper does some
2234 * auto-magic things like figuring out if we can call the callback
2235 * directly from the current context or if we need to wakeup the
2236 * interrupt process.
2237 *------------------------------------------------------------------------*/
2238static void
2239usbd_callback_wrapper(struct usb_xfer_queue *pq)
2240{
2241 struct usb_xfer *xfer = pq->curr;
2242 struct usb_xfer_root *info = xfer->xroot;
2243
2244 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2245 if (!mtx_owned(info->xfer_mtx) && !SCHEDULER_STOPPED()) {
2246 /*
2247 * Cases that end up here:
2248 *
2249 * 5) HW interrupt done callback or other source.
2250 */
2251 DPRINTFN(3, "case 5\n");
2252
2253 /*
2254 * We have to postpone the callback because we would
2255 * get a Lock Order Reversal, LOR, if we tried to
2256 * proceed!
2257 */
2258 if (usb_proc_msignal(info->done_p,
2259 &info->done_m[0], &info->done_m[1])) {
2260 /* ignore */
2261 }
2262 return;
2263 }
2264 /*
2265 * Cases that end up here:
2266 *
2267 * 1) We are starting a transfer
2268 * 2) We are prematurely calling back a transfer
2269 * 3) We are stopping a transfer
2270 * 4) We are doing an ordinary callback
2271 */
2272 DPRINTFN(3, "case 1-4\n");
2273 /* get next USB transfer in the queue */
2274 info->done_q.curr = NULL;
2275
2276 /* set flag in case of drain */
2277 xfer->flags_int.doing_callback = 1;
2278
2279 USB_BUS_UNLOCK(info->bus);
2280 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2281
2282 /* set correct USB state for callback */
2283 if (!xfer->flags_int.transferring) {
2284 xfer->usb_state = USB_ST_SETUP;
2285 if (!xfer->flags_int.started) {
2286 /* we got stopped before we even got started */
2287 USB_BUS_LOCK(info->bus);
2288 goto done;
2289 }
2290 } else {
2291
2292 if (usbd_callback_wrapper_sub(xfer)) {
2293 /* the callback has been deferred */
2294 USB_BUS_LOCK(info->bus);
2295 goto done;
2296 }
2297#if USB_HAVE_POWERD
2298 /* decrement power reference */
2299 usbd_transfer_power_ref(xfer, -1);
2300#endif
2301 xfer->flags_int.transferring = 0;
2302
2303 if (xfer->error) {
2304 xfer->usb_state = USB_ST_ERROR;
2305 } else {
2306 /* set transferred state */
2307 xfer->usb_state = USB_ST_TRANSFERRED;
2308#if USB_HAVE_BUSDMA
2309 /* sync DMA memory, if any */
2310 if (xfer->flags_int.bdma_enable &&
2311 (!xfer->flags_int.bdma_no_post_sync)) {
2312 usb_bdma_post_sync(xfer);
2313 }
2314#endif
2315 }
2316 }
2317
2318#if USB_HAVE_PF
2319 if (xfer->usb_state != USB_ST_SETUP)
2320 usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2321#endif
2322 /* call processing routine */
2323 (xfer->callback) (xfer, xfer->error);
2324
2325 /* pickup the USB mutex again */
2326 USB_BUS_LOCK(info->bus);
2327
2328 /*
2329 * Check if we got started after we got cancelled, but
2330 * before we managed to do the callback.
2331 */
2332 if ((!xfer->flags_int.open) &&
2333 (xfer->flags_int.started) &&
2334 (xfer->usb_state == USB_ST_ERROR)) {
2335 /* clear flag in case of drain */
2336 xfer->flags_int.doing_callback = 0;
2337 /* try to loop, but not recursively */
2338 usb_command_wrapper(&info->done_q, xfer);
2339 return;
2340 }
2341
2342done:
2343 /* clear flag in case of drain */
2344 xfer->flags_int.doing_callback = 0;
2345
2346 /*
2347 * Check if we are draining.
2348 */
2349 if (xfer->flags_int.draining &&
2350 (!xfer->flags_int.transferring)) {
2351 /* "usbd_transfer_drain()" is waiting for end of transfer */
2352 xfer->flags_int.draining = 0;
2353 cv_broadcast(&info->cv_drain);
2354 }
2355
2356 /* do the next callback, if any */
2357 usb_command_wrapper(&info->done_q,
2358 info->done_q.curr);
2359}
2360
2361/*------------------------------------------------------------------------*
2362 * usb_dma_delay_done_cb
2363 *
2364 * This function is called when the DMA delay has elapsed, and
2365 * will make sure that the callback is called to complete the USB
2366 * transfer. This code path is usually only used when there is an USB
2367 * error like USB_ERR_CANCELLED.
2368 *------------------------------------------------------------------------*/
2369void
2370usb_dma_delay_done_cb(struct usb_xfer *xfer)
2371{
2372 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2373
2374 DPRINTFN(3, "Completed %p\n", xfer);
2375
2376 /* queue callback for execution, again */
2377 usbd_transfer_done(xfer, 0);
2378}
2379
2380/*------------------------------------------------------------------------*
2381 * usbd_transfer_dequeue
2382 *
2383 * - This function is used to remove an USB transfer from a USB
2384 * transfer queue.
2385 *
2386 * - This function can be called multiple times in a row.
2387 *------------------------------------------------------------------------*/
2388void
2389usbd_transfer_dequeue(struct usb_xfer *xfer)
2390{
2391 struct usb_xfer_queue *pq;
2392
2393 pq = xfer->wait_queue;
2394 if (pq) {
2395 TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2396 xfer->wait_queue = NULL;
2397 }
2398}
2399
2400/*------------------------------------------------------------------------*
2401 * usbd_transfer_enqueue
2402 *
2403 * - This function is used to insert an USB transfer into a USB
2404 * transfer queue.
2405 *
2406 * - This function can be called multiple times in a row.
2407 *------------------------------------------------------------------------*/
2408void
2409usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2410{
2411 /*
2412 * Insert the USB transfer into the queue, if it is not
2413 * already on a USB transfer queue:
2414 */
2415 if (xfer->wait_queue == NULL) {
2416 xfer->wait_queue = pq;
2417 TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2418 }
2419}
2420
2421/*------------------------------------------------------------------------*
2422 * usbd_transfer_done
2423 *
2424 * - This function is used to remove an USB transfer from the busdma,
2425 * pipe or interrupt queue.
2426 *
2427 * - This function is used to queue the USB transfer on the done
2428 * queue.
2429 *
2430 * - This function is used to stop any USB transfer timeouts.
2431 *------------------------------------------------------------------------*/
2432void
2433usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2434{
2435 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2436
2437 DPRINTF("err=%s\n", usbd_errstr(error));
2438
2439 /*
2440 * If we are not transferring then just return.
2441 * This can happen during transfer cancel.
2442 */
2443 if (!xfer->flags_int.transferring) {
2444 DPRINTF("not transferring\n");
2445 /* end of control transfer, if any */
2446 xfer->flags_int.control_act = 0;
2447 return;
2448 }
2449 /* only set transfer error if not already set */
2450 if (!xfer->error) {
2451 xfer->error = error;
2452 }
2453 /* stop any callouts */
2454 usb_callout_stop(&xfer->timeout_handle);
2455
2456 /*
2457 * If we are waiting on a queue, just remove the USB transfer
2458 * from the queue, if any. We should have the required locks
2459 * locked to do the remove when this function is called.
2460 */
2461 usbd_transfer_dequeue(xfer);
2462
2463#if USB_HAVE_BUSDMA
2464 if (mtx_owned(xfer->xroot->xfer_mtx)) {
2465 struct usb_xfer_queue *pq;
2466
2467 /*
2468 * If the private USB lock is not locked, then we assume
2469 * that the BUS-DMA load stage has been passed:
2470 */
2471 pq = &xfer->xroot->dma_q;
2472
2473 if (pq->curr == xfer) {
2474 /* start the next BUS-DMA load, if any */
2475 usb_command_wrapper(pq, NULL);
2476 }
2477 }
2478#endif
2479 /* keep some statistics */
2480 if (xfer->error) {
2481 xfer->xroot->bus->stats_err.uds_requests
2482 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2483 } else {
2484 xfer->xroot->bus->stats_ok.uds_requests
2485 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2486 }
2487
2488 /* call the USB transfer callback */
2489 usbd_callback_ss_done_defer(xfer);
2490}
2491
2492/*------------------------------------------------------------------------*
2493 * usbd_transfer_start_cb
2494 *
2495 * This function is called to start the USB transfer when
2496 * "xfer->interval" is greater than zero, and and the endpoint type is
2497 * BULK or CONTROL.
2498 *------------------------------------------------------------------------*/
2499static void
2500usbd_transfer_start_cb(void *arg)
2501{
2502 struct usb_xfer *xfer = arg;
2503 struct usb_endpoint *ep = xfer->endpoint;
2504
2505 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2506
2507 DPRINTF("start\n");
2508
2509#if USB_HAVE_PF
2510 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2511#endif
2512
2513 /* the transfer can now be cancelled */
2514 xfer->flags_int.can_cancel_immed = 1;
2515
2516 /* start USB transfer, if no error */
2517 if (xfer->error == 0)
2518 (ep->methods->start) (xfer);
2519
2520 /* check for transfer error */
2521 if (xfer->error) {
2522 /* some error has happened */
2523 usbd_transfer_done(xfer, 0);
2524 }
2525}
2526
2527/*------------------------------------------------------------------------*
2528 * usbd_xfer_set_stall
2529 *
2530 * This function is used to set the stall flag outside the
2531 * callback. This function is NULL safe.
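 *
 * Sketch, for illustration only, with a hypothetical "sc"; called
 * with the private transfer mutex held, for example before
 * restarting a pipe that should report a STALL first:
 *
 *	usbd_xfer_set_stall(sc->sc_xfer[0]);
 *	usbd_transfer_start(sc->sc_xfer[0]);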
2532 *------------------------------------------------------------------------*/
2533void
2534usbd_xfer_set_stall(struct usb_xfer *xfer)
2535{
2536 if (xfer == NULL) {
2537 /* tearing down */
2538 return;
2539 }
2540 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2541
2542 /* avoid any races by locking the USB mutex */
2543 USB_BUS_LOCK(xfer->xroot->bus);
2544 xfer->flags.stall_pipe = 1;
2545 USB_BUS_UNLOCK(xfer->xroot->bus);
2546}
2547
2548int
2549usbd_xfer_is_stalled(struct usb_xfer *xfer)
2550{
2551 return (xfer->endpoint->is_stalled);
2552}
2553
2554/*------------------------------------------------------------------------*
2555 * usbd_transfer_clear_stall
2556 *
2557 * This function is used to clear the stall flag outside the
2558 * callback. This function is NULL safe.
2559 *------------------------------------------------------------------------*/
2560void
2561usbd_transfer_clear_stall(struct usb_xfer *xfer)
2562{
2563 if (xfer == NULL) {
2564 /* tearing down */
2565 return;
2566 }
2567 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2568
2569 /* avoid any races by locking the USB mutex */
2570 USB_BUS_LOCK(xfer->xroot->bus);
2571
2572 xfer->flags.stall_pipe = 0;
2573
2574 USB_BUS_UNLOCK(xfer->xroot->bus);
2575}
2576
2577/*------------------------------------------------------------------------*
2578 * usbd_pipe_start
2579 *
2580 * This function is used to add an USB transfer to the pipe transfer list.
2581 *------------------------------------------------------------------------*/
2582void
2583usbd_pipe_start(struct usb_xfer_queue *pq)
2584{
2585 struct usb_endpoint *ep;
2586 struct usb_xfer *xfer;
2587 uint8_t type;
2588
2589 xfer = pq->curr;
2590 ep = xfer->endpoint;
2591
2592 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2593
2594 /*
2595 * If the endpoint is already stalled we do nothing !
2596 */
2597 if (ep->is_stalled) {
2598 return;
2599 }
2600 /*
2601 * Check if we are supposed to stall the endpoint:
2602 */
2603 if (xfer->flags.stall_pipe) {
2604 struct usb_device *udev;
2605 struct usb_xfer_root *info;
2606
2607 /* clear stall command */
2608 xfer->flags.stall_pipe = 0;
2609
2610 /* get pointer to USB device */
2611 info = xfer->xroot;
2612 udev = info->udev;
2613
2614 /*
2615 * Only stall BULK and INTERRUPT endpoints.
2616 */
2617 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2618 if ((type == UE_BULK) ||
2619 (type == UE_INTERRUPT)) {
2620 uint8_t did_stall;
2621
2622 did_stall = 1;
2623
2624 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2625 (udev->bus->methods->set_stall) (
2626 udev, ep, &did_stall);
2627 } else if (udev->ctrl_xfer[1]) {
2628 info = udev->ctrl_xfer[1]->xroot;
2629 usb_proc_msignal(
2630 USB_BUS_NON_GIANT_PROC(info->bus),
2631 &udev->cs_msg[0], &udev->cs_msg[1]);
2632 } else {
2633 /* should not happen */
2634 DPRINTFN(0, "No stall handler\n");
2635 }
2636 /*
2637 * Check if we should stall. Some USB hardware
2638 * handles set- and clear-stall in hardware.
2639 */
2640 if (did_stall) {
2641 /*
2642 * The transfer will be continued when
2643 * the clear-stall control endpoint
2644 * message is received.
2645 */
2646 ep->is_stalled = 1;
2647 return;
2648 }
2649 } else if (type == UE_ISOCHRONOUS) {
2650
2651 /*
2652 * Make sure any FIFO overflow or other FIFO
2653 * error conditions go away by resetting the
2654 * endpoint FIFO through the clear stall
2655 * method.
2656 */
2657 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2658 (udev->bus->methods->clear_stall) (udev, ep);
2659 }
2660 }
2661 }
2662 /* Set or clear stall complete - special case */
2663 if (xfer->nframes == 0) {
2664 /* we are complete */
2665 xfer->aframes = 0;
2666 usbd_transfer_done(xfer, 0);
2667 return;
2668 }
2669 /*
2670 * Handled cases:
2671 *
2672 * 1) Start the first transfer queued.
2673 *
2674 * 2) Re-start the current USB transfer.
2675 */
2676 /*
2677 * Check if there should be any
2678	 * pre-transfer start delay:
2679 */
2680 if (xfer->interval > 0) {
2681 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2682 if ((type == UE_BULK) ||
2683 (type == UE_CONTROL)) {
2684 usbd_transfer_timeout_ms(xfer,
2685 &usbd_transfer_start_cb,
2686 xfer->interval);
2687 return;
2688 }
2689 }
2690 DPRINTF("start\n");
2691
2692#if USB_HAVE_PF
2693 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2694#endif
2695 /* the transfer can now be cancelled */
2696 xfer->flags_int.can_cancel_immed = 1;
2697
2698 /* start USB transfer, if no error */
2699 if (xfer->error == 0)
2700 (ep->methods->start) (xfer);
2701
2702 /* check for transfer error */
2703 if (xfer->error) {
2704 /* some error has happened */
2705 usbd_transfer_done(xfer, 0);
2706 }
2707}
2708
2709/*------------------------------------------------------------------------*
2710 * usbd_transfer_timeout_ms
2711 *
2712 * This function is used to set up a timeout on the given USB
2713 * transfer. Once the timeout has been armed, the callback given by
2714 * "cb" will get called after "ms" milliseconds.
2715 *------------------------------------------------------------------------*/
2716void
2717usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2718 void (*cb) (void *arg), usb_timeout_t ms)
2719{
2720 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2721
2722 /* defer delay */
2723 usb_callout_reset(&xfer->timeout_handle,
2724 USB_MS_TO_TICKS(ms), cb, xfer);
2725}
2726
2727/*------------------------------------------------------------------------*
2728 * usbd_callback_wrapper_sub
2729 *
2730 * - This function will update variables in a USB transfer after
2731 *   the USB transfer is complete.
2732 *
2733 * - This function is used to start the next USB transfer on the
2734 * ep transfer queue, if any.
2735 *
2736 * NOTE: In some special cases the USB transfer will not be removed from
2737 * the pipe queue, but will remain first. To enforce USB transfer removal,
2738 * call this function with the transfer error set to "USB_ERR_CANCELLED".
2739 *
2740 * Return values:
2741 * 0: Success.
2742 * Else: The callback has been deferred.
2743 *------------------------------------------------------------------------*/
2744static uint8_t
2745usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2746{
2747 struct usb_endpoint *ep;
2748 struct usb_bus *bus;
2749 usb_frcount_t x;
2750
2751 bus = xfer->xroot->bus;
2752
2753 if ((!xfer->flags_int.open) &&
2754 (!xfer->flags_int.did_close)) {
2755 DPRINTF("close\n");
2756 USB_BUS_LOCK(bus);
2757 (xfer->endpoint->methods->close) (xfer);
2758 USB_BUS_UNLOCK(bus);
2759 /* only close once */
2760 xfer->flags_int.did_close = 1;
2761 return (1); /* wait for new callback */
2762 }
2763 /*
2764	 * If we have a non-hardware-induced error we
2765 * need to do the DMA delay!
2766 */
2767 if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2768 (xfer->error == USB_ERR_CANCELLED ||
2769 xfer->error == USB_ERR_TIMEOUT ||
2770 bus->methods->start_dma_delay != NULL)) {
2771
2772 usb_timeout_t temp;
2773
2774 /* only delay once */
2775 xfer->flags_int.did_dma_delay = 1;
2776
2777		/* we cannot cancel this delay */
2778 xfer->flags_int.can_cancel_immed = 0;
2779
2780 temp = usbd_get_dma_delay(xfer->xroot->udev);
2781
2782 DPRINTFN(3, "DMA delay, %u ms, "
2783 "on %p\n", temp, xfer);
2784
2785 if (temp != 0) {
2786 USB_BUS_LOCK(bus);
2787 /*
2788 * Some hardware solutions have dedicated
2789 * events when it is safe to free DMA'ed
2790			 * memory. For other hardware platforms we
2791 * use a static delay.
2792 */
2793 if (bus->methods->start_dma_delay != NULL) {
2794 (bus->methods->start_dma_delay) (xfer);
2795 } else {
2796 usbd_transfer_timeout_ms(xfer,
2797 (void (*)(void *))&usb_dma_delay_done_cb,
2798 temp);
2799 }
2800 USB_BUS_UNLOCK(bus);
2801 return (1); /* wait for new callback */
2802 }
2803 }
2804 /* check actual number of frames */
2805 if (xfer->aframes > xfer->nframes) {
2806 if (xfer->error == 0) {
2807 panic("%s: actual number of frames, %d, is "
2808 "greater than initial number of frames, %d\n",
2809 __FUNCTION__, xfer->aframes, xfer->nframes);
2810 } else {
2811 /* just set some valid value */
2812 xfer->aframes = xfer->nframes;
2813 }
2814 }
2815 /* compute actual length */
2816 xfer->actlen = 0;
2817
2818 for (x = 0; x != xfer->aframes; x++) {
2819 xfer->actlen += xfer->frlengths[x];
2820 }
2821
2822 /*
2823 * Frames that were not transferred get zero actual length in
2824 * case the USB device driver does not check the actual number
2825 * of frames transferred, "xfer->aframes":
2826 */
2827 for (; x < xfer->nframes; x++) {
2828 usbd_xfer_set_frame_len(xfer, x, 0);
2829 }
2830
2831 /* check actual length */
2832 if (xfer->actlen > xfer->sumlen) {
2833 if (xfer->error == 0) {
2834 panic("%s: actual length, %d, is greater than "
2835 "initial length, %d\n",
2836 __FUNCTION__, xfer->actlen, xfer->sumlen);
2837 } else {
2838 /* just set some valid value */
2839 xfer->actlen = xfer->sumlen;
2840 }
2841 }
2842 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2843 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2844 xfer->aframes, xfer->nframes);
2845
2846 if (xfer->error) {
2847 /* end of control transfer, if any */
2848 xfer->flags_int.control_act = 0;
2849
2850 /* check if we should block the execution queue */
2851 if ((xfer->error != USB_ERR_CANCELLED) &&
2852 (xfer->flags.pipe_bof)) {
2853 DPRINTFN(2, "xfer=%p: Block On Failure "
2854 "on endpoint=%p\n", xfer, xfer->endpoint);
2855 goto done;
2856 }
2857 } else {
2858 /* check for short transfers */
2859 if (xfer->actlen < xfer->sumlen) {
2860
2861 /* end of control transfer, if any */
2862 xfer->flags_int.control_act = 0;
2863
2864 if (!xfer->flags_int.short_xfer_ok) {
2865 xfer->error = USB_ERR_SHORT_XFER;
2866 if (xfer->flags.pipe_bof) {
2867 DPRINTFN(2, "xfer=%p: Block On Failure on "
2868 "Short Transfer on endpoint %p.\n",
2869 xfer, xfer->endpoint);
2870 goto done;
2871 }
2872 }
2873 } else {
2874 /*
2875 * Check if we are in the middle of a
2876 * control transfer:
2877 */
2878 if (xfer->flags_int.control_act) {
2879 DPRINTFN(5, "xfer=%p: Control transfer "
2880 "active on endpoint=%p\n", xfer, xfer->endpoint);
2881 goto done;
2882 }
2883 }
2884 }
2885
2886 ep = xfer->endpoint;
2887
2888 /*
2889 * If the current USB transfer is completing we need to start the
2890 * next one:
2891 */
2892 USB_BUS_LOCK(bus);
2893 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
2894 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);
2895
2896 if (ep->endpoint_q[xfer->stream_id].curr != NULL ||
2897 TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) {
2898 /* there is another USB transfer waiting */
2899 } else {
2900 /* this is the last USB transfer */
2901 /* clear isochronous sync flag */
2902 xfer->endpoint->is_synced = 0;
2903 }
2904 }
2905 USB_BUS_UNLOCK(bus);
2906done:
2907 return (0);
2908}
2909
2910/*------------------------------------------------------------------------*
2911 * usb_command_wrapper
2912 *
2913 * This function is used to execute commands non-recursively on a USB
2914 * transfer.
2915 *------------------------------------------------------------------------*/
2916void
2917usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2918{
2919 if (xfer) {
2920 /*
2921 * If the transfer is not already processing,
2922 * queue it!
2923 */
2924 if (pq->curr != xfer) {
2925 usbd_transfer_enqueue(pq, xfer);
2926 if (pq->curr != NULL) {
2927 /* something is already processing */
2928 DPRINTFN(6, "busy %p\n", pq->curr);
2929 return;
2930 }
2931 }
2932 } else {
2933 /* Get next element in queue */
2934 pq->curr = NULL;
2935 }
2936
2937 if (!pq->recurse_1) {
2938
2939 do {
2940
2941 /* set both recurse flags */
2942 pq->recurse_1 = 1;
2943 pq->recurse_2 = 1;
2944
2945 if (pq->curr == NULL) {
2946 xfer = TAILQ_FIRST(&pq->head);
2947 if (xfer) {
2948 TAILQ_REMOVE(&pq->head, xfer,
2949 wait_entry);
2950 xfer->wait_queue = NULL;
2951 pq->curr = xfer;
2952 } else {
2953 break;
2954 }
2955 }
2956 DPRINTFN(6, "cb %p (enter)\n", pq->curr);
2957 (pq->command) (pq);
2958 DPRINTFN(6, "cb %p (leave)\n", pq->curr);
2959
2960 } while (!pq->recurse_2);
2961
2962 /* clear first recurse flag */
2963 pq->recurse_1 = 0;
2964
2965 } else {
2966 /* clear second recurse flag */
2967 pq->recurse_2 = 0;
2968 }
2969}
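
/*
 * The "recurse_1"/"recurse_2" flags above flatten recursion into
 * iteration: only the outermost caller runs the do-while loop, and a
 * nested call simply clears "recurse_2" so that the outer loop invokes
 * "pq->command" one more time. A stripped-down sketch of the same
 * guard pattern, with the USB specifics removed and the flag polarity
 * inverted for readability:
 *
 * void
 * run_queue(struct queue *q)
 * {
 * 	if (q->running) {
 * 		q->rerun = 1;	// tell the outer caller to loop again
 * 		return;
 * 	}
 * 	q->running = 1;
 * 	do {
 * 		q->rerun = 0;
 * 		process_one(q);	// may re-enter run_queue()
 * 	} while (q->rerun);
 * 	q->running = 0;
 * }
 */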
2970
2971/*------------------------------------------------------------------------*
2972 * usbd_ctrl_transfer_setup
2973 *
2974 * This function is used to set up the default USB control endpoint
2975 * transfer.
2976 *------------------------------------------------------------------------*/
2977void
2978usbd_ctrl_transfer_setup(struct usb_device *udev)
2979{
2980 struct usb_xfer *xfer;
2981 uint8_t no_resetup;
2982 uint8_t iface_index;
2983
2984 /* check for root HUB */
2985 if (udev->parent_hub == NULL)
2986 return;
2987repeat:
2988
2989 xfer = udev->ctrl_xfer[0];
2990 if (xfer) {
2991 USB_XFER_LOCK(xfer);
2992 no_resetup =
2993 ((xfer->address == udev->address) &&
2994 (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
2995 udev->ddesc.bMaxPacketSize));
2996 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2997 if (no_resetup) {
2998 /*
2999 * NOTE: checking "xfer->address" and
3000 * starting the USB transfer must be
3001 * atomic!
3002 */
3003 usbd_transfer_start(xfer);
3004 }
3005 }
3006 USB_XFER_UNLOCK(xfer);
3007 } else {
3008 no_resetup = 0;
3009 }
3010
3011 if (no_resetup) {
3012 /*
3013		 * All parameters are exactly the same as before.
3014 * Just return.
3015 */
3016 return;
3017 }
3018 /*
3019 * Update wMaxPacketSize for the default control endpoint:
3020 */
3021 udev->ctrl_ep_desc.wMaxPacketSize[0] =
3022 udev->ddesc.bMaxPacketSize;
3023
3024 /*
3025 * Unsetup any existing USB transfer:
3026 */
3027 usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3028
3029 /*
3030 * Reset clear stall error counter.
3031 */
3032 udev->clear_stall_errors = 0;
3033
3034 /*
3035	 * Try to set up a new USB transfer for the
3036 * default control endpoint:
3037 */
3038 iface_index = 0;
3039 if (usbd_transfer_setup(udev, &iface_index,
3040 udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3041 &udev->device_mtx)) {
3042 DPRINTFN(0, "could not setup default "
3043 "USB transfer\n");
3044 } else {
3045 goto repeat;
3046 }
3047}
3048
3049/*------------------------------------------------------------------------*
3050 * usbd_clear_stall_locked - factored out code
3051 *
3052 * NOTE: the intention of this function is not to reset the hardware
3053 * data toggle.
3054 *------------------------------------------------------------------------*/
3055void
3056usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3057{
3058 USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3059
3060 /* check that we have a valid case */
3061 if (udev->flags.usb_mode == USB_MODE_HOST &&
3062 udev->parent_hub != NULL &&
3063 udev->bus->methods->clear_stall != NULL &&
3064 ep->methods != NULL) {
3065 (udev->bus->methods->clear_stall) (udev, ep);
3066 }
3067}
3068
3069/*------------------------------------------------------------------------*
3070 * usbd_clear_data_toggle - factored out code
3071 *
3072 * NOTE: the intention of this function is not to reset the hardware
3073 * data toggle on the USB device side.
3074 *------------------------------------------------------------------------*/
3075void
3076usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3077{
3078 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3079
3080 USB_BUS_LOCK(udev->bus);
3081 ep->toggle_next = 0;
3082 /* some hardware needs a callback to clear the data toggle */
3083 usbd_clear_stall_locked(udev, ep);
3084 USB_BUS_UNLOCK(udev->bus);
3085}
3086
3087/*------------------------------------------------------------------------*
3088 * usbd_clear_stall_callback - factored out clear stall callback
3089 *
3090 * Input parameters:
3091 * xfer1: Clear Stall Control Transfer
3092 * xfer2: Stalled USB Transfer
3093 *
3094 * This function is NULL safe.
3095 *
3096 * Return values:
3097 * 0: In progress
3098 * Else: Finished
3099 *
3100 * Clear stall config example:
3101 *
3102 * static const struct usb_config my_clearstall = {
3103 * .type = UE_CONTROL,
3104 * .endpoint = 0,
3105 * .direction = UE_DIR_ANY,
3106 * .interval = 50, //50 milliseconds
3107 * .bufsize = sizeof(struct usb_device_request),
3108 * .timeout = 1000, //1.000 seconds
3109 * .callback = &my_clear_stall_callback, // **
3110 * .usb_mode = USB_MODE_HOST,
3111 * };
3112 *
3113 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3114 * passing the correct parameters.
3115 *------------------------------------------------------------------------*/
3116uint8_t
3117usbd_clear_stall_callback(struct usb_xfer *xfer1,
3118 struct usb_xfer *xfer2)
3119{
3120 struct usb_device_request req;
3121
3122 if (xfer2 == NULL) {
3123 /* looks like we are tearing down */
3124 DPRINTF("NULL input parameter\n");
3125 return (0);
3126 }
3127 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3128 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3129
3130 switch (USB_GET_STATE(xfer1)) {
3131 case USB_ST_SETUP:
3132
3133 /*
3134 * pre-clear the data toggle to DATA0 ("umass.c" and
3135 * "ata-usb.c" depends on this)
3136 */
3137
3138 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3139
3140 /* setup a clear-stall packet */
3141
3142 req.bmRequestType = UT_WRITE_ENDPOINT;
3143 req.bRequest = UR_CLEAR_FEATURE;
3144 USETW(req.wValue, UF_ENDPOINT_HALT);
3145 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3146 req.wIndex[1] = 0;
3147 USETW(req.wLength, 0);
3148
3149 /*
3150 * "usbd_transfer_setup_sub()" will ensure that
3151 * we have sufficient room in the buffer for
3152 * the request structure!
3153 */
3154
3155 /* copy in the transfer */
3156
3157 usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3158
3159 /* set length */
3160 xfer1->frlengths[0] = sizeof(req);
3161 xfer1->nframes = 1;
3162
3163 usbd_transfer_submit(xfer1);
3164 return (0);
3165
3166 case USB_ST_TRANSFERRED:
3167 break;
3168
3169 default: /* Error */
3170 if (xfer1->error == USB_ERR_CANCELLED) {
3171 return (0);
3172 }
3173 break;
3174 }
3175 return (1); /* Clear Stall Finished */
3176}
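
/*
 * Example of the "my_clear_stall_callback" mentioned above, assuming a
 * hypothetical driver softc "sc" holding the stalled data transfer in
 * "sc_data_xfer" (all "my_"/"MY_" names are illustrative only):
 *
 * static void
 * my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 * {
 * 	struct my_softc *sc = usbd_xfer_softc(xfer);
 *
 * 	if (usbd_clear_stall_callback(xfer, sc->sc_data_xfer)) {
 * 		// clear-stall finished - restart the stalled transfer
 * 		sc->sc_flags &= ~MY_FLAG_STALLED;
 * 		usbd_transfer_start(sc->sc_data_xfer);
 * 	}
 * }
 */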
3177
3178/*------------------------------------------------------------------------*
3179 * usbd_transfer_poll
3180 *
3181 * The following function gets called from the USB keyboard driver and
3182 * UMASS when the system has paniced.
3183 *
3184 * NOTE: It is currently not possible to resume normal operation on
3185 * the USB controller which has been polled, due to clearing of the
3186 * "up_dsleep" and "up_msleep" flags.
3187 *------------------------------------------------------------------------*/
3188void
3189usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3190{
3191 struct usb_xfer *xfer;
3192 struct usb_xfer_root *xroot;
3193 struct usb_device *udev;
3194 struct usb_proc_msg *pm;
3195 uint16_t n;
3196 uint16_t drop_bus;
3197 uint16_t drop_xfer;
3198
3199 for (n = 0; n != max; n++) {
3200 /* Extra checks to avoid panic */
3201 xfer = ppxfer[n];
3202 if (xfer == NULL)
3203 continue; /* no USB transfer */
3204 xroot = xfer->xroot;
3205 if (xroot == NULL)
3206 continue; /* no USB root */
3207 udev = xroot->udev;
3208 if (udev == NULL)
3209 continue; /* no USB device */
3210 if (udev->bus == NULL)
3211 continue; /* no BUS structure */
3212 if (udev->bus->methods == NULL)
3213 continue; /* no BUS methods */
3214 if (udev->bus->methods->xfer_poll == NULL)
3215 continue; /* no poll method */
3216
3217 /* make sure that the BUS mutex is not locked */
3218 drop_bus = 0;
3219 while (mtx_owned(&xroot->udev->bus->bus_mtx) && !SCHEDULER_STOPPED()) {
3220 mtx_unlock(&xroot->udev->bus->bus_mtx);
3221 drop_bus++;
3222 }
3223
3224 /* make sure that the transfer mutex is not locked */
3225 drop_xfer = 0;
3226 while (mtx_owned(xroot->xfer_mtx) && !SCHEDULER_STOPPED()) {
3227 mtx_unlock(xroot->xfer_mtx);
3228 drop_xfer++;
3229 }
3230
3231		/* Make sure cv_signal() and cv_broadcast() are not called */
3232 USB_BUS_CONTROL_XFER_PROC(udev->bus)->up_msleep = 0;
3233 USB_BUS_EXPLORE_PROC(udev->bus)->up_msleep = 0;
3234 USB_BUS_GIANT_PROC(udev->bus)->up_msleep = 0;
3235 USB_BUS_NON_GIANT_PROC(udev->bus)->up_msleep = 0;
3236
3237 /* poll USB hardware */
3238 (udev->bus->methods->xfer_poll) (udev->bus);
3239
3240 USB_BUS_LOCK(xroot->bus);
3241
3242 /* check for clear stall */
3243 if (udev->ctrl_xfer[1] != NULL) {
3244
3245 /* poll clear stall start */
3246 pm = &udev->cs_msg[0].hdr;
3247 (pm->pm_callback) (pm);
3248 /* poll clear stall done thread */
3249 pm = &udev->ctrl_xfer[1]->
3250 xroot->done_m[0].hdr;
3251 (pm->pm_callback) (pm);
3252 }
3253
3254 /* poll done thread */
3255 pm = &xroot->done_m[0].hdr;
3256 (pm->pm_callback) (pm);
3257
3258 USB_BUS_UNLOCK(xroot->bus);
3259
3260 /* restore transfer mutex */
3261 while (drop_xfer--)
3262 mtx_lock(xroot->xfer_mtx);
3263
3264 /* restore BUS mutex */
3265 while (drop_bus--)
3266 mtx_lock(&xroot->udev->bus->bus_mtx);
3267 }
3268}
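
/*
 * Polling sketch (illustrative only): a keyboard-style driver waiting
 * for input after a panic, with a hypothetical softc "sc" carrying an
 * array of "MY_N_TRANSFER" transfers in "sc_xfer":
 *
 * while (sc->sc_input_ready == 0) {
 * 	usbd_transfer_poll(sc->sc_xfer, MY_N_TRANSFER);
 * 	DELAY(1000);	// wait 1 ms between polls
 * }
 */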
3269
3270static void
3271usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3272 uint8_t type, enum usb_dev_speed speed)
3273{
3274 static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3275 [USB_SPEED_LOW] = 8,
3276 [USB_SPEED_FULL] = 64,
3277 [USB_SPEED_HIGH] = 1024,
3278 [USB_SPEED_VARIABLE] = 1024,
3279 [USB_SPEED_SUPER] = 1024,
3280 };
3281
3282 static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3283 [USB_SPEED_LOW] = 0, /* invalid */
3284 [USB_SPEED_FULL] = 1023,
3285 [USB_SPEED_HIGH] = 1024,
3286 [USB_SPEED_VARIABLE] = 3584,
3287 [USB_SPEED_SUPER] = 1024,
3288 };
3289
3290 static const uint16_t control_min[USB_SPEED_MAX] = {
3291 [USB_SPEED_LOW] = 8,
3292 [USB_SPEED_FULL] = 8,
3293 [USB_SPEED_HIGH] = 64,
3294 [USB_SPEED_VARIABLE] = 512,
3295 [USB_SPEED_SUPER] = 512,
3296 };
3297
3298 static const uint16_t bulk_min[USB_SPEED_MAX] = {
3299 [USB_SPEED_LOW] = 8,
3300 [USB_SPEED_FULL] = 8,
3301 [USB_SPEED_HIGH] = 512,
3302 [USB_SPEED_VARIABLE] = 512,
3303 [USB_SPEED_SUPER] = 1024,
3304 };
3305
3306 uint16_t temp;
3307
3308 memset(ptr, 0, sizeof(*ptr));
3309
3310 switch (type) {
3311 case UE_INTERRUPT:
3312 ptr->range.max = intr_range_max[speed];
3313 break;
3314 case UE_ISOCHRONOUS:
3315 ptr->range.max = isoc_range_max[speed];
3316 break;
3317 default:
3318 if (type == UE_BULK)
3319 temp = bulk_min[speed];
3320 else /* UE_CONTROL */
3321 temp = control_min[speed];
3322
3323 /* default is fixed */
3324 ptr->fixed[0] = temp;
3325 ptr->fixed[1] = temp;
3326 ptr->fixed[2] = temp;
3327 ptr->fixed[3] = temp;
3328
3329 if (speed == USB_SPEED_FULL) {
3330 /* multiple sizes */
3331 ptr->fixed[1] = 16;
3332 ptr->fixed[2] = 32;
3333 ptr->fixed[3] = 64;
3334 }
3335 if ((speed == USB_SPEED_VARIABLE) &&
3336 (type == UE_BULK)) {
3337 /* multiple sizes */
3338 ptr->fixed[2] = 1024;
3339 ptr->fixed[3] = 1536;
3340 }
3341 break;
3342 }
3343}
3344
3345void *
3346usbd_xfer_softc(struct usb_xfer *xfer)
3347{
3348 return (xfer->priv_sc);
3349}
3350
3351void *
3352usbd_xfer_get_priv(struct usb_xfer *xfer)
3353{
3354 return (xfer->priv_fifo);
3355}
3356
3357void
3358usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3359{
3360 xfer->priv_fifo = ptr;
3361}
3362
3363uint8_t
3364usbd_xfer_state(struct usb_xfer *xfer)
3365{
3366 return (xfer->usb_state);
3367}
3368
3369void
3370usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3371{
3372 switch (flag) {
3373 case USB_FORCE_SHORT_XFER:
3374 xfer->flags.force_short_xfer = 1;
3375 break;
3376 case USB_SHORT_XFER_OK:
3377 xfer->flags.short_xfer_ok = 1;
3378 break;
3379 case USB_MULTI_SHORT_OK:
3380 xfer->flags.short_frames_ok = 1;
3381 break;
3382 case USB_MANUAL_STATUS:
3383 xfer->flags.manual_status = 1;
3384 break;
3385 }
3386}
3387
3388void
3389usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3390{
3391 switch (flag) {
3392 case USB_FORCE_SHORT_XFER:
3393 xfer->flags.force_short_xfer = 0;
3394 break;
3395 case USB_SHORT_XFER_OK:
3396 xfer->flags.short_xfer_ok = 0;
3397 break;
3398 case USB_MULTI_SHORT_OK:
3399 xfer->flags.short_frames_ok = 0;
3400 break;
3401 case USB_MANUAL_STATUS:
3402 xfer->flags.manual_status = 0;
3403 break;
3404 }
3405}
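
/*
 * Flag usage sketch (illustrative only): a read callback that accepts
 * a short transfer could set the flag in its USB_ST_SETUP state before
 * submitting the transfer:
 *
 * case USB_ST_SETUP:
 * 	usbd_xfer_set_flag(xfer, USB_SHORT_XFER_OK);
 * 	usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
 * 	usbd_transfer_submit(xfer);
 * 	break;
 */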
3406
3407/*
3408 * The following function returns the time, in milliseconds, at which
3409 * the isochronous transfer was completed by the hardware. The returned
3410 * value wraps around every 65536 milliseconds.
3411 */
3412uint16_t
3413usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3414{
3415 return (xfer->isoc_time_complete);
3416}
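
/*
 * Because the timestamp wraps at 65536 milliseconds, elapsed time
 * should be computed with unsigned 16-bit arithmetic, for example:
 *
 * uint16_t start, delta;
 *
 * start = usbd_xfer_get_timestamp(xfer);	// at an earlier completion
 * ...
 * delta = (uint16_t)(usbd_xfer_get_timestamp(xfer) - start);
 */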