usb_transfer.c revision 229080
1/* $FreeBSD: stable/9/sys/dev/usb/usb_transfer.c 229080 2011-12-31 13:07:09Z hselasky $ */
2/*-
3 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/stdint.h>
28#include <sys/stddef.h>
29#include <sys/param.h>
30#include <sys/queue.h>
31#include <sys/types.h>
32#include <sys/systm.h>
33#include <sys/kernel.h>
34#include <sys/bus.h>
35#include <sys/module.h>
36#include <sys/lock.h>
37#include <sys/mutex.h>
38#include <sys/condvar.h>
39#include <sys/sysctl.h>
40#include <sys/sx.h>
41#include <sys/unistd.h>
42#include <sys/callout.h>
43#include <sys/malloc.h>
44#include <sys/priv.h>
45
46#include <dev/usb/usb.h>
47#include <dev/usb/usbdi.h>
48#include <dev/usb/usbdi_util.h>
49
50#define	USB_DEBUG_VAR usb_debug
51
52#include <dev/usb/usb_core.h>
53#include <dev/usb/usb_busdma.h>
54#include <dev/usb/usb_process.h>
55#include <dev/usb/usb_transfer.h>
56#include <dev/usb/usb_device.h>
57#include <dev/usb/usb_debug.h>
58#include <dev/usb/usb_util.h>
59
60#include <dev/usb/usb_controller.h>
61#include <dev/usb/usb_bus.h>
62#include <dev/usb/usb_pf.h>
63
64struct usb_std_packet_size {
65	struct {
66		uint16_t min;		/* inclusive */
67		uint16_t max;		/* inclusive */
68	}	range;
69
70	uint16_t fixed[4];
71};
72
73static usb_callback_t usb_request_callback;
74
75static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
76
77	/* This transfer is used for generic control endpoint transfers */
78
79	[0] = {
80		.type = UE_CONTROL,
81		.endpoint = 0x00,	/* Control endpoint */
82		.direction = UE_DIR_ANY,
83		.bufsize = USB_EP0_BUFSIZE,	/* bytes */
84		.flags = {.proxy_buffer = 1,},
85		.callback = &usb_request_callback,
86		.usb_mode = USB_MODE_DUAL,	/* both modes */
87	},
88
89	/* This transfer is used for generic clear stall only */
90
91	[1] = {
92		.type = UE_CONTROL,
93		.endpoint = 0x00,	/* Control pipe */
94		.direction = UE_DIR_ANY,
95		.bufsize = sizeof(struct usb_device_request),
96		.callback = &usb_do_clear_stall_callback,
97		.timeout = 1000,	/* 1 second */
98		.interval = 50,	/* 50ms */
99		.usb_mode = USB_MODE_HOST,
100	},
101};
102
103/* function prototypes */
104
105static void	usbd_update_max_frame_size(struct usb_xfer *);
106static void	usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
107static void	usbd_control_transfer_init(struct usb_xfer *);
108static int	usbd_setup_ctrl_transfer(struct usb_xfer *);
109static void	usb_callback_proc(struct usb_proc_msg *);
110static void	usbd_callback_ss_done_defer(struct usb_xfer *);
111static void	usbd_callback_wrapper(struct usb_xfer_queue *);
112static void	usbd_transfer_start_cb(void *);
113static uint8_t	usbd_callback_wrapper_sub(struct usb_xfer *);
114static void	usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
115		    uint8_t type, enum usb_dev_speed speed);
116
117/*------------------------------------------------------------------------*
118 *	usb_request_callback
119 *------------------------------------------------------------------------*/
120static void
121usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
122{
123	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
124		usb_handle_request_callback(xfer, error);
125	else
126		usbd_do_request_callback(xfer, error);
127}
128
129/*------------------------------------------------------------------------*
130 *	usbd_update_max_frame_size
131 *
132 * This function updates the maximum frame size, taking into account that
133 * high speed USB can transfer multiple consecutive packets per frame.
134 *------------------------------------------------------------------------*/
135static void
136usbd_update_max_frame_size(struct usb_xfer *xfer)
137{
138	/* compute maximum frame size */
139	/* this computation should not overflow 16-bit */
140	/* max = 15 * 1024 */
141
142	xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
143}
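/*
 * Worked example (illustrative, not from the original sources): a high
 * speed, high bandwidth isochronous endpoint reporting a wMaxPacketSize
 * of 0x1400 is decoded by usbd_transfer_setup_sub() into a
 * "max_packet_size" of 1024 and a "max_packet_count" of 3, so the
 * computation above yields a "max_frame_size" of 3072 bytes.
 */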
144
145/*------------------------------------------------------------------------*
146 *	usbd_get_dma_delay
147 *
148 * The following function is called when we need to
149 * synchronize with DMA hardware.
150 *
151 * Returns:
152 *    0: no DMA delay required
153 * Else: milliseconds of DMA delay
154 *------------------------------------------------------------------------*/
155usb_timeout_t
156usbd_get_dma_delay(struct usb_device *udev)
157{
158	struct usb_bus_methods *mtod;
159	uint32_t temp;
160
161	mtod = udev->bus->methods;
162	temp = 0;
163
164	if (mtod->get_dma_delay) {
165		(mtod->get_dma_delay) (udev, &temp);
166		/*
167		 * Round up and convert to milliseconds. Note that we
168		 * divide by 1024 instead of 1000 so a shift can be used.
169		 */
170		temp += 0x3FF;
171		temp /= 0x400;
172	}
173	return (temp);
174}
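/*
 * Worked example (illustrative): if the controller method reports a raw
 * delay of 2500, the rounding above computes (2500 + 0x3FF) / 0x400 = 3,
 * so the caller is told to wait three milliseconds.
 */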
175
176/*------------------------------------------------------------------------*
177 *	usbd_transfer_setup_sub_malloc
178 *
179 * This function will allocate one or more DMA'able memory chunks
180 * according to the "size", "align" and "count" arguments. On return,
181 * "ppc" points to a linear array of USB page caches.
182 *
183 * Returns:
184 *    0: Success
185 * Else: Failure
186 *------------------------------------------------------------------------*/
187#if USB_HAVE_BUSDMA
188uint8_t
189usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
190    struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
191    usb_size_t count)
192{
193	struct usb_page_cache *pc;
194	struct usb_page *pg;
195	void *buf;
196	usb_size_t n_dma_pc;
197	usb_size_t n_obj;
198	usb_size_t x;
199	usb_size_t y;
200	usb_size_t r;
201	usb_size_t z;
202
203	USB_ASSERT(align > 1, ("Invalid alignment, 0x%08x\n",
204	    align));
205	USB_ASSERT(size > 0, ("Invalid size = 0\n"));
206
207	if (count == 0) {
208		return (0);		/* nothing to allocate */
209	}
210	/*
211	 * Make sure that the size is aligned properly.
212	 */
213	size = -((-size) & (-align));
214
215	/*
216	 * Try multi-allocation chunks to reduce the number of DMA
217	 * allocations, because DMA allocations are slow.
218	 */
219	if (size >= PAGE_SIZE) {
220		n_dma_pc = count;
221		n_obj = 1;
222	} else {
223		/* compute number of objects per page */
224		n_obj = (PAGE_SIZE / size);
225		/*
226		 * Compute number of DMA chunks, rounded up
227		 * to nearest one:
228		 */
229		n_dma_pc = ((count + n_obj - 1) / n_obj);
230	}
231
232	if (parm->buf == NULL) {
233		/* for the future */
234		parm->dma_page_ptr += n_dma_pc;
235		parm->dma_page_cache_ptr += n_dma_pc;
236		parm->dma_page_ptr += count;
237		parm->xfer_page_cache_ptr += count;
238		return (0);
239	}
240	for (x = 0; x != n_dma_pc; x++) {
241		/* need to initialize the page cache */
242		parm->dma_page_cache_ptr[x].tag_parent =
243		    &parm->curr_xfer->xroot->dma_parent_tag;
244	}
245	for (x = 0; x != count; x++) {
246		/* need to initialize the page cache */
247		parm->xfer_page_cache_ptr[x].tag_parent =
248		    &parm->curr_xfer->xroot->dma_parent_tag;
249	}
250
251	if (ppc) {
252		*ppc = parm->xfer_page_cache_ptr;
253	}
254	r = count;			/* set remainder count */
255	z = n_obj * size;		/* set allocation size */
256	pc = parm->xfer_page_cache_ptr;
257	pg = parm->dma_page_ptr;
258
259	for (x = 0; x != n_dma_pc; x++) {
260
261		if (r < n_obj) {
262			/* compute last remainder */
263			z = r * size;
264			n_obj = r;
265		}
266		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
267		    pg, z, align)) {
268			return (1);	/* failure */
269		}
270		/* Set beginning of current buffer */
271		buf = parm->dma_page_cache_ptr->buffer;
272		/* Make room for one DMA page cache and one page */
273		parm->dma_page_cache_ptr++;
274		pg++;
275
276		for (y = 0; (y != n_obj); y++, r--, pc++, pg++) {
277
278			/* Load sub-chunk into DMA */
279			if (usb_pc_dmamap_create(pc, size)) {
280				return (1);	/* failure */
281			}
282			pc->buffer = USB_ADD_BYTES(buf, y * size);
283			pc->page_start = pg;
284
285			mtx_lock(pc->tag_parent->mtx);
286			if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
287				mtx_unlock(pc->tag_parent->mtx);
288				return (1);	/* failure */
289			}
290			mtx_unlock(pc->tag_parent->mtx);
291		}
292	}
293
294	parm->xfer_page_cache_ptr = pc;
295	parm->dma_page_ptr = pg;
296	return (0);
297}
298#endif
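/*
 * Worked example for the chunking above (illustrative numbers, assuming a
 * PAGE_SIZE of 4096 bytes): a request for count = 20 objects of size = 220
 * bytes with align = 256 first has "size" rounded up to 256. Because 256
 * is less than PAGE_SIZE, n_obj = 4096 / 256 = 16 objects fit per chunk
 * and n_dma_pc = (20 + 15) / 16 = 2 DMA allocations are made: one full
 * 4096 byte chunk holding 16 objects and one 1024 byte chunk holding the
 * remaining 4 objects.
 */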
299
300/*------------------------------------------------------------------------*
301 *	usbd_transfer_setup_sub - transfer setup subroutine
302 *
303 * This function must be called from the "xfer_setup" callback of the
304 * USB Host or Device controller driver when setting up a USB
305 * transfer. This function sets up the correct packet sizes, buffer
306 * sizes, flags and more, which are stored in the "usb_xfer"
307 * structure.
308 *------------------------------------------------------------------------*/
309void
310usbd_transfer_setup_sub(struct usb_setup_params *parm)
311{
312	enum {
313		REQ_SIZE = 8,
314		MIN_PKT = 8,
315	};
316	struct usb_xfer *xfer = parm->curr_xfer;
317	const struct usb_config *setup = parm->curr_setup;
318	struct usb_endpoint_ss_comp_descriptor *ecomp;
319	struct usb_endpoint_descriptor *edesc;
320	struct usb_std_packet_size std_size;
321	usb_frcount_t n_frlengths;
322	usb_frcount_t n_frbuffers;
323	usb_frcount_t x;
324	uint8_t type;
325	uint8_t zmps;
326
327	/*
328	 * Sanity check. The following parameters must be initialized before
329	 * calling this function.
330	 */
331	if ((parm->hc_max_packet_size == 0) ||
332	    (parm->hc_max_packet_count == 0) ||
333	    (parm->hc_max_frame_size == 0)) {
334		parm->err = USB_ERR_INVAL;
335		goto done;
336	}
337	edesc = xfer->endpoint->edesc;
338	ecomp = xfer->endpoint->ecomp;
339
340	type = (edesc->bmAttributes & UE_XFERTYPE);
341
342	xfer->flags = setup->flags;
343	xfer->nframes = setup->frames;
344	xfer->timeout = setup->timeout;
345	xfer->callback = setup->callback;
346	xfer->interval = setup->interval;
347	xfer->endpointno = edesc->bEndpointAddress;
348	xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
349	xfer->max_packet_count = 1;
350	/* make a shadow copy: */
351	xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
352
353	parm->bufsize = setup->bufsize;
354
355	switch (parm->speed) {
356	case USB_SPEED_HIGH:
357		switch (type) {
358		case UE_ISOCHRONOUS:
359		case UE_INTERRUPT:
360			xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
361
362			/* check for invalid max packet count */
363			if (xfer->max_packet_count > 3)
364				xfer->max_packet_count = 3;
365			break;
366		default:
367			break;
368		}
369		xfer->max_packet_size &= 0x7FF;
370		break;
371	case USB_SPEED_SUPER:
372		xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
373
374		if (ecomp != NULL)
375			xfer->max_packet_count += ecomp->bMaxBurst;
376
377		if ((xfer->max_packet_count == 0) ||
378		    (xfer->max_packet_count > 16))
379			xfer->max_packet_count = 16;
380
381		switch (type) {
382		case UE_CONTROL:
383			xfer->max_packet_count = 1;
384			break;
385		case UE_ISOCHRONOUS:
386			if (ecomp != NULL) {
387				uint8_t mult;
388
389				mult = (ecomp->bmAttributes & 3) + 1;
390				if (mult > 3)
391					mult = 3;
392
393				xfer->max_packet_count *= mult;
394			}
395			break;
396		default:
397			break;
398		}
399		xfer->max_packet_size &= 0x7FF;
400		break;
401	default:
402		break;
403	}
404	/* range check "max_packet_count" */
405
406	if (xfer->max_packet_count > parm->hc_max_packet_count) {
407		xfer->max_packet_count = parm->hc_max_packet_count;
408	}
409	/* filter "wMaxPacketSize" according to HC capabilities */
410
411	if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
412	    (xfer->max_packet_size == 0)) {
413		xfer->max_packet_size = parm->hc_max_packet_size;
414	}
415	/* filter "wMaxPacketSize" according to standard sizes */
416
417	usbd_get_std_packet_size(&std_size, type, parm->speed);
418
419	if (std_size.range.min || std_size.range.max) {
420
421		if (xfer->max_packet_size < std_size.range.min) {
422			xfer->max_packet_size = std_size.range.min;
423		}
424		if (xfer->max_packet_size > std_size.range.max) {
425			xfer->max_packet_size = std_size.range.max;
426		}
427	} else {
428
429		if (xfer->max_packet_size >= std_size.fixed[3]) {
430			xfer->max_packet_size = std_size.fixed[3];
431		} else if (xfer->max_packet_size >= std_size.fixed[2]) {
432			xfer->max_packet_size = std_size.fixed[2];
433		} else if (xfer->max_packet_size >= std_size.fixed[1]) {
434			xfer->max_packet_size = std_size.fixed[1];
435		} else {
436			/* only one possibility left */
437			xfer->max_packet_size = std_size.fixed[0];
438		}
439	}
440
441	/* compute "max_frame_size" */
442
443	usbd_update_max_frame_size(xfer);
444
445	/* check interrupt interval and transfer pre-delay */
446
447	if (type == UE_ISOCHRONOUS) {
448
449		uint16_t frame_limit;
450
451		xfer->interval = 0;	/* not used, must be zero */
452		xfer->flags_int.isochronous_xfr = 1;	/* set flag */
453
454		if (xfer->timeout == 0) {
455			/*
456			 * set a default timeout in
457			 * case something goes wrong!
458			 */
459			xfer->timeout = 1000 / 4;
460		}
461		switch (parm->speed) {
462		case USB_SPEED_LOW:
463		case USB_SPEED_FULL:
464			frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
465			xfer->fps_shift = 0;
466			break;
467		default:
468			frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
469			xfer->fps_shift = edesc->bInterval;
470			if (xfer->fps_shift > 0)
471				xfer->fps_shift--;
472			if (xfer->fps_shift > 3)
473				xfer->fps_shift = 3;
474			if (xfer->flags.pre_scale_frames != 0)
475				xfer->nframes <<= (3 - xfer->fps_shift);
476			break;
477		}
478
479		if (xfer->nframes > frame_limit) {
480			/*
481			 * this is not going to work
482			 * across all hardware
483			 */
484			parm->err = USB_ERR_INVAL;
485			goto done;
486		}
487		if (xfer->nframes == 0) {
488			/*
489			 * this is not a valid value
490			 */
491			parm->err = USB_ERR_ZERO_NFRAMES;
492			goto done;
493		}
494	} else {
495
496		/*
497		 * If a value is specified, use that, else check the
498		 * endpoint descriptor!
499		 */
500		if (type == UE_INTERRUPT) {
501
502			uint32_t temp;
503
504			if (xfer->interval == 0) {
505
506				xfer->interval = edesc->bInterval;
507
508				switch (parm->speed) {
509				case USB_SPEED_LOW:
510				case USB_SPEED_FULL:
511					break;
512				default:
513					/* 125us -> 1ms */
514					if (xfer->interval < 4)
515						xfer->interval = 1;
516					else if (xfer->interval > 16)
517						xfer->interval = (1 << (16 - 4));
518					else
519						xfer->interval =
520						    (1 << (xfer->interval - 4));
521					break;
522				}
523			}
524
525			if (xfer->interval == 0) {
526				/*
527				 * One millisecond is the smallest
528				 * interval we support:
529				 */
530				xfer->interval = 1;
531			}
532
533			xfer->fps_shift = 0;
534			temp = 1;
535
536			while ((temp != 0) && (temp < xfer->interval)) {
537				xfer->fps_shift++;
538				temp *= 2;
539			}
540
541			switch (parm->speed) {
542			case USB_SPEED_LOW:
543			case USB_SPEED_FULL:
544				break;
545			default:
546				xfer->fps_shift += 3;
547				break;
548			}
549		}
550	}
551
552	/*
553	 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
554	 * to be equal to zero when setting up USB transfers, because
555	 * that would lead to a lot of extra code in the USB kernel.
556	 */
557
558	if ((xfer->max_frame_size == 0) ||
559	    (xfer->max_packet_size == 0)) {
560
561		zmps = 1;
562
563		if ((parm->bufsize <= MIN_PKT) &&
564		    (type != UE_CONTROL) &&
565		    (type != UE_BULK)) {
566
567			/* workaround */
568			xfer->max_packet_size = MIN_PKT;
569			xfer->max_packet_count = 1;
570			parm->bufsize = 0;	/* automatic setup length */
571			usbd_update_max_frame_size(xfer);
572
573		} else {
574			parm->err = USB_ERR_ZERO_MAXP;
575			goto done;
576		}
577
578	} else {
579		zmps = 0;
580	}
581
582	/*
583	 * check if we should setup a default
584	 * length:
585	 */
586
587	if (parm->bufsize == 0) {
588
589		parm->bufsize = xfer->max_frame_size;
590
591		if (type == UE_ISOCHRONOUS) {
592			parm->bufsize *= xfer->nframes;
593		}
594	}
595	/*
596	 * check if we are about to setup a proxy
597	 * type of buffer:
598	 */
599
600	if (xfer->flags.proxy_buffer) {
601
602		/* round bufsize up */
603
604		parm->bufsize += (xfer->max_frame_size - 1);
605
606		if (parm->bufsize < xfer->max_frame_size) {
607			/* length wrapped around */
608			parm->err = USB_ERR_INVAL;
609			goto done;
610		}
611		/* subtract remainder */
612
613		parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
614
615		/* add length of USB device request structure, if any */
616
617		if (type == UE_CONTROL) {
618			parm->bufsize += REQ_SIZE;	/* SETUP message */
619		}
620	}
621	xfer->max_data_length = parm->bufsize;
622
623	/* Setup "n_frlengths" and "n_frbuffers" */
624
625	if (type == UE_ISOCHRONOUS) {
626		n_frlengths = xfer->nframes;
627		n_frbuffers = 1;
628	} else {
629
630		if (type == UE_CONTROL) {
631			xfer->flags_int.control_xfr = 1;
632			if (xfer->nframes == 0) {
633				if (parm->bufsize <= REQ_SIZE) {
634					/*
635					 * there will never be any data
636					 * stage
637					 */
638					xfer->nframes = 1;
639				} else {
640					xfer->nframes = 2;
641				}
642			}
643		} else {
644			if (xfer->nframes == 0) {
645				xfer->nframes = 1;
646			}
647		}
648
649		n_frlengths = xfer->nframes;
650		n_frbuffers = xfer->nframes;
651	}
652
653	/*
654	 * check if we have room for the
655	 * USB device request structure:
656	 */
657
658	if (type == UE_CONTROL) {
659
660		if (xfer->max_data_length < REQ_SIZE) {
661			/* length wrapped around or too small bufsize */
662			parm->err = USB_ERR_INVAL;
663			goto done;
664		}
665		xfer->max_data_length -= REQ_SIZE;
666	}
667	/*
668	 * Setup "frlengths" and shadow "frlengths" for keeping the
669	 * initial frame lengths when a USB transfer is complete. This
670	 * information is useful when computing isochronous offsets.
671	 */
672	xfer->frlengths = parm->xfer_length_ptr;
673	parm->xfer_length_ptr += 2 * n_frlengths;
674
675	/* setup "frbuffers" */
676	xfer->frbuffers = parm->xfer_page_cache_ptr;
677	parm->xfer_page_cache_ptr += n_frbuffers;
678
679	/* initialize max frame count */
680	xfer->max_frame_count = xfer->nframes;
681
682	/*
683	 * check if we need to setup
684	 * a local buffer:
685	 */
686
687	if (!xfer->flags.ext_buffer) {
688
689		/* align data */
690		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
691
692		if (parm->buf) {
693
694			xfer->local_buffer =
695			    USB_ADD_BYTES(parm->buf, parm->size[0]);
696
697			usbd_xfer_set_frame_offset(xfer, 0, 0);
698
699			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
700				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
701			}
702		}
703		parm->size[0] += parm->bufsize;
704
705		/* align data again */
706		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
707	}
708	/*
709	 * Compute maximum buffer size
710	 */
711
712	if (parm->bufsize_max < parm->bufsize) {
713		parm->bufsize_max = parm->bufsize;
714	}
715#if USB_HAVE_BUSDMA
716	if (xfer->flags_int.bdma_enable) {
717		/*
718		 * Setup "dma_page_ptr".
719		 *
720		 * Proof for formula below:
721		 *
722		 * Assume there are three USB frames having length "a", "b" and
723		 * "c". These USB frames will at maximum need "z"
724		 * "usb_page" structures. "z" is given by:
725		 *
726		 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
727		 * ((c / USB_PAGE_SIZE) + 2);
728		 *
729		 * Constraining "a", "b" and "c" like this:
730		 *
731		 * (a + b + c) <= parm->bufsize
732		 *
733		 * We know that:
734		 *
735		 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
736		 *
737		 * Here is the general formula:
738		 */
739		xfer->dma_page_ptr = parm->dma_page_ptr;
740		parm->dma_page_ptr += (2 * n_frbuffers);
741		parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
742	}
743#endif
744	if (zmps) {
745		/* correct maximum data length */
746		xfer->max_data_length = 0;
747	}
748	/* subtract USB frame remainder from "hc_max_frame_size" */
749
750	xfer->max_hc_frame_size =
751	    (parm->hc_max_frame_size -
752	    (parm->hc_max_frame_size % xfer->max_frame_size));
753
754	if (xfer->max_hc_frame_size == 0) {
755		parm->err = USB_ERR_INVAL;
756		goto done;
757	}
758
759	/* initialize frame buffers */
760
761	if (parm->buf) {
762		for (x = 0; x != n_frbuffers; x++) {
763			xfer->frbuffers[x].tag_parent =
764			    &xfer->xroot->dma_parent_tag;
765#if USB_HAVE_BUSDMA
766			if (xfer->flags_int.bdma_enable &&
767			    (parm->bufsize_max > 0)) {
768
769				if (usb_pc_dmamap_create(
770				    xfer->frbuffers + x,
771				    parm->bufsize_max)) {
772					parm->err = USB_ERR_NOMEM;
773					goto done;
774				}
775			}
776#endif
777		}
778	}
779done:
780	if (parm->err) {
781		/*
782		 * Set some dummy values so that we avoid division by zero:
783		 */
784		xfer->max_hc_frame_size = 1;
785		xfer->max_frame_size = 1;
786		xfer->max_packet_size = 1;
787		xfer->max_data_length = 0;
788		xfer->nframes = 0;
789		xfer->max_frame_count = 0;
790	}
791}
792
793/*------------------------------------------------------------------------*
794 *	usbd_transfer_setup - setup an array of USB transfers
795 *
796 * NOTE: You must always call "usbd_transfer_unsetup" after calling
797 * "usbd_transfer_setup" if success was returned.
798 *
799 * The idea is that the USB device driver should pre-allocate all its
800 * transfers by one call to this function.
801 *
802 * Return values:
803 *    0: Success
804 * Else: Failure
805 *------------------------------------------------------------------------*/
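/*
 * A minimal usage sketch (not part of this file; the "xyz_*", "sc_*" and
 * "uaa" names, buffer sizes and flags are hypothetical driver code, where
 * "uaa" is the usb_attach_arg passed to the driver's attach routine):
 *
 *	enum { XYZ_BULK_RD, XYZ_BULK_WR, XYZ_N_TRANSFER };
 *
 *	static const struct usb_config xyz_config[XYZ_N_TRANSFER] = {
 *		[XYZ_BULK_RD] = {
 *			.type = UE_BULK,
 *			.endpoint = UE_ADDR_ANY,
 *			.direction = UE_DIR_IN,
 *			.bufsize = 512,
 *			.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
 *			.callback = &xyz_read_callback,
 *		},
 *		[XYZ_BULK_WR] = {
 *			.type = UE_BULK,
 *			.endpoint = UE_ADDR_ANY,
 *			.direction = UE_DIR_OUT,
 *			.bufsize = 512,
 *			.flags = {.pipe_bof = 1,},
 *			.callback = &xyz_write_callback,
 *		},
 *	};
 *
 * Then, in attach, after "sc->sc_mtx" has been initialised:
 *
 *	uint8_t iface_index = 0;
 *	usb_error_t error;
 *
 *	error = usbd_transfer_setup(uaa->device, &iface_index,
 *	    sc->sc_xfer, xyz_config, XYZ_N_TRANSFER, sc, &sc->sc_mtx);
 *
 * On success each "sc->sc_xfer[]" entry points to a ready-to-use
 * "struct usb_xfer" which must eventually be released by a matching
 * call to "usbd_transfer_unsetup()".
 */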
806usb_error_t
807usbd_transfer_setup(struct usb_device *udev,
808    const uint8_t *ifaces, struct usb_xfer **ppxfer,
809    const struct usb_config *setup_start, uint16_t n_setup,
810    void *priv_sc, struct mtx *xfer_mtx)
811{
812	struct usb_xfer dummy;
813	struct usb_setup_params parm;
814	const struct usb_config *setup_end = setup_start + n_setup;
815	const struct usb_config *setup;
816	struct usb_endpoint *ep;
817	struct usb_xfer_root *info;
818	struct usb_xfer *xfer;
819	void *buf = NULL;
820	uint16_t n;
821	uint16_t refcount;
822
823	parm.err = 0;
824	refcount = 0;
825	info = NULL;
826
827	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
828	    "usbd_transfer_setup can sleep!");
829
830	/* do some checking first */
831
832	if (n_setup == 0) {
833		DPRINTFN(6, "setup array has zero length!\n");
834		return (USB_ERR_INVAL);
835	}
836	if (ifaces == 0) {
837		DPRINTFN(6, "ifaces array is NULL!\n");
838		return (USB_ERR_INVAL);
839	}
840	if (xfer_mtx == NULL) {
841		DPRINTFN(6, "using global lock\n");
842		xfer_mtx = &Giant;
843	}
844	/* sanity checks */
845	for (setup = setup_start, n = 0;
846	    setup != setup_end; setup++, n++) {
847		if (setup->bufsize == (usb_frlength_t)-1) {
848			parm.err = USB_ERR_BAD_BUFSIZE;
849			DPRINTF("invalid bufsize\n");
850		}
851		if (setup->callback == NULL) {
852			parm.err = USB_ERR_NO_CALLBACK;
853			DPRINTF("no callback\n");
854		}
855		ppxfer[n] = NULL;
856	}
857
858	if (parm.err) {
859		goto done;
860	}
861	memset(&parm, 0, sizeof(parm));
862
863	parm.udev = udev;
864	parm.speed = usbd_get_speed(udev);
865	parm.hc_max_packet_count = 1;
866
867	if (parm.speed >= USB_SPEED_MAX) {
868		parm.err = USB_ERR_INVAL;
869		goto done;
870	}
871	/* setup all transfers */
872
873	while (1) {
874
875		if (buf) {
876			/*
877			 * Initialize the "usb_xfer_root" structure,
878			 * which is common for all our USB transfers.
879			 */
880			info = USB_ADD_BYTES(buf, 0);
881
882			info->memory_base = buf;
883			info->memory_size = parm.size[0];
884
885#if USB_HAVE_BUSDMA
886			info->dma_page_cache_start = USB_ADD_BYTES(buf, parm.size[4]);
887			info->dma_page_cache_end = USB_ADD_BYTES(buf, parm.size[5]);
888#endif
889			info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm.size[5]);
890			info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm.size[2]);
891
892			cv_init(&info->cv_drain, "WDRAIN");
893
894			info->xfer_mtx = xfer_mtx;
895#if USB_HAVE_BUSDMA
896			usb_dma_tag_setup(&info->dma_parent_tag,
897			    parm.dma_tag_p, udev->bus->dma_parent_tag[0].tag,
898			    xfer_mtx, &usb_bdma_done_event, 32, parm.dma_tag_max);
899#endif
900
901			info->bus = udev->bus;
902			info->udev = udev;
903
904			TAILQ_INIT(&info->done_q.head);
905			info->done_q.command = &usbd_callback_wrapper;
906#if USB_HAVE_BUSDMA
907			TAILQ_INIT(&info->dma_q.head);
908			info->dma_q.command = &usb_bdma_work_loop;
909#endif
910			info->done_m[0].hdr.pm_callback = &usb_callback_proc;
911			info->done_m[0].xroot = info;
912			info->done_m[1].hdr.pm_callback = &usb_callback_proc;
913			info->done_m[1].xroot = info;
914
915			/*
916			 * In device side mode control endpoint
917			 * requests need to run from a separate
918			 * context, else there is a chance of
919			 * deadlock!
920			 */
921			if (setup_start == usb_control_ep_cfg)
922				info->done_p =
923				    &udev->bus->control_xfer_proc;
924			else if (xfer_mtx == &Giant)
925				info->done_p =
926				    &udev->bus->giant_callback_proc;
927			else
928				info->done_p =
929				    &udev->bus->non_giant_callback_proc;
930		}
931		/* reset sizes */
932
933		parm.size[0] = 0;
934		parm.buf = buf;
935		parm.size[0] += sizeof(info[0]);
936
937		for (setup = setup_start, n = 0;
938		    setup != setup_end; setup++, n++) {
939
940			/* skip USB transfers without callbacks: */
941			if (setup->callback == NULL) {
942				continue;
943			}
944			/* see if there is a matching endpoint */
945			ep = usbd_get_endpoint(udev,
946			    ifaces[setup->if_index], setup);
947
948			if ((ep == NULL) || (ep->methods == NULL)) {
949				if (setup->flags.no_pipe_ok)
950					continue;
951				if ((setup->usb_mode != USB_MODE_DUAL) &&
952				    (setup->usb_mode != udev->flags.usb_mode))
953					continue;
954				parm.err = USB_ERR_NO_PIPE;
955				goto done;
956			}
957
958			/* align data properly */
959			parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
960
961			/* store current setup pointer */
962			parm.curr_setup = setup;
963
964			if (buf) {
965				/*
966				 * Common initialization of the
967				 * "usb_xfer" structure.
968				 */
969				xfer = USB_ADD_BYTES(buf, parm.size[0]);
970				xfer->address = udev->address;
971				xfer->priv_sc = priv_sc;
972				xfer->xroot = info;
973
974				usb_callout_init_mtx(&xfer->timeout_handle,
975				    &udev->bus->bus_mtx, 0);
976			} else {
977				/*
978				 * Set up a dummy xfer, because we are
979				 * writing to the "usb_xfer"
980				 * structure pointed to by "xfer"
981				 * before we have allocated any
982				 * memory:
983				 */
984				xfer = &dummy;
985				memset(&dummy, 0, sizeof(dummy));
986				refcount++;
987			}
988
989			/* set transfer endpoint pointer */
990			xfer->endpoint = ep;
991
992			parm.size[0] += sizeof(xfer[0]);
993			parm.methods = xfer->endpoint->methods;
994			parm.curr_xfer = xfer;
995
996			/*
997			 * Call the Host or Device controller transfer
998			 * setup routine:
999			 */
1000			(udev->bus->methods->xfer_setup) (&parm);
1001
1002			/* check for error */
1003			if (parm.err)
1004				goto done;
1005
1006			if (buf) {
1007				/*
1008				 * Increment the endpoint refcount. This
1009				 * basically prevents setting a new
1010				 * configuration and alternate setting
1011				 * when USB transfers are in use on
1012				 * the given interface. Search the USB
1013				 * code for "endpoint->refcount_alloc" if you
1014				 * want more information.
1015				 */
1016				USB_BUS_LOCK(info->bus);
1017				if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1018					parm.err = USB_ERR_INVAL;
1019
1020				xfer->endpoint->refcount_alloc++;
1021
1022				if (xfer->endpoint->refcount_alloc == 0)
1023					panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1024				USB_BUS_UNLOCK(info->bus);
1025
1026				/*
1027				 * Whenever we set ppxfer[] then we
1028				 * also need to increment the
1029				 * "setup_refcount":
1030				 */
1031				info->setup_refcount++;
1032
1033				/*
1034				 * Transfer is successfully setup and
1035				 * can be used:
1036				 */
1037				ppxfer[n] = xfer;
1038			}
1039
1040			/* check for error */
1041			if (parm.err)
1042				goto done;
1043		}
1044
1045		if (buf || parm.err) {
1046			goto done;
1047		}
1048		if (refcount == 0) {
1049			/* no transfers - nothing to do ! */
1050			goto done;
1051		}
1052		/* align data properly */
1053		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1054
1055		/* store offset temporarily */
1056		parm.size[1] = parm.size[0];
1057
1058		/*
1059		 * The number of DMA tags required depends on
1060		 * the number of endpoints. The current estimate
1061		 * for maximum number of DMA tags per endpoint
1062		 * is two.
1063		 */
1064		parm.dma_tag_max += 2 * MIN(n_setup, USB_EP_MAX);
1065
1066		/*
1067		 * DMA tags for QH, TD, Data and more.
1068		 */
1069		parm.dma_tag_max += 8;
1070
1071		parm.dma_tag_p += parm.dma_tag_max;
1072
1073		parm.size[0] += ((uint8_t *)parm.dma_tag_p) -
1074		    ((uint8_t *)0);
1075
1076		/* align data properly */
1077		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1078
1079		/* store offset temporarily */
1080		parm.size[3] = parm.size[0];
1081
1082		parm.size[0] += ((uint8_t *)parm.dma_page_ptr) -
1083		    ((uint8_t *)0);
1084
1085		/* align data properly */
1086		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1087
1088		/* store offset temporarily */
1089		parm.size[4] = parm.size[0];
1090
1091		parm.size[0] += ((uint8_t *)parm.dma_page_cache_ptr) -
1092		    ((uint8_t *)0);
1093
1094		/* store end offset temporarily */
1095		parm.size[5] = parm.size[0];
1096
1097		parm.size[0] += ((uint8_t *)parm.xfer_page_cache_ptr) -
1098		    ((uint8_t *)0);
1099
1100		/* store end offset temporarily */
1101
1102		parm.size[2] = parm.size[0];
1103
1104		/* align data properly */
1105		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1106
1107		parm.size[6] = parm.size[0];
1108
1109		parm.size[0] += ((uint8_t *)parm.xfer_length_ptr) -
1110		    ((uint8_t *)0);
1111
1112		/* align data properly */
1113		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1114
1115		/* allocate zeroed memory */
1116		buf = malloc(parm.size[0], M_USB, M_WAITOK | M_ZERO);
1117
1118		if (buf == NULL) {
1119			parm.err = USB_ERR_NOMEM;
1120			DPRINTFN(0, "cannot allocate memory block for "
1121			    "configuration (%d bytes)\n",
1122			    parm.size[0]);
1123			goto done;
1124		}
1125		parm.dma_tag_p = USB_ADD_BYTES(buf, parm.size[1]);
1126		parm.dma_page_ptr = USB_ADD_BYTES(buf, parm.size[3]);
1127		parm.dma_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[4]);
1128		parm.xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[5]);
1129		parm.xfer_length_ptr = USB_ADD_BYTES(buf, parm.size[6]);
1130	}
1131
1132done:
1133	if (buf) {
1134		if (info->setup_refcount == 0) {
1135			/*
1136			 * "usbd_transfer_unsetup_sub" will unlock
1137			 * the bus mutex before returning !
1138			 */
1139			USB_BUS_LOCK(info->bus);
1140
1141			/* something went wrong */
1142			usbd_transfer_unsetup_sub(info, 0);
1143		}
1144	}
1145	if (parm.err) {
1146		usbd_transfer_unsetup(ppxfer, n_setup);
1147	}
1148	return (parm.err);
1149}
1150
1151/*------------------------------------------------------------------------*
1152 *	usbd_transfer_unsetup_sub - factored out code
1153 *------------------------------------------------------------------------*/
1154static void
1155usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1156{
1157#if USB_HAVE_BUSDMA
1158	struct usb_page_cache *pc;
1159#endif
1160
1161	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1162
1163	/* wait for any outstanding DMA operations */
1164
1165	if (needs_delay) {
1166		usb_timeout_t temp;
1167		temp = usbd_get_dma_delay(info->udev);
1168		if (temp != 0) {
1169			usb_pause_mtx(&info->bus->bus_mtx,
1170			    USB_MS_TO_TICKS(temp));
1171		}
1172	}
1173
1174	/* make sure that our done messages are not queued anywhere */
1175	usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1176
1177	USB_BUS_UNLOCK(info->bus);
1178
1179#if USB_HAVE_BUSDMA
1180	/* free DMA'able memory, if any */
1181	pc = info->dma_page_cache_start;
1182	while (pc != info->dma_page_cache_end) {
1183		usb_pc_free_mem(pc);
1184		pc++;
1185	}
1186
1187	/* free DMA maps in all "xfer->frbuffers" */
1188	pc = info->xfer_page_cache_start;
1189	while (pc != info->xfer_page_cache_end) {
1190		usb_pc_dmamap_destroy(pc);
1191		pc++;
1192	}
1193
1194	/* free all DMA tags */
1195	usb_dma_tag_unsetup(&info->dma_parent_tag);
1196#endif
1197
1198	cv_destroy(&info->cv_drain);
1199
1200	/*
1201	 * free the "memory_base" last, because the "info" structure is
1202	 * contained within the "memory_base"!
1203	 */
1204	free(info->memory_base, M_USB);
1205}
1206
1207/*------------------------------------------------------------------------*
1208 *	usbd_transfer_unsetup - unsetup/free an array of USB transfers
1209 *
1210 * NOTE: All USB transfers in progress will get called back passing
1211 * the error code "USB_ERR_CANCELLED" before this function
1212 * returns.
1213 *------------------------------------------------------------------------*/
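/*
 * A typical detach sequence (illustrative sketch, reusing the hypothetical
 * "xyz_*" names from the setup example above):
 *
 *	usbd_transfer_unsetup(sc->sc_xfer, XYZ_N_TRANSFER);
 *	mtx_destroy(&sc->sc_mtx);
 *
 * This function may sleep and must not be called from a transfer callback
 * or while holding the transfer mutex.
 */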
1214void
1215usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1216{
1217	struct usb_xfer *xfer;
1218	struct usb_xfer_root *info;
1219	uint8_t needs_delay = 0;
1220
1221	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1222	    "usbd_transfer_unsetup can sleep!");
1223
1224	while (n_setup--) {
1225		xfer = pxfer[n_setup];
1226
1227		if (xfer == NULL)
1228			continue;
1229
1230		info = xfer->xroot;
1231
1232		USB_XFER_LOCK(xfer);
1233		USB_BUS_LOCK(info->bus);
1234
1235		/*
1236		 * HINT: when you start/stop a transfer, it might be a
1237		 * good idea to directly use the "pxfer[]" structure:
1238		 *
1239		 * usbd_transfer_start(sc->pxfer[0]);
1240		 * usbd_transfer_stop(sc->pxfer[0]);
1241		 *
1242		 * That way, if other parts of your code keep running
1243		 * under the same lock, "xfer_mtx", after the pointer
1244		 * has been cleared, the usbd_transfer_start() and
1245		 * usbd_transfer_stop() functions will simply return
1246		 * when they detect a NULL pointer argument.
1247		 *
1248		 * To avoid any races we clear the "pxfer[]" pointer
1249		 * while holding the private mutex of the driver:
1250		 */
1251		pxfer[n_setup] = NULL;
1252
1253		USB_BUS_UNLOCK(info->bus);
1254		USB_XFER_UNLOCK(xfer);
1255
1256		usbd_transfer_drain(xfer);
1257
1258#if USB_HAVE_BUSDMA
1259		if (xfer->flags_int.bdma_enable)
1260			needs_delay = 1;
1261#endif
1262		/*
1263		 * NOTE: default endpoint does not have an
1264		 * interface, even if endpoint->iface_index == 0
1265		 */
1266		USB_BUS_LOCK(info->bus);
1267		xfer->endpoint->refcount_alloc--;
1268		USB_BUS_UNLOCK(info->bus);
1269
1270		usb_callout_drain(&xfer->timeout_handle);
1271
1272		USB_BUS_LOCK(info->bus);
1273
1274		USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1275		    "reference count\n"));
1276
1277		info->setup_refcount--;
1278
1279		if (info->setup_refcount == 0) {
1280			usbd_transfer_unsetup_sub(info,
1281			    needs_delay);
1282		} else {
1283			USB_BUS_UNLOCK(info->bus);
1284		}
1285	}
1286}
1287
1288/*------------------------------------------------------------------------*
1289 *	usbd_control_transfer_init - factored out code
1290 *
1291 * In USB Device Mode we have to wait for the SETUP packet which
1292 * contains the "struct usb_device_request" structure, before we can
1293 * transfer any data. In USB Host Mode we already have the SETUP
1294 * packet at the moment the USB transfer is started. This leads us to
1295 * having to setup the USB transfer at two different places in
1296 * time. This function just contains factored out control transfer
1297 * initialisation code, so that we don't duplicate the code.
1298 *------------------------------------------------------------------------*/
1299static void
1300usbd_control_transfer_init(struct usb_xfer *xfer)
1301{
1302	struct usb_device_request req;
1303
1304	/* copy out the USB request header */
1305
1306	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1307
1308	/* setup remainder */
1309
1310	xfer->flags_int.control_rem = UGETW(req.wLength);
1311
1312	/* copy direction to endpoint variable */
1313
1314	xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1315	xfer->endpointno |=
1316	    (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1317}
1318
1319/*------------------------------------------------------------------------*
1320 *	usbd_setup_ctrl_transfer
1321 *
1322 * This function handles initialisation of control transfers. Control
1323 * transfers are special in that they can both transmit
1324 * and receive data.
1325 *
1326 * Return values:
1327 *    0: Success
1328 * Else: Failure
1329 *------------------------------------------------------------------------*/
1330static int
1331usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1332{
1333	usb_frlength_t len;
1334
1335	/* Check for control endpoint stall */
1336	if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1337		/* the control transfer is no longer active */
1338		xfer->flags_int.control_stall = 1;
1339		xfer->flags_int.control_act = 0;
1340	} else {
1341		/* don't stall control transfer by default */
1342		xfer->flags_int.control_stall = 0;
1343	}
1344
1345	/* Check for invalid number of frames */
1346	if (xfer->nframes > 2) {
1347		/*
1348		 * If you need to split a control transfer, you
1349		 * have to do one part at a time. Only with
1350		 * non-control transfers can you do multiple
1351		 * parts at a time.
1352		 */
1353		DPRINTFN(0, "Too many frames: %u\n",
1354		    (unsigned int)xfer->nframes);
1355		goto error;
1356	}
1357
1358	/*
1359         * Check if there is a control
1360         * transfer in progress:
1361         */
1362	if (xfer->flags_int.control_act) {
1363
1364		if (xfer->flags_int.control_hdr) {
1365
1366			/* clear send header flag */
1367
1368			xfer->flags_int.control_hdr = 0;
1369
1370			/* setup control transfer */
1371			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1372				usbd_control_transfer_init(xfer);
1373			}
1374		}
1375		/* get data length */
1376
1377		len = xfer->sumlen;
1378
1379	} else {
1380
1381		/* the size of the SETUP structure is hardcoded ! */
1382
1383		if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1384			DPRINTFN(0, "Wrong framelength %u != %zu\n",
1385			    xfer->frlengths[0], sizeof(struct
1386			    usb_device_request));
1387			goto error;
1388		}
1389		/* check USB mode */
1390		if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1391
1392			/* check number of frames */
1393			if (xfer->nframes != 1) {
1394				/*
1395			         * We need to receive the setup
1396			         * message first so that we know the
1397			         * data direction!
1398			         */
1399				DPRINTF("Misconfigured transfer\n");
1400				goto error;
1401			}
1402			/*
1403			 * Set a dummy "control_rem" value.  This
1404			 * variable will be overwritten later by a
1405			 * call to "usbd_control_transfer_init()" !
1406			 */
1407			xfer->flags_int.control_rem = 0xFFFF;
1408		} else {
1409
1410			/* setup "endpoint" and "control_rem" */
1411
1412			usbd_control_transfer_init(xfer);
1413		}
1414
1415		/* set transfer-header flag */
1416
1417		xfer->flags_int.control_hdr = 1;
1418
1419		/* get data length */
1420
1421		len = (xfer->sumlen - sizeof(struct usb_device_request));
1422	}
1423
1424	/* check if there is a length mismatch */
1425
1426	if (len > xfer->flags_int.control_rem) {
1427		DPRINTFN(0, "Length (%d) greater than "
1428		    "remaining length (%d)\n", len,
1429		    xfer->flags_int.control_rem);
1430		goto error;
1431	}
1432	/* check if we are doing a short transfer */
1433
1434	if (xfer->flags.force_short_xfer) {
1435		xfer->flags_int.control_rem = 0;
1436	} else {
1437		if ((len != xfer->max_data_length) &&
1438		    (len != xfer->flags_int.control_rem) &&
1439		    (xfer->nframes != 1)) {
1440			DPRINTFN(0, "Short control transfer without "
1441			    "force_short_xfer set\n");
1442			goto error;
1443		}
1444		xfer->flags_int.control_rem -= len;
1445	}
1446
1447	/* the status part is executed when "control_act" is 0 */
1448
1449	if ((xfer->flags_int.control_rem > 0) ||
1450	    (xfer->flags.manual_status)) {
1451		/* don't execute the STATUS stage yet */
1452		xfer->flags_int.control_act = 1;
1453
1454		/* sanity check */
1455		if ((!xfer->flags_int.control_hdr) &&
1456		    (xfer->nframes == 1)) {
1457			/*
1458		         * This is not a valid operation!
1459		         */
1460			DPRINTFN(0, "Invalid parameter "
1461			    "combination\n");
1462			goto error;
1463		}
1464	} else {
1465		/* time to execute the STATUS stage */
1466		xfer->flags_int.control_act = 0;
1467	}
1468	return (0);			/* success */
1469
1470error:
1471	return (1);			/* failure */
1472}
1473
1474/*------------------------------------------------------------------------*
1475 *	usbd_transfer_submit - start USB hardware for the given transfer
1476 *
1477 * This function should only be called from the USB callback.
1478 *------------------------------------------------------------------------*/
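/*
 * A typical USB callback driving usbd_transfer_submit() looks like the
 * sketch below (illustrative only; "xyz_softc" and "xyz_process_data()"
 * stand for the driver's own softc and data handling). Note that the
 * transferred case intentionally falls through into the setup case so
 * that the transfer is immediately resubmitted:
 *
 *	static void
 *	xyz_read_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		struct xyz_softc *sc = usbd_xfer_softc(xfer);
 *		int actlen;
 *
 *		usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *
 *		switch (USB_GET_STATE(xfer)) {
 *		case USB_ST_TRANSFERRED:
 *			xyz_process_data(sc, actlen);
 *		case USB_ST_SETUP:
 *	tr_setup:
 *			usbd_xfer_set_frame_len(xfer, 0,
 *			    usbd_xfer_max_len(xfer));
 *			usbd_transfer_submit(xfer);
 *			break;
 *		default:
 *			if (error != USB_ERR_CANCELLED) {
 *				usbd_xfer_set_stall(xfer);
 *				goto tr_setup;
 *			}
 *			break;
 *		}
 *	}
 */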
1479void
1480usbd_transfer_submit(struct usb_xfer *xfer)
1481{
1482	struct usb_xfer_root *info;
1483	struct usb_bus *bus;
1484	usb_frcount_t x;
1485
1486	info = xfer->xroot;
1487	bus = info->bus;
1488
1489	DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1490	    xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1491	    "read" : "write");
1492
1493#ifdef USB_DEBUG
1494	if (USB_DEBUG_VAR > 0) {
1495		USB_BUS_LOCK(bus);
1496
1497		usb_dump_endpoint(xfer->endpoint);
1498
1499		USB_BUS_UNLOCK(bus);
1500	}
1501#endif
1502
1503	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1504	USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1505
1506	/* Only open the USB transfer once! */
1507	if (!xfer->flags_int.open) {
1508		xfer->flags_int.open = 1;
1509
1510		DPRINTF("open\n");
1511
1512		USB_BUS_LOCK(bus);
1513		(xfer->endpoint->methods->open) (xfer);
1514		USB_BUS_UNLOCK(bus);
1515	}
1516	/* set "transferring" flag */
1517	xfer->flags_int.transferring = 1;
1518
1519#if USB_HAVE_POWERD
1520	/* increment power reference */
1521	usbd_transfer_power_ref(xfer, 1);
1522#endif
1523	/*
1524	 * Check if the transfer is waiting on a queue, most
1525	 * frequently the "done_q":
1526	 */
1527	if (xfer->wait_queue) {
1528		USB_BUS_LOCK(bus);
1529		usbd_transfer_dequeue(xfer);
1530		USB_BUS_UNLOCK(bus);
1531	}
1532	/* clear "did_dma_delay" flag */
1533	xfer->flags_int.did_dma_delay = 0;
1534
1535	/* clear "did_close" flag */
1536	xfer->flags_int.did_close = 0;
1537
1538#if USB_HAVE_BUSDMA
1539	/* clear "bdma_setup" flag */
1540	xfer->flags_int.bdma_setup = 0;
1541#endif
1542	/* by default we cannot cancel any USB transfer immediately */
1543	xfer->flags_int.can_cancel_immed = 0;
1544
1545	/* clear lengths and frame counts by default */
1546	xfer->sumlen = 0;
1547	xfer->actlen = 0;
1548	xfer->aframes = 0;
1549
1550	/* clear any previous errors */
1551	xfer->error = 0;
1552
1553	/* Check if the device is still alive */
1554	if (info->udev->state < USB_STATE_POWERED) {
1555		USB_BUS_LOCK(bus);
1556		/*
1557		 * Must return cancelled error code else
1558		 * device drivers can hang.
1559		 */
1560		usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1561		USB_BUS_UNLOCK(bus);
1562		return;
1563	}
1564
1565	/* sanity check */
1566	if (xfer->nframes == 0) {
1567		if (xfer->flags.stall_pipe) {
1568			/*
1569			 * Special case - want to stall without transferring
1570			 * any data:
1571			 */
1572			DPRINTF("xfer=%p nframes=0: stall "
1573			    "or clear stall!\n", xfer);
1574			USB_BUS_LOCK(bus);
1575			xfer->flags_int.can_cancel_immed = 1;
1576			/* start the transfer */
1577			usb_command_wrapper(&xfer->endpoint->endpoint_q, xfer);
1578			USB_BUS_UNLOCK(bus);
1579			return;
1580		}
1581		USB_BUS_LOCK(bus);
1582		usbd_transfer_done(xfer, USB_ERR_INVAL);
1583		USB_BUS_UNLOCK(bus);
1584		return;
1585	}
1586	/* compute some variables */
1587
1588	for (x = 0; x != xfer->nframes; x++) {
1589		/* make a copy of the frlengths[] */
1590		xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1591		/* compute total transfer length */
1592		xfer->sumlen += xfer->frlengths[x];
1593		if (xfer->sumlen < xfer->frlengths[x]) {
1594			/* length wrapped around */
1595			USB_BUS_LOCK(bus);
1596			usbd_transfer_done(xfer, USB_ERR_INVAL);
1597			USB_BUS_UNLOCK(bus);
1598			return;
1599		}
1600	}
1601
1602	/* clear some internal flags */
1603
1604	xfer->flags_int.short_xfer_ok = 0;
1605	xfer->flags_int.short_frames_ok = 0;
1606
1607	/* check if this is a control transfer */
1608
1609	if (xfer->flags_int.control_xfr) {
1610
1611		if (usbd_setup_ctrl_transfer(xfer)) {
1612			USB_BUS_LOCK(bus);
1613			usbd_transfer_done(xfer, USB_ERR_STALLED);
1614			USB_BUS_UNLOCK(bus);
1615			return;
1616		}
1617	}
1618	/*
1619	 * Setup filtered version of some transfer flags,
1620	 * in case of data read direction
1621	 */
1622	if (USB_GET_DATA_ISREAD(xfer)) {
1623
1624		if (xfer->flags.short_frames_ok) {
1625			xfer->flags_int.short_xfer_ok = 1;
1626			xfer->flags_int.short_frames_ok = 1;
1627		} else if (xfer->flags.short_xfer_ok) {
1628			xfer->flags_int.short_xfer_ok = 1;
1629
1630			/* check for control transfer */
1631			if (xfer->flags_int.control_xfr) {
1632				/*
1633				 * 1) Control transfers do not support
1634				 * reception of multiple short USB
1635				 * frames in host mode and device side
1636				 * mode, with exception of:
1637				 *
1638				 * 2) Due to sometimes buggy device
1639				 * side firmware we need to do a
1640				 * STATUS stage in case of short
1641				 * control transfers in USB host mode.
1642				 * The STATUS stage then becomes the
1643				 * "alt_next" to the DATA stage.
1644				 */
1645				xfer->flags_int.short_frames_ok = 1;
1646			}
1647		}
1648	}
1649	/*
1650	 * Check if BUS-DMA support is enabled and try to load virtual
1651	 * buffers into DMA, if any:
1652	 */
1653#if USB_HAVE_BUSDMA
1654	if (xfer->flags_int.bdma_enable) {
1655		/* insert the USB transfer last in the BUS-DMA queue */
1656		usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1657		return;
1658	}
1659#endif
1660	/*
1661	 * Enter the USB transfer into the Host Controller or
1662	 * Device Controller schedule:
1663	 */
1664	usbd_pipe_enter(xfer);
1665}
1666
1667/*------------------------------------------------------------------------*
1668 *	usbd_pipe_enter - factored out code
1669 *------------------------------------------------------------------------*/
1670void
1671usbd_pipe_enter(struct usb_xfer *xfer)
1672{
1673	struct usb_endpoint *ep;
1674
1675	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1676
1677	USB_BUS_LOCK(xfer->xroot->bus);
1678
1679	ep = xfer->endpoint;
1680
1681	DPRINTF("enter\n");
1682
1683	/* enter the transfer */
1684	(ep->methods->enter) (xfer);
1685
1686	xfer->flags_int.can_cancel_immed = 1;
1687
1688	/* check for transfer error */
1689	if (xfer->error) {
1690		/* some error has happened */
1691		usbd_transfer_done(xfer, 0);
1692		USB_BUS_UNLOCK(xfer->xroot->bus);
1693		return;
1694	}
1695
1696	/* start the transfer */
1697	usb_command_wrapper(&ep->endpoint_q, xfer);
1698	USB_BUS_UNLOCK(xfer->xroot->bus);
1699}
1700
1701/*------------------------------------------------------------------------*
1702 *	usbd_transfer_start - start a USB transfer
1703 *
1704 * NOTE: Calling this function more than once will only
1705 *       result in a single transfer start, until the USB transfer
1706 *       completes.
1707 *------------------------------------------------------------------------*/
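/*
 * Illustrative sketch (hypothetical "sc" context from the setup example
 * above): the transfer is started while holding the mutex that was passed
 * to usbd_transfer_setup():
 *
 *	mtx_lock(&sc->sc_mtx);
 *	usbd_transfer_start(sc->sc_xfer[XYZ_BULK_RD]);
 *	mtx_unlock(&sc->sc_mtx);
 */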
1708void
1709usbd_transfer_start(struct usb_xfer *xfer)
1710{
1711	if (xfer == NULL) {
1712		/* transfer is gone */
1713		return;
1714	}
1715	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1716
1717	/* mark the USB transfer started */
1718
1719	if (!xfer->flags_int.started) {
1720		/* lock the BUS lock to avoid races updating flags_int */
1721		USB_BUS_LOCK(xfer->xroot->bus);
1722		xfer->flags_int.started = 1;
1723		USB_BUS_UNLOCK(xfer->xroot->bus);
1724	}
1725	/* check if the USB transfer callback is already transferring */
1726
1727	if (xfer->flags_int.transferring) {
1728		return;
1729	}
1730	USB_BUS_LOCK(xfer->xroot->bus);
1731	/* call the USB transfer callback */
1732	usbd_callback_ss_done_defer(xfer);
1733	USB_BUS_UNLOCK(xfer->xroot->bus);
1734}
1735
1736/*------------------------------------------------------------------------*
1737 *	usbd_transfer_stop - stop a USB transfer
1738 *
1739 * NOTE: Calling this function more than once will only
1740 *       result in a single transfer stop.
1741 * NOTE: When this function returns it is not safe to free nor
1742 *       reuse any DMA buffers. See "usbd_transfer_drain()".
1743 *------------------------------------------------------------------------*/
1744void
1745usbd_transfer_stop(struct usb_xfer *xfer)
1746{
1747	struct usb_endpoint *ep;
1748
1749	if (xfer == NULL) {
1750		/* transfer is gone */
1751		return;
1752	}
1753	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1754
1755	/* check if the USB transfer was ever opened */
1756
1757	if (!xfer->flags_int.open) {
1758		if (xfer->flags_int.started) {
1759			/* nothing to do except clearing the "started" flag */
1760			/* lock the BUS lock to avoid races updating flags_int */
1761			USB_BUS_LOCK(xfer->xroot->bus);
1762			xfer->flags_int.started = 0;
1763			USB_BUS_UNLOCK(xfer->xroot->bus);
1764		}
1765		return;
1766	}
1767	/* try to stop the current USB transfer */
1768
1769	USB_BUS_LOCK(xfer->xroot->bus);
1770	/* override any previous error */
1771	xfer->error = USB_ERR_CANCELLED;
1772
1773	/*
1774	 * Clear "open" and "started" while both the private and USB locks
1775	 * are held, so that we don't get a race updating "flags_int"
1776	 */
1777	xfer->flags_int.open = 0;
1778	xfer->flags_int.started = 0;
1779
1780	/*
1781	 * Check if we can cancel the USB transfer immediately.
1782	 */
1783	if (xfer->flags_int.transferring) {
1784		if (xfer->flags_int.can_cancel_immed &&
1785		    (!xfer->flags_int.did_close)) {
1786			DPRINTF("close\n");
1787			/*
1788			 * The following will lead to an USB_ERR_CANCELLED
1789			 * error code being passed to the USB callback.
1790			 */
1791			(xfer->endpoint->methods->close) (xfer);
1792			/* only close once */
1793			xfer->flags_int.did_close = 1;
1794		} else {
1795			/* need to wait for the next done callback */
1796		}
1797	} else {
1798		DPRINTF("close\n");
1799
1800		/* close here and now */
1801		(xfer->endpoint->methods->close) (xfer);
1802
1803		/*
1804		 * Any additional DMA delay is done by
1805		 * "usbd_transfer_unsetup()".
1806		 */
1807
1808		/*
1809		 * Special case. Check if we need to restart a blocked
1810		 * endpoint.
1811		 */
1812		ep = xfer->endpoint;
1813
1814		/*
1815		 * If the current USB transfer is completing we need
1816		 * to start the next one:
1817		 */
1818		if (ep->endpoint_q.curr == xfer) {
1819			usb_command_wrapper(&ep->endpoint_q, NULL);
1820		}
1821	}
1822
1823	USB_BUS_UNLOCK(xfer->xroot->bus);
1824}
1825
1826/*------------------------------------------------------------------------*
1827 *	usbd_transfer_pending
1828 *
1829 * This function will check if a USB transfer is pending, which is a
1830 * little bit complicated!
1831 * Return values:
1832 * 0: Not pending
1833 * 1: Pending: The USB transfer will receive a callback in the future.
1834 *------------------------------------------------------------------------*/
1835uint8_t
1836usbd_transfer_pending(struct usb_xfer *xfer)
1837{
1838	struct usb_xfer_root *info;
1839	struct usb_xfer_queue *pq;
1840
1841	if (xfer == NULL) {
1842		/* transfer is gone */
1843		return (0);
1844	}
1845	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1846
1847	if (xfer->flags_int.transferring) {
1848		/* trivial case */
1849		return (1);
1850	}
1851	USB_BUS_LOCK(xfer->xroot->bus);
1852	if (xfer->wait_queue) {
1853		/* we are waiting on a queue somewhere */
1854		USB_BUS_UNLOCK(xfer->xroot->bus);
1855		return (1);
1856	}
1857	info = xfer->xroot;
1858	pq = &info->done_q;
1859
1860	if (pq->curr == xfer) {
1861		/* we are currently scheduled for callback */
1862		USB_BUS_UNLOCK(xfer->xroot->bus);
1863		return (1);
1864	}
1865	/* we are not pending */
1866	USB_BUS_UNLOCK(xfer->xroot->bus);
1867	return (0);
1868}
1869
1870/*------------------------------------------------------------------------*
1871 *	usbd_transfer_drain
1872 *
1873 * This function will stop the USB transfer and wait for any
1874 * additional BUS-DMA and HW-DMA operations to complete. Buffers that
1875 * are loaded into DMA can safely be freed or reused after this
1876 * function has returned.
1877 *------------------------------------------------------------------------*/
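/*
 * Illustrative sketch (hypothetical "sc" context from the setup example
 * above): draining is done without holding the transfer mutex, because
 * this function takes that mutex itself and may sleep:
 *
 *	usbd_transfer_drain(sc->sc_xfer[XYZ_BULK_RD]);
 *
 * After the call returns, any external buffer loaded into the transfer
 * can safely be freed or reused.
 */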
1878void
1879usbd_transfer_drain(struct usb_xfer *xfer)
1880{
1881	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1882	    "usbd_transfer_drain can sleep!");
1883
1884	if (xfer == NULL) {
1885		/* transfer is gone */
1886		return;
1887	}
1888	if (xfer->xroot->xfer_mtx != &Giant) {
1889		USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
1890	}
1891	USB_XFER_LOCK(xfer);
1892
1893	usbd_transfer_stop(xfer);
1894
1895	while (usbd_transfer_pending(xfer) ||
1896	    xfer->flags_int.doing_callback) {
1897
1898		/*
1899		 * It is allowed that the callback can drop its
1900		 * transfer mutex. In that case checking only
1901		 * "usbd_transfer_pending()" is not enough to tell if
1902		 * the USB transfer is fully drained. We also need to
1903		 * check the internal "doing_callback" flag.
1904		 */
1905		xfer->flags_int.draining = 1;
1906
1907		/*
1908		 * Wait until the current outstanding USB
1909		 * transfer is complete !
1910		 */
1911		cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
1912	}
1913	USB_XFER_UNLOCK(xfer);
1914}
1915
1916struct usb_page_cache *
1917usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
1918{
1919	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1920
1921	return (&xfer->frbuffers[frindex]);
1922}
1923
1924/*------------------------------------------------------------------------*
1925 *	usbd_xfer_get_fps_shift
1926 *
1927 * The following function is only useful for isochronous transfers. It
1928 * returns how many times the frame execution rate has been shifted
1929 * down.
1930 *
1931 * Return value:
1932 * Success: 0..3
1933 * Failure: 0
1934 *------------------------------------------------------------------------*/
1935uint8_t
1936usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
1937{
1938	return (xfer->fps_shift);
1939}
1940
1941usb_frlength_t
1942usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
1943{
1944	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1945
1946	return (xfer->frlengths[frindex]);
1947}
1948
1949/*------------------------------------------------------------------------*
1950 *	usbd_xfer_set_frame_data
1951 *
1952 * This function sets the pointer of the buffer that should be
1953 * loaded directly into DMA for the given USB frame. Passing "ptr"
1954 * equal to NULL while the corresponding "frlength" is greater
1955 * than zero gives undefined results!
1956 *------------------------------------------------------------------------*/
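/*
 * Illustrative sketch (hypothetical "sc_rx_buf"/"sc_rx_len" fields; the
 * transfer is assumed to have been configured with the "ext_buffer" flag
 * so that the external buffer is loaded directly into DMA): from the
 * callback's setup state one would do:
 *
 *	usbd_xfer_set_frame_data(xfer, 0, sc->sc_rx_buf, sc->sc_rx_len);
 *	usbd_transfer_submit(xfer);
 */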
1957void
1958usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
1959    void *ptr, usb_frlength_t len)
1960{
1961	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1962
1963	/* set virtual address to load and length */
1964	xfer->frbuffers[frindex].buffer = ptr;
1965	usbd_xfer_set_frame_len(xfer, frindex, len);
1966}
1967
1968void
1969usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
1970    void **ptr, int *len)
1971{
1972	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1973
1974	if (ptr != NULL)
1975		*ptr = xfer->frbuffers[frindex].buffer;
1976	if (len != NULL)
1977		*len = xfer->frlengths[frindex];
1978}
1979
1980/*------------------------------------------------------------------------*
1981 *	usbd_xfer_old_frame_length
1982 *
1983 * This function returns the framelength of the given frame at the
1984 * time the transfer was submitted. This function can be used to
1985 * compute the starting data pointer of the next isochronous frame
1986 * when an isochronous transfer has completed.
1987 *------------------------------------------------------------------------*/
1988usb_frlength_t
1989usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
1990{
1991	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1992
1993	return (xfer->frlengths[frindex + xfer->max_frame_count]);
1994}
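
/*------------------------------------------------------------------------*
 * Example (editor's sketch): walking completed isochronous frames
 *
 * A hedged illustration of how the old (submitted) frame length can
 * be used to locate each frame's data inside the proxy buffer after
 * completion. Variable declarations are abbreviated and "dst" is a
 * hypothetical "uint8_t *" destination pointer.
 *
 *	case USB_ST_TRANSFERRED:
 *		usbd_xfer_status(xfer, NULL, NULL, NULL, &nframes);
 *		pc = usbd_xfer_get_frame(xfer, 0);	// proxy buffer page cache
 *		offset = 0;
 *		for (x = 0; x != nframes; x++) {
 *			len = usbd_xfer_frame_len(xfer, x);	// actual length
 *			usbd_copy_out(pc, offset, dst, len);
 *			dst += len;
 *			// the next frame starts where the frame length
 *			// given at submit time ends:
 *			offset += usbd_xfer_old_frame_length(xfer, x);
 *		}
 *		break;
 *------------------------------------------------------------------------*/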
1995
1996void
1997usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
1998    int *nframes)
1999{
2000	if (actlen != NULL)
2001		*actlen = xfer->actlen;
2002	if (sumlen != NULL)
2003		*sumlen = xfer->sumlen;
2004	if (aframes != NULL)
2005		*aframes = xfer->aframes;
2006	if (nframes != NULL)
2007		*nframes = xfer->nframes;
2008}
2009
2010/*------------------------------------------------------------------------*
2011 *	usbd_xfer_set_frame_offset
2012 *
2013 * This function sets the frame data buffer offset relative to the beginning
2014 * of the USB DMA buffer allocated for this USB transfer.
2015 *------------------------------------------------------------------------*/
2016void
2017usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2018    usb_frcount_t frindex)
2019{
2020	KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2021	    "when the USB buffer is external\n"));
2022	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2023
2024	/* set virtual address to load */
2025	xfer->frbuffers[frindex].buffer =
2026	    USB_ADD_BYTES(xfer->local_buffer, offset);
2027}
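
/*------------------------------------------------------------------------*
 * Example (editor's sketch): placing two frames in the local buffer
 *
 * A hedged illustration of "usbd_xfer_set_frame_offset()" for a
 * control transfer where frame 0 carries the 8-byte request and
 * frame 1 carries the data stage right after it. The layout shown is
 * an assumption for illustration, not a requirement of the API, and
 * "data_len" is hypothetical.
 *
 *	usbd_xfer_set_frame_offset(xfer, 0, 0);		// request at start of buffer
 *	usbd_xfer_set_frame_len(xfer, 0, sizeof(struct usb_device_request));
 *	usbd_xfer_set_frame_offset(xfer, sizeof(struct usb_device_request), 1);
 *	usbd_xfer_set_frame_len(xfer, 1, data_len);
 *------------------------------------------------------------------------*/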
2028
2029void
2030usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2031{
2032	xfer->interval = i;
2033}
2034
2035void
2036usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2037{
2038	xfer->timeout = t;
2039}
2040
2041void
2042usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2043{
2044	xfer->nframes = n;
2045}
2046
2047usb_frcount_t
2048usbd_xfer_max_frames(struct usb_xfer *xfer)
2049{
2050	return (xfer->max_frame_count);
2051}
2052
2053usb_frlength_t
2054usbd_xfer_max_len(struct usb_xfer *xfer)
2055{
2056	return (xfer->max_data_length);
2057}
2058
2059usb_frlength_t
2060usbd_xfer_max_framelen(struct usb_xfer *xfer)
2061{
2062	return (xfer->max_frame_size);
2063}
2064
2065void
2066usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2067    usb_frlength_t len)
2068{
2069	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2070
2071	xfer->frlengths[frindex] = len;
2072}
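
/*------------------------------------------------------------------------*
 * Example (editor's sketch): a typical bulk read callback
 *
 * The accessors above are normally used together in a driver callback
 * following the pattern below. This is an illustration only;
 * "my_bulk_read_callback" is hypothetical and error handling is kept
 * minimal.
 *
 *	static void
 *	my_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		int actlen;
 *
 *		usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *
 *		switch (USB_GET_STATE(xfer)) {
 *		case USB_ST_TRANSFERRED:
 *			// process "actlen" bytes of received data
 *			// FALLTHROUGH
 *		case USB_ST_SETUP:
 *	tr_setup:
 *			usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
 *			usbd_transfer_submit(xfer);
 *			break;
 *		default:	// error
 *			if (error != USB_ERR_CANCELLED) {
 *				// request a clear-stall and retry
 *				usbd_xfer_set_stall(xfer);
 *				goto tr_setup;
 *			}
 *			break;
 *		}
 *	}
 *------------------------------------------------------------------------*/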
2073
2074/*------------------------------------------------------------------------*
2075 *	usb_callback_proc - factored out code
2076 *
2077 * This function performs USB callbacks.
2078 *------------------------------------------------------------------------*/
2079static void
2080usb_callback_proc(struct usb_proc_msg *_pm)
2081{
2082	struct usb_done_msg *pm = (void *)_pm;
2083	struct usb_xfer_root *info = pm->xroot;
2084
2085	/* Change locking order */
2086	USB_BUS_UNLOCK(info->bus);
2087
2088	/*
2089	 * We exploit the fact that the mutex is the same for all
2090	 * callbacks that will be called from this thread:
2091	 */
2092	mtx_lock(info->xfer_mtx);
2093	USB_BUS_LOCK(info->bus);
2094
2095	/* Continue where we lost track */
2096	usb_command_wrapper(&info->done_q,
2097	    info->done_q.curr);
2098
2099	mtx_unlock(info->xfer_mtx);
2100}
2101
2102/*------------------------------------------------------------------------*
2103 *	usbd_callback_ss_done_defer
2104 *
2105 * This function will defer the start, stop and done callback to the
2106 * correct thread.
2107 *------------------------------------------------------------------------*/
2108static void
2109usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2110{
2111	struct usb_xfer_root *info = xfer->xroot;
2112	struct usb_xfer_queue *pq = &info->done_q;
2113
2114	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2115
2116	if (pq->curr != xfer) {
2117		usbd_transfer_enqueue(pq, xfer);
2118	}
2119	if (!pq->recurse_1) {
2120
2121		/*
2122	         * We have to postpone the callback, because if we
2123	         * try to proceed now we will get a Lock Order
2124	         * Reversal, LOR.
2125	         */
2126		if (usb_proc_msignal(info->done_p,
2127		    &info->done_m[0], &info->done_m[1])) {
2128			/* ignore */
2129		}
2130	} else {
2131		/* clear second recurse flag */
2132		pq->recurse_2 = 0;
2133	}
2134	return;
2135
2136}
2137
2138/*------------------------------------------------------------------------*
2139 *	usbd_callback_wrapper
2140 *
2141 * This is a wrapper for USB callbacks. This wrapper does some
2142 * auto-magic things like figuring out if we can call the callback
2143 * directly from the current context or if we need to wakeup the
2144 * directly from the current context or if we need to wake up the
2145 *------------------------------------------------------------------------*/
2146static void
2147usbd_callback_wrapper(struct usb_xfer_queue *pq)
2148{
2149	struct usb_xfer *xfer = pq->curr;
2150	struct usb_xfer_root *info = xfer->xroot;
2151
2152	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2153	if (!mtx_owned(info->xfer_mtx)) {
2154		/*
2155	       	 * Cases that end up here:
2156		 *
2157		 * 5) HW interrupt done callback or other source.
2158		 */
2159		DPRINTFN(3, "case 5\n");
2160
2161		/*
2162	         * We have to postpone the callback, because if we
2163	         * try to proceed now we will get a Lock Order
2164	         * Reversal, LOR.
2165	         */
2166		if (usb_proc_msignal(info->done_p,
2167		    &info->done_m[0], &info->done_m[1])) {
2168			/* ignore */
2169		}
2170		return;
2171	}
2172	/*
2173	 * Cases that end up here:
2174	 *
2175	 * 1) We are starting a transfer
2176	 * 2) We are prematurely calling back a transfer
2177	 * 3) We are stopping a transfer
2178	 * 4) We are doing an ordinary callback
2179	 */
2180	DPRINTFN(3, "case 1-4\n");
2181	/* get next USB transfer in the queue */
2182	info->done_q.curr = NULL;
2183
2184	/* set flag in case of drain */
2185	xfer->flags_int.doing_callback = 1;
2186
2187	USB_BUS_UNLOCK(info->bus);
2188	USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2189
2190	/* set correct USB state for callback */
2191	if (!xfer->flags_int.transferring) {
2192		xfer->usb_state = USB_ST_SETUP;
2193		if (!xfer->flags_int.started) {
2194			/* we got stopped before we even got started */
2195			USB_BUS_LOCK(info->bus);
2196			goto done;
2197		}
2198	} else {
2199
2200		if (usbd_callback_wrapper_sub(xfer)) {
2201			/* the callback has been deferred */
2202			USB_BUS_LOCK(info->bus);
2203			goto done;
2204		}
2205#if USB_HAVE_POWERD
2206		/* decrement power reference */
2207		usbd_transfer_power_ref(xfer, -1);
2208#endif
2209		xfer->flags_int.transferring = 0;
2210
2211		if (xfer->error) {
2212			xfer->usb_state = USB_ST_ERROR;
2213		} else {
2214			/* set transferred state */
2215			xfer->usb_state = USB_ST_TRANSFERRED;
2216#if USB_HAVE_BUSDMA
2217			/* sync DMA memory, if any */
2218			if (xfer->flags_int.bdma_enable &&
2219			    (!xfer->flags_int.bdma_no_post_sync)) {
2220				usb_bdma_post_sync(xfer);
2221			}
2222#endif
2223		}
2224	}
2225
2226#if USB_HAVE_PF
2227	if (xfer->usb_state != USB_ST_SETUP)
2228		usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2229#endif
2230	/* call processing routine */
2231	(xfer->callback) (xfer, xfer->error);
2232
2233	/* pickup the USB mutex again */
2234	USB_BUS_LOCK(info->bus);
2235
2236	/*
2237	 * Check if we got started after we got cancelled, but
2238	 * before we managed to do the callback.
2239	 */
2240	if ((!xfer->flags_int.open) &&
2241	    (xfer->flags_int.started) &&
2242	    (xfer->usb_state == USB_ST_ERROR)) {
2243		/* clear flag in case of drain */
2244		xfer->flags_int.doing_callback = 0;
2245		/* try to loop, but not recursively */
2246		usb_command_wrapper(&info->done_q, xfer);
2247		return;
2248	}
2249
2250done:
2251	/* clear flag in case of drain */
2252	xfer->flags_int.doing_callback = 0;
2253
2254	/*
2255	 * Check if we are draining.
2256	 */
2257	if (xfer->flags_int.draining &&
2258	    (!xfer->flags_int.transferring)) {
2259		/* "usbd_transfer_drain()" is waiting for end of transfer */
2260		xfer->flags_int.draining = 0;
2261		cv_broadcast(&info->cv_drain);
2262	}
2263
2264	/* do the next callback, if any */
2265	usb_command_wrapper(&info->done_q,
2266	    info->done_q.curr);
2267}
2268
2269/*------------------------------------------------------------------------*
2270 *	usb_dma_delay_done_cb
2271 *
2272 * This function is called when the DMA delay has been executed, and
2273 * will make sure that the callback is called to complete the USB
2274 * transfer. This code path is usually only used when there is a USB
2275 * error like USB_ERR_CANCELLED.
2276 *------------------------------------------------------------------------*/
2277void
2278usb_dma_delay_done_cb(struct usb_xfer *xfer)
2279{
2280	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2281
2282	DPRINTFN(3, "Completed %p\n", xfer);
2283
2284	/* queue callback for execution, again */
2285	usbd_transfer_done(xfer, 0);
2286}
2287
2288/*------------------------------------------------------------------------*
2289 *	usbd_transfer_dequeue
2290 *
2291 *  - This function is used to remove a USB transfer from a USB
2292 *  transfer queue.
2293 *
2294 *  - This function can be called multiple times in a row.
2295 *------------------------------------------------------------------------*/
2296void
2297usbd_transfer_dequeue(struct usb_xfer *xfer)
2298{
2299	struct usb_xfer_queue *pq;
2300
2301	pq = xfer->wait_queue;
2302	if (pq) {
2303		TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2304		xfer->wait_queue = NULL;
2305	}
2306}
2307
2308/*------------------------------------------------------------------------*
2309 *	usbd_transfer_enqueue
2310 *
2311 *  - This function is used to insert a USB transfer into a USB
2312 *  transfer queue.
2313 *
2314 *  - This function can be called multiple times in a row.
2315 *------------------------------------------------------------------------*/
2316void
2317usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2318{
2319	/*
2320	 * Insert the USB transfer into the queue, if it is not
2321	 * already on a USB transfer queue:
2322	 */
2323	if (xfer->wait_queue == NULL) {
2324		xfer->wait_queue = pq;
2325		TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2326	}
2327}
2328
2329/*------------------------------------------------------------------------*
2330 *	usbd_transfer_done
2331 *
2332 *  - This function is used to remove a USB transfer from the busdma,
2333 *  pipe or interrupt queue.
2334 *
2335 *  - This function is used to queue the USB transfer on the done
2336 *  queue.
2337 *
2338 *  - This function is used to stop any USB transfer timeouts.
2339 *------------------------------------------------------------------------*/
2340void
2341usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2342{
2343	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2344
2345	DPRINTF("err=%s\n", usbd_errstr(error));
2346
2347	/*
2348	 * If we are not transferring then just return.
2349	 * This can happen during transfer cancel.
2350	 */
2351	if (!xfer->flags_int.transferring) {
2352		DPRINTF("not transferring\n");
2353		/* end of control transfer, if any */
2354		xfer->flags_int.control_act = 0;
2355		return;
2356	}
2357	/* only set transfer error if not already set */
2358	if (!xfer->error) {
2359		xfer->error = error;
2360	}
2361	/* stop any callouts */
2362	usb_callout_stop(&xfer->timeout_handle);
2363
2364	/*
2365	 * If we are waiting on a queue, just remove the USB transfer
2366	 * from the queue, if any. We should have the required locks
2367	 * locked to do the remove when this function is called.
2368	 */
2369	usbd_transfer_dequeue(xfer);
2370
2371#if USB_HAVE_BUSDMA
2372	if (mtx_owned(xfer->xroot->xfer_mtx)) {
2373		struct usb_xfer_queue *pq;
2374
2375		/*
2376		 * If the private USB lock is not locked, then we assume
2377		 * that the BUS-DMA load stage has been passed:
2378		 */
2379		pq = &xfer->xroot->dma_q;
2380
2381		if (pq->curr == xfer) {
2382			/* start the next BUS-DMA load, if any */
2383			usb_command_wrapper(pq, NULL);
2384		}
2385	}
2386#endif
2387	/* keep some statistics */
2388	if (xfer->error) {
2389		xfer->xroot->bus->stats_err.uds_requests
2390		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2391	} else {
2392		xfer->xroot->bus->stats_ok.uds_requests
2393		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2394	}
2395
2396	/* call the USB transfer callback */
2397	usbd_callback_ss_done_defer(xfer);
2398}
2399
2400/*------------------------------------------------------------------------*
2401 *	usbd_transfer_start_cb
2402 *
2403 * This function is called to start the USB transfer when
2404 * "xfer->interval" is greater than zero, and the endpoint type is
2405 * BULK or CONTROL.
2406 *------------------------------------------------------------------------*/
2407static void
2408usbd_transfer_start_cb(void *arg)
2409{
2410	struct usb_xfer *xfer = arg;
2411	struct usb_endpoint *ep = xfer->endpoint;
2412
2413	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2414
2415	DPRINTF("start\n");
2416
2417#if USB_HAVE_PF
2418	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2419#endif
2420	/* start USB transfer, if no error */
2421	if (xfer->error == 0)
2422		(ep->methods->start) (xfer);
2423
2424	xfer->flags_int.can_cancel_immed = 1;
2425
2426	/* check for error */
2427	if (xfer->error) {
2428		/* some error has happened */
2429		usbd_transfer_done(xfer, 0);
2430	}
2431}
2432
2433/*------------------------------------------------------------------------*
2434 *	usbd_xfer_set_stall
2435 *
2436 * This function is used to set the stall flag outside the
2437 * callback. This function is NULL safe.
2438 *------------------------------------------------------------------------*/
2439void
2440usbd_xfer_set_stall(struct usb_xfer *xfer)
2441{
2442	if (xfer == NULL) {
2443		/* tearing down */
2444		return;
2445	}
2446	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2447
2448	/* avoid any races by locking the USB mutex */
2449	USB_BUS_LOCK(xfer->xroot->bus);
2450	xfer->flags.stall_pipe = 1;
2451	USB_BUS_UNLOCK(xfer->xroot->bus);
2452}
2453
2454int
2455usbd_xfer_is_stalled(struct usb_xfer *xfer)
2456{
2457	return (xfer->endpoint->is_stalled);
2458}
2459
2460/*------------------------------------------------------------------------*
2461 *	usbd_transfer_clear_stall
2462 *
2463 * This function is used to clear the stall flag outside the
2464 * callback. This function is NULL safe.
2465 *------------------------------------------------------------------------*/
2466void
2467usbd_transfer_clear_stall(struct usb_xfer *xfer)
2468{
2469	if (xfer == NULL) {
2470		/* tearing down */
2471		return;
2472	}
2473	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2474
2475	/* avoid any races by locking the USB mutex */
2476	USB_BUS_LOCK(xfer->xroot->bus);
2477
2478	xfer->flags.stall_pipe = 0;
2479
2480	USB_BUS_UNLOCK(xfer->xroot->bus);
2481}
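
/*------------------------------------------------------------------------*
 * Example (editor's sketch): forcing a pipe reset from outside a callback
 *
 * A hedged illustration of "usbd_xfer_set_stall()": while holding the
 * driver mutex that protects the transfer, mark the pipe as stalled so
 * that the stack issues a clear-stall before the next I/O is started.
 * "sc" and "sc_mtx" are hypothetical softc fields.
 *
 *	mtx_lock(&sc->sc_mtx);
 *	usbd_xfer_set_stall(sc->sc_xfer[0]);	// clear-stall before next start
 *	usbd_transfer_start(sc->sc_xfer[0]);
 *	mtx_unlock(&sc->sc_mtx);
 *------------------------------------------------------------------------*/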
2482
2483/*------------------------------------------------------------------------*
2484 *	usbd_pipe_start
2485 *
2486 * This function is used to add a USB transfer to the pipe transfer list.
2487 *------------------------------------------------------------------------*/
2488void
2489usbd_pipe_start(struct usb_xfer_queue *pq)
2490{
2491	struct usb_endpoint *ep;
2492	struct usb_xfer *xfer;
2493	uint8_t type;
2494
2495	xfer = pq->curr;
2496	ep = xfer->endpoint;
2497
2498	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2499
2500	/*
2501	 * If the endpoint is already stalled we do nothing !
2502	 */
2503	if (ep->is_stalled) {
2504		return;
2505	}
2506	/*
2507	 * Check if we are supposed to stall the endpoint:
2508	 */
2509	if (xfer->flags.stall_pipe) {
2510		struct usb_device *udev;
2511		struct usb_xfer_root *info;
2512
2513		/* clear stall command */
2514		xfer->flags.stall_pipe = 0;
2515
2516		/* get pointer to USB device */
2517		info = xfer->xroot;
2518		udev = info->udev;
2519
2520		/*
2521		 * Only stall BULK and INTERRUPT endpoints.
2522		 */
2523		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2524		if ((type == UE_BULK) ||
2525		    (type == UE_INTERRUPT)) {
2526			uint8_t did_stall;
2527
2528			did_stall = 1;
2529
2530			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2531				(udev->bus->methods->set_stall) (
2532				    udev, NULL, ep, &did_stall);
2533			} else if (udev->ctrl_xfer[1]) {
2534				info = udev->ctrl_xfer[1]->xroot;
2535				usb_proc_msignal(
2536				    &info->bus->non_giant_callback_proc,
2537				    &udev->cs_msg[0], &udev->cs_msg[1]);
2538			} else {
2539				/* should not happen */
2540				DPRINTFN(0, "No stall handler\n");
2541			}
2542			/*
2543			 * Check if we should stall. Some USB hardware
2544			 * handles set- and clear-stall in hardware.
2545			 */
2546			if (did_stall) {
2547				/*
2548				 * The transfer will be continued when
2549				 * the clear-stall control endpoint
2550				 * message is received.
2551				 */
2552				ep->is_stalled = 1;
2553				return;
2554			}
2555		} else if (type == UE_ISOCHRONOUS) {
2556
2557			/*
2558			 * Make sure any FIFO overflow or other FIFO
2559			 * error conditions go away by resetting the
2560			 * endpoint FIFO through the clear stall
2561			 * method.
2562			 */
2563			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2564				(udev->bus->methods->clear_stall) (udev, ep);
2565			}
2566		}
2567	}
2568	/* Set or clear stall complete - special case */
2569	if (xfer->nframes == 0) {
2570		/* we are complete */
2571		xfer->aframes = 0;
2572		usbd_transfer_done(xfer, 0);
2573		return;
2574	}
2575	/*
2576	 * Handled cases:
2577	 *
2578	 * 1) Start the first transfer queued.
2579	 *
2580	 * 2) Re-start the current USB transfer.
2581	 */
2582	/*
2583	 * Check if there should be any
2584	 * pre transfer start delay:
2585	 * pre-transfer start delay:
2586	if (xfer->interval > 0) {
2587		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2588		if ((type == UE_BULK) ||
2589		    (type == UE_CONTROL)) {
2590			usbd_transfer_timeout_ms(xfer,
2591			    &usbd_transfer_start_cb,
2592			    xfer->interval);
2593			return;
2594		}
2595	}
2596	DPRINTF("start\n");
2597
2598#if USB_HAVE_PF
2599	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2600#endif
2601	/* start USB transfer, if no error */
2602	if (xfer->error == 0)
2603		(ep->methods->start) (xfer);
2604
2605	xfer->flags_int.can_cancel_immed = 1;
2606
2607	/* check for error */
2608	if (xfer->error) {
2609		/* some error has happened */
2610		usbd_transfer_done(xfer, 0);
2611	}
2612}
2613
2614/*------------------------------------------------------------------------*
2615 *	usbd_transfer_timeout_ms
2616 *
2617 * This function is used to setup a timeout on the given USB
2618 * transfer. If the timeout has been deferred the callback given by
2619 * "cb" will get called after "ms" milliseconds.
2620 *------------------------------------------------------------------------*/
2621void
2622usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2623    void (*cb) (void *arg), usb_timeout_t ms)
2624{
2625	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2626
2627	/* defer delay */
2628	usb_callout_reset(&xfer->timeout_handle,
2629	    USB_MS_TO_TICKS(ms), cb, xfer);
2630}
2631
2632/*------------------------------------------------------------------------*
2633 *	usbd_callback_wrapper_sub
2634 *
2635 *  - This function will update variables in a USB transfer after
2636 *  the USB transfer is complete.
2637 *
2638 *  - This function is used to start the next USB transfer on the
2639 *  ep transfer queue, if any.
2640 *
2641 * NOTE: In some special cases the USB transfer will not be removed from
2642 * the pipe queue, but remain first. To enforce USB transfer removal call
2643 * this function passing the error code "USB_ERR_CANCELLED".
2644 *
2645 * Return values:
2646 * 0: Success.
2647 * Else: The callback has been deferred.
2648 *------------------------------------------------------------------------*/
2649static uint8_t
2650usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2651{
2652	struct usb_endpoint *ep;
2653	struct usb_bus *bus;
2654	usb_frcount_t x;
2655
2656	bus = xfer->xroot->bus;
2657
2658	if ((!xfer->flags_int.open) &&
2659	    (!xfer->flags_int.did_close)) {
2660		DPRINTF("close\n");
2661		USB_BUS_LOCK(bus);
2662		(xfer->endpoint->methods->close) (xfer);
2663		USB_BUS_UNLOCK(bus);
2664		/* only close once */
2665		xfer->flags_int.did_close = 1;
2666		return (1);		/* wait for new callback */
2667	}
2668	/*
2669	 * If we have a non-hardware induced error we
2670	 * need to do the DMA delay!
2671	 */
2672	if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2673	    (xfer->error == USB_ERR_CANCELLED ||
2674	    xfer->error == USB_ERR_TIMEOUT ||
2675	    bus->methods->start_dma_delay != NULL)) {
2676
2677		usb_timeout_t temp;
2678
2679		/* only delay once */
2680		xfer->flags_int.did_dma_delay = 1;
2681
2682		/* we can not cancel this delay */
2683		xfer->flags_int.can_cancel_immed = 0;
2684
2685		temp = usbd_get_dma_delay(xfer->xroot->udev);
2686
2687		DPRINTFN(3, "DMA delay, %u ms, "
2688		    "on %p\n", temp, xfer);
2689
2690		if (temp != 0) {
2691			USB_BUS_LOCK(bus);
2692			/*
2693			 * Some hardware solutions have dedicated
2694			 * events when it is safe to free DMA'ed
2695			 * memory. For the other hardware platforms we
2696			 * use a static delay.
2697			 */
2698			if (bus->methods->start_dma_delay != NULL) {
2699				(bus->methods->start_dma_delay) (xfer);
2700			} else {
2701				usbd_transfer_timeout_ms(xfer,
2702				    (void *)&usb_dma_delay_done_cb, temp);
2703			}
2704			USB_BUS_UNLOCK(bus);
2705			return (1);	/* wait for new callback */
2706		}
2707	}
2708	/* check actual number of frames */
2709	if (xfer->aframes > xfer->nframes) {
2710		if (xfer->error == 0) {
2711			panic("%s: actual number of frames, %d, is "
2712			    "greater than initial number of frames, %d\n",
2713			    __FUNCTION__, xfer->aframes, xfer->nframes);
2714		} else {
2715			/* just set some valid value */
2716			xfer->aframes = xfer->nframes;
2717		}
2718	}
2719	/* compute actual length */
2720	xfer->actlen = 0;
2721
2722	for (x = 0; x != xfer->aframes; x++) {
2723		xfer->actlen += xfer->frlengths[x];
2724	}
2725
2726	/*
2727	 * Frames that were not transferred get zero actual length in
2728	 * case the USB device driver does not check the actual number
2729	 * of frames transferred, "xfer->aframes":
2730	 */
2731	for (; x < xfer->nframes; x++) {
2732		usbd_xfer_set_frame_len(xfer, x, 0);
2733	}
2734
2735	/* check actual length */
2736	if (xfer->actlen > xfer->sumlen) {
2737		if (xfer->error == 0) {
2738			panic("%s: actual length, %d, is greater than "
2739			    "initial length, %d\n",
2740			    __FUNCTION__, xfer->actlen, xfer->sumlen);
2741		} else {
2742			/* just set some valid value */
2743			xfer->actlen = xfer->sumlen;
2744		}
2745	}
2746	DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2747	    xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2748	    xfer->aframes, xfer->nframes);
2749
2750	if (xfer->error) {
2751		/* end of control transfer, if any */
2752		xfer->flags_int.control_act = 0;
2753
2754		/* check if we should block the execution queue */
2755		if ((xfer->error != USB_ERR_CANCELLED) &&
2756		    (xfer->flags.pipe_bof)) {
2757			DPRINTFN(2, "xfer=%p: Block On Failure "
2758			    "on endpoint=%p\n", xfer, xfer->endpoint);
2759			goto done;
2760		}
2761	} else {
2762		/* check for short transfers */
2763		if (xfer->actlen < xfer->sumlen) {
2764
2765			/* end of control transfer, if any */
2766			xfer->flags_int.control_act = 0;
2767
2768			if (!xfer->flags_int.short_xfer_ok) {
2769				xfer->error = USB_ERR_SHORT_XFER;
2770				if (xfer->flags.pipe_bof) {
2771					DPRINTFN(2, "xfer=%p: Block On Failure on "
2772					    "Short Transfer on endpoint %p.\n",
2773					    xfer, xfer->endpoint);
2774					goto done;
2775				}
2776			}
2777		} else {
2778			/*
2779			 * Check if we are in the middle of a
2780			 * control transfer:
2781			 */
2782			if (xfer->flags_int.control_act) {
2783				DPRINTFN(5, "xfer=%p: Control transfer "
2784				    "active on endpoint=%p\n", xfer, xfer->endpoint);
2785				goto done;
2786			}
2787		}
2788	}
2789
2790	ep = xfer->endpoint;
2791
2792	/*
2793	 * If the current USB transfer is completing we need to start the
2794	 * next one:
2795	 */
2796	USB_BUS_LOCK(bus);
2797	if (ep->endpoint_q.curr == xfer) {
2798		usb_command_wrapper(&ep->endpoint_q, NULL);
2799
2800		if (ep->endpoint_q.curr || TAILQ_FIRST(&ep->endpoint_q.head)) {
2801			/* there is another USB transfer waiting */
2802		} else {
2803			/* this is the last USB transfer */
2804			/* clear isochronous sync flag */
2805			xfer->endpoint->is_synced = 0;
2806		}
2807	}
2808	USB_BUS_UNLOCK(bus);
2809done:
2810	return (0);
2811}
2812
2813/*------------------------------------------------------------------------*
2814 *	usb_command_wrapper
2815 *
2816 * This function is used to execute commands non-recursively on a USB
2817 * transfer.
2818 *------------------------------------------------------------------------*/
2819void
2820usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2821{
2822	if (xfer) {
2823		/*
2824		 * If the transfer is not already processing,
2825		 * queue it!
2826		 */
2827		if (pq->curr != xfer) {
2828			usbd_transfer_enqueue(pq, xfer);
2829			if (pq->curr != NULL) {
2830				/* something is already processing */
2831				DPRINTFN(6, "busy %p\n", pq->curr);
2832				return;
2833			}
2834		}
2835	} else {
2836		/* Get next element in queue */
2837		pq->curr = NULL;
2838	}
2839
2840	if (!pq->recurse_1) {
2841
2842		do {
2843
2844			/* set both recurse flags */
2845			pq->recurse_1 = 1;
2846			pq->recurse_2 = 1;
2847
2848			if (pq->curr == NULL) {
2849				xfer = TAILQ_FIRST(&pq->head);
2850				if (xfer) {
2851					TAILQ_REMOVE(&pq->head, xfer,
2852					    wait_entry);
2853					xfer->wait_queue = NULL;
2854					pq->curr = xfer;
2855				} else {
2856					break;
2857				}
2858			}
2859			DPRINTFN(6, "cb %p (enter)\n", pq->curr);
2860			(pq->command) (pq);
2861			DPRINTFN(6, "cb %p (leave)\n", pq->curr);
2862
2863		} while (!pq->recurse_2);
2864
2865		/* clear first recurse flag */
2866		pq->recurse_1 = 0;
2867
2868	} else {
2869		/* clear second recurse flag */
2870		pq->recurse_2 = 0;
2871	}
2872}
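
/*------------------------------------------------------------------------*
 * Example (editor's sketch): the recurse-flag technique in isolation
 *
 * The following stand-alone fragment shows the same two-flag protocol
 * used by "usb_command_wrapper()" above, applied to a toy queue, to
 * make the nesting behaviour easier to follow. It is an illustration
 * only; "toy_queue" and "toy_queue_run" are hypothetical.
 *
 *	struct toy_queue {
 *		void  (*command)(struct toy_queue *);
 *		uint8_t recurse_1;	// set while the execution loop runs
 *		uint8_t recurse_2;	// cleared by a nested invocation
 *	};
 *
 *	static void
 *	toy_queue_run(struct toy_queue *q)
 *	{
 *		if (!q->recurse_1) {
 *			do {
 *				q->recurse_1 = 1;
 *				q->recurse_2 = 1;
 *				(q->command)(q);	// may call toy_queue_run(q)
 *				// a nested call only clears "recurse_2",
 *				// so loop again instead of recursing
 *			} while (!q->recurse_2);
 *			q->recurse_1 = 0;
 *		} else {
 *			// defer to the already active outer loop
 *			q->recurse_2 = 0;
 *		}
 *	}
 *------------------------------------------------------------------------*/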
2873
2874/*------------------------------------------------------------------------*
2875 *	usbd_ctrl_transfer_setup
2876 *
2877 * This function is used to setup the default USB control endpoint
2878 * transfer.
2879 *------------------------------------------------------------------------*/
2880void
2881usbd_ctrl_transfer_setup(struct usb_device *udev)
2882{
2883	struct usb_xfer *xfer;
2884	uint8_t no_resetup;
2885	uint8_t iface_index;
2886
2887	/* check for root HUB */
2888	if (udev->parent_hub == NULL)
2889		return;
2890repeat:
2891
2892	xfer = udev->ctrl_xfer[0];
2893	if (xfer) {
2894		USB_XFER_LOCK(xfer);
2895		no_resetup =
2896		    ((xfer->address == udev->address) &&
2897		    (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
2898		    udev->ddesc.bMaxPacketSize));
2899		if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2900			if (no_resetup) {
2901				/*
2902				 * NOTE: checking "xfer->address" and
2903				 * starting the USB transfer must be
2904				 * atomic!
2905				 */
2906				usbd_transfer_start(xfer);
2907			}
2908		}
2909		USB_XFER_UNLOCK(xfer);
2910	} else {
2911		no_resetup = 0;
2912	}
2913
2914	if (no_resetup) {
2915		/*
2916	         * All parameters are exactly the same as before.
2917	         * Just return.
2918	         */
2919		return;
2920	}
2921	/*
2922	 * Update wMaxPacketSize for the default control endpoint:
2923	 */
2924	udev->ctrl_ep_desc.wMaxPacketSize[0] =
2925	    udev->ddesc.bMaxPacketSize;
2926
2927	/*
2928	 * Unsetup any existing USB transfer:
2929	 */
2930	usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
2931
2932	/*
2933	 * Reset clear stall error counter.
2934	 */
2935	udev->clear_stall_errors = 0;
2936
2937	/*
2938	 * Try to setup a new USB transfer for the
2939	 * default control endpoint:
2940	 */
2941	iface_index = 0;
2942	if (usbd_transfer_setup(udev, &iface_index,
2943	    udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
2944	    &udev->device_mtx)) {
2945		DPRINTFN(0, "could not setup default "
2946		    "USB transfer\n");
2947	} else {
2948		goto repeat;
2949	}
2950}
2951
2952/*------------------------------------------------------------------------*
2953 *	usbd_clear_stall_locked - factored out code
2954 *
2955 * NOTE: the intention of this function is not to reset the hardware
2956 * data toggle.
2957 *------------------------------------------------------------------------*/
2958void
2959usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
2960{
2961	USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
2962
2963	/* check that we have a valid case */
2964	if (udev->flags.usb_mode == USB_MODE_HOST &&
2965	    udev->parent_hub != NULL &&
2966	    udev->bus->methods->clear_stall != NULL &&
2967	    ep->methods != NULL) {
2968		(udev->bus->methods->clear_stall) (udev, ep);
2969	}
2970}
2971
2972/*------------------------------------------------------------------------*
2973 *	usbd_clear_data_toggle - factored out code
2974 *
2975 * NOTE: the intention of this function is not to reset the hardware
2976 * data toggle on the USB device side.
2977 *------------------------------------------------------------------------*/
2978void
2979usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
2980{
2981	DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
2982
2983	USB_BUS_LOCK(udev->bus);
2984	ep->toggle_next = 0;
2985	/* some hardware needs a callback to clear the data toggle */
2986	usbd_clear_stall_locked(udev, ep);
2987	USB_BUS_UNLOCK(udev->bus);
2988}
2989
2990/*------------------------------------------------------------------------*
2991 *	usbd_clear_stall_callback - factored out clear stall callback
2992 *
2993 * Input parameters:
2994 *  xfer1: Clear Stall Control Transfer
2995 *  xfer2: Stalled USB Transfer
2996 *
2997 * This function is NULL safe.
2998 *
2999 * Return values:
3000 *   0: In progress
3001 *   Else: Finished
3002 *
3003 * Clear stall config example:
3004 *
3005 * static const struct usb_config my_clearstall =  {
3006 *	.type = UE_CONTROL,
3007 *	.endpoint = 0,
3008 *	.direction = UE_DIR_ANY,
3009 *	.interval = 50, //50 milliseconds
3010 *	.bufsize = sizeof(struct usb_device_request),
3011 *	.timeout = 1000, //1.000 seconds
3012 *	.callback = &my_clear_stall_callback, // **
3013 *	.usb_mode = USB_MODE_HOST,
3014 * };
3015 *
3016 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3017 * passing the correct parameters.
3018 *------------------------------------------------------------------------*/
3019uint8_t
3020usbd_clear_stall_callback(struct usb_xfer *xfer1,
3021    struct usb_xfer *xfer2)
3022{
3023	struct usb_device_request req;
3024
3025	if (xfer2 == NULL) {
3026		/* looks like we are tearing down */
3027		DPRINTF("NULL input parameter\n");
3028		return (0);
3029	}
3030	USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3031	USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3032
3033	switch (USB_GET_STATE(xfer1)) {
3034	case USB_ST_SETUP:
3035
3036		/*
3037		 * pre-clear the data toggle to DATA0 ("umass.c" and
3038 * "ata-usb.c" depend on this)
3039		 */
3040
3041		usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3042
3043		/* setup a clear-stall packet */
3044
3045		req.bmRequestType = UT_WRITE_ENDPOINT;
3046		req.bRequest = UR_CLEAR_FEATURE;
3047		USETW(req.wValue, UF_ENDPOINT_HALT);
3048		req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3049		req.wIndex[1] = 0;
3050		USETW(req.wLength, 0);
3051
3052		/*
3053		 * "usbd_transfer_setup_sub()" will ensure that
3054		 * we have sufficient room in the buffer for
3055		 * the request structure!
3056		 */
3057
3058		/* copy in the transfer */
3059
3060		usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3061
3062		/* set length */
3063		xfer1->frlengths[0] = sizeof(req);
3064		xfer1->nframes = 1;
3065
3066		usbd_transfer_submit(xfer1);
3067		return (0);
3068
3069	case USB_ST_TRANSFERRED:
3070		break;
3071
3072	default:			/* Error */
3073		if (xfer1->error == USB_ERR_CANCELLED) {
3074			return (0);
3075		}
3076		break;
3077	}
3078	return (1);			/* Clear Stall Finished */
3079}
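
/*------------------------------------------------------------------------*
 * Example (editor's sketch): a driver clear-stall callback
 *
 * A hedged sketch of the "my_clear_stall_callback" referenced in the
 * comment above. "my_softc", "sc_xfer", "MY_BULK_RD", "sc_flags" and
 * "MY_FLAG_RD_STALLED" are hypothetical driver names.
 *
 *	static void
 *	my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		struct my_softc *sc = usbd_xfer_softc(xfer);
 *		struct usb_xfer *xfer_other = sc->sc_xfer[MY_BULK_RD];
 *
 *		if (usbd_clear_stall_callback(xfer, xfer_other)) {
 *			// clear-stall has finished
 *			sc->sc_flags &= ~MY_FLAG_RD_STALLED;
 *			usbd_transfer_start(xfer_other);
 *		}
 *	}
 *------------------------------------------------------------------------*/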
3080
3081/*------------------------------------------------------------------------*
3082 *	usbd_transfer_poll
3083 *
3084 * The following function gets called from the USB keyboard driver and
3085 * UMASS when the system has paniced.
3086 * UMASS when the system has panicked.
3087 * NOTE: It is currently not possible to resume normal operation on
3088 * the USB controller which has been polled, due to clearing of the
3089 * "up_dsleep" and "up_msleep" flags.
3090 *------------------------------------------------------------------------*/
3091void
3092usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3093{
3094	struct usb_xfer *xfer;
3095	struct usb_xfer_root *xroot;
3096	struct usb_device *udev;
3097	struct usb_proc_msg *pm;
3098	uint16_t n;
3099	uint16_t drop_bus;
3100	uint16_t drop_xfer;
3101
3102	for (n = 0; n != max; n++) {
3103		/* Extra checks to avoid panic */
3104		xfer = ppxfer[n];
3105		if (xfer == NULL)
3106			continue;	/* no USB transfer */
3107		xroot = xfer->xroot;
3108		if (xroot == NULL)
3109			continue;	/* no USB root */
3110		udev = xroot->udev;
3111		if (udev == NULL)
3112			continue;	/* no USB device */
3113		if (udev->bus == NULL)
3114			continue;	/* no BUS structure */
3115		if (udev->bus->methods == NULL)
3116			continue;	/* no BUS methods */
3117		if (udev->bus->methods->xfer_poll == NULL)
3118			continue;	/* no poll method */
3119
3120		/* make sure that the BUS mutex is not locked */
3121		drop_bus = 0;
3122		while (mtx_owned(&xroot->udev->bus->bus_mtx)) {
3123			mtx_unlock(&xroot->udev->bus->bus_mtx);
3124			drop_bus++;
3125		}
3126
3127		/* make sure that the transfer mutex is not locked */
3128		drop_xfer = 0;
3129		while (mtx_owned(xroot->xfer_mtx)) {
3130			mtx_unlock(xroot->xfer_mtx);
3131			drop_xfer++;
3132		}
3133
3134		/* Make sure cv_signal() and cv_broadcast() are not called */
3135		udev->bus->control_xfer_proc.up_msleep = 0;
3136		udev->bus->explore_proc.up_msleep = 0;
3137		udev->bus->giant_callback_proc.up_msleep = 0;
3138		udev->bus->non_giant_callback_proc.up_msleep = 0;
3139
3140		/* poll USB hardware */
3141		(udev->bus->methods->xfer_poll) (udev->bus);
3142
3143		USB_BUS_LOCK(xroot->bus);
3144
3145		/* check for clear stall */
3146		if (udev->ctrl_xfer[1] != NULL) {
3147
3148			/* poll clear stall start */
3149			pm = &udev->cs_msg[0].hdr;
3150			(pm->pm_callback) (pm);
3151			/* poll clear stall done thread */
3152			pm = &udev->ctrl_xfer[1]->
3153			    xroot->done_m[0].hdr;
3154			(pm->pm_callback) (pm);
3155		}
3156
3157		/* poll done thread */
3158		pm = &xroot->done_m[0].hdr;
3159		(pm->pm_callback) (pm);
3160
3161		USB_BUS_UNLOCK(xroot->bus);
3162
3163		/* restore transfer mutex */
3164		while (drop_xfer--)
3165			mtx_lock(xroot->xfer_mtx);
3166
3167		/* restore BUS mutex */
3168		while (drop_bus--)
3169			mtx_lock(&xroot->udev->bus->bus_mtx);
3170	}
3171}
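
/*------------------------------------------------------------------------*
 * Example (editor's sketch): invoking the poll routine
 *
 * A hedged illustration of how a console driver's polling hook would
 * typically call the function above while the system is in polled
 * mode. "sc_xfer" and "MY_N_TRANSFER" are hypothetical.
 *
 *	usbd_transfer_poll(sc->sc_xfer, MY_N_TRANSFER);
 *------------------------------------------------------------------------*/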
3172
3173static void
3174usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3175    uint8_t type, enum usb_dev_speed speed)
3176{
3177	static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3178		[USB_SPEED_LOW] = 8,
3179		[USB_SPEED_FULL] = 64,
3180		[USB_SPEED_HIGH] = 1024,
3181		[USB_SPEED_VARIABLE] = 1024,
3182		[USB_SPEED_SUPER] = 1024,
3183	};
3184
3185	static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3186		[USB_SPEED_LOW] = 0,	/* invalid */
3187		[USB_SPEED_FULL] = 1023,
3188		[USB_SPEED_HIGH] = 1024,
3189		[USB_SPEED_VARIABLE] = 3584,
3190		[USB_SPEED_SUPER] = 1024,
3191	};
3192
3193	static const uint16_t control_min[USB_SPEED_MAX] = {
3194		[USB_SPEED_LOW] = 8,
3195		[USB_SPEED_FULL] = 8,
3196		[USB_SPEED_HIGH] = 64,
3197		[USB_SPEED_VARIABLE] = 512,
3198		[USB_SPEED_SUPER] = 512,
3199	};
3200
3201	static const uint16_t bulk_min[USB_SPEED_MAX] = {
3202		[USB_SPEED_LOW] = 8,
3203		[USB_SPEED_FULL] = 8,
3204		[USB_SPEED_HIGH] = 512,
3205		[USB_SPEED_VARIABLE] = 512,
3206		[USB_SPEED_SUPER] = 1024,
3207	};
3208
3209	uint16_t temp;
3210
3211	memset(ptr, 0, sizeof(*ptr));
3212
3213	switch (type) {
3214	case UE_INTERRUPT:
3215		ptr->range.max = intr_range_max[speed];
3216		break;
3217	case UE_ISOCHRONOUS:
3218		ptr->range.max = isoc_range_max[speed];
3219		break;
3220	default:
3221		if (type == UE_BULK)
3222			temp = bulk_min[speed];
3223		else /* UE_CONTROL */
3224			temp = control_min[speed];
3225
3226		/* default is fixed */
3227		ptr->fixed[0] = temp;
3228		ptr->fixed[1] = temp;
3229		ptr->fixed[2] = temp;
3230		ptr->fixed[3] = temp;
3231
3232		if (speed == USB_SPEED_FULL) {
3233			/* multiple sizes */
3234			ptr->fixed[1] = 16;
3235			ptr->fixed[2] = 32;
3236			ptr->fixed[3] = 64;
3237		}
3238		if ((speed == USB_SPEED_VARIABLE) &&
3239		    (type == UE_BULK)) {
3240			/* multiple sizes */
3241			ptr->fixed[2] = 1024;
3242			ptr->fixed[3] = 1536;
3243		}
3244		break;
3245	}
3246}
3247
3248void	*
3249usbd_xfer_softc(struct usb_xfer *xfer)
3250{
3251	return (xfer->priv_sc);
3252}
3253
3254void *
3255usbd_xfer_get_priv(struct usb_xfer *xfer)
3256{
3257	return (xfer->priv_fifo);
3258}
3259
3260void
3261usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3262{
3263	xfer->priv_fifo = ptr;
3264}
3265
3266uint8_t
3267usbd_xfer_state(struct usb_xfer *xfer)
3268{
3269	return (xfer->usb_state);
3270}
3271
3272void
3273usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3274{
3275	switch (flag) {
3276		case USB_FORCE_SHORT_XFER:
3277			xfer->flags.force_short_xfer = 1;
3278			break;
3279		case USB_SHORT_XFER_OK:
3280			xfer->flags.short_xfer_ok = 1;
3281			break;
3282		case USB_MULTI_SHORT_OK:
3283			xfer->flags.short_frames_ok = 1;
3284			break;
3285		case USB_MANUAL_STATUS:
3286			xfer->flags.manual_status = 1;
3287			break;
3288	}
3289}
3290
3291void
3292usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3293{
3294	switch (flag) {
3295		case USB_FORCE_SHORT_XFER:
3296			xfer->flags.force_short_xfer = 0;
3297			break;
3298		case USB_SHORT_XFER_OK:
3299			xfer->flags.short_xfer_ok = 0;
3300			break;
3301		case USB_MULTI_SHORT_OK:
3302			xfer->flags.short_frames_ok = 0;
3303			break;
3304		case USB_MANUAL_STATUS:
3305			xfer->flags.manual_status = 0;
3306			break;
3307	}
3308}
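
/*------------------------------------------------------------------------*
 * Example (editor's sketch): toggling a transfer flag at run-time
 *
 * A hedged illustration of the flag accessors above, equivalent to
 * setting or clearing ".flags.force_short_xfer" in the usb_config at
 * setup time. "want_short_packet" is a hypothetical condition.
 *
 *	if (want_short_packet)
 *		usbd_xfer_set_flag(xfer, USB_FORCE_SHORT_XFER);
 *	else
 *		usbd_xfer_clr_flag(xfer, USB_FORCE_SHORT_XFER);
 *------------------------------------------------------------------------*/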
3309
3310/*
3311 * The following function returns in milliseconds when the isochronous
3312 * transfer was completed by the hardware. The returned value wraps
3313 * around 65536 milliseconds.
3314 */
3315uint16_t
3316usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3317{
3318	return (xfer->isoc_time_complete);
3319}
3320