usb_transfer.c revision 235410
1/* $FreeBSD: stable/9/sys/dev/usb/usb_transfer.c 235410 2012-05-13 17:14:26Z avg $ */
2/*-
3 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/stdint.h>
28#include <sys/stddef.h>
29#include <sys/param.h>
30#include <sys/queue.h>
31#include <sys/types.h>
32#include <sys/systm.h>
33#include <sys/kernel.h>
34#include <sys/bus.h>
35#include <sys/module.h>
36#include <sys/lock.h>
37#include <sys/mutex.h>
38#include <sys/condvar.h>
39#include <sys/sysctl.h>
40#include <sys/sx.h>
41#include <sys/unistd.h>
42#include <sys/callout.h>
43#include <sys/malloc.h>
44#include <sys/priv.h>
45
46#include <dev/usb/usb.h>
47#include <dev/usb/usbdi.h>
48#include <dev/usb/usbdi_util.h>
49
50#define	USB_DEBUG_VAR usb_debug
51
52#include <dev/usb/usb_core.h>
53#include <dev/usb/usb_busdma.h>
54#include <dev/usb/usb_process.h>
55#include <dev/usb/usb_transfer.h>
56#include <dev/usb/usb_device.h>
57#include <dev/usb/usb_debug.h>
58#include <dev/usb/usb_util.h>
59
60#include <dev/usb/usb_controller.h>
61#include <dev/usb/usb_bus.h>
62#include <dev/usb/usb_pf.h>
63
64struct usb_std_packet_size {
65	struct {
66		uint16_t min;		/* inclusive */
67		uint16_t max;		/* inclusive */
68	}	range;
69
70	uint16_t fixed[4];
71};
72
73static usb_callback_t usb_request_callback;
74
75static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
76
77	/* This transfer is used for generic control endpoint transfers */
78
79	[0] = {
80		.type = UE_CONTROL,
81		.endpoint = 0x00,	/* Control endpoint */
82		.direction = UE_DIR_ANY,
83		.bufsize = USB_EP0_BUFSIZE,	/* bytes */
84		.flags = {.proxy_buffer = 1,},
85		.callback = &usb_request_callback,
86		.usb_mode = USB_MODE_DUAL,	/* both modes */
87	},
88
89	/* This transfer is used for generic clear stall only */
90
91	[1] = {
92		.type = UE_CONTROL,
93		.endpoint = 0x00,	/* Control pipe */
94		.direction = UE_DIR_ANY,
95		.bufsize = sizeof(struct usb_device_request),
96		.callback = &usb_do_clear_stall_callback,
97		.timeout = 1000,	/* 1 second */
98		.interval = 50,	/* 50ms */
99		.usb_mode = USB_MODE_HOST,
100	},
101};
102
103/* function prototypes */
104
105static void	usbd_update_max_frame_size(struct usb_xfer *);
106static void	usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
107static void	usbd_control_transfer_init(struct usb_xfer *);
108static int	usbd_setup_ctrl_transfer(struct usb_xfer *);
109static void	usb_callback_proc(struct usb_proc_msg *);
110static void	usbd_callback_ss_done_defer(struct usb_xfer *);
111static void	usbd_callback_wrapper(struct usb_xfer_queue *);
112static void	usbd_transfer_start_cb(void *);
113static uint8_t	usbd_callback_wrapper_sub(struct usb_xfer *);
114static void	usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
115		    uint8_t type, enum usb_dev_speed speed);
116
117/*------------------------------------------------------------------------*
118 *	usb_request_callback
119 *------------------------------------------------------------------------*/
120static void
121usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
122{
123	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
124		usb_handle_request_callback(xfer, error);
125	else
126		usbd_do_request_callback(xfer, error);
127}
128
129/*------------------------------------------------------------------------*
130 *	usbd_update_max_frame_size
131 *
132 * This function updates the maximum frame size, since high speed USB
133 * can transfer multiple consecutive packets.
134 *------------------------------------------------------------------------*/
135static void
136usbd_update_max_frame_size(struct usb_xfer *xfer)
137{
138	/* compute maximum frame size */
139	/* this computation should not overflow 16-bit */
140	/* max = 15 * 1024 */
141
142	xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
143}
144
145/*------------------------------------------------------------------------*
146 *	usbd_get_dma_delay
147 *
148 * The following function is called when we need to
149 * synchronize with DMA hardware.
150 *
151 * Returns:
152 *    0: no DMA delay required
153 * Else: milliseconds of DMA delay
154 *------------------------------------------------------------------------*/
155usb_timeout_t
156usbd_get_dma_delay(struct usb_device *udev)
157{
158	struct usb_bus_methods *mtod;
159	uint32_t temp;
160
161	mtod = udev->bus->methods;
162	temp = 0;
163
164	if (mtod->get_dma_delay) {
165		(mtod->get_dma_delay) (udev, &temp);
166		/*
167		 * Round up and convert to milliseconds. Note that we use
168		 * 1024 milliseconds per second to save a division.
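		 * For example, assuming the controller reports the delay
		 * in microseconds, a reported value of 188 becomes
		 * (188 + 1023) / 1024 = 1 millisecond.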
169		 */
170		temp += 0x3FF;
171		temp /= 0x400;
172	}
173	return (temp);
174}
175
176/*------------------------------------------------------------------------*
177 *	usbd_transfer_setup_sub_malloc
178 *
179 * This function will allocate one or more DMA'able memory chunks
180 * according to the "size", "align" and "count" arguments. On return,
181 * "ppc" points to a linear array of USB page caches.
182 *
183 * Returns:
184 *    0: Success
185 * Else: Failure
186 *------------------------------------------------------------------------*/
187#if USB_HAVE_BUSDMA
188uint8_t
189usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
190    struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
191    usb_size_t count)
192{
193	struct usb_page_cache *pc;
194	struct usb_page *pg;
195	void *buf;
196	usb_size_t n_dma_pc;
197	usb_size_t n_obj;
198	usb_size_t x;
199	usb_size_t y;
200	usb_size_t r;
201	usb_size_t z;
202
203	USB_ASSERT(align > 1, ("Invalid alignment, 0x%08x\n",
204	    align));
205	USB_ASSERT(size > 0, ("Invalid size = 0\n"));
206
207	if (count == 0) {
208		return (0);		/* nothing to allocate */
209	}
210	/*
211	 * Make sure that the size is aligned properly.
212	 */
213	size = -((-size) & (-align));
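	/*
	 * For a power-of-two "align" the expression above rounds "size"
	 * up to the next multiple of "align". For example, size = 96 and
	 * align = 64 gives -((-96) & (-64)) = 128.
	 */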
214
215	/*
216	 * Try multi-allocation chunks to reduce the number of DMA
217	 * allocations, since DMA allocations are slow.
218	 */
219	if (size >= PAGE_SIZE) {
220		n_dma_pc = count;
221		n_obj = 1;
222	} else {
223		/* compute number of objects per page */
224		n_obj = (PAGE_SIZE / size);
225		/*
226		 * Compute number of DMA chunks, rounded up
227		 * to nearest one:
228		 */
229		n_dma_pc = ((count + n_obj - 1) / n_obj);
230	}
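	/*
	 * For example, with size = 128 and PAGE_SIZE = 4096 each DMA
	 * chunk holds n_obj = 32 objects, so count = 100 objects need
	 * n_dma_pc = (100 + 31) / 32 = 4 chunks.
	 */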
231
232	if (parm->buf == NULL) {
233		/* for the future */
234		parm->dma_page_ptr += n_dma_pc;
235		parm->dma_page_cache_ptr += n_dma_pc;
236		parm->dma_page_ptr += count;
237		parm->xfer_page_cache_ptr += count;
238		return (0);
239	}
240	for (x = 0; x != n_dma_pc; x++) {
241		/* need to initialize the page cache */
242		parm->dma_page_cache_ptr[x].tag_parent =
243		    &parm->curr_xfer->xroot->dma_parent_tag;
244	}
245	for (x = 0; x != count; x++) {
246		/* need to initialize the page cache */
247		parm->xfer_page_cache_ptr[x].tag_parent =
248		    &parm->curr_xfer->xroot->dma_parent_tag;
249	}
250
251	if (ppc) {
252		*ppc = parm->xfer_page_cache_ptr;
253	}
254	r = count;			/* set remainder count */
255	z = n_obj * size;		/* set allocation size */
256	pc = parm->xfer_page_cache_ptr;
257	pg = parm->dma_page_ptr;
258
259	for (x = 0; x != n_dma_pc; x++) {
260
261		if (r < n_obj) {
262			/* compute last remainder */
263			z = r * size;
264			n_obj = r;
265		}
266		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
267		    pg, z, align)) {
268			return (1);	/* failure */
269		}
270		/* Set beginning of current buffer */
271		buf = parm->dma_page_cache_ptr->buffer;
272		/* Make room for one DMA page cache and one page */
273		parm->dma_page_cache_ptr++;
274		pg++;
275
276		for (y = 0; (y != n_obj); y++, r--, pc++, pg++) {
277
278			/* Load sub-chunk into DMA */
279			if (usb_pc_dmamap_create(pc, size)) {
280				return (1);	/* failure */
281			}
282			pc->buffer = USB_ADD_BYTES(buf, y * size);
283			pc->page_start = pg;
284
285			mtx_lock(pc->tag_parent->mtx);
286			if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
287				mtx_unlock(pc->tag_parent->mtx);
288				return (1);	/* failure */
289			}
290			mtx_unlock(pc->tag_parent->mtx);
291		}
292	}
293
294	parm->xfer_page_cache_ptr = pc;
295	parm->dma_page_ptr = pg;
296	return (0);
297}
298#endif
299
300/*------------------------------------------------------------------------*
301 *	usbd_transfer_setup_sub - transfer setup subroutine
302 *
303 * This function must be called from the "xfer_setup" callback of the
304 * USB Host or Device controller driver when setting up an USB
305 * transfer. This function will setup correct packet sizes, buffer
306 * sizes, flags and more, that are stored in the "usb_xfer"
307 * structure.
308 *------------------------------------------------------------------------*/
309void
310usbd_transfer_setup_sub(struct usb_setup_params *parm)
311{
312	enum {
313		REQ_SIZE = 8,
314		MIN_PKT = 8,
315	};
316	struct usb_xfer *xfer = parm->curr_xfer;
317	const struct usb_config *setup = parm->curr_setup;
318	struct usb_endpoint_ss_comp_descriptor *ecomp;
319	struct usb_endpoint_descriptor *edesc;
320	struct usb_std_packet_size std_size;
321	usb_frcount_t n_frlengths;
322	usb_frcount_t n_frbuffers;
323	usb_frcount_t x;
324	uint8_t type;
325	uint8_t zmps;
326
327	/*
328	 * Sanity check. The following parameters must be initialized before
329	 * calling this function.
330	 */
331	if ((parm->hc_max_packet_size == 0) ||
332	    (parm->hc_max_packet_count == 0) ||
333	    (parm->hc_max_frame_size == 0)) {
334		parm->err = USB_ERR_INVAL;
335		goto done;
336	}
337	edesc = xfer->endpoint->edesc;
338	ecomp = xfer->endpoint->ecomp;
339
340	type = (edesc->bmAttributes & UE_XFERTYPE);
341
342	xfer->flags = setup->flags;
343	xfer->nframes = setup->frames;
344	xfer->timeout = setup->timeout;
345	xfer->callback = setup->callback;
346	xfer->interval = setup->interval;
347	xfer->endpointno = edesc->bEndpointAddress;
348	xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
349	xfer->max_packet_count = 1;
350	/* make a shadow copy: */
351	xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
352
353	parm->bufsize = setup->bufsize;
354
355	switch (parm->speed) {
356	case USB_SPEED_HIGH:
357		switch (type) {
358		case UE_ISOCHRONOUS:
359		case UE_INTERRUPT:
360			xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
361
362			/* check for invalid max packet count */
363			if (xfer->max_packet_count > 3)
364				xfer->max_packet_count = 3;
365			break;
366		default:
367			break;
368		}
369		xfer->max_packet_size &= 0x7FF;
370		break;
371	case USB_SPEED_SUPER:
372		xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
373
374		if (ecomp != NULL)
375			xfer->max_packet_count += ecomp->bMaxBurst;
376
377		if ((xfer->max_packet_count == 0) ||
378		    (xfer->max_packet_count > 16))
379			xfer->max_packet_count = 16;
380
381		switch (type) {
382		case UE_CONTROL:
383			xfer->max_packet_count = 1;
384			break;
385		case UE_ISOCHRONOUS:
386			if (ecomp != NULL) {
387				uint8_t mult;
388
389				mult = (ecomp->bmAttributes & 3) + 1;
390				if (mult > 3)
391					mult = 3;
392
393				xfer->max_packet_count *= mult;
394			}
395			break;
396		default:
397			break;
398		}
399		xfer->max_packet_size &= 0x7FF;
400		break;
401	default:
402		break;
403	}
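	/*
	 * For example, a high-speed, high-bandwidth isochronous endpoint
	 * with wMaxPacketSize = 0x1400 decodes to max_packet_count = 3
	 * and max_packet_size = 0x400, giving a 3072 byte maximum frame.
	 */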
404	/* range check "max_packet_count" */
405
406	if (xfer->max_packet_count > parm->hc_max_packet_count) {
407		xfer->max_packet_count = parm->hc_max_packet_count;
408	}
409	/* filter "wMaxPacketSize" according to HC capabilities */
410
411	if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
412	    (xfer->max_packet_size == 0)) {
413		xfer->max_packet_size = parm->hc_max_packet_size;
414	}
415	/* filter "wMaxPacketSize" according to standard sizes */
416
417	usbd_get_std_packet_size(&std_size, type, parm->speed);
418
419	if (std_size.range.min || std_size.range.max) {
420
421		if (xfer->max_packet_size < std_size.range.min) {
422			xfer->max_packet_size = std_size.range.min;
423		}
424		if (xfer->max_packet_size > std_size.range.max) {
425			xfer->max_packet_size = std_size.range.max;
426		}
427	} else {
428
429		if (xfer->max_packet_size >= std_size.fixed[3]) {
430			xfer->max_packet_size = std_size.fixed[3];
431		} else if (xfer->max_packet_size >= std_size.fixed[2]) {
432			xfer->max_packet_size = std_size.fixed[2];
433		} else if (xfer->max_packet_size >= std_size.fixed[1]) {
434			xfer->max_packet_size = std_size.fixed[1];
435		} else {
436			/* only one possibility left */
437			xfer->max_packet_size = std_size.fixed[0];
438		}
439	}
440
441	/* compute "max_frame_size" */
442
443	usbd_update_max_frame_size(xfer);
444
445	/* check interrupt interval and transfer pre-delay */
446
447	if (type == UE_ISOCHRONOUS) {
448
449		uint16_t frame_limit;
450
451		xfer->interval = 0;	/* not used, must be zero */
452		xfer->flags_int.isochronous_xfr = 1;	/* set flag */
453
454		if (xfer->timeout == 0) {
455			/*
456			 * set a default timeout in
457			 * case something goes wrong!
458			 */
459			xfer->timeout = 1000 / 4;
460		}
461		switch (parm->speed) {
462		case USB_SPEED_LOW:
463		case USB_SPEED_FULL:
464			frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
465			xfer->fps_shift = 0;
466			break;
467		default:
468			frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
469			xfer->fps_shift = edesc->bInterval;
470			if (xfer->fps_shift > 0)
471				xfer->fps_shift--;
472			if (xfer->fps_shift > 3)
473				xfer->fps_shift = 3;
474			if (xfer->flags.pre_scale_frames != 0)
475				xfer->nframes <<= (3 - xfer->fps_shift);
476			break;
477		}
478
479		if (xfer->nframes > frame_limit) {
480			/*
481			 * this is not going to work
482			 * cross hardware
483			 */
484			parm->err = USB_ERR_INVAL;
485			goto done;
486		}
487		if (xfer->nframes == 0) {
488			/*
489			 * this is not a valid value
490			 */
491			parm->err = USB_ERR_ZERO_NFRAMES;
492			goto done;
493		}
494	} else {
495
496		/*
497		 * If a value is specified, use that; else check the
498		 * endpoint descriptor!
499		 */
500		if (type == UE_INTERRUPT) {
501
502			uint32_t temp;
503
504			if (xfer->interval == 0) {
505
506				xfer->interval = edesc->bInterval;
507
508				switch (parm->speed) {
509				case USB_SPEED_LOW:
510				case USB_SPEED_FULL:
511					break;
512				default:
513					/* 125us -> 1ms */
514					if (xfer->interval < 4)
515						xfer->interval = 1;
516					else if (xfer->interval > 16)
517						xfer->interval = (1 << (16 - 4));
518					else
519						xfer->interval =
520						    (1 << (xfer->interval - 4));
521					break;
522				}
523			}
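			/*
			 * For example, a high-speed interrupt endpoint
			 * with bInterval = 7 yields
			 * interval = 1 << (7 - 4) = 8 milliseconds, while
			 * bInterval = 1..3 (125..500us) is rounded up to
			 * one millisecond.
			 */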
524
525			if (xfer->interval == 0) {
526				/*
527				 * One millisecond is the smallest
528				 * interval we support:
529				 */
530				xfer->interval = 1;
531			}
532
533			xfer->fps_shift = 0;
534			temp = 1;
535
536			while ((temp != 0) && (temp < xfer->interval)) {
537				xfer->fps_shift++;
538				temp *= 2;
539			}
540
541			switch (parm->speed) {
542			case USB_SPEED_LOW:
543			case USB_SPEED_FULL:
544				break;
545			default:
546				xfer->fps_shift += 3;
547				break;
548			}
549		}
550	}
551
552	/*
553	 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
554	 * to be equal to zero when setting up USB transfers, since
555	 * that would lead to a lot of extra code in the USB kernel.
556	 */
557
558	if ((xfer->max_frame_size == 0) ||
559	    (xfer->max_packet_size == 0)) {
560
561		zmps = 1;
562
563		if ((parm->bufsize <= MIN_PKT) &&
564		    (type != UE_CONTROL) &&
565		    (type != UE_BULK)) {
566
567			/* workaround */
568			xfer->max_packet_size = MIN_PKT;
569			xfer->max_packet_count = 1;
570			parm->bufsize = 0;	/* automatic setup length */
571			usbd_update_max_frame_size(xfer);
572
573		} else {
574			parm->err = USB_ERR_ZERO_MAXP;
575			goto done;
576		}
577
578	} else {
579		zmps = 0;
580	}
581
582	/*
583	 * check if we should setup a default
584	 * length:
585	 */
586
587	if (parm->bufsize == 0) {
588
589		parm->bufsize = xfer->max_frame_size;
590
591		if (type == UE_ISOCHRONOUS) {
592			parm->bufsize *= xfer->nframes;
593		}
594	}
595	/*
596	 * check if we are about to setup a proxy
597	 * type of buffer:
598	 */
599
600	if (xfer->flags.proxy_buffer) {
601
602		/* round bufsize up */
603
604		parm->bufsize += (xfer->max_frame_size - 1);
605
606		if (parm->bufsize < xfer->max_frame_size) {
607			/* length wrapped around */
608			parm->err = USB_ERR_INVAL;
609			goto done;
610		}
611		/* subtract remainder */
612
613		parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
614
615		/* add length of USB device request structure, if any */
616
617		if (type == UE_CONTROL) {
618			parm->bufsize += REQ_SIZE;	/* SETUP message */
619		}
620	}
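	/*
	 * For example, a requested bufsize of 100 bytes with a 64 byte
	 * maximum frame size yields a 128 byte proxy buffer, plus
	 * REQ_SIZE bytes for the SETUP message on control endpoints.
	 */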
621	xfer->max_data_length = parm->bufsize;
622
623	/* Setup "n_frlengths" and "n_frbuffers" */
624
625	if (type == UE_ISOCHRONOUS) {
626		n_frlengths = xfer->nframes;
627		n_frbuffers = 1;
628	} else {
629
630		if (type == UE_CONTROL) {
631			xfer->flags_int.control_xfr = 1;
632			if (xfer->nframes == 0) {
633				if (parm->bufsize <= REQ_SIZE) {
634					/*
635					 * there will never be any data
636					 * stage
637					 */
638					xfer->nframes = 1;
639				} else {
640					xfer->nframes = 2;
641				}
642			}
643		} else {
644			if (xfer->nframes == 0) {
645				xfer->nframes = 1;
646			}
647		}
648
649		n_frlengths = xfer->nframes;
650		n_frbuffers = xfer->nframes;
651	}
652
653	/*
654	 * check if we have room for the
655	 * USB device request structure:
656	 */
657
658	if (type == UE_CONTROL) {
659
660		if (xfer->max_data_length < REQ_SIZE) {
661			/* length wrapped around or too small bufsize */
662			parm->err = USB_ERR_INVAL;
663			goto done;
664		}
665		xfer->max_data_length -= REQ_SIZE;
666	}
667	/*
668	 * Setup "frlengths" and shadow "frlengths" for keeping the
669	 * initial frame lengths when a USB transfer is complete. This
670	 * information is useful when computing isochronous offsets.
671	 */
672	xfer->frlengths = parm->xfer_length_ptr;
673	parm->xfer_length_ptr += 2 * n_frlengths;
674
675	/* setup "frbuffers" */
676	xfer->frbuffers = parm->xfer_page_cache_ptr;
677	parm->xfer_page_cache_ptr += n_frbuffers;
678
679	/* initialize max frame count */
680	xfer->max_frame_count = xfer->nframes;
681
682	/*
683	 * check if we need to setup
684	 * a local buffer:
685	 */
686
687	if (!xfer->flags.ext_buffer) {
688
689		/* align data */
690		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
691
692		if (parm->buf) {
693
694			xfer->local_buffer =
695			    USB_ADD_BYTES(parm->buf, parm->size[0]);
696
697			usbd_xfer_set_frame_offset(xfer, 0, 0);
698
699			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
700				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
701			}
702		}
703		parm->size[0] += parm->bufsize;
704
705		/* align data again */
706		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
707	}
708	/*
709	 * Compute maximum buffer size
710	 */
711
712	if (parm->bufsize_max < parm->bufsize) {
713		parm->bufsize_max = parm->bufsize;
714	}
715#if USB_HAVE_BUSDMA
716	if (xfer->flags_int.bdma_enable) {
717		/*
718		 * Setup "dma_page_ptr".
719		 *
720		 * Proof for formula below:
721		 *
722		 * Assume there are three USB frames having length "a", "b" and
723		 * "c". These USB frames will at maximum need "z"
724		 * "usb_page" structures. "z" is given by:
725		 *
726		 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
727		 * ((c / USB_PAGE_SIZE) + 2);
728		 *
729		 * Constraining "a", "b" and "c" like this:
730		 *
731		 * (a + b + c) <= parm->bufsize
732		 *
733		 * We know that:
734		 *
735		 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
736		 *
737		 * Here is the general formula:
738		 */
739		xfer->dma_page_ptr = parm->dma_page_ptr;
740		parm->dma_page_ptr += (2 * n_frbuffers);
741		parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
742	}
743#endif
744	if (zmps) {
745		/* correct maximum data length */
746		xfer->max_data_length = 0;
747	}
748	/* subtract USB frame remainder from "hc_max_frame_size" */
749
750	xfer->max_hc_frame_size =
751	    (parm->hc_max_frame_size -
752	    (parm->hc_max_frame_size % xfer->max_frame_size));
753
754	if (xfer->max_hc_frame_size == 0) {
755		parm->err = USB_ERR_INVAL;
756		goto done;
757	}
758
759	/* initialize frame buffers */
760
761	if (parm->buf) {
762		for (x = 0; x != n_frbuffers; x++) {
763			xfer->frbuffers[x].tag_parent =
764			    &xfer->xroot->dma_parent_tag;
765#if USB_HAVE_BUSDMA
766			if (xfer->flags_int.bdma_enable &&
767			    (parm->bufsize_max > 0)) {
768
769				if (usb_pc_dmamap_create(
770				    xfer->frbuffers + x,
771				    parm->bufsize_max)) {
772					parm->err = USB_ERR_NOMEM;
773					goto done;
774				}
775			}
776#endif
777		}
778	}
779done:
780	if (parm->err) {
781		/*
782		 * Set some dummy values so that we avoid division by zero:
783		 */
784		xfer->max_hc_frame_size = 1;
785		xfer->max_frame_size = 1;
786		xfer->max_packet_size = 1;
787		xfer->max_data_length = 0;
788		xfer->nframes = 0;
789		xfer->max_frame_count = 0;
790	}
791}
792
793/*------------------------------------------------------------------------*
794 *	usbd_transfer_setup - setup an array of USB transfers
795 *
796 * NOTE: You must always call "usbd_transfer_unsetup" after calling
797 * "usbd_transfer_setup" if success was returned.
798 *
799 * The idea is that the USB device driver should pre-allocate all its
800 * transfers by one call to this function.
801 *
802 * Return values:
803 *    0: Success
804 * Else: Failure
805 *------------------------------------------------------------------------*/
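/*------------------------------------------------------------------------*
 * Example usage - an illustrative sketch only: the "xyz" driver, its
 * softc layout, mutex and callbacks are hypothetical; the
 * "usbd_transfer_setup()" call itself follows the prototype below.
 *
 *	enum { XYZ_BULK_RD, XYZ_BULK_WR, XYZ_N_TRANSFER };
 *
 *	static const struct usb_config xyz_config[XYZ_N_TRANSFER] = {
 *		[XYZ_BULK_RD] = {
 *			.type = UE_BULK,
 *			.endpoint = UE_ADDR_ANY,
 *			.direction = UE_DIR_IN,
 *			.bufsize = 512,
 *			.flags = {.short_xfer_ok = 1,},
 *			.callback = &xyz_read_callback,
 *		},
 *		[XYZ_BULK_WR] = {
 *			.type = UE_BULK,
 *			.endpoint = UE_ADDR_ANY,
 *			.direction = UE_DIR_OUT,
 *			.bufsize = 512,
 *			.flags = {.force_short_xfer = 1,},
 *			.callback = &xyz_write_callback,
 *		},
 *	};
 *
 *	if (usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
 *	    xyz_config, XYZ_N_TRANSFER, sc, &sc->sc_mtx)) {
 *		return (ENXIO);
 *	}
 *	...
 *	usbd_transfer_unsetup(sc->sc_xfer, XYZ_N_TRANSFER);
 *------------------------------------------------------------------------*/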
806usb_error_t
807usbd_transfer_setup(struct usb_device *udev,
808    const uint8_t *ifaces, struct usb_xfer **ppxfer,
809    const struct usb_config *setup_start, uint16_t n_setup,
810    void *priv_sc, struct mtx *xfer_mtx)
811{
812	struct usb_xfer dummy;
813	struct usb_setup_params parm;
814	const struct usb_config *setup_end = setup_start + n_setup;
815	const struct usb_config *setup;
816	struct usb_endpoint *ep;
817	struct usb_xfer_root *info;
818	struct usb_xfer *xfer;
819	void *buf = NULL;
820	uint16_t n;
821	uint16_t refcount;
822
823	parm.err = 0;
824	refcount = 0;
825	info = NULL;
826
827	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
828	    "usbd_transfer_setup can sleep!");
829
830	/* do some checking first */
831
832	if (n_setup == 0) {
833		DPRINTFN(6, "setup array has zero length!\n");
834		return (USB_ERR_INVAL);
835	}
836	if (ifaces == NULL) {
837		DPRINTFN(6, "ifaces array is NULL!\n");
838		return (USB_ERR_INVAL);
839	}
840	if (xfer_mtx == NULL) {
841		DPRINTFN(6, "using global lock\n");
842		xfer_mtx = &Giant;
843	}
844	/* sanity checks */
845	for (setup = setup_start, n = 0;
846	    setup != setup_end; setup++, n++) {
847		if (setup->bufsize == (usb_frlength_t)-1) {
848			parm.err = USB_ERR_BAD_BUFSIZE;
849			DPRINTF("invalid bufsize\n");
850		}
851		if (setup->callback == NULL) {
852			parm.err = USB_ERR_NO_CALLBACK;
853			DPRINTF("no callback\n");
854		}
855		ppxfer[n] = NULL;
856	}
857
858	if (parm.err) {
859		goto done;
860	}
861	memset(&parm, 0, sizeof(parm));
862
863	parm.udev = udev;
864	parm.speed = usbd_get_speed(udev);
865	parm.hc_max_packet_count = 1;
866
867	if (parm.speed >= USB_SPEED_MAX) {
868		parm.err = USB_ERR_INVAL;
869		goto done;
870	}
871	/* setup all transfers */
872
873	while (1) {
874
875		if (buf) {
876			/*
877			 * Initialize the "usb_xfer_root" structure,
878			 * which is common for all our USB transfers.
879			 */
880			info = USB_ADD_BYTES(buf, 0);
881
882			info->memory_base = buf;
883			info->memory_size = parm.size[0];
884
885#if USB_HAVE_BUSDMA
886			info->dma_page_cache_start = USB_ADD_BYTES(buf, parm.size[4]);
887			info->dma_page_cache_end = USB_ADD_BYTES(buf, parm.size[5]);
888#endif
889			info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm.size[5]);
890			info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm.size[2]);
891
892			cv_init(&info->cv_drain, "WDRAIN");
893
894			info->xfer_mtx = xfer_mtx;
895#if USB_HAVE_BUSDMA
896			usb_dma_tag_setup(&info->dma_parent_tag,
897			    parm.dma_tag_p, udev->bus->dma_parent_tag[0].tag,
898			    xfer_mtx, &usb_bdma_done_event, 32, parm.dma_tag_max);
899#endif
900
901			info->bus = udev->bus;
902			info->udev = udev;
903
904			TAILQ_INIT(&info->done_q.head);
905			info->done_q.command = &usbd_callback_wrapper;
906#if USB_HAVE_BUSDMA
907			TAILQ_INIT(&info->dma_q.head);
908			info->dma_q.command = &usb_bdma_work_loop;
909#endif
910			info->done_m[0].hdr.pm_callback = &usb_callback_proc;
911			info->done_m[0].xroot = info;
912			info->done_m[1].hdr.pm_callback = &usb_callback_proc;
913			info->done_m[1].xroot = info;
914
915			/*
916			 * In device side mode control endpoint
917			 * requests need to run from a separate
918			 * context, else there is a chance of
919			 * deadlock!
920			 */
921			if (setup_start == usb_control_ep_cfg)
922				info->done_p =
923				    &udev->bus->control_xfer_proc;
924			else if (xfer_mtx == &Giant)
925				info->done_p =
926				    &udev->bus->giant_callback_proc;
927			else
928				info->done_p =
929				    &udev->bus->non_giant_callback_proc;
930		}
931		/* reset sizes */
932
933		parm.size[0] = 0;
934		parm.buf = buf;
935		parm.size[0] += sizeof(info[0]);
936
937		for (setup = setup_start, n = 0;
938		    setup != setup_end; setup++, n++) {
939
940			/* skip USB transfers without callbacks: */
941			if (setup->callback == NULL) {
942				continue;
943			}
944			/* see if there is a matching endpoint */
945			ep = usbd_get_endpoint(udev,
946			    ifaces[setup->if_index], setup);
947
948			if ((ep == NULL) || (ep->methods == NULL)) {
949				if (setup->flags.no_pipe_ok)
950					continue;
951				if ((setup->usb_mode != USB_MODE_DUAL) &&
952				    (setup->usb_mode != udev->flags.usb_mode))
953					continue;
954				parm.err = USB_ERR_NO_PIPE;
955				goto done;
956			}
957
958			/* align data properly */
959			parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
960
961			/* store current setup pointer */
962			parm.curr_setup = setup;
963
964			if (buf) {
965				/*
966				 * Common initialization of the
967				 * "usb_xfer" structure.
968				 */
969				xfer = USB_ADD_BYTES(buf, parm.size[0]);
970				xfer->address = udev->address;
971				xfer->priv_sc = priv_sc;
972				xfer->xroot = info;
973
974				usb_callout_init_mtx(&xfer->timeout_handle,
975				    &udev->bus->bus_mtx, 0);
976			} else {
977				/*
978				 * Setup a dummy xfer, hence we are
979				 * writing to the "usb_xfer"
980				 * structure pointed to by "xfer"
981				 * before we have allocated any
982				 * memory:
983				 */
984				xfer = &dummy;
985				memset(&dummy, 0, sizeof(dummy));
986				refcount++;
987			}
988
989			/* set transfer endpoint pointer */
990			xfer->endpoint = ep;
991
992			parm.size[0] += sizeof(xfer[0]);
993			parm.methods = xfer->endpoint->methods;
994			parm.curr_xfer = xfer;
995
996			/*
997			 * Call the Host or Device controller transfer
998			 * setup routine:
999			 */
1000			(udev->bus->methods->xfer_setup) (&parm);
1001
1002			/* check for error */
1003			if (parm.err)
1004				goto done;
1005
1006			if (buf) {
1007				/*
1008				 * Increment the endpoint refcount. This
1009				 * basically prevents setting a new
1010				 * configuration and alternate setting
1011				 * when USB transfers are in use on
1012				 * the given interface. Search the USB
1013				 * code for "endpoint->refcount_alloc" if you
1014				 * want more information.
1015				 */
1016				USB_BUS_LOCK(info->bus);
1017				if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1018					parm.err = USB_ERR_INVAL;
1019
1020				xfer->endpoint->refcount_alloc++;
1021
1022				if (xfer->endpoint->refcount_alloc == 0)
1023					panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1024				USB_BUS_UNLOCK(info->bus);
1025
1026				/*
1027				 * Whenever we set ppxfer[] then we
1028				 * also need to increment the
1029				 * "setup_refcount":
1030				 */
1031				info->setup_refcount++;
1032
1033				/*
1034				 * Transfer is successfully setup and
1035				 * can be used:
1036				 */
1037				ppxfer[n] = xfer;
1038			}
1039
1040			/* check for error */
1041			if (parm.err)
1042				goto done;
1043		}
1044
1045		if (buf || parm.err) {
1046			goto done;
1047		}
1048		if (refcount == 0) {
1049			/* no transfers - nothing to do ! */
1050			goto done;
1051		}
1052		/* align data properly */
1053		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1054
1055		/* store offset temporarily */
1056		parm.size[1] = parm.size[0];
1057
1058		/*
1059		 * The number of DMA tags required depends on
1060		 * the number of endpoints. The current estimate
1061		 * for maximum number of DMA tags per endpoint
1062		 * is two.
1063		 */
1064		parm.dma_tag_max += 2 * MIN(n_setup, USB_EP_MAX);
1065
1066		/*
1067		 * DMA tags for QH, TD, Data and more.
1068		 */
1069		parm.dma_tag_max += 8;
1070
1071		parm.dma_tag_p += parm.dma_tag_max;
1072
1073		parm.size[0] += ((uint8_t *)parm.dma_tag_p) -
1074		    ((uint8_t *)0);
1075
1076		/* align data properly */
1077		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1078
1079		/* store offset temporarily */
1080		parm.size[3] = parm.size[0];
1081
1082		parm.size[0] += ((uint8_t *)parm.dma_page_ptr) -
1083		    ((uint8_t *)0);
1084
1085		/* align data properly */
1086		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1087
1088		/* store offset temporarily */
1089		parm.size[4] = parm.size[0];
1090
1091		parm.size[0] += ((uint8_t *)parm.dma_page_cache_ptr) -
1092		    ((uint8_t *)0);
1093
1094		/* store end offset temporarily */
1095		parm.size[5] = parm.size[0];
1096
1097		parm.size[0] += ((uint8_t *)parm.xfer_page_cache_ptr) -
1098		    ((uint8_t *)0);
1099
1100		/* store end offset temporarily */
1101
1102		parm.size[2] = parm.size[0];
1103
1104		/* align data properly */
1105		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1106
1107		parm.size[6] = parm.size[0];
1108
1109		parm.size[0] += ((uint8_t *)parm.xfer_length_ptr) -
1110		    ((uint8_t *)0);
1111
1112		/* align data properly */
1113		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1114
1115		/* allocate zeroed memory */
1116		buf = malloc(parm.size[0], M_USB, M_WAITOK | M_ZERO);
1117
1118		if (buf == NULL) {
1119			parm.err = USB_ERR_NOMEM;
1120			DPRINTFN(0, "cannot allocate memory block for "
1121			    "configuration (%d bytes)\n",
1122			    parm.size[0]);
1123			goto done;
1124		}
1125		parm.dma_tag_p = USB_ADD_BYTES(buf, parm.size[1]);
1126		parm.dma_page_ptr = USB_ADD_BYTES(buf, parm.size[3]);
1127		parm.dma_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[4]);
1128		parm.xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[5]);
1129		parm.xfer_length_ptr = USB_ADD_BYTES(buf, parm.size[6]);
1130	}
1131
1132done:
1133	if (buf) {
1134		if (info->setup_refcount == 0) {
1135			/*
1136			 * "usbd_transfer_unsetup_sub" will unlock
1137			 * the bus mutex before returning !
1138			 */
1139			USB_BUS_LOCK(info->bus);
1140
1141			/* something went wrong */
1142			usbd_transfer_unsetup_sub(info, 0);
1143		}
1144	}
1145	if (parm.err) {
1146		usbd_transfer_unsetup(ppxfer, n_setup);
1147	}
1148	return (parm.err);
1149}
1150
1151/*------------------------------------------------------------------------*
1152 *	usbd_transfer_unsetup_sub - factored out code
1153 *------------------------------------------------------------------------*/
1154static void
1155usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1156{
1157#if USB_HAVE_BUSDMA
1158	struct usb_page_cache *pc;
1159#endif
1160
1161	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1162
1163	/* wait for any outstanding DMA operations */
1164
1165	if (needs_delay) {
1166		usb_timeout_t temp;
1167		temp = usbd_get_dma_delay(info->udev);
1168		if (temp != 0) {
1169			usb_pause_mtx(&info->bus->bus_mtx,
1170			    USB_MS_TO_TICKS(temp));
1171		}
1172	}
1173
1174	/* make sure that our done messages are not queued anywhere */
1175	usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1176
1177	USB_BUS_UNLOCK(info->bus);
1178
1179#if USB_HAVE_BUSDMA
1180	/* free DMA'able memory, if any */
1181	pc = info->dma_page_cache_start;
1182	while (pc != info->dma_page_cache_end) {
1183		usb_pc_free_mem(pc);
1184		pc++;
1185	}
1186
1187	/* free DMA maps in all "xfer->frbuffers" */
1188	pc = info->xfer_page_cache_start;
1189	while (pc != info->xfer_page_cache_end) {
1190		usb_pc_dmamap_destroy(pc);
1191		pc++;
1192	}
1193
1194	/* free all DMA tags */
1195	usb_dma_tag_unsetup(&info->dma_parent_tag);
1196#endif
1197
1198	cv_destroy(&info->cv_drain);
1199
1200	/*
1201	 * free the "memory_base" last, since the "info" structure is
1202	 * contained within the "memory_base"!
1203	 */
1204	free(info->memory_base, M_USB);
1205}
1206
1207/*------------------------------------------------------------------------*
1208 *	usbd_transfer_unsetup - unsetup/free an array of USB transfers
1209 *
1210 * NOTE: All USB transfers in progress will get called back passing
1211 * the error code "USB_ERR_CANCELLED" before this function
1212 * returns.
1213 *------------------------------------------------------------------------*/
1214void
1215usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1216{
1217	struct usb_xfer *xfer;
1218	struct usb_xfer_root *info;
1219	uint8_t needs_delay = 0;
1220
1221	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1222	    "usbd_transfer_unsetup can sleep!");
1223
1224	while (n_setup--) {
1225		xfer = pxfer[n_setup];
1226
1227		if (xfer == NULL)
1228			continue;
1229
1230		info = xfer->xroot;
1231
1232		USB_XFER_LOCK(xfer);
1233		USB_BUS_LOCK(info->bus);
1234
1235		/*
1236		 * HINT: when you start/stop a transfer, it might be a
1237		 * good idea to directly use the "pxfer[]" structure:
1238		 *
1239		 * usbd_transfer_start(sc->pxfer[0]);
1240		 * usbd_transfer_stop(sc->pxfer[0]);
1241		 *
1242		 * That way, if your code has many parts that will not
1243		 * stop running under the same lock, in other words
1244		 * "xfer_mtx", the usbd_transfer_start and
1245		 * usbd_transfer_stop functions will simply return
1246		 * when they detect a NULL pointer argument.
1247		 *
1248		 * To avoid any races we clear the "pxfer[]" pointer
1249		 * while holding the private mutex of the driver:
1250		 */
1251		pxfer[n_setup] = NULL;
1252
1253		USB_BUS_UNLOCK(info->bus);
1254		USB_XFER_UNLOCK(xfer);
1255
1256		usbd_transfer_drain(xfer);
1257
1258#if USB_HAVE_BUSDMA
1259		if (xfer->flags_int.bdma_enable)
1260			needs_delay = 1;
1261#endif
1262		/*
1263		 * NOTE: default endpoint does not have an
1264		 * interface, even if endpoint->iface_index == 0
1265		 */
1266		USB_BUS_LOCK(info->bus);
1267		xfer->endpoint->refcount_alloc--;
1268		USB_BUS_UNLOCK(info->bus);
1269
1270		usb_callout_drain(&xfer->timeout_handle);
1271
1272		USB_BUS_LOCK(info->bus);
1273
1274		USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1275		    "reference count\n"));
1276
1277		info->setup_refcount--;
1278
1279		if (info->setup_refcount == 0) {
1280			usbd_transfer_unsetup_sub(info,
1281			    needs_delay);
1282		} else {
1283			USB_BUS_UNLOCK(info->bus);
1284		}
1285	}
1286}
1287
1288/*------------------------------------------------------------------------*
1289 *	usbd_control_transfer_init - factored out code
1290 *
1291 * In USB Device Mode we have to wait for the SETUP packet, which
1292 * contains the "struct usb_device_request" structure, before we can
1293 * transfer any data. In USB Host Mode we already have the SETUP
1294 * packet at the moment the USB transfer is started. This leads us to
1295 * having to setup the USB transfer at two different places in
1296 * time. This function just contains factored out control transfer
1297 * initialisation code, so that we don't duplicate the code.
1298 *------------------------------------------------------------------------*/
1299static void
1300usbd_control_transfer_init(struct usb_xfer *xfer)
1301{
1302	struct usb_device_request req;
1303
1304	/* copy out the USB request header */
1305
1306	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1307
1308	/* setup remainder */
1309
1310	xfer->flags_int.control_rem = UGETW(req.wLength);
1311
1312	/* copy direction to endpoint variable */
1313
1314	xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1315	xfer->endpointno |=
1316	    (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1317}
1318
1319/*------------------------------------------------------------------------*
1320 *	usbd_setup_ctrl_transfer
1321 *
1322 * This function handles initialisation of control transfers. Control
1323 * transfers are special in that they can both transmit
1324 * and receive data.
1325 *
1326 * Return values:
1327 *    0: Success
1328 * Else: Failure
1329 *------------------------------------------------------------------------*/
1330static int
1331usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1332{
1333	usb_frlength_t len;
1334
1335	/* Check for control endpoint stall */
1336	if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1337		/* the control transfer is no longer active */
1338		xfer->flags_int.control_stall = 1;
1339		xfer->flags_int.control_act = 0;
1340	} else {
1341		/* don't stall control transfer by default */
1342		xfer->flags_int.control_stall = 0;
1343	}
1344
1345	/* Check for invalid number of frames */
1346	if (xfer->nframes > 2) {
1347		/*
1348		 * If you need to split a control transfer, you
1349		 * have to do one part at a time. Only with
1350		 * non-control transfers can you do multiple
1351		 * parts at a time.
1352		 */
1353		DPRINTFN(0, "Too many frames: %u\n",
1354		    (unsigned int)xfer->nframes);
1355		goto error;
1356	}
1357
1358	/*
1359         * Check if there is a control
1360         * transfer in progress:
1361         */
1362	if (xfer->flags_int.control_act) {
1363
1364		if (xfer->flags_int.control_hdr) {
1365
1366			/* clear send header flag */
1367
1368			xfer->flags_int.control_hdr = 0;
1369
1370			/* setup control transfer */
1371			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1372				usbd_control_transfer_init(xfer);
1373			}
1374		}
1375		/* get data length */
1376
1377		len = xfer->sumlen;
1378
1379	} else {
1380
1381		/* the size of the SETUP structure is hardcoded ! */
1382
1383		if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1384			DPRINTFN(0, "Wrong framelength %u != %zu\n",
1385			    xfer->frlengths[0], sizeof(struct
1386			    usb_device_request));
1387			goto error;
1388		}
1389		/* check USB mode */
1390		if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1391
1392			/* check number of frames */
1393			if (xfer->nframes != 1) {
1394				/*
1395			         * We need to receive the setup
1396			         * message first so that we know the
1397			         * data direction!
1398			         */
1399				DPRINTF("Misconfigured transfer\n");
1400				goto error;
1401			}
1402			/*
1403			 * Set a dummy "control_rem" value.  This
1404			 * variable will be overwritten later by a
1405			 * call to "usbd_control_transfer_init()" !
1406			 */
1407			xfer->flags_int.control_rem = 0xFFFF;
1408		} else {
1409
1410			/* setup "endpoint" and "control_rem" */
1411
1412			usbd_control_transfer_init(xfer);
1413		}
1414
1415		/* set transfer-header flag */
1416
1417		xfer->flags_int.control_hdr = 1;
1418
1419		/* get data length */
1420
1421		len = (xfer->sumlen - sizeof(struct usb_device_request));
1422	}
1423
1424	/* check if there is a length mismatch */
1425
1426	if (len > xfer->flags_int.control_rem) {
1427		DPRINTFN(0, "Length (%d) greater than "
1428		    "remaining length (%d)\n", len,
1429		    xfer->flags_int.control_rem);
1430		goto error;
1431	}
1432	/* check if we are doing a short transfer */
1433
1434	if (xfer->flags.force_short_xfer) {
1435		xfer->flags_int.control_rem = 0;
1436	} else {
1437		if ((len != xfer->max_data_length) &&
1438		    (len != xfer->flags_int.control_rem) &&
1439		    (xfer->nframes != 1)) {
1440			DPRINTFN(0, "Short control transfer without "
1441			    "force_short_xfer set\n");
1442			goto error;
1443		}
1444		xfer->flags_int.control_rem -= len;
1445	}
1446
1447	/* the status part is executed when "control_act" is 0 */
1448
1449	if ((xfer->flags_int.control_rem > 0) ||
1450	    (xfer->flags.manual_status)) {
1451		/* don't execute the STATUS stage yet */
1452		xfer->flags_int.control_act = 1;
1453
1454		/* sanity check */
1455		if ((!xfer->flags_int.control_hdr) &&
1456		    (xfer->nframes == 1)) {
1457			/*
1458		         * This is not a valid operation!
1459		         */
1460			DPRINTFN(0, "Invalid parameter "
1461			    "combination\n");
1462			goto error;
1463		}
1464	} else {
1465		/* time to execute the STATUS stage */
1466		xfer->flags_int.control_act = 0;
1467	}
1468	return (0);			/* success */
1469
1470error:
1471	return (1);			/* failure */
1472}
1473
1474/*------------------------------------------------------------------------*
1475 *	usbd_transfer_submit - start USB hardware for the given transfer
1476 *
1477 * This function should only be called from the USB callback.
1478 *------------------------------------------------------------------------*/
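/*------------------------------------------------------------------------*
 * Example callback - an illustrative sketch only; the "xyz" names are
 * hypothetical, the USB_ST_xxx states and helper functions are the
 * usual usbdi(9) ones:
 *
 *	static void
 *	xyz_read_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		int actlen;
 *
 *		usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *
 *		switch (USB_GET_STATE(xfer)) {
 *		case USB_ST_TRANSFERRED:
 *			(consume "actlen" bytes of frame 0 here,
 *			 then fall through and resubmit)
 *		case USB_ST_SETUP:
 *			usbd_xfer_set_frame_len(xfer, 0,
 *			    usbd_xfer_max_len(xfer));
 *			usbd_transfer_submit(xfer);
 *			break;
 *		default:
 *			if (error != USB_ERR_CANCELLED) {
 *				(try to clear the stall and restart)
 *				usbd_xfer_set_stall(xfer);
 *				usbd_xfer_set_frame_len(xfer, 0,
 *				    usbd_xfer_max_len(xfer));
 *				usbd_transfer_submit(xfer);
 *			}
 *			break;
 *		}
 *	}
 *------------------------------------------------------------------------*/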
1479void
1480usbd_transfer_submit(struct usb_xfer *xfer)
1481{
1482	struct usb_xfer_root *info;
1483	struct usb_bus *bus;
1484	usb_frcount_t x;
1485
1486	info = xfer->xroot;
1487	bus = info->bus;
1488
1489	DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1490	    xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1491	    "read" : "write");
1492
1493#ifdef USB_DEBUG
1494	if (USB_DEBUG_VAR > 0) {
1495		USB_BUS_LOCK(bus);
1496
1497		usb_dump_endpoint(xfer->endpoint);
1498
1499		USB_BUS_UNLOCK(bus);
1500	}
1501#endif
1502
1503	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1504	USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1505
1506	/* Only open the USB transfer once! */
1507	if (!xfer->flags_int.open) {
1508		xfer->flags_int.open = 1;
1509
1510		DPRINTF("open\n");
1511
1512		USB_BUS_LOCK(bus);
1513		(xfer->endpoint->methods->open) (xfer);
1514		USB_BUS_UNLOCK(bus);
1515	}
1516	/* set "transferring" flag */
1517	xfer->flags_int.transferring = 1;
1518
1519#if USB_HAVE_POWERD
1520	/* increment power reference */
1521	usbd_transfer_power_ref(xfer, 1);
1522#endif
1523	/*
1524	 * Check if the transfer is waiting on a queue, most
1525	 * frequently the "done_q":
1526	 */
1527	if (xfer->wait_queue) {
1528		USB_BUS_LOCK(bus);
1529		usbd_transfer_dequeue(xfer);
1530		USB_BUS_UNLOCK(bus);
1531	}
1532	/* clear "did_dma_delay" flag */
1533	xfer->flags_int.did_dma_delay = 0;
1534
1535	/* clear "did_close" flag */
1536	xfer->flags_int.did_close = 0;
1537
1538#if USB_HAVE_BUSDMA
1539	/* clear "bdma_setup" flag */
1540	xfer->flags_int.bdma_setup = 0;
1541#endif
1542	/* by default we cannot cancel any USB transfer immediately */
1543	xfer->flags_int.can_cancel_immed = 0;
1544
1545	/* clear lengths and frame counts by default */
1546	xfer->sumlen = 0;
1547	xfer->actlen = 0;
1548	xfer->aframes = 0;
1549
1550	/* clear any previous errors */
1551	xfer->error = 0;
1552
1553	/* Check if the device is still alive */
1554	if (info->udev->state < USB_STATE_POWERED) {
1555		USB_BUS_LOCK(bus);
1556		/*
1557		 * Must return the cancelled error code, else
1558		 * device drivers can hang.
1559		 */
1560		usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1561		USB_BUS_UNLOCK(bus);
1562		return;
1563	}
1564
1565	/* sanity check */
1566	if (xfer->nframes == 0) {
1567		if (xfer->flags.stall_pipe) {
1568			/*
1569			 * Special case - want to stall without transferring
1570			 * any data:
1571			 */
1572			DPRINTF("xfer=%p nframes=0: stall "
1573			    "or clear stall!\n", xfer);
1574			USB_BUS_LOCK(bus);
1575			xfer->flags_int.can_cancel_immed = 1;
1576			/* start the transfer */
1577			usb_command_wrapper(&xfer->endpoint->endpoint_q, xfer);
1578			USB_BUS_UNLOCK(bus);
1579			return;
1580		}
1581		USB_BUS_LOCK(bus);
1582		usbd_transfer_done(xfer, USB_ERR_INVAL);
1583		USB_BUS_UNLOCK(bus);
1584		return;
1585	}
1586	/* compute some variables */
1587
1588	for (x = 0; x != xfer->nframes; x++) {
1589		/* make a copy of the frlengths[] */
1590		xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1591		/* compute total transfer length */
1592		xfer->sumlen += xfer->frlengths[x];
1593		if (xfer->sumlen < xfer->frlengths[x]) {
1594			/* length wrapped around */
1595			USB_BUS_LOCK(bus);
1596			usbd_transfer_done(xfer, USB_ERR_INVAL);
1597			USB_BUS_UNLOCK(bus);
1598			return;
1599		}
1600	}
1601
1602	/* clear some internal flags */
1603
1604	xfer->flags_int.short_xfer_ok = 0;
1605	xfer->flags_int.short_frames_ok = 0;
1606
1607	/* check if this is a control transfer */
1608
1609	if (xfer->flags_int.control_xfr) {
1610
1611		if (usbd_setup_ctrl_transfer(xfer)) {
1612			USB_BUS_LOCK(bus);
1613			usbd_transfer_done(xfer, USB_ERR_STALLED);
1614			USB_BUS_UNLOCK(bus);
1615			return;
1616		}
1617	}
1618	/*
1619	 * Setup filtered version of some transfer flags,
1620	 * in case of data read direction
1621	 */
1622	if (USB_GET_DATA_ISREAD(xfer)) {
1623
1624		if (xfer->flags.short_frames_ok) {
1625			xfer->flags_int.short_xfer_ok = 1;
1626			xfer->flags_int.short_frames_ok = 1;
1627		} else if (xfer->flags.short_xfer_ok) {
1628			xfer->flags_int.short_xfer_ok = 1;
1629
1630			/* check for control transfer */
1631			if (xfer->flags_int.control_xfr) {
1632				/*
1633				 * 1) Control transfers do not support
1634				 * reception of multiple short USB
1635				 * frames in host mode and device side
1636				 * mode, with exception of:
1637				 *
1638				 * 2) Due to sometimes buggy device
1639				 * side firmware we need to do a
1640				 * STATUS stage in case of short
1641				 * control transfers in USB host mode.
1642				 * The STATUS stage then becomes the
1643				 * "alt_next" to the DATA stage.
1644				 */
1645				xfer->flags_int.short_frames_ok = 1;
1646			}
1647		}
1648	}
1649	/*
1650	 * Check if BUS-DMA support is enabled and try to load virtual
1651	 * buffers into DMA, if any:
1652	 */
1653#if USB_HAVE_BUSDMA
1654	if (xfer->flags_int.bdma_enable) {
1655		/* insert the USB transfer last in the BUS-DMA queue */
1656		usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1657		return;
1658	}
1659#endif
1660	/*
1661	 * Enter the USB transfer into the Host Controller or
1662	 * Device Controller schedule:
1663	 */
1664	usbd_pipe_enter(xfer);
1665}
1666
1667/*------------------------------------------------------------------------*
1668 *	usbd_pipe_enter - factored out code
1669 *------------------------------------------------------------------------*/
1670void
1671usbd_pipe_enter(struct usb_xfer *xfer)
1672{
1673	struct usb_endpoint *ep;
1674
1675	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1676
1677	USB_BUS_LOCK(xfer->xroot->bus);
1678
1679	ep = xfer->endpoint;
1680
1681	DPRINTF("enter\n");
1682
1683	/* the transfer can now be cancelled */
1684	xfer->flags_int.can_cancel_immed = 1;
1685
1686	/* enter the transfer */
1687	(ep->methods->enter) (xfer);
1688
1689	/* check for transfer error */
1690	if (xfer->error) {
1691		/* some error has happened */
1692		usbd_transfer_done(xfer, 0);
1693		USB_BUS_UNLOCK(xfer->xroot->bus);
1694		return;
1695	}
1696
1697	/* start the transfer */
1698	usb_command_wrapper(&ep->endpoint_q, xfer);
1699	USB_BUS_UNLOCK(xfer->xroot->bus);
1700}
1701
1702/*------------------------------------------------------------------------*
1703 *	usbd_transfer_start - start an USB transfer
1704 *
1705 * NOTE: Calling this function more than one time will only
1706 *       result in a single transfer start, until the USB transfer
1707 *       completes.
1708 *------------------------------------------------------------------------*/
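/*
 * Example - illustrative sketch only; "sc" and its fields are
 * hypothetical. The caller must hold the mutex that was passed to
 * usbd_transfer_setup():
 *
 *	mtx_lock(&sc->sc_mtx);
 *	usbd_transfer_start(sc->sc_xfer[XYZ_BULK_RD]);
 *	mtx_unlock(&sc->sc_mtx);
 */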
1709void
1710usbd_transfer_start(struct usb_xfer *xfer)
1711{
1712	if (xfer == NULL) {
1713		/* transfer is gone */
1714		return;
1715	}
1716	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1717
1718	/* mark the USB transfer started */
1719
1720	if (!xfer->flags_int.started) {
1721		/* lock the BUS lock to avoid races updating flags_int */
1722		USB_BUS_LOCK(xfer->xroot->bus);
1723		xfer->flags_int.started = 1;
1724		USB_BUS_UNLOCK(xfer->xroot->bus);
1725	}
1726	/* check if the USB transfer callback is already transferring */
1727
1728	if (xfer->flags_int.transferring) {
1729		return;
1730	}
1731	USB_BUS_LOCK(xfer->xroot->bus);
1732	/* call the USB transfer callback */
1733	usbd_callback_ss_done_defer(xfer);
1734	USB_BUS_UNLOCK(xfer->xroot->bus);
1735}
1736
1737/*------------------------------------------------------------------------*
1738 *	usbd_transfer_stop - stop an USB transfer
1739 *
1740 * NOTE: Calling this function more than one time will only
1741 *       result in a single transfer stop.
1742 * NOTE: When this function returns it is not safe to free nor
1743 *       reuse any DMA buffers. See "usbd_transfer_drain()".
1744 *------------------------------------------------------------------------*/
1745void
1746usbd_transfer_stop(struct usb_xfer *xfer)
1747{
1748	struct usb_endpoint *ep;
1749
1750	if (xfer == NULL) {
1751		/* transfer is gone */
1752		return;
1753	}
1754	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1755
1756	/* check if the USB transfer was ever opened */
1757
1758	if (!xfer->flags_int.open) {
1759		if (xfer->flags_int.started) {
1760			/* nothing to do except clearing the "started" flag */
1761			/* lock the BUS lock to avoid races updating flags_int */
1762			USB_BUS_LOCK(xfer->xroot->bus);
1763			xfer->flags_int.started = 0;
1764			USB_BUS_UNLOCK(xfer->xroot->bus);
1765		}
1766		return;
1767	}
1768	/* try to stop the current USB transfer */
1769
1770	USB_BUS_LOCK(xfer->xroot->bus);
1771	/* override any previous error */
1772	xfer->error = USB_ERR_CANCELLED;
1773
1774	/*
1775	 * Clear "open" and "started" when both private and USB lock
1776	 * is locked so that we don't get a race updating "flags_int"
1777	 */
1778	xfer->flags_int.open = 0;
1779	xfer->flags_int.started = 0;
1780
1781	/*
1782	 * Check if we can cancel the USB transfer immediately.
1783	 */
1784	if (xfer->flags_int.transferring) {
1785		if (xfer->flags_int.can_cancel_immed &&
1786		    (!xfer->flags_int.did_close)) {
1787			DPRINTF("close\n");
1788			/*
1789			 * The following will lead to an USB_ERR_CANCELLED
1790			 * error code being passed to the USB callback.
1791			 */
1792			(xfer->endpoint->methods->close) (xfer);
1793			/* only close once */
1794			xfer->flags_int.did_close = 1;
1795		} else {
1796			/* need to wait for the next done callback */
1797		}
1798	} else {
1799		DPRINTF("close\n");
1800
1801		/* close here and now */
1802		(xfer->endpoint->methods->close) (xfer);
1803
1804		/*
1805		 * Any additional DMA delay is done by
1806		 * "usbd_transfer_unsetup()".
1807		 */
1808
1809		/*
1810		 * Special case. Check if we need to restart a blocked
1811		 * endpoint.
1812		 */
1813		ep = xfer->endpoint;
1814
1815		/*
1816		 * If the current USB transfer is completing we need
1817		 * to start the next one:
1818		 */
1819		if (ep->endpoint_q.curr == xfer) {
1820			usb_command_wrapper(&ep->endpoint_q, NULL);
1821		}
1822	}
1823
1824	USB_BUS_UNLOCK(xfer->xroot->bus);
1825}
1826
1827/*------------------------------------------------------------------------*
1828 *	usbd_transfer_pending
1829 *
1830 * This function will check if an USB transfer is pending, which is a
1831 * little bit complicated!
1832 * Return values:
1833 * 0: Not pending
1834 * 1: Pending: The USB transfer will receive a callback in the future.
1835 *------------------------------------------------------------------------*/
1836uint8_t
1837usbd_transfer_pending(struct usb_xfer *xfer)
1838{
1839	struct usb_xfer_root *info;
1840	struct usb_xfer_queue *pq;
1841
1842	if (xfer == NULL) {
1843		/* transfer is gone */
1844		return (0);
1845	}
1846	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1847
1848	if (xfer->flags_int.transferring) {
1849		/* trivial case */
1850		return (1);
1851	}
1852	USB_BUS_LOCK(xfer->xroot->bus);
1853	if (xfer->wait_queue) {
1854		/* we are waiting on a queue somewhere */
1855		USB_BUS_UNLOCK(xfer->xroot->bus);
1856		return (1);
1857	}
1858	info = xfer->xroot;
1859	pq = &info->done_q;
1860
1861	if (pq->curr == xfer) {
1862		/* we are currently scheduled for callback */
1863		USB_BUS_UNLOCK(xfer->xroot->bus);
1864		return (1);
1865	}
1866	/* we are not pending */
1867	USB_BUS_UNLOCK(xfer->xroot->bus);
1868	return (0);
1869}
1870
1871/*------------------------------------------------------------------------*
1872 *	usbd_transfer_drain
1873 *
1874 * This function will stop the USB transfer and wait for any
1875 * additional BUS-DMA and HW-DMA operations to complete. Buffers that
1876 * are loaded into DMA can safely be freed or reused after this
1877 * function has returned.
1878 *------------------------------------------------------------------------*/
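/*
 * Example - illustrative sketch only; "sc" and its fields are
 * hypothetical. Drain without holding the transfer mutex before
 * freeing or reusing an externally loaded buffer:
 *
 *	usbd_transfer_drain(sc->sc_xfer[XYZ_BULK_RD]);
 *	free(sc->sc_rx_buf, M_DEVBUF);
 */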
1879void
1880usbd_transfer_drain(struct usb_xfer *xfer)
1881{
1882	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1883	    "usbd_transfer_drain can sleep!");
1884
1885	if (xfer == NULL) {
1886		/* transfer is gone */
1887		return;
1888	}
1889	if (xfer->xroot->xfer_mtx != &Giant) {
1890		USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
1891	}
1892	USB_XFER_LOCK(xfer);
1893
1894	usbd_transfer_stop(xfer);
1895
1896	while (usbd_transfer_pending(xfer) ||
1897	    xfer->flags_int.doing_callback) {
1898
1899		/*
1900		 * The callback is allowed to drop its transfer
1901		 * mutex. In that case checking only
1902		 * "usbd_transfer_pending()" is not enough to tell if
1903		 * the USB transfer is fully drained. We also need to
1904		 * check the internal "doing_callback" flag.
1905		 */
1906		xfer->flags_int.draining = 1;
1907
1908		/*
1909		 * Wait until the current outstanding USB
1910		 * transfer is complete !
1911		 */
1912		cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
1913	}
1914	USB_XFER_UNLOCK(xfer);
1915}
1916
1917struct usb_page_cache *
1918usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
1919{
1920	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1921
1922	return (&xfer->frbuffers[frindex]);
1923}
1924
1925/*------------------------------------------------------------------------*
1926 *	usbd_xfer_get_fps_shift
1927 *
1928 * The following function is only useful for isochronous transfers. It
1929 * returns how many times the frame execution rate has been shifted
1930 * down.
1931 *
1932 * Return value:
1933 * Success: 0..3
1934 * Failure: 0
1935 *------------------------------------------------------------------------*/
1936uint8_t
1937usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
1938{
1939	return (xfer->fps_shift);
1940}
1941
1942usb_frlength_t
1943usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
1944{
1945	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1946
1947	return (xfer->frlengths[frindex]);
1948}
1949
1950/*------------------------------------------------------------------------*
1951 *	usbd_xfer_set_frame_data
1952 *
1953 * This function sets the pointer of the buffer that should be
1954 * loaded directly into DMA for the given USB frame. Passing "ptr"
1955 * equal to NULL while the corresponding "frlength" is greater
1956 * than zero gives undefined results!
1957 *------------------------------------------------------------------------*/
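/*
 * Example - illustrative sketch only; "sc->sc_rx_buf" is hypothetical.
 * Typically done from the USB_ST_SETUP path of the callback when the
 * transfer was configured with flags.ext_buffer = 1:
 *
 *	usbd_xfer_set_frame_data(xfer, 0, sc->sc_rx_buf, MCLBYTES);
 *	usbd_transfer_submit(xfer);
 */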
1958void
1959usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
1960    void *ptr, usb_frlength_t len)
1961{
1962	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1963
1964	/* set virtual address to load and length */
1965	xfer->frbuffers[frindex].buffer = ptr;
1966	usbd_xfer_set_frame_len(xfer, frindex, len);
1967}
1968
1969void
1970usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
1971    void **ptr, int *len)
1972{
1973	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1974
1975	if (ptr != NULL)
1976		*ptr = xfer->frbuffers[frindex].buffer;
1977	if (len != NULL)
1978		*len = xfer->frlengths[frindex];
1979}
1980
1981/*------------------------------------------------------------------------*
1982 *	usbd_xfer_old_frame_length
1983 *
1984 * This function returns the frame length of the given frame at the
1985 * time the transfer was submitted. This function can be used to
1986 * compute the starting data pointer of the next isochronous frame
1987 * when an isochronous transfer has completed.
1988 *------------------------------------------------------------------------*/
1989usb_frlength_t
1990usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
1991{
1992	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1993
1994	return (xfer->frlengths[frindex + xfer->max_frame_count]);
1995}
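
/*
 * Usage sketch (hypothetical, not part of this file): one way to walk
 * the frames of a completed isochronous IN transfer, assuming all
 * frames were laid out back to back in the transfer's local buffer.
 * The offset of each frame is the sum of the lengths requested at
 * submit time (the "old" frame lengths), while the valid byte count
 * is the current frame length. "dst" and "nframes" are assumed to be
 * provided by the caller.
 *
 *	usb_frlength_t offset = 0;
 *	usb_frcount_t x;
 *
 *	for (x = 0; x != nframes; x++) {
 *		usbd_copy_out(xfer->frbuffers, offset,
 *		    dst, usbd_xfer_frame_len(xfer, x));
 *		offset += usbd_xfer_old_frame_length(xfer, x);
 *	}
 */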
1996
1997void
1998usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
1999    int *nframes)
2000{
2001	if (actlen != NULL)
2002		*actlen = xfer->actlen;
2003	if (sumlen != NULL)
2004		*sumlen = xfer->sumlen;
2005	if (aframes != NULL)
2006		*aframes = xfer->aframes;
2007	if (nframes != NULL)
2008		*nframes = xfer->nframes;
2009}
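
/*
 * Usage sketch (hypothetical, not part of this file): reading the
 * actual transfer length from inside a callback. Output pointers
 * that are not needed may simply be passed as NULL.
 *
 *	int actlen;
 *
 *	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *	// "actlen" now holds the number of bytes actually transferred
 */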
2010
2011/*------------------------------------------------------------------------*
2012 *	usbd_xfer_set_frame_offset
2013 *
2014 * This function sets the frame data buffer offset relative to the beginning
2015 * of the USB DMA buffer allocated for this USB transfer.
2016 *------------------------------------------------------------------------*/
2017void
2018usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2019    usb_frcount_t frindex)
2020{
2021	KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2022	    "when the USB buffer is external\n"));
2023	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2024
2025	/* set virtual address to load */
2026	xfer->frbuffers[frindex].buffer =
2027	    USB_ADD_BYTES(xfer->local_buffer, offset);
2028}
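
/*
 * Usage sketch (hypothetical, not part of this file): one way to lay
 * out a two-frame control transfer inside the pre-allocated local
 * buffer, assuming the transfer was not configured with an external
 * buffer. "req" is a "struct usb_device_request" and "len" is the
 * data stage length, both assumed to be set up by the caller.
 *
 *	usbd_copy_in(xfer->frbuffers, 0, &req, sizeof(req));
 *	usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
 *
 *	// let the data stage start right after the request
 *	usbd_xfer_set_frame_offset(xfer, sizeof(req), 1);
 *	usbd_xfer_set_frame_len(xfer, 1, len);
 *
 *	usbd_xfer_set_frames(xfer, 2);
 *	usbd_transfer_submit(xfer);
 */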
2029
2030void
2031usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2032{
2033	xfer->interval = i;
2034}
2035
2036void
2037usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2038{
2039	xfer->timeout = t;
2040}
2041
2042void
2043usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2044{
2045	xfer->nframes = n;
2046}
2047
2048usb_frcount_t
2049usbd_xfer_max_frames(struct usb_xfer *xfer)
2050{
2051	return (xfer->max_frame_count);
2052}
2053
2054usb_frlength_t
2055usbd_xfer_max_len(struct usb_xfer *xfer)
2056{
2057	return (xfer->max_data_length);
2058}
2059
2060usb_frlength_t
2061usbd_xfer_max_framelen(struct usb_xfer *xfer)
2062{
2063	return (xfer->max_frame_size);
2064}
2065
2066void
2067usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2068    usb_frlength_t len)
2069{
2070	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2071
2072	xfer->frlengths[frindex] = len;
2073}
2074
2075/*------------------------------------------------------------------------*
2076 *	usb_callback_proc - factored out code
2077 *
2078 * This function performs USB callbacks.
2079 *------------------------------------------------------------------------*/
2080static void
2081usb_callback_proc(struct usb_proc_msg *_pm)
2082{
2083	struct usb_done_msg *pm = (void *)_pm;
2084	struct usb_xfer_root *info = pm->xroot;
2085
2086	/* Change locking order */
2087	USB_BUS_UNLOCK(info->bus);
2088
2089	/*
2090	 * We exploit the fact that the mutex is the same for all
2091	 * callbacks that will be called from this thread:
2092	 */
2093	mtx_lock(info->xfer_mtx);
2094	USB_BUS_LOCK(info->bus);
2095
2096	/* Continue where we lost track */
2097	usb_command_wrapper(&info->done_q,
2098	    info->done_q.curr);
2099
2100	mtx_unlock(info->xfer_mtx);
2101}
2102
2103/*------------------------------------------------------------------------*
2104 *	usbd_callback_ss_done_defer
2105 *
2106 * This function will defer the start, stop and done callback to the
2107 * correct thread.
2108 *------------------------------------------------------------------------*/
2109static void
2110usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2111{
2112	struct usb_xfer_root *info = xfer->xroot;
2113	struct usb_xfer_queue *pq = &info->done_q;
2114
2115	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2116
2117	if (pq->curr != xfer) {
2118		usbd_transfer_enqueue(pq, xfer);
2119	}
2120	if (!pq->recurse_1) {
2121
2122		/*
2123	         * We have to postpone the callback due to the fact that we
2124	         * will have a Lock Order Reversal, LOR, if we try to
2125	         * proceed !
2126	         */
2127		if (usb_proc_msignal(info->done_p,
2128		    &info->done_m[0], &info->done_m[1])) {
2129			/* ignore */
2130		}
2131	} else {
2132		/* clear second recurse flag */
2133		pq->recurse_2 = 0;
2134	}
2135	return;
2136
2137}
2138
2139/*------------------------------------------------------------------------*
2140 *	usbd_callback_wrapper
2141 *
2142 * This is a wrapper for USB callbacks. This wrapper does some
2143 * auto-magic things like figuring out if we can call the callback
2144 * directly from the current context or if we need to wakeup the
2145 * interrupt process.
2146 *------------------------------------------------------------------------*/
2147static void
2148usbd_callback_wrapper(struct usb_xfer_queue *pq)
2149{
2150	struct usb_xfer *xfer = pq->curr;
2151	struct usb_xfer_root *info = xfer->xroot;
2152
2153	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2154	if (!mtx_owned(info->xfer_mtx) && !SCHEDULER_STOPPED()) {
2155		/*
2156	       	 * Cases that end up here:
2157		 *
2158		 * 5) HW interrupt done callback or other source.
2159		 */
2160		DPRINTFN(3, "case 5\n");
2161
2162		/*
2163	         * We have to postpone the callback due to the fact that we
2164	         * will have a Lock Order Reversal, LOR, if we try to
2165	         * proceed !
2166	         */
2167		if (usb_proc_msignal(info->done_p,
2168		    &info->done_m[0], &info->done_m[1])) {
2169			/* ignore */
2170		}
2171		return;
2172	}
2173	/*
2174	 * Cases that end up here:
2175	 *
2176	 * 1) We are starting a transfer
2177	 * 2) We are prematurely calling back a transfer
2178	 * 3) We are stopping a transfer
2179	 * 4) We are doing an ordinary callback
2180	 */
2181	DPRINTFN(3, "case 1-4\n");
2182	/* get next USB transfer in the queue */
2183	info->done_q.curr = NULL;
2184
2185	/* set flag in case of drain */
2186	xfer->flags_int.doing_callback = 1;
2187
2188	USB_BUS_UNLOCK(info->bus);
2189	USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2190
2191	/* set correct USB state for callback */
2192	if (!xfer->flags_int.transferring) {
2193		xfer->usb_state = USB_ST_SETUP;
2194		if (!xfer->flags_int.started) {
2195			/* we got stopped before we even got started */
2196			USB_BUS_LOCK(info->bus);
2197			goto done;
2198		}
2199	} else {
2200
2201		if (usbd_callback_wrapper_sub(xfer)) {
2202			/* the callback has been deferred */
2203			USB_BUS_LOCK(info->bus);
2204			goto done;
2205		}
2206#if USB_HAVE_POWERD
2207		/* decrement power reference */
2208		usbd_transfer_power_ref(xfer, -1);
2209#endif
2210		xfer->flags_int.transferring = 0;
2211
2212		if (xfer->error) {
2213			xfer->usb_state = USB_ST_ERROR;
2214		} else {
2215			/* set transferred state */
2216			xfer->usb_state = USB_ST_TRANSFERRED;
2217#if USB_HAVE_BUSDMA
2218			/* sync DMA memory, if any */
2219			if (xfer->flags_int.bdma_enable &&
2220			    (!xfer->flags_int.bdma_no_post_sync)) {
2221				usb_bdma_post_sync(xfer);
2222			}
2223#endif
2224		}
2225	}
2226
2227#if USB_HAVE_PF
2228	if (xfer->usb_state != USB_ST_SETUP)
2229		usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2230#endif
2231	/* call processing routine */
2232	(xfer->callback) (xfer, xfer->error);
2233
2234	/* pickup the USB mutex again */
2235	USB_BUS_LOCK(info->bus);
2236
2237	/*
2238	 * Check if we got started after we got cancelled, but
2239	 * before we managed to do the callback.
2240	 */
2241	if ((!xfer->flags_int.open) &&
2242	    (xfer->flags_int.started) &&
2243	    (xfer->usb_state == USB_ST_ERROR)) {
2244		/* clear flag in case of drain */
2245		xfer->flags_int.doing_callback = 0;
2246		/* try to loop, but not recursively */
2247		usb_command_wrapper(&info->done_q, xfer);
2248		return;
2249	}
2250
2251done:
2252	/* clear flag in case of drain */
2253	xfer->flags_int.doing_callback = 0;
2254
2255	/*
2256	 * Check if we are draining.
2257	 */
2258	if (xfer->flags_int.draining &&
2259	    (!xfer->flags_int.transferring)) {
2260		/* "usbd_transfer_drain()" is waiting for end of transfer */
2261		xfer->flags_int.draining = 0;
2262		cv_broadcast(&info->cv_drain);
2263	}
2264
2265	/* do the next callback, if any */
2266	usb_command_wrapper(&info->done_q,
2267	    info->done_q.curr);
2268}
2269
2270/*------------------------------------------------------------------------*
2271 *	usb_dma_delay_done_cb
2272 *
2273 * This function is called when the DMA delay has been executed, and
2274 * will make sure that the callback is called to complete the USB
2275 * transfer. This code path is usually only used when there is a USB
2276 * error like USB_ERR_CANCELLED.
2277 *------------------------------------------------------------------------*/
2278void
2279usb_dma_delay_done_cb(struct usb_xfer *xfer)
2280{
2281	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2282
2283	DPRINTFN(3, "Completed %p\n", xfer);
2284
2285	/* queue callback for execution, again */
2286	usbd_transfer_done(xfer, 0);
2287}
2288
2289/*------------------------------------------------------------------------*
2290 *	usbd_transfer_dequeue
2291 *
2292 *  - This function is used to remove a USB transfer from a USB
2293 *  transfer queue.
2294 *
2295 *  - This function can be called multiple times in a row.
2296 *------------------------------------------------------------------------*/
2297void
2298usbd_transfer_dequeue(struct usb_xfer *xfer)
2299{
2300	struct usb_xfer_queue *pq;
2301
2302	pq = xfer->wait_queue;
2303	if (pq) {
2304		TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2305		xfer->wait_queue = NULL;
2306	}
2307}
2308
2309/*------------------------------------------------------------------------*
2310 *	usbd_transfer_enqueue
2311 *
2312 *  - This function is used to insert a USB transfer into a USB
2313 *  transfer queue.
2314 *
2315 *  - This function can be called multiple times in a row.
2316 *------------------------------------------------------------------------*/
2317void
2318usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2319{
2320	/*
2321	 * Insert the USB transfer into the queue, if it is not
2322	 * already on a USB transfer queue:
2323	 */
2324	if (xfer->wait_queue == NULL) {
2325		xfer->wait_queue = pq;
2326		TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2327	}
2328}
2329
2330/*------------------------------------------------------------------------*
2331 *	usbd_transfer_done
2332 *
2333 *  - This function is used to remove a USB transfer from the busdma,
2334 *  pipe or interrupt queue.
2335 *
2336 *  - This function is used to queue the USB transfer on the done
2337 *  queue.
2338 *
2339 *  - This function is used to stop any USB transfer timeouts.
2340 *------------------------------------------------------------------------*/
2341void
2342usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2343{
2344	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2345
2346	DPRINTF("err=%s\n", usbd_errstr(error));
2347
2348	/*
2349	 * If we are not transferring then just return.
2350	 * This can happen during transfer cancel.
2351	 */
2352	if (!xfer->flags_int.transferring) {
2353		DPRINTF("not transferring\n");
2354		/* end of control transfer, if any */
2355		xfer->flags_int.control_act = 0;
2356		return;
2357	}
2358	/* only set transfer error if not already set */
2359	if (!xfer->error) {
2360		xfer->error = error;
2361	}
2362	/* stop any callouts */
2363	usb_callout_stop(&xfer->timeout_handle);
2364
2365	/*
2366	 * If we are waiting on a queue, just remove the USB transfer
2367	 * from the queue, if any. The caller must hold the required
2368	 * locks when this function is called.
2369	 */
2370	usbd_transfer_dequeue(xfer);
2371
2372#if USB_HAVE_BUSDMA
2373	if (mtx_owned(xfer->xroot->xfer_mtx)) {
2374		struct usb_xfer_queue *pq;
2375
2376		/*
2377		 * If the private USB lock is not locked, then we assume
2378		 * that the BUS-DMA load stage has been passed:
2379		 */
2380		pq = &xfer->xroot->dma_q;
2381
2382		if (pq->curr == xfer) {
2383			/* start the next BUS-DMA load, if any */
2384			usb_command_wrapper(pq, NULL);
2385		}
2386	}
2387#endif
2388	/* keep some statistics */
2389	if (xfer->error) {
2390		xfer->xroot->bus->stats_err.uds_requests
2391		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2392	} else {
2393		xfer->xroot->bus->stats_ok.uds_requests
2394		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2395	}
2396
2397	/* call the USB transfer callback */
2398	usbd_callback_ss_done_defer(xfer);
2399}
2400
2401/*------------------------------------------------------------------------*
2402 *	usbd_transfer_start_cb
2403 *
2404 * This function is called to start the USB transfer when
2405 * "xfer->interval" is greater than zero, and the endpoint type is
2406 * BULK or CONTROL.
2407 *------------------------------------------------------------------------*/
2408static void
2409usbd_transfer_start_cb(void *arg)
2410{
2411	struct usb_xfer *xfer = arg;
2412	struct usb_endpoint *ep = xfer->endpoint;
2413
2414	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2415
2416	DPRINTF("start\n");
2417
2418#if USB_HAVE_PF
2419	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2420#endif
2421
2422	/* the transfer can now be cancelled */
2423	xfer->flags_int.can_cancel_immed = 1;
2424
2425	/* start USB transfer, if no error */
2426	if (xfer->error == 0)
2427		(ep->methods->start) (xfer);
2428
2429	/* check for transfer error */
2430	if (xfer->error) {
2431		/* some error has happened */
2432		usbd_transfer_done(xfer, 0);
2433	}
2434}
2435
2436/*------------------------------------------------------------------------*
2437 *	usbd_xfer_set_stall
2438 *
2439 * This function is used to set the stall flag outside the
2440 * callback. This function is NULL safe.
2441 *------------------------------------------------------------------------*/
2442void
2443usbd_xfer_set_stall(struct usb_xfer *xfer)
2444{
2445	if (xfer == NULL) {
2446		/* tearing down */
2447		return;
2448	}
2449	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2450
2451	/* avoid any races by locking the USB mutex */
2452	USB_BUS_LOCK(xfer->xroot->bus);
2453	xfer->flags.stall_pipe = 1;
2454	USB_BUS_UNLOCK(xfer->xroot->bus);
2455}
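
/*
 * Usage sketch (hypothetical, not part of this file): the common
 * error recovery pattern used in transfer callbacks, where a failed
 * BULK or INTERRUPT transfer requests a clear-stall and is then
 * restarted from its setup code. "tr_setup" is just a conventional
 * label name.
 *
 *	default:			// Error
 *		if (error != USB_ERR_CANCELLED) {
 *			// try to clear stall first
 *			usbd_xfer_set_stall(xfer);
 *			goto tr_setup;
 *		}
 *		break;
 */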
2456
2457int
2458usbd_xfer_is_stalled(struct usb_xfer *xfer)
2459{
2460	return (xfer->endpoint->is_stalled);
2461}
2462
2463/*------------------------------------------------------------------------*
2464 *	usbd_transfer_clear_stall
2465 *
2466 * This function is used to clear the stall flag outside the
2467 * callback. This function is NULL safe.
2468 *------------------------------------------------------------------------*/
2469void
2470usbd_transfer_clear_stall(struct usb_xfer *xfer)
2471{
2472	if (xfer == NULL) {
2473		/* tearing down */
2474		return;
2475	}
2476	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2477
2478	/* avoid any races by locking the USB mutex */
2479	USB_BUS_LOCK(xfer->xroot->bus);
2480
2481	xfer->flags.stall_pipe = 0;
2482
2483	USB_BUS_UNLOCK(xfer->xroot->bus);
2484}
2485
2486/*------------------------------------------------------------------------*
2487 *	usbd_pipe_start
2488 *
2489 * This function is used to add a USB transfer to the pipe transfer list.
2490 *------------------------------------------------------------------------*/
2491void
2492usbd_pipe_start(struct usb_xfer_queue *pq)
2493{
2494	struct usb_endpoint *ep;
2495	struct usb_xfer *xfer;
2496	uint8_t type;
2497
2498	xfer = pq->curr;
2499	ep = xfer->endpoint;
2500
2501	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2502
2503	/*
2504	 * If the endpoint is already stalled we do nothing !
2505	 */
2506	if (ep->is_stalled) {
2507		return;
2508	}
2509	/*
2510	 * Check if we are supposed to stall the endpoint:
2511	 */
2512	if (xfer->flags.stall_pipe) {
2513		struct usb_device *udev;
2514		struct usb_xfer_root *info;
2515
2516		/* clear stall command */
2517		xfer->flags.stall_pipe = 0;
2518
2519		/* get pointer to USB device */
2520		info = xfer->xroot;
2521		udev = info->udev;
2522
2523		/*
2524		 * Only stall BULK and INTERRUPT endpoints.
2525		 */
2526		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2527		if ((type == UE_BULK) ||
2528		    (type == UE_INTERRUPT)) {
2529			uint8_t did_stall;
2530
2531			did_stall = 1;
2532
2533			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2534				(udev->bus->methods->set_stall) (
2535				    udev, NULL, ep, &did_stall);
2536			} else if (udev->ctrl_xfer[1]) {
2537				info = udev->ctrl_xfer[1]->xroot;
2538				usb_proc_msignal(
2539				    &info->bus->non_giant_callback_proc,
2540				    &udev->cs_msg[0], &udev->cs_msg[1]);
2541			} else {
2542				/* should not happen */
2543				DPRINTFN(0, "No stall handler\n");
2544			}
2545			/*
2546			 * Check if we should stall. Some USB hardware
2547			 * handles set- and clear-stall in hardware.
2548			 */
2549			if (did_stall) {
2550				/*
2551				 * The transfer will be continued when
2552				 * the clear-stall control endpoint
2553				 * message is received.
2554				 */
2555				ep->is_stalled = 1;
2556				return;
2557			}
2558		} else if (type == UE_ISOCHRONOUS) {
2559
2560			/*
2561			 * Make sure any FIFO overflow or other FIFO
2562			 * error conditions go away by resetting the
2563			 * endpoint FIFO through the clear stall
2564			 * method.
2565			 */
2566			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2567				(udev->bus->methods->clear_stall) (udev, ep);
2568			}
2569		}
2570	}
2571	/* Set or clear stall complete - special case */
2572	if (xfer->nframes == 0) {
2573		/* we are complete */
2574		xfer->aframes = 0;
2575		usbd_transfer_done(xfer, 0);
2576		return;
2577	}
2578	/*
2579	 * Handled cases:
2580	 *
2581	 * 1) Start the first transfer queued.
2582	 *
2583	 * 2) Re-start the current USB transfer.
2584	 */
2585	/*
2586	 * Check if there should be any
2587	 * pre transfer start delay:
2588	 */
2589	if (xfer->interval > 0) {
2590		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2591		if ((type == UE_BULK) ||
2592		    (type == UE_CONTROL)) {
2593			usbd_transfer_timeout_ms(xfer,
2594			    &usbd_transfer_start_cb,
2595			    xfer->interval);
2596			return;
2597		}
2598	}
2599	DPRINTF("start\n");
2600
2601#if USB_HAVE_PF
2602	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2603#endif
2604	/* the transfer can now be cancelled */
2605	xfer->flags_int.can_cancel_immed = 1;
2606
2607	/* start USB transfer, if no error */
2608	if (xfer->error == 0)
2609		(ep->methods->start) (xfer);
2610
2611	/* check for transfer error */
2612	if (xfer->error) {
2613		/* some error has happened */
2614		usbd_transfer_done(xfer, 0);
2615	}
2616}
2617
2618/*------------------------------------------------------------------------*
2619 *	usbd_transfer_timeout_ms
2620 *
2621 * This function is used to set up a timeout on the given USB
2622 * transfer. If the timeout has been deferred the callback given by
2623 * "cb" will get called after "ms" milliseconds.
2624 *------------------------------------------------------------------------*/
2625void
2626usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2627    void (*cb) (void *arg), usb_timeout_t ms)
2628{
2629	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2630
2631	/* defer delay */
2632	usb_callout_reset(&xfer->timeout_handle,
2633	    USB_MS_TO_TICKS(ms), cb, xfer);
2634}
2635
2636/*------------------------------------------------------------------------*
2637 *	usbd_callback_wrapper_sub
2638 *
2639 *  - This function will update variables in a USB transfer after
2640 *  the USB transfer is complete.
2641 *
2642 *  - This function is used to start the next USB transfer on the
2643 *  ep transfer queue, if any.
2644 *
2645 * NOTE: In some special cases the USB transfer will not be removed from
2646 * the pipe queue, but remain first. To enforce USB transfer removal call
2647 * this function passing the error code "USB_ERR_CANCELLED".
2648 *
2649 * Return values:
2650 * 0: Success.
2651 * Else: The callback has been deferred.
2652 *------------------------------------------------------------------------*/
2653static uint8_t
2654usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2655{
2656	struct usb_endpoint *ep;
2657	struct usb_bus *bus;
2658	usb_frcount_t x;
2659
2660	bus = xfer->xroot->bus;
2661
2662	if ((!xfer->flags_int.open) &&
2663	    (!xfer->flags_int.did_close)) {
2664		DPRINTF("close\n");
2665		USB_BUS_LOCK(bus);
2666		(xfer->endpoint->methods->close) (xfer);
2667		USB_BUS_UNLOCK(bus);
2668		/* only close once */
2669		xfer->flags_int.did_close = 1;
2670		return (1);		/* wait for new callback */
2671	}
2672	/*
2673	 * If we have a non-hardware induced error we
2674	 * need to do the DMA delay!
2675	 */
2676	if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2677	    (xfer->error == USB_ERR_CANCELLED ||
2678	    xfer->error == USB_ERR_TIMEOUT ||
2679	    bus->methods->start_dma_delay != NULL)) {
2680
2681		usb_timeout_t temp;
2682
2683		/* only delay once */
2684		xfer->flags_int.did_dma_delay = 1;
2685
2686		/* we can not cancel this delay */
2687		xfer->flags_int.can_cancel_immed = 0;
2688
2689		temp = usbd_get_dma_delay(xfer->xroot->udev);
2690
2691		DPRINTFN(3, "DMA delay, %u ms, "
2692		    "on %p\n", temp, xfer);
2693
2694		if (temp != 0) {
2695			USB_BUS_LOCK(bus);
2696			/*
2697			 * Some hardware solutions have dedicated
2698			 * events when it is safe to free DMA'ed
2699			 * memory. For the other hardware platforms we
2700			 * use a static delay.
2701			 */
2702			if (bus->methods->start_dma_delay != NULL) {
2703				(bus->methods->start_dma_delay) (xfer);
2704			} else {
2705				usbd_transfer_timeout_ms(xfer,
2706				    (void (*)(void *))&usb_dma_delay_done_cb,
2707				    temp);
2708			}
2709			USB_BUS_UNLOCK(bus);
2710			return (1);	/* wait for new callback */
2711		}
2712	}
2713	/* check actual number of frames */
2714	if (xfer->aframes > xfer->nframes) {
2715		if (xfer->error == 0) {
2716			panic("%s: actual number of frames, %d, is "
2717			    "greater than initial number of frames, %d\n",
2718			    __FUNCTION__, xfer->aframes, xfer->nframes);
2719		} else {
2720			/* just set some valid value */
2721			xfer->aframes = xfer->nframes;
2722		}
2723	}
2724	/* compute actual length */
2725	xfer->actlen = 0;
2726
2727	for (x = 0; x != xfer->aframes; x++) {
2728		xfer->actlen += xfer->frlengths[x];
2729	}
2730
2731	/*
2732	 * Frames that were not transferred get zero actual length in
2733	 * case the USB device driver does not check the actual number
2734	 * of frames transferred, "xfer->aframes":
2735	 */
2736	for (; x < xfer->nframes; x++) {
2737		usbd_xfer_set_frame_len(xfer, x, 0);
2738	}
2739
2740	/* check actual length */
2741	if (xfer->actlen > xfer->sumlen) {
2742		if (xfer->error == 0) {
2743			panic("%s: actual length, %d, is greater than "
2744			    "initial length, %d\n",
2745			    __FUNCTION__, xfer->actlen, xfer->sumlen);
2746		} else {
2747			/* just set some valid value */
2748			xfer->actlen = xfer->sumlen;
2749		}
2750	}
2751	DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2752	    xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2753	    xfer->aframes, xfer->nframes);
2754
2755	if (xfer->error) {
2756		/* end of control transfer, if any */
2757		xfer->flags_int.control_act = 0;
2758
2759		/* check if we should block the execution queue */
2760		if ((xfer->error != USB_ERR_CANCELLED) &&
2761		    (xfer->flags.pipe_bof)) {
2762			DPRINTFN(2, "xfer=%p: Block On Failure "
2763			    "on endpoint=%p\n", xfer, xfer->endpoint);
2764			goto done;
2765		}
2766	} else {
2767		/* check for short transfers */
2768		if (xfer->actlen < xfer->sumlen) {
2769
2770			/* end of control transfer, if any */
2771			xfer->flags_int.control_act = 0;
2772
2773			if (!xfer->flags_int.short_xfer_ok) {
2774				xfer->error = USB_ERR_SHORT_XFER;
2775				if (xfer->flags.pipe_bof) {
2776					DPRINTFN(2, "xfer=%p: Block On Failure on "
2777					    "Short Transfer on endpoint %p.\n",
2778					    xfer, xfer->endpoint);
2779					goto done;
2780				}
2781			}
2782		} else {
2783			/*
2784			 * Check if we are in the middle of a
2785			 * control transfer:
2786			 */
2787			if (xfer->flags_int.control_act) {
2788				DPRINTFN(5, "xfer=%p: Control transfer "
2789				    "active on endpoint=%p\n", xfer, xfer->endpoint);
2790				goto done;
2791			}
2792		}
2793	}
2794
2795	ep = xfer->endpoint;
2796
2797	/*
2798	 * If the current USB transfer is completing we need to start the
2799	 * next one:
2800	 */
2801	USB_BUS_LOCK(bus);
2802	if (ep->endpoint_q.curr == xfer) {
2803		usb_command_wrapper(&ep->endpoint_q, NULL);
2804
2805		if (ep->endpoint_q.curr || TAILQ_FIRST(&ep->endpoint_q.head)) {
2806			/* there is another USB transfer waiting */
2807		} else {
2808			/* this is the last USB transfer */
2809			/* clear isochronous sync flag */
2810			xfer->endpoint->is_synced = 0;
2811		}
2812	}
2813	USB_BUS_UNLOCK(bus);
2814done:
2815	return (0);
2816}
2817
2818/*------------------------------------------------------------------------*
2819 *	usb_command_wrapper
2820 *
2821 * This function is used to execute commands non-recursively on a USB
2822 * transfer.
2823 *------------------------------------------------------------------------*/
2824void
2825usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2826{
2827	if (xfer) {
2828		/*
2829		 * If the transfer is not already processing,
2830		 * queue it!
2831		 */
2832		if (pq->curr != xfer) {
2833			usbd_transfer_enqueue(pq, xfer);
2834			if (pq->curr != NULL) {
2835				/* something is already processing */
2836				DPRINTFN(6, "busy %p\n", pq->curr);
2837				return;
2838			}
2839		}
2840	} else {
2841		/* Get next element in queue */
2842		pq->curr = NULL;
2843	}
2844
2845	if (!pq->recurse_1) {
2846
2847		do {
2848
2849			/* set both recurse flags */
2850			pq->recurse_1 = 1;
2851			pq->recurse_2 = 1;
2852
2853			if (pq->curr == NULL) {
2854				xfer = TAILQ_FIRST(&pq->head);
2855				if (xfer) {
2856					TAILQ_REMOVE(&pq->head, xfer,
2857					    wait_entry);
2858					xfer->wait_queue = NULL;
2859					pq->curr = xfer;
2860				} else {
2861					break;
2862				}
2863			}
2864			DPRINTFN(6, "cb %p (enter)\n", pq->curr);
2865			(pq->command) (pq);
2866			DPRINTFN(6, "cb %p (leave)\n", pq->curr);
2867
2868		} while (!pq->recurse_2);
2869
2870		/* clear first recurse flag */
2871		pq->recurse_1 = 0;
2872
2873	} else {
2874		/* clear second recurse flag */
2875		pq->recurse_2 = 0;
2876	}
2877}
2878
2879/*------------------------------------------------------------------------*
2880 *	usbd_ctrl_transfer_setup
2881 *
2882 * This function is used to set up the default USB control endpoint
2883 * transfer.
2884 *------------------------------------------------------------------------*/
2885void
2886usbd_ctrl_transfer_setup(struct usb_device *udev)
2887{
2888	struct usb_xfer *xfer;
2889	uint8_t no_resetup;
2890	uint8_t iface_index;
2891
2892	/* check for root HUB */
2893	if (udev->parent_hub == NULL)
2894		return;
2895repeat:
2896
2897	xfer = udev->ctrl_xfer[0];
2898	if (xfer) {
2899		USB_XFER_LOCK(xfer);
2900		no_resetup =
2901		    ((xfer->address == udev->address) &&
2902		    (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
2903		    udev->ddesc.bMaxPacketSize));
2904		if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2905			if (no_resetup) {
2906				/*
2907				 * NOTE: checking "xfer->address" and
2908				 * starting the USB transfer must be
2909				 * atomic!
2910				 */
2911				usbd_transfer_start(xfer);
2912			}
2913		}
2914		USB_XFER_UNLOCK(xfer);
2915	} else {
2916		no_resetup = 0;
2917	}
2918
2919	if (no_resetup) {
2920		/*
2921	         * All parameters are exactly the same as before.
2922	         * Just return.
2923	         */
2924		return;
2925	}
2926	/*
2927	 * Update wMaxPacketSize for the default control endpoint:
2928	 */
2929	udev->ctrl_ep_desc.wMaxPacketSize[0] =
2930	    udev->ddesc.bMaxPacketSize;
2931
2932	/*
2933	 * Unsetup any existing USB transfer:
2934	 */
2935	usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
2936
2937	/*
2938	 * Reset clear stall error counter.
2939	 */
2940	udev->clear_stall_errors = 0;
2941
2942	/*
2943	 * Try to setup a new USB transfer for the
2944	 * default control endpoint:
2945	 */
2946	iface_index = 0;
2947	if (usbd_transfer_setup(udev, &iface_index,
2948	    udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
2949	    &udev->device_mtx)) {
2950		DPRINTFN(0, "could not setup default "
2951		    "USB transfer\n");
2952	} else {
2953		goto repeat;
2954	}
2955}
2956
2957/*------------------------------------------------------------------------*
2958 *	usbd_clear_stall_locked - factored out code
2959 *
2960 * NOTE: the intention of this function is not to reset the hardware
2961 * data toggle.
2962 *------------------------------------------------------------------------*/
2963void
2964usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
2965{
2966	USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
2967
2968	/* check that we have a valid case */
2969	if (udev->flags.usb_mode == USB_MODE_HOST &&
2970	    udev->parent_hub != NULL &&
2971	    udev->bus->methods->clear_stall != NULL &&
2972	    ep->methods != NULL) {
2973		(udev->bus->methods->clear_stall) (udev, ep);
2974	}
2975}
2976
2977/*------------------------------------------------------------------------*
2978 *	usbd_clear_data_toggle - factored out code
2979 *
2980 * NOTE: the intention of this function is not to reset the hardware
2981 * data toggle on the USB device side.
2982 *------------------------------------------------------------------------*/
2983void
2984usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
2985{
2986	DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
2987
2988	USB_BUS_LOCK(udev->bus);
2989	ep->toggle_next = 0;
2990	/* some hardware needs a callback to clear the data toggle */
2991	usbd_clear_stall_locked(udev, ep);
2992	USB_BUS_UNLOCK(udev->bus);
2993}
2994
2995/*------------------------------------------------------------------------*
2996 *	usbd_clear_stall_callback - factored out clear stall callback
2997 *
2998 * Input parameters:
2999 *  xfer1: Clear Stall Control Transfer
3000 *  xfer2: Stalled USB Transfer
3001 *
3002 * This function is NULL safe.
3003 *
3004 * Return values:
3005 *   0: In progress
3006 *   Else: Finished
3007 *
3008 * Clear stall config example:
3009 *
3010 * static const struct usb_config my_clearstall =  {
3011 *	.type = UE_CONTROL,
3012 *	.endpoint = 0,
3013 *	.direction = UE_DIR_ANY,
3014 *	.interval = 50, //50 milliseconds
3015 *	.bufsize = sizeof(struct usb_device_request),
3016 *	.timeout = 1000, //1.000 seconds
3017 *	.callback = &my_clear_stall_callback, // **
3018 *	.usb_mode = USB_MODE_HOST,
3019 * };
3020 *
3021 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3022 * passing the correct parameters.
3023 *------------------------------------------------------------------------*/
3024uint8_t
3025usbd_clear_stall_callback(struct usb_xfer *xfer1,
3026    struct usb_xfer *xfer2)
3027{
3028	struct usb_device_request req;
3029
3030	if (xfer2 == NULL) {
3031		/* looks like we are tearing down */
3032		DPRINTF("NULL input parameter\n");
3033		return (0);
3034	}
3035	USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3036	USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3037
3038	switch (USB_GET_STATE(xfer1)) {
3039	case USB_ST_SETUP:
3040
3041		/*
3042		 * pre-clear the data toggle to DATA0 ("umass.c" and
3043		 * "ata-usb.c" depends on this)
3044		 */
3045
3046		usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3047
3048		/* setup a clear-stall packet */
3049
3050		req.bmRequestType = UT_WRITE_ENDPOINT;
3051		req.bRequest = UR_CLEAR_FEATURE;
3052		USETW(req.wValue, UF_ENDPOINT_HALT);
3053		req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3054		req.wIndex[1] = 0;
3055		USETW(req.wLength, 0);
3056
3057		/*
3058		 * "usbd_transfer_setup_sub()" will ensure that
3059		 * we have sufficient room in the buffer for
3060		 * the request structure!
3061		 */
3062
3063		/* copy in the transfer */
3064
3065		usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3066
3067		/* set length */
3068		xfer1->frlengths[0] = sizeof(req);
3069		xfer1->nframes = 1;
3070
3071		usbd_transfer_submit(xfer1);
3072		return (0);
3073
3074	case USB_ST_TRANSFERRED:
3075		break;
3076
3077	default:			/* Error */
3078		if (xfer1->error == USB_ERR_CANCELLED) {
3079			return (0);
3080		}
3081		break;
3082	}
3083	return (1);			/* Clear Stall Finished */
3084}
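
/*
 * Usage sketch (hypothetical, not part of this file): a wrapper
 * callback of the kind referenced above. "my_softc", "sc_data_xfer"
 * and "MY_FLAG_STALLED" are assumed driver names.
 *
 *	static void
 *	my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		struct my_softc *sc = usbd_xfer_softc(xfer);
 *
 *		if (usbd_clear_stall_callback(xfer, sc->sc_data_xfer)) {
 *			// clear-stall finished - restart stalled transfer
 *			sc->sc_flags &= ~MY_FLAG_STALLED;
 *			usbd_transfer_start(sc->sc_data_xfer);
 *		}
 *	}
 */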
3085
3086/*------------------------------------------------------------------------*
3087 *	usbd_transfer_poll
3088 *
3089 * The following function gets called from the USB keyboard driver and
3090 * UMASS when the system has panicked.
3091 *
3092 * NOTE: It is currently not possible to resume normal operation on
3093 * the USB controller which has been polled, due to clearing of the
3094 * "up_dsleep" and "up_msleep" flags.
3095 *------------------------------------------------------------------------*/
3096void
3097usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3098{
3099	struct usb_xfer *xfer;
3100	struct usb_xfer_root *xroot;
3101	struct usb_device *udev;
3102	struct usb_proc_msg *pm;
3103	uint16_t n;
3104	uint16_t drop_bus;
3105	uint16_t drop_xfer;
3106
3107	for (n = 0; n != max; n++) {
3108		/* Extra checks to avoid panic */
3109		xfer = ppxfer[n];
3110		if (xfer == NULL)
3111			continue;	/* no USB transfer */
3112		xroot = xfer->xroot;
3113		if (xroot == NULL)
3114			continue;	/* no USB root */
3115		udev = xroot->udev;
3116		if (udev == NULL)
3117			continue;	/* no USB device */
3118		if (udev->bus == NULL)
3119			continue;	/* no BUS structure */
3120		if (udev->bus->methods == NULL)
3121			continue;	/* no BUS methods */
3122		if (udev->bus->methods->xfer_poll == NULL)
3123			continue;	/* no poll method */
3124
3125		/* make sure that the BUS mutex is not locked */
3126		drop_bus = 0;
3127		while (mtx_owned(&xroot->udev->bus->bus_mtx) && !SCHEDULER_STOPPED()) {
3128			mtx_unlock(&xroot->udev->bus->bus_mtx);
3129			drop_bus++;
3130		}
3131
3132		/* make sure that the transfer mutex is not locked */
3133		drop_xfer = 0;
3134		while (mtx_owned(xroot->xfer_mtx) && !SCHEDULER_STOPPED()) {
3135			mtx_unlock(xroot->xfer_mtx);
3136			drop_xfer++;
3137		}
3138
3139		/* Make sure cv_signal() and cv_broadcast() are not called */
3140		udev->bus->control_xfer_proc.up_msleep = 0;
3141		udev->bus->explore_proc.up_msleep = 0;
3142		udev->bus->giant_callback_proc.up_msleep = 0;
3143		udev->bus->non_giant_callback_proc.up_msleep = 0;
3144
3145		/* poll USB hardware */
3146		(udev->bus->methods->xfer_poll) (udev->bus);
3147
3148		USB_BUS_LOCK(xroot->bus);
3149
3150		/* check for clear stall */
3151		if (udev->ctrl_xfer[1] != NULL) {
3152
3153			/* poll clear stall start */
3154			pm = &udev->cs_msg[0].hdr;
3155			(pm->pm_callback) (pm);
3156			/* poll clear stall done thread */
3157			pm = &udev->ctrl_xfer[1]->
3158			    xroot->done_m[0].hdr;
3159			(pm->pm_callback) (pm);
3160		}
3161
3162		/* poll done thread */
3163		pm = &xroot->done_m[0].hdr;
3164		(pm->pm_callback) (pm);
3165
3166		USB_BUS_UNLOCK(xroot->bus);
3167
3168		/* restore transfer mutex */
3169		while (drop_xfer--)
3170			mtx_lock(xroot->xfer_mtx);
3171
3172		/* restore BUS mutex */
3173		while (drop_bus--)
3174			mtx_lock(&xroot->udev->bus->bus_mtx);
3175	}
3176}
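
/*
 * Usage sketch (hypothetical, not part of this file): a driver that
 * must keep working after a panic, for example a keyboard driver,
 * polls its own transfer array from its polled input path.
 * "MY_N_TRANSFER" is an assumed name for the array size.
 *
 *	usbd_transfer_poll(sc->sc_xfer, MY_N_TRANSFER);
 */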
3177
3178static void
3179usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3180    uint8_t type, enum usb_dev_speed speed)
3181{
3182	static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3183		[USB_SPEED_LOW] = 8,
3184		[USB_SPEED_FULL] = 64,
3185		[USB_SPEED_HIGH] = 1024,
3186		[USB_SPEED_VARIABLE] = 1024,
3187		[USB_SPEED_SUPER] = 1024,
3188	};
3189
3190	static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3191		[USB_SPEED_LOW] = 0,	/* invalid */
3192		[USB_SPEED_FULL] = 1023,
3193		[USB_SPEED_HIGH] = 1024,
3194		[USB_SPEED_VARIABLE] = 3584,
3195		[USB_SPEED_SUPER] = 1024,
3196	};
3197
3198	static const uint16_t control_min[USB_SPEED_MAX] = {
3199		[USB_SPEED_LOW] = 8,
3200		[USB_SPEED_FULL] = 8,
3201		[USB_SPEED_HIGH] = 64,
3202		[USB_SPEED_VARIABLE] = 512,
3203		[USB_SPEED_SUPER] = 512,
3204	};
3205
3206	static const uint16_t bulk_min[USB_SPEED_MAX] = {
3207		[USB_SPEED_LOW] = 8,
3208		[USB_SPEED_FULL] = 8,
3209		[USB_SPEED_HIGH] = 512,
3210		[USB_SPEED_VARIABLE] = 512,
3211		[USB_SPEED_SUPER] = 1024,
3212	};
3213
3214	uint16_t temp;
3215
3216	memset(ptr, 0, sizeof(*ptr));
3217
3218	switch (type) {
3219	case UE_INTERRUPT:
3220		ptr->range.max = intr_range_max[speed];
3221		break;
3222	case UE_ISOCHRONOUS:
3223		ptr->range.max = isoc_range_max[speed];
3224		break;
3225	default:
3226		if (type == UE_BULK)
3227			temp = bulk_min[speed];
3228		else /* UE_CONTROL */
3229			temp = control_min[speed];
3230
3231		/* default is fixed */
3232		ptr->fixed[0] = temp;
3233		ptr->fixed[1] = temp;
3234		ptr->fixed[2] = temp;
3235		ptr->fixed[3] = temp;
3236
3237		if (speed == USB_SPEED_FULL) {
3238			/* multiple sizes */
3239			ptr->fixed[1] = 16;
3240			ptr->fixed[2] = 32;
3241			ptr->fixed[3] = 64;
3242		}
3243		if ((speed == USB_SPEED_VARIABLE) &&
3244		    (type == UE_BULK)) {
3245			/* multiple sizes */
3246			ptr->fixed[2] = 1024;
3247			ptr->fixed[3] = 1536;
3248		}
3249		break;
3250	}
3251}
3252
3253void	*
3254usbd_xfer_softc(struct usb_xfer *xfer)
3255{
3256	return (xfer->priv_sc);
3257}
3258
3259void *
3260usbd_xfer_get_priv(struct usb_xfer *xfer)
3261{
3262	return (xfer->priv_fifo);
3263}
3264
3265void
3266usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3267{
3268	xfer->priv_fifo = ptr;
3269}
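
/*
 * Usage sketch (hypothetical, not part of this file): "priv_sc" is
 * normally the softc pointer passed to usbd_transfer_setup(), while
 * the private FIFO pointer is free for the driver to use, for
 * example to remember the mbuf currently loaded into a TX transfer.
 * "struct my_softc" is an assumed name.
 *
 *	struct my_softc *sc = usbd_xfer_softc(xfer);
 *	struct mbuf *m = usbd_xfer_get_priv(xfer);
 *
 *	if (m != NULL) {
 *		m_freem(m);
 *		usbd_xfer_set_priv(xfer, NULL);
 *	}
 */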
3270
3271uint8_t
3272usbd_xfer_state(struct usb_xfer *xfer)
3273{
3274	return (xfer->usb_state);
3275}
3276
3277void
3278usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3279{
3280	switch (flag) {
3281		case USB_FORCE_SHORT_XFER:
3282			xfer->flags.force_short_xfer = 1;
3283			break;
3284		case USB_SHORT_XFER_OK:
3285			xfer->flags.short_xfer_ok = 1;
3286			break;
3287		case USB_MULTI_SHORT_OK:
3288			xfer->flags.short_frames_ok = 1;
3289			break;
3290		case USB_MANUAL_STATUS:
3291			xfer->flags.manual_status = 1;
3292			break;
3293	}
3294}
3295
3296void
3297usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3298{
3299	switch (flag) {
3300		case USB_FORCE_SHORT_XFER:
3301			xfer->flags.force_short_xfer = 0;
3302			break;
3303		case USB_SHORT_XFER_OK:
3304			xfer->flags.short_xfer_ok = 0;
3305			break;
3306		case USB_MULTI_SHORT_OK:
3307			xfer->flags.short_frames_ok = 0;
3308			break;
3309		case USB_MANUAL_STATUS:
3310			xfer->flags.manual_status = 0;
3311			break;
3312	}
3313}
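
/*
 * Usage sketch (hypothetical, not part of this file): toggling the
 * short-transfer policy of a read transfer at runtime; the same
 * flags can also be set statically in the "struct usb_config".
 *
 *	// accept short incoming transfers while draining a FIFO
 *	usbd_xfer_set_flag(xfer, USB_SHORT_XFER_OK);
 *
 *	// return to strict length checking afterwards
 *	usbd_xfer_clr_flag(xfer, USB_SHORT_XFER_OK);
 */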
3314
3315/*
3316 * The following function returns the time, in milliseconds, at which
3317 * the isochronous transfer was completed by the hardware. The returned
3318 * value wraps around 65536 milliseconds.
3319 */
3320uint16_t
3321usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3322{
3323	return (xfer->isoc_time_complete);
3324}
3325