/*
 * BRIEF MODULE DESCRIPTION
 *	Au1000 USB Device-Side (device layer)
 *
 * Copyright 2001-2002 MontaVista Software Inc.
 * Author: MontaVista Software, Inc.
 *		stevel@mvista.com or source@mvista.com
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 *  NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 *  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 *  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/smp_lock.h>
#define DEBUG
#include <linux/usb.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/mipsregs.h>
#include <asm/au1000.h>
#include <asm/au1000_dma.h>
#include <asm/au1000_usbdev.h>

#ifdef DEBUG
#undef VDEBUG
#ifdef VDEBUG
#define vdbg(fmt, arg...) printk(KERN_DEBUG __FILE__ ": " fmt "\n" , ## arg)
#else
#define vdbg(fmt, arg...) do {} while (0)
#endif
#else
#define vdbg(fmt, arg...) do {} while (0)
#endif

#define MAX(a,b)	(((a)>(b))?(a):(b))

#define ALLOC_FLAGS (in_interrupt () ? GFP_ATOMIC : GFP_KERNEL)

#define EP_FIFO_DEPTH 8

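/*
 * Control-transfer bookkeeping for the default endpoint: do_setup()
 * returns the stage that follows the SETUP it just handled, and
 * process_ep0_receive() advances (or, on a new SETUP packet, resets)
 * dev->ep0_stage accordingly.
 */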
typedef enum {
	SETUP_STAGE = 0,
	DATA_STAGE,
	STATUS_STAGE
} ep0_stage_t;

typedef struct {
	int read_fifo;
	int write_fifo;
	int ctrl_stat;
	int read_fifo_status;
	int write_fifo_status;
} endpoint_reg_t;

typedef struct {
	usbdev_pkt_t *head;
	usbdev_pkt_t *tail;
	int count;
} pkt_list_t;
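
/*
 * Each endpoint (below) keeps one of these simple singly-linked packet
 * FIFOs per direction: packets queued on the inlist are transmitted in
 * order and freed from the head when their DMA completes, while the
 * outlist always has a fresh packet at its tail ready to receive the
 * next DATAx from the host.
 */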

typedef struct {
	int active;
	struct usb_endpoint_descriptor *desc;
	endpoint_reg_t *reg;
	/* Only one of these is used, unless this is the control ep */
	pkt_list_t inlist;
	pkt_list_t outlist;
	int indma, outdma; /* DMA channel numbers for IN, OUT */
	/* following are extracted from endpoint descriptor for easy access */
	int max_pkt_size;
	int type;
	int direction;
	/* WE assign endpoint addresses! */
	int address;
	spinlock_t lock;
} endpoint_t;


static struct usb_dev {
	endpoint_t ep[6];
	ep0_stage_t ep0_stage;

	struct usb_device_descriptor *   dev_desc;
	struct usb_interface_descriptor* if_desc;
	struct usb_config_descriptor *   conf_desc;
	u8 *                             full_conf_desc;
	struct usb_string_descriptor *   str_desc[6];

	/* callback to function layer */
	void (*func_cb)(usbdev_cb_type_t type, unsigned long arg,
			void *cb_data);
	void* cb_data;

	usbdev_state_t state;	// device state
	int suspended;		// suspended flag
	int address;		// device address
	int interface;
	int num_ep;
	u8 alternate_setting;
	u8 configuration;	// configuration value
	int remote_wakeup_en;
} usbdev;


static endpoint_reg_t ep_reg[] = {
	// FIFO's 0 and 1 are EP0 default control
	{USBD_EP0RD, USBD_EP0WR, USBD_EP0CS, USBD_EP0RDSTAT, USBD_EP0WRSTAT },
	{0},
	// FIFO 2 is EP2, IN
	{    -1,     USBD_EP2WR, USBD_EP2CS,     -1,         USBD_EP2WRSTAT },
	// FIFO 3 is EP3, IN
	{    -1,     USBD_EP3WR, USBD_EP3CS,     -1,         USBD_EP3WRSTAT },
	// FIFO 4 is EP4, OUT
	{USBD_EP4RD,     -1,     USBD_EP4CS, USBD_EP4RDSTAT,     -1         },
	// FIFO 5 is EP5, OUT
	{USBD_EP5RD,     -1,     USBD_EP5CS, USBD_EP5RDSTAT,     -1         }
};

static struct {
	unsigned int id;
	const char *str;
} ep_dma_id[] = {
	{ DMA_ID_USBDEV_EP0_TX, "USBDev EP0 IN" },
	{ DMA_ID_USBDEV_EP0_RX, "USBDev EP0 OUT" },
	{ DMA_ID_USBDEV_EP2_TX, "USBDev EP2 IN" },
	{ DMA_ID_USBDEV_EP3_TX, "USBDev EP3 IN" },
	{ DMA_ID_USBDEV_EP4_RX, "USBDev EP4 OUT" },
	{ DMA_ID_USBDEV_EP5_RX, "USBDev EP5 OUT" }
};

#define DIR_OUT 0
#define DIR_IN  (1<<3)

#define CONTROL_EP USB_ENDPOINT_XFER_CONTROL
#define BULK_EP    USB_ENDPOINT_XFER_BULK

static inline endpoint_t *
epaddr_to_ep(struct usb_dev* dev, int ep_addr)
{
	if (ep_addr >= 0 && ep_addr < 2)
		return &dev->ep[0];
	if (ep_addr < 6)
		return &dev->ep[ep_addr];
	return NULL;
}

static const char* std_req_name[] = {
	"GET_STATUS",
	"CLEAR_FEATURE",
	"RESERVED",
	"SET_FEATURE",
	"RESERVED",
	"SET_ADDRESS",
	"GET_DESCRIPTOR",
	"SET_DESCRIPTOR",
	"GET_CONFIGURATION",
	"SET_CONFIGURATION",
	"GET_INTERFACE",
	"SET_INTERFACE",
	"SYNCH_FRAME"
};

static inline const char*
get_std_req_name(int req)
{
	return (req >= 0 && req <= 12) ? std_req_name[req] : "UNKNOWN";
}


static inline usbdev_pkt_t *
alloc_packet(endpoint_t * ep, int data_size, void* data)
{
	usbdev_pkt_t* pkt =
		(usbdev_pkt_t *)kmalloc(sizeof(usbdev_pkt_t) + data_size,
					ALLOC_FLAGS);
	if (!pkt)
		return NULL;
	pkt->ep_addr = ep->address;
	pkt->size = data_size;
	pkt->status = 0;
	pkt->next = NULL;
	if (data)
		memcpy(pkt->payload, data, data_size);

	return pkt;
}


/*
 * Link a packet to the tail of the endpoint's packet list.
 * EP spinlock must be held when calling.
 */
static void
link_tail(endpoint_t * ep, pkt_list_t * list, usbdev_pkt_t * pkt)
{
	if (!list->tail) {
		list->head = list->tail = pkt;
		list->count = 1;
	} else {
		list->tail->next = pkt;
		list->tail = pkt;
		list->count++;
	}
}

/*
 * Unlink and return a packet from the head of the given packet
 * list. It is the responsibility of the caller to free the packet.
 * EP spinlock must be held when calling.
 */
static usbdev_pkt_t *
unlink_head(pkt_list_t * list)
{
	usbdev_pkt_t *pkt;

	pkt = list->head;
	if (!pkt || !list->count) {
		return NULL;
	}

	list->head = pkt->next;
	if (!list->head) {
		list->head = list->tail = NULL;
		list->count = 0;
	} else
		list->count--;

	return pkt;
}

/*
 * Create and attach a new packet to the tail of the endpoint's
 * packet list. EP spinlock must be held when calling.
 */
static usbdev_pkt_t *
add_packet(endpoint_t * ep, pkt_list_t * list, int size)
{
	usbdev_pkt_t *pkt = alloc_packet(ep, size, NULL);
	if (!pkt)
		return NULL;

	link_tail(ep, list, pkt);
	return pkt;
}


/*
 * Unlink and free a packet from the head of the endpoint's
 * packet list. EP spinlock must be held when calling.
 */
static inline void
free_packet(pkt_list_t * list)
{
	kfree(unlink_head(list));
}

/* EP spinlock must be held when calling. */
static inline void
flush_pkt_list(pkt_list_t * list)
{
	while (list->count)
		free_packet(list);
}

/* EP spinlock must be held when calling */
static inline void
flush_write_fifo(endpoint_t * ep)
{
	if (ep->reg->write_fifo_status >= 0) {
		au_writel(USBDEV_FSTAT_FLUSH | USBDEV_FSTAT_UF |
			  USBDEV_FSTAT_OF,
			  ep->reg->write_fifo_status);
		//udelay(100);
		//au_writel(USBDEV_FSTAT_UF | USBDEV_FSTAT_OF,
		//	  ep->reg->write_fifo_status);
	}
}

/* EP spinlock must be held when calling */
static inline void
flush_read_fifo(endpoint_t * ep)
{
	if (ep->reg->read_fifo_status >= 0) {
		au_writel(USBDEV_FSTAT_FLUSH | USBDEV_FSTAT_UF |
			  USBDEV_FSTAT_OF,
			  ep->reg->read_fifo_status);
		//udelay(100);
		//au_writel(USBDEV_FSTAT_UF | USBDEV_FSTAT_OF,
		//	  ep->reg->read_fifo_status);
	}
}


/* EP spinlock must be held when calling. */
static void
endpoint_flush(endpoint_t * ep)
{
	// First, flush all packets
	flush_pkt_list(&ep->inlist);
	flush_pkt_list(&ep->outlist);

	// Now flush the endpoint's h/w FIFO(s)
	flush_write_fifo(ep);
	flush_read_fifo(ep);
}

/* EP spinlock must be held when calling. */
static void
endpoint_stall(endpoint_t * ep)
{
	u32 cs;

	warn(__FUNCTION__);

	cs = au_readl(ep->reg->ctrl_stat) | USBDEV_CS_STALL;
	au_writel(cs, ep->reg->ctrl_stat);
}

/* EP spinlock must be held when calling. */
static void
endpoint_unstall(endpoint_t * ep)
{
	u32 cs;

	warn(__FUNCTION__);

	cs = au_readl(ep->reg->ctrl_stat) & ~USBDEV_CS_STALL;
	au_writel(cs, ep->reg->ctrl_stat);
}

static void
endpoint_reset_datatoggle(endpoint_t * ep)
{
	// FIXME: is this possible?
}


/* EP spinlock must be held when calling. */
static int
endpoint_fifo_read(endpoint_t * ep)
{
	int read_count = 0;
	u8 *bufptr;
	usbdev_pkt_t *pkt = ep->outlist.tail;

	if (!pkt)
		return -EINVAL;

	bufptr = &pkt->payload[pkt->size];
	while (au_readl(ep->reg->read_fifo_status) & USBDEV_FSTAT_FCNT_MASK) {
		*bufptr++ = au_readl(ep->reg->read_fifo) & 0xff;
		read_count++;
		pkt->size++;
	}

	return read_count;
}



/*
 * This routine is called to restart transmission of a packet.
 * The endpoint's TSIZE must be set to the new packet's size,
 * and DMA to the write FIFO needs to be restarted.
 * EP spinlock must be held when calling.
 */
static void
kickstart_send_packet(endpoint_t * ep)
{
	u32 cs;
	usbdev_pkt_t *pkt = ep->inlist.head;

	vdbg(__FUNCTION__ ": ep%d, pkt=%p", ep->address, pkt);

	if (!pkt) {
		err(__FUNCTION__ ": head=NULL! list->count=%d",
		    ep->inlist.count);
		return;
	}

	dma_cache_wback_inv((unsigned long)pkt->payload, pkt->size);

	/*
	 * make sure FIFO is empty
	 */
	flush_write_fifo(ep);

	cs = au_readl(ep->reg->ctrl_stat) & USBDEV_CS_STALL;
	cs |= (pkt->size << USBDEV_CS_TSIZE_BIT);
	au_writel(cs, ep->reg->ctrl_stat);

	if (get_dma_active_buffer(ep->indma) == 1) {
		set_dma_count1(ep->indma, pkt->size);
		set_dma_addr1(ep->indma, virt_to_phys(pkt->payload));
		enable_dma_buffer1(ep->indma);	// reenable
	} else {
		set_dma_count0(ep->indma, pkt->size);
		set_dma_addr0(ep->indma, virt_to_phys(pkt->payload));
		enable_dma_buffer0(ep->indma);	// reenable
	}
	if (dma_halted(ep->indma))
		start_dma(ep->indma);
}


/*
 * This routine is called when a packet in the inlist has been
 * completed. Frees the completed packet and starts sending the
 * next. EP spinlock must be held when calling.
 */
static usbdev_pkt_t *
send_packet_complete(endpoint_t * ep)
{
	usbdev_pkt_t *pkt = unlink_head(&ep->inlist);

	if (pkt) {
		pkt->status =
			(au_readl(ep->reg->ctrl_stat) & USBDEV_CS_NAK) ?
			PKT_STATUS_NAK : PKT_STATUS_ACK;

		vdbg(__FUNCTION__ ": ep%d, %s pkt=%p, list count=%d",
		     ep->address, (pkt->status & PKT_STATUS_NAK) ?
		     "NAK" : "ACK", pkt, ep->inlist.count);
	}

	/*
	 * The write fifo should already be drained if things are
	 * working right, but flush it anyway just in case.
	 */
	flush_write_fifo(ep);

	// begin transmitting next packet in the inlist
	if (ep->inlist.count) {
		kickstart_send_packet(ep);
	}

	return pkt;
}

/*
 * Add a new packet to the tail of the given ep's packet
 * inlist. The transmit complete interrupt frees packets from
 * the head of this list. EP spinlock must be held when calling.
 */
static int
send_packet(struct usb_dev* dev, usbdev_pkt_t *pkt, int async)
{
	pkt_list_t *list;
	endpoint_t* ep;

	if (!pkt || !(ep = epaddr_to_ep(dev, pkt->ep_addr)))
		return -EINVAL;

	if (!pkt->size)
		return 0;

	list = &ep->inlist;

	if (!async && list->count) {
		halt_dma(ep->indma);
		flush_pkt_list(list);
	}

	link_tail(ep, list, pkt);

	vdbg(__FUNCTION__ ": ep%d, pkt=%p, size=%d, list count=%d",
	     ep->address, pkt, pkt->size, list->count);

	if (list->count == 1) {
		/*
		 * if the packet count is one, it means the list was empty,
		 * and no more data will go out this ep until we kick-start
		 * it again.
		 */
		kickstart_send_packet(ep);
	}

	return pkt->size;
}

/*
 * This routine is called to restart reception of a packet.
 * EP spinlock must be held when calling.
 */
static void
kickstart_receive_packet(endpoint_t * ep)
{
	usbdev_pkt_t *pkt;

	// get and link a new packet for next reception
	if (!(pkt = add_packet(ep, &ep->outlist, ep->max_pkt_size))) {
		err(__FUNCTION__ ": could not alloc new packet");
		return;
	}

	if (get_dma_active_buffer(ep->outdma) == 1) {
		clear_dma_done1(ep->outdma);
		set_dma_count1(ep->outdma, ep->max_pkt_size);
		set_dma_count0(ep->outdma, 0);
		set_dma_addr1(ep->outdma, virt_to_phys(pkt->payload));
		enable_dma_buffer1(ep->outdma);	// reenable
	} else {
		clear_dma_done0(ep->outdma);
		set_dma_count0(ep->outdma, ep->max_pkt_size);
		set_dma_count1(ep->outdma, 0);
		set_dma_addr0(ep->outdma, virt_to_phys(pkt->payload));
		enable_dma_buffer0(ep->outdma);	// reenable
	}
	if (dma_halted(ep->outdma))
		start_dma(ep->outdma);
}


static usbdev_pkt_t *
receive_packet_complete(endpoint_t * ep)
{
	usbdev_pkt_t *pkt = ep->outlist.tail;
	u32 cs;

	halt_dma(ep->outdma);

	cs = au_readl(ep->reg->ctrl_stat);

	if (!pkt)
		return NULL;

	pkt->size = ep->max_pkt_size - get_dma_residue(ep->outdma);
	if (pkt->size)
		dma_cache_inv((unsigned long)pkt->payload, pkt->size);
	/*
	 * need to pull out any remaining bytes in the FIFO.
	 */
	endpoint_fifo_read(ep);
	/*
	 * should be drained now, but flush anyway just in case.
	 */
	flush_read_fifo(ep);

	pkt->status = (cs & USBDEV_CS_NAK) ? PKT_STATUS_NAK : PKT_STATUS_ACK;
	if (ep->address == 0 && (cs & USBDEV_CS_SU))
		pkt->status |= PKT_STATUS_SU;

	vdbg(__FUNCTION__ ": ep%d, %s pkt=%p, size=%d",
	     ep->address, (pkt->status & PKT_STATUS_NAK) ?
	     "NAK" : "ACK", pkt, pkt->size);

	kickstart_receive_packet(ep);

	return pkt;
}


/*
 ****************************************************************************
 * Here start the standard device request handlers. They are
 * all called by do_setup() via a table of function pointers.
 ****************************************************************************
 */

static ep0_stage_t
do_get_status(struct usb_dev* dev, struct usb_ctrlrequest* setup)
{
	switch (setup->bRequestType) {
	case 0x80:	// Device
		// FIXME: send device status
		break;
	case 0x81:	// Interface
		// FIXME: send interface status
		break;
	case 0x82:	// End Point
		// FIXME: send endpoint status
		break;
	default:
		// Invalid Command
		endpoint_stall(&dev->ep[0]); // Stall End Point 0
		break;
	}

	return STATUS_STAGE;
}

static ep0_stage_t
do_clear_feature(struct usb_dev* dev, struct usb_ctrlrequest* setup)
{
	switch (setup->bRequestType) {
	case 0x00:	// Device
		if ((le16_to_cpu(setup->wValue) & 0xff) == 1)
			dev->remote_wakeup_en = 0;
		else
			endpoint_stall(&dev->ep[0]);
		break;
	case 0x02:	// End Point
		if ((le16_to_cpu(setup->wValue) & 0xff) == 0) {
			endpoint_t *ep =
				epaddr_to_ep(dev,
					     le16_to_cpu(setup->wIndex) & 0xff);

			endpoint_unstall(ep);
			endpoint_reset_datatoggle(ep);
		} else
			endpoint_stall(&dev->ep[0]);
		break;
	}

	return SETUP_STAGE;
}

static ep0_stage_t
do_reserved(struct usb_dev* dev, struct usb_ctrlrequest* setup)
{
	// Invalid request, stall End Point 0
	endpoint_stall(&dev->ep[0]);
	return SETUP_STAGE;
}

static ep0_stage_t
do_set_feature(struct usb_dev* dev, struct usb_ctrlrequest* setup)
{
	switch (setup->bRequestType) {
	case 0x00:	// Device
		if ((le16_to_cpu(setup->wValue) & 0xff) == 1)
			dev->remote_wakeup_en = 1;
		else
			endpoint_stall(&dev->ep[0]);
		break;
	case 0x02:	// End Point
		if ((le16_to_cpu(setup->wValue) & 0xff) == 0) {
			endpoint_t *ep =
				epaddr_to_ep(dev,
					     le16_to_cpu(setup->wIndex) & 0xff);

			endpoint_stall(ep);
		} else
			endpoint_stall(&dev->ep[0]);
		break;
	}

	return SETUP_STAGE;
}

static ep0_stage_t
do_set_address(struct usb_dev* dev, struct usb_ctrlrequest* setup)
{
	int new_state = dev->state;
	int new_addr = le16_to_cpu(setup->wValue);

	dbg(__FUNCTION__ ": our address=%d", new_addr);

	if (new_addr > 127) {
		// usb spec doesn't tell us what to do, so just go to
		// default state
		new_state = DEFAULT;
		dev->address = 0;
	} else if (dev->address != new_addr) {
		dev->address = new_addr;
		new_state = ADDRESS;
	}

	if (dev->state != new_state) {
		dev->state = new_state;
		/* inform function layer of usbdev state change */
		dev->func_cb(CB_NEW_STATE, dev->state, dev->cb_data);
	}

	return SETUP_STAGE;
}

static ep0_stage_t
do_get_descriptor(struct usb_dev* dev, struct usb_ctrlrequest* setup)
{
	int strnum, desc_len = le16_to_cpu(setup->wLength);

	switch (le16_to_cpu(setup->wValue) >> 8) {
	case USB_DT_DEVICE:
		// send device descriptor!
		desc_len = desc_len > dev->dev_desc->bLength ?
			dev->dev_desc->bLength : desc_len;
		dbg("sending device desc, size=%d", desc_len);
		send_packet(dev, alloc_packet(&dev->ep[0], desc_len,
					      dev->dev_desc), 0);
		break;
	case USB_DT_CONFIG:
		// If the config descr index in low-byte of
		// setup->wValue is valid, send config descr,
		// otherwise stall ep0.
		if ((le16_to_cpu(setup->wValue) & 0xff) == 0) {
			// send config descriptor!
			if (desc_len <= USB_DT_CONFIG_SIZE) {
				dbg("sending partial config desc, size=%d",
				    desc_len);
				send_packet(dev,
					    alloc_packet(&dev->ep[0],
							 desc_len,
							 dev->conf_desc),
					    0);
			} else {
				int len = dev->conf_desc->wTotalLength;
				dbg("sending whole config desc,"
				    " size=%d, our size=%d", desc_len, len);
				desc_len = desc_len > len ? len : desc_len;
				send_packet(dev,
					    alloc_packet(&dev->ep[0],
							 desc_len,
							 dev->full_conf_desc),
					    0);
			}
		} else
			endpoint_stall(&dev->ep[0]);
		break;
	case USB_DT_STRING:
		// If the string descr index in low-byte of setup->wValue
		// is valid, send string descr, otherwise stall ep0.
		strnum = le16_to_cpu(setup->wValue) & 0xff;
		if (strnum >= 0 && strnum < 6) {
			struct usb_string_descriptor *desc =
				dev->str_desc[strnum];
			desc_len = desc_len > desc->bLength ?
				desc->bLength : desc_len;
			dbg("sending string desc %d", strnum);
			send_packet(dev,
				    alloc_packet(&dev->ep[0], desc_len,
						 desc), 0);
		} else
			endpoint_stall(&dev->ep[0]);
		break;
	default:
		// Invalid request
		err("invalid get desc=%d, stalled",
		    le16_to_cpu(setup->wValue) >> 8);
		endpoint_stall(&dev->ep[0]);	// Stall endpoint 0
		break;
	}

	return STATUS_STAGE;
}

static ep0_stage_t
do_set_descriptor(struct usb_dev* dev, struct usb_ctrlrequest* setup)
{
	// TODO: implement
	// there will be an OUT data stage (the descriptor to set)
	return DATA_STAGE;
}

static ep0_stage_t
do_get_configuration(struct usb_dev* dev, struct usb_ctrlrequest* setup)
{
	// send dev->configuration
	dbg("sending config");
	send_packet(dev, alloc_packet(&dev->ep[0], 1, &dev->configuration),
		    0);
	return STATUS_STAGE;
}

static ep0_stage_t
do_set_configuration(struct usb_dev* dev, struct usb_ctrlrequest* setup)
{
	// set active config to low-byte of setup->wValue
	dev->configuration = le16_to_cpu(setup->wValue) & 0xff;
	dbg("set config, config=%d", dev->configuration);
	if (!dev->configuration && dev->state > DEFAULT) {
		dev->state = ADDRESS;
		/* inform function layer of usbdev state change */
		dev->func_cb(CB_NEW_STATE, dev->state, dev->cb_data);
	} else if (dev->configuration == 1) {
		dev->state = CONFIGURED;
		/* inform function layer of usbdev state change */
		dev->func_cb(CB_NEW_STATE, dev->state, dev->cb_data);
	} else {
		// FIXME: "respond with request error" - how?
	}

	return SETUP_STAGE;
}

static ep0_stage_t
do_get_interface(struct usb_dev* dev, struct usb_ctrlrequest* setup)
{
	// interface must be zero.
	if ((le16_to_cpu(setup->wIndex) & 0xff) || dev->state == ADDRESS) {
		// FIXME: respond with "request error". how?
	} else if (dev->state == CONFIGURED) {
		// send dev->alternate_setting
		dbg("sending alt setting");
		send_packet(dev, alloc_packet(&dev->ep[0], 1,
					      &dev->alternate_setting), 0);
	}

	return STATUS_STAGE;

}

static ep0_stage_t
do_set_interface(struct usb_dev* dev, struct usb_ctrlrequest* setup)
{
	if (dev->state == ADDRESS) {
		// FIXME: respond with "request error". how?
	} else if (dev->state == CONFIGURED) {
		dev->interface = le16_to_cpu(setup->wIndex) & 0xff;
		dev->alternate_setting =
			le16_to_cpu(setup->wValue) & 0xff;
		// interface and alternate_setting must be zero
		if (dev->interface || dev->alternate_setting) {
			// FIXME: respond with "request error". how?
		}
	}

	return SETUP_STAGE;
}

static ep0_stage_t
do_synch_frame(struct usb_dev* dev, struct usb_ctrlrequest* setup)
{
	// TODO
	return SETUP_STAGE;
}

typedef ep0_stage_t (*req_method_t)(struct usb_dev* dev,
				    struct usb_ctrlrequest* setup);


/* Table of the standard device request handlers */
static const req_method_t req_method[] = {
	do_get_status,
	do_clear_feature,
	do_reserved,
	do_set_feature,
	do_reserved,
	do_set_address,
	do_get_descriptor,
	do_set_descriptor,
	do_get_configuration,
	do_set_configuration,
	do_get_interface,
	do_set_interface,
	do_synch_frame
};


// SETUP packet request dispatcher
static void
do_setup (struct usb_dev* dev, struct usb_ctrlrequest* setup)
{
	req_method_t m;

	dbg(__FUNCTION__ ": req %d %s", setup->bRequest,
	    get_std_req_name(setup->bRequest));

	if ((setup->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD ||
	    (setup->bRequestType & USB_RECIP_MASK) != USB_RECIP_DEVICE) {
		err(__FUNCTION__ ": invalid requesttype 0x%02x",
		    setup->bRequestType);
		return;
	}

	if ((setup->bRequestType & 0x80) == USB_DIR_OUT && setup->wLength)
		dbg(__FUNCTION__ ": OUT phase! length=%d", setup->wLength);

	if (setup->bRequest < sizeof(req_method)/sizeof(req_method_t))
		m = req_method[setup->bRequest];
	else
		m = do_reserved;

	dev->ep0_stage = (*m)(dev, setup);
}
896
897/*
898 * A SETUP, DATA0, or DATA1 packet has been received
899 * on the default control endpoint's fifo.
900 */
901static void
902process_ep0_receive (struct usb_dev* dev)
903{
904	endpoint_t *ep0 = &dev->ep[0];
905	usbdev_pkt_t *pkt;
906
907	spin_lock(&ep0->lock);
908
909		// complete packet and prepare a new packet
910	pkt = receive_packet_complete(ep0);
911	if (!pkt) {
912		// FIXME: should  put a warn/err here.
913		spin_unlock(&ep0->lock);
914			return;
915		}
916
917	// unlink immediately from endpoint.
918	unlink_head(&ep0->outlist);
919
920	// override current stage if h/w says it's a setup packet
921	if (pkt->status & PKT_STATUS_SU)
922		dev->ep0_stage = SETUP_STAGE;
923
924	switch (dev->ep0_stage) {
925	case SETUP_STAGE:
926		vdbg("SU bit is %s in setup stage",
927		     (pkt->status & PKT_STATUS_SU) ? "set" : "not set");
928
929			if (pkt->size == sizeof(struct usb_ctrlrequest)) {
930#ifdef VDEBUG
931			if (pkt->status & PKT_STATUS_ACK)
932				vdbg("received SETUP");
933				else
934				vdbg("received NAK SETUP");
935#endif
936			do_setup(dev, (struct usb_ctrlrequest*)pkt->payload);
937		} else
938			err(__FUNCTION__ ": wrong size SETUP received");
939		break;
940	case DATA_STAGE:
941		/*
942		 * this setup has an OUT data stage. Of the standard
943		 * device requests, only set_descriptor has this stage,
944		 * so this packet is that descriptor. TODO: drop it for
945		 * now, set_descriptor not implemented.
946		 *
947		 * Need to place a byte in the write FIFO here, to prepare
948		 * to send a zero-length DATA ack packet to the host in the
949		 * STATUS stage.
950		 */
951		au_writel(0, ep0->reg->write_fifo);
952		dbg("received OUT stage DATAx on EP0, size=%d", pkt->size);
953		dev->ep0_stage = SETUP_STAGE;
954		break;
955	case STATUS_STAGE:
956		// this setup had an IN data stage, and host is ACK'ing
957		// the packet we sent during that stage.
958		if (pkt->size != 0)
959			warn("received non-zero ACK on EP0??");
960#ifdef VDEBUG
961		else
962			vdbg("received ACK on EP0");
963#endif
964		dev->ep0_stage = SETUP_STAGE;
965		break;
966		}
967
968	spin_unlock(&ep0->lock);
969		// we're done processing the packet, free it
970		kfree(pkt);
971}


/*
 * A DATA0/1 packet has been received on one of the OUT endpoints (4 or 5)
 */
static void
process_ep_receive (struct usb_dev* dev, endpoint_t *ep)
{
	usbdev_pkt_t *pkt;

	spin_lock(&ep->lock);
	pkt = receive_packet_complete(ep);
	spin_unlock(&ep->lock);

	dev->func_cb(CB_PKT_COMPLETE, (unsigned long)pkt, dev->cb_data);
}



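/*
 * Interrupt scheme: packet reception completes by way of the USBD
 * receive-FIFO-complete interrupts handled in req_sus_intr() below,
 * while packet transmission completes by way of the per-channel DMA
 * done interrupts handled in dma_done_ep0_intr()/dma_done_ep_intr()
 * (see the USBD_INTEN setting in usbdev_init()).
 */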
/* This ISR handles the receive complete and suspend events */
static void
req_sus_intr (int irq, void *dev_id, struct pt_regs *regs)
{
	struct usb_dev *dev = (struct usb_dev *) dev_id;
	u32 status;

	status = au_readl(USBD_INTSTAT);
	au_writel(status, USBD_INTSTAT);	// ack'em

	if (status & (1<<0))
		process_ep0_receive(dev);
	if (status & (1<<4))
		process_ep_receive(dev, &dev->ep[4]);
	if (status & (1<<5))
		process_ep_receive(dev, &dev->ep[5]);
}


/* This ISR handles the DMA done events on EP0 */
static void
dma_done_ep0_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct usb_dev *dev = (struct usb_dev *) dev_id;
	usbdev_pkt_t* pkt;
	endpoint_t *ep0 = &dev->ep[0];
	u32 cs0, buff_done;

	spin_lock(&ep0->lock);
	cs0 = au_readl(ep0->reg->ctrl_stat);

	// first check packet transmit done
	if ((buff_done = get_dma_buffer_done(ep0->indma)) != 0) {
		// transmitted a DATAx packet during DATA stage
		// on control endpoint 0
		// clear DMA done bit
		if (buff_done & DMA_D0)
			clear_dma_done0(ep0->indma);
		if (buff_done & DMA_D1)
			clear_dma_done1(ep0->indma);

		pkt = send_packet_complete(ep0);
		if (pkt)
			kfree(pkt);
	}

	/*
	 * Now check packet receive done. Shouldn't get these,
	 * the receive packet complete intr should happen
	 * before the DMA done intr occurs.
	 */
	if ((buff_done = get_dma_buffer_done(ep0->outdma)) != 0) {
		// clear DMA done bit
		if (buff_done & DMA_D0)
			clear_dma_done0(ep0->outdma);
		if (buff_done & DMA_D1)
			clear_dma_done1(ep0->outdma);

		//process_ep0_receive(dev);
	}

	spin_unlock(&ep0->lock);
}

/* This ISR handles the DMA done events on endpoints 2,3,4,5 */
static void
dma_done_ep_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct usb_dev *dev = (struct usb_dev *) dev_id;
	int i;

	for (i = 2; i < 6; i++) {
		u32 buff_done;
		usbdev_pkt_t* pkt;
		endpoint_t *ep = &dev->ep[i];

		if (!ep->active) continue;

		spin_lock(&ep->lock);

		if (ep->direction == USB_DIR_IN) {
			buff_done = get_dma_buffer_done(ep->indma);
			if (buff_done != 0) {
				// transmitted a DATAx pkt on the IN ep
				// clear DMA done bit
				if (buff_done & DMA_D0)
					clear_dma_done0(ep->indma);
				if (buff_done & DMA_D1)
					clear_dma_done1(ep->indma);

				pkt = send_packet_complete(ep);

				spin_unlock(&ep->lock);
				dev->func_cb(CB_PKT_COMPLETE,
					     (unsigned long)pkt,
					     dev->cb_data);
				spin_lock(&ep->lock);
			}
		} else {
			/*
			 * Check packet receive done (OUT ep). Shouldn't get
			 * these, the rx packet complete intr should happen
			 * before the DMA done intr occurs.
			 */
			buff_done = get_dma_buffer_done(ep->outdma);
			if (buff_done != 0) {
				// received a DATAx pkt on the OUT ep
				// clear DMA done bit
				if (buff_done & DMA_D0)
					clear_dma_done0(ep->outdma);
				if (buff_done & DMA_D1)
					clear_dma_done1(ep->outdma);

				//process_ep_receive(dev, ep);
			}
		}

		spin_unlock(&ep->lock);
	}
}


/***************************************************************************
 * Here begin the external interface functions
 ***************************************************************************
 */
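
/*
 * Usage sketch (illustrative only, not part of this driver): a
 * function-layer driver registers itself with usbdev_init() and then
 * moves data with the packet calls below.  The callback and descriptor
 * names here are hypothetical.
 *
 *	static void my_func_cb(usbdev_cb_type_t type, unsigned long arg,
 *			       void *data)
 *	{
 *		if (type == CB_PKT_COMPLETE)
 *			kfree((usbdev_pkt_t *)arg);	// pkt we queued earlier
 *	}
 *
 *	usbdev_init(&my_dev_desc, &my_conf_desc, &my_if_desc, my_ep_desc,
 *		    my_str_desc, my_func_cb, NULL);
 *
 *	usbdev_pkt_t *pkt;
 *	if (usbdev_alloc_packet(my_in_ep_addr, len, &pkt) == 0) {
 *		memcpy(pkt->payload, buf, len);
 *		usbdev_send_packet(my_in_ep_addr, pkt);
 *	}
 */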

/*
 * allocate a new packet
 */
int
usbdev_alloc_packet(int ep_addr, int data_size, usbdev_pkt_t** pkt)
{
	endpoint_t * ep = epaddr_to_ep(&usbdev, ep_addr);
	usbdev_pkt_t* lpkt = NULL;

	if (!ep || !ep->active || ep->address < 2)
		return -ENODEV;
	if (data_size > ep->max_pkt_size)
		return -EINVAL;

	lpkt = *pkt = alloc_packet(ep, data_size, NULL);
	if (!lpkt)
		return -ENOMEM;
	return 0;
}


/*
 * packet send
 */
int
usbdev_send_packet(int ep_addr, usbdev_pkt_t * pkt)
{
	unsigned long flags;
	int count;
	endpoint_t * ep;

	if (!pkt || !(ep = epaddr_to_ep(&usbdev, pkt->ep_addr)) ||
	    !ep->active || ep->address < 2)
		return -ENODEV;
	if (ep->direction != USB_DIR_IN)
		return -EINVAL;

	spin_lock_irqsave(&ep->lock, flags);
	count = send_packet(&usbdev, pkt, 1);
	spin_unlock_irqrestore(&ep->lock, flags);

	return count;
}

/*
 * packet receive
 */
int
usbdev_receive_packet(int ep_addr, usbdev_pkt_t** pkt)
{
	unsigned long flags;
	usbdev_pkt_t* lpkt = NULL;
	endpoint_t *ep = epaddr_to_ep(&usbdev, ep_addr);

	if (!ep || !ep->active || ep->address < 2)
		return -ENODEV;
	if (ep->direction != USB_DIR_OUT)
		return -EINVAL;

	spin_lock_irqsave(&ep->lock, flags);
	if (ep->outlist.count > 1)
		lpkt = unlink_head(&ep->outlist);
	spin_unlock_irqrestore(&ep->lock, flags);

	if (!lpkt) {
		/* no packet available */
		*pkt = NULL;
		return -ENODATA;
	}

	*pkt = lpkt;

	return lpkt->size;
}


/*
 * return total queued byte count on the endpoint.
 */
int
usbdev_get_byte_count(int ep_addr)
{
	unsigned long flags;
	pkt_list_t *list;
	usbdev_pkt_t *scan;
	int count = 0;
	endpoint_t * ep = epaddr_to_ep(&usbdev, ep_addr);

	if (!ep || !ep->active || ep->address < 2)
		return -ENODEV;

	if (ep->direction == USB_DIR_IN) {
		list = &ep->inlist;

		spin_lock_irqsave(&ep->lock, flags);
		for (scan = list->head; scan; scan = scan->next)
			count += scan->size;
		spin_unlock_irqrestore(&ep->lock, flags);
	} else {
		list = &ep->outlist;

		spin_lock_irqsave(&ep->lock, flags);
		if (list->count > 1) {
			for (scan = list->head; scan != list->tail;
			     scan = scan->next)
				count += scan->size;
		}
		spin_unlock_irqrestore(&ep->lock, flags);
	}

	return count;
}


void
usbdev_exit(void)
{
	endpoint_t *ep;
	int i;

	au_writel(0, USBD_INTEN);	// disable usb dev ints
	au_writel(0, USBD_ENABLE);	// disable usb dev

	free_irq(AU1000_USB_DEV_REQ_INT, &usbdev);
	free_irq(AU1000_USB_DEV_SUS_INT, &usbdev);

	// free all control endpoint resources
	ep = &usbdev.ep[0];
	free_au1000_dma(ep->indma);
	free_au1000_dma(ep->outdma);
	endpoint_flush(ep);

	// free ep resources
	for (i = 2; i < 6; i++) {
		ep = &usbdev.ep[i];
		if (!ep->active) continue;

		if (ep->direction == USB_DIR_IN) {
			free_au1000_dma(ep->indma);
		} else {
			free_au1000_dma(ep->outdma);
		}
		endpoint_flush(ep);
	}

	if (usbdev.full_conf_desc)
		kfree(usbdev.full_conf_desc);
}

int
usbdev_init(struct usb_device_descriptor* dev_desc,
	    struct usb_config_descriptor* config_desc,
	    struct usb_interface_descriptor* if_desc,
	    struct usb_endpoint_descriptor* ep_desc,
	    struct usb_string_descriptor* str_desc[],
	    void (*cb)(usbdev_cb_type_t, unsigned long, void *),
	    void* cb_data)
{
	endpoint_t *ep0;
	int i, ret=0;
	u8* fcd;

	if (dev_desc->bNumConfigurations > 1 ||
	    config_desc->bNumInterfaces > 1 ||
	    if_desc->bNumEndpoints > 4) {
		err("Only one config, one i/f, and no more "
		    "than 4 ep's allowed");
		ret = -EINVAL;
		goto out;
	}

	if (!cb) {
		err("Function-layer callback required");
		ret = -EINVAL;
		goto out;
	}

	if (dev_desc->bMaxPacketSize0 != USBDEV_EP0_MAX_PACKET_SIZE) {
		warn("EP0 Max Packet size must be %d",
		     USBDEV_EP0_MAX_PACKET_SIZE);
		dev_desc->bMaxPacketSize0 = USBDEV_EP0_MAX_PACKET_SIZE;
	}

	memset(&usbdev, 0, sizeof(struct usb_dev));

	usbdev.state = DEFAULT;
	usbdev.dev_desc = dev_desc;
	usbdev.if_desc = if_desc;
	usbdev.conf_desc = config_desc;
	for (i=0; i<6; i++)
		usbdev.str_desc[i] = str_desc[i];
	usbdev.func_cb = cb;
	usbdev.cb_data = cb_data;

	/* Initialize default control endpoint */
	ep0 = &usbdev.ep[0];
	ep0->active = 1;
	ep0->type = CONTROL_EP;
	ep0->max_pkt_size = USBDEV_EP0_MAX_PACKET_SIZE;
	spin_lock_init(&ep0->lock);
	ep0->desc = NULL;	// ep0 has no descriptor
	ep0->address = 0;
	ep0->direction = 0;
	ep0->reg = &ep_reg[0];

	/* Initialize the other requested endpoints */
	for (i = 0; i < if_desc->bNumEndpoints; i++) {
		struct usb_endpoint_descriptor* epd = &ep_desc[i];
		endpoint_t *ep;

		if ((epd->bEndpointAddress & 0x80) == USB_DIR_IN) {
			ep = &usbdev.ep[2];
			ep->address = 2;
			if (ep->active) {
				ep = &usbdev.ep[3];
				ep->address = 3;
				if (ep->active) {
					err("too many IN ep's requested");
					ret = -ENODEV;
					goto out;
				}
			}
		} else {
			ep = &usbdev.ep[4];
			ep->address = 4;
			if (ep->active) {
				ep = &usbdev.ep[5];
				ep->address = 5;
				if (ep->active) {
					err("too many OUT ep's requested");
					ret = -ENODEV;
					goto out;
				}
			}
		}

		ep->active = 1;
		epd->bEndpointAddress &= ~0x0f;
		epd->bEndpointAddress |= (u8)ep->address;
		ep->direction = epd->bEndpointAddress & 0x80;
		ep->type = epd->bmAttributes & 0x03;
		ep->max_pkt_size = epd->wMaxPacketSize;
		spin_lock_init(&ep->lock);
		ep->desc = epd;
		ep->reg = &ep_reg[ep->address];
	}

	/*
	 * initialize the full config descriptor
	 */
	usbdev.full_conf_desc = fcd = kmalloc(config_desc->wTotalLength,
					      ALLOC_FLAGS);
	if (!fcd) {
		err("failed to alloc full config descriptor");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(fcd, config_desc, USB_DT_CONFIG_SIZE);
	fcd += USB_DT_CONFIG_SIZE;
	memcpy(fcd, if_desc, USB_DT_INTERFACE_SIZE);
	fcd += USB_DT_INTERFACE_SIZE;
	for (i = 0; i < if_desc->bNumEndpoints; i++) {
		memcpy(fcd, &ep_desc[i], USB_DT_ENDPOINT_SIZE);
		fcd += USB_DT_ENDPOINT_SIZE;
	}

	/* Now we're ready to enable the controller */
	au_writel(0x0002, USBD_ENABLE);
	udelay(100);
	au_writel(0x0003, USBD_ENABLE);
	udelay(100);

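	/*
	 * Note on the config table: for each FIFO the loop below pushes
	 * five values to USBD_CONFIG, in order: (FIFO number << 4) | 0x04,
	 * the upper bits of the max packet size combined with the
	 * direction and transfer type, the low seven bits of the max
	 * packet size shifted left by one, a zero byte, and the FIFO
	 * number again.  Inactive FIFOs get a default bulk setup with a
	 * 16-byte max packet size.
	 */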
	/* build and send config table based on ep descriptors */
	for (i = 0; i < 6; i++) {
		endpoint_t *ep;
		if (i == 1)
			continue; // skip dummy ep
		ep = &usbdev.ep[i];
		if (ep->active) {
			au_writel((ep->address << 4) | 0x04, USBD_CONFIG);
			au_writel(((ep->max_pkt_size & 0x380) >> 7) |
				  (ep->direction >> 4) | (ep->type << 4),
				  USBD_CONFIG);
			au_writel((ep->max_pkt_size & 0x7f) << 1, USBD_CONFIG);
			au_writel(0x00, USBD_CONFIG);
			au_writel(ep->address, USBD_CONFIG);
		} else {
			u8 dir = (i==2 || i==3) ? DIR_IN : DIR_OUT;
			au_writel((i << 4) | 0x04, USBD_CONFIG);
			au_writel(((16 & 0x380) >> 7) | dir |
				  (BULK_EP << 4), USBD_CONFIG);
			au_writel((16 & 0x7f) << 1, USBD_CONFIG);
			au_writel(0x00, USBD_CONFIG);
			au_writel(i, USBD_CONFIG);
		}
	}

	/*
	 * Enable Receive FIFO Complete interrupts only. Transmit
	 * complete is being handled by the DMA done interrupts.
	 */
	au_writel(0x31, USBD_INTEN);

	/*
	 * Controller is now enabled, request DMA and IRQ
	 * resources.
	 */

	/* request the USB device transfer complete interrupt */
	if (request_irq(AU1000_USB_DEV_REQ_INT, req_sus_intr, SA_INTERRUPT,
			"USBdev req", &usbdev)) {
		err("Can't get device request intr");
		ret = -ENXIO;
		goto out;
	}
	/* request the USB device suspend interrupt */
	if (request_irq(AU1000_USB_DEV_SUS_INT, req_sus_intr, SA_INTERRUPT,
			"USBdev sus", &usbdev)) {
		err("Can't get device suspend intr");
		ret = -ENXIO;
		goto out;
	}

	/* Request EP0 DMA and IRQ */
	if ((ep0->indma = request_au1000_dma(ep_dma_id[0].id,
					     ep_dma_id[0].str,
					     dma_done_ep0_intr,
					     SA_INTERRUPT,
					     &usbdev)) < 0) {
		err("Can't get %s DMA", ep_dma_id[0].str);
		ret = -ENXIO;
		goto out;
	}
	if ((ep0->outdma = request_au1000_dma(ep_dma_id[1].id,
					      ep_dma_id[1].str,
					      NULL, 0, NULL)) < 0) {
		err("Can't get %s DMA", ep_dma_id[1].str);
		ret = -ENXIO;
		goto out;
	}

	// Flush the ep0 buffers and FIFOs
	endpoint_flush(ep0);
	// start packet reception on ep0
	kickstart_receive_packet(ep0);

	/* Request DMA and IRQ for the other endpoints */
	for (i = 2; i < 6; i++) {
		endpoint_t *ep = &usbdev.ep[i];
		if (!ep->active)
			continue;

		// Flush the endpoint buffers and FIFOs
		endpoint_flush(ep);

		if (ep->direction == USB_DIR_IN) {
			ep->indma =
				request_au1000_dma(ep_dma_id[ep->address].id,
						   ep_dma_id[ep->address].str,
						   dma_done_ep_intr,
						   SA_INTERRUPT,
						   &usbdev);
			if (ep->indma < 0) {
				err("Can't get %s DMA",
				    ep_dma_id[ep->address].str);
				ret = -ENXIO;
				goto out;
			}
		} else {
			ep->outdma =
				request_au1000_dma(ep_dma_id[ep->address].id,
						   ep_dma_id[ep->address].str,
						   NULL, 0, NULL);
			if (ep->outdma < 0) {
				err("Can't get %s DMA",
				    ep_dma_id[ep->address].str);
				ret = -ENXIO;
				goto out;
			}

			// start packet reception on OUT endpoint
			kickstart_receive_packet(ep);
		}
	}

 out:
	if (ret)
		usbdev_exit();
	return ret;
}

EXPORT_SYMBOL(usbdev_init);
EXPORT_SYMBOL(usbdev_exit);
EXPORT_SYMBOL(usbdev_alloc_packet);
EXPORT_SYMBOL(usbdev_receive_packet);
EXPORT_SYMBOL(usbdev_send_packet);
EXPORT_SYMBOL(usbdev_get_byte_count);