1/*
2 * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
3 *
4 * Copyright (C) 2000-2002 Lineo
5 *      by Stuart Lynne, Tom Rushworth, and Bruce Balden
6 * Copyright (C) 2002 Toshiba Corporation
7 * Copyright (C) 2003 MontaVista Software (source@mvista.com)
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2.  This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14/*
15 * This device has ep0 and three semi-configurable bulk/interrupt endpoints.
16 *
17 *  - Endpoint numbering is fixed: ep{1,2,3}-bulk
18 *  - Gadget drivers can choose ep maxpacket (8/16/32/64)
19 *  - Gadget drivers can choose direction (IN, OUT)
20 *  - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
21 */
22
23#undef DEBUG
24// #define	VERBOSE		/* extra debug messages (success too) */
25// #define	USB_TRACE	/* packet-level success messages */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/delay.h>
31#include <linux/ioport.h>
32#include <linux/slab.h>
33#include <linux/errno.h>
34#include <linux/init.h>
35#include <linux/timer.h>
36#include <linux/list.h>
37#include <linux/interrupt.h>
38#include <linux/proc_fs.h>
39#include <linux/device.h>
40#include <linux/usb/ch9.h>
41#include <linux/usb_gadget.h>
42
43#include <asm/byteorder.h>
44#include <asm/io.h>
45#include <asm/irq.h>
46#include <asm/system.h>
47#include <asm/unaligned.h>
48
49
50#include "goku_udc.h"
51
52#define	DRIVER_DESC		"TC86C001 USB Device Controller"
53#define	DRIVER_VERSION		"30-Oct 2003"
54
55#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
56
57static const char driver_name [] = "goku_udc";
58static const char driver_desc [] = DRIVER_DESC;
59
60MODULE_AUTHOR("source@mvista.com");
61MODULE_DESCRIPTION(DRIVER_DESC);
62MODULE_LICENSE("GPL");
63
64
65/*
66 * IN dma behaves ok under testing, though the IN-dma abort paths don't
67 * seem to behave quite as expected.  Used by default.
68 *
 * OUT dma has documented design problems handling the common
 * "short packet" transfer termination policy; it couldn't be enabled
 * by default, even if the OUT-dma abort problems had a resolution.
72 */
73static unsigned use_dma = 1;
74
75
76/*-------------------------------------------------------------------------*/
77
78static void nuke(struct goku_ep *, int status);
79
/* Issue a per-endpoint command to the controller.  The udelay gives the
 * TC86C001 time to latch the write to the Command register before any
 * further register access; don't call this from hot paths.
 */
static inline void
command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
{
	writel(COMMAND_EP(epnum) | command, &regs->Command);
	udelay(300);
}
86
/* Activate one of the ep1..ep3 endpoints according to a gadget driver's
 * endpoint descriptor.  Rejects ep0, mismatched endpoint numbers (the
 * numbering is fixed on this chip), transfer types other than bulk or
 * interrupt, and maxpacket sizes other than 8/16/32/64.
 * Returns 0 on success or a negative errno.
 */
static int
goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct goku_udc	*dev;
	struct goku_ep	*ep;
	u32		mode;
	u16		max;
	unsigned long	flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !desc || ep->desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (ep == &dev->ep[0])
		return -EINVAL;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	/* hardware endpoint numbering is fixed: descriptor must match */
	if (ep->num != (desc->bEndpointAddress & 0x0f))
		return -EINVAL;

	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		break;
	default:
		return -EINVAL;
	}

	/* endpoint must currently be unconfigured in the hardware */
	if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
			!= EPxSTATUS_EP_INVALID)
		return -EBUSY;

	/* enabling the no-toggle interrupt mode would need an api hook */
	mode = 0;
	max = le16_to_cpu(get_unaligned(&desc->wMaxPacketSize));
	/* intentional fallthrough: encodes maxpacket as a 2-bit field
	 * (8->0, 16->1, 32->2, 64->3) shifted into the mode register
	 */
	switch (max) {
	case 64:	mode++;		/* fallthrough */
	case 32:	mode++;		/* fallthrough */
	case 16:	mode++;		/* fallthrough */
	case 8:		mode <<= 3;
			break;
	default:
		return -EINVAL;
	}
	mode |= 2 << 1;		/* bulk, or intr-with-toggle */

	/* ep1/ep2 dma direction is chosen early; it works in the other
	 * direction, with pio.  be cautious with out-dma.
	 */
	ep->is_in = (USB_DIR_IN & desc->bEndpointAddress) != 0;
	if (ep->is_in) {
		mode |= 1;
		ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
	} else {
		/* OUT dma only when explicitly requested (use_dma == 2) */
		ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
		if (ep->dma)
			DBG(dev, "%s out-dma hides short packets\n",
				ep->ep.name);
	}

	spin_lock_irqsave(&ep->dev->lock, flags);

	/* ep1 and ep2 can do double buffering and/or dma */
	if (ep->num < 3) {
		struct goku_udc_regs __iomem	*regs = ep->dev->regs;
		u32				tmp;

		/* double buffer except (for now) with pio in */
		tmp = ((ep->dma || !ep->is_in)
				? 0x10	/* double buffered */
				: 0x11	/* single buffer */
			) << ep->num;
		tmp |= readl(&regs->EPxSingle);
		writel(tmp, &regs->EPxSingle);

		tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
		tmp |= readl(&regs->EPxBCS);
		writel(tmp, &regs->EPxBCS);
	}
	writel(mode, ep->reg_mode);
	command(ep->dev->regs, COMMAND_RESET, ep->num);
	ep->ep.maxpacket = max;
	ep->stopped = 0;
	ep->desc = desc;
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
		ep->is_in ? "IN" : "OUT",
		ep->dma ? "dma" : "pio",
		max);

	return 0;
}
181
/* Return an endpoint to its unconfigured state: invalidate it in the
 * hardware (when 'regs' is non-NULL), mask its interrupts, clear its
 * buffering/dma configuration, reset a still-active dma channel, and
 * finally reset the software-side endpoint state.  Called with
 * dev->lock held (except during probe/shutdown paths).
 */
static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
{
	struct goku_udc		*dev = ep->dev;

	if (regs) {
		command(regs, COMMAND_INVALID, ep->num);
		if (ep->num) {
			/* mask the dma-completion irqs this ep may have used */
			if (ep->num == UDC_MSTWR_ENDPOINT)
				dev->int_enable &= ~(INT_MSTWREND
							|INT_MSTWRTMOUT);
			else if (ep->num == UDC_MSTRD_ENDPOINT)
				dev->int_enable &= ~INT_MSTRDEND;
			dev->int_enable &= ~INT_EPxDATASET (ep->num);
		} else
			dev->int_enable &= ~INT_EP0;
		writel(dev->int_enable, &regs->int_enable);
		/* read back to flush the posted write */
		readl(&regs->int_enable);
		if (ep->num < 3) {
			struct goku_udc_regs __iomem	*r = ep->dev->regs;
			u32				tmp;

			/* clear double-buffer and dma/pio selection bits */
			tmp = readl(&r->EPxSingle);
			tmp &= ~(0x11 << ep->num);
			writel(tmp, &r->EPxSingle);

			tmp = readl(&r->EPxBCS);
			tmp &= ~(0x11 << ep->num);
			writel(tmp, &r->EPxBCS);
		}
		/* reset dma in case we're still using it */
		if (ep->dma) {
			u32	master;

			master = readl(&regs->dma_master) & MST_RW_BITS;
			if (ep->num == UDC_MSTWR_ENDPOINT) {
				master &= ~MST_W_BITS;
				master |= MST_WR_RESET;
			} else {
				master &= ~MST_R_BITS;
				master |= MST_RD_RESET;
			}
			writel(master, &regs->dma_master);
		}
	}

	ep->ep.maxpacket = MAX_FIFO_SIZE;
	ep->desc = NULL;
	ep->stopped = 1;
	ep->irqs = 0;
	ep->dma = 0;
}
233
234static int goku_ep_disable(struct usb_ep *_ep)
235{
236	struct goku_ep	*ep;
237	struct goku_udc	*dev;
238	unsigned long	flags;
239
240	ep = container_of(_ep, struct goku_ep, ep);
241	if (!_ep || !ep->desc)
242		return -ENODEV;
243	dev = ep->dev;
244	if (dev->ep0state == EP0_SUSPEND)
245		return -EBUSY;
246
247	VDBG(dev, "disable %s\n", _ep->name);
248
249	spin_lock_irqsave(&dev->lock, flags);
250	nuke(ep, -ESHUTDOWN);
251	ep_reset(dev->regs, ep);
252	spin_unlock_irqrestore(&dev->lock, flags);
253
254	return 0;
255}
256
257/*-------------------------------------------------------------------------*/
258
259static struct usb_request *
260goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
261{
262	struct goku_request	*req;
263
264	if (!_ep)
265		return NULL;
266	req = kzalloc(sizeof *req, gfp_flags);
267	if (!req)
268		return NULL;
269
270	req->req.dma = DMA_ADDR_INVALID;
271	INIT_LIST_HEAD(&req->queue);
272	return &req->req;
273}
274
275static void
276goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
277{
278	struct goku_request	*req;
279
280	if (!_ep || !_req)
281		return;
282
283	req = container_of(_req, struct goku_request, req);
284	WARN_ON(!list_empty(&req->queue));
285	kfree(req);
286}
287
288/*-------------------------------------------------------------------------*/
289
290/* allocating buffers this way eliminates dma mapping overhead, which
291 * on some platforms will mean eliminating a per-io buffer copy.  with
292 * some kinds of system caches, further tweaks may still be needed.
293 */
294static void *
295goku_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
296			dma_addr_t *dma, gfp_t gfp_flags)
297{
298	void		*retval;
299	struct goku_ep	*ep;
300
301	ep = container_of(_ep, struct goku_ep, ep);
302	if (!_ep)
303		return NULL;
304	*dma = DMA_ADDR_INVALID;
305
306	if (ep->dma) {
307		/* the main problem with this call is that it wastes memory
308		 * on typical 1/N page allocations: it allocates 1-N pages.
309		 */
310#warning Using dma_alloc_coherent even with buffers smaller than a page.
311		retval = dma_alloc_coherent(&ep->dev->pdev->dev,
312				bytes, dma, gfp_flags);
313	} else
314		retval = kmalloc(bytes, gfp_flags);
315	return retval;
316}
317
318static void
319goku_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma, unsigned bytes)
320{
321	/* free memory into the right allocator */
322	if (dma != DMA_ADDR_INVALID) {
323		struct goku_ep	*ep;
324
325		ep = container_of(_ep, struct goku_ep, ep);
326		if (!_ep)
327			return;
328		dma_free_coherent(&ep->dev->pdev->dev, bytes, buf, dma);
329	} else
330		kfree (buf);
331}
332
333/*-------------------------------------------------------------------------*/
334
/* Unlink 'req' from its endpoint queue and complete it with 'status'
 * (unless the request already carries a final status), undoing any pci
 * dma mapping this driver created in goku_queue().  Called with
 * dev->lock held; the lock is dropped around the gadget driver's
 * completion callback, and ep->stopped is forced while it runs so the
 * callback can't advance the queue underneath us.
 */
static void
done(struct goku_ep *ep, struct goku_request *req, int status)
{
	struct goku_udc		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (req->mapped) {
		pci_unmap_single(dev->pdev, req->req.dma, req->req.length,
			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	}

	/* unless packet tracing is on, only log abnormal completions */
#ifndef USB_TRACE
	if (status && status != -ESHUTDOWN)
#endif
		VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}
370
371/*-------------------------------------------------------------------------*/
372
373static inline int
374write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
375{
376	unsigned	length, count;
377
378	length = min(req->req.length - req->req.actual, max);
379	req->req.actual += length;
380
381	count = length;
382	while (likely(count--))
383		writel(*buf++, fifo);
384	return length;
385}
386
387// return:  0 = still running, 1 = completed, negative = errno
388static int write_fifo(struct goku_ep *ep, struct goku_request *req)
389{
390	struct goku_udc	*dev = ep->dev;
391	u32		tmp;
392	u8		*buf;
393	unsigned	count;
394	int		is_last;
395
396	tmp = readl(&dev->regs->DataSet);
397	buf = req->req.buf + req->req.actual;
398	prefetch(buf);
399
400	dev = ep->dev;
401	if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
402		return -EL2HLT;
403
404	/* NOTE:  just single-buffered PIO-IN for now.  */
405	if (unlikely((tmp & DATASET_A(ep->num)) != 0))
406		return 0;
407
408	/* clear our "packet available" irq */
409	if (ep->num != 0)
410		writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);
411
412	count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);
413
414	/* last packet often short (sometimes a zlp, especially on ep0) */
415	if (unlikely(count != ep->ep.maxpacket)) {
416		writel(~(1<<ep->num), &dev->regs->EOP);
417		if (ep->num == 0) {
418			dev->ep[0].stopped = 1;
419			dev->ep0state = EP0_STATUS;
420		}
421		is_last = 1;
422	} else {
423		if (likely(req->req.length != req->req.actual)
424				|| req->req.zero)
425			is_last = 0;
426		else
427			is_last = 1;
428	}
429
430	/* requests complete when all IN data is in the FIFO,
431	 * or sometimes later, if a zlp was needed.
432	 */
433	if (is_last) {
434		done(ep, req, 0);
435		return 1;
436	}
437
438	return 0;
439}
440
/* Drain OUT packet(s) for 'req' from the endpoint fifo (PIO).
 *
 * Returns 0 while the request is still in progress, 1 when it
 * completed (done() was called), or a negative errno.  ep1/ep2 are
 * double buffered, so the do/while loop (and the "goto top" path after
 * a completion) empties both packet buffers before returning.  Bytes
 * beyond the request buffer are read and discarded, and the request is
 * flagged -EOVERFLOW.  Called with dev->lock held.
 */
static int read_fifo(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc_regs __iomem	*regs;
	u32				size, set;
	u8				*buf;
	unsigned			bufferspace, is_short, dbuff;

	regs = ep->dev->regs;
top:
	buf = req->req.buf + req->req.actual;
	prefetchw(buf);

	/* ep0 reads are only legal during its OUT data stage */
	if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
		return -EL2HLT;

	/* only ep1/ep2 are double buffered */
	dbuff = (ep->num == 1 || ep->num == 2);
	do {
		/* ack dataset irq matching the status we'll handle */
		if (ep->num != 0)
			writel(~INT_EPxDATASET(ep->num), &regs->int_status);

		set = readl(&regs->DataSet) & DATASET_AB(ep->num);
		size = readl(&regs->EPxSizeLA[ep->num]);
		bufferspace = req->req.length - req->req.actual;

		/* usually do nothing without an OUT packet */
		if (likely(ep->num != 0 || bufferspace != 0)) {
			if (unlikely(set == 0))
				break;
			/* use ep1/ep2 double-buffering for OUT */
			if (!(size & PACKET_ACTIVE))
				size = readl(&regs->EPxSizeLB[ep->num]);
			if (!(size & PACKET_ACTIVE)) 	// "can't happen"
				break;
			size &= DATASIZE;	/* EPxSizeH == 0 */

		/* ep0out no-out-data case for set_config, etc */
		} else
			size = 0;

		/* read all bytes from this packet */
		req->req.actual += size;
		is_short = (size < ep->ep.maxpacket);
#ifdef USB_TRACE
		VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
			ep->ep.name, size, is_short ? "/S" : "",
			req, req->req.actual, req->req.length);
#endif
		while (likely(size-- != 0)) {
			u8	byte = (u8) readl(ep->reg_fifo);

			if (unlikely(bufferspace == 0)) {
				/* this happens when the driver's buffer
				 * is smaller than what the host sent.
				 * discard the extra data in this packet.
				 */
				if (req->req.status != -EOVERFLOW)
					DBG(ep->dev, "%s overflow %u\n",
						ep->ep.name, size);
				req->req.status = -EOVERFLOW;
			} else {
				*buf++ = byte;
				bufferspace--;
			}
		}

		/* completion: short packet, or request buffer filled */
		if (unlikely(is_short || req->req.actual == req->req.length)) {
			if (unlikely(ep->num == 0)) {
				/* non-control endpoints now usable? */
				if (ep->dev->req_config)
					writel(ep->dev->configured
							? USBSTATE_CONFIGURED
							: 0,
						&regs->UsbState);
				/* ep0out status stage */
				writel(~(1<<0), &regs->EOP);
				ep->stopped = 1;
				ep->dev->ep0state = EP0_STATUS;
			}
			done(ep, req, 0);

			/* empty the second buffer asap */
			if (dbuff && !list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						struct goku_request, queue);
				goto top;
			}
			return 1;
		}
	} while (dbuff);
	return 0;
}
534
535static inline void
536pio_irq_enable(struct goku_udc *dev,
537		struct goku_udc_regs __iomem *regs, int epnum)
538{
539	dev->int_enable |= INT_EPxDATASET (epnum);
540	writel(dev->int_enable, &regs->int_enable);
541	/* write may still be posted */
542}
543
544static inline void
545pio_irq_disable(struct goku_udc *dev,
546		struct goku_udc_regs __iomem *regs, int epnum)
547{
548	dev->int_enable &= ~INT_EPxDATASET (epnum);
549	writel(dev->int_enable, &regs->int_enable);
550	/* write may still be posted */
551}
552
553static inline void
554pio_advance(struct goku_ep *ep)
555{
556	struct goku_request	*req;
557
558	if (unlikely(list_empty (&ep->queue)))
559		return;
560	req = list_entry(ep->queue.next, struct goku_request, queue);
561	(ep->is_in ? write_fifo : read_fifo)(ep, req);
562}
563
564
565/*-------------------------------------------------------------------------*/
566
// return:  0 = q running, 1 = q stopped, negative = errno
/* Program the master dma engine for 'req'.  IN transfers use the read
 * channel (MSTRD), OUT transfers the write channel (MSTWR); completion
 * is reported through the dma irqs enabled here and handled by
 * dma_advance().  Called with dev->lock held.
 */
static int start_dma(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	u32				master;
	u32				start = req->req.dma;
	/* end register takes the address of the last byte, hence -1 */
	u32				end = start + req->req.length - 1;

	master = readl(&regs->dma_master) & MST_RW_BITS;

	/* re-init the bits affecting IN dma; careful with zlps */
	if (likely(ep->is_in)) {
		if (unlikely(master & MST_RD_ENA)) {
			DBG (ep->dev, "start, IN active dma %03x!!\n",
				master);
//			return -EL2HLT;
		}
		writel(end, &regs->in_dma_end);
		writel(start, &regs->in_dma_start);

		/* NOTE(review): the assignments below replace 'master'
		 * outright, discarding any write-channel bits read above —
		 * presumably intentional since the two channels are
		 * programmed independently; confirm against chip docs.
		 */
		master &= ~MST_R_BITS;
		if (unlikely(req->req.length == 0))
			master = MST_RD_ENA | MST_RD_EOPB;
		else if ((req->req.length % ep->ep.maxpacket) != 0
					|| req->req.zero)
			master = MST_RD_ENA | MST_EOPB_ENA;
		else
			master = MST_RD_ENA | MST_EOPB_DIS;

		ep->dev->int_enable |= INT_MSTRDEND;

	/* Goku DMA-OUT merges short packets, which plays poorly with
	 * protocols where short packets mark the transfer boundaries.
	 * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
	 * ending transfers after 3 SOFs; we don't turn it on.
	 */
	} else {
		if (unlikely(master & MST_WR_ENA)) {
			DBG (ep->dev, "start, OUT active dma %03x!!\n",
				master);
//			return -EL2HLT;
		}
		writel(end, &regs->out_dma_end);
		writel(start, &regs->out_dma_start);

		master &= ~MST_W_BITS;
		master |= MST_WR_ENA | MST_TIMEOUT_DIS;

		ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
	}

	writel(master, &regs->dma_master);
	writel(ep->dev->int_enable, &regs->int_enable);
	return 0;
}
622
/* Handle dma completion for this endpoint: record how many bytes the
 * engine moved for the head request, complete it, and kick off dma for
 * the next queued request.  When the queue drains, the direction's dma
 * irqs are masked.  Called with dev->lock held.
 */
static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
{
	struct goku_request		*req;
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	u32				master;

	master = readl(&regs->dma_master);

	if (unlikely(list_empty(&ep->queue))) {
stop:
		/* nothing left to transfer: mask this direction's dma irqs */
		if (ep->is_in)
			dev->int_enable &= ~INT_MSTRDEND;
		else
			dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
		writel(dev->int_enable, &regs->int_enable);
		return;
	}
	req = list_entry(ep->queue.next, struct goku_request, queue);

	/* normal hw dma completion (not abort) */
	if (likely(ep->is_in)) {
		/* channel still enabled means the engine is still busy */
		if (unlikely(master & MST_RD_ENA))
			return;
		req->req.actual = readl(&regs->in_dma_current);
	} else {
		if (unlikely(master & MST_WR_ENA))
			return;

		/* hardware merges short packets, and also hides packet
		 * overruns.  a partial packet MAY be in the fifo here.
		 */
		req->req.actual = readl(&regs->out_dma_current);
	}
	/* current-address register holds the address of the last byte
	 * transferred, so bytes moved = current - start + 1
	 */
	req->req.actual -= req->req.dma;
	req->req.actual++;

#ifdef USB_TRACE
	VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
		ep->ep.name, ep->is_in ? "IN" : "OUT",
		req->req.actual, req->req.length, req);
#endif
	done(ep, req, 0);
	if (list_empty(&ep->queue))
		goto stop;
	req = list_entry(ep->queue.next, struct goku_request, queue);
	(void) start_dma(ep, req);
}
670
/* Stop an in-progress dma transfer on this endpoint, leaving the head
 * request's actual/status fields describing how far it got.  The fifo
 * is disabled around the abort so the dma engine can drain its internal
 * buffer before the channel is reset.  Called with dev->lock held and
 * a non-empty queue; the caller completes the request afterwards.
 */
static void abort_dma(struct goku_ep *ep, int status)
{
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	struct goku_request		*req;
	u32				curr, master;

	/* NAK future host requests, hoping the implicit delay lets the
	 * dma engine finish reading (or writing) its latest packet and
	 * empty the dma buffer (up to 16 bytes).
	 *
	 * This avoids needing to clean up a partial packet in the fifo;
	 * we can't do that for IN without side effects to HALT and TOGGLE.
	 */
	command(regs, COMMAND_FIFO_DISABLE, ep->num);
	req = list_entry(ep->queue.next, struct goku_request, queue);
	master = readl(&regs->dma_master) & MST_RW_BITS;

	if (ep->is_in) {
		/* channel already idle: the transfer finished on its own */
		if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
			goto finished;
		curr = readl(&regs->in_dma_current);

		/* collapse the window to the current address, then reset */
		writel(curr, &regs->in_dma_end);
		writel(curr, &regs->in_dma_start);

		master &= ~MST_R_BITS;
		master |= MST_RD_RESET;
		writel(master, &regs->dma_master);

		if (readl(&regs->dma_master) & MST_RD_ENA)
			DBG(ep->dev, "IN dma active after reset!\n");

	} else {
		if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
			goto finished;
		curr = readl(&regs->out_dma_current);

		writel(curr, &regs->out_dma_end);
		writel(curr, &regs->out_dma_start);

		master &= ~MST_W_BITS;
		master |= MST_WR_RESET;
		writel(master, &regs->dma_master);

		if (readl(&regs->dma_master) & MST_WR_ENA)
			DBG(ep->dev, "OUT dma active after reset!\n");
	}
	/* current register points at the last byte moved (inclusive) */
	req->req.actual = (curr - req->req.dma) + 1;
	req->req.status = status;

	VDBG(ep->dev, "%s %s %s %d/%d\n", __FUNCTION__, ep->ep.name,
		ep->is_in ? "IN" : "OUT",
		req->req.actual, req->req.length);

	command(regs, COMMAND_FIFO_ENABLE, ep->num);

	return;

finished:
	/* dma already completed; no abort needed */
	command(regs, COMMAND_FIFO_ENABLE, ep->num);
	req->req.actual = req->req.length;
	req->req.status = 0;
}
735
736/*-------------------------------------------------------------------------*/
737
/* Queue a request on an endpoint.  Maps the buffer for dma when the
 * endpoint uses dma and the caller didn't pre-map it.  If the queue is
 * empty and the endpoint isn't halted, the i/o is started immediately
 * (and may even complete synchronously, pio); otherwise the pio or dma
 * irq handler advances the queue later.  Returns 0 or negative errno.
 */
static int
goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct goku_request	*req;
	struct goku_ep		*ep;
	struct goku_udc		*dev;
	unsigned long		flags;
	int			status;

	/* always require a cpu-view buffer so pio works */
	req = container_of(_req, struct goku_request, req);
	if (unlikely(!_req || !_req->complete
			|| !_req->buf || !list_empty(&req->queue)))
		return -EINVAL;
	ep = container_of(_ep, struct goku_ep, ep);
	if (unlikely(!_ep || (!ep->desc && ep->num != 0)))
		return -EINVAL;
	dev = ep->dev;
	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;

	/* can't touch registers when suspended */
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	/* set up dma mapping in case the caller didn't */
	if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
		_req->dma = pci_map_single(dev->pdev, _req->buf, _req->length,
			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		/* remember to unmap in done() */
		req->mapped = 1;
	}

#ifdef USB_TRACE
	VDBG(dev, "%s queue req %p, len %u buf %p\n",
			_ep->name, _req, _req->length, _req->buf);
#endif

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* for ep0 IN without premature status, zlp is required and
	 * writing EOP starts the status stage (OUT).
	 */
	if (unlikely(ep->num == 0 && ep->is_in))
		_req->zero = 1;

	/* kickstart this i/o queue? */
	status = 0;
	if (list_empty(&ep->queue) && likely(!ep->stopped)) {
		/* dma:  done after dma completion IRQ (or error)
		 * pio:  done after last fifo operation
		 */
		if (ep->dma)
			status = start_dma(ep, req);
		else
			status = (ep->is_in ? write_fifo : read_fifo)(ep, req);

		/* positive status means the request already completed
		 * synchronously; don't add it to the queue below
		 */
		if (unlikely(status != 0)) {
			if (status > 0)
				status = 0;
			req = NULL;
		}

	} /* else pio or dma irq handler advances the queue. */

	if (likely(req != 0))
		list_add_tail(&req->queue, &ep->queue);

	/* make sure pio irqs are unmasked while work is pending */
	if (likely(!list_empty(&ep->queue))
			&& likely(ep->num != 0)
			&& !ep->dma
			&& !(dev->int_enable & INT_EPxDATASET (ep->num)))
		pio_irq_enable(dev, dev->regs, ep->num);

	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return status;
}
819
820/* dequeue ALL requests */
821static void nuke(struct goku_ep *ep, int status)
822{
823	struct goku_request	*req;
824
825	ep->stopped = 1;
826	if (list_empty(&ep->queue))
827		return;
828	if (ep->dma)
829		abort_dma(ep, status);
830	while (!list_empty(&ep->queue)) {
831		req = list_entry(ep->queue.next, struct goku_request, queue);
832		done(ep, req, status);
833	}
834}
835
/* dequeue JUST ONE request, completing it with -ECONNRESET.  If it is
 * at the head of the queue and being transferred by dma, the dma is
 * aborted first and the queue restarted afterwards.  Returns 0 on
 * success, -EINVAL if the request isn't queued here, or another
 * negative errno.
 */
static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct goku_request	*req;
	struct goku_ep		*ep;
	struct goku_udc		*dev;
	unsigned long		flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !_req || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* we can't touch (dma) registers when suspended */
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	VDBG(dev, "%s %s %s %s %p\n", __FUNCTION__, _ep->name,
		ep->is_in ? "IN" : "OUT",
		ep->dma ? "dma" : "pio",
		_req);

	spin_lock_irqsave(&dev->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore (&dev->lock, flags);
		return -EINVAL;
	}

	/* head-of-queue request on a dma endpoint: abort the hardware
	 * transfer before completing, then restart the queue
	 */
	if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
		abort_dma(ep, -ECONNRESET);
		done(ep, req, -ECONNRESET);
		dma_advance(dev, ep);
	} else if (!list_empty(&req->queue))
		done(ep, req, -ECONNRESET);
	else
		req = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	return req ? 0 : -EOPNOTSUPP;
}
884
885/*-------------------------------------------------------------------------*/
886
/* Un-stall a non-control endpoint: reset its data toggle to DATA0,
 * clear the stall condition, and restart any i/o that was left queued
 * while the endpoint was stopped.  Called with dev->lock held.
 */
static void goku_clear_halt(struct goku_ep *ep)
{
	// assert (ep->num !=0)
	VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
	/* toggle must be reset before the stall is cleared */
	command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
	command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
	if (ep->stopped) {
		ep->stopped = 0;
		if (ep->dma) {
			struct goku_request	*req;

			if (list_empty(&ep->queue))
				return;
			req = list_entry(ep->queue.next, struct goku_request,
						queue);
			(void) start_dma(ep, req);
		} else
			pio_advance(ep);
	}
}
907
/* Halt (stall) or un-halt an endpoint.  For ep0 only setting the
 * protocol stall is supported; clearing it returns -EINVAL.  Halting
 * an endpoint with queued requests, or an IN endpoint that still has
 * data in its packet buffers, fails with -EAGAIN (per the
 * usb_ep_set_halt() contract).  Returns 0 or negative errno.
 */
static int goku_set_halt(struct usb_ep *_ep, int value)
{
	struct goku_ep	*ep;
	unsigned long	flags;
	int		retval = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of (_ep, struct goku_ep, ep);

	if (ep->num == 0) {
		if (value) {
			ep->dev->ep0state = EP0_STALL;
			ep->dev->ep[0].stopped = 1;
		} else
			return -EINVAL;

	/* don't change EPxSTATUS_EP_INVALID to READY */
	} else if (!ep->desc) {
		DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
		return -EINVAL;
	}

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue))
		retval = -EAGAIN;
	else if (ep->is_in && value
			/* data in (either) packet buffer? */
			&& (readl(&ep->dev->regs->DataSet)
					& DATASET_AB(ep->num)))
		retval = -EAGAIN;
	else if (!value)
		goku_clear_halt(ep);
	else {
		ep->stopped = 1;
		VDBG(ep->dev, "%s set halt\n", ep->ep.name);
		command(ep->dev->regs, COMMAND_STALL, ep->num);
		/* read back to flush the posted write */
		readl(ep->reg_status);
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return retval;
}
950
951static int goku_fifo_status(struct usb_ep *_ep)
952{
953	struct goku_ep			*ep;
954	struct goku_udc_regs __iomem	*regs;
955	u32				size;
956
957	if (!_ep)
958		return -ENODEV;
959	ep = container_of(_ep, struct goku_ep, ep);
960
961	/* size is only reported sanely for OUT */
962	if (ep->is_in)
963		return -EOPNOTSUPP;
964
965	/* ignores 16-byte dma buffer; SizeH == 0 */
966	regs = ep->dev->regs;
967	size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
968	size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
969	VDBG(ep->dev, "%s %s %u\n", __FUNCTION__, ep->ep.name, size);
970	return size;
971}
972
/* Discard any data sitting in the endpoint's packet buffers.  Note the
 * hardware quirk documented below: FIFO_CLEAR also drops the halt
 * feature, which is undesirable but unavoidable for IN endpoints.
 */
static void goku_fifo_flush(struct usb_ep *_ep)
{
	struct goku_ep			*ep;
	struct goku_udc_regs __iomem	*regs;
	u32				size;

	if (!_ep)
		return;
	ep = container_of(_ep, struct goku_ep, ep);
	VDBG(ep->dev, "%s %s\n", __FUNCTION__, ep->ep.name);

	/* don't change EPxSTATUS_EP_INVALID to READY */
	if (!ep->desc && ep->num != 0) {
		DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
		return;
	}

	regs = ep->dev->regs;
	size = readl(&regs->EPxSizeLA[ep->num]);
	size &= DATASIZE;

	/* Non-desirable behavior:  FIFO_CLEAR also clears the
	 * endpoint halt feature.  For OUT, we _could_ just read
	 * the bytes out (PIO, if !ep->dma); for in, no choice.
	 */
	if (size)
		command(regs, COMMAND_FIFO_CLEAR, ep->num);
}
1001
/* per-endpoint operations exported to the gadget driver layer through
 * each struct usb_ep; see <linux/usb_gadget.h> for the contracts.
 */
static struct usb_ep_ops goku_ep_ops = {
	.enable		= goku_ep_enable,
	.disable	= goku_ep_disable,

	.alloc_request	= goku_alloc_request,
	.free_request	= goku_free_request,

	.alloc_buffer	= goku_alloc_buffer,
	.free_buffer	= goku_free_buffer,

	.queue		= goku_queue,
	.dequeue	= goku_dequeue,

	.set_halt	= goku_set_halt,
	.fifo_status	= goku_fifo_status,
	.fifo_flush	= goku_fifo_flush,
};
1019
1020/*-------------------------------------------------------------------------*/
1021
/* This controller does not expose the USB frame number to software. */
static int goku_get_frame(struct usb_gadget *_gadget)
{
	return -EOPNOTSUPP;
}
1026
/* device-level gadget operations; only get_frame is implemented */
static const struct usb_gadget_ops goku_ops = {
	.get_frame	= goku_get_frame,
	// no remote wakeup
	// not selfpowered
};
1032
1033/*-------------------------------------------------------------------------*/
1034
1035static inline char *dmastr(void)
1036{
1037	if (use_dma == 0)
1038		return "(dma disabled)";
1039	else if (use_dma == 2)
1040		return "(dma IN and OUT)";
1041	else
1042		return "(dma IN)";
1043}
1044
1045#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1046
1047static const char proc_node_name [] = "driver/udc";
1048
1049#define FOURBITS "%s%s%s%s"
1050#define EIGHTBITS FOURBITS FOURBITS
1051
/* Decode an interrupt bitmask into the procfs buffer, advancing *next
 * and shrinking *size by the number of characters written.  Works for
 * both int_enable and int_status, which share the same bit layout.
 */
static void
dump_intmask(const char *label, u32 mask, char **next, unsigned *size)
{
	int t;

	/* int_status is the same format ... */
	t = scnprintf(*next, *size,
		"%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
		label, mask,
		(mask & INT_PWRDETECT) ? " power" : "",
		(mask & INT_SYSERROR) ? " sys" : "",
		(mask & INT_MSTRDEND) ? " in-dma" : "",
		(mask & INT_MSTWRTMOUT) ? " wrtmo" : "",

		(mask & INT_MSTWREND) ? " out-dma" : "",
		(mask & INT_MSTWRSET) ? " wrset" : "",
		(mask & INT_ERR) ? " err" : "",
		(mask & INT_SOF) ? " sof" : "",

		(mask & INT_EP3NAK) ? " ep3nak" : "",
		(mask & INT_EP2NAK) ? " ep2nak" : "",
		(mask & INT_EP1NAK) ? " ep1nak" : "",
		(mask & INT_EP3DATASET) ? " ep3" : "",

		(mask & INT_EP2DATASET) ? " ep2" : "",
		(mask & INT_EP1DATASET) ? " ep1" : "",
		(mask & INT_STATUSNAK) ? " ep0snak" : "",
		(mask & INT_STATUS) ? " ep0status" : "",

		(mask & INT_SETUP) ? " setup" : "",
		(mask & INT_ENDPOINT0) ? " ep0" : "",
		(mask & INT_USBRESET) ? " reset" : "",
		(mask & INT_SUSPEND) ? " suspend" : "");
	*size -= t;
	*next += t;
}
1088
1089
/*
 * /proc read handler (CONFIG_USB_GADGET_DEBUG_FILES): dump device,
 * interrupt, DMA, and per-endpoint state into "buffer" in one shot.
 * A nonzero offset returns 0 (EOF) and *eof is always set, so the
 * whole report must fit in a single read.  Hardware registers are
 * sampled with local irqs disabled to keep the snapshot consistent.
 * Returns the number of bytes written.
 */
static int
udc_proc_read(char *buffer, char **start, off_t off, int count,
		int *eof, void *_dev)
{
	char				*buf = buffer;
	struct goku_udc			*dev = _dev;
	struct goku_udc_regs __iomem	*regs = dev->regs;
	char				*next = buf;
	unsigned			size = count;
	unsigned long			flags;
	int				i, t, is_usb_connected;
	u32				tmp;

	/* single-buffer semantics: no partial/continued reads */
	if (off != 0)
		return 0;

	local_irq_save(flags);

	/* basic device status */
	tmp = readl(&regs->power_detect);
	is_usb_connected = tmp & PW_DETECT;
	t = scnprintf(next, size,
		"%s - %s\n"
		"%s version: %s %s\n"
		"Gadget driver: %s\n"
		"Host %s, %s\n"
		"\n",
		pci_name(dev->pdev), driver_desc,
		driver_name, DRIVER_VERSION, dmastr(),
		dev->driver ? dev->driver->driver.name : "(none)",
		is_usb_connected
			? ((tmp & PW_PULLUP) ? "full speed" : "powered")
			: "disconnected",
		/* statement expression maps ep0state to a printable name */
		({char *tmp;
		switch(dev->ep0state){
		case EP0_DISCONNECT:	tmp = "ep0_disconnect"; break;
		case EP0_IDLE:		tmp = "ep0_idle"; break;
		case EP0_IN:		tmp = "ep0_in"; break;
		case EP0_OUT:		tmp = "ep0_out"; break;
		case EP0_STATUS:	tmp = "ep0_status"; break;
		case EP0_STALL:		tmp = "ep0_stall"; break;
		case EP0_SUSPEND:	tmp = "ep0_suspend"; break;
		default:		tmp = "ep0_?"; break;
		} tmp; })
		);
	size -= t;
	next += t;

	dump_intmask("int_status", readl(&regs->int_status), &next, &size);
	dump_intmask("int_enable", readl(&regs->int_enable), &next, &size);

	/* nothing more to report unless a session is up and a driver bound */
	if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
		goto done;

	/* registers for (active) device and ep0 */
	t = scnprintf(next, size, "\nirqs %lu\ndataset %02x "
			"single.bcs %02x.%02x state %x addr %u\n",
			dev->irqs, readl(&regs->DataSet),
			readl(&regs->EPxSingle), readl(&regs->EPxBCS),
			readl(&regs->UsbState),
			readl(&regs->address));
	size -= t;
	next += t;

	/* DMA master configuration/status bits, decoded */
	tmp = readl(&regs->dma_master);
	t = scnprintf(next, size,
		"dma %03X =" EIGHTBITS "%s %s\n", tmp,
		(tmp & MST_EOPB_DIS) ? " eopb-" : "",
		(tmp & MST_EOPB_ENA) ? " eopb+" : "",
		(tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
		(tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",

		(tmp & MST_RD_EOPB) ? " eopb" : "",
		(tmp & MST_RD_RESET) ? " in_reset" : "",
		(tmp & MST_WR_RESET) ? " out_reset" : "",
		(tmp & MST_RD_ENA) ? " IN" : "",

		(tmp & MST_WR_ENA) ? " OUT" : "",
		(tmp & MST_CONNECTION)
			? "ep1in/ep2out"
			: "ep1out/ep2in");
	size -= t;
	next += t;

	/* dump endpoint queues */
	for (i = 0; i < 4; i++) {
		struct goku_ep		*ep = &dev->ep [i];
		struct goku_request	*req;
		int			t;

		/* skip endpoints that were never enabled (ep0 always shown) */
		if (i && !ep->desc)
			continue;

		tmp = readl(ep->reg_status);
		t = scnprintf(next, size,
			"%s %s max %u %s, irqs %lu, "
			"status %02x (%s) " FOURBITS "\n",
			ep->ep.name,
			ep->is_in ? "in" : "out",
			ep->ep.maxpacket,
			ep->dma ? "dma" : "pio",
			ep->irqs,
			tmp, ({ char *s;
			switch (tmp & EPxSTATUS_EP_MASK) {
			case EPxSTATUS_EP_READY:
				s = "ready"; break;
			case EPxSTATUS_EP_DATAIN:
				s = "packet"; break;
			case EPxSTATUS_EP_FULL:
				s = "full"; break;
			case EPxSTATUS_EP_TX_ERR:	// host will retry
				s = "tx_err"; break;
			case EPxSTATUS_EP_RX_ERR:
				s = "rx_err"; break;
			case EPxSTATUS_EP_BUSY:		/* ep0 only */
				s = "busy"; break;
			case EPxSTATUS_EP_STALL:
				s = "stall"; break;
			case EPxSTATUS_EP_INVALID:	// these "can't happen"
				s = "invalid"; break;
			default:
				s = "?"; break;
			}; s; }),
			(tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
			(tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
			(tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
			(tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : ""
			);
		/* stop once the output buffer is exhausted */
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty(&ep->queue)) {
			t = scnprintf(next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
			if (ep->dma && req->queue.prev == &ep->queue) {
				if (i == UDC_MSTRD_ENDPOINT)
					tmp = readl(&regs->in_dma_current);
				else
					tmp = readl(&regs->out_dma_current);
				/* progress = current dma address - start + 1 */
				tmp -= req->req.dma;
				tmp++;
			} else
				tmp = req->req.actual;

			t = scnprintf(next, size,
				"\treq %p len %u/%u buf %p\n",
				&req->req, tmp, req->req.length,
				req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
		}
	}

done:
	local_irq_restore(flags);
	*eof = 1;
	return count - size;
}
1258
1259#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */
1260
1261/*-------------------------------------------------------------------------*/
1262
1263static void udc_reinit (struct goku_udc *dev)
1264{
1265	static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };
1266
1267	unsigned i;
1268
1269	INIT_LIST_HEAD (&dev->gadget.ep_list);
1270	dev->gadget.ep0 = &dev->ep [0].ep;
1271	dev->gadget.speed = USB_SPEED_UNKNOWN;
1272	dev->ep0state = EP0_DISCONNECT;
1273	dev->irqs = 0;
1274
1275	for (i = 0; i < 4; i++) {
1276		struct goku_ep	*ep = &dev->ep[i];
1277
1278		ep->num = i;
1279		ep->ep.name = names[i];
1280		ep->reg_fifo = &dev->regs->ep_fifo [i];
1281		ep->reg_status = &dev->regs->ep_status [i];
1282		ep->reg_mode = &dev->regs->ep_mode[i];
1283
1284		ep->ep.ops = &goku_ep_ops;
1285		list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
1286		ep->dev = dev;
1287		INIT_LIST_HEAD (&ep->queue);
1288
1289		ep_reset(NULL, ep);
1290	}
1291
1292	dev->ep[0].reg_mode = NULL;
1293	dev->ep[0].ep.maxpacket = MAX_EP0_SIZE;
1294	list_del_init (&dev->ep[0].ep.ep_list);
1295}
1296
/*
 * Put the controller into a quiet, disconnected state: pullup off,
 * every interrupt source masked, reset deasserted.
 */
static void udc_reset(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem	*regs = dev->regs;

	writel(0, &regs->power_detect);		/* clears PW_RESETB: reset asserted */
	writel(0, &regs->int_enable);		/* mask all irq sources */
	readl(&regs->int_enable);		/* read back to flush posted writes */
	dev->int_enable = 0;

	/* deassert reset, leave USB D+ at hi-Z (no pullup)
	 * don't let INT_PWRDETECT sequence begin
	 */
	udelay(250);
	writel(PW_RESETB, &regs->power_detect);
	readl(&regs->int_enable);
}
1313
/*
 * Bring the device to the point where the host can enumerate it:
 * reset + reinit, program which standard requests the hardware answers
 * on its own, clear the descriptor RAM, then enable the D+ pullup and
 * the device-wide/ep0 interrupts.  Leaves ep0state == EP0_IDLE.
 */
static void ep0_start(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem	*regs = dev->regs;
	unsigned			i;

	VDBG(dev, "%s\n", __FUNCTION__);

	udc_reset(dev);
	udc_reinit (dev);
	//writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);

	/* hw handles set_address, set_feature, get_status; maybe more */
	writel(   G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
		| G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
		| G_REQMODE_GET_DESC
		| G_REQMODE_CLEAR_FEAT
		, &regs->reqmode);

	/* fresh per-endpoint irq statistics for the new session */
	for (i = 0; i < 4; i++)
		dev->ep[i].irqs = 0;

	/* can't modify descriptors after writing UsbReady */
	for (i = 0; i < DESC_LEN; i++)
		writel(0, &regs->descriptors[i]);
	writel(0, &regs->UsbReady);

	/* expect ep0 requests when the host drops reset */
	writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
	dev->int_enable = INT_DEVWIDE | INT_EP0;
	writel(dev->int_enable, &dev->regs->int_enable);
	readl(&regs->int_enable);	/* flush posted writes */
	dev->gadget.speed = USB_SPEED_FULL;
	dev->ep0state = EP0_IDLE;
}
1348
1349static void udc_enable(struct goku_udc *dev)
1350{
1351	/* start enumeration now, or after power detect irq */
1352	if (readl(&dev->regs->power_detect) & PW_DETECT)
1353		ep0_start(dev);
1354	else {
1355		DBG(dev, "%s\n", __FUNCTION__);
1356		dev->int_enable = INT_PWRDETECT;
1357		writel(dev->int_enable, &dev->regs->int_enable);
1358	}
1359}
1360
1361/*-------------------------------------------------------------------------*/
1362
1363/* keeping it simple:
1364 * - one bus driver, initted first;
1365 * - one function driver, initted second
1366 */
1367
1368static struct goku_udc	*the_controller;
1369
1370/* when a driver is successfully registered, it will receive
1371 * control requests including set_configuration(), which enables
1372 * non-control requests.  then usb traffic follows until a
1373 * disconnect is reported.  then a host may connect again, or
1374 * the driver might get unbound.
1375 */
1376int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1377{
1378	struct goku_udc	*dev = the_controller;
1379	int			retval;
1380
1381	if (!driver
1382			|| driver->speed != USB_SPEED_FULL
1383			|| !driver->bind
1384			|| !driver->disconnect
1385			|| !driver->setup)
1386		return -EINVAL;
1387	if (!dev)
1388		return -ENODEV;
1389	if (dev->driver)
1390		return -EBUSY;
1391
1392	/* hook up the driver */
1393	driver->driver.bus = NULL;
1394	dev->driver = driver;
1395	dev->gadget.dev.driver = &driver->driver;
1396	retval = driver->bind(&dev->gadget);
1397	if (retval) {
1398		DBG(dev, "bind to driver %s --> error %d\n",
1399				driver->driver.name, retval);
1400		dev->driver = NULL;
1401		dev->gadget.dev.driver = NULL;
1402		return retval;
1403	}
1404
1405	/* then enable host detection and ep0; and we're ready
1406	 * for set_configuration as well as eventual disconnect.
1407	 */
1408	udc_enable(dev);
1409
1410	DBG(dev, "registered gadget driver '%s'\n", driver->driver.name);
1411	return 0;
1412}
1413EXPORT_SYMBOL(usb_gadget_register_driver);
1414
/*
 * Quiesce the hardware and, if a session was active, report the
 * disconnect to the gadget driver.  Called with dev->lock held; the
 * lock is dropped around the driver's disconnect() callback.
 */
static void
stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
{
	unsigned	i;

	DBG (dev, "%s\n", __FUNCTION__);

	/* never enumerated: there is no session to report as gone */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	/* disconnect gadget driver after quiesceing hw and the driver */
	udc_reset (dev);
	for (i = 0; i < 4; i++)
		nuke(&dev->ep [i], -ESHUTDOWN);
	if (driver) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	/* still bound (not mid-unregister): get ready for the next session */
	if (dev->driver)
		udc_enable(dev);
}
1438
1439int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1440{
1441	struct goku_udc	*dev = the_controller;
1442	unsigned long	flags;
1443
1444	if (!dev)
1445		return -ENODEV;
1446	if (!driver || driver != dev->driver || !driver->unbind)
1447		return -EINVAL;
1448
1449	spin_lock_irqsave(&dev->lock, flags);
1450	dev->driver = NULL;
1451	stop_activity(dev, driver);
1452	spin_unlock_irqrestore(&dev->lock, flags);
1453
1454	driver->unbind(&dev->gadget);
1455
1456	DBG(dev, "unregistered driver '%s'\n", driver->driver.name);
1457	return 0;
1458}
1459EXPORT_SYMBOL(usb_gadget_unregister_driver);
1460
1461
1462/*-------------------------------------------------------------------------*/
1463
/*
 * Handle a SETUP packet on ep0: read the request out of the per-byte
 * hardware registers, handle CLEAR_FEATURE(ENDPOINT_HALT) and device
 * remote-wakeup clears in software, and delegate everything else to
 * the gadget driver's setup() callback.  Any failure stalls ep0.
 * Called from the irq handler with dev->lock held; the lock is
 * dropped around the setup() callback.
 */
static void ep0_setup(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem	*regs = dev->regs;
	struct usb_ctrlrequest		ctrl;
	int				tmp;

	/* read SETUP packet and enter DATA stage */
	ctrl.bRequestType = readl(&regs->bRequestType);
	ctrl.bRequest = readl(&regs->bRequest);
	/* 16-bit fields arrive as separate high/low byte registers */
	ctrl.wValue  = cpu_to_le16((readl(&regs->wValueH)  << 8)
					| readl(&regs->wValueL));
	ctrl.wIndex  = cpu_to_le16((readl(&regs->wIndexH)  << 8)
					| readl(&regs->wIndexL));
	ctrl.wLength = cpu_to_le16((readl(&regs->wLengthH) << 8)
					| readl(&regs->wLengthL));
	/* NOTE(review): presumably re-arms SETUP reception -- confirm
	 * against the TC86C001 datasheet
	 */
	writel(0, &regs->SetupRecv);

	/* a new SETUP cancels whatever ep0 was doing */
	nuke(&dev->ep[0], 0);
	dev->ep[0].stopped = 0;
	if (likely(ctrl.bRequestType & USB_DIR_IN)) {
		dev->ep[0].is_in = 1;
		dev->ep0state = EP0_IN;
		/* detect early status stages */
		writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
	} else {
		dev->ep[0].is_in = 0;
		dev->ep0state = EP0_OUT;

		/* NOTE:  CLEAR_FEATURE is done in software so that we can
		 * synchronize transfer restarts after bulk IN stalls.  data
		 * won't even enter the fifo until the halt is cleared.
		 */
		switch (ctrl.bRequest) {
		case USB_REQ_CLEAR_FEATURE:
			switch (ctrl.bRequestType) {
			case USB_RECIP_ENDPOINT:
				tmp = le16_to_cpu(ctrl.wIndex) & 0x0f;
				/* active endpoint */
				if (tmp > 3 || (!dev->ep[tmp].desc && tmp != 0))
					goto stall;
				/* direction bit in wIndex must match the ep */
				if (ctrl.wIndex & __constant_cpu_to_le16(
						USB_DIR_IN)) {
					if (!dev->ep[tmp].is_in)
						goto stall;
				} else {
					if (dev->ep[tmp].is_in)
						goto stall;
				}
				if (ctrl.wValue != __constant_cpu_to_le16(
						USB_ENDPOINT_HALT))
					goto stall;
				if (tmp)
					goku_clear_halt(&dev->ep[tmp]);
succeed:
				/* start ep0out status stage */
				writel(~(1<<0), &regs->EOP);
				dev->ep[0].stopped = 1;
				dev->ep0state = EP0_STATUS;
				return;
			case USB_RECIP_DEVICE:
				/* device remote wakeup: always clear */
				if (ctrl.wValue != __constant_cpu_to_le16(1))
					goto stall;
				VDBG(dev, "clear dev remote wakeup\n");
				goto succeed;
			case USB_RECIP_INTERFACE:
				goto stall;
			default:		/* pass to gadget driver */
				break;
			}
			break;
		default:
			break;
		}
	}

#ifdef USB_TRACE
	VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
		ctrl.bRequestType, ctrl.bRequest,
		le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex),
		le16_to_cpu(ctrl.wLength));
#endif

	/* hw wants to know when we're configured (or not) */
	dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
				&& ctrl.bRequestType == USB_RECIP_DEVICE);
	if (unlikely(dev->req_config))
		dev->configured = (ctrl.wValue != __constant_cpu_to_le16(0));

	/* delegate everything to the gadget driver.
	 * it may respond after this irq handler returns.
	 */
	spin_unlock (&dev->lock);
	tmp = dev->driver->setup(&dev->gadget, &ctrl);
	spin_lock (&dev->lock);
	if (unlikely(tmp < 0)) {
stall:
#ifdef USB_TRACE
		VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
				ctrl.bRequestType, ctrl.bRequest, tmp);
#endif
		command(regs, COMMAND_STALL, 0);
		dev->ep[0].stopped = 1;
		dev->ep0state = EP0_STALL;
	}

	/* expect at least one data or status stage irq */
}
1572
/* Acknowledge one irq source inside goku_irq(): drop it from the local
 * "stat" copy, clear it in hardware (writing ~irqbit apparently clears
 * only the bits written as zero -- the same idiom is used for the
 * INT_PWRDETECT ack), and record that the irq was handled.  Expands
 * using goku_irq()'s locals: stat, regs, handled.
 */
#define ACK(irqbit) { \
		stat &= ~irqbit; \
		writel(~irqbit, &regs->int_status); \
		handled = 1; \
		}
1578
/*
 * Shared-line interrupt handler.  Reads int_status (masked by what we
 * enabled), services device-wide events first (system error, VBUS
 * change, suspend/resume, usb reset), then ep0 stages, DMA
 * completions, and PIO endpoint data; the whole sequence may rescan
 * up to five extra times in case new events arrived meanwhile.
 * Holds dev->lock throughout, except where driver callbacks are
 * invoked (suspend/resume, and indirectly via ep0_setup).
 */
static irqreturn_t goku_irq(int irq, void *_dev)
{
	struct goku_udc			*dev = _dev;
	struct goku_udc_regs __iomem	*regs = dev->regs;
	struct goku_ep			*ep;
	u32				stat, handled = 0;
	unsigned			i, rescans = 5;

	spin_lock(&dev->lock);

rescan:
	/* only look at interrupt sources we actually enabled */
	stat = readl(&regs->int_status) & dev->int_enable;
        if (!stat)
		goto done;
	dev->irqs++;

	/* device-wide irqs */
	if (unlikely(stat & INT_DEVWIDE)) {
		if (stat & INT_SYSERROR) {
			/* fatal: quiesce everything and drop the driver */
			ERROR(dev, "system error\n");
			stop_activity(dev, dev->driver);
			stat = 0;
			handled = 1;
			dev->driver = NULL;
			goto done;
		}
		if (stat & INT_PWRDETECT) {
			/* VBUS level changed: connect starts enumeration,
			 * disconnect quiesces and waits for the next session
			 */
			writel(~stat, &regs->int_status);
			if (readl(&dev->regs->power_detect) & PW_DETECT) {
				VDBG(dev, "connect\n");
				ep0_start(dev);
			} else {
				DBG(dev, "disconnect\n");
				if (dev->gadget.speed == USB_SPEED_FULL)
					stop_activity(dev, dev->driver);
				dev->ep0state = EP0_DISCONNECT;
				dev->int_enable = INT_DEVWIDE;
				writel(dev->int_enable, &dev->regs->int_enable);
			}
			stat = 0;
			handled = 1;
			goto done;
		}
		if (stat & INT_SUSPEND) {
			ACK(INT_SUSPEND);
			/* ep0's suspend bit distinguishes suspend vs resume */
			if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) {
				switch (dev->ep0state) {
				case EP0_DISCONNECT:
				case EP0_SUSPEND:
					goto pm_next;
				default:
					break;
				}
				DBG(dev, "USB suspend\n");
				dev->ep0state = EP0_SUSPEND;
				if (dev->gadget.speed != USB_SPEED_UNKNOWN
						&& dev->driver
						&& dev->driver->suspend) {
					spin_unlock(&dev->lock);
					dev->driver->suspend(&dev->gadget);
					spin_lock(&dev->lock);
				}
			} else {
				if (dev->ep0state != EP0_SUSPEND) {
					DBG(dev, "bogus USB resume %d\n",
						dev->ep0state);
					goto pm_next;
				}
				DBG(dev, "USB resume\n");
				dev->ep0state = EP0_IDLE;
				if (dev->gadget.speed != USB_SPEED_UNKNOWN
						&& dev->driver
						&& dev->driver->resume) {
					spin_unlock(&dev->lock);
					dev->driver->resume(&dev->gadget);
					spin_lock(&dev->lock);
				}
			}
		}
pm_next:
		if (stat & INT_USBRESET) {		/* hub reset done */
			ACK(INT_USBRESET);
			INFO(dev, "USB reset done, gadget %s\n",
				dev->driver->driver.name);
		}
		// and INT_ERR on some endpoint's crc/bitstuff/... problem
	}

	/* progress ep0 setup, data, or status stages.
	 * no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
	 */
	if (stat & INT_SETUP) {
		ACK(INT_SETUP);
		dev->ep[0].irqs++;
		ep0_setup(dev);
	}
        if (stat & INT_STATUSNAK) {
		/* host reached the status stage while an IN data stage was
		 * still pending: flush it and generate the handshake
		 */
		ACK(INT_STATUSNAK|INT_ENDPOINT0);
		if (dev->ep0state == EP0_IN) {
			ep = &dev->ep[0];
			ep->irqs++;
			nuke(ep, 0);
			writel(~(1<<0), &regs->EOP);
			dev->ep0state = EP0_STATUS;
		}
	}
        if (stat & INT_ENDPOINT0) {
		ACK(INT_ENDPOINT0);
		ep = &dev->ep[0];
		ep->irqs++;
		pio_advance(ep);
        }

	/* dma completion */
        if (stat & INT_MSTRDEND) {	/* IN */
		ACK(INT_MSTRDEND);
		ep = &dev->ep[UDC_MSTRD_ENDPOINT];
		ep->irqs++;
		dma_advance(dev, ep);
        }
        if (stat & INT_MSTWREND) {	/* OUT */
		ACK(INT_MSTWREND);
		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
		ep->irqs++;
		dma_advance(dev, ep);
        }
        if (stat & INT_MSTWRTMOUT) {	/* OUT */
		ACK(INT_MSTWRTMOUT);
		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
		ep->irqs++;
		ERROR(dev, "%s write timeout ?\n", ep->ep.name);
		// reset dma? then dma_advance()
        }

	/* pio */
	for (i = 1; i < 4; i++) {
		u32		tmp = INT_EPxDATASET(i);

		if (!(stat & tmp))
			continue;
		ep = &dev->ep[i];
		pio_advance(ep);
		/* nothing left queued: silence this endpoint's data irq */
		if (list_empty (&ep->queue))
			pio_irq_disable(dev, regs, i);
		stat &= ~tmp;
		handled = 1;
		ep->irqs++;
	}

	/* up to five extra passes, in case events arrived while servicing */
	if (rescans--)
		goto rescan;

done:
	(void)readl(&regs->int_enable);
	spin_unlock(&dev->lock);
	if (stat)
		DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
				readl(&regs->int_status), dev->int_enable);
	return IRQ_RETVAL(handled);
}
1739
1740#undef ACK
1741
1742/*-------------------------------------------------------------------------*/
1743
/* release() for the gadget's embedded struct device: the device model
 * stores the controller state as drvdata, and that allocation owns the
 * whole struct goku_udc, so free it here.
 */
static void gadget_release(struct device *_dev)
{
	kfree(dev_get_drvdata(_dev));
}
1750
1751/* tear down the binding between this driver and the pci device */
1752
1753static void goku_remove(struct pci_dev *pdev)
1754{
1755	struct goku_udc		*dev = pci_get_drvdata(pdev);
1756
1757	DBG(dev, "%s\n", __FUNCTION__);
1758
1759	BUG_ON(dev->driver);
1760
1761#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1762	remove_proc_entry(proc_node_name, NULL);
1763#endif
1764	if (dev->regs)
1765		udc_reset(dev);
1766	if (dev->got_irq)
1767		free_irq(pdev->irq, dev);
1768	if (dev->regs)
1769		iounmap(dev->regs);
1770	if (dev->got_region)
1771		release_mem_region(pci_resource_start (pdev, 0),
1772				pci_resource_len (pdev, 0));
1773	if (dev->enabled)
1774		pci_disable_device(pdev);
1775	device_unregister(&dev->gadget.dev);
1776
1777	pci_set_drvdata(pdev, NULL);
1778	dev->regs = NULL;
1779	the_controller = NULL;
1780
1781	INFO(dev, "unbind\n");
1782}
1783
1784/* wrap this driver around the specified pci device, but
1785 * don't respond over USB until a gadget driver binds to us.
1786 */
1787
/*
 * PCI probe: allocate and zero the controller state, claim the PCI
 * resources (enable, mem region, ioremap, irq), reset the hardware to
 * a quiet state, and register the gadget device.  The device stays
 * electrically disconnected (no D+ pullup) until a gadget driver
 * binds.  Returns 0 or a negative errno; on failure the acquired
 * resources are released via goku_remove().
 */
static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct goku_udc		*dev = NULL;
	unsigned long		resource, len;
	void __iomem		*base = NULL;
	int			retval;

	/* if you want to support more than one controller in a system,
	 * usb_gadget_driver_{register,unregister}() must change.
	 */
	if (the_controller) {
		WARN(dev, "ignoring %s\n", pci_name(pdev));
		return -EBUSY;
	}
	if (!pdev->irq) {
		printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
		retval = -ENODEV;
		goto done;
	}

	/* alloc, and start init */
	dev = kmalloc (sizeof *dev, GFP_KERNEL);
	if (dev == NULL){
		pr_debug("enomem %s\n", pci_name(pdev));
		retval = -ENOMEM;
		goto done;
	}

	memset(dev, 0, sizeof *dev);
	spin_lock_init(&dev->lock);
	dev->pdev = pdev;
	dev->gadget.ops = &goku_ops;

	/* the "gadget" abstracts/virtualizes the controller */
	strcpy(dev->gadget.dev.bus_id, "gadget");
	dev->gadget.dev.parent = &pdev->dev;
	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
	dev->gadget.dev.release = gadget_release;
	dev->gadget.name = driver_name;

	/* now all the pci goodies ... */
	retval = pci_enable_device(pdev);
	if (retval < 0) {
		DBG(dev, "can't enable, %d\n", retval);
		goto done;
	}
	dev->enabled = 1;

	resource = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	if (!request_mem_region(resource, len, driver_name)) {
		DBG(dev, "controller already in use\n");
		retval = -EBUSY;
		goto done;
	}
	dev->got_region = 1;

	base = ioremap_nocache(resource, len);
	if (base == NULL) {
		DBG(dev, "can't map memory\n");
		retval = -EFAULT;
		goto done;
	}
	dev->regs = (struct goku_udc_regs __iomem *) base;

	pci_set_drvdata(pdev, dev);
	INFO(dev, "%s\n", driver_desc);
	INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
	INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);

	/* init to known state, then setup irqs */
	udc_reset(dev);
	udc_reinit (dev);
	if (request_irq(pdev->irq, goku_irq, IRQF_SHARED/*|IRQF_SAMPLE_RANDOM*/,
			driver_name, dev) != 0) {
		DBG(dev, "request interrupt %d failed\n", pdev->irq);
		retval = -EBUSY;
		goto done;
	}
	dev->got_irq = 1;
	if (use_dma)
		pci_set_master(pdev);


#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
#endif

	/* done */
	the_controller = dev;
	device_register(&dev->gadget.dev);

	return 0;

done:
	/* NOTE(review): goku_remove() reads dev via pci_get_drvdata(),
	 * but drvdata is only set after ioremap succeeds -- earlier
	 * failures would hand it a NULL dev.  Verify this error path.
	 */
	if (dev)
		goku_remove (pdev);
	return retval;
}
1887
1888
1889/*-------------------------------------------------------------------------*/
1890
/* match exactly one device: Toshiba's TC86C001 UDC function, by
 * vendor/device id plus the full PCI class (serial-usb, iface 0xfe)
 */
static struct pci_device_id pci_ids [] = { {
	.class = 	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask = 	~0,
	.vendor =	0x102f,		/* Toshiba */
	.device =	0x0107,		/* this UDC */
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,

}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
1902
/* glue this driver's probe/remove into the PCI core */
static struct pci_driver goku_pci_driver = {
	.name =		(char *) driver_name,
	.id_table =	pci_ids,

	.probe =	goku_probe,
	.remove =	goku_remove,

};
1911
/* module load: register with the PCI core; probe runs on a match */
static int __init init (void)
{
	return pci_register_driver (&goku_pci_driver);
}
module_init (init);
1917
/* module unload: unregister; the PCI core invokes goku_remove() */
static void __exit cleanup (void)
{
	pci_unregister_driver (&goku_pci_driver);
}
module_exit (cleanup);
1923