1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
4 *
5 * Copyright (C) 2000-2002 Lineo
6 *      by Stuart Lynne, Tom Rushworth, and Bruce Balden
7 * Copyright (C) 2002 Toshiba Corporation
8 * Copyright (C) 2003 MontaVista Software (source@mvista.com)
9 */
10
11/*
12 * This device has ep0 and three semi-configurable bulk/interrupt endpoints.
13 *
14 *  - Endpoint numbering is fixed: ep{1,2,3}-bulk
15 *  - Gadget drivers can choose ep maxpacket (8/16/32/64)
16 *  - Gadget drivers can choose direction (IN, OUT)
17 *  - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
18 */
19
20// #define	VERBOSE		/* extra debug messages (success too) */
21// #define	USB_TRACE	/* packet-level success messages */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/ioport.h>
28#include <linux/slab.h>
29#include <linux/errno.h>
30#include <linux/timer.h>
31#include <linux/list.h>
32#include <linux/interrupt.h>
33#include <linux/proc_fs.h>
34#include <linux/seq_file.h>
35#include <linux/device.h>
36#include <linux/usb/ch9.h>
37#include <linux/usb/gadget.h>
38#include <linux/prefetch.h>
39
40#include <asm/byteorder.h>
41#include <asm/io.h>
42#include <asm/irq.h>
43#include <asm/unaligned.h>
44
45
46#include "goku_udc.h"
47
48#define	DRIVER_DESC		"TC86C001 USB Device Controller"
49#define	DRIVER_VERSION		"30-Oct 2003"
50
51static const char driver_name [] = "goku_udc";
52static const char driver_desc [] = DRIVER_DESC;
53
54MODULE_AUTHOR("source@mvista.com");
55MODULE_DESCRIPTION(DRIVER_DESC);
56MODULE_LICENSE("GPL");
57
58
/*
 * IN dma behaves well under testing, though the IN-dma abort paths don't
 * seem to behave quite as expected.  It is used by default.
 *
 * OUT dma has documented design problems handling the common "short
 * packet" transfer-termination policy; it can't be enabled by default,
 * even if the OUT-dma abort problems were resolved.
 */
67static unsigned use_dma = 1;
68
69#if 0
70//#include <linux/moduleparam.h>
71/* "modprobe goku_udc use_dma=1" etc
72 *	0 to disable dma
73 *	1 to use IN dma only (normal operation)
74 *	2 to use IN and OUT dma
75 */
76module_param(use_dma, uint, S_IRUGO);
77#endif
78
79/*-------------------------------------------------------------------------*/
80
81static void nuke(struct goku_ep *, int status);
82
83static inline void
84command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
85{
86	writel(COMMAND_EP(epnum) | command, &regs->Command);
87	udelay(300);
88}
89
90static int
91goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
92{
93	struct goku_udc	*dev;
94	struct goku_ep	*ep;
95	u32		mode;
96	u16		max;
97	unsigned long	flags;
98
99	ep = container_of(_ep, struct goku_ep, ep);
100	if (!_ep || !desc
101			|| desc->bDescriptorType != USB_DT_ENDPOINT)
102		return -EINVAL;
103	dev = ep->dev;
104	if (ep == &dev->ep[0])
105		return -EINVAL;
106	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
107		return -ESHUTDOWN;
108	if (ep->num != usb_endpoint_num(desc))
109		return -EINVAL;
110
111	switch (usb_endpoint_type(desc)) {
112	case USB_ENDPOINT_XFER_BULK:
113	case USB_ENDPOINT_XFER_INT:
114		break;
115	default:
116		return -EINVAL;
117	}
118
119	if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
120			!= EPxSTATUS_EP_INVALID)
121		return -EBUSY;
122
123	/* enabling the no-toggle interrupt mode would need an api hook */
124	mode = 0;
125	max = get_unaligned_le16(&desc->wMaxPacketSize);
126	switch (max) {
127	case 64:
128		mode++;
129		fallthrough;
130	case 32:
131		mode++;
132		fallthrough;
133	case 16:
134		mode++;
135		fallthrough;
136	case 8:
137		mode <<= 3;
138		break;
139	default:
140		return -EINVAL;
141	}
142	mode |= 2 << 1;		/* bulk, or intr-with-toggle */
143
	/* ep1/ep2 dma direction is chosen early (ep1 OUT, ep2 IN); either
	 * endpoint still works in the other direction, but only with pio.
	 * be cautious with out-dma.
	 */
147	ep->is_in = usb_endpoint_dir_in(desc);
148	if (ep->is_in) {
149		mode |= 1;
150		ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
151	} else {
152		ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
153		if (ep->dma)
154			DBG(dev, "%s out-dma hides short packets\n",
155				ep->ep.name);
156	}
157
158	spin_lock_irqsave(&ep->dev->lock, flags);
159
160	/* ep1 and ep2 can do double buffering and/or dma */
161	if (ep->num < 3) {
162		struct goku_udc_regs __iomem	*regs = ep->dev->regs;
163		u32				tmp;
164
165		/* double buffer except (for now) with pio in */
166		tmp = ((ep->dma || !ep->is_in)
167				? 0x10	/* double buffered */
168				: 0x11	/* single buffer */
169			) << ep->num;
170		tmp |= readl(&regs->EPxSingle);
171		writel(tmp, &regs->EPxSingle);
172
173		tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
174		tmp |= readl(&regs->EPxBCS);
175		writel(tmp, &regs->EPxBCS);
176	}
177	writel(mode, ep->reg_mode);
178	command(ep->dev->regs, COMMAND_RESET, ep->num);
179	ep->ep.maxpacket = max;
180	ep->stopped = 0;
181	ep->ep.desc = desc;
182	spin_unlock_irqrestore(&ep->dev->lock, flags);
183
184	DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
185		ep->is_in ? "IN" : "OUT",
186		ep->dma ? "dma" : "pio",
187		max);
188
189	return 0;
190}
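
/* A minimal usage sketch (not part of this driver; the names below are
 * illustrative) of how a gadget function driver might bring up ep2-bulk
 * as a 64-byte full-speed bulk IN endpoint, the case where this
 * controller can use IN dma.  usb_ep_enable() ends up calling
 * goku_ep_enable() above.
 */
#if 0
static struct usb_endpoint_descriptor example_bulk_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN | 2,	/* must match ep2-bulk */
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= cpu_to_le16(64),	/* 8, 16, 32 or 64 */
};

static int example_enable_ep2(struct usb_ep *ep)
{
	ep->desc = &example_bulk_in_desc;
	return usb_ep_enable(ep);
}
#endif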
191
192static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
193{
194	struct goku_udc		*dev = ep->dev;
195
196	if (regs) {
197		command(regs, COMMAND_INVALID, ep->num);
198		if (ep->num) {
199			if (ep->num == UDC_MSTWR_ENDPOINT)
200				dev->int_enable &= ~(INT_MSTWREND
201							|INT_MSTWRTMOUT);
202			else if (ep->num == UDC_MSTRD_ENDPOINT)
203				dev->int_enable &= ~INT_MSTRDEND;
204			dev->int_enable &= ~INT_EPxDATASET (ep->num);
205		} else
206			dev->int_enable &= ~INT_EP0;
207		writel(dev->int_enable, &regs->int_enable);
208		readl(&regs->int_enable);
209		if (ep->num < 3) {
210			struct goku_udc_regs __iomem	*r = ep->dev->regs;
211			u32				tmp;
212
213			tmp = readl(&r->EPxSingle);
214			tmp &= ~(0x11 << ep->num);
215			writel(tmp, &r->EPxSingle);
216
217			tmp = readl(&r->EPxBCS);
218			tmp &= ~(0x11 << ep->num);
219			writel(tmp, &r->EPxBCS);
220		}
221		/* reset dma in case we're still using it */
222		if (ep->dma) {
223			u32	master;
224
225			master = readl(&regs->dma_master) & MST_RW_BITS;
226			if (ep->num == UDC_MSTWR_ENDPOINT) {
227				master &= ~MST_W_BITS;
228				master |= MST_WR_RESET;
229			} else {
230				master &= ~MST_R_BITS;
231				master |= MST_RD_RESET;
232			}
233			writel(master, &regs->dma_master);
234		}
235	}
236
237	usb_ep_set_maxpacket_limit(&ep->ep, MAX_FIFO_SIZE);
238	ep->ep.desc = NULL;
239	ep->stopped = 1;
240	ep->irqs = 0;
241	ep->dma = 0;
242}
243
244static int goku_ep_disable(struct usb_ep *_ep)
245{
246	struct goku_ep	*ep;
247	struct goku_udc	*dev;
248	unsigned long	flags;
249
250	ep = container_of(_ep, struct goku_ep, ep);
251	if (!_ep || !ep->ep.desc)
252		return -ENODEV;
253	dev = ep->dev;
254	if (dev->ep0state == EP0_SUSPEND)
255		return -EBUSY;
256
257	VDBG(dev, "disable %s\n", _ep->name);
258
259	spin_lock_irqsave(&dev->lock, flags);
260	nuke(ep, -ESHUTDOWN);
261	ep_reset(dev->regs, ep);
262	spin_unlock_irqrestore(&dev->lock, flags);
263
264	return 0;
265}
266
267/*-------------------------------------------------------------------------*/
268
269static struct usb_request *
270goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
271{
272	struct goku_request	*req;
273
274	if (!_ep)
275		return NULL;
276	req = kzalloc(sizeof *req, gfp_flags);
277	if (!req)
278		return NULL;
279
280	INIT_LIST_HEAD(&req->queue);
281	return &req->req;
282}
283
284static void
285goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
286{
287	struct goku_request	*req;
288
289	if (!_ep || !_req)
290		return;
291
292	req = container_of(_req, struct goku_request, req);
293	WARN_ON(!list_empty(&req->queue));
294	kfree(req);
295}
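
/* Request lifecycle from the gadget driver's side, sketched with
 * illustrative names (not part of this driver):  usb_ep_alloc_request()
 * and usb_ep_free_request() land in the alloc_request/free_request ops
 * above, and the kmalloc'd buffer keeps pio workable.
 */
#if 0
static struct usb_request *example_alloc_req(struct usb_ep *ep, unsigned len)
{
	struct usb_request	*req;

	req = usb_ep_alloc_request(ep, GFP_KERNEL);
	if (!req)
		return NULL;
	req->length = len;
	req->buf = kmalloc(len, GFP_KERNEL);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return NULL;
	}
	return req;
}

static void example_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
#endif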
296
297/*-------------------------------------------------------------------------*/
298
299static void
300done(struct goku_ep *ep, struct goku_request *req, int status)
301{
302	struct goku_udc		*dev;
303	unsigned		stopped = ep->stopped;
304
305	list_del_init(&req->queue);
306
307	if (likely(req->req.status == -EINPROGRESS))
308		req->req.status = status;
309	else
310		status = req->req.status;
311
312	dev = ep->dev;
313
314	if (ep->dma)
315		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
316
317#ifndef USB_TRACE
318	if (status && status != -ESHUTDOWN)
319#endif
320		VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
321			ep->ep.name, &req->req, status,
322			req->req.actual, req->req.length);
323
324	/* don't modify queue heads during completion callback */
325	ep->stopped = 1;
326	spin_unlock(&dev->lock);
327	usb_gadget_giveback_request(&ep->ep, &req->req);
328	spin_lock(&dev->lock);
329	ep->stopped = stopped;
330}
331
332/*-------------------------------------------------------------------------*/
333
334static inline int
335write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
336{
337	unsigned	length, count;
338
339	length = min(req->req.length - req->req.actual, max);
340	req->req.actual += length;
341
342	count = length;
343	while (likely(count--))
344		writel(*buf++, fifo);
345	return length;
346}
347
348// return:  0 = still running, 1 = completed, negative = errno
349static int write_fifo(struct goku_ep *ep, struct goku_request *req)
350{
351	struct goku_udc	*dev = ep->dev;
352	u32		tmp;
353	u8		*buf;
354	unsigned	count;
355	int		is_last;
356
	tmp = readl(&dev->regs->DataSet);
	buf = req->req.buf + req->req.actual;
	prefetch(buf);

	if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
363		return -EL2HLT;
364
365	/* NOTE:  just single-buffered PIO-IN for now.  */
366	if (unlikely((tmp & DATASET_A(ep->num)) != 0))
367		return 0;
368
369	/* clear our "packet available" irq */
370	if (ep->num != 0)
371		writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);
372
373	count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);
374
375	/* last packet often short (sometimes a zlp, especially on ep0) */
376	if (unlikely(count != ep->ep.maxpacket)) {
377		writel(~(1<<ep->num), &dev->regs->EOP);
378		if (ep->num == 0) {
379			dev->ep[0].stopped = 1;
380			dev->ep0state = EP0_STATUS;
381		}
382		is_last = 1;
383	} else {
384		if (likely(req->req.length != req->req.actual)
385				|| req->req.zero)
386			is_last = 0;
387		else
388			is_last = 1;
389	}
390#if 0		/* printk seemed to trash is_last...*/
391//#ifdef USB_TRACE
392	VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
393		ep->ep.name, count, is_last ? "/last" : "",
394		req->req.length - req->req.actual, req);
395#endif
396
397	/* requests complete when all IN data is in the FIFO,
398	 * or sometimes later, if a zlp was needed.
399	 */
400	if (is_last) {
401		done(ep, req, 0);
402		return 1;
403	}
404
405	return 0;
406}
407
408static int read_fifo(struct goku_ep *ep, struct goku_request *req)
409{
410	struct goku_udc_regs __iomem	*regs;
411	u32				size, set;
412	u8				*buf;
413	unsigned			bufferspace, is_short, dbuff;
414
415	regs = ep->dev->regs;
416top:
417	buf = req->req.buf + req->req.actual;
418	prefetchw(buf);
419
420	if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
421		return -EL2HLT;
422
423	dbuff = (ep->num == 1 || ep->num == 2);
424	do {
425		/* ack dataset irq matching the status we'll handle */
426		if (ep->num != 0)
427			writel(~INT_EPxDATASET(ep->num), &regs->int_status);
428
429		set = readl(&regs->DataSet) & DATASET_AB(ep->num);
430		size = readl(&regs->EPxSizeLA[ep->num]);
431		bufferspace = req->req.length - req->req.actual;
432
433		/* usually do nothing without an OUT packet */
434		if (likely(ep->num != 0 || bufferspace != 0)) {
435			if (unlikely(set == 0))
436				break;
437			/* use ep1/ep2 double-buffering for OUT */
438			if (!(size & PACKET_ACTIVE))
439				size = readl(&regs->EPxSizeLB[ep->num]);
440			if (!(size & PACKET_ACTIVE))	/* "can't happen" */
441				break;
442			size &= DATASIZE;	/* EPxSizeH == 0 */
443
444		/* ep0out no-out-data case for set_config, etc */
445		} else
446			size = 0;
447
448		/* read all bytes from this packet */
449		req->req.actual += size;
450		is_short = (size < ep->ep.maxpacket);
451#ifdef USB_TRACE
452		VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
453			ep->ep.name, size, is_short ? "/S" : "",
454			req, req->req.actual, req->req.length);
455#endif
456		while (likely(size-- != 0)) {
457			u8	byte = (u8) readl(ep->reg_fifo);
458
459			if (unlikely(bufferspace == 0)) {
460				/* this happens when the driver's buffer
461				 * is smaller than what the host sent.
462				 * discard the extra data in this packet.
463				 */
464				if (req->req.status != -EOVERFLOW)
465					DBG(ep->dev, "%s overflow %u\n",
466						ep->ep.name, size);
467				req->req.status = -EOVERFLOW;
468			} else {
469				*buf++ = byte;
470				bufferspace--;
471			}
472		}
473
474		/* completion */
475		if (unlikely(is_short || req->req.actual == req->req.length)) {
476			if (unlikely(ep->num == 0)) {
477				/* non-control endpoints now usable? */
478				if (ep->dev->req_config)
479					writel(ep->dev->configured
480							? USBSTATE_CONFIGURED
481							: 0,
482						&regs->UsbState);
483				/* ep0out status stage */
484				writel(~(1<<0), &regs->EOP);
485				ep->stopped = 1;
486				ep->dev->ep0state = EP0_STATUS;
487			}
488			done(ep, req, 0);
489
490			/* empty the second buffer asap */
491			if (dbuff && !list_empty(&ep->queue)) {
492				req = list_entry(ep->queue.next,
493						struct goku_request, queue);
494				goto top;
495			}
496			return 1;
497		}
498	} while (dbuff);
499	return 0;
500}
501
502static inline void
503pio_irq_enable(struct goku_udc *dev,
504		struct goku_udc_regs __iomem *regs, int epnum)
505{
506	dev->int_enable |= INT_EPxDATASET (epnum);
507	writel(dev->int_enable, &regs->int_enable);
508	/* write may still be posted */
509}
510
511static inline void
512pio_irq_disable(struct goku_udc *dev,
513		struct goku_udc_regs __iomem *regs, int epnum)
514{
515	dev->int_enable &= ~INT_EPxDATASET (epnum);
516	writel(dev->int_enable, &regs->int_enable);
517	/* write may still be posted */
518}
519
520static inline void
521pio_advance(struct goku_ep *ep)
522{
523	struct goku_request	*req;
524
525	if (unlikely(list_empty (&ep->queue)))
526		return;
527	req = list_entry(ep->queue.next, struct goku_request, queue);
528	(ep->is_in ? write_fifo : read_fifo)(ep, req);
529}
530
531
532/*-------------------------------------------------------------------------*/
533
534// return:  0 = q running, 1 = q stopped, negative = errno
535static int start_dma(struct goku_ep *ep, struct goku_request *req)
536{
537	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
538	u32				master;
539	u32				start = req->req.dma;
540	u32				end = start + req->req.length - 1;
541
542	master = readl(&regs->dma_master) & MST_RW_BITS;
543
544	/* re-init the bits affecting IN dma; careful with zlps */
545	if (likely(ep->is_in)) {
546		if (unlikely(master & MST_RD_ENA)) {
547			DBG (ep->dev, "start, IN active dma %03x!!\n",
548				master);
549//			return -EL2HLT;
550		}
551		writel(end, &regs->in_dma_end);
552		writel(start, &regs->in_dma_start);
553
554		master &= ~MST_R_BITS;
555		if (unlikely(req->req.length == 0))
556			master |= MST_RD_ENA | MST_RD_EOPB;
557		else if ((req->req.length % ep->ep.maxpacket) != 0
558					|| req->req.zero)
559			master |= MST_RD_ENA | MST_EOPB_ENA;
560		else
561			master |= MST_RD_ENA | MST_EOPB_DIS;
562
563		ep->dev->int_enable |= INT_MSTRDEND;
564
565	/* Goku DMA-OUT merges short packets, which plays poorly with
566	 * protocols where short packets mark the transfer boundaries.
567	 * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
568	 * ending transfers after 3 SOFs; we don't turn it on.
569	 */
570	} else {
571		if (unlikely(master & MST_WR_ENA)) {
572			DBG (ep->dev, "start, OUT active dma %03x!!\n",
573				master);
574//			return -EL2HLT;
575		}
576		writel(end, &regs->out_dma_end);
577		writel(start, &regs->out_dma_start);
578
579		master &= ~MST_W_BITS;
580		master |= MST_WR_ENA | MST_TIMEOUT_DIS;
581
582		ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
583	}
584
585	writel(master, &regs->dma_master);
586	writel(ep->dev->int_enable, &regs->int_enable);
587	return 0;
588}
589
590static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
591{
592	struct goku_request		*req;
593	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
594	u32				master;
595
596	master = readl(&regs->dma_master);
597
598	if (unlikely(list_empty(&ep->queue))) {
599stop:
600		if (ep->is_in)
601			dev->int_enable &= ~INT_MSTRDEND;
602		else
603			dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
604		writel(dev->int_enable, &regs->int_enable);
605		return;
606	}
607	req = list_entry(ep->queue.next, struct goku_request, queue);
608
609	/* normal hw dma completion (not abort) */
610	if (likely(ep->is_in)) {
611		if (unlikely(master & MST_RD_ENA))
612			return;
613		req->req.actual = readl(&regs->in_dma_current);
614	} else {
615		if (unlikely(master & MST_WR_ENA))
616			return;
617
618		/* hardware merges short packets, and also hides packet
619		 * overruns.  a partial packet MAY be in the fifo here.
620		 */
621		req->req.actual = readl(&regs->out_dma_current);
622	}
623	req->req.actual -= req->req.dma;
624	req->req.actual++;
625
626#ifdef USB_TRACE
627	VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
628		ep->ep.name, ep->is_in ? "IN" : "OUT",
629		req->req.actual, req->req.length, req);
630#endif
631	done(ep, req, 0);
632	if (list_empty(&ep->queue))
633		goto stop;
634	req = list_entry(ep->queue.next, struct goku_request, queue);
635	(void) start_dma(ep, req);
636}
637
638static void abort_dma(struct goku_ep *ep, int status)
639{
640	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
641	struct goku_request		*req;
642	u32				curr, master;
643
644	/* NAK future host requests, hoping the implicit delay lets the
645	 * dma engine finish reading (or writing) its latest packet and
646	 * empty the dma buffer (up to 16 bytes).
647	 *
648	 * This avoids needing to clean up a partial packet in the fifo;
649	 * we can't do that for IN without side effects to HALT and TOGGLE.
650	 */
651	command(regs, COMMAND_FIFO_DISABLE, ep->num);
652	req = list_entry(ep->queue.next, struct goku_request, queue);
653	master = readl(&regs->dma_master) & MST_RW_BITS;
654
655	/* FIXME using these resets isn't usably documented. this may
656	 * not work unless it's followed by disabling the endpoint.
657	 *
658	 * FIXME the OUT reset path doesn't even behave consistently.
659	 */
660	if (ep->is_in) {
661		if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
662			goto finished;
663		curr = readl(&regs->in_dma_current);
664
665		writel(curr, &regs->in_dma_end);
666		writel(curr, &regs->in_dma_start);
667
668		master &= ~MST_R_BITS;
669		master |= MST_RD_RESET;
670		writel(master, &regs->dma_master);
671
672		if (readl(&regs->dma_master) & MST_RD_ENA)
673			DBG(ep->dev, "IN dma active after reset!\n");
674
675	} else {
676		if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
677			goto finished;
678		curr = readl(&regs->out_dma_current);
679
680		writel(curr, &regs->out_dma_end);
681		writel(curr, &regs->out_dma_start);
682
683		master &= ~MST_W_BITS;
684		master |= MST_WR_RESET;
685		writel(master, &regs->dma_master);
686
687		if (readl(&regs->dma_master) & MST_WR_ENA)
688			DBG(ep->dev, "OUT dma active after reset!\n");
689	}
690	req->req.actual = (curr - req->req.dma) + 1;
691	req->req.status = status;
692
693	VDBG(ep->dev, "%s %s %s %d/%d\n", __func__, ep->ep.name,
694		ep->is_in ? "IN" : "OUT",
695		req->req.actual, req->req.length);
696
697	command(regs, COMMAND_FIFO_ENABLE, ep->num);
698
699	return;
700
701finished:
702	/* dma already completed; no abort needed */
703	command(regs, COMMAND_FIFO_ENABLE, ep->num);
704	req->req.actual = req->req.length;
705	req->req.status = 0;
706}
707
708/*-------------------------------------------------------------------------*/
709
710static int
711goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
712{
713	struct goku_request	*req;
714	struct goku_ep		*ep;
715	struct goku_udc		*dev;
716	unsigned long		flags;
717	int			status;
718
719	/* always require a cpu-view buffer so pio works */
720	req = container_of(_req, struct goku_request, req);
721	if (unlikely(!_req || !_req->complete
722			|| !_req->buf || !list_empty(&req->queue)))
723		return -EINVAL;
724	ep = container_of(_ep, struct goku_ep, ep);
725	if (unlikely(!_ep || (!ep->ep.desc && ep->num != 0)))
726		return -EINVAL;
727	dev = ep->dev;
728	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
729		return -ESHUTDOWN;
730
731	/* can't touch registers when suspended */
732	if (dev->ep0state == EP0_SUSPEND)
733		return -EBUSY;
734
735	/* set up dma mapping in case the caller didn't */
736	if (ep->dma) {
737		status = usb_gadget_map_request(&dev->gadget, &req->req,
738				ep->is_in);
739		if (status)
740			return status;
741	}
742
743#ifdef USB_TRACE
744	VDBG(dev, "%s queue req %p, len %u buf %p\n",
745			_ep->name, _req, _req->length, _req->buf);
746#endif
747
748	spin_lock_irqsave(&dev->lock, flags);
749
750	_req->status = -EINPROGRESS;
751	_req->actual = 0;
752
753	/* for ep0 IN without premature status, zlp is required and
754	 * writing EOP starts the status stage (OUT).
755	 */
756	if (unlikely(ep->num == 0 && ep->is_in))
757		_req->zero = 1;
758
759	/* kickstart this i/o queue? */
760	status = 0;
761	if (list_empty(&ep->queue) && likely(!ep->stopped)) {
762		/* dma:  done after dma completion IRQ (or error)
763		 * pio:  done after last fifo operation
764		 */
765		if (ep->dma)
766			status = start_dma(ep, req);
767		else
768			status = (ep->is_in ? write_fifo : read_fifo)(ep, req);
769
770		if (unlikely(status != 0)) {
771			if (status > 0)
772				status = 0;
773			req = NULL;
774		}
775
776	} /* else pio or dma irq handler advances the queue. */
777
778	if (likely(req != NULL))
779		list_add_tail(&req->queue, &ep->queue);
780
781	if (likely(!list_empty(&ep->queue))
782			&& likely(ep->num != 0)
783			&& !ep->dma
784			&& !(dev->int_enable & INT_EPxDATASET (ep->num)))
785		pio_irq_enable(dev, dev->regs, ep->num);
786
787	spin_unlock_irqrestore(&dev->lock, flags);
788
789	/* pci writes may still be posted */
790	return status;
791}
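
/* Submission path from the gadget driver's side, sketched with
 * illustrative names (not part of this driver):  usb_ep_queue() enters
 * goku_queue() above, and the completion callback later runs from
 * done() with the device lock dropped.
 */
#if 0
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
	if (req->status)
		pr_debug("%s: req %p status %d, %u/%u bytes\n",
			ep->name, req, req->status,
			req->actual, req->length);
	/* requeue, recycle, or free the request here */
}

static int example_submit(struct usb_ep *ep, struct usb_request *req)
{
	req->complete = example_complete;
	req->zero = 0;	/* 1 forces a zlp after maxpacket-multiple IN data */
	return usb_ep_queue(ep, req, GFP_ATOMIC);
}
#endif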
792
793/* dequeue ALL requests */
794static void nuke(struct goku_ep *ep, int status)
795{
796	struct goku_request	*req;
797
798	ep->stopped = 1;
799	if (list_empty(&ep->queue))
800		return;
801	if (ep->dma)
802		abort_dma(ep, status);
803	while (!list_empty(&ep->queue)) {
804		req = list_entry(ep->queue.next, struct goku_request, queue);
805		done(ep, req, status);
806	}
807}
808
809/* dequeue JUST ONE request */
810static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
811{
812	struct goku_request	*req = NULL, *iter;
813	struct goku_ep		*ep;
814	struct goku_udc		*dev;
815	unsigned long		flags;
816
817	ep = container_of(_ep, struct goku_ep, ep);
818	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
819		return -EINVAL;
820	dev = ep->dev;
821	if (!dev->driver)
822		return -ESHUTDOWN;
823
824	/* we can't touch (dma) registers when suspended */
825	if (dev->ep0state == EP0_SUSPEND)
826		return -EBUSY;
827
828	VDBG(dev, "%s %s %s %s %p\n", __func__, _ep->name,
829		ep->is_in ? "IN" : "OUT",
830		ep->dma ? "dma" : "pio",
831		_req);
832
833	spin_lock_irqsave(&dev->lock, flags);
834
835	/* make sure it's actually queued on this endpoint */
836	list_for_each_entry(iter, &ep->queue, queue) {
837		if (&iter->req != _req)
838			continue;
839		req = iter;
840		break;
841	}
842	if (!req) {
843		spin_unlock_irqrestore (&dev->lock, flags);
844		return -EINVAL;
845	}
846
847	if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
848		abort_dma(ep, -ECONNRESET);
849		done(ep, req, -ECONNRESET);
850		dma_advance(dev, ep);
851	} else if (!list_empty(&req->queue))
852		done(ep, req, -ECONNRESET);
853	else
854		req = NULL;
855	spin_unlock_irqrestore(&dev->lock, flags);
856
857	return req ? 0 : -EOPNOTSUPP;
858}
859
860/*-------------------------------------------------------------------------*/
861
862static void goku_clear_halt(struct goku_ep *ep)
863{
864	// assert (ep->num !=0)
865	VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
866	command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
867	command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
868	if (ep->stopped) {
869		ep->stopped = 0;
870		if (ep->dma) {
871			struct goku_request	*req;
872
873			if (list_empty(&ep->queue))
874				return;
875			req = list_entry(ep->queue.next, struct goku_request,
876						queue);
877			(void) start_dma(ep, req);
878		} else
879			pio_advance(ep);
880	}
881}
882
883static int goku_set_halt(struct usb_ep *_ep, int value)
884{
885	struct goku_ep	*ep;
886	unsigned long	flags;
887	int		retval = 0;
888
889	if (!_ep)
890		return -ENODEV;
891	ep = container_of (_ep, struct goku_ep, ep);
892
893	if (ep->num == 0) {
894		if (value) {
895			ep->dev->ep0state = EP0_STALL;
896			ep->dev->ep[0].stopped = 1;
897		} else
898			return -EINVAL;
899
900	/* don't change EPxSTATUS_EP_INVALID to READY */
901	} else if (!ep->ep.desc) {
902		DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
903		return -EINVAL;
904	}
905
906	spin_lock_irqsave(&ep->dev->lock, flags);
907	if (!list_empty(&ep->queue))
908		retval = -EAGAIN;
909	else if (ep->is_in && value
910			/* data in (either) packet buffer? */
911			&& (readl(&ep->dev->regs->DataSet)
912					& DATASET_AB(ep->num)))
913		retval = -EAGAIN;
914	else if (!value)
915		goku_clear_halt(ep);
916	else {
917		ep->stopped = 1;
918		VDBG(ep->dev, "%s set halt\n", ep->ep.name);
919		command(ep->dev->regs, COMMAND_STALL, ep->num);
920		readl(ep->reg_status);
921	}
922	spin_unlock_irqrestore(&ep->dev->lock, flags);
923	return retval;
924}
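
/* Halt handling from the gadget driver's side (illustrative sketch,
 * not part of this driver):  usb_ep_set_halt() and usb_ep_clear_halt()
 * call goku_set_halt() above; clearing the halt also restarts any
 * queued i/o via goku_clear_halt().
 */
#if 0
static void example_protocol_stall(struct usb_ep *ep)
{
	/* may return -EAGAIN while requests or IN data are still pending */
	if (usb_ep_set_halt(ep) == -EAGAIN)
		pr_debug("%s: busy, halt not set\n", ep->name);
}

static void example_recover(struct usb_ep *ep)
{
	usb_ep_clear_halt(ep);
}
#endif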
925
926static int goku_fifo_status(struct usb_ep *_ep)
927{
928	struct goku_ep			*ep;
929	struct goku_udc_regs __iomem	*regs;
930	u32				size;
931
932	if (!_ep)
933		return -ENODEV;
934	ep = container_of(_ep, struct goku_ep, ep);
935
936	/* size is only reported sanely for OUT */
937	if (ep->is_in)
938		return -EOPNOTSUPP;
939
940	/* ignores 16-byte dma buffer; SizeH == 0 */
941	regs = ep->dev->regs;
942	size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
943	size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
944	VDBG(ep->dev, "%s %s %u\n", __func__, ep->ep.name, size);
945	return size;
946}
947
948static void goku_fifo_flush(struct usb_ep *_ep)
949{
950	struct goku_ep			*ep;
951	struct goku_udc_regs __iomem	*regs;
952	u32				size;
953
954	if (!_ep)
955		return;
956	ep = container_of(_ep, struct goku_ep, ep);
957	VDBG(ep->dev, "%s %s\n", __func__, ep->ep.name);
958
959	/* don't change EPxSTATUS_EP_INVALID to READY */
960	if (!ep->ep.desc && ep->num != 0) {
961		DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
962		return;
963	}
964
965	regs = ep->dev->regs;
966	size = readl(&regs->EPxSizeLA[ep->num]);
967	size &= DATASIZE;
968
	/* Undesirable behavior:  FIFO_CLEAR also clears the
	 * endpoint halt feature.  For OUT, we _could_ just read
	 * the bytes out (PIO, if !ep->dma); for IN, there's no choice.
	 */
973	if (size)
974		command(regs, COMMAND_FIFO_CLEAR, ep->num);
975}
976
977static const struct usb_ep_ops goku_ep_ops = {
978	.enable		= goku_ep_enable,
979	.disable	= goku_ep_disable,
980
981	.alloc_request	= goku_alloc_request,
982	.free_request	= goku_free_request,
983
984	.queue		= goku_queue,
985	.dequeue	= goku_dequeue,
986
987	.set_halt	= goku_set_halt,
988	.fifo_status	= goku_fifo_status,
989	.fifo_flush	= goku_fifo_flush,
990};
991
992/*-------------------------------------------------------------------------*/
993
994static int goku_get_frame(struct usb_gadget *_gadget)
995{
996	return -EOPNOTSUPP;
997}
998
999static struct usb_ep *goku_match_ep(struct usb_gadget *g,
1000		struct usb_endpoint_descriptor *desc,
1001		struct usb_ss_ep_comp_descriptor *ep_comp)
1002{
1003	struct goku_udc	*dev = to_goku_udc(g);
1004	struct usb_ep *ep;
1005
1006	switch (usb_endpoint_type(desc)) {
1007	case USB_ENDPOINT_XFER_INT:
1008		/* single buffering is enough */
1009		ep = &dev->ep[3].ep;
1010		if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
1011			return ep;
1012		break;
1013	case USB_ENDPOINT_XFER_BULK:
1014		if (usb_endpoint_dir_in(desc)) {
1015			/* DMA may be available */
1016			ep = &dev->ep[2].ep;
1017			if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
1018				return ep;
1019		}
1020		break;
1021	default:
1022		/* nothing */ ;
1023	}
1024
1025	return NULL;
1026}
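
/* Endpoint selection from the gadget driver's side, sketched with
 * illustrative names (not part of this driver):  usb_ep_autoconfig()
 * consults goku_match_ep() first and then falls back to the gadget
 * core's generic matching, so bulk OUT still lands on an unclaimed
 * endpoint (ep1) even though only INT and bulk IN are special-cased
 * above.
 */
#if 0
static int example_pick_eps(struct usb_gadget *gadget,
		struct usb_endpoint_descriptor *in_desc,
		struct usb_endpoint_descriptor *out_desc)
{
	struct usb_ep	*in_ep, *out_ep;

	in_ep = usb_ep_autoconfig(gadget, in_desc);	/* expect ep2, dma-capable */
	out_ep = usb_ep_autoconfig(gadget, out_desc);
	if (!in_ep || !out_ep)
		return -ENODEV;
	return 0;
}
#endif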
1027
1028static int goku_udc_start(struct usb_gadget *g,
1029		struct usb_gadget_driver *driver);
1030static int goku_udc_stop(struct usb_gadget *g);
1031
1032static const struct usb_gadget_ops goku_ops = {
1033	.get_frame	= goku_get_frame,
1034	.udc_start	= goku_udc_start,
1035	.udc_stop	= goku_udc_stop,
1036	.match_ep	= goku_match_ep,
1037	// no remote wakeup
1038	// not selfpowered
1039};
1040
1041/*-------------------------------------------------------------------------*/
1042
1043static inline const char *dmastr(void)
1044{
1045	if (use_dma == 0)
1046		return "(dma disabled)";
1047	else if (use_dma == 2)
1048		return "(dma IN and OUT)";
1049	else
1050		return "(dma IN)";
1051}
1052
1053#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1054
1055static const char proc_node_name [] = "driver/udc";
1056
1057#define FOURBITS "%s%s%s%s"
1058#define EIGHTBITS FOURBITS FOURBITS
1059
1060static void dump_intmask(struct seq_file *m, const char *label, u32 mask)
1061{
1062	/* int_status is the same format ... */
1063	seq_printf(m, "%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
1064		   label, mask,
1065		   (mask & INT_PWRDETECT) ? " power" : "",
1066		   (mask & INT_SYSERROR) ? " sys" : "",
1067		   (mask & INT_MSTRDEND) ? " in-dma" : "",
1068		   (mask & INT_MSTWRTMOUT) ? " wrtmo" : "",
1069
1070		   (mask & INT_MSTWREND) ? " out-dma" : "",
1071		   (mask & INT_MSTWRSET) ? " wrset" : "",
1072		   (mask & INT_ERR) ? " err" : "",
1073		   (mask & INT_SOF) ? " sof" : "",
1074
1075		   (mask & INT_EP3NAK) ? " ep3nak" : "",
1076		   (mask & INT_EP2NAK) ? " ep2nak" : "",
1077		   (mask & INT_EP1NAK) ? " ep1nak" : "",
1078		   (mask & INT_EP3DATASET) ? " ep3" : "",
1079
1080		   (mask & INT_EP2DATASET) ? " ep2" : "",
1081		   (mask & INT_EP1DATASET) ? " ep1" : "",
1082		   (mask & INT_STATUSNAK) ? " ep0snak" : "",
1083		   (mask & INT_STATUS) ? " ep0status" : "",
1084
1085		   (mask & INT_SETUP) ? " setup" : "",
1086		   (mask & INT_ENDPOINT0) ? " ep0" : "",
1087		   (mask & INT_USBRESET) ? " reset" : "",
1088		   (mask & INT_SUSPEND) ? " suspend" : "");
1089}
1090
1091static const char *udc_ep_state(enum ep0state state)
1092{
1093	switch (state) {
1094	case EP0_DISCONNECT:
1095		return "ep0_disconnect";
1096	case EP0_IDLE:
1097		return "ep0_idle";
1098	case EP0_IN:
1099		return "ep0_in";
1100	case EP0_OUT:
1101		return "ep0_out";
1102	case EP0_STATUS:
1103		return "ep0_status";
1104	case EP0_STALL:
1105		return "ep0_stall";
1106	case EP0_SUSPEND:
1107		return "ep0_suspend";
1108	}
1109
1110	return "ep0_?";
1111}
1112
1113static const char *udc_ep_status(u32 status)
1114{
1115	switch (status & EPxSTATUS_EP_MASK) {
1116	case EPxSTATUS_EP_READY:
1117		return "ready";
1118	case EPxSTATUS_EP_DATAIN:
1119		return "packet";
1120	case EPxSTATUS_EP_FULL:
1121		return "full";
1122	case EPxSTATUS_EP_TX_ERR:	/* host will retry */
1123		return "tx_err";
1124	case EPxSTATUS_EP_RX_ERR:
1125		return "rx_err";
1126	case EPxSTATUS_EP_BUSY:		/* ep0 only */
1127		return "busy";
1128	case EPxSTATUS_EP_STALL:
1129		return "stall";
1130	case EPxSTATUS_EP_INVALID:	/* these "can't happen" */
1131		return "invalid";
1132	}
1133
1134	return "?";
1135}
1136
1137static int udc_proc_read(struct seq_file *m, void *v)
1138{
1139	struct goku_udc			*dev = m->private;
1140	struct goku_udc_regs __iomem	*regs = dev->regs;
1141	unsigned long			flags;
1142	int				i, is_usb_connected;
1143	u32				tmp;
1144
1145	local_irq_save(flags);
1146
1147	/* basic device status */
1148	tmp = readl(&regs->power_detect);
1149	is_usb_connected = tmp & PW_DETECT;
1150	seq_printf(m,
1151		   "%s - %s\n"
1152		   "%s version: %s %s\n"
1153		   "Gadget driver: %s\n"
1154		   "Host %s, %s\n"
1155		   "\n",
1156		   pci_name(dev->pdev), driver_desc,
1157		   driver_name, DRIVER_VERSION, dmastr(),
1158		   dev->driver ? dev->driver->driver.name : "(none)",
1159		   is_usb_connected
1160			   ? ((tmp & PW_PULLUP) ? "full speed" : "powered")
1161			   : "disconnected",
1162		   udc_ep_state(dev->ep0state));
1163
1164	dump_intmask(m, "int_status", readl(&regs->int_status));
1165	dump_intmask(m, "int_enable", readl(&regs->int_enable));
1166
1167	if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
1168		goto done;
1169
1170	/* registers for (active) device and ep0 */
1171	seq_printf(m, "\nirqs %lu\ndataset %02x single.bcs %02x.%02x state %x addr %u\n",
1172		   dev->irqs, readl(&regs->DataSet),
1173		   readl(&regs->EPxSingle), readl(&regs->EPxBCS),
1174		   readl(&regs->UsbState),
1175		   readl(&regs->address));
1176	if (seq_has_overflowed(m))
1177		goto done;
1178
1179	tmp = readl(&regs->dma_master);
1180	seq_printf(m, "dma %03X =" EIGHTBITS "%s %s\n",
1181		   tmp,
1182		   (tmp & MST_EOPB_DIS) ? " eopb-" : "",
1183		   (tmp & MST_EOPB_ENA) ? " eopb+" : "",
1184		   (tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
1185		   (tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",
1186
1187		   (tmp & MST_RD_EOPB) ? " eopb" : "",
1188		   (tmp & MST_RD_RESET) ? " in_reset" : "",
1189		   (tmp & MST_WR_RESET) ? " out_reset" : "",
1190		   (tmp & MST_RD_ENA) ? " IN" : "",
1191
1192		   (tmp & MST_WR_ENA) ? " OUT" : "",
1193		   (tmp & MST_CONNECTION) ? "ep1in/ep2out" : "ep1out/ep2in");
1194	if (seq_has_overflowed(m))
1195		goto done;
1196
1197	/* dump endpoint queues */
1198	for (i = 0; i < 4; i++) {
1199		struct goku_ep		*ep = &dev->ep [i];
1200		struct goku_request	*req;
1201
1202		if (i && !ep->ep.desc)
1203			continue;
1204
1205		tmp = readl(ep->reg_status);
1206		seq_printf(m, "%s %s max %u %s, irqs %lu, status %02x (%s) " FOURBITS "\n",
1207			   ep->ep.name,
1208			   ep->is_in ? "in" : "out",
1209			   ep->ep.maxpacket,
1210			   ep->dma ? "dma" : "pio",
1211			   ep->irqs,
1212			   tmp, udc_ep_status(tmp),
1213			   (tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
1214			   (tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
1215			   (tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
1216			   (tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : "");
1217		if (seq_has_overflowed(m))
1218			goto done;
1219
1220		if (list_empty(&ep->queue)) {
1221			seq_puts(m, "\t(nothing queued)\n");
1222			if (seq_has_overflowed(m))
1223				goto done;
1224			continue;
1225		}
1226		list_for_each_entry(req, &ep->queue, queue) {
1227			if (ep->dma && req->queue.prev == &ep->queue) {
1228				if (i == UDC_MSTRD_ENDPOINT)
1229					tmp = readl(&regs->in_dma_current);
1230				else
1231					tmp = readl(&regs->out_dma_current);
1232				tmp -= req->req.dma;
1233				tmp++;
1234			} else
1235				tmp = req->req.actual;
1236
1237			seq_printf(m, "\treq %p len %u/%u buf %p\n",
1238				   &req->req, tmp, req->req.length,
1239				   req->req.buf);
1240			if (seq_has_overflowed(m))
1241				goto done;
1242		}
1243	}
1244
1245done:
1246	local_irq_restore(flags);
1247	return 0;
1248}
1249#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */
1250
1251/*-------------------------------------------------------------------------*/
1252
1253static void udc_reinit (struct goku_udc *dev)
1254{
1255	static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };
1256
1257	unsigned i;
1258
1259	INIT_LIST_HEAD (&dev->gadget.ep_list);
1260	dev->gadget.ep0 = &dev->ep [0].ep;
1261	dev->gadget.speed = USB_SPEED_UNKNOWN;
1262	dev->ep0state = EP0_DISCONNECT;
1263	dev->irqs = 0;
1264
1265	for (i = 0; i < 4; i++) {
1266		struct goku_ep	*ep = &dev->ep[i];
1267
1268		ep->num = i;
1269		ep->ep.name = names[i];
1270		ep->reg_fifo = &dev->regs->ep_fifo [i];
1271		ep->reg_status = &dev->regs->ep_status [i];
1272		ep->reg_mode = &dev->regs->ep_mode[i];
1273
1274		ep->ep.ops = &goku_ep_ops;
1275		list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
1276		ep->dev = dev;
1277		INIT_LIST_HEAD (&ep->queue);
1278
1279		ep_reset(NULL, ep);
1280
1281		if (i == 0)
1282			ep->ep.caps.type_control = true;
1283		else
1284			ep->ep.caps.type_bulk = true;
1285
1286		ep->ep.caps.dir_in = true;
1287		ep->ep.caps.dir_out = true;
1288	}
1289
1290	dev->ep[0].reg_mode = NULL;
1291	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, MAX_EP0_SIZE);
1292	list_del_init (&dev->ep[0].ep.ep_list);
1293}
1294
1295static void udc_reset(struct goku_udc *dev)
1296{
1297	struct goku_udc_regs __iomem	*regs = dev->regs;
1298
1299	writel(0, &regs->power_detect);
1300	writel(0, &regs->int_enable);
1301	readl(&regs->int_enable);
1302	dev->int_enable = 0;
1303
	/* deassert reset and leave USB D+ at hi-Z (no pullup);
	 * don't let the INT_PWRDETECT sequence begin
	 */
1307	udelay(250);
1308	writel(PW_RESETB, &regs->power_detect);
1309	readl(&regs->int_enable);
1310}
1311
1312static void ep0_start(struct goku_udc *dev)
1313{
1314	struct goku_udc_regs __iomem	*regs = dev->regs;
1315	unsigned			i;
1316
1317	VDBG(dev, "%s\n", __func__);
1318
1319	udc_reset(dev);
1320	udc_reinit (dev);
1321	//writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);
1322
1323	/* hw handles set_address, set_feature, get_status; maybe more */
1324	writel(   G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
1325		| G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
1326		| G_REQMODE_GET_DESC
1327		| G_REQMODE_CLEAR_FEAT
1328		, &regs->reqmode);
1329
1330	for (i = 0; i < 4; i++)
1331		dev->ep[i].irqs = 0;
1332
1333	/* can't modify descriptors after writing UsbReady */
1334	for (i = 0; i < DESC_LEN; i++)
1335		writel(0, &regs->descriptors[i]);
1336	writel(0, &regs->UsbReady);
1337
1338	/* expect ep0 requests when the host drops reset */
1339	writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
1340	dev->int_enable = INT_DEVWIDE | INT_EP0;
1341	writel(dev->int_enable, &dev->regs->int_enable);
1342	readl(&regs->int_enable);
1343	dev->gadget.speed = USB_SPEED_FULL;
1344	dev->ep0state = EP0_IDLE;
1345}
1346
1347static void udc_enable(struct goku_udc *dev)
1348{
1349	/* start enumeration now, or after power detect irq */
1350	if (readl(&dev->regs->power_detect) & PW_DETECT)
1351		ep0_start(dev);
1352	else {
1353		DBG(dev, "%s\n", __func__);
1354		dev->int_enable = INT_PWRDETECT;
1355		writel(dev->int_enable, &dev->regs->int_enable);
1356	}
1357}
1358
1359/*-------------------------------------------------------------------------*/
1360
1361/* keeping it simple:
1362 * - one bus driver, initted first;
1363 * - one function driver, initted second
1364 */
1365
1366/* when a driver is successfully registered, it will receive
1367 * control requests including set_configuration(), which enables
1368 * non-control requests.  then usb traffic follows until a
1369 * disconnect is reported.  then a host may connect again, or
1370 * the driver might get unbound.
1371 */
1372static int goku_udc_start(struct usb_gadget *g,
1373		struct usb_gadget_driver *driver)
1374{
1375	struct goku_udc	*dev = to_goku_udc(g);
1376
1377	/* hook up the driver */
1378	dev->driver = driver;
1379
1380	/*
1381	 * then enable host detection and ep0; and we're ready
1382	 * for set_configuration as well as eventual disconnect.
1383	 */
1384	udc_enable(dev);
1385
1386	return 0;
1387}
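
/* The function-driver half of this handshake, sketched with illustrative
 * names (not part of this driver):  registering a usb_gadget_driver with
 * the UDC core is what eventually triggers goku_udc_start()/goku_udc_stop()
 * here.  Real gadgets normally use the composite framework rather than
 * open-coding this.
 */
#if 0
static int example_bind(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	/* allocate per-function state, pick endpoints, prepare ep0 reqs */
	return 0;
}

static int example_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	/* handle class/vendor control requests, queue ep0 responses */
	return -EOPNOTSUPP;
}

static struct usb_gadget_driver example_gadget_driver = {
	.function	= "example",
	.max_speed	= USB_SPEED_FULL,
	.bind		= example_bind,
	.setup		= example_setup,
	.driver		= {
		.name	= "example_gadget",
	},
};

/* module init would then call usb_gadget_register_driver(&example_gadget_driver) */
#endif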
1388
1389static void stop_activity(struct goku_udc *dev)
1390{
1391	unsigned	i;
1392
1393	DBG (dev, "%s\n", __func__);
1394
	/* disconnect gadget driver after quiescing hw and the driver */
1396	udc_reset (dev);
1397	for (i = 0; i < 4; i++)
1398		nuke(&dev->ep [i], -ESHUTDOWN);
1399
1400	if (dev->driver)
1401		udc_enable(dev);
1402}
1403
1404static int goku_udc_stop(struct usb_gadget *g)
1405{
1406	struct goku_udc	*dev = to_goku_udc(g);
1407	unsigned long	flags;
1408
1409	spin_lock_irqsave(&dev->lock, flags);
1410	dev->driver = NULL;
1411	stop_activity(dev);
1412	spin_unlock_irqrestore(&dev->lock, flags);
1413
1414	return 0;
1415}
1416
1417/*-------------------------------------------------------------------------*/
1418
1419static void ep0_setup(struct goku_udc *dev)
1420{
1421	struct goku_udc_regs __iomem	*regs = dev->regs;
1422	struct usb_ctrlrequest		ctrl;
1423	int				tmp;
1424
1425	/* read SETUP packet and enter DATA stage */
1426	ctrl.bRequestType = readl(&regs->bRequestType);
1427	ctrl.bRequest = readl(&regs->bRequest);
1428	ctrl.wValue  = cpu_to_le16((readl(&regs->wValueH)  << 8)
1429					| readl(&regs->wValueL));
1430	ctrl.wIndex  = cpu_to_le16((readl(&regs->wIndexH)  << 8)
1431					| readl(&regs->wIndexL));
1432	ctrl.wLength = cpu_to_le16((readl(&regs->wLengthH) << 8)
1433					| readl(&regs->wLengthL));
1434	writel(0, &regs->SetupRecv);
1435
1436	nuke(&dev->ep[0], 0);
1437	dev->ep[0].stopped = 0;
1438	if (likely(ctrl.bRequestType & USB_DIR_IN)) {
1439		dev->ep[0].is_in = 1;
1440		dev->ep0state = EP0_IN;
1441		/* detect early status stages */
1442		writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
1443	} else {
1444		dev->ep[0].is_in = 0;
1445		dev->ep0state = EP0_OUT;
1446
1447		/* NOTE:  CLEAR_FEATURE is done in software so that we can
1448		 * synchronize transfer restarts after bulk IN stalls.  data
1449		 * won't even enter the fifo until the halt is cleared.
1450		 */
1451		switch (ctrl.bRequest) {
1452		case USB_REQ_CLEAR_FEATURE:
1453			switch (ctrl.bRequestType) {
1454			case USB_RECIP_ENDPOINT:
1455				tmp = le16_to_cpu(ctrl.wIndex) & 0x0f;
1456				/* active endpoint */
1457				if (tmp > 3 ||
1458				    (!dev->ep[tmp].ep.desc && tmp != 0))
1459					goto stall;
1460				if (ctrl.wIndex & cpu_to_le16(
1461						USB_DIR_IN)) {
1462					if (!dev->ep[tmp].is_in)
1463						goto stall;
1464				} else {
1465					if (dev->ep[tmp].is_in)
1466						goto stall;
1467				}
1468				if (ctrl.wValue != cpu_to_le16(
1469						USB_ENDPOINT_HALT))
1470					goto stall;
1471				if (tmp)
1472					goku_clear_halt(&dev->ep[tmp]);
1473succeed:
1474				/* start ep0out status stage */
1475				writel(~(1<<0), &regs->EOP);
1476				dev->ep[0].stopped = 1;
1477				dev->ep0state = EP0_STATUS;
1478				return;
1479			case USB_RECIP_DEVICE:
1480				/* device remote wakeup: always clear */
1481				if (ctrl.wValue != cpu_to_le16(1))
1482					goto stall;
1483				VDBG(dev, "clear dev remote wakeup\n");
1484				goto succeed;
1485			case USB_RECIP_INTERFACE:
1486				goto stall;
1487			default:		/* pass to gadget driver */
1488				break;
1489			}
1490			break;
1491		default:
1492			break;
1493		}
1494	}
1495
1496#ifdef USB_TRACE
1497	VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1498		ctrl.bRequestType, ctrl.bRequest,
1499		le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex),
1500		le16_to_cpu(ctrl.wLength));
1501#endif
1502
1503	/* hw wants to know when we're configured (or not) */
1504	dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
1505				&& ctrl.bRequestType == USB_RECIP_DEVICE);
1506	if (unlikely(dev->req_config))
1507		dev->configured = (ctrl.wValue != cpu_to_le16(0));
1508
1509	/* delegate everything to the gadget driver.
1510	 * it may respond after this irq handler returns.
1511	 */
1512	spin_unlock (&dev->lock);
1513	tmp = dev->driver->setup(&dev->gadget, &ctrl);
1514	spin_lock (&dev->lock);
1515	if (unlikely(tmp < 0)) {
1516stall:
1517#ifdef USB_TRACE
1518		VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
1519				ctrl.bRequestType, ctrl.bRequest, tmp);
1520#endif
1521		command(regs, COMMAND_STALL, 0);
1522		dev->ep[0].stopped = 1;
1523		dev->ep0state = EP0_STALL;
1524	}
1525
1526	/* expect at least one data or status stage irq */
1527}
1528
1529#define ACK(irqbit) { \
1530		stat &= ~irqbit; \
1531		writel(~irqbit, &regs->int_status); \
1532		handled = 1; \
1533		}
1534
1535static irqreturn_t goku_irq(int irq, void *_dev)
1536{
1537	struct goku_udc			*dev = _dev;
1538	struct goku_udc_regs __iomem	*regs = dev->regs;
1539	struct goku_ep			*ep;
1540	u32				stat, handled = 0;
1541	unsigned			i, rescans = 5;
1542
1543	spin_lock(&dev->lock);
1544
1545rescan:
1546	stat = readl(&regs->int_status) & dev->int_enable;
	if (!stat)
1548		goto done;
1549	dev->irqs++;
1550
1551	/* device-wide irqs */
1552	if (unlikely(stat & INT_DEVWIDE)) {
1553		if (stat & INT_SYSERROR) {
1554			ERROR(dev, "system error\n");
1555			stop_activity(dev);
1556			stat = 0;
1557			handled = 1;
1558			// FIXME have a neater way to prevent re-enumeration
1559			dev->driver = NULL;
1560			goto done;
1561		}
1562		if (stat & INT_PWRDETECT) {
1563			writel(~stat, &regs->int_status);
1564			if (readl(&dev->regs->power_detect) & PW_DETECT) {
1565				VDBG(dev, "connect\n");
1566				ep0_start(dev);
1567			} else {
1568				DBG(dev, "disconnect\n");
1569				if (dev->gadget.speed == USB_SPEED_FULL)
1570					stop_activity(dev);
1571				dev->ep0state = EP0_DISCONNECT;
1572				dev->int_enable = INT_DEVWIDE;
1573				writel(dev->int_enable, &dev->regs->int_enable);
1574			}
1575			stat = 0;
1576			handled = 1;
1577			goto done;
1578		}
1579		if (stat & INT_SUSPEND) {
1580			ACK(INT_SUSPEND);
1581			if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) {
1582				switch (dev->ep0state) {
1583				case EP0_DISCONNECT:
1584				case EP0_SUSPEND:
1585					goto pm_next;
1586				default:
1587					break;
1588				}
1589				DBG(dev, "USB suspend\n");
1590				dev->ep0state = EP0_SUSPEND;
1591				if (dev->gadget.speed != USB_SPEED_UNKNOWN
1592						&& dev->driver
1593						&& dev->driver->suspend) {
1594					spin_unlock(&dev->lock);
1595					dev->driver->suspend(&dev->gadget);
1596					spin_lock(&dev->lock);
1597				}
1598			} else {
1599				if (dev->ep0state != EP0_SUSPEND) {
1600					DBG(dev, "bogus USB resume %d\n",
1601						dev->ep0state);
1602					goto pm_next;
1603				}
1604				DBG(dev, "USB resume\n");
1605				dev->ep0state = EP0_IDLE;
1606				if (dev->gadget.speed != USB_SPEED_UNKNOWN
1607						&& dev->driver
1608						&& dev->driver->resume) {
1609					spin_unlock(&dev->lock);
1610					dev->driver->resume(&dev->gadget);
1611					spin_lock(&dev->lock);
1612				}
1613			}
1614		}
1615pm_next:
1616		if (stat & INT_USBRESET) {		/* hub reset done */
1617			ACK(INT_USBRESET);
1618			INFO(dev, "USB reset done, gadget %s\n",
1619				dev->driver->driver.name);
1620		}
1621		// and INT_ERR on some endpoint's crc/bitstuff/... problem
1622	}
1623
1624	/* progress ep0 setup, data, or status stages.
1625	 * no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
1626	 */
1627	if (stat & INT_SETUP) {
1628		ACK(INT_SETUP);
1629		dev->ep[0].irqs++;
1630		ep0_setup(dev);
1631	}
	if (stat & INT_STATUSNAK) {
		ACK(INT_STATUSNAK|INT_ENDPOINT0);
		if (dev->ep0state == EP0_IN) {
			ep = &dev->ep[0];
			ep->irqs++;
			nuke(ep, 0);
			writel(~(1<<0), &regs->EOP);
			dev->ep0state = EP0_STATUS;
		}
	}
	if (stat & INT_ENDPOINT0) {
		ACK(INT_ENDPOINT0);
		ep = &dev->ep[0];
		ep->irqs++;
		pio_advance(ep);
	}

	/* dma completion */
	if (stat & INT_MSTRDEND) {	/* IN */
		ACK(INT_MSTRDEND);
		ep = &dev->ep[UDC_MSTRD_ENDPOINT];
		ep->irqs++;
		dma_advance(dev, ep);
	}
	if (stat & INT_MSTWREND) {	/* OUT */
		ACK(INT_MSTWREND);
		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
		ep->irqs++;
		dma_advance(dev, ep);
	}
	if (stat & INT_MSTWRTMOUT) {	/* OUT */
		ACK(INT_MSTWRTMOUT);
		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
		ep->irqs++;
		ERROR(dev, "%s write timeout?\n", ep->ep.name);
		// reset dma? then dma_advance()
	}
1669
1670	/* pio */
1671	for (i = 1; i < 4; i++) {
1672		u32		tmp = INT_EPxDATASET(i);
1673
1674		if (!(stat & tmp))
1675			continue;
1676		ep = &dev->ep[i];
1677		pio_advance(ep);
1678		if (list_empty (&ep->queue))
1679			pio_irq_disable(dev, regs, i);
1680		stat &= ~tmp;
1681		handled = 1;
1682		ep->irqs++;
1683	}
1684
1685	if (rescans--)
1686		goto rescan;
1687
1688done:
1689	(void)readl(&regs->int_enable);
1690	spin_unlock(&dev->lock);
1691	if (stat)
1692		DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
1693				readl(&regs->int_status), dev->int_enable);
1694	return IRQ_RETVAL(handled);
1695}
1696
1697#undef ACK
1698
1699/*-------------------------------------------------------------------------*/
1700
1701static void gadget_release(struct device *_dev)
1702{
1703	struct goku_udc	*dev = dev_get_drvdata(_dev);
1704
1705	kfree(dev);
1706}
1707
1708/* tear down the binding between this driver and the pci device */
1709
1710static void goku_remove(struct pci_dev *pdev)
1711{
1712	struct goku_udc		*dev = pci_get_drvdata(pdev);
1713
1714	DBG(dev, "%s\n", __func__);
1715
1716	usb_del_gadget_udc(&dev->gadget);
1717
1718	BUG_ON(dev->driver);
1719
1720#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1721	remove_proc_entry(proc_node_name, NULL);
1722#endif
1723	if (dev->regs)
1724		udc_reset(dev);
1725	if (dev->got_irq)
1726		free_irq(pdev->irq, dev);
1727	if (dev->regs)
1728		iounmap(dev->regs);
1729	if (dev->got_region)
1730		release_mem_region(pci_resource_start (pdev, 0),
1731				pci_resource_len (pdev, 0));
1732	if (dev->enabled)
1733		pci_disable_device(pdev);
1734
1735	dev->regs = NULL;
1736
1737	INFO(dev, "unbind\n");
1738}
1739
1740/* wrap this driver around the specified pci device, but
1741 * don't respond over USB until a gadget driver binds to us.
1742 */
1743
1744static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1745{
1746	struct goku_udc		*dev = NULL;
1747	unsigned long		resource, len;
1748	void __iomem		*base = NULL;
1749	int			retval;
1750
1751	if (!pdev->irq) {
1752		printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
1753		retval = -ENODEV;
1754		goto err;
1755	}
1756
1757	/* alloc, and start init */
1758	dev = kzalloc (sizeof *dev, GFP_KERNEL);
1759	if (!dev) {
1760		retval = -ENOMEM;
1761		goto err;
1762	}
1763
1764	pci_set_drvdata(pdev, dev);
1765	spin_lock_init(&dev->lock);
1766	dev->pdev = pdev;
1767	dev->gadget.ops = &goku_ops;
1768	dev->gadget.max_speed = USB_SPEED_FULL;
1769
1770	/* the "gadget" abstracts/virtualizes the controller */
1771	dev->gadget.name = driver_name;
1772
1773	/* now all the pci goodies ... */
1774	retval = pci_enable_device(pdev);
1775	if (retval < 0) {
1776		DBG(dev, "can't enable, %d\n", retval);
1777		goto err;
1778	}
1779	dev->enabled = 1;
1780
1781	resource = pci_resource_start(pdev, 0);
1782	len = pci_resource_len(pdev, 0);
1783	if (!request_mem_region(resource, len, driver_name)) {
1784		DBG(dev, "controller already in use\n");
1785		retval = -EBUSY;
1786		goto err;
1787	}
1788	dev->got_region = 1;
1789
1790	base = ioremap(resource, len);
1791	if (base == NULL) {
1792		DBG(dev, "can't map memory\n");
1793		retval = -EFAULT;
1794		goto err;
1795	}
1796	dev->regs = (struct goku_udc_regs __iomem *) base;
1797
1798	INFO(dev, "%s\n", driver_desc);
1799	INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
1800	INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
1801
1802	/* init to known state, then setup irqs */
1803	udc_reset(dev);
1804	udc_reinit (dev);
1805	if (request_irq(pdev->irq, goku_irq, IRQF_SHARED,
1806			driver_name, dev) != 0) {
1807		DBG(dev, "request interrupt %d failed\n", pdev->irq);
1808		retval = -EBUSY;
1809		goto err;
1810	}
1811	dev->got_irq = 1;
1812	if (use_dma)
1813		pci_set_master(pdev);
1814
1815
1816#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1817	proc_create_single_data(proc_node_name, 0, NULL, udc_proc_read, dev);
1818#endif
1819
1820	retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
1821			gadget_release);
1822	if (retval)
1823		goto err;
1824
1825	return 0;
1826
1827err:
1828	if (dev)
1829		goku_remove (pdev);
1830	/* gadget_release is not registered yet, kfree explicitly */
1831	kfree(dev);
1832	return retval;
1833}
1834
1835
1836/*-------------------------------------------------------------------------*/
1837
1838static const struct pci_device_id pci_ids[] = { {
1839	.class =	PCI_CLASS_SERIAL_USB_DEVICE,
1840	.class_mask =	~0,
1841	.vendor =	0x102f,		/* Toshiba */
1842	.device =	0x0107,		/* this UDC */
1843	.subvendor =	PCI_ANY_ID,
1844	.subdevice =	PCI_ANY_ID,
1845
1846}, { /* end: all zeroes */ }
1847};
1848MODULE_DEVICE_TABLE (pci, pci_ids);
1849
1850static struct pci_driver goku_pci_driver = {
1851	.name =		driver_name,
1852	.id_table =	pci_ids,
1853
1854	.probe =	goku_probe,
1855	.remove =	goku_remove,
1856
1857	/* FIXME add power management support */
1858};
1859
1860module_pci_driver(goku_pci_driver);
1861