1/*
2 * Driver for the PLX NET2280 USB device controller.
3 * Specs and errata are available from <http://www.plxtech.com>.
4 *
5 * PLX Technology Inc. (formerly NetChip Technology) supported the
6 * development of this driver.
7 *
8 *
9 * CODE STATUS HIGHLIGHTS
10 *
11 * This driver should work well with most "gadget" drivers, including
12 * the File Storage, Serial, and Ethernet/RNDIS gadget drivers
13 * as well as Gadget Zero and Gadgetfs.
14 *
15 * DMA is enabled by default.  Drivers using transfer queues might use
16 * DMA chaining to remove IRQ latencies between transfers.  (Except when
17 * short OUT transfers happen.)  Drivers can use the req->no_interrupt
18 * hint to completely eliminate some IRQs, if a later IRQ is guaranteed
19 * and DMA chaining is enabled.
20 *
21 * Note that almost all the errata workarounds here are only needed for
22 * rev1 chips.  Rev1a silicon (0110) fixes almost all of them.
23 */
24
25/*
26 * Copyright (C) 2003 David Brownell
27 * Copyright (C) 2003-2005 PLX Technology, Inc.
28 *
29 * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
30 *	with 2282 chip
31 *
32 * This program is free software; you can redistribute it and/or modify
33 * it under the terms of the GNU General Public License as published by
34 * the Free Software Foundation; either version 2 of the License, or
35 * (at your option) any later version.
36 *
37 * This program is distributed in the hope that it will be useful,
38 * but WITHOUT ANY WARRANTY; without even the implied warranty of
39 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
40 * GNU General Public License for more details.
41 *
42 * You should have received a copy of the GNU General Public License
43 * along with this program; if not, write to the Free Software
44 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
45 */
46
47#undef	DEBUG		/* messages on error and most fault paths */
48#undef	VERBOSE		/* extra debug messages (success too) */
49
50#include <linux/module.h>
51#include <linux/pci.h>
52#include <linux/dma-mapping.h>
53#include <linux/kernel.h>
54#include <linux/delay.h>
55#include <linux/ioport.h>
56#include <linux/slab.h>
57#include <linux/errno.h>
58#include <linux/init.h>
59#include <linux/timer.h>
60#include <linux/list.h>
61#include <linux/interrupt.h>
62#include <linux/moduleparam.h>
63#include <linux/device.h>
64#include <linux/usb/ch9.h>
65#include <linux/usb_gadget.h>
66
67#include <asm/byteorder.h>
68#include <asm/io.h>
69#include <asm/irq.h>
70#include <asm/system.h>
71#include <asm/unaligned.h>
72
73
74#define	DRIVER_DESC		"PLX NET228x USB Peripheral Controller"
75#define	DRIVER_VERSION		"2005 Sept 27"
76
77#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
78#define	EP_DONTUSE		13	/* nonzero */
79
80#define USE_RDK_LEDS		/* GPIO pins control three LEDs */
81
82
83static const char driver_name [] = "net2280";
84static const char driver_desc [] = DRIVER_DESC;
85
86static const char ep0name [] = "ep0";
87static const char *const ep_name [] = {
88	ep0name,
89	"ep-a", "ep-b", "ep-c", "ep-d",
90	"ep-e", "ep-f",
91};
92
93/* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO)
94 * use_dma_chaining -- dma descriptor queueing gives even more irq reduction
95 *
96 * The net2280 DMA engines are not tightly integrated with their FIFOs;
97 * not all cases are (yet) handled well in this driver or the silicon.
98 * Some gadget drivers work better with the dma support here than others.
99 * These two parameters let you use PIO or more aggressive DMA.
100 */
101static int use_dma = 1;
102static int use_dma_chaining = 0;
103
104/* "modprobe net2280 use_dma=n" etc */
105module_param (use_dma, bool, S_IRUGO);
106module_param (use_dma_chaining, bool, S_IRUGO);
107
108
109/* mode 0 == ep-{a,b,c,d} 1K fifo each
110 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
111 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
112 */
113static ushort fifo_mode = 0;
114
115/* "modprobe net2280 fifo_mode=1" etc */
116module_param (fifo_mode, ushort, 0644);
117
/* enable_suspend -- When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2280.  Otherwise,
 * USB suspend requests will be ignored.  This is acceptable for
 * self-powered devices.
 */
123static int enable_suspend = 0;
124
125/* "modprobe net2280 enable_suspend=1" etc */
126module_param (enable_suspend, bool, S_IRUGO);
127
128
129#define	DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
130
#if defined(CONFIG_USB_GADGET_DEBUG_FILES) || defined(DEBUG)
/* return a short name for a descriptor's transfer type, for use in
 * diagnostic messages; anything not bulk/iso/interrupt is "control".
 * (fix: dropped the stray ';' after the switch block -- it was an
 * empty statement that -Wextra/-pedantic warns about.)
 */
static char *type_string (u8 bmAttributes)
{
	switch (bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:	return "bulk";
	case USB_ENDPOINT_XFER_ISOC:	return "iso";
	case USB_ENDPOINT_XFER_INT:	return "intr";
	}
	return "control";
}
#endif
142
143#include "net2280.h"
144
145#define valid_bit	__constant_cpu_to_le32 (1 << VALID_BIT)
146#define dma_done_ie	__constant_cpu_to_le32 (1 << DMA_DONE_INTERRUPT_ENABLE)
147
148/*-------------------------------------------------------------------------*/
149
150static int
151net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
152{
153	struct net2280		*dev;
154	struct net2280_ep	*ep;
155	u32			max, tmp;
156	unsigned long		flags;
157
158	ep = container_of (_ep, struct net2280_ep, ep);
159	if (!_ep || !desc || ep->desc || _ep->name == ep0name
160			|| desc->bDescriptorType != USB_DT_ENDPOINT)
161		return -EINVAL;
162	dev = ep->dev;
163	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
164		return -ESHUTDOWN;
165
166	if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
167		return -EDOM;
168
169	/* sanity check ep-e/ep-f since their fifos are small */
170	max = le16_to_cpu (desc->wMaxPacketSize) & 0x1fff;
171	if (ep->num > 4 && max > 64)
172		return -ERANGE;
173
174	spin_lock_irqsave (&dev->lock, flags);
175	_ep->maxpacket = max & 0x7ff;
176	ep->desc = desc;
177
178	/* ep_reset() has already been called */
179	ep->stopped = 0;
180	ep->out_overflow = 0;
181
182	/* set speed-dependent max packet; may kick in high bandwidth */
183	set_idx_reg (dev->regs, REG_EP_MAXPKT (dev, ep->num), max);
184
185	/* FIFO lines can't go to different packets.  PIO is ok, so
186	 * use it instead of troublesome (non-bulk) multi-packet DMA.
187	 */
188	if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
189		DEBUG (ep->dev, "%s, no dma for maxpacket %d\n",
190			ep->ep.name, ep->ep.maxpacket);
191		ep->dma = NULL;
192	}
193
194	/* set type, direction, address; reset fifo counters */
195	writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
196	tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
197	if (tmp == USB_ENDPOINT_XFER_INT) {
198		if (dev->chiprev == 0100
199				&& dev->gadget.speed == USB_SPEED_HIGH
200				&& !(desc->bEndpointAddress & USB_DIR_IN))
201			writel ((1 << CLEAR_NAK_OUT_PACKETS_MODE),
202				&ep->regs->ep_rsp);
203	} else if (tmp == USB_ENDPOINT_XFER_BULK) {
204		/* catch some particularly blatant driver bugs */
205		if ((dev->gadget.speed == USB_SPEED_HIGH
206					&& max != 512)
207				|| (dev->gadget.speed == USB_SPEED_FULL
208					&& max > 64)) {
209			spin_unlock_irqrestore (&dev->lock, flags);
210			return -ERANGE;
211		}
212	}
213	ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0;
214	tmp <<= ENDPOINT_TYPE;
215	tmp |= desc->bEndpointAddress;
216	tmp |= (4 << ENDPOINT_BYTE_COUNT);	/* default full fifo lines */
217	tmp |= 1 << ENDPOINT_ENABLE;
218	wmb ();
219
220	/* for OUT transfers, block the rx fifo until a read is posted */
221	ep->is_in = (tmp & USB_DIR_IN) != 0;
222	if (!ep->is_in)
223		writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
224	else if (dev->pdev->device != 0x2280) {
225		/* Added for 2282, Don't use nak packets on an in endpoint,
226		 * this was ignored on 2280
227		 */
228		writel ((1 << CLEAR_NAK_OUT_PACKETS)
229			| (1 << CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
230	}
231
232	writel (tmp, &ep->regs->ep_cfg);
233
234	/* enable irqs */
235	if (!ep->dma) {				/* pio, per-packet */
236		tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
237		writel (tmp, &dev->regs->pciirqenb0);
238
239		tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
240			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
241		if (dev->pdev->device == 0x2280)
242			tmp |= readl (&ep->regs->ep_irqenb);
243		writel (tmp, &ep->regs->ep_irqenb);
244	} else {				/* dma, per-request */
245		tmp = (1 << (8 + ep->num));	/* completion */
246		tmp |= readl (&dev->regs->pciirqenb1);
247		writel (tmp, &dev->regs->pciirqenb1);
248
249		if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
250			tmp = (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
251			writel (tmp, &ep->regs->ep_irqenb);
252
253			tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
254			writel (tmp, &dev->regs->pciirqenb0);
255		}
256	}
257
258	tmp = desc->bEndpointAddress;
259	DEBUG (dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
260		_ep->name, tmp & 0x0f, DIR_STRING (tmp),
261		type_string (desc->bmAttributes),
262		ep->dma ? "dma" : "pio", max);
263
264	/* pci writes may still be posted */
265	spin_unlock_irqrestore (&dev->lock, flags);
266	return 0;
267}
268
269static int handshake (u32 __iomem *ptr, u32 mask, u32 done, int usec)
270{
271	u32	result;
272
273	do {
274		result = readl (ptr);
275		if (result == ~(u32)0)		/* "device unplugged" */
276			return -ENODEV;
277		result &= mask;
278		if (result == done)
279			return 0;
280		udelay (1);
281		usec--;
282	} while (usec > 0);
283	return -ETIMEDOUT;
284}
285
286static const struct usb_ep_ops net2280_ep_ops;
287
/* ep_reset - restore an endpoint to its chosen power-on defaults
 *
 * Forgets the descriptor and request queue, stops the endpoint's dma
 * channel (if any), masks its irqs, programs the default handshake
 * policy, and scrubs stale status bits.  Fifo sizing is configured
 * separately.
 */
static void ep_reset (struct net2280_regs __iomem *regs, struct net2280_ep *ep)
{
	u32		tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD (&ep->queue);

	/* maxpacket is re-learned by the next net2280_enable() */
	ep->ep.maxpacket = ~0;
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel (0, &ep->dma->dmactl);
		/* ack any stale dma status */
		writel (  (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
			| (1 << DMA_TRANSACTION_DONE_INTERRUPT)
			| (1 << DMA_ABORT)
			, &ep->dma->dmastat);

		tmp = readl (&regs->pciirqenb0);
		tmp &= ~(1 << ep->num);
		writel (tmp, &regs->pciirqenb0);
	} else {
		tmp = readl (&regs->pciirqenb1);
		tmp &= ~(1 << (8 + ep->num));	/* completion */
		writel (tmp, &regs->pciirqenb1);
	}
	writel (0, &ep->regs->ep_irqenb);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read (+note erratum 0112)
	 */
	if (!ep->is_in || ep->dev->pdev->device == 0x2280) {
		tmp = (1 << SET_NAK_OUT_PACKETS_MODE)
		| (1 << SET_NAK_OUT_PACKETS)
		| (1 << CLEAR_EP_HIDE_STATUS_PHASE)
		| (1 << CLEAR_INTERRUPT_MODE);
	} else {
		/* added for 2282: IN endpoints don't use NAK-OUT mode */
		tmp = (1 << CLEAR_NAK_OUT_PACKETS_MODE)
		| (1 << CLEAR_NAK_OUT_PACKETS)
		| (1 << CLEAR_EP_HIDE_STATUS_PHASE)
		| (1 << CLEAR_INTERRUPT_MODE);
	}

	/* ep0 keeps its toggle/halt state; other endpoints start clean */
	if (ep->num != 0) {
		tmp |= (1 << CLEAR_ENDPOINT_TOGGLE)
			| (1 << CLEAR_ENDPOINT_HALT);
	}
	writel (tmp, &ep->regs->ep_rsp);

	/* scrub most status bits, and flush any fifo state */
	if (ep->dev->pdev->device == 0x2280)
		tmp = (1 << FIFO_OVERFLOW)
			| (1 << FIFO_UNDERFLOW);
	else
		tmp = 0;

	writel (tmp | (1 << TIMEOUT)
		| (1 << USB_STALL_SENT)
		| (1 << USB_IN_NAK_SENT)
		| (1 << USB_IN_ACK_RCVD)
		| (1 << USB_OUT_PING_NAK_SENT)
		| (1 << USB_OUT_ACK_SENT)
		| (1 << FIFO_FLUSH)
		| (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
		| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
		| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
		| (1 << DATA_IN_TOKEN_INTERRUPT)
		, &ep->regs->ep_stat);

	/* fifo size is handled separately */
}
362
363static void nuke (struct net2280_ep *);
364
/* net2280_disable - usb_ep_ops.disable() entry point
 *
 * Aborts all queued requests (via nuke) and restores the endpoint to
 * its power-on defaults.  Refuses ep0 and never-enabled endpoints.
 */
static int net2280_disable (struct usb_ep *_ep)
{
	struct net2280_ep	*ep;
	unsigned long		flags;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || !ep->desc || _ep->name == ep0name)
		return -EINVAL;

	spin_lock_irqsave (&ep->dev->lock, flags);
	nuke (ep);
	ep_reset (ep->dev->regs, ep);

	VDEBUG (ep->dev, "disabled %s %s\n",
			ep->dma ? "dma" : "pio", _ep->name);

	/* synch memory views with the device */
	(void) readl (&ep->regs->ep_cfg);

	/* reclaim a dma channel if net2280_enable() dropped it because
	 * of an awkward maxpacket (ep-a..ep-d map to dma [0..3])
	 */
	if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
		ep->dma = &ep->dev->dma [ep->num - 1];

	spin_unlock_irqrestore (&ep->dev->lock, flags);
	return 0;
}
390
391/*-------------------------------------------------------------------------*/
392
393static struct usb_request *
394net2280_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
395{
396	struct net2280_ep	*ep;
397	struct net2280_request	*req;
398
399	if (!_ep)
400		return NULL;
401	ep = container_of (_ep, struct net2280_ep, ep);
402
403	req = kzalloc(sizeof(*req), gfp_flags);
404	if (!req)
405		return NULL;
406
407	req->req.dma = DMA_ADDR_INVALID;
408	INIT_LIST_HEAD (&req->queue);
409
410	/* this dma descriptor may be swapped with the previous dummy */
411	if (ep->dma) {
412		struct net2280_dma	*td;
413
414		td = pci_pool_alloc (ep->dev->requests, gfp_flags,
415				&req->td_dma);
416		if (!td) {
417			kfree (req);
418			return NULL;
419		}
420		td->dmacount = 0;	/* not VALID */
421		td->dmaaddr = __constant_cpu_to_le32 (DMA_ADDR_INVALID);
422		td->dmadesc = td->dmaaddr;
423		req->td = td;
424	}
425	return &req->req;
426}
427
428static void
429net2280_free_request (struct usb_ep *_ep, struct usb_request *_req)
430{
431	struct net2280_ep	*ep;
432	struct net2280_request	*req;
433
434	ep = container_of (_ep, struct net2280_ep, ep);
435	if (!_ep || !_req)
436		return;
437
438	req = container_of (_req, struct net2280_request, req);
439	WARN_ON (!list_empty (&req->queue));
440	if (req->td)
441		pci_pool_free (ep->dev->requests, req->td, req->td_dma);
442	kfree (req);
443}
444
445/*-------------------------------------------------------------------------*/
446
447
448static void *
449net2280_alloc_buffer (
450	struct usb_ep		*_ep,
451	unsigned		bytes,
452	dma_addr_t		*dma,
453	gfp_t			gfp_flags
454)
455{
456	void			*retval;
457	struct net2280_ep	*ep;
458
459	ep = container_of (_ep, struct net2280_ep, ep);
460	if (!_ep)
461		return NULL;
462	*dma = DMA_ADDR_INVALID;
463
464	if (ep->dma)
465		retval = dma_alloc_coherent(&ep->dev->pdev->dev,
466				bytes, dma, gfp_flags);
467	else
468		retval = kmalloc(bytes, gfp_flags);
469	return retval;
470}
471
472static DEFINE_SPINLOCK(buflock);
473static LIST_HEAD(buffers);
474
/* bookkeeping for a coherent dma buffer whose release is deferred to
 * the tasklet below; the record is overlaid on the start of the buffer
 * itself (see net2280_free_buffer)
 */
struct free_record {
	struct list_head	list;
	struct device		*dev;	/* device that owns the mapping */
	unsigned		bytes;	/* size originally allocated */
	dma_addr_t		dma;	/* bus address of the buffer */
};
481
/* tasklet body: release all deferred dma buffers.  the list lock is
 * dropped around each dma_free_coherent() call so the free itself
 * runs without the lock held; note the record sits at the start of
 * the buffer being freed, so "buf" doubles as the cpu address.
 */
static void do_free(unsigned long ignored)
{
	spin_lock_irq(&buflock);
	while (!list_empty(&buffers)) {
		struct free_record	*buf;

		buf = list_entry(buffers.next, struct free_record, list);
		list_del(&buf->list);
		spin_unlock_irq(&buflock);

		dma_free_coherent(buf->dev, buf->bytes, buf, buf->dma);

		spin_lock_irq(&buflock);
	}
	spin_unlock_irq(&buflock);
}
498
499static DECLARE_TASKLET(deferred_free, do_free, 0);
500
501static void
502net2280_free_buffer (
503	struct usb_ep *_ep,
504	void *address,
505	dma_addr_t dma,
506	unsigned bytes
507) {
508	/* free memory into the right allocator */
509	if (dma != DMA_ADDR_INVALID) {
510		struct net2280_ep	*ep;
511		struct free_record	*buf = address;
512		unsigned long		flags;
513
514		ep = container_of(_ep, struct net2280_ep, ep);
515		if (!_ep)
516			return;
517
518		ep = container_of (_ep, struct net2280_ep, ep);
519		buf->dev = &ep->dev->pdev->dev;
520		buf->bytes = bytes;
521		buf->dma = dma;
522
523		spin_lock_irqsave(&buflock, flags);
524		list_add_tail(&buf->list, &buffers);
525		tasklet_schedule(&deferred_free);
526		spin_unlock_irqrestore(&buflock, flags);
527	} else
528		kfree (address);
529}
530
531/*-------------------------------------------------------------------------*/
532
533/* load a packet into the fifo we use for usb IN transfers.
534 * works for all endpoints.
535 *
536 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
537 * at a time, but this code is simpler because it knows it only writes
538 * one packet.  ep-a..ep-d should use dma instead.
539 */
static void
write_fifo (struct net2280_ep *ep, struct usb_request *req)
{
	struct net2280_ep_regs	__iomem *regs = ep->regs;
	u8			*buf;
	u32			tmp;
	unsigned		count, total;

	/* INVARIANT:  fifo is currently empty. (testable) */

	/* a NULL request writes a zero-length packet */
	if (req) {
		buf = req->buf + req->actual;
		prefetch (buf);
		total = req->length - req->actual;
	} else {
		total = 0;
		buf = NULL;
	}

	/* write just one packet at a time */
	count = ep->ep.maxpacket;
	if (count > total)	/* min() cannot be used on a bitfield */
		count = total;

	VDEBUG (ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
			ep->ep.name, count,
			(count != ep->ep.maxpacket) ? " (short)" : "",
			req);
	while (count >= 4) {
		/* NOTE be careful if you try to align these. fifo lines
		 * should normally be full (4 bytes) and successive partial
		 * lines are ok only in certain cases.
		 */
		tmp = get_unaligned ((u32 *)buf);
		cpu_to_le32s (&tmp);
		writel (tmp, &regs->ep_data);
		buf += 4;
		count -= 4;
	}

	/* last fifo entry is "short" unless we wrote a full packet.
	 * also explicitly validate last word in (periodic) transfers
	 * when maxpacket is not a multiple of 4 bytes.
	 */
	if (count || total < ep->ep.maxpacket) {
		/* NOTE(review): for a partial last line this still fetches
		 * a full u32, so up to 3 bytes past the payload may be
		 * read; only (count & 0x03) bytes are marked valid via
		 * set_fifo_bytecount() -- longstanding behavior.
		 */
		tmp = count ? get_unaligned ((u32 *)buf) : count;
		cpu_to_le32s (&tmp);
		set_fifo_bytecount (ep, count & 0x03);
		writel (tmp, &regs->ep_data);
	}

	/* pci writes may still be posted */
}
593
/* out_flush - discard buffered OUT fifo contents and ack the related
 * token/packet status.  Caller must already have the endpoint NAKing
 * OUT packets (asserted below).  At full speed, an OUT token seen
 * after the flush means a packet may be (re)filling the fifo, so wait
 * (up to 50 usec) for the chip to NAK it before the caller clears
 * NAK-OUT mode.  (At high speed, bulk NYET keeps the fifo empty.)
 */
static void out_flush (struct net2280_ep *ep)
{
	u32	__iomem *statp;
	u32	tmp;

	ASSERT_OUT_NAKING (ep);

	statp = &ep->regs->ep_stat;
	writel (  (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
		| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
		, statp);
	writel ((1 << FIFO_FLUSH), statp);
	mb ();
	tmp = readl (statp);
	if (tmp & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
			/* high speed did bulk NYET; fifo isn't filling */
			&& ep->dev->gadget.speed == USB_SPEED_FULL) {
		unsigned	usec;

		usec = 50;		/* 64 byte bulk/interrupt */
		handshake (statp, (1 << USB_OUT_PING_NAK_SENT),
				(1 << USB_OUT_PING_NAK_SENT), usec);
		/* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
	}
}
619
620/* unload packet(s) from the fifo we use for usb OUT transfers.
621 * returns true iff the request completed, because of short packet
622 * or the request buffer having filled with full packets.
623 *
624 * for ep-a..ep-d this will read multiple packets out when they
625 * have been accepted.
626 */
static int
read_fifo (struct net2280_ep *ep, struct net2280_request *req)
{
	struct net2280_ep_regs	__iomem *regs = ep->regs;
	u8			*buf = req->req.buf + req->req.actual;
	unsigned		count, tmp, is_short;
	unsigned		cleanup = 0, prevent = 0;

	/* erratum 0106 ... packets coming in during fifo reads might
	 * be incompletely rejected.  not all cases have workarounds.
	 */
	if (ep->dev->chiprev == 0x0100
			&& ep->dev->gadget.speed == USB_SPEED_FULL) {
		udelay (1);
		tmp = readl (&ep->regs->ep_stat);
		if ((tmp & (1 << NAK_OUT_PACKETS)))
			cleanup = 1;	/* flush the fifo afterwards */
		else if ((tmp & (1 << FIFO_FULL))) {
			start_out_naking (ep);
			prevent = 1;	/* un-NAK once the fifo is drained */
		}
		/* else: hope we don't see the problem */
	}

	/* never overflow the rx buffer. the fifo reads packets until
	 * it sees a short one; we might not be ready for them all.
	 */
	prefetchw (buf);
	count = readl (&regs->ep_avail);
	if (unlikely (count == 0)) {
		udelay (1);
		tmp = readl (&ep->regs->ep_stat);
		count = readl (&regs->ep_avail);
		/* handled that data already? */
		if (count == 0 && (tmp & (1 << NAK_OUT_PACKETS)) == 0)
			return 0;
	}

	/* clip to the space left in the request buffer */
	tmp = req->req.length - req->req.actual;
	if (count > tmp) {
		/* as with DMA, data overflow gets flushed */
		if ((tmp % ep->ep.maxpacket) != 0) {
			ERROR (ep->dev,
				"%s out fifo %d bytes, expected %d\n",
				ep->ep.name, count, tmp);
			req->req.status = -EOVERFLOW;
			cleanup = 1;
			/* NAK_OUT_PACKETS will be set, so flushing is safe;
			 * the next read will start with the next packet
			 */
		} /* else it's a ZLP, no worries */
		count = tmp;
	}
	req->req.actual += count;

	is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);

	VDEBUG (ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
			ep->ep.name, count, is_short ? " (short)" : "",
			cleanup ? " flush" : "", prevent ? " nak" : "",
			req, req->req.actual, req->req.length);

	/* drain the fifo a word at a time, then any trailing bytes */
	while (count >= 4) {
		tmp = readl (&regs->ep_data);
		cpu_to_le32s (&tmp);
		put_unaligned (tmp, (u32 *)buf);
		buf += 4;
		count -= 4;
	}
	if (count) {
		tmp = readl (&regs->ep_data);
		/* LE conversion is implicit here: */
		do {
			*buf++ = (u8) tmp;
			tmp >>= 8;
		} while (--count);
	}
	if (cleanup)
		out_flush (ep);
	if (prevent) {
		writel ((1 << CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
		(void) readl (&ep->regs->ep_rsp);
	}

	/* completed when short, or when the buffer is full of whole
	 * packets and the caller didn't ask for a forced zlp
	 */
	return is_short || ((req->req.actual == req->req.length)
				&& !req->req.zero);
}
714
715/* fill out dma descriptor to match a given request */
/* fill_dma_desc - populate req->td for one transfer
 *
 * Sets direction, control flags and buffer address; the dmacount word
 * (which carries VALID_BIT) is written last, after a write barrier,
 * because hardware may be polling it.  td->dmadesc must already have
 * been set by the caller.
 */
static void
fill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid)
{
	struct net2280_dma	*td = req->td;
	u32			dmacount = req->req.length;

	/* don't let DMA continue after a short OUT packet,
	 * so overruns can't affect the next transfer.
	 * in case of overruns on max-size packets, we can't
	 * stop the fifo from filling but we can flush it.
	 */
	if (ep->is_in)
		dmacount |= (1 << DMA_DIRECTION);
	/* 2282 (device != 0x2280) always terminates the chain here */
	if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0)
			|| ep->dev->pdev->device != 0x2280)
		dmacount |= (1 << END_OF_CHAIN);

	req->valid = valid;
	if (valid)
		dmacount |= (1 << VALID_BIT);
	if (likely(!req->req.no_interrupt || !use_dma_chaining))
		dmacount |= (1 << DMA_DONE_INTERRUPT_ENABLE);

	/* td->dmadesc = previously set by caller */
	td->dmaaddr = cpu_to_le32 (req->req.dma);

	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
	wmb ();
	td->dmacount = cpu_to_le32p (&dmacount);
}
746
/* default dma control word: scatter/gather mode with valid-bit
 * polling (POLL_100_USEC rate) and count clearing enabled
 */
static const u32 dmactl_default =
		  (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
		| (1 << DMA_CLEAR_COUNT_ENABLE)
		| (POLL_100_USEC << DESCRIPTOR_POLLING_RATE)
		| (1 << DMA_VALID_BIT_POLLING_ENABLE)
		| (1 << DMA_VALID_BIT_ENABLE)
		| (1 << DMA_SCATTER_GATHER_ENABLE)
		| (1 << DMA_ENABLE);
755
/* wait (up to ~50 usec) for a dma channel's enable bit to clear */
static inline void spin_stop_dma (struct net2280_dma_regs __iomem *dma)
{
	handshake (&dma->dmactl, (1 << DMA_ENABLE), 0, 50);
}
760
/* clear a dma channel's enable bit, then wait for it to really stop */
static inline void stop_dma (struct net2280_dma_regs __iomem *dma)
{
	writel (readl (&dma->dmactl) & ~(1 << DMA_ENABLE), &dma->dmactl);
	spin_stop_dma (dma);
}
766
/* start_queue - point an endpoint's dma channel at a descriptor
 * chain (td_dma) and start it with control bits dmactl.  OUT
 * endpoints also stop NAKing so the fifo can fill.
 */
static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma)
{
	struct net2280_dma_regs	__iomem *dma = ep->dma;
	unsigned int tmp = (1 << VALID_BIT) | (ep->is_in << DMA_DIRECTION);

	if (ep->dev->pdev->device != 0x2280)
		tmp |= (1 << END_OF_CHAIN);

	writel (tmp, &dma->dmacount);
	/* ack any stale dma status before starting */
	writel (readl (&dma->dmastat), &dma->dmastat);

	writel (td_dma, &dma->dmadesc);
	writel (dmactl, &dma->dmactl);

	/* read-back; presumably flushes posted pci writes before the
	 * start strobe below -- TODO confirm against chip docs
	 */
	(void) readl (&ep->dev->pci->pcimstctl);

	writel ((1 << DMA_START), &dma->dmastat);

	if (!ep->is_in)
		stop_out_naking (ep);
}
788
/* start_dma - begin dma for a request on a channel known to be idle
 *
 * If a previous short OUT packet left data buffered in the fifo, that
 * residue is drained first with a one-shot, non-chained dma (faking
 * scatterlist status via req->td).  Otherwise the request's descriptor
 * is filled, linked to the endpoint's dummy, and the queue started.
 */
static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
{
	u32			tmp;
	struct net2280_dma_regs	__iomem *dma = ep->dma;


	/* on this path we "know" there's no dma active (yet) */
	WARN_ON (readl (&dma->dmactl) & (1 << DMA_ENABLE));
	writel (0, &ep->dma->dmactl);

	/* previous OUT packet might have been short */
	if (!ep->is_in && ((tmp = readl (&ep->regs->ep_stat))
				& (1 << NAK_OUT_PACKETS)) != 0) {
		writel ((1 << SHORT_PACKET_TRANSFERRED_INTERRUPT),
			&ep->regs->ep_stat);

		tmp = readl (&ep->regs->ep_avail);
		if (tmp) {
			writel (readl (&dma->dmastat), &dma->dmastat);

			/* transfer all/some fifo data */
			writel (req->req.dma, &dma->dmaaddr);
			tmp = min (tmp, req->req.length);

			/* dma irq, faking scatterlist status */
			req->td->dmacount = cpu_to_le32 (req->req.length - tmp);
			writel ((1 << DMA_DONE_INTERRUPT_ENABLE)
				| tmp, &dma->dmacount);
			/* dmadesc == 0 marks "single transfer mode" for
			 * scan_dma_completions()
			 */
			req->td->dmadesc = 0;
			req->valid = 1;

			writel ((1 << DMA_ENABLE), &dma->dmactl);
			writel ((1 << DMA_START), &dma->dmastat);
			return;
		}
	}

	tmp = dmactl_default;

	/* force packet boundaries between dma requests, but prevent the
	 * controller from automagically writing a last "short" packet
	 * (zero length) unless the driver explicitly said to do that.
	 */
	if (ep->is_in) {
		if (likely ((req->req.length % ep->ep.maxpacket) != 0
				|| req->req.zero)) {
			tmp |= (1 << DMA_FIFO_VALIDATE);
			ep->in_fifo_validate = 1;
		} else
			ep->in_fifo_validate = 0;
	}

	/* init req->td, pointing to the current dummy */
	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
	fill_dma_desc (ep, req, 1);

	if (!use_dma_chaining)
		req->td->dmacount |= __constant_cpu_to_le32 (1 << END_OF_CHAIN);

	start_queue (ep, tmp, req->td_dma);
}
850
851static inline void
852queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid)
853{
854	struct net2280_dma	*end;
855	dma_addr_t		tmp;
856
857	/* swap new dummy for old, link; fill and maybe activate */
858	end = ep->dummy;
859	ep->dummy = req->td;
860	req->td = end;
861
862	tmp = ep->td_dma;
863	ep->td_dma = req->td_dma;
864	req->td_dma = tmp;
865
866	end->dmadesc = cpu_to_le32 (ep->td_dma);
867
868	fill_dma_desc (ep, req, valid);
869}
870
/* done - retire one request from its endpoint's queue
 *
 * Caller holds dev->lock (the lock is dropped around the gadget
 * driver's completion callback).  Unlinks the request, records its
 * final status, undoes any dma mapping this driver created, and
 * invokes req->req.complete().  ep->stopped is forced on around the
 * callback so completion code can't advance the queue reentrantly.
 */
static void
done (struct net2280_ep *ep, struct net2280_request *req, int status)
{
	struct net2280		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init (&req->queue);

	/* a status set earlier (e.g. -EOVERFLOW) wins over ours */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (req->mapped) {
		pci_unmap_single (dev->pdev, req->req.dma, req->req.length,
			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	}

	if (status && status != -ESHUTDOWN)
		VDEBUG (dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock (&dev->lock);
	req->req.complete (&ep->ep, &req->req);
	spin_lock (&dev->lock);
	ep->stopped = stopped;
}
904
905/*-------------------------------------------------------------------------*/
906
/* net2280_queue - usb_ep_ops.queue() entry point
 *
 * Validates the request and, when the endpoint uses dma and the caller
 * didn't map the buffer, maps it.  If the queue is idle the transfer
 * is kickstarted immediately (dma, pio fifo stuffing, or an ep0 status
 * ack); if dma is already active the request is linked behind the
 * current chain; otherwise the irq handler advances the queue later.
 */
static int
net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct net2280_request	*req;
	struct net2280_ep	*ep;
	struct net2280		*dev;
	unsigned long		flags;

	/* we always require a cpu-view buffer, so that we can
	 * always use pio (as fallback or whatever).
	 */
	req = container_of (_req, struct net2280_request, req);
	if (!_req || !_req->complete || !_req->buf
			|| !list_empty (&req->queue))
		return -EINVAL;
	if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
		return -EDOM;
	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* zero-length dma transfers aren't supported on these endpoints */
	if (ep->dma && _req->length == 0)
		return -EOPNOTSUPP;

	/* set up dma mapping in case the caller didn't */
	if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
		_req->dma = pci_map_single (dev->pdev, _req->buf, _req->length,
			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		req->mapped = 1;
	}


	spin_lock_irqsave (&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty (&ep->queue) && !ep->stopped) {
		/* use DMA if the endpoint supports it, else pio */
		if (ep->dma)
			start_dma (ep, req);
		else {
			/* maybe there's no control data, just status ack */
			if (ep->num == 0 && _req->length == 0) {
				allow_status (ep);
				done (ep, req, 0);
				VDEBUG (dev, "%s status ack\n", ep->ep.name);
				goto done;
			}

			/* PIO ... stuff the fifo, or unblock it.  */
			if (ep->is_in)
				write_fifo (ep, _req);
			else if (list_empty (&ep->queue)) {
				u32	s;

				/* OUT FIFO might have packet(s) buffered */
				s = readl (&ep->regs->ep_stat);
				if ((s & (1 << FIFO_EMPTY)) == 0) {
					/* note:  _req->short_not_ok is
					 * ignored here since PIO _always_
					 * stops queue advance here, and
					 * _req->status doesn't change for
					 * short reads (only _req->actual)
					 */
					if (read_fifo (ep, req)) {
						done (ep, req, 0);
						if (ep->num == 0)
							allow_status (ep);
						/* don't queue it */
						req = NULL;
					} else
						s = readl (&ep->regs->ep_stat);
				}

				/* don't NAK, let the fifo fill */
				if (req && (s & (1 << NAK_OUT_PACKETS)))
					writel ((1 << CLEAR_NAK_OUT_PACKETS),
							&ep->regs->ep_rsp);
			}
		}

	} else if (ep->dma) {
		int	valid = 1;

		if (ep->is_in) {
			int	expect;

			/* preventing magic zlps is per-engine state, not
			 * per-transfer; irq logic must recover hiccups.
			 */
			expect = likely (req->req.zero
				|| (req->req.length % ep->ep.maxpacket) != 0);
			if (expect != ep->in_fifo_validate)
				valid = 0;
		}
		queue_dma (ep, req, valid);

	} /* else the irq handler advances the queue. */

	/* NOTE(review): presumably consumed by ep0 protocol-stall logic
	 * elsewhere in the driver -- not visible in this chunk
	 */
	ep->responded = 1;
	if (req)
		list_add_tail (&req->queue, &ep->queue);
done:
	spin_unlock_irqrestore (&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}
1020
1021static inline void
1022dma_done (
1023	struct net2280_ep *ep,
1024	struct net2280_request *req,
1025	u32 dmacount,
1026	int status
1027)
1028{
1029	req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
1030	done (ep, req, status);
1031}
1032
1033static void restart_dma (struct net2280_ep *ep);
1034
/* scan_dma_completions - retire naturally-completed dma requests
 *
 * Walks the head of the endpoint's queue, retiring requests whose
 * descriptors the hardware has handed back (VALID_BIT cleared), and
 * stops at the first descriptor still owned by hardware.  Aborted
 * transfers are handled elsewhere.
 */
static void scan_dma_completions (struct net2280_ep *ep)
{
	/* only look at descriptors that were "naturally" retired,
	 * so fifo and list head state won't matter
	 */
	while (!list_empty (&ep->queue)) {
		struct net2280_request	*req;
		u32			tmp;

		req = list_entry (ep->queue.next,
				struct net2280_request, queue);
		if (!req->valid)
			break;
		rmb ();
		tmp = le32_to_cpup (&req->td->dmacount);
		if ((tmp & (1 << VALID_BIT)) != 0)
			break;

		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
		 * cases where DMA must be aborted; this code handles
		 * all non-abort DMA completions.
		 */
		if (unlikely (req->td->dmadesc == 0)) {
			/* paranoia */
			tmp = readl (&ep->dma->dmacount);
			if (tmp & DMA_BYTE_COUNT_MASK)
				break;
			/* single transfer mode (see start_dma) */
			dma_done (ep, req, tmp, 0);
			break;
		} else if (!ep->is_in
				&& (req->req.length % ep->ep.maxpacket) != 0) {
			tmp = readl (&ep->regs->ep_stat);

			/* AVOID TROUBLE HERE by not issuing short reads from
			 * your gadget driver.  That helps avoid errata 0121,
			 * 0122, and 0124; not all cases trigger the warning.
			 */
			if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
				WARN (ep->dev, "%s lost packet sync!\n",
						ep->ep.name);
				req->req.status = -EOVERFLOW;
			} else if ((tmp = readl (&ep->regs->ep_avail)) != 0) {
				/* fifo gets flushed later */
				ep->out_overflow = 1;
				DEBUG (ep->dev, "%s dma, discard %d len %d\n",
						ep->ep.name, tmp,
						req->req.length);
				req->req.status = -EOVERFLOW;
			}
		}
		dma_done (ep, req, tmp, 0);
	}
}
1089
1090static void restart_dma (struct net2280_ep *ep)
1091{
1092	struct net2280_request	*req;
1093	u32			dmactl = dmactl_default;
1094
1095	if (ep->stopped)
1096		return;
1097	req = list_entry (ep->queue.next, struct net2280_request, queue);
1098
1099	if (!use_dma_chaining) {
1100		start_dma (ep, req);
1101		return;
1102	}
1103
1104	/* the 2280 will be processing the queue unless queue hiccups after
1105	 * the previous transfer:
1106	 *  IN:   wanted automagic zlp, head doesn't (or vice versa)
1107	 *        DMA_FIFO_VALIDATE doesn't init from dma descriptors.
1108	 *  OUT:  was "usb-short", we must restart.
1109	 */
1110	if (ep->is_in && !req->valid) {
1111		struct net2280_request	*entry, *prev = NULL;
1112		int			reqmode, done = 0;
1113
1114		DEBUG (ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
1115		ep->in_fifo_validate = likely (req->req.zero
1116			|| (req->req.length % ep->ep.maxpacket) != 0);
1117		if (ep->in_fifo_validate)
1118			dmactl |= (1 << DMA_FIFO_VALIDATE);
1119		list_for_each_entry (entry, &ep->queue, queue) {
1120			__le32		dmacount;
1121
1122			if (entry == req)
1123				continue;
1124			dmacount = entry->td->dmacount;
1125			if (!done) {
1126				reqmode = likely (entry->req.zero
1127					|| (entry->req.length
1128						% ep->ep.maxpacket) != 0);
1129				if (reqmode == ep->in_fifo_validate) {
1130					entry->valid = 1;
1131					dmacount |= valid_bit;
1132					entry->td->dmacount = dmacount;
1133					prev = entry;
1134					continue;
1135				} else {
1136					/* force a hiccup */
1137					prev->td->dmacount |= dma_done_ie;
1138					done = 1;
1139				}
1140			}
1141
1142			/* walk the rest of the queue so unlinks behave */
1143			entry->valid = 0;
1144			dmacount &= ~valid_bit;
1145			entry->td->dmacount = dmacount;
1146			prev = entry;
1147		}
1148	}
1149
1150	writel (0, &ep->dma->dmactl);
1151	start_queue (ep, dmactl, req->td_dma);
1152}
1153
/* Stop this endpoint's DMA channel, aborting any transfer in flight,
 * then collect whatever had already completed.  Caller holds dev->lock.
 */
static void abort_dma (struct net2280_ep *ep)
{
	/* abort the current transfer */
	if (likely (!list_empty (&ep->queue))) {
		writel ((1 << DMA_ABORT), &ep->dma->dmastat);
		spin_stop_dma (ep->dma);
	} else
		stop_dma (ep->dma);
	scan_dma_completions (ep);
}
1164
1165/* dequeue ALL requests */
1166static void nuke (struct net2280_ep *ep)
1167{
1168	struct net2280_request	*req;
1169
1170	/* called with spinlock held */
1171	ep->stopped = 1;
1172	if (ep->dma)
1173		abort_dma (ep);
1174	while (!list_empty (&ep->queue)) {
1175		req = list_entry (ep->queue.next,
1176				struct net2280_request,
1177				queue);
1178		done (ep, req, -ESHUTDOWN);
1179	}
1180}
1181
/* dequeue JUST ONE request: quiesce dma, unlink the request (completing
 * it with -ECONNRESET), patch the hardware descriptor chain if needed,
 * then resume dma for anything still queued.  Returns 0 or -EINVAL.
 */
static int net2280_dequeue (struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;
	unsigned long		flags;
	u32			dmactl;
	int			stopped;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
		return -EINVAL;

	spin_lock_irqsave (&ep->dev->lock, flags);
	/* remember, so the previous stopped state can be restored below */
	stopped = ep->stopped;

	/* quiesce dma while we patch the queue */
	dmactl = 0;
	ep->stopped = 1;
	if (ep->dma) {
		dmactl = readl (&ep->dma->dmactl);
		/* WARNING erratum 0127 may kick in ... */
		stop_dma (ep->dma);
		scan_dma_completions (ep);
	}

	/* make sure it's still queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore (&ep->dev->lock, flags);
		return -EINVAL;
	}

	/* queue head may be partially complete. */
	if (ep->queue.next == &req->queue) {
		if (ep->dma) {
			DEBUG (ep->dev, "unlink (%s) dma\n", _ep->name);
			_req->status = -ECONNRESET;
			abort_dma (ep);
			/* abort_dma may already have retired the request */
			if (likely (ep->queue.next == &req->queue)) {
				// NOTE: misreports single-transfer mode
				req->td->dmacount = 0;	/* invalidate */
				dma_done (ep, req,
					readl (&ep->dma->dmacount),
					-ECONNRESET);
			}
		} else {
			DEBUG (ep->dev, "unlink (%s) pio\n", _ep->name);
			done (ep, req, -ECONNRESET);
		}
		req = NULL;

	/* patch up hardware chaining data */
	} else if (ep->dma && use_dma_chaining) {
		/* req->queue.prev == ep->queue.next means req is the
		 * second entry: repoint the channel's descriptor fetch
		 * past it
		 */
		if (req->queue.prev == ep->queue.next) {
			writel (le32_to_cpu (req->td->dmadesc),
				&ep->dma->dmadesc);
			if (req->td->dmacount & dma_done_ie)
				writel (readl (&ep->dma->dmacount)
						| le32_to_cpu(dma_done_ie),
					&ep->dma->dmacount);
		} else {
			struct net2280_request	*prev;

			/* splice this request out of the descriptor chain */
			prev = list_entry (req->queue.prev,
				struct net2280_request, queue);
			prev->td->dmadesc = req->td->dmadesc;
			if (req->td->dmacount & dma_done_ie)
				prev->td->dmacount |= dma_done_ie;
		}
	}

	if (req)
		done (ep, req, -ECONNRESET);
	ep->stopped = stopped;

	if (ep->dma) {
		/* turn off dma on inactive queues */
		if (list_empty (&ep->queue))
			stop_dma (ep->dma);
		else if (!ep->stopped) {
			/* resume current request, or start new one */
			if (req)
				writel (dmactl, &ep->dma->dmactl);
			else
				start_dma (ep, list_entry (ep->queue.next,
					struct net2280_request, queue));
		}
	}

	spin_unlock_irqrestore (&ep->dev->lock, flags);
	return 0;
}
1278
1279/*-------------------------------------------------------------------------*/
1280
1281static int net2280_fifo_status (struct usb_ep *_ep);
1282
1283static int
1284net2280_set_halt (struct usb_ep *_ep, int value)
1285{
1286	struct net2280_ep	*ep;
1287	unsigned long		flags;
1288	int			retval = 0;
1289
1290	ep = container_of (_ep, struct net2280_ep, ep);
1291	if (!_ep || (!ep->desc && ep->num != 0))
1292		return -EINVAL;
1293	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1294		return -ESHUTDOWN;
1295	if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
1296						== USB_ENDPOINT_XFER_ISOC)
1297		return -EINVAL;
1298
1299	spin_lock_irqsave (&ep->dev->lock, flags);
1300	if (!list_empty (&ep->queue))
1301		retval = -EAGAIN;
1302	else if (ep->is_in && value && net2280_fifo_status (_ep) != 0)
1303		retval = -EAGAIN;
1304	else {
1305		VDEBUG (ep->dev, "%s %s halt\n", _ep->name,
1306				value ? "set" : "clear");
1307		/* set/clear, then synch memory views with the device */
1308		if (value) {
1309			if (ep->num == 0)
1310				ep->dev->protocol_stall = 1;
1311			else
1312				set_halt (ep);
1313		} else
1314			clear_halt (ep);
1315		(void) readl (&ep->regs->ep_rsp);
1316	}
1317	spin_unlock_irqrestore (&ep->dev->lock, flags);
1318
1319	return retval;
1320}
1321
1322static int
1323net2280_fifo_status (struct usb_ep *_ep)
1324{
1325	struct net2280_ep	*ep;
1326	u32			avail;
1327
1328	ep = container_of (_ep, struct net2280_ep, ep);
1329	if (!_ep || (!ep->desc && ep->num != 0))
1330		return -ENODEV;
1331	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1332		return -ESHUTDOWN;
1333
1334	avail = readl (&ep->regs->ep_avail) & ((1 << 12) - 1);
1335	if (avail > ep->fifo_size)
1336		return -EOVERFLOW;
1337	if (ep->is_in)
1338		avail = ep->fifo_size - avail;
1339	return avail;
1340}
1341
1342static void
1343net2280_fifo_flush (struct usb_ep *_ep)
1344{
1345	struct net2280_ep	*ep;
1346
1347	ep = container_of (_ep, struct net2280_ep, ep);
1348	if (!_ep || (!ep->desc && ep->num != 0))
1349		return;
1350	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1351		return;
1352
1353	writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
1354	(void) readl (&ep->regs->ep_rsp);
1355}
1356
/* per-endpoint operations exported to gadget drivers through each
 * struct usb_ep; the gadget core never calls this driver directly
 */
static const struct usb_ep_ops net2280_ep_ops = {
	.enable		= net2280_enable,
	.disable	= net2280_disable,

	.alloc_request	= net2280_alloc_request,
	.free_request	= net2280_free_request,

	.alloc_buffer	= net2280_alloc_buffer,
	.free_buffer	= net2280_free_buffer,

	.queue		= net2280_queue,
	.dequeue	= net2280_dequeue,

	.set_halt	= net2280_set_halt,
	.fifo_status	= net2280_fifo_status,
	.fifo_flush	= net2280_fifo_flush,
};
1374
1375/*-------------------------------------------------------------------------*/
1376
1377static int net2280_get_frame (struct usb_gadget *_gadget)
1378{
1379	struct net2280		*dev;
1380	unsigned long		flags;
1381	u16			retval;
1382
1383	if (!_gadget)
1384		return -ENODEV;
1385	dev = container_of (_gadget, struct net2280, gadget);
1386	spin_lock_irqsave (&dev->lock, flags);
1387	retval = get_idx_reg (dev->regs, REG_FRAME) & 0x03ff;
1388	spin_unlock_irqrestore (&dev->lock, flags);
1389	return retval;
1390}
1391
1392static int net2280_wakeup (struct usb_gadget *_gadget)
1393{
1394	struct net2280		*dev;
1395	u32			tmp;
1396	unsigned long		flags;
1397
1398	if (!_gadget)
1399		return 0;
1400	dev = container_of (_gadget, struct net2280, gadget);
1401
1402	spin_lock_irqsave (&dev->lock, flags);
1403	tmp = readl (&dev->usb->usbctl);
1404	if (tmp & (1 << DEVICE_REMOTE_WAKEUP_ENABLE))
1405		writel (1 << GENERATE_RESUME, &dev->usb->usbstat);
1406	spin_unlock_irqrestore (&dev->lock, flags);
1407
1408	/* pci writes may still be posted */
1409	return 0;
1410}
1411
1412static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value)
1413{
1414	struct net2280		*dev;
1415	u32			tmp;
1416	unsigned long		flags;
1417
1418	if (!_gadget)
1419		return 0;
1420	dev = container_of (_gadget, struct net2280, gadget);
1421
1422	spin_lock_irqsave (&dev->lock, flags);
1423	tmp = readl (&dev->usb->usbctl);
1424	if (value)
1425		tmp |= (1 << SELF_POWERED_STATUS);
1426	else
1427		tmp &= ~(1 << SELF_POWERED_STATUS);
1428	writel (tmp, &dev->usb->usbctl);
1429	spin_unlock_irqrestore (&dev->lock, flags);
1430
1431	return 0;
1432}
1433
1434static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
1435{
1436	struct net2280  *dev;
1437	u32             tmp;
1438	unsigned long   flags;
1439
1440	if (!_gadget)
1441		return -ENODEV;
1442	dev = container_of (_gadget, struct net2280, gadget);
1443
1444	spin_lock_irqsave (&dev->lock, flags);
1445	tmp = readl (&dev->usb->usbctl);
1446	dev->softconnect = (is_on != 0);
1447	if (is_on)
1448		tmp |= (1 << USB_DETECT_ENABLE);
1449	else
1450		tmp &= ~(1 << USB_DETECT_ENABLE);
1451	writel (tmp, &dev->usb->usbctl);
1452	spin_unlock_irqrestore (&dev->lock, flags);
1453
1454	return 0;
1455}
1456
/* device-level operations handed to the gadget framework; only these
 * four ops are implemented by this driver
 */
static const struct usb_gadget_ops net2280_ops = {
	.get_frame	= net2280_get_frame,
	.wakeup		= net2280_wakeup,
	.set_selfpowered = net2280_set_selfpowered,
	.pullup		= net2280_pullup,
};
1463
1464/*-------------------------------------------------------------------------*/
1465
1466#ifdef	CONFIG_USB_GADGET_DEBUG_FILES
1467
1468
1469/* "function" sysfs attribute */
1470static ssize_t
1471show_function (struct device *_dev, struct device_attribute *attr, char *buf)
1472{
1473	struct net2280	*dev = dev_get_drvdata (_dev);
1474
1475	if (!dev->driver
1476			|| !dev->driver->function
1477			|| strlen (dev->driver->function) > PAGE_SIZE)
1478		return 0;
1479	return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
1480}
1481static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
1482
/* "registers" sysfs attribute: dump the main control, usb control,
 * per-endpoint, and dma register state plus irq statistics.  The
 * snapshot is taken with dev->lock held; returns bytes written.
 */
static ssize_t
show_registers (struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct net2280		*dev;
	char			*next;
	unsigned		size, t;
	unsigned long		flags;
	int			i;
	u32			t1, t2;
	const char		*s;

	dev = dev_get_drvdata (_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave (&dev->lock, flags);

	if (dev->driver)
		s = dev->driver->driver.name;
	else
		s = "(none)";

	/* Main Control Registers */
	t = scnprintf (next, size, "%s version " DRIVER_VERSION
			", chiprev %04x, dma %s\n\n"
			"devinit %03x fifoctl %08x gadget '%s'\n"
			"pci irqenb0 %02x irqenb1 %08x "
			"irqstat0 %04x irqstat1 %08x\n",
			driver_name, dev->chiprev,
			use_dma
				? (use_dma_chaining ? "chaining" : "enabled")
				: "disabled",
			readl (&dev->regs->devinit),
			readl (&dev->regs->fifoctl),
			s,
			readl (&dev->regs->pciirqenb0),
			readl (&dev->regs->pciirqenb1),
			readl (&dev->regs->irqstat0),
			readl (&dev->regs->irqstat1));
	size -= t;
	next += t;

	/* USB Control Registers */
	t1 = readl (&dev->usb->usbctl);
	t2 = readl (&dev->usb->usbstat);
	if (t1 & (1 << VBUS_PIN)) {
		if (t2 & (1 << HIGH_SPEED))
			s = "high speed";
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
			s = "powered";
		else
			s = "full speed";
		/* full speed bit (6) not working?? */
	} else
			s = "not attached";
	t = scnprintf (next, size,
			"stdrsp %08x usbctl %08x usbstat %08x "
				"addr 0x%02x (%s)\n",
			readl (&dev->usb->stdrsp), t1, t2,
			readl (&dev->usb->ouraddr), s);
	size -= t;
	next += t;

	/* PCI Master Control Registers */

	/* DMA Control Registers */

	/* Configurable EP Control Registers */
	for (i = 0; i < 7; i++) {
		struct net2280_ep	*ep;

		ep = &dev->ep [i];
		/* skip unconfigured endpoints; ep0 always shows */
		if (i && !ep->desc)
			continue;

		t1 = readl (&ep->regs->ep_cfg);
		t2 = readl (&ep->regs->ep_rsp) & 0xff;
		t = scnprintf (next, size,
				"\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
					"irqenb %02x\n",
				ep->ep.name, t1, t2,
				(t2 & (1 << CLEAR_NAK_OUT_PACKETS))
					? "NAK " : "",
				(t2 & (1 << CLEAR_EP_HIDE_STATUS_PHASE))
					? "hide " : "",
				(t2 & (1 << CLEAR_EP_FORCE_CRC_ERROR))
					? "CRC " : "",
				(t2 & (1 << CLEAR_INTERRUPT_MODE))
					? "interrupt " : "",
				(t2 & (1<<CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
					? "status " : "",
				(t2 & (1 << CLEAR_NAK_OUT_PACKETS_MODE))
					? "NAKmode " : "",
				(t2 & (1 << CLEAR_ENDPOINT_TOGGLE))
					? "DATA1 " : "DATA0 ",
				(t2 & (1 << CLEAR_ENDPOINT_HALT))
					? "HALT " : "",
				readl (&ep->regs->ep_irqenb));
		size -= t;
		next += t;

		t = scnprintf (next, size,
				"\tstat %08x avail %04x "
				"(ep%d%s-%s)%s\n",
				readl (&ep->regs->ep_stat),
				readl (&ep->regs->ep_avail),
				t1 & 0x0f, DIR_STRING (t1),
				type_string (t1 >> 8),
				ep->stopped ? "*" : "");
		size -= t;
		next += t;

		if (!ep->dma)
			continue;

		t = scnprintf (next, size,
				"  dma\tctl %08x stat %08x count %08x\n"
				"\taddr %08x desc %08x\n",
				readl (&ep->dma->dmactl),
				readl (&ep->dma->dmastat),
				readl (&ep->dma->dmacount),
				readl (&ep->dma->dmaaddr),
				readl (&ep->dma->dmadesc));
		size -= t;
		next += t;

	}

	/* Indexed Registers */
		// none yet

	/* Statistics */
	t = scnprintf (next, size, "\nirqs:  ");
	size -= t;
	next += t;
	for (i = 0; i < 7; i++) {
		struct net2280_ep	*ep;

		ep = &dev->ep [i];
		if (i && !ep->irqs)
			continue;
		t = scnprintf (next, size, " %s/%lu", ep->ep.name, ep->irqs);
		size -= t;
		next += t;

	}
	t = scnprintf (next, size, "\n");
	size -= t;
	next += t;

	spin_unlock_irqrestore (&dev->lock, flags);

	/* bytes written into buf */
	return PAGE_SIZE - size;
}
static DEVICE_ATTR (registers, S_IRUGO, show_registers, NULL);
1637
/* "queues" sysfs attribute: for each in-use endpoint, show a one-line
 * summary plus every queued request (and its dma descriptor when the
 * endpoint uses dma).  Taken under dev->lock; returns bytes written.
 */
static ssize_t
show_queues (struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct net2280		*dev;
	char			*next;
	unsigned		size;
	unsigned long		flags;
	int			i;

	dev = dev_get_drvdata (_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave (&dev->lock, flags);

	for (i = 0; i < 7; i++) {
		struct net2280_ep		*ep = &dev->ep [i];
		struct net2280_request		*req;
		int				t;

		if (i != 0) {
			const struct usb_endpoint_descriptor	*d;

			d = ep->desc;
			if (!d)
				continue;
			t = d->bEndpointAddress;
			t = scnprintf (next, size,
				"\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
				(t & USB_DIR_IN) ? "in" : "out",
				({ char *val;
				 switch (d->bmAttributes & 0x03) {
				 case USB_ENDPOINT_XFER_BULK:
					val = "bulk"; break;
				 case USB_ENDPOINT_XFER_INT:
					val = "intr"; break;
				 default:
					val = "iso"; break;
				 }; val; }),
				le16_to_cpu (d->wMaxPacketSize) & 0x1fff,
				ep->dma ? "dma" : "pio", ep->fifo_size
				);
		} else /* ep0 should only have one transfer queued */
			t = scnprintf (next, size, "ep0 max 64 pio %s\n",
					ep->is_in ? "in" : "out");
		/* bail out once the page is full (or scnprintf failed) */
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty (&ep->queue)) {
			t = scnprintf (next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry (req, &ep->queue, queue) {
			/* mark the request the dma channel is fetching */
			if (ep->dma && req->td_dma == readl (&ep->dma->dmadesc))
				t = scnprintf (next, size,
					"\treq %p len %d/%d "
					"buf %p (dmacount %08x)\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf,
					readl (&ep->dma->dmacount));
			else
				t = scnprintf (next, size,
					"\treq %p len %d/%d buf %p\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;

			if (ep->dma) {
				struct net2280_dma	*td;

				td = req->td;
				t = scnprintf (next, size, "\t    td %08x "
					" count %08x buf %08x desc %08x\n",
					(u32) req->td_dma,
					le32_to_cpu (td->dmacount),
					le32_to_cpu (td->dmaaddr),
					le32_to_cpu (td->dmadesc));
				if (t <= 0 || t > size)
					goto done;
				size -= t;
				next += t;
			}
		}
	}

done:
	spin_unlock_irqrestore (&dev->lock, flags);
	return PAGE_SIZE - size;
}
static DEVICE_ATTR (queues, S_IRUGO, show_queues, NULL);
1737
1738
1739#else
1740
1741#define device_create_file(a,b)	(0)
1742#define device_remove_file(a,b)	do { } while (0)
1743
1744#endif
1745
1746/*-------------------------------------------------------------------------*/
1747
1748/* another driver-specific mode might be a request type doing dma
1749 * to/from another device fifo instead of to/from memory.
1750 */
1751
1752static void set_fifo_mode (struct net2280 *dev, int mode)
1753{
1754	/* keeping high bits preserves BAR2 */
1755	writel ((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
1756
1757	/* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
1758	INIT_LIST_HEAD (&dev->gadget.ep_list);
1759	list_add_tail (&dev->ep [1].ep.ep_list, &dev->gadget.ep_list);
1760	list_add_tail (&dev->ep [2].ep.ep_list, &dev->gadget.ep_list);
1761	switch (mode) {
1762	case 0:
1763		list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
1764		list_add_tail (&dev->ep [4].ep.ep_list, &dev->gadget.ep_list);
1765		dev->ep [1].fifo_size = dev->ep [2].fifo_size = 1024;
1766		break;
1767	case 1:
1768		dev->ep [1].fifo_size = dev->ep [2].fifo_size = 2048;
1769		break;
1770	case 2:
1771		list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
1772		dev->ep [1].fifo_size = 2048;
1773		dev->ep [2].fifo_size = 1024;
1774		break;
1775	}
1776	/* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
1777	list_add_tail (&dev->ep [5].ep.ep_list, &dev->gadget.ep_list);
1778	list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list);
1779}
1780
/* just declare this in any driver that really needs it */
1782extern int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode);
1783
1784/**
1785 * net2280_set_fifo_mode - change allocation of fifo buffers
1786 * @gadget: access to the net2280 device that will be updated
1787 * @mode: 0 for default, four 1kB buffers (ep-a through ep-d);
1788 *	1 for two 2kB buffers (ep-a and ep-b only);
1789 *	2 for one 2kB buffer (ep-a) and two 1kB ones (ep-b, ep-c).
1790 *
1791 * returns zero on success, else negative errno.  when this succeeds,
1792 * the contents of gadget->ep_list may have changed.
1793 *
1794 * you may only call this function when endpoints a-d are all disabled.
1795 * use it whenever extra hardware buffering can help performance, such
1796 * as before enabling "high bandwidth" interrupt endpoints that use
1797 * maxpacket bigger than 512 (when double buffering would otherwise
1798 * be unavailable).
1799 */
1800int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode)
1801{
1802	int			i;
1803	struct net2280		*dev;
1804	int			status = 0;
1805	unsigned long		flags;
1806
1807	if (!gadget)
1808		return -ENODEV;
1809	dev = container_of (gadget, struct net2280, gadget);
1810
1811	spin_lock_irqsave (&dev->lock, flags);
1812
1813	for (i = 1; i <= 4; i++)
1814		if (dev->ep [i].desc) {
1815			status = -EINVAL;
1816			break;
1817		}
1818	if (mode < 0 || mode > 2)
1819		status = -EINVAL;
1820	if (status == 0)
1821		set_fifo_mode (dev, mode);
1822	spin_unlock_irqrestore (&dev->lock, flags);
1823
1824	if (status == 0) {
1825		if (mode == 1)
1826			DEBUG (dev, "fifo:  ep-a 2K, ep-b 2K\n");
1827		else if (mode == 2)
1828			DEBUG (dev, "fifo:  ep-a 2K, ep-b 1K, ep-c 1K\n");
1829		/* else all are 1K */
1830	}
1831	return status;
1832}
1833EXPORT_SYMBOL (net2280_set_fifo_mode);
1834
1835/*-------------------------------------------------------------------------*/
1836
1837/* keeping it simple:
1838 * - one bus driver, initted first;
1839 * - one function driver, initted second
1840 *
1841 * most of the work to support multiple net2280 controllers would
1842 * be to associate this gadget driver (yes?) with all of them, or
1843 * perhaps to bind specific drivers to specific devices.
1844 */
1845
1846static struct net2280	*the_controller;
1847
1848static void usb_reset (struct net2280 *dev)
1849{
1850	u32	tmp;
1851
1852	dev->gadget.speed = USB_SPEED_UNKNOWN;
1853	(void) readl (&dev->usb->usbctl);
1854
1855	net2280_led_init (dev);
1856
1857	/* disable automatic responses, and irqs */
1858	writel (0, &dev->usb->stdrsp);
1859	writel (0, &dev->regs->pciirqenb0);
1860	writel (0, &dev->regs->pciirqenb1);
1861
1862	/* clear old dma and irq state */
1863	for (tmp = 0; tmp < 4; tmp++) {
1864		struct net2280_ep	*ep = &dev->ep [tmp + 1];
1865
1866		if (ep->dma)
1867			abort_dma (ep);
1868	}
1869	writel (~0, &dev->regs->irqstat0),
1870	writel (~(1 << SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1),
1871
1872	/* reset, and enable pci */
1873	tmp = readl (&dev->regs->devinit)
1874		| (1 << PCI_ENABLE)
1875		| (1 << FIFO_SOFT_RESET)
1876		| (1 << USB_SOFT_RESET)
1877		| (1 << M8051_RESET);
1878	writel (tmp, &dev->regs->devinit);
1879
1880	/* standard fifo and endpoint allocations */
1881	set_fifo_mode (dev, (fifo_mode <= 2) ? fifo_mode : 0);
1882}
1883
/* (Re)initialize the driver's per-endpoint software state to its
 * defaults; called after hardware reset (e.g. from stop_activity).
 */
static void usb_reinit (struct net2280 *dev)
{
	u32	tmp;
	int	init_dma;

	/* use_dma changes are ignored till next device re-init */
	init_dma = use_dma;

	/* basic endpoint init */
	for (tmp = 0; tmp < 7; tmp++) {
		struct net2280_ep	*ep = &dev->ep [tmp];

		ep->ep.name = ep_name [tmp];
		ep->dev = dev;
		ep->num = tmp;

		/* ep 1..4 (ep-a .. ep-d) get 1K fifos and a dma channel */
		if (tmp > 0 && tmp <= 4) {
			ep->fifo_size = 1024;
			if (init_dma)
				ep->dma = &dev->dma [tmp - 1];
		} else
			ep->fifo_size = 64;	/* ep0, ep-e, ep-f */
		ep->regs = &dev->epregs [tmp];
		ep_reset (dev->regs, ep);
	}
	/* ep0 and the two pio-only endpoints use 64-byte packets */
	dev->ep [0].ep.maxpacket = 64;
	dev->ep [5].ep.maxpacket = 64;
	dev->ep [6].ep.maxpacket = 64;

	dev->gadget.ep0 = &dev->ep [0].ep;
	dev->ep [0].stopped = 0;
	INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);

	/* we want to prevent lowlevel/insecure access from the USB host,
	 * but erratum 0119 means this enable bit is ignored
	 */
	for (tmp = 0; tmp < 5; tmp++)
		writel (EP_DONTUSE, &dev->dep [tmp].dep_cfg);
}
1923
/* Program ep0, the chip's automatic standard-request handling, and
 * the irq enables so the device is ready for enumeration; called
 * once a gadget driver has bound (see usb_gadget_register_driver).
 */
static void ep0_start (struct net2280 *dev)
{
	/* clear stale ep0 response state */
	writel (  (1 << CLEAR_EP_HIDE_STATUS_PHASE)
		| (1 << CLEAR_NAK_OUT_PACKETS)
		| (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
		, &dev->epregs [0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel (  (1 << SET_TEST_MODE)
		| (1 << SET_ADDRESS)
		| (1 << DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP)
		| (1 << GET_DEVICE_STATUS)
		| (1 << GET_INTERFACE_STATUS)
		, &dev->usb->stdrsp);
	/* honor the saved softconnect choice when enabling host detect */
	writel (  (1 << USB_ROOT_PORT_WAKEUP_ENABLE)
		| (1 << SELF_POWERED_USB_DEVICE)
		| (1 << REMOTE_WAKEUP_SUPPORT)
		| (dev->softconnect << USB_DETECT_ENABLE)
		| (1 << SELF_POWERED_STATUS)
		, &dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation  */
	writel (  (1 << SETUP_PACKET_INTERRUPT_ENABLE)
		| (1 << ENDPOINT_0_INTERRUPT_ENABLE)
		, &dev->regs->pciirqenb0);
	writel (  (1 << PCI_INTERRUPT_ENABLE)
		| (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE)
		| (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE)
		| (1 << PCI_RETRY_ABORT_INTERRUPT_ENABLE)
		| (1 << VBUS_INTERRUPT_ENABLE)
		| (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE)
		| (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE)
		, &dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void) readl (&dev->usb->usbctl);
}
1966
1967/* when a driver is successfully registered, it will receive
1968 * control requests including set_configuration(), which enables
1969 * non-control requests.  then usb traffic follows until a
1970 * disconnect is reported.  then a host may connect again, or
1971 * the driver might get unbound.
1972 */
/* Bind a gadget function driver to the (single) controller; returns
 * zero on success, else a negative errno with the device left unbound.
 */
int usb_gadget_register_driver (struct usb_gadget_driver *driver)
{
	struct net2280		*dev = the_controller;
	int			retval;
	unsigned		i;

	/* insist on high speed support from the driver, since
	 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
	 * "must not be used in normal operation"
	 */
	if (!driver
			|| driver->speed != USB_SPEED_HIGH
			|| !driver->bind
			|| !driver->setup)
		return -EINVAL;
	if (!dev)
		return -ENODEV;
	/* only one function driver may be bound at a time */
	if (dev->driver)
		return -EBUSY;

	for (i = 0; i < 7; i++)
		dev->ep [i].irqs = 0;

	/* hook up the driver ... */
	dev->softconnect = 1;
	driver->driver.bus = NULL;
	dev->driver = driver;
	dev->gadget.dev.driver = &driver->driver;
	retval = driver->bind (&dev->gadget);
	if (retval) {
		DEBUG (dev, "bind to driver %s --> %d\n",
				driver->driver.name, retval);
		dev->driver = NULL;
		dev->gadget.dev.driver = NULL;
		return retval;
	}

	/* sysfs debug attributes; removed again on error or unregister */
	retval = device_create_file (&dev->pdev->dev, &dev_attr_function);
	if (retval) goto err_unbind;
	retval = device_create_file (&dev->pdev->dev, &dev_attr_queues);
	if (retval) goto err_func;

	/* ... then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	net2280_led_active (dev, 1);
	ep0_start (dev);

	DEBUG (dev, "%s ready, usbctl %08x stdrsp %08x\n",
			driver->driver.name,
			readl (&dev->usb->usbctl),
			readl (&dev->usb->stdrsp));

	/* pci writes may still be posted */
	return 0;

err_func:
	device_remove_file (&dev->pdev->dev, &dev_attr_function);
err_unbind:
	driver->unbind (&dev->gadget);
	dev->gadget.dev.driver = NULL;
	dev->driver = NULL;
	return retval;
}
EXPORT_SYMBOL (usb_gadget_register_driver);
2038
/* Disconnect-time teardown: reset the hardware, fail every queued
 * request, notify the function driver (if connected), then rebuild
 * software endpoint state.  Called with dev->lock held.
 */
static void
stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
{
	int			i;

	/* don't disconnect if it's not connected */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	/* stop hardware; prevent new request submissions;
	 * and kill any outstanding requests.
	 */
	usb_reset (dev);
	for (i = 0; i < 7; i++)
		nuke (&dev->ep [i]);

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		/* drop the lock around the callback */
		spin_unlock (&dev->lock);
		driver->disconnect (&dev->gadget);
		spin_lock (&dev->lock);
	}

	usb_reinit (dev);
}
2064
/* Reverse usb_gadget_register_driver(): quiesce the hardware, drop
 * the pullup, unbind the function driver, and remove its sysfs files.
 * Only the currently-bound driver may unregister.
 */
int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
{
	struct net2280	*dev = the_controller;
	unsigned long	flags;

	if (!dev)
		return -ENODEV;
	if (!driver || driver != dev->driver || !driver->unbind)
		return -EINVAL;

	spin_lock_irqsave (&dev->lock, flags);
	stop_activity (dev, driver);
	spin_unlock_irqrestore (&dev->lock, flags);

	/* disable host detection so the host sees a disconnect */
	net2280_pullup (&dev->gadget, 0);

	driver->unbind (&dev->gadget);
	dev->gadget.dev.driver = NULL;
	dev->driver = NULL;

	net2280_led_active (dev, 0);
	device_remove_file (&dev->pdev->dev, &dev_attr_function);
	device_remove_file (&dev->pdev->dev, &dev_attr_queues);

	DEBUG (dev, "unregistered driver '%s'\n", driver->driver.name);
	return 0;
}
EXPORT_SYMBOL (usb_gadget_unregister_driver);
2093
2094
2095/*-------------------------------------------------------------------------*/
2096
/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
 * also works for dma-capable endpoints, in pio mode or just
 * to manually advance the queue after short OUT transfers.
 *
 * Runs from the irq path (net2280_irq -> handle_stat0_irqs) with
 * dev->lock held.
 */
static void handle_ep_small (struct net2280_ep *ep)
{
	struct net2280_request	*req;
	u32			t;
	/* 0 error, 1 mid-data, 2 done */
	int			mode = 1;

	/* work on the request at the head of this endpoint's queue */
	if (!list_empty (&ep->queue))
		req = list_entry (ep->queue.next,
			struct net2280_request, queue);
	else
		req = NULL;

	/* ack all, and handle what we care about */
	t = readl (&ep->regs->ep_stat);
	ep->irqs++;
	if (!ep->is_in || ep->dev->pdev->device == 0x2280)
		/* leave NAK_OUT_PACKETS pending; it is cleared separately
		 * (see stop_out_naking) once a buffer is ready
		 */
		writel (t & ~(1 << NAK_OUT_PACKETS), &ep->regs->ep_stat);
	else
		/* Added for 2282 */
		writel (t, &ep->regs->ep_stat);

	/* for ep0, monitor token irqs to catch data stage length errors
	 * and to synchronize on status.
	 *
	 * also, to defer reporting of protocol stalls ... here's where
	 * data or status first appears, handling stalls here should never
	 * cause trouble on the host side..
	 *
	 * control requests could be slightly faster without token synch for
	 * status, but status can jam up that way.
	 */
	if (unlikely (ep->num == 0)) {
		if (ep->is_in) {
			/* status; stop NAKing */
			if (t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt (ep);
				}
				if (!req)
					allow_status (ep);
				mode = 2;
			/* reply to extra IN data tokens with a zlp */
			} else if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt (ep);
					mode = 2;
				} else if (ep->responded &&
						!req && !ep->stopped)
					write_fifo (ep, NULL);
			}
		} else {
			/* status; stop NAKing */
			if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt (ep);
				}
				mode = 2;
			/* an extra OUT token is an error */
			} else if (((t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT))
					&& req
					&& req->req.actual == req->req.length)
					|| (ep->responded && !req)) {
				ep->dev->protocol_stall = 1;
				set_halt (ep);
				ep->stopped = 1;
				if (req)
					done (ep, req, -EOVERFLOW);
				req = NULL;
			}
		}
	}

	/* nothing queued: the token bookkeeping above was all there was */
	if (unlikely (!req))
		return;

	/* manual DMA queue advance after short OUT */
	if (likely (ep->dma != 0)) {
		if (t & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
			u32	count;
			int	stopped = ep->stopped;

			/* TRANSFERRED works around OUT_DONE erratum 0112.
			 * we expect (N <= maxpacket) bytes; host wrote M.
			 * iff (M < N) we won't ever see a DMA interrupt.
			 */
			ep->stopped = 1;
			for (count = 0; ; t = readl (&ep->regs->ep_stat)) {

				/* any preceding dma transfers must finish.
				 * dma handles (M >= N), may empty the queue
				 */
				scan_dma_completions (ep);
				if (unlikely (list_empty (&ep->queue)
						|| ep->out_overflow)) {
					req = NULL;
					break;
				}
				req = list_entry (ep->queue.next,
					struct net2280_request, queue);

				/* here either (M < N), a "real" short rx;
				 * or (M == N) and the queue didn't empty
				 */
				if (likely (t & (1 << FIFO_EMPTY))) {
					count = readl (&ep->dma->dmacount);
					count &= DMA_BYTE_COUNT_MASK;
					if (readl (&ep->dma->dmadesc)
							!= req->td_dma)
						req = NULL;
					break;
				}
				udelay(1);
			}

			/* stop DMA, leave ep NAKing */
			writel ((1 << DMA_ABORT), &ep->dma->dmastat);
			spin_stop_dma (ep->dma);

			if (likely (req)) {
				req->td->dmacount = 0;
				t = readl (&ep->regs->ep_avail);
				/* nonzero ep_avail means data arrived beyond
				 * what this request could hold
				 */
				dma_done (ep, req, count,
					(ep->out_overflow || t)
						? -EOVERFLOW : 0);
			}

			/* also flush to prevent erratum 0106 trouble */
			if (unlikely (ep->out_overflow
					|| (ep->dev->chiprev == 0x0100
						&& ep->dev->gadget.speed
							== USB_SPEED_FULL))) {
				out_flush (ep);
				ep->out_overflow = 0;
			}

			/* (re)start dma if needed, stop NAKing */
			ep->stopped = stopped;
			if (!list_empty (&ep->queue))
				restart_dma (ep);
		} else
			DEBUG (ep->dev, "%s dma ep_stat %08x ??\n",
					ep->ep.name, t);
		return;

	/* data packet(s) received (in the fifo, OUT) */
	} else if (t & (1 << DATA_PACKET_RECEIVED_INTERRUPT)) {
		if (read_fifo (ep, req) && ep->num != 0)
			mode = 2;

	/* data packet(s) transmitted (IN) */
	} else if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) {
		unsigned	len;

		/* one packet went out; credit it against the request.
		 * NOTE(review): assumes write_fifo loaded
		 * min(maxpacket, remaining) bytes -- confirm against
		 * write_fifo.
		 */
		len = req->req.length - req->req.actual;
		if (len > ep->ep.maxpacket)
			len = ep->ep.maxpacket;
		req->req.actual += len;

		/* if we wrote it all, we're usually done */
		if (req->req.actual == req->req.length) {
			if (ep->num == 0) {
				/* send zlps until the status stage */
			} else if (!req->req.zero || len != ep->ep.maxpacket)
				mode = 2;
		}

	/* there was nothing to do ...  */
	} else if (mode == 1)
		return;

	/* done */
	if (mode == 2) {
		/* stream endpoints often resubmit/unlink in completion */
		done (ep, req, 0);

		/* maybe advance queue to next request */
		if (ep->num == 0) {
			/* NOTE:  net2280 could let gadget driver start the
			 * status stage later. since not all controllers let
			 * them control that, the api doesn't (yet) allow it.
			 */
			if (!ep->stopped)
				allow_status (ep);
			req = NULL;
		} else {
			if (!list_empty (&ep->queue) && !ep->stopped)
				req = list_entry (ep->queue.next,
					struct net2280_request, queue);
			else
				req = NULL;
			if (req && !ep->is_in)
				stop_out_naking (ep);
		}
	}

	/* is there a buffer for the next packet?
	 * for best streaming performance, make sure there is one.
	 */
	if (req && !ep->stopped) {

		/* load IN fifo with next packet (may be zlp) */
		if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
			write_fifo (ep, &req->req);
	}
}
2310
2311static struct net2280_ep *
2312get_ep_by_addr (struct net2280 *dev, u16 wIndex)
2313{
2314	struct net2280_ep	*ep;
2315
2316	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2317		return &dev->ep [0];
2318	list_for_each_entry (ep, &dev->gadget.ep_list, ep.ep_list) {
2319		u8	bEndpointAddress;
2320
2321		if (!ep->desc)
2322			continue;
2323		bEndpointAddress = ep->desc->bEndpointAddress;
2324		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2325			continue;
2326		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
2327			return ep;
2328	}
2329	return NULL;
2330}
2331
/* handle irqstat0:  new SETUP packets on ep0, plus per-endpoint
 * PIO interrupts (bits 0..6).  Runs with dev->lock held.
 */
static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
{
	struct net2280_ep	*ep;
	u32			num, scratch;

	/* most of these don't need individual acks */
	stat &= ~(1 << INTA_ASSERTED);
	if (!stat)
		return;
	// DEBUG (dev, "irqstat0 %04x\n", stat);

	/* starting a control request? */
	if (unlikely (stat & (1 << SETUP_PACKET_INTERRUPT))) {
		union {
			u32			raw [2];
			struct usb_ctrlrequest	r;
		} u;
		int				tmp;
		struct net2280_request		*req;

		/* first traffic after reset/connect tells us the speed */
		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
			if (readl (&dev->usb->usbstat) & (1 << HIGH_SPEED))
				dev->gadget.speed = USB_SPEED_HIGH;
			else
				dev->gadget.speed = USB_SPEED_FULL;
			net2280_led_speed (dev, dev->gadget.speed);
			DEBUG (dev, "%s speed\n",
				(dev->gadget.speed == USB_SPEED_HIGH)
					? "high" : "full");
		}

		ep = &dev->ep [0];
		ep->irqs++;

		/* make sure any leftover request state is cleared */
		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
		while (!list_empty (&ep->queue)) {
			req = list_entry (ep->queue.next,
					struct net2280_request, queue);
			done (ep, req, (req->req.actual == req->req.length)
						? 0 : -EPROTO);
		}
		ep->stopped = 0;
		dev->protocol_stall = 0;

		/* FIFO over/underflow status bits exist only on the 2280 */
		if (ep->dev->pdev->device == 0x2280)
			tmp = (1 << FIFO_OVERFLOW)
				| (1 << FIFO_UNDERFLOW);
		else
			tmp = 0;

		/* ack/clear every ep0 status bit before reading the
		 * new setup bytes
		 */
		writel (tmp | (1 << TIMEOUT)
			| (1 << USB_STALL_SENT)
			| (1 << USB_IN_NAK_SENT)
			| (1 << USB_IN_ACK_RCVD)
			| (1 << USB_OUT_PING_NAK_SENT)
			| (1 << USB_OUT_ACK_SENT)
			| (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
			| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
			| (1 << DATA_IN_TOKEN_INTERRUPT)
			, &ep->regs->ep_stat);
		u.raw [0] = readl (&dev->usb->setup0123);
		u.raw [1] = readl (&dev->usb->setup4567);

		/* keep the setup bytes in usb (little endian) order */
		cpu_to_le32s (&u.raw [0]);
		cpu_to_le32s (&u.raw [1]);

		tmp = 0;

#define	w_value		le16_to_cpu(u.r.wValue)
#define	w_index		le16_to_cpu(u.r.wIndex)
#define	w_length	le16_to_cpu(u.r.wLength)

		/* ack the irq */
		writel (1 << SETUP_PACKET_INTERRUPT, &dev->regs->irqstat0);
		stat ^= (1 << SETUP_PACKET_INTERRUPT);

		/* set up ep0 irq masks per the request's data direction */
		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		if (ep->is_in) {
			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
				| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
				| (1 << DATA_IN_TOKEN_INTERRUPT);
			stop_out_naking (ep);
		} else
			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT)
				| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
				| (1 << DATA_IN_TOKEN_INTERRUPT);
		writel (scratch, &dev->epregs [0].ep_irqenb);

		/* we made the hardware handle most lowlevel requests;
		 * everything else goes uplevel to the gadget code.
		 */
		ep->responded = 1;
		switch (u.r.bRequest) {
		case USB_REQ_GET_STATUS: {
			struct net2280_ep	*e;
			__le32			status;

			/* hw handles device and interface status */
			if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
				goto delegate;
			if ((e = get_ep_by_addr (dev, w_index)) == 0
					|| w_length > 2)
				goto do_stall;

			if (readl (&e->regs->ep_rsp)
					& (1 << SET_ENDPOINT_HALT))
				status = __constant_cpu_to_le32 (1);
			else
				status = __constant_cpu_to_le32 (0);

			/* don't bother with a request object! */
			writel (0, &dev->epregs [0].ep_irqenb);
			set_fifo_bytecount (ep, w_length);
			writel ((__force u32)status, &dev->epregs [0].ep_data);
			allow_status (ep);
			VDEBUG (dev, "%s stat %02x\n", ep->ep.name, status);
			goto next_endpoints;
			}
			/* not reached; kept for switch symmetry */
			break;
		case USB_REQ_CLEAR_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT
					|| w_length != 0)
				goto do_stall;
			if ((e = get_ep_by_addr (dev, w_index)) == 0)
				goto do_stall;
			clear_halt (e);
			allow_status (ep);
			VDEBUG (dev, "%s clear halt\n", ep->ep.name);
			goto next_endpoints;
			}
			break;
		case USB_REQ_SET_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT
					|| w_length != 0)
				goto do_stall;
			if ((e = get_ep_by_addr (dev, w_index)) == 0)
				goto do_stall;
			set_halt (e);
			allow_status (ep);
			VDEBUG (dev, "%s set halt\n", ep->ep.name);
			goto next_endpoints;
			}
			break;
		default:
delegate:
			VDEBUG (dev, "setup %02x.%02x v%04x i%04x l%04x"
				"ep_cfg %08x\n",
				u.r.bRequestType, u.r.bRequest,
				w_value, w_index, w_length,
				readl (&ep->regs->ep_cfg));
			/* the gadget driver now owns the response; it must
			 * queue a request or halt ep0 (lock dropped so it
			 * may call back into us)
			 */
			ep->responded = 0;
			spin_unlock (&dev->lock);
			tmp = dev->driver->setup (&dev->gadget, &u.r);
			spin_lock (&dev->lock);
		}

		/* stall ep0 on error */
		if (tmp < 0) {
do_stall:
			VDEBUG (dev, "req %02x.%02x protocol STALL; stat %d\n",
					u.r.bRequestType, u.r.bRequest, tmp);
			dev->protocol_stall = 1;
		}

		/* some in/out token irq should follow; maybe stall then.
		 * driver must queue a request (even zlp) or halt ep0
		 * before the host times out.
		 */
	}

#undef	w_value
#undef	w_index
#undef	w_length

	/* reached by goto from the hw-handled requests above, or by
	 * falling through after setup handling (or when there was none)
	 */
next_endpoints:
	/* endpoint data irq ? */
	scratch = stat & 0x7f;
	stat &= ~0x7f;
	for (num = 0; scratch; num++) {
		u32		t;

		/* do this endpoint's FIFO and queue need tending? */
		t = 1 << num;
		if ((scratch & t) == 0)
			continue;
		scratch ^= t;

		ep = &dev->ep [num];
		handle_ep_small (ep);
	}

	if (stat)
		DEBUG (dev, "unhandled irqstat0 %08x\n", stat);
}
2540
2541#define DMA_INTERRUPTS ( \
2542		  (1 << DMA_D_INTERRUPT) \
2543		| (1 << DMA_C_INTERRUPT) \
2544		| (1 << DMA_B_INTERRUPT) \
2545		| (1 << DMA_A_INTERRUPT))
2546#define	PCI_ERROR_INTERRUPTS ( \
2547		  (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT) \
2548		| (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT) \
2549		| (1 << PCI_RETRY_ABORT_INTERRUPT))
2550
/* handle irqstat1:  (dis)connect and root-port reset, suspend/resume,
 * DMA completion for the four dma channels (ep-a..ep-d), and PCI
 * error reporting.  Runs with dev->lock held.
 */
static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
{
	struct net2280_ep	*ep;
	u32			tmp, num, mask, scratch;

	/* after disconnect there's nothing else to do! */
	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
	mask = (1 << HIGH_SPEED) | (1 << FULL_SPEED);

	/* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRRUPT set and
	 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
	 * only indicates a change in the reset state).
	 */
	if (stat & tmp) {
		writel (tmp, &dev->regs->irqstat1);
		if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT))
					&& ((readl (&dev->usb->usbstat) & mask)
							== 0))
				|| ((readl (&dev->usb->usbctl)
					& (1 << VBUS_PIN)) == 0)
			    ) && ( dev->gadget.speed != USB_SPEED_UNKNOWN)) {
			DEBUG (dev, "disconnect %s\n",
					dev->driver->driver.name);
			stop_activity (dev, dev->driver);
			ep0_start (dev);
			return;
		}
		stat &= ~tmp;

		/* vBUS can bounce ... one of many reasons to ignore the
		 * notion of hotplug events on bus connect/disconnect!
		 */
		if (!stat)
			return;
	}

	/* NOTE: chip stays in PCI D0 state for now, but it could
	 * enter D1 to save more power
	 */
	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
	if (stat & tmp) {
		writel (tmp, &dev->regs->irqstat1);
		/* the "change" irq fires on both suspend and resume;
		 * the REQUEST bit tells which edge this is
		 */
		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
			if (dev->driver->suspend)
				dev->driver->suspend (&dev->gadget);
			if (!enable_suspend)
				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
		} else {
			if (dev->driver->resume)
				dev->driver->resume (&dev->gadget);
			/* at high speed, note erratum 0133 */
		}
		stat &= ~tmp;
	}

	/* clear any other status/irqs */
	if (stat)
		writel (stat, &dev->regs->irqstat1);

	/* some status we can just ignore */
	if (dev->pdev->device == 0x2280)
		stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
			  | (1 << SUSPEND_REQUEST_INTERRUPT)
			  | (1 << RESUME_INTERRUPT)
			  | (1 << SOF_INTERRUPT));
	else
		stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
			  | (1 << RESUME_INTERRUPT)
			  | (1 << SOF_DOWN_INTERRUPT)
			  | (1 << SOF_INTERRUPT));

	if (!stat)
		return;
	// DEBUG (dev, "irqstat1 %08x\n", stat);

	/* DMA status, for ep-{a,b,c,d} */
	scratch = stat & DMA_INTERRUPTS;
	stat &= ~DMA_INTERRUPTS;
	/* shift dma irq bits down so channel 0 (ep-a) lands in bit 0 */
	scratch >>= 9;
	for (num = 0; scratch; num++) {
		struct net2280_dma_regs	__iomem *dma;

		tmp = 1 << num;
		if ((tmp & scratch) == 0)
			continue;
		scratch ^= tmp;

		/* dma channel N serves ep[N+1] (ep0 has no dma) */
		ep = &dev->ep [num + 1];
		dma = ep->dma;

		if (!dma)
			continue;

		/* clear ep's dma status */
		tmp = readl (&dma->dmastat);
		writel (tmp, &dma->dmastat);

		/* chaining should stop on abort, short OUT from fifo,
		 * or (stat0 codepath) short OUT transfer.
		 */
		if (!use_dma_chaining) {
			if ((tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT))
					== 0) {
				DEBUG (ep->dev, "%s no xact done? %08x\n",
					ep->ep.name, tmp);
				continue;
			}
			stop_dma (ep->dma);
		}

		/* OUT transfers terminate when the data from the
		 * host is in our memory.  Process whatever's done.
		 * On this path, we know transfer's last packet wasn't
		 * less than req->length. NAK_OUT_PACKETS may be set,
		 * or the FIFO may already be holding new packets.
		 *
		 * IN transfers can linger in the FIFO for a very
		 * long time ... we ignore that for now, accounting
		 * precisely (like PIO does) needs per-packet irqs
		 */
		scan_dma_completions (ep);

		/* disable dma on inactive queues; else maybe restart */
		if (list_empty (&ep->queue)) {
			if (use_dma_chaining)
				stop_dma (ep->dma);
		} else {
			tmp = readl (&dma->dmactl);
			if (!use_dma_chaining
					|| (tmp & (1 << DMA_ENABLE)) == 0)
				restart_dma (ep);
			else if (ep->is_in && use_dma_chaining) {
				struct net2280_request	*req;
				__le32			dmacount;

				/* the descriptor at the head of the chain
				 * may still have VALID_BIT clear; that's
				 * used to trigger changing DMA_FIFO_VALIDATE
				 * (affects automagic zlp writes).
				 */
				req = list_entry (ep->queue.next,
						struct net2280_request, queue);
				dmacount = req->td->dmacount;
				dmacount &= __constant_cpu_to_le32 (
						(1 << VALID_BIT)
						| DMA_BYTE_COUNT_MASK);
				if (dmacount && (dmacount & valid_bit) == 0)
					restart_dma (ep);
			}
		}
		ep->irqs++;
	}

	/* NOTE:  there are other PCI errors we might usefully notice.
	 * if they appear very often, here's where to try recovering.
	 */
	if (stat & PCI_ERROR_INTERRUPTS) {
		ERROR (dev, "pci dma error; stat %08x\n", stat);
		stat &= ~PCI_ERROR_INTERRUPTS;
		/* these are fatal errors, but "maybe" they won't
		 * happen again ...
		 */
		stop_activity (dev, dev->driver);
		ep0_start (dev);
		stat = 0;
	}

	if (stat)
		DEBUG (dev, "unhandled irqstat1 %08x\n", stat);
}
2722
2723static irqreturn_t net2280_irq (int irq, void *_dev)
2724{
2725	struct net2280		*dev = _dev;
2726
2727	/* shared interrupt, not ours */
2728	if (!(readl(&dev->regs->irqstat0) & (1 << INTA_ASSERTED)))
2729		return IRQ_NONE;
2730
2731	spin_lock (&dev->lock);
2732
2733	/* handle disconnect, dma, and more */
2734	handle_stat1_irqs (dev, readl (&dev->regs->irqstat1));
2735
2736	/* control requests and PIO */
2737	handle_stat0_irqs (dev, readl (&dev->regs->irqstat0));
2738
2739	spin_unlock (&dev->lock);
2740
2741	return IRQ_HANDLED;
2742}
2743
2744/*-------------------------------------------------------------------------*/
2745
/* the last put of the gadget device frees the controller state that
 * was allocated in net2280_probe()
 */
static void gadget_release (struct device *_dev)
{
	kfree (dev_get_drvdata (_dev));
}
2752
2753/* tear down the binding between this driver and the pci device */
2754
static void net2280_remove (struct pci_dev *pdev)
{
	struct net2280		*dev = pci_get_drvdata (pdev);

	/* a gadget driver must have unbound before we get here */
	BUG_ON(dev->driver);

	/* then clean up the resources we allocated during probe() */
	net2280_led_shutdown (dev);
	if (dev->requests) {
		int		i;
		/* free the dummy descriptors allocated for ep[1..4] */
		for (i = 1; i < 5; i++) {
			if (!dev->ep [i].dummy)
				continue;
			pci_pool_free (dev->requests, dev->ep [i].dummy,
					dev->ep [i].td_dma);
		}
		pci_pool_destroy (dev->requests);
	}
	/* the got_irq/regs/region/enabled flags record how far probe()
	 * got, so this also serves as probe's error-unwind path
	 */
	if (dev->got_irq)
		free_irq (pdev->irq, dev);
	if (dev->regs)
		iounmap (dev->regs);
	if (dev->region)
		release_mem_region (pci_resource_start (pdev, 0),
				pci_resource_len (pdev, 0));
	if (dev->enabled)
		pci_disable_device (pdev);
	/* drops the last ref; gadget_release() frees "dev" */
	device_unregister (&dev->gadget.dev);
	device_remove_file (&pdev->dev, &dev_attr_registers);
	pci_set_drvdata (pdev, NULL);

	INFO (dev, "unbind\n");

	the_controller = NULL;
}
2790
2791/* wrap this driver around the specified device, but
2792 * don't respond over USB until a gadget driver binds to us.
2793 */
2794
static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net2280		*dev;
	unsigned long		resource, len;
	void			__iomem *base = NULL;
	int			retval, i;

	/* if you want to support more than one controller in a system,
	 * usb_gadget_driver_{register,unregister}() must change.
	 */
	if (the_controller) {
		dev_warn (&pdev->dev, "ignoring\n");
		return -EBUSY;
	}

	/* alloc, and start init */
	dev = kzalloc (sizeof *dev, GFP_KERNEL);
	if (dev == NULL){
		retval = -ENOMEM;
		goto done;
	}

	pci_set_drvdata (pdev, dev);
	spin_lock_init (&dev->lock);
	dev->pdev = pdev;
	dev->gadget.ops = &net2280_ops;
	dev->gadget.is_dualspeed = 1;

	/* the "gadget" abstracts/virtualizes the controller */
	strcpy (dev->gadget.dev.bus_id, "gadget");
	dev->gadget.dev.parent = &pdev->dev;
	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
	dev->gadget.dev.release = gadget_release;
	dev->gadget.name = driver_name;

	/* now all the pci goodies ... */
	if (pci_enable_device (pdev) < 0) {
	        retval = -ENODEV;
		goto done;
	}
	dev->enabled = 1;

	/* BAR 0 holds all the registers
	 * BAR 1 is 8051 memory; unused here (note erratum 0103)
	 * BAR 2 is fifo memory; unused here
	 */
	resource = pci_resource_start (pdev, 0);
	len = pci_resource_len (pdev, 0);
	if (!request_mem_region (resource, len, driver_name)) {
		DEBUG (dev, "controller already in use\n");
		retval = -EBUSY;
		goto done;
	}
	dev->region = 1;


	base = ioremap_nocache (resource, len);
	if (base == NULL) {
		DEBUG (dev, "can't map memory\n");
		retval = -EFAULT;
		goto done;
	}
	/* carve BAR 0 into the chip's register banks */
	dev->regs = (struct net2280_regs __iomem *) base;
	dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
	dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
	dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
	dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
	dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);

	/* put into initial config, link up all endpoints */
	writel (0, &dev->usb->usbctl);
	usb_reset (dev);
	usb_reinit (dev);

	/* irq setup after old hardware is cleaned up */
	if (!pdev->irq) {
		ERROR (dev, "No IRQ.  Check PCI setup!\n");
		retval = -ENODEV;
		goto done;
	}

	if (request_irq (pdev->irq, net2280_irq, IRQF_SHARED, driver_name, dev)
			!= 0) {
		ERROR (dev, "request interrupt %d failed\n", pdev->irq);
		retval = -EBUSY;
		goto done;
	}
	dev->got_irq = 1;

	/* DMA setup */
	/* NOTE:  we know only the 32 LSBs of dma addresses may be nonzero */
	dev->requests = pci_pool_create ("requests", pdev,
		sizeof (struct net2280_dma),
		0 /* no alignment requirements */,
		0 /* or page-crossing issues */);
	if (!dev->requests) {
		DEBUG (dev, "can't get request pool\n");
		retval = -ENOMEM;
		goto done;
	}
	/* one inert "dummy" descriptor per dma channel (ep-a..ep-d) */
	for (i = 1; i < 5; i++) {
		struct net2280_dma	*td;

		td = pci_pool_alloc (dev->requests, GFP_KERNEL,
				&dev->ep [i].td_dma);
		if (!td) {
			DEBUG (dev, "can't get dummy %d\n", i);
			retval = -ENOMEM;
			goto done;
		}
		td->dmacount = 0;	/* not VALID */
		td->dmaaddr = __constant_cpu_to_le32 (DMA_ADDR_INVALID);
		td->dmadesc = td->dmaaddr;
		dev->ep [i].dummy = td;
	}

	/* enable lower-overhead pci memory bursts during DMA */
	writel ( (1 << DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE)
			// 256 write retries may not be enough...
			// | (1 << PCI_RETRY_ABORT_ENABLE)
			| (1 << DMA_READ_MULTIPLE_ENABLE)
			| (1 << DMA_READ_LINE_ENABLE)
			, &dev->pci->pcimstctl);
	/* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
	pci_set_master (pdev);
	/* NOTE(review): pci_set_mwi() can fail; its return is ignored here */
	pci_set_mwi (pdev);

	/* ... also flushes any posted pci writes */
	dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff;

	/* done */
	INFO (dev, "%s\n", driver_desc);
	INFO (dev, "irq %d, pci mem %p, chip rev %04x\n",
			pdev->irq, base, dev->chiprev);
	INFO (dev, "version: " DRIVER_VERSION "; dma %s\n",
			use_dma
				? (use_dma_chaining ? "chaining" : "enabled")
				: "disabled");
	the_controller = dev;

	retval = device_register (&dev->gadget.dev);
	if (retval) goto done;
	retval = device_create_file (&pdev->dev, &dev_attr_registers);
	if (retval) goto done;

	return 0;

	/* error unwind: net2280_remove() checks the enabled/region/
	 * got_irq flags to release only what was acquired.
	 * NOTE(review): it also unconditionally device_unregister()s
	 * the gadget dev -- confirm that's safe on the paths where
	 * device_register() failed or never ran.
	 */
done:
	if (dev)
		net2280_remove (pdev);
	return retval;
}
2947
2948/* make sure the board is quiescent; otherwise it will continue
2949 * generating IRQs across the upcoming reboot.
2950 */
2951
static void net2280_shutdown (struct pci_dev *pdev)
{
	struct net2280		*dev = pci_get_drvdata (pdev);

	/* disable IRQs -- mask both irq enable banks first so the chip
	 * can't assert interrupts across the reboot
	 */
	writel (0, &dev->regs->pciirqenb0);
	writel (0, &dev->regs->pciirqenb1);

	/* disable the pullup so the host will think we're gone */
	writel (0, &dev->usb->usbctl);
}
2963
2964
2965/*-------------------------------------------------------------------------*/
2966
/* devices we bind to: PLX (formerly NetChip) NET2280 and NET2282,
 * matched by class as well as vendor/device id
 */
static const struct pci_device_id pci_ids [] = { {
	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask =	~0,
	.vendor =	0x17cc,		/* PLX / NetChip */
	.device =	0x2280,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
}, {
	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask =	~0,
	.vendor =	0x17cc,
	.device =	0x2282,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,

}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
2985
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver net2280_pci_driver = {
	.name =		(char *) driver_name,
	.id_table =	pci_ids,

	.probe =	net2280_probe,
	.remove =	net2280_remove,
	.shutdown =	net2280_shutdown,

	/* no power-management hooks; the chip stays in PCI D0 */
};

MODULE_DESCRIPTION (DRIVER_DESC);
MODULE_AUTHOR ("David Brownell");
MODULE_LICENSE ("GPL");
3000
3001static int __init init (void)
3002{
3003	if (!use_dma)
3004		use_dma_chaining = 0;
3005	return pci_register_driver (&net2280_pci_driver);
3006}
3007module_init (init);
3008
/* module unload: detach from the pci core; net2280_remove() runs
 * for any still-bound device
 */
static void __exit cleanup (void)
{
	pci_unregister_driver (&net2280_pci_driver);
}
module_exit (cleanup);
3014