• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/usb/gadget/
1/*
2 * Driver for the Atmel USBA high speed USB device controller
3 *
4 * Copyright (C) 2005-2007 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/clk.h>
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/io.h>
15#include <linux/slab.h>
16#include <linux/device.h>
17#include <linux/dma-mapping.h>
18#include <linux/list.h>
19#include <linux/platform_device.h>
20#include <linux/usb/ch9.h>
21#include <linux/usb/gadget.h>
22#include <linux/usb/atmel_usba_udc.h>
23#include <linux/delay.h>
24
25#include <asm/gpio.h>
26#include <mach/board.h>
27
28#include "atmel_usba_udc.h"
29
30
/* Forward declaration; the real initializer (gadget ops etc.) is below. */
static struct usba_udc the_udc;
/* Per-endpoint state array, allocated at probe time. */
static struct usba_ep *usba_ep;
33
34#ifdef CONFIG_USB_GADGET_DEBUG_FS
35#include <linux/debugfs.h>
36#include <linux/uaccess.h>
37
/*
 * Open the per-endpoint "queue" debugfs file: take a snapshot of the
 * endpoint's request queue under the UDC lock and stash it in
 * file->private_data so the read method can consume it lock-free.
 */
static int queue_dbg_open(struct inode *inode, struct file *file)
{
	struct usba_ep *ep = inode->i_private;
	struct usba_request *req, *req_copy;
	struct list_head *queue_data;

	queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL);
	if (!queue_data)
		return -ENOMEM;
	INIT_LIST_HEAD(queue_data);

	spin_lock_irq(&ep->udc->lock);
	list_for_each_entry(req, &ep->queue, queue) {
		/* GFP_ATOMIC: we hold a spinlock with IRQs off */
		req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC);
		if (!req_copy)
			goto fail;
		list_add_tail(&req_copy->queue, queue_data);
	}
	spin_unlock_irq(&ep->udc->lock);

	file->private_data = queue_data;
	return 0;

fail:
	spin_unlock_irq(&ep->udc->lock);
	/* Undo the partial snapshot */
	list_for_each_entry_safe(req, req_copy, queue_data, queue) {
		list_del(&req->queue);
		kfree(req);
	}
	kfree(queue_data);
	return -ENOMEM;
}
70
71/*
72 * bbbbbbbb llllllll IZS sssss nnnn FDL\n\0
73 *
74 * b: buffer address
75 * l: buffer length
76 * I/i: interrupt/no interrupt
77 * Z/z: zero/no zero
78 * S/s: short ok/short not ok
79 * s: status
80 * n: nr_packets
81 * F/f: submitted/not submitted to FIFO
82 * D/d: using/not using DMA
83 * L/l: last transaction/not last transaction
84 */
/*
 * Read the snapshot taken at open time, one line per request, in the
 * format documented above.  Each entry is freed as soon as it has been
 * reported, so a short read can be resumed by a later call.
 */
static ssize_t queue_dbg_read(struct file *file, char __user *buf,
		size_t nbytes, loff_t *ppos)
{
	struct list_head *queue = file->private_data;
	struct usba_request *req, *tmp_req;
	size_t len, remaining, actual = 0;
	char tmpbuf[38];

	if (!access_ok(VERIFY_WRITE, buf, nbytes))
		return -EFAULT;

	/* i_mutex serializes concurrent readers of this open file */
	mutex_lock(&file->f_dentry->d_inode->i_mutex);
	list_for_each_entry_safe(req, tmp_req, queue, queue) {
		len = snprintf(tmpbuf, sizeof(tmpbuf),
				"%8p %08x %c%c%c %5d %c%c%c\n",
				req->req.buf, req->req.length,
				req->req.no_interrupt ? 'i' : 'I',
				req->req.zero ? 'Z' : 'z',
				req->req.short_not_ok ? 's' : 'S',
				req->req.status,
				req->submitted ? 'F' : 'f',
				req->using_dma ? 'D' : 'd',
				req->last_transaction ? 'L' : 'l');
		/* snprintf may return more than fits: clamp to the buffer */
		len = min(len, sizeof(tmpbuf));
		if (len > nbytes)
			break;

		/* Entry is consumed whether or not the copy succeeds */
		list_del(&req->queue);
		kfree(req);

		remaining = __copy_to_user(buf, tmpbuf, len);
		actual += len - remaining;
		if (remaining)
			break;

		nbytes -= len;
		buf += len;
	}
	mutex_unlock(&file->f_dentry->d_inode->i_mutex);

	return actual;
}
127
128static int queue_dbg_release(struct inode *inode, struct file *file)
129{
130	struct list_head *queue_data = file->private_data;
131	struct usba_request *req, *tmp_req;
132
133	list_for_each_entry_safe(req, tmp_req, queue_data, queue) {
134		list_del(&req->queue);
135		kfree(req);
136	}
137	kfree(queue_data);
138	return 0;
139}
140
/*
 * Open the "regs" debugfs file: snapshot the whole register window
 * (inode->i_size bytes, set when the file was created) into a kernel
 * buffer so subsequent reads see one consistent image.
 */
static int regs_dbg_open(struct inode *inode, struct file *file)
{
	struct usba_udc *udc;
	unsigned int i;
	u32 *data;
	int ret = -ENOMEM;

	mutex_lock(&inode->i_mutex);
	udc = inode->i_private;
	data = kmalloc(inode->i_size, GFP_KERNEL);
	if (!data)
		goto out;

	/* Capture all registers atomically w.r.t. the rest of the driver */
	spin_lock_irq(&udc->lock);
	for (i = 0; i < inode->i_size / 4; i++)
		data[i] = __raw_readl(udc->regs + i * 4);
	spin_unlock_irq(&udc->lock);

	file->private_data = data;
	ret = 0;

out:
	mutex_unlock(&inode->i_mutex);

	return ret;
}
167
168static ssize_t regs_dbg_read(struct file *file, char __user *buf,
169		size_t nbytes, loff_t *ppos)
170{
171	struct inode *inode = file->f_dentry->d_inode;
172	int ret;
173
174	mutex_lock(&inode->i_mutex);
175	ret = simple_read_from_buffer(buf, nbytes, ppos,
176			file->private_data,
177			file->f_dentry->d_inode->i_size);
178	mutex_unlock(&inode->i_mutex);
179
180	return ret;
181}
182
/* Release the "regs" debugfs file: free the register snapshot. */
static int regs_dbg_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
188
189const struct file_operations queue_dbg_fops = {
190	.owner		= THIS_MODULE,
191	.open		= queue_dbg_open,
192	.llseek		= no_llseek,
193	.read		= queue_dbg_read,
194	.release	= queue_dbg_release,
195};
196
197const struct file_operations regs_dbg_fops = {
198	.owner		= THIS_MODULE,
199	.open		= regs_dbg_open,
200	.llseek		= generic_file_llseek,
201	.read		= regs_dbg_read,
202	.release	= regs_dbg_release,
203};
204
/*
 * Create the per-endpoint debugfs entries: a "queue" dump for every
 * endpoint, "dma_status" for DMA-capable ones and "state" for control
 * endpoints.  On failure everything created so far is rolled back and
 * an error is logged; the driver keeps working without debugfs.
 */
static void usba_ep_init_debugfs(struct usba_udc *udc,
		struct usba_ep *ep)
{
	struct dentry *ep_root;

	ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root);
	if (!ep_root)
		goto err_root;
	ep->debugfs_dir = ep_root;

	ep->debugfs_queue = debugfs_create_file("queue", 0400, ep_root,
						ep, &queue_dbg_fops);
	if (!ep->debugfs_queue)
		goto err_queue;

	if (ep->can_dma) {
		ep->debugfs_dma_status
			= debugfs_create_u32("dma_status", 0400, ep_root,
					&ep->last_dma_status);
		if (!ep->debugfs_dma_status)
			goto err_dma_status;
	}
	if (ep_is_control(ep)) {
		ep->debugfs_state
			= debugfs_create_u32("state", 0400, ep_root,
					&ep->state);
		if (!ep->debugfs_state)
			goto err_state;
	}

	return;

err_state:
	if (ep->can_dma)
		debugfs_remove(ep->debugfs_dma_status);
err_dma_status:
	debugfs_remove(ep->debugfs_queue);
err_queue:
	debugfs_remove(ep_root);
err_root:
	dev_err(&ep->udc->pdev->dev,
		"failed to create debugfs directory for %s\n", ep->ep.name);
}
248
249static void usba_ep_cleanup_debugfs(struct usba_ep *ep)
250{
251	debugfs_remove(ep->debugfs_queue);
252	debugfs_remove(ep->debugfs_dma_status);
253	debugfs_remove(ep->debugfs_state);
254	debugfs_remove(ep->debugfs_dir);
255	ep->debugfs_dma_status = NULL;
256	ep->debugfs_dir = NULL;
257}
258
/*
 * Create the controller's debugfs tree: a directory named after the
 * gadget, containing a "regs" register-dump file (sized from the
 * register memory resource) plus the ep0 entries.  Failure is logged
 * but non-fatal.
 */
static void usba_init_debugfs(struct usba_udc *udc)
{
	struct dentry *root, *regs;
	struct resource *regs_resource;

	root = debugfs_create_dir(udc->gadget.name, NULL);
	if (IS_ERR(root) || !root)
		goto err_root;
	udc->debugfs_root = root;

	regs = debugfs_create_file("regs", 0400, root, udc, &regs_dbg_fops);
	if (!regs)
		goto err_regs;

	regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
				CTRL_IOMEM_ID);
	/* i_size tells regs_dbg_open() how many bytes to snapshot */
	regs->d_inode->i_size = regs_resource->end - regs_resource->start + 1;
	udc->debugfs_regs = regs;

	usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0));

	return;

err_regs:
	debugfs_remove(root);
err_root:
	udc->debugfs_root = NULL;
	dev_err(&udc->pdev->dev, "debugfs is not available\n");
}
288
/* Tear down everything created by usba_init_debugfs(). */
static void usba_cleanup_debugfs(struct usba_udc *udc)
{
	usba_ep_cleanup_debugfs(to_usba_ep(udc->gadget.ep0));
	debugfs_remove(udc->debugfs_regs);
	debugfs_remove(udc->debugfs_root);
	udc->debugfs_regs = NULL;
	udc->debugfs_root = NULL;
}
297#else
/* Debugfs support compiled out: no-op stub. */
static inline void usba_ep_init_debugfs(struct usba_udc *udc,
					 struct usba_ep *ep)
{

}
303
/* Debugfs support compiled out: no-op stub. */
static inline void usba_ep_cleanup_debugfs(struct usba_ep *ep)
{

}
308
/* Debugfs support compiled out: no-op stub. */
static inline void usba_init_debugfs(struct usba_udc *udc)
{

}
313
/* Debugfs support compiled out: no-op stub. */
static inline void usba_cleanup_debugfs(struct usba_udc *udc)
{

}
318#endif
319
320static int vbus_is_present(struct usba_udc *udc)
321{
322	if (gpio_is_valid(udc->vbus_pin))
323		return gpio_get_value(udc->vbus_pin) ^ udc->vbus_pin_inverted;
324
325	/* No Vbus detection: Assume always present */
326	return 1;
327}
328
329#if defined(CONFIG_ARCH_AT91SAM9RL)
330
331#include <mach/at91_pmc.h>
332
/*
 * Enable or disable the UTMI bias generator via the PMC UCKR register.
 * Only needed on AT91SAM9RL; other SoCs use the empty stub below.
 */
static void toggle_bias(int is_on)
{
	unsigned int uckr = at91_sys_read(AT91_CKGR_UCKR);

	if (is_on)
		at91_sys_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
	else
		at91_sys_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
}
342
343#else
344
/* Only the AT91SAM9RL needs bias toggling: no-op elsewhere. */
static void toggle_bias(int is_on)
{
}
348
349#endif /* CONFIG_ARCH_AT91SAM9RL */
350
/*
 * Copy the next chunk of an IN request into the endpoint FIFO and mark
 * the packet ready.  A transaction is at most maxpacket bytes; the
 * request is "last" when the remainder fits in one packet, unless the
 * chunk is exactly maxpacket and a trailing ZLP was requested
 * (req.zero), in which case one more (empty) transaction follows.
 */
static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
{
	unsigned int transaction_len;

	transaction_len = req->req.length - req->req.actual;
	req->last_transaction = 1;
	if (transaction_len > ep->ep.maxpacket) {
		transaction_len = ep->ep.maxpacket;
		req->last_transaction = 0;
	} else if (transaction_len == ep->ep.maxpacket && req->req.zero)
		req->last_transaction = 0;

	DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
		ep->ep.name, req, transaction_len,
		req->last_transaction ? ", done" : "");

	memcpy_toio(ep->fifo, req->req.buf + req->req.actual, transaction_len);
	usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
	req->req.actual += transaction_len;
}
371
/*
 * Start transferring a request on an endpoint: program the DMA
 * controller when the endpoint uses DMA, otherwise feed the FIFO by
 * hand and enable the interrupt matching the next expected event.
 * Called with udc->lock held.
 */
static void submit_request(struct usba_ep *ep, struct usba_request *req)
{
	DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n",
		ep->ep.name, req, req->req.length);

	req->req.actual = 0;
	req->submitted = 1;

	if (req->using_dma) {
		if (req->req.length == 0) {
			/* Zero-length packet: no DMA transfer needed */
			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
			return;
		}

		if (req->req.zero)
			usba_ep_writel(ep, CTL_ENB, USBA_SHORT_PACKET);
		else
			usba_ep_writel(ep, CTL_DIS, USBA_SHORT_PACKET);

		usba_dma_writel(ep, ADDRESS, req->req.dma);
		usba_dma_writel(ep, CONTROL, req->ctrl);
	} else {
		next_fifo_transaction(ep, req);
		if (req->last_transaction) {
			/* Wait for the final packet to go out */
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
		} else {
			/* More chunks to feed: wait for FIFO space */
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
		}
	}
}
404
405static void submit_next_request(struct usba_ep *ep)
406{
407	struct usba_request *req;
408
409	if (list_empty(&ep->queue)) {
410		usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY);
411		return;
412	}
413
414	req = list_entry(ep->queue.next, struct usba_request, queue);
415	if (!req->submitted)
416		submit_request(ep, req);
417}
418
/*
 * Start the zero-length IN status stage of a control transfer; the
 * TX_COMPLETE interrupt signals when the host has taken it.
 */
static void send_status(struct usba_udc *udc, struct usba_ep *ep)
{
	ep->state = STATUS_STAGE_IN;
	usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
	usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
}
425
/*
 * PIO OUT path: drain the endpoint's busy FIFO banks into the request
 * at the head of the queue.  Completes the request when its buffer is
 * full or the controller reports the end of the transfer; control
 * endpoints then immediately start the status stage.  Called with
 * udc->lock held; the lock is dropped around the completion callback.
 */
static void receive_data(struct usba_ep *ep)
{
	struct usba_udc *udc = ep->udc;
	struct usba_request *req;
	unsigned long status;
	unsigned int bytecount, nr_busy;
	int is_complete = 0;

	status = usba_ep_readl(ep, STA);
	nr_busy = USBA_BFEXT(BUSY_BANKS, status);

	DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy);

	while (nr_busy > 0) {
		if (list_empty(&ep->queue)) {
			/* No request to receive into: mask the interrupt */
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			break;
		}
		req = list_entry(ep->queue.next,
				 struct usba_request, queue);

		bytecount = USBA_BFEXT(BYTE_COUNT, status);

		/*
		 * NOTE(review): bit 31 of STA appears to flag the end of
		 * the transfer (short packet) -- confirm against the USBA
		 * datasheet.
		 */
		if (status & (1 << 31))
			is_complete = 1;
		if (req->req.actual + bytecount >= req->req.length) {
			is_complete = 1;
			/* Never copy past the end of the request buffer */
			bytecount = req->req.length - req->req.actual;
		}

		memcpy_fromio(req->req.buf + req->req.actual,
				ep->fifo, bytecount);
		req->req.actual += bytecount;

		/* Hand the FIFO bank back to the controller */
		usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);

		if (is_complete) {
			DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name);
			req->req.status = 0;
			list_del_init(&req->queue);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			spin_unlock(&udc->lock);
			req->req.complete(&ep->ep, &req->req);
			spin_lock(&udc->lock);
		}

		/* Re-read: the callback may have run and banks may have filled */
		status = usba_ep_readl(ep, STA);
		nr_busy = USBA_BFEXT(BUSY_BANKS, status);

		if (is_complete && ep_is_control(ep)) {
			send_status(udc, ep);
			break;
		}
	}
}
481
/*
 * Finish one (already dequeued) request: record its status, unmap the
 * DMA buffer if queue_dma() mapped it, and invoke the gadget's
 * completion callback.  Called with udc->lock held; the lock is
 * dropped around the callback, which may queue new requests.
 */
static void
request_complete(struct usba_ep *ep, struct usba_request *req, int status)
{
	struct usba_udc *udc = ep->udc;

	WARN_ON(!list_empty(&req->queue));

	/* Don't overwrite a status already set (e.g. by the IRQ handler) */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;

	if (req->mapped) {
		dma_unmap_single(
			&udc->pdev->dev, req->req.dma, req->req.length,
			ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	}

	DBG(DBG_GADGET | DBG_REQ,
		"%s: req %p complete: status %d, actual %u\n",
		ep->ep.name, req, req->req.status, req->req.actual);

	spin_unlock(&udc->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&udc->lock);
}
508
/*
 * Complete every request on @list with @status.  Called with
 * udc->lock held; request_complete() temporarily drops it around the
 * gadget's completion callback, hence the _safe iteration.
 */
static void
request_complete_list(struct usba_ep *ep, struct list_head *list, int status)
{
	struct usba_request *req, *tmp_req;

	list_for_each_entry_safe(req, tmp_req, list, queue) {
		list_del_init(&req->queue);
		request_complete(ep, req, status);
	}
}
519
/*
 * Gadget ep_enable(): validate the descriptor against this hardware
 * endpoint (matching index, non-zero maxpacket within the FIFO size),
 * program EPT_CFG with size, direction, type and bank count (plus
 * transactions per microframe for high-bandwidth iso), then enable
 * the endpoint and unmask its interrupt (and its DMA channel's, when
 * DMA-capable).
 */
static int
usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags, ept_cfg, maxpacket;
	unsigned int nr_trans;

	DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);

	/* Low 11 bits: packet size; upper bits: iso transactions/uframe */
	maxpacket = le16_to_cpu(desc->wMaxPacketSize) & 0x7ff;

	if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
			|| ep->index == 0
			|| desc->bDescriptorType != USB_DT_ENDPOINT
			|| maxpacket == 0
			|| maxpacket > ep->fifo_size) {
		DBG(DBG_ERR, "ep_enable: Invalid argument");
		return -EINVAL;
	}

	ep->is_isoc = 0;
	ep->is_in = 0;

	if (maxpacket <= 8)
		ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
	else
		/* LSB is bit 1, not 0 */
		ept_cfg = USBA_BF(EPT_SIZE, fls(maxpacket - 1) - 3);

	DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
			ep->ep.name, ept_cfg, maxpacket);

	if (usb_endpoint_dir_in(desc)) {
		ep->is_in = 1;
		ept_cfg |= USBA_EPT_DIR_IN;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!ep->can_isoc) {
			DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
					ep->ep.name);
			return -EINVAL;
		}

		/*
		 * Bits 11:12 specify number of _additional_
		 * transactions per microframe.
		 */
		nr_trans = ((le16_to_cpu(desc->wMaxPacketSize) >> 11) & 3) + 1;
		if (nr_trans > 3)
			return -EINVAL;

		ep->is_isoc = 1;
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO);

		/*
		 * Do triple-buffering on high-bandwidth iso endpoints.
		 */
		if (nr_trans > 1 && ep->nr_banks == 3)
			ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_TRIPLE);
		else
			ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
		ept_cfg |= USBA_BF(NB_TRANS, nr_trans);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
		break;
	case USB_ENDPOINT_XFER_INT:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
		break;
	}

	spin_lock_irqsave(&ep->udc->lock, flags);

	if (ep->desc) {
		spin_unlock_irqrestore(&ep->udc->lock, flags);
		DBG(DBG_ERR, "ep%d already enabled\n", ep->index);
		return -EBUSY;
	}

	ep->desc = desc;
	ep->ep.maxpacket = maxpacket;

	usba_ep_writel(ep, CFG, ept_cfg);
	usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);

	if (ep->can_dma) {
		u32 ctrl;

		/* Unmask both the endpoint and its DMA channel interrupt */
		usba_writel(udc, INT_ENB,
				(usba_readl(udc, INT_ENB)
					| USBA_BF(EPT_INT, 1 << ep->index)
					| USBA_BF(DMA_INT, 1 << ep->index)));
		ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA;
		usba_ep_writel(ep, CTL_ENB, ctrl);
	} else {
		usba_writel(udc, INT_ENB,
				(usba_readl(udc, INT_ENB)
					| USBA_BF(EPT_INT, 1 << ep->index)));
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
			(unsigned long)usba_ep_readl(ep, CFG));
	DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
			(unsigned long)usba_readl(udc, INT_ENB));

	return 0;
}
638
/*
 * Gadget ep_disable(): tear down the endpoint.  Aborts any DMA in
 * flight, disables the endpoint, masks its interrupt, and completes
 * all outstanding requests with -ESHUTDOWN.
 */
static int usba_ep_disable(struct usb_ep *_ep)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	LIST_HEAD(req_list);
	unsigned long flags;

	DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name);

	spin_lock_irqsave(&udc->lock, flags);

	if (!ep->desc) {
		spin_unlock_irqrestore(&udc->lock, flags);
		/* REVISIT because this driver disables endpoints in
		 * reset_all_endpoints() before calling disconnect(),
		 * most gadget drivers would trigger this non-error ...
		 */
		if (udc->gadget.speed != USB_SPEED_UNKNOWN)
			DBG(DBG_ERR, "ep_disable: %s not enabled\n",
					ep->ep.name);
		return -EINVAL;
	}
	ep->desc = NULL;

	/* Move pending requests to a private list for completion below */
	list_splice_init(&ep->queue, &req_list);
	if (ep->can_dma) {
		/* Stop the DMA channel and clear any latched status */
		usba_dma_writel(ep, CONTROL, 0);
		usba_dma_writel(ep, ADDRESS, 0);
		usba_dma_readl(ep, STATUS);
	}
	usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
	usba_writel(udc, INT_ENB,
			usba_readl(udc, INT_ENB)
			& ~USBA_BF(EPT_INT, 1 << ep->index));

	request_complete_list(ep, &req_list, -ESHUTDOWN);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
680
681static struct usb_request *
682usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
683{
684	struct usba_request *req;
685
686	DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags);
687
688	req = kzalloc(sizeof(*req), gfp_flags);
689	if (!req)
690		return NULL;
691
692	INIT_LIST_HEAD(&req->queue);
693	req->req.dma = DMA_ADDR_INVALID;
694
695	return &req->req;
696}
697
698static void
699usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
700{
701	struct usba_request *req = to_usba_req(_req);
702
703	DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req);
704
705	kfree(req);
706}
707
/*
 * Queue a request on a DMA-capable endpoint.  Maps the buffer (or
 * syncs one the caller already mapped), precomputes the DMA CONTROL
 * word, then appends the request and -- if the queue was empty --
 * submits it immediately.  Returns -EINVAL for lengths the controller
 * cannot express (> 64 KiB) and -ESHUTDOWN if the endpoint was
 * disabled in the meantime.
 */
static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
		struct usba_request *req, gfp_t gfp_flags)
{
	unsigned long flags;
	int ret;

	DBG(DBG_DMA, "%s: req l/%u d/%08x %c%c%c\n",
		ep->ep.name, req->req.length, req->req.dma,
		req->req.zero ? 'Z' : 'z',
		req->req.short_not_ok ? 'S' : 's',
		req->req.no_interrupt ? 'I' : 'i');

	if (req->req.length > 0x10000) {
		/* Lengths from 0 to 65536 (inclusive) are supported */
		DBG(DBG_ERR, "invalid request length %u\n", req->req.length);
		return -EINVAL;
	}

	req->using_dma = 1;

	if (req->req.dma == DMA_ADDR_INVALID) {
		/* We own the mapping: remember to unmap on completion */
		req->req.dma = dma_map_single(
			&udc->pdev->dev, req->req.buf, req->req.length,
			ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	} else {
		/* Caller-mapped buffer: just sync it for the device */
		dma_sync_single_for_device(
			&udc->pdev->dev, req->req.dma, req->req.length,
			ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 0;
	}

	req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
			| USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
			| USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;

	if (ep->is_in)
		req->ctrl |= USBA_DMA_END_BUF_EN;

	/*
	 * Add this request to the queue and submit for DMA if
	 * possible. Check if we're still alive first -- we may have
	 * received a reset since last time we checked.
	 */
	ret = -ESHUTDOWN;
	spin_lock_irqsave(&udc->lock, flags);
	if (ep->desc) {
		if (list_empty(&ep->queue))
			submit_request(ep, req);

		list_add_tail(&req->queue, &ep->queue);
		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
765
/*
 * Gadget ep_queue(): hand a new request to the controller.
 * DMA-capable endpoints delegate to queue_dma(); for PIO the request
 * is appended to the queue and the appropriate ready interrupt (TX
 * for IN-style states, RX otherwise) is enabled so the IRQ handler
 * picks it up.  Returns -ESHUTDOWN when the controller or endpoint is
 * not usable.
 */
static int
usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct usba_request *req = to_usba_req(_req);
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags;
	int ret;

	DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n",
			ep->ep.name, req, _req->length);

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN || !ep->desc)
		return -ESHUTDOWN;

	req->submitted = 0;
	req->using_dma = 0;
	req->last_transaction = 0;

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	if (ep->can_dma)
		return queue_dma(udc, ep, req, gfp_flags);

	/* May have received a reset since last time we checked */
	ret = -ESHUTDOWN;
	spin_lock_irqsave(&udc->lock, flags);
	if (ep->desc) {
		list_add_tail(&req->queue, &ep->queue);

		if ((!ep_is_control(ep) && ep->is_in) ||
			(ep_is_control(ep)
				&& (ep->state == DATA_STAGE_IN
					|| ep->state == STATUS_STAGE_IN)))
			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
		else
			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
810
/*
 * Recompute req.actual after a (partially) completed DMA transfer:
 * the BUF_LEN field of the DMA status holds the bytes still
 * outstanding, so actual = length - remaining.
 */
static void
usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status)
{
	req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status);
}
816
817static int stop_dma(struct usba_ep *ep, u32 *pstatus)
818{
819	unsigned int timeout;
820	u32 status;
821
822	/*
823	 * Stop the DMA controller. When writing both CH_EN
824	 * and LINK to 0, the other bits are not affected.
825	 */
826	usba_dma_writel(ep, CONTROL, 0);
827
828	/* Wait for the FIFO to empty */
829	for (timeout = 40; timeout; --timeout) {
830		status = usba_dma_readl(ep, STATUS);
831		if (!(status & USBA_DMA_CH_EN))
832			break;
833		udelay(1);
834	}
835
836	if (pstatus)
837		*pstatus = status;
838
839	if (timeout == 0) {
840		dev_err(&ep->udc->pdev->dev,
841			"%s: timed out waiting for DMA FIFO to empty\n",
842			ep->ep.name);
843		return -ETIMEDOUT;
844	}
845
846	return 0;
847}
848
/*
 * Gadget ep_dequeue(): abort one queued request.  If it is currently
 * being transferred by DMA, stop the channel, reset the FIFO and
 * salvage the byte count before completing the request with
 * -ECONNRESET, then start the next queued request (if any).
 */
static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	struct usba_request *req = to_usba_req(_req);
	unsigned long flags;
	u32 status;

	DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
			ep->ep.name, req);

	spin_lock_irqsave(&udc->lock, flags);

	if (req->using_dma) {
		/*
		 * If this request is currently being transferred,
		 * stop the DMA controller and reset the FIFO.
		 */
		if (ep->queue.next == &req->queue) {
			status = usba_dma_readl(ep, STATUS);
			if (status & USBA_DMA_CH_EN)
				stop_dma(ep, &status);

#ifdef CONFIG_USB_GADGET_DEBUG_FS
			ep->last_dma_status = status;
#endif

			usba_writel(udc, EPT_RST, 1 << ep->index);

			/* Record how many bytes actually transferred */
			usba_update_req(ep, req, status);
		}
	}

	/*
	 * Errors should stop the queue from advancing until the
	 * completion function returns.
	 */
	list_del_init(&req->queue);

	request_complete(ep, req, -ECONNRESET);

	/* Process the next request if any */
	submit_next_request(ep);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
896
/*
 * Gadget set_halt(): force or clear a STALL on the endpoint.  Refused
 * with -EAGAIN while transfers are pending (IN data still in the FIFO
 * would be lost); isochronous endpoints cannot halt at all (-ENOTTY),
 * and halting a not-yet-enabled endpoint returns -ENODEV.
 */
static int usba_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags;
	int ret = 0;

	DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name,
			value ? "set" : "clear");

	if (!ep->desc) {
		DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n",
				ep->ep.name);
		return -ENODEV;
	}
	if (ep->is_isoc) {
		DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n",
				ep->ep.name);
		return -ENOTTY;
	}

	spin_lock_irqsave(&udc->lock, flags);

	/*
	 * We can't halt IN endpoints while there are still data to be
	 * transferred
	 */
	if (!list_empty(&ep->queue)
			|| ((value && ep->is_in && (usba_ep_readl(ep, STA)
					& USBA_BF(BUSY_BANKS, -1L))))) {
		ret = -EAGAIN;
	} else {
		if (value)
			usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
		else
			/* Clearing a halt also resets the data toggle */
			usba_ep_writel(ep, CLR_STA,
					USBA_FORCE_STALL | USBA_TOGGLE_CLR);
		/* NOTE(review): read-back presumably flushes the posted write */
		usba_ep_readl(ep, STA);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
941
942static int usba_ep_fifo_status(struct usb_ep *_ep)
943{
944	struct usba_ep *ep = to_usba_ep(_ep);
945
946	return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
947}
948
949static void usba_ep_fifo_flush(struct usb_ep *_ep)
950{
951	struct usba_ep *ep = to_usba_ep(_ep);
952	struct usba_udc *udc = ep->udc;
953
954	usba_writel(udc, EPT_RST, 1 << ep->index);
955}
956
/* Endpoint operations exposed to the gadget framework. */
static const struct usb_ep_ops usba_ep_ops = {
	.enable		= usba_ep_enable,
	.disable	= usba_ep_disable,
	.alloc_request	= usba_ep_alloc_request,
	.free_request	= usba_ep_free_request,
	.queue		= usba_ep_queue,
	.dequeue	= usba_ep_dequeue,
	.set_halt	= usba_ep_set_halt,
	.fifo_status	= usba_ep_fifo_status,
	.fifo_flush	= usba_ep_fifo_flush,
};
968
969static int usba_udc_get_frame(struct usb_gadget *gadget)
970{
971	struct usba_udc *udc = to_usba_udc(gadget);
972
973	return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM));
974}
975
976static int usba_udc_wakeup(struct usb_gadget *gadget)
977{
978	struct usba_udc *udc = to_usba_udc(gadget);
979	unsigned long flags;
980	u32 ctrl;
981	int ret = -EINVAL;
982
983	spin_lock_irqsave(&udc->lock, flags);
984	if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
985		ctrl = usba_readl(udc, CTRL);
986		usba_writel(udc, CTRL, ctrl | USBA_REMOTE_WAKE_UP);
987		ret = 0;
988	}
989	spin_unlock_irqrestore(&udc->lock, flags);
990
991	return ret;
992}
993
994static int
995usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
996{
997	struct usba_udc *udc = to_usba_udc(gadget);
998	unsigned long flags;
999
1000	spin_lock_irqsave(&udc->lock, flags);
1001	if (is_selfpowered)
1002		udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
1003	else
1004		udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
1005	spin_unlock_irqrestore(&udc->lock, flags);
1006
1007	return 0;
1008}
1009
/* Controller-level operations exposed to the gadget framework. */
static const struct usb_gadget_ops usba_udc_ops = {
	.get_frame		= usba_udc_get_frame,
	.wakeup			= usba_udc_wakeup,
	.set_selfpowered	= usba_udc_set_selfpowered,
};
1015
/* Fixed descriptor used internally to configure ep0: 64-byte control EP. */
static struct usb_endpoint_descriptor usba_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = 0,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(64),
	.bInterval = 1,
};
1024
/* the_udc is static: nothing to free when the gadget device goes away. */
static void nop_release(struct device *dev)
{

}
1029
/*
 * The single controller instance.  The endpoint list is populated at
 * probe time; the embedded struct device is released by nop_release()
 * because this object is static.
 */
static struct usba_udc the_udc = {
	.gadget	= {
		.ops		= &usba_udc_ops,
		.ep_list	= LIST_HEAD_INIT(the_udc.gadget.ep_list),
		.is_dualspeed	= 1,
		.name		= "atmel_usba_udc",
		.dev	= {
			.init_name	= "gadget",
			.release	= nop_release,
		},
	},
};
1042
1043/*
1044 * Called with interrupts disabled and udc->lock held.
1045 */
/*
 * Reset every endpoint FIFO, fail ep0's pending requests with
 * -ECONNRESET and disable all other enabled endpoints.  Called with
 * interrupts disabled and udc->lock held; the lock is dropped around
 * usba_ep_disable(), which takes it itself.
 */
static void reset_all_endpoints(struct usba_udc *udc)
{
	struct usba_ep *ep;
	struct usba_request *req, *tmp_req;

	usba_writel(udc, EPT_RST, ~0UL);

	ep = to_usba_ep(udc->gadget.ep0);
	list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
		list_del_init(&req->queue);
		request_complete(ep, req, -ECONNRESET);
	}

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		if (ep->desc) {
			spin_unlock(&udc->lock);
			usba_ep_disable(&ep->ep);
			spin_lock(&udc->lock);
		}
	}
}
1067
1068static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
1069{
1070	struct usba_ep *ep;
1071
1072	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1073		return to_usba_ep(udc->gadget.ep0);
1074
1075	list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
1076		u8 bEndpointAddress;
1077
1078		if (!ep->desc)
1079			continue;
1080		bEndpointAddress = ep->desc->bEndpointAddress;
1081		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1082			continue;
1083		if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
1084				== (wIndex & USB_ENDPOINT_NUMBER_MASK))
1085			return ep;
1086	}
1087
1088	return NULL;
1089}
1090
1091/* Called with interrupts disabled and udc->lock held */
/*
 * Answer the current control transfer with a protocol STALL and rearm
 * ep0 for the next SETUP packet.  Called with udc->lock held.
 */
static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep)
{
	usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
	ep->state = WAIT_FOR_SETUP;
}
1097
1098static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep)
1099{
1100	if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL)
1101		return 1;
1102	return 0;
1103}
1104
1105static inline void set_address(struct usba_udc *udc, unsigned int addr)
1106{
1107	u32 regval;
1108
1109	DBG(DBG_BUS, "setting address %u...\n", addr);
1110	regval = usba_readl(udc, CTRL);
1111	regval = USBA_BFINS(DEV_ADDR, addr, regval);
1112	usba_writel(udc, CTRL, regval);
1113}
1114
/*
 * Enter one of the USB 2.0 defined test modes (Test_J, Test_K,
 * Test_SE0_NAK, Test_Packet) as requested via SET_FEATURE(TEST_MODE).
 * All endpoints are reset first; returns -EINVAL for an unknown test
 * selector.
 */
static int do_test_mode(struct usba_udc *udc)
{
	/* USB 2.0 spec, 7.1.20: the standard 53-byte test packet */
	static const char test_packet_buffer[] = {
		/* JKJKJKJK * 9 */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		/* JJKKJJKK * 8 */
		0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
		/* JJJJKKKK * 8 */
		0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
		/* JJJJJJJKKKKKKK * 8 */
		0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		/* JJJJJJJK * 8 */
		0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
		/* {JKKKKKKK * 10}, JK */
		0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E
	};
	struct usba_ep *ep;
	struct device *dev = &udc->pdev->dev;
	int test_mode;

	test_mode = udc->test_mode;

	/* Start from a clean slate */
	reset_all_endpoints(udc);

	switch (test_mode) {
	case 0x0100:
		/* Test_J */
		usba_writel(udc, TST, USBA_TST_J_MODE);
		dev_info(dev, "Entering Test_J mode...\n");
		break;
	case 0x0200:
		/* Test_K */
		usba_writel(udc, TST, USBA_TST_K_MODE);
		dev_info(dev, "Entering Test_K mode...\n");
		break;
	case 0x0300:
		/*
		 * Test_SE0_NAK: Force high-speed mode and set up ep0
		 * for Bulk IN transfers
		 */
		ep = &usba_ep[0];
		usba_writel(udc, TST,
				USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH));
		usba_ep_writel(ep, CFG,
				USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
				| USBA_EPT_DIR_IN
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
				| USBA_BF(BK_NUMBER, 1));
		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
			set_protocol_stall(udc, ep);
			dev_err(dev, "Test_SE0_NAK: ep0 not mapped\n");
		} else {
			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
			dev_info(dev, "Entering Test_SE0_NAK mode...\n");
		}
		break;
	case 0x0400:
		/* Test_Packet: endlessly transmit the test packet above */
		ep = &usba_ep[0];
		usba_ep_writel(ep, CFG,
				USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
				| USBA_EPT_DIR_IN
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
				| USBA_BF(BK_NUMBER, 1));
		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
			set_protocol_stall(udc, ep);
			dev_err(dev, "Test_Packet: ep0 not mapped\n");
		} else {
			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
			usba_writel(udc, TST, USBA_TST_PKT_MODE);
			memcpy_toio(ep->fifo, test_packet_buffer,
					sizeof(test_packet_buffer));
			usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
			dev_info(dev, "Entering Test_Packet mode...\n");
		}
		break;
	default:
		dev_err(dev, "Invalid test mode: 0x%04x\n", test_mode);
		return -EINVAL;
	}

	return 0;
}
1200
1201/* Avoid overly long expressions */
1202static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq)
1203{
1204	if (crq->wValue == cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP))
1205		return true;
1206	return false;
1207}
1208
1209static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq)
1210{
1211	if (crq->wValue == cpu_to_le16(USB_DEVICE_TEST_MODE))
1212		return true;
1213	return false;
1214}
1215
1216static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq)
1217{
1218	if (crq->wValue == cpu_to_le16(USB_ENDPOINT_HALT))
1219		return true;
1220	return false;
1221}
1222
/*
 * Decode and service a SETUP request on ep0.
 *
 * GET_STATUS, CLEAR_FEATURE, SET_FEATURE and SET_ADDRESS are handled
 * here; everything else (including unrecognized recipients) is
 * delegated to the gadget driver's setup() callback with udc->lock
 * temporarily dropped around the call.
 *
 * Returns 0 on success, the gadget driver's return value when the
 * request was delegated, or -1 after stalling ep0 on an invalid
 * request.
 */
static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
		struct usb_ctrlrequest *crq)
{
	int retval = 0;

	switch (crq->bRequest) {
	case USB_REQ_GET_STATUS: {
		/* Kept in little-endian (wire) order; written raw below */
		u16 status;

		if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) {
			status = cpu_to_le16(udc->devstatus);
		} else if (crq->bRequestType
				== (USB_DIR_IN | USB_RECIP_INTERFACE)) {
			/* Interface status is always zero */
			status = cpu_to_le16(0);
		} else if (crq->bRequestType
				== (USB_DIR_IN | USB_RECIP_ENDPOINT)) {
			struct usba_ep *target;

			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			/* Bit 0 of the endpoint status is the halt flag */
			status = 0;
			if (is_stalled(udc, target))
				status |= cpu_to_le16(1);
		} else
			goto delegate;

		/* Write directly to the FIFO. No queueing is done. */
		if (crq->wLength != cpu_to_le16(sizeof(status)))
			goto stall;
		ep->state = DATA_STAGE_IN;
		__raw_writew(status, ep->fifo);
		usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
		break;
	}

	case USB_REQ_CLEAR_FEATURE: {
		if (crq->bRequestType == USB_RECIP_DEVICE) {
			if (feature_is_dev_remote_wakeup(crq))
				udc->devstatus
					&= ~(1 << USB_DEVICE_REMOTE_WAKEUP);
			else
				/* Can't CLEAR_FEATURE TEST_MODE */
				goto stall;
		} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
			struct usba_ep *target;

			if (crq->wLength != cpu_to_le16(0)
					|| !feature_is_ep_halt(crq))
				goto stall;
			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			/* Un-halt, and reset the data toggle (except ep0) */
			usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL);
			if (target->index != 0)
				usba_ep_writel(target, CLR_STA,
						USBA_TOGGLE_CLR);
		} else {
			goto delegate;
		}

		send_status(udc, ep);
		break;
	}

	case USB_REQ_SET_FEATURE: {
		if (crq->bRequestType == USB_RECIP_DEVICE) {
			if (feature_is_dev_test_mode(crq)) {
				/*
				 * Defer entering the test mode until the
				 * status stage completes; see the
				 * STATUS_STAGE_TEST handling in
				 * usba_control_irq().
				 */
				send_status(udc, ep);
				ep->state = STATUS_STAGE_TEST;
				udc->test_mode = le16_to_cpu(crq->wIndex);
				return 0;
			} else if (feature_is_dev_remote_wakeup(crq)) {
				udc->devstatus |= 1 << USB_DEVICE_REMOTE_WAKEUP;
			} else {
				goto stall;
			}
		} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
			struct usba_ep *target;

			if (crq->wLength != cpu_to_le16(0)
					|| !feature_is_ep_halt(crq))
				goto stall;

			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			usba_ep_writel(target, SET_STA, USBA_FORCE_STALL);
		} else
			goto delegate;

		send_status(udc, ep);
		break;
	}

	case USB_REQ_SET_ADDRESS:
		if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
			goto delegate;

		/*
		 * The new address is latched but only activated after the
		 * status stage; see STATUS_STAGE_ADDR in usba_control_irq().
		 */
		set_address(udc, le16_to_cpu(crq->wValue));
		send_status(udc, ep);
		ep->state = STATUS_STAGE_ADDR;
		break;

	default:
delegate:
		/* Let the gadget driver handle it, without holding the lock */
		spin_unlock(&udc->lock);
		retval = udc->driver->setup(&udc->gadget, crq);
		spin_lock(&udc->lock);
	}

	return retval;

stall:
	pr_err("udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, "
		"halting endpoint...\n",
		ep->ep.name, crq->bRequestType, crq->bRequest,
		le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex),
		le16_to_cpu(crq->wLength));
	set_protocol_stall(udc, ep);
	return -1;
}
1348
/*
 * Interrupt handler for the control endpoint (ep0).
 *
 * Runs the control-transfer state machine: feeds IN data into the
 * FIFO, completes data/status stages on TX_COMPLETE and RX_BK_RDY,
 * and parses newly received SETUP packets, dispatching them to
 * handle_ep0_setup() (ep0) or the gadget driver's setup() callback.
 * Called from usba_udc_irq() with udc->lock held.
 */
static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 epstatus;
	u32 epctrl;

restart:
	/* Re-sample the hardware state on every pass */
	epstatus = usba_ep_readl(ep, STA);
	epctrl = usba_ep_readl(ep, CTL);

	DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n",
			ep->ep.name, ep->state, epstatus, epctrl);

	/* Peek at the request at the head of the queue, if any */
	req = NULL;
	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
				 struct usba_request, queue);

	/*
	 * NOTE(review): req is dereferenced below without a NULL check.
	 * This appears to rely on the TX_PK_RDY interrupt only being
	 * enabled while a request is queued -- TODO confirm against the
	 * state machine / hardware manual.
	 */
	if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
		if (req->submitted)
			next_fifo_transaction(ep, req);
		else
			submit_request(ep, req);

		if (req->last_transaction) {
			/* Last packet loaded: wait for TX_COMPLETE instead */
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
		}
		goto restart;
	}
	if ((epstatus & epctrl) & USBA_TX_COMPLETE) {
		usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE);

		switch (ep->state) {
		case DATA_STAGE_IN:
			/* IN data sent: expect a zero-length OUT status */
			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = STATUS_STAGE_OUT;
			break;
		case STATUS_STAGE_ADDR:
			/* Activate our new address */
			usba_writel(udc, CTRL, (usba_readl(udc, CTRL)
						| USBA_FADDR_EN));
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			break;
		case STATUS_STAGE_IN:
			/* Status stage done: retire the current request */
			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, 0);
				submit_next_request(ep);
			}
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			break;
		case STATUS_STAGE_TEST:
			/* SET_FEATURE(TEST_MODE) acknowledged: enter it now */
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			if (do_test_mode(udc))
				set_protocol_stall(udc, ep);
			break;
		default:
			pr_err("udc: %s: TXCOMP: Invalid endpoint state %d, "
				"halting endpoint...\n",
				ep->ep.name, ep->state);
			set_protocol_stall(udc, ep);
			break;
		}

		goto restart;
	}
	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
		switch (ep->state) {
		case STATUS_STAGE_OUT:
			/* Zero-length OUT status received: transfer done */
			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);

			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, 0);
			}
			ep->state = WAIT_FOR_SETUP;
			break;

		case DATA_STAGE_OUT:
			receive_data(ep);
			break;

		default:
			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			pr_err("udc: %s: RXRDY: Invalid endpoint state %d, "
				"halting endpoint...\n",
				ep->ep.name, ep->state);
			set_protocol_stall(udc, ep);
			break;
		}

		goto restart;
	}
	if (epstatus & USBA_RX_SETUP) {
		/* Overlay lets us copy the FIFO as two native words */
		union {
			struct usb_ctrlrequest crq;
			unsigned long data[2];
		} crq;
		unsigned int pkt_len;
		int ret;

		if (ep->state != WAIT_FOR_SETUP) {
			/*
			 * Didn't expect a SETUP packet at this
			 * point. Clean up any pending requests (which
			 * may be successful).
			 */
			int status = -EPROTO;

			/*
			 * RXRDY and TXCOMP are dropped when SETUP
			 * packets arrive.  Just pretend we received
			 * the status packet.
			 */
			if (ep->state == STATUS_STAGE_OUT
					|| ep->state == STATUS_STAGE_IN) {
				usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
				status = 0;
			}

			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, status);
			}
		}

		/* A SETUP packet is exactly 8 bytes; anything else is bogus */
		pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
		DBG(DBG_HW, "Packet length: %u\n", pkt_len);
		if (pkt_len != sizeof(crq)) {
			pr_warning("udc: Invalid packet length %u "
				"(expected %zu)\n", pkt_len, sizeof(crq));
			set_protocol_stall(udc, ep);
			return;
		}

		DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo);
		memcpy_fromio(crq.data, ep->fifo, sizeof(crq));

		/* Free up one bank in the FIFO so that we can
		 * generate or receive a reply right away. */
		usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP);

		/* printk(KERN_DEBUG "setup: %d: %02x.%02x\n",
			ep->state, crq.crq.bRequestType,
			crq.crq.bRequest); */

		if (crq.crq.bRequestType & USB_DIR_IN) {
			/*
			 * The USB 2.0 spec states that "if wLength is
			 * zero, there is no data transfer phase."
			 * However, testusb #14 seems to actually
			 * expect a data phase even if wLength = 0...
			 */
			ep->state = DATA_STAGE_IN;
		} else {
			if (crq.crq.wLength != cpu_to_le16(0))
				ep->state = DATA_STAGE_OUT;
			else
				ep->state = STATUS_STAGE_IN;
		}

		/* ep0 gets the chrome-plated treatment; others go straight
		 * to the gadget driver (lock dropped around the callback) */
		ret = -1;
		if (ep->index == 0)
			ret = handle_ep0_setup(udc, ep, &crq.crq);
		else {
			spin_unlock(&udc->lock);
			ret = udc->driver->setup(&udc->gadget, &crq.crq);
			spin_lock(&udc->lock);
		}

		DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n",
			crq.crq.bRequestType, crq.crq.bRequest,
			le16_to_cpu(crq.crq.wLength), ep->state, ret);

		if (ret < 0) {
			/* Let the host know that we failed */
			set_protocol_stall(udc, ep);
		}
	}
}
1536
/*
 * Interrupt handler for non-control (PIO) endpoints.
 *
 * Feeds the TX FIFO from the head of the request queue while the
 * TX_PK_RDY interrupt is enabled and a FIFO bank is free, and drains
 * the RX FIFO when a bank of received data is ready.  Called from
 * usba_udc_irq() with udc->lock held.
 */
static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 epstatus;
	u32 epctrl;

	epstatus = usba_ep_readl(ep, STA);
	epctrl = usba_ep_readl(ep, CTL);

	DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus);

	/* TX_PK_RDY interrupt enabled and a packet slot is available */
	while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
		DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name);

		if (list_empty(&ep->queue)) {
			/* Nothing left to send: mask the interrupt */
			dev_warn(&udc->pdev->dev, "ep_irq: queue empty\n");
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			return;
		}

		req = list_entry(ep->queue.next, struct usba_request, queue);

		if (req->using_dma) {
			/* Send a zero-length packet */
			usba_ep_writel(ep, SET_STA,
					USBA_TX_PK_RDY);
			usba_ep_writel(ep, CTL_DIS,
					USBA_TX_PK_RDY);
			list_del_init(&req->queue);
			submit_next_request(ep);
			request_complete(ep, req, 0);
		} else {
			if (req->submitted)
				next_fifo_transaction(ep, req);
			else
				submit_request(ep, req);

			if (req->last_transaction) {
				/* Whole request is in the FIFO: retire it */
				list_del_init(&req->queue);
				submit_next_request(ep);
				request_complete(ep, req, 0);
			}
		}

		/* Re-sample: the writes above changed the FIFO state */
		epstatus = usba_ep_readl(ep, STA);
		epctrl = usba_ep_readl(ep, CTL);
	}
	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
		DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
		receive_data(ep);
		usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
	}
}
1590
/*
 * DMA completion interrupt handler for one endpoint.
 *
 * Retires the request at the head of the queue when the DMA channel
 * signals end-of-transfer or end-of-buffer, then submits the next
 * queued request.  Called from usba_udc_irq() with udc->lock held.
 */
static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 status, control, pending;

	status = usba_dma_readl(ep, STATUS);
	control = usba_dma_readl(ep, CONTROL);
#ifdef CONFIG_USB_GADGET_DEBUG_FS
	ep->last_dma_status = status;
#endif
	/* Only consider interrupt sources that are actually enabled */
	pending = status & control;
	DBG(DBG_INT | DBG_DMA, "dma irq, s/%#08x, c/%#08x\n", status, control);

	if (status & USBA_DMA_CH_EN) {
		dev_err(&udc->pdev->dev,
			"DMA_CH_EN is set after transfer is finished!\n");
		dev_err(&udc->pdev->dev,
			"status=%#08x, pending=%#08x, control=%#08x\n",
			status, pending, control);

		/*
		 * try to pretend nothing happened. We might have to
		 * do something here...
		 */
	}

	if (list_empty(&ep->queue))
		/* Might happen if a reset comes along at the right moment */
		return;

	if (pending & (USBA_DMA_END_TR_ST | USBA_DMA_END_BUF_ST)) {
		req = list_entry(ep->queue.next, struct usba_request, queue);
		usba_update_req(ep, req, status);

		list_del_init(&req->queue);
		submit_next_request(ep);
		request_complete(ep, req, 0);
	}
}
1630
/*
 * Top-level controller interrupt handler.
 *
 * Dispatches bus events (suspend, wakeup, resume, end-of-reset) and
 * fans per-endpoint DMA and endpoint interrupts out to usba_dma_irq(),
 * usba_control_irq() and usba_ep_irq().  Gadget driver callbacks
 * (suspend/resume/disconnect) are invoked with udc->lock dropped.
 */
static irqreturn_t usba_udc_irq(int irq, void *devid)
{
	struct usba_udc *udc = devid;
	u32 status;
	u32 dma_status;
	u32 ep_status;

	spin_lock(&udc->lock);

	status = usba_readl(udc, INT_STA);
	DBG(DBG_INT, "irq, status=%#08x\n", status);

	if (status & USBA_DET_SUSPEND) {
		toggle_bias(0);
		usba_writel(udc, INT_CLR, USBA_DET_SUSPEND);
		DBG(DBG_BUS, "Suspend detected\n");
		if (udc->gadget.speed != USB_SPEED_UNKNOWN
				&& udc->driver && udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (status & USBA_WAKE_UP) {
		toggle_bias(1);
		usba_writel(udc, INT_CLR, USBA_WAKE_UP);
		DBG(DBG_BUS, "Wake Up CPU detected\n");
	}

	if (status & USBA_END_OF_RESUME) {
		usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
		DBG(DBG_BUS, "Resume detected\n");
		if (udc->gadget.speed != USB_SPEED_UNKNOWN
				&& udc->driver && udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	/* DMA interrupts: one bit per endpoint; scan starts at 1 */
	dma_status = USBA_BFEXT(DMA_INT, status);
	if (dma_status) {
		int i;

		for (i = 1; i < USBA_NR_ENDPOINTS; i++)
			if (dma_status & (1 << i))
				usba_dma_irq(udc, &usba_ep[i]);
	}

	/* Endpoint interrupts: control endpoints get special handling */
	ep_status = USBA_BFEXT(EPT_INT, status);
	if (ep_status) {
		int i;

		for (i = 0; i < USBA_NR_ENDPOINTS; i++)
			if (ep_status & (1 << i)) {
				if (ep_is_control(&usba_ep[i]))
					usba_control_irq(udc, &usba_ep[i]);
				else
					usba_ep_irq(udc, &usba_ep[i]);
			}
	}

	if (status & USBA_END_OF_RESET) {
		struct usba_ep *ep0;

		usba_writel(udc, INT_CLR, USBA_END_OF_RESET);
		reset_all_endpoints(udc);

		/* A reset while configured means the host disconnected us */
		if (udc->gadget.speed != USB_SPEED_UNKNOWN
				&& udc->driver->disconnect) {
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			spin_unlock(&udc->lock);
			udc->driver->disconnect(&udc->gadget);
			spin_lock(&udc->lock);
		}

		if (status & USBA_HIGH_SPEED) {
			DBG(DBG_BUS, "High-speed bus reset detected\n");
			udc->gadget.speed = USB_SPEED_HIGH;
		} else {
			DBG(DBG_BUS, "Full-speed bus reset detected\n");
			udc->gadget.speed = USB_SPEED_FULL;
		}

		/* Reprogram and re-enable ep0 for control transfers */
		ep0 = &usba_ep[0];
		ep0->desc = &usba_ep0_desc;
		ep0->state = WAIT_FOR_SETUP;
		usba_ep_writel(ep0, CFG,
				(USBA_BF(EPT_SIZE, EP0_EPT_SIZE)
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL)
				| USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
		usba_ep_writel(ep0, CTL_ENB,
				USBA_EPT_ENABLE | USBA_RX_SETUP);
		usba_writel(udc, INT_ENB,
				(usba_readl(udc, INT_ENB)
				| USBA_BF(EPT_INT, 1)
				| USBA_DET_SUSPEND
				| USBA_END_OF_RESUME));

		/*
		 * Unclear why we hit this irregularly, e.g. in usbtest,
		 * but it's clearly harmless...
		 */
		if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
			dev_dbg(&udc->pdev->dev,
				 "ODD: EP0 configuration is invalid!\n");
	}

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
1744
/*
 * Vbus-sense GPIO interrupt handler.
 *
 * Powers the controller up when host-supplied Vbus appears and shuts
 * it down (notifying the gadget driver) when Vbus disappears.
 */
static irqreturn_t usba_vbus_irq(int irq, void *devid)
{
	struct usba_udc *udc = devid;
	int vbus;

	/* debounce */
	udelay(10);

	spin_lock(&udc->lock);

	/* May happen if Vbus pin toggles during probe() */
	if (!udc->driver)
		goto out;

	vbus = vbus_is_present(udc);
	if (vbus != udc->vbus_prev) {
		if (vbus) {
			/* Vbus appeared: enable and wait for a bus reset */
			toggle_bias(1);
			usba_writel(udc, CTRL, USBA_ENABLE_MASK);
			usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
		} else {
			/* Vbus dropped: tear the session down */
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			reset_all_endpoints(udc);
			toggle_bias(0);
			usba_writel(udc, CTRL, USBA_DISABLE_MASK);
			if (udc->driver->disconnect) {
				/* Callback runs without the lock held */
				spin_unlock(&udc->lock);
				udc->driver->disconnect(&udc->gadget);
				spin_lock(&udc->lock);
			}
		}
		udc->vbus_prev = vbus;
	}

out:
	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
1784
1785int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1786{
1787	struct usba_udc *udc = &the_udc;
1788	unsigned long flags;
1789	int ret;
1790
1791	if (!udc->pdev)
1792		return -ENODEV;
1793
1794	spin_lock_irqsave(&udc->lock, flags);
1795	if (udc->driver) {
1796		spin_unlock_irqrestore(&udc->lock, flags);
1797		return -EBUSY;
1798	}
1799
1800	udc->devstatus = 1 << USB_DEVICE_SELF_POWERED;
1801	udc->driver = driver;
1802	udc->gadget.dev.driver = &driver->driver;
1803	spin_unlock_irqrestore(&udc->lock, flags);
1804
1805	clk_enable(udc->pclk);
1806	clk_enable(udc->hclk);
1807
1808	ret = driver->bind(&udc->gadget);
1809	if (ret) {
1810		DBG(DBG_ERR, "Could not bind to driver %s: error %d\n",
1811			driver->driver.name, ret);
1812		goto err_driver_bind;
1813	}
1814
1815	DBG(DBG_GADGET, "registered driver `%s'\n", driver->driver.name);
1816
1817	udc->vbus_prev = 0;
1818	if (gpio_is_valid(udc->vbus_pin))
1819		enable_irq(gpio_to_irq(udc->vbus_pin));
1820
1821	/* If Vbus is present, enable the controller and wait for reset */
1822	spin_lock_irqsave(&udc->lock, flags);
1823	if (vbus_is_present(udc) && udc->vbus_prev == 0) {
1824		toggle_bias(1);
1825		usba_writel(udc, CTRL, USBA_ENABLE_MASK);
1826		usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
1827	}
1828	spin_unlock_irqrestore(&udc->lock, flags);
1829
1830	return 0;
1831
1832err_driver_bind:
1833	udc->driver = NULL;
1834	udc->gadget.dev.driver = NULL;
1835	return ret;
1836}
1837EXPORT_SYMBOL(usb_gadget_register_driver);
1838
/*
 * Detach the currently bound gadget driver.
 *
 * Tears down the bus state, disables the controller (dropping the DP
 * pullup), notifies and unbinds the driver, then gates the clocks.
 * Returns 0 on success, -ENODEV if the controller was never probed,
 * or -EINVAL if the driver doesn't match or lacks an unbind() hook.
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct usba_udc *udc = &the_udc;
	unsigned long flags;

	if (!udc->pdev)
		return -ENODEV;
	if (driver != udc->driver || !driver->unbind)
		return -EINVAL;

	/* Stop reacting to Vbus changes while tearing down */
	if (gpio_is_valid(udc->vbus_pin))
		disable_irq(gpio_to_irq(udc->vbus_pin));

	spin_lock_irqsave(&udc->lock, flags);
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	reset_all_endpoints(udc);
	spin_unlock_irqrestore(&udc->lock, flags);

	/* This will also disable the DP pullup */
	toggle_bias(0);
	usba_writel(udc, CTRL, USBA_DISABLE_MASK);

	if (udc->driver->disconnect)
		udc->driver->disconnect(&udc->gadget);

	driver->unbind(&udc->gadget);
	udc->gadget.dev.driver = NULL;
	udc->driver = NULL;

	clk_disable(udc->hclk);
	clk_disable(udc->pclk);

	DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name);

	return 0;
}
1875EXPORT_SYMBOL(usb_gadget_unregister_driver);
1876
1877static int __init usba_udc_probe(struct platform_device *pdev)
1878{
1879	struct usba_platform_data *pdata = pdev->dev.platform_data;
1880	struct resource *regs, *fifo;
1881	struct clk *pclk, *hclk;
1882	struct usba_udc *udc = &the_udc;
1883	int irq, ret, i;
1884
1885	regs = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID);
1886	fifo = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID);
1887	if (!regs || !fifo || !pdata)
1888		return -ENXIO;
1889
1890	irq = platform_get_irq(pdev, 0);
1891	if (irq < 0)
1892		return irq;
1893
1894	pclk = clk_get(&pdev->dev, "pclk");
1895	if (IS_ERR(pclk))
1896		return PTR_ERR(pclk);
1897	hclk = clk_get(&pdev->dev, "hclk");
1898	if (IS_ERR(hclk)) {
1899		ret = PTR_ERR(hclk);
1900		goto err_get_hclk;
1901	}
1902
1903	spin_lock_init(&udc->lock);
1904	udc->pdev = pdev;
1905	udc->pclk = pclk;
1906	udc->hclk = hclk;
1907	udc->vbus_pin = -ENODEV;
1908
1909	ret = -ENOMEM;
1910	udc->regs = ioremap(regs->start, resource_size(regs));
1911	if (!udc->regs) {
1912		dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n");
1913		goto err_map_regs;
1914	}
1915	dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n",
1916		 (unsigned long)regs->start, udc->regs);
1917	udc->fifo = ioremap(fifo->start, resource_size(fifo));
1918	if (!udc->fifo) {
1919		dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n");
1920		goto err_map_fifo;
1921	}
1922	dev_info(&pdev->dev, "FIFO at 0x%08lx mapped at %p\n",
1923		 (unsigned long)fifo->start, udc->fifo);
1924
1925	device_initialize(&udc->gadget.dev);
1926	udc->gadget.dev.parent = &pdev->dev;
1927	udc->gadget.dev.dma_mask = pdev->dev.dma_mask;
1928
1929	platform_set_drvdata(pdev, udc);
1930
1931	/* Make sure we start from a clean slate */
1932	clk_enable(pclk);
1933	toggle_bias(0);
1934	usba_writel(udc, CTRL, USBA_DISABLE_MASK);
1935	clk_disable(pclk);
1936
1937	usba_ep = kzalloc(sizeof(struct usba_ep) * pdata->num_ep,
1938			  GFP_KERNEL);
1939	if (!usba_ep)
1940		goto err_alloc_ep;
1941
1942	the_udc.gadget.ep0 = &usba_ep[0].ep;
1943
1944	INIT_LIST_HEAD(&usba_ep[0].ep.ep_list);
1945	usba_ep[0].ep_regs = udc->regs + USBA_EPT_BASE(0);
1946	usba_ep[0].dma_regs = udc->regs + USBA_DMA_BASE(0);
1947	usba_ep[0].fifo = udc->fifo + USBA_FIFO_BASE(0);
1948	usba_ep[0].ep.ops = &usba_ep_ops;
1949	usba_ep[0].ep.name = pdata->ep[0].name;
1950	usba_ep[0].ep.maxpacket = pdata->ep[0].fifo_size;
1951	usba_ep[0].udc = &the_udc;
1952	INIT_LIST_HEAD(&usba_ep[0].queue);
1953	usba_ep[0].fifo_size = pdata->ep[0].fifo_size;
1954	usba_ep[0].nr_banks = pdata->ep[0].nr_banks;
1955	usba_ep[0].index = pdata->ep[0].index;
1956	usba_ep[0].can_dma = pdata->ep[0].can_dma;
1957	usba_ep[0].can_isoc = pdata->ep[0].can_isoc;
1958
1959	for (i = 1; i < pdata->num_ep; i++) {
1960		struct usba_ep *ep = &usba_ep[i];
1961
1962		ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
1963		ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
1964		ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
1965		ep->ep.ops = &usba_ep_ops;
1966		ep->ep.name = pdata->ep[i].name;
1967		ep->ep.maxpacket = pdata->ep[i].fifo_size;
1968		ep->udc = &the_udc;
1969		INIT_LIST_HEAD(&ep->queue);
1970		ep->fifo_size = pdata->ep[i].fifo_size;
1971		ep->nr_banks = pdata->ep[i].nr_banks;
1972		ep->index = pdata->ep[i].index;
1973		ep->can_dma = pdata->ep[i].can_dma;
1974		ep->can_isoc = pdata->ep[i].can_isoc;
1975
1976		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1977	}
1978
1979	ret = request_irq(irq, usba_udc_irq, 0, "atmel_usba_udc", udc);
1980	if (ret) {
1981		dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n",
1982			irq, ret);
1983		goto err_request_irq;
1984	}
1985	udc->irq = irq;
1986
1987	ret = device_add(&udc->gadget.dev);
1988	if (ret) {
1989		dev_dbg(&pdev->dev, "Could not add gadget: %d\n", ret);
1990		goto err_device_add;
1991	}
1992
1993	if (gpio_is_valid(pdata->vbus_pin)) {
1994		if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) {
1995			udc->vbus_pin = pdata->vbus_pin;
1996			udc->vbus_pin_inverted = pdata->vbus_pin_inverted;
1997
1998			ret = request_irq(gpio_to_irq(udc->vbus_pin),
1999					usba_vbus_irq, 0,
2000					"atmel_usba_udc", udc);
2001			if (ret) {
2002				gpio_free(udc->vbus_pin);
2003				udc->vbus_pin = -ENODEV;
2004				dev_warn(&udc->pdev->dev,
2005					 "failed to request vbus irq; "
2006					 "assuming always on\n");
2007			} else {
2008				disable_irq(gpio_to_irq(udc->vbus_pin));
2009			}
2010		} else {
2011			/* gpio_request fail so use -EINVAL for gpio_is_valid */
2012			udc->vbus_pin = -EINVAL;
2013		}
2014	}
2015
2016	usba_init_debugfs(udc);
2017	for (i = 1; i < pdata->num_ep; i++)
2018		usba_ep_init_debugfs(udc, &usba_ep[i]);
2019
2020	return 0;
2021
2022err_device_add:
2023	free_irq(irq, udc);
2024err_request_irq:
2025	kfree(usba_ep);
2026err_alloc_ep:
2027	iounmap(udc->fifo);
2028err_map_fifo:
2029	iounmap(udc->regs);
2030err_map_regs:
2031	clk_put(hclk);
2032err_get_hclk:
2033	clk_put(pclk);
2034
2035	platform_set_drvdata(pdev, NULL);
2036
2037	return ret;
2038}
2039
/*
 * Remove routine: undo what usba_udc_probe() set up -- debugfs
 * entries, Vbus GPIO, controller IRQ, endpoint array, I/O mappings,
 * clock references and finally the gadget device itself.
 */
static int __exit usba_udc_remove(struct platform_device *pdev)
{
	struct usba_udc *udc;
	int i;
	struct usba_platform_data *pdata = pdev->dev.platform_data;

	udc = platform_get_drvdata(pdev);

	/* Per-endpoint debugfs entries exist for endpoints 1..num_ep-1 */
	for (i = 1; i < pdata->num_ep; i++)
		usba_ep_cleanup_debugfs(&usba_ep[i]);
	usba_cleanup_debugfs(udc);

	if (gpio_is_valid(udc->vbus_pin))
		gpio_free(udc->vbus_pin);

	free_irq(udc->irq, udc);
	kfree(usba_ep);
	iounmap(udc->fifo);
	iounmap(udc->regs);
	clk_put(udc->hclk);
	clk_put(udc->pclk);

	device_unregister(&udc->gadget.dev);

	return 0;
}
2066
/*
 * Platform driver glue.  The probe routine is __init and is therefore
 * passed to platform_driver_probe() in udc_init() rather than being
 * listed here.
 */
static struct platform_driver udc_driver = {
	.remove		= __exit_p(usba_udc_remove),
	.driver		= {
		.name		= "atmel_usba_udc",
		.owner		= THIS_MODULE,
	},
};
2074
/* Module entry point: bind the driver to a matching platform device. */
static int __init udc_init(void)
{
	return platform_driver_probe(&udc_driver, usba_udc_probe);
}
2079module_init(udc_init);
2080
/* Module exit point: detach the driver from the platform bus. */
static void __exit udc_exit(void)
{
	platform_driver_unregister(&udc_driver);
}
2085module_exit(udc_exit);
2086
2087MODULE_DESCRIPTION("Atmel USBA UDC driver");
2088MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
2089MODULE_LICENSE("GPL");
2090MODULE_ALIAS("platform:atmel_usba_udc");
2091