1// SPDX-License-Identifier: GPL-2.0+
2/*
 * drivers/usb/gadget/fsl_qe_udc.c
4 *
5 * Copyright (c) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
6 *
7 * 	Xie Xiaobo <X.Xie@freescale.com>
8 * 	Li Yang <leoli@freescale.com>
9 * 	Based on bareboard code from Shlomi Gridish.
10 *
11 * Description:
 * Freescale QE/CPM USB Peripheral Controller Driver
 * The controller can be found on MPC8360, MPC8272, etc.
 * MPC8360 Rev 1.1 may need a QE microcode update
15 */
16
17#undef USB_TRACE
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/ioport.h>
22#include <linux/types.h>
23#include <linux/errno.h>
24#include <linux/err.h>
25#include <linux/slab.h>
26#include <linux/list.h>
27#include <linux/interrupt.h>
28#include <linux/io.h>
29#include <linux/moduleparam.h>
30#include <linux/of.h>
31#include <linux/of_address.h>
32#include <linux/of_irq.h>
33#include <linux/platform_device.h>
34#include <linux/dma-mapping.h>
35#include <linux/usb/ch9.h>
36#include <linux/usb/gadget.h>
37#include <linux/usb/otg.h>
38#include <soc/fsl/qe/qe.h>
39#include <asm/cpm.h>
40#include <asm/dma.h>
41#include <asm/reg.h>
42#include "fsl_qe_udc.h"
43
44#define DRIVER_DESC     "Freescale QE/CPM USB Device Controller driver"
45#define DRIVER_AUTHOR   "Xie XiaoBo"
46#define DRIVER_VERSION  "1.0"
47
48#define DMA_ADDR_INVALID        (~(dma_addr_t)0)
49
50static const char driver_name[] = "fsl_qe_udc";
51static const char driver_desc[] = DRIVER_DESC;
52
/* ep names are important in the gadget layer; they must follow the convention expected by ep_match() */
54static const char *const ep_name[] = {
55	"ep0-control", /* everyone has ep0 */
56	/* 3 configurable endpoints */
57	"ep1",
58	"ep2",
59	"ep3",
60};
61
62static const struct usb_endpoint_descriptor qe_ep0_desc = {
63	.bLength =		USB_DT_ENDPOINT_SIZE,
64	.bDescriptorType =	USB_DT_ENDPOINT,
65
66	.bEndpointAddress =	0,
67	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
68	.wMaxPacketSize =	USB_MAX_CTRL_PAYLOAD,
69};
70
71/********************************************************************
 *      Internally Used Functions Start
73********************************************************************/
74/*-----------------------------------------------------------------
 * done() - retire a request; caller has blocked irqs
76 *--------------------------------------------------------------*/
77static void done(struct qe_ep *ep, struct qe_req *req, int status)
78{
79	struct qe_udc *udc = ep->udc;
80	unsigned char stopped = ep->stopped;
81
	/* the req->queue pointer is used by ep_queue(), which adds the
	 * request to the tail of the endpoint's queue; here the request
	 * is removed from ep->queue again
	 */
86	list_del_init(&req->queue);
87
88	/* req.status should be set as -EINPROGRESS in ep_queue() */
89	if (req->req.status == -EINPROGRESS)
90		req->req.status = status;
91	else
92		status = req->req.status;
93
94	if (req->mapped) {
95		dma_unmap_single(udc->gadget.dev.parent,
96			req->req.dma, req->req.length,
97			ep_is_in(ep)
98				? DMA_TO_DEVICE
99				: DMA_FROM_DEVICE);
100		req->req.dma = DMA_ADDR_INVALID;
101		req->mapped = 0;
102	} else
103		dma_sync_single_for_cpu(udc->gadget.dev.parent,
104			req->req.dma, req->req.length,
105			ep_is_in(ep)
106				? DMA_TO_DEVICE
107				: DMA_FROM_DEVICE);
108
109	if (status && (status != -ESHUTDOWN))
110		dev_vdbg(udc->dev, "complete %s req %p stat %d len %u/%u\n",
111			ep->ep.name, &req->req, status,
112			req->req.actual, req->req.length);
113
114	/* don't modify queue heads during completion callback */
115	ep->stopped = 1;
116	spin_unlock(&udc->lock);
117
118	usb_gadget_giveback_request(&ep->ep, &req->req);
119
120	spin_lock(&udc->lock);
121
122	ep->stopped = stopped;
123}
124
125/*-----------------------------------------------------------------
126 * nuke(): delete all requests related to this ep
127 *--------------------------------------------------------------*/
128static void nuke(struct qe_ep *ep, int status)
129{
	/* retire every request still linked to this ep */
131	while (!list_empty(&ep->queue)) {
132		struct qe_req *req = NULL;
133		req = list_entry(ep->queue.next, struct qe_req, queue);
134
135		done(ep, req, status);
136	}
137}
138
139/*---------------------------------------------------------------------------*
 * USB and endpoint manipulation routines (parameters and registers)         *
141 *---------------------------------------------------------------------------*/
/* @value: 1--set stall 0--clear stall */
143static int qe_eprx_stall_change(struct qe_ep *ep, int value)
144{
145	u16 tem_usep;
146	u8 epnum = ep->epnum;
147	struct qe_udc *udc = ep->udc;
148
149	tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
150	tem_usep = tem_usep & ~USB_RHS_MASK;
151	if (value == 1)
152		tem_usep |= USB_RHS_STALL;
153	else if (ep->dir == USB_DIR_IN)
154		tem_usep |= USB_RHS_IGNORE_OUT;
155
156	out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
157	return 0;
158}
159
160static int qe_eptx_stall_change(struct qe_ep *ep, int value)
161{
162	u16 tem_usep;
163	u8 epnum = ep->epnum;
164	struct qe_udc *udc = ep->udc;
165
166	tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
167	tem_usep = tem_usep & ~USB_THS_MASK;
168	if (value == 1)
169		tem_usep |= USB_THS_STALL;
170	else if (ep->dir == USB_DIR_OUT)
171		tem_usep |= USB_THS_IGNORE_IN;
172
173	out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
174
175	return 0;
176}
177
178static int qe_ep0_stall(struct qe_udc *udc)
179{
180	qe_eptx_stall_change(&udc->eps[0], 1);
181	qe_eprx_stall_change(&udc->eps[0], 1);
182	udc->ep0_state = WAIT_FOR_SETUP;
183	udc->ep0_dir = 0;
184	return 0;
185}
186
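/* NAK further OUT transactions on this endpoint and mask the RX and
 * busy interrupts until reception is re-enabled */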
187static int qe_eprx_nack(struct qe_ep *ep)
188{
189	u8 epnum = ep->epnum;
190	struct qe_udc *udc = ep->udc;
191
192	if (ep->state == EP_STATE_IDLE) {
193		/* Set the ep's nack */
194		clrsetbits_be16(&udc->usb_regs->usb_usep[epnum],
195				USB_RHS_MASK, USB_RHS_NACK);
196
197		/* Mask Rx and Busy interrupts */
198		clrbits16(&udc->usb_regs->usb_usbmr,
199				(USB_E_RXB_MASK | USB_E_BSY_MASK));
200
201		ep->state = EP_STATE_NACK;
202	}
203	return 0;
204}
205
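/* leave the NACK state: re-enable reception, clear and unmask the RX
 * and busy interrupts, and mark the endpoint idle again */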
206static int qe_eprx_normal(struct qe_ep *ep)
207{
208	struct qe_udc *udc = ep->udc;
209
210	if (ep->state == EP_STATE_NACK) {
211		clrsetbits_be16(&udc->usb_regs->usb_usep[ep->epnum],
212				USB_RTHS_MASK, USB_THS_IGNORE_IN);
213
214		/* Unmask RX interrupts */
215		out_be16(&udc->usb_regs->usb_usber,
216				USB_E_BSY_MASK | USB_E_RXB_MASK);
217		setbits16(&udc->usb_regs->usb_usbmr,
218				(USB_E_RXB_MASK | USB_E_BSY_MASK));
219
220		ep->state = EP_STATE_IDLE;
221		ep->has_data = 0;
222	}
223
224	return 0;
225}
226
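/* issue the controller's STOP TX command for this endpoint
 * (CPM or QE variant, depending on the SoC) */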
227static int qe_ep_cmd_stoptx(struct qe_ep *ep)
228{
229	if (ep->udc->soc_type == PORT_CPM)
230		cpm_command(CPM_USB_STOP_TX | (ep->epnum << CPM_USB_EP_SHIFT),
231				CPM_USB_STOP_TX_OPCODE);
232	else
233		qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB,
234				ep->epnum, 0);
235
236	return 0;
237}
238
239static int qe_ep_cmd_restarttx(struct qe_ep *ep)
240{
241	if (ep->udc->soc_type == PORT_CPM)
242		cpm_command(CPM_USB_RESTART_TX | (ep->epnum <<
243				CPM_USB_EP_SHIFT), CPM_USB_RESTART_TX_OPCODE);
244	else
245		qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB,
246				ep->epnum, 0);
247
248	return 0;
249}
250
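/* stop transmission, flush the endpoint's TX FIFO, rewind its TX BD
 * ring pointers and then restart transmission */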
251static int qe_ep_flushtxfifo(struct qe_ep *ep)
252{
253	struct qe_udc *udc = ep->udc;
254	int i;
255
256	i = (int)ep->epnum;
257
258	qe_ep_cmd_stoptx(ep);
259	out_8(&udc->usb_regs->usb_uscom,
260		USB_CMD_FLUSH_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
261	out_be16(&udc->ep_param[i]->tbptr, in_be16(&udc->ep_param[i]->tbase));
262	out_be32(&udc->ep_param[i]->tstate, 0);
263	out_be16(&udc->ep_param[i]->tbcnt, 0);
264
265	ep->c_txbd = ep->txbase;
266	ep->n_txbd = ep->txbase;
267	qe_ep_cmd_restarttx(ep);
268	return 0;
269}
270
271static int qe_ep_filltxfifo(struct qe_ep *ep)
272{
273	struct qe_udc *udc = ep->udc;
274
275	out_8(&udc->usb_regs->usb_uscom,
276			USB_CMD_STR_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
277	return 0;
278}
279
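/* mark all RX BDs of this pipe empty (ready to receive) again and
 * clear its TX BDs */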
280static int qe_epbds_reset(struct qe_udc *udc, int pipe_num)
281{
282	struct qe_ep *ep;
283	u32 bdring_len;
284	struct qe_bd __iomem *bd;
285	int i;
286
287	ep = &udc->eps[pipe_num];
288
289	if (ep->dir == USB_DIR_OUT)
290		bdring_len = USB_BDRING_LEN_RX;
291	else
292		bdring_len = USB_BDRING_LEN;
293
294	bd = ep->rxbase;
295	for (i = 0; i < (bdring_len - 1); i++) {
296		out_be32((u32 __iomem *)bd, R_E | R_I);
297		bd++;
298	}
299	out_be32((u32 __iomem *)bd, R_E | R_I | R_W);
300
301	bd = ep->txbase;
302	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
303		out_be32(&bd->buf, 0);
304		out_be32((u32 __iomem *)bd, 0);
305		bd++;
306	}
307	out_be32((u32 __iomem *)bd, T_W);
308
309	return 0;
310}
311
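/* reset one endpoint: flush its TX path as needed, program the ignore
 * bits for its direction and reinitialize its BD rings */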
312static int qe_ep_reset(struct qe_udc *udc, int pipe_num)
313{
314	struct qe_ep *ep;
315	u16 tmpusep;
316
317	ep = &udc->eps[pipe_num];
318	tmpusep = in_be16(&udc->usb_regs->usb_usep[pipe_num]);
319	tmpusep &= ~USB_RTHS_MASK;
320
321	switch (ep->dir) {
322	case USB_DIR_BOTH:
323		qe_ep_flushtxfifo(ep);
324		break;
325	case USB_DIR_OUT:
326		tmpusep |= USB_THS_IGNORE_IN;
327		break;
328	case USB_DIR_IN:
329		qe_ep_flushtxfifo(ep);
330		tmpusep |= USB_RHS_IGNORE_OUT;
331		break;
332	default:
333		break;
334	}
335	out_be16(&udc->usb_regs->usb_usep[pipe_num], tmpusep);
336
337	qe_epbds_reset(udc, pipe_num);
338
339	return 0;
340}
341
342static int qe_ep_toggledata01(struct qe_ep *ep)
343{
344	ep->data01 ^= 0x1;
345	return 0;
346}
347
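/* allocate MURAM for this pipe's RX and TX BD rings, record the ring
 * bases in the endpoint parameter RAM and clear every BD */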
348static int qe_ep_bd_init(struct qe_udc *udc, unsigned char pipe_num)
349{
350	struct qe_ep *ep = &udc->eps[pipe_num];
351	unsigned long tmp_addr = 0;
352	struct usb_ep_para __iomem *epparam;
353	int i;
354	struct qe_bd __iomem *bd;
355	int bdring_len;
356
357	if (ep->dir == USB_DIR_OUT)
358		bdring_len = USB_BDRING_LEN_RX;
359	else
360		bdring_len = USB_BDRING_LEN;
361
362	epparam = udc->ep_param[pipe_num];
	/* allocate MURAM for the BD rings and set the ep parameters */
364	tmp_addr = cpm_muram_alloc(sizeof(struct qe_bd) * (bdring_len +
365				USB_BDRING_LEN_TX), QE_ALIGNMENT_OF_BD);
366	if (IS_ERR_VALUE(tmp_addr))
367		return -ENOMEM;
368
369	out_be16(&epparam->rbase, (u16)tmp_addr);
370	out_be16(&epparam->tbase, (u16)(tmp_addr +
371				(sizeof(struct qe_bd) * bdring_len)));
372
373	out_be16(&epparam->rbptr, in_be16(&epparam->rbase));
374	out_be16(&epparam->tbptr, in_be16(&epparam->tbase));
375
376	ep->rxbase = cpm_muram_addr(tmp_addr);
377	ep->txbase = cpm_muram_addr(tmp_addr + (sizeof(struct qe_bd)
378				* bdring_len));
379	ep->n_rxbd = ep->rxbase;
380	ep->e_rxbd = ep->rxbase;
381	ep->n_txbd = ep->txbase;
382	ep->c_txbd = ep->txbase;
383	ep->data01 = 0; /* data0 */
384
385	/* Init TX and RX bds */
386	bd = ep->rxbase;
387	for (i = 0; i < bdring_len - 1; i++) {
388		out_be32(&bd->buf, 0);
389		out_be32((u32 __iomem *)bd, 0);
390		bd++;
391	}
392	out_be32(&bd->buf, 0);
393	out_be32((u32 __iomem *)bd, R_W);
394
395	bd = ep->txbase;
396	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
397		out_be32(&bd->buf, 0);
398		out_be32((u32 __iomem *)bd, 0);
399		bd++;
400	}
401	out_be32(&bd->buf, 0);
402	out_be32((u32 __iomem *)bd, T_W);
403
404	return 0;
405}
406
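/* allocate the rx frame descriptor and a receive bounce buffer, make
 * the buffer DMA-able and point each RX BD at a slot in it */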
407static int qe_ep_rxbd_update(struct qe_ep *ep)
408{
409	unsigned int size;
410	int i;
411	unsigned int tmp;
412	struct qe_bd __iomem *bd;
413	unsigned int bdring_len;
414
415	if (ep->rxbase == NULL)
416		return -EINVAL;
417
418	bd = ep->rxbase;
419
420	ep->rxframe = kmalloc(sizeof(*ep->rxframe), GFP_ATOMIC);
421	if (!ep->rxframe)
422		return -ENOMEM;
423
424	qe_frame_init(ep->rxframe);
425
426	if (ep->dir == USB_DIR_OUT)
427		bdring_len = USB_BDRING_LEN_RX;
428	else
429		bdring_len = USB_BDRING_LEN;
430
431	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (bdring_len + 1);
432	ep->rxbuffer = kzalloc(size, GFP_ATOMIC);
433	if (!ep->rxbuffer) {
434		kfree(ep->rxframe);
435		return -ENOMEM;
436	}
437
438	ep->rxbuf_d = virt_to_phys((void *)ep->rxbuffer);
439	if (ep->rxbuf_d == DMA_ADDR_INVALID) {
440		ep->rxbuf_d = dma_map_single(ep->udc->gadget.dev.parent,
441					ep->rxbuffer,
442					size,
443					DMA_FROM_DEVICE);
444		ep->rxbufmap = 1;
445	} else {
446		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
447					ep->rxbuf_d, size,
448					DMA_FROM_DEVICE);
449		ep->rxbufmap = 0;
450	}
451
452	size = ep->ep.maxpacket + USB_CRC_SIZE + 2;
453	tmp = ep->rxbuf_d;
454	tmp = (u32)(((tmp >> 2) << 2) + 4);
455
456	for (i = 0; i < bdring_len - 1; i++) {
457		out_be32(&bd->buf, tmp);
458		out_be32((u32 __iomem *)bd, (R_E | R_I));
459		tmp = tmp + size;
460		bd++;
461	}
462	out_be32(&bd->buf, tmp);
463	out_be32((u32 __iomem *)bd, (R_E | R_I | R_W));
464
465	return 0;
466}
467
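/* program the USEP register (endpoint number, transfer type, direction
 * handling) and the endpoint parameter RAM (buffer control bits and the
 * maximum receive buffer length) */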
468static int qe_ep_register_init(struct qe_udc *udc, unsigned char pipe_num)
469{
470	struct qe_ep *ep = &udc->eps[pipe_num];
471	struct usb_ep_para __iomem *epparam;
472	u16 usep, logepnum;
473	u16 tmp;
474	u8 rtfcr = 0;
475
476	epparam = udc->ep_param[pipe_num];
477
478	usep = 0;
479	logepnum = (ep->ep.desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
480	usep |= (logepnum << USB_EPNUM_SHIFT);
481
482	switch (ep->ep.desc->bmAttributes & 0x03) {
483	case USB_ENDPOINT_XFER_BULK:
484		usep |= USB_TRANS_BULK;
485		break;
486	case USB_ENDPOINT_XFER_ISOC:
487		usep |=  USB_TRANS_ISO;
488		break;
489	case USB_ENDPOINT_XFER_INT:
490		usep |= USB_TRANS_INT;
491		break;
492	default:
493		usep |= USB_TRANS_CTR;
494		break;
495	}
496
497	switch (ep->dir) {
498	case USB_DIR_OUT:
499		usep |= USB_THS_IGNORE_IN;
500		break;
501	case USB_DIR_IN:
502		usep |= USB_RHS_IGNORE_OUT;
503		break;
504	default:
505		break;
506	}
507	out_be16(&udc->usb_regs->usb_usep[pipe_num], usep);
508
509	rtfcr = 0x30;
510	out_8(&epparam->rbmr, rtfcr);
511	out_8(&epparam->tbmr, rtfcr);
512
513	tmp = (u16)(ep->ep.maxpacket + USB_CRC_SIZE);
	/* MRBLR must be divisible by 4 */
515	tmp = (u16)(((tmp >> 2) << 2) + 4);
516	out_be16(&epparam->mrblr, tmp);
517
518	return 0;
519}
520
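/* validate wMaxPacketSize against the USB 2.0 limits for the requested
 * transfer type and speed, then set up the endpoint's software state,
 * BD rings and registers */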
521static int qe_ep_init(struct qe_udc *udc,
522		      unsigned char pipe_num,
523		      const struct usb_endpoint_descriptor *desc)
524{
525	struct qe_ep *ep = &udc->eps[pipe_num];
526	unsigned long flags;
527	int reval = 0;
528	u16 max = 0;
529
530	max = usb_endpoint_maxp(desc);
531
	/* check that the max packet size is valid for this endpoint;
	 * refer to USB 2.0 spec, table 9-13
	 */
535	if (pipe_num != 0) {
536		switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
537		case USB_ENDPOINT_XFER_BULK:
538			if (strstr(ep->ep.name, "-iso")
539					|| strstr(ep->ep.name, "-int"))
540				goto en_done;
541			switch (udc->gadget.speed) {
542			case USB_SPEED_HIGH:
				if ((max == 128) || (max == 256) || (max == 512))
					break;
				fallthrough;
546			default:
547				switch (max) {
548				case 4:
549				case 8:
550				case 16:
551				case 32:
552				case 64:
553					break;
				default:
					goto en_done;
557				}
558			}
559			break;
560		case USB_ENDPOINT_XFER_INT:
561			if (strstr(ep->ep.name, "-iso"))	/* bulk is ok */
562				goto en_done;
563			switch (udc->gadget.speed) {
564			case USB_SPEED_HIGH:
565				if (max <= 1024)
566					break;
567				fallthrough;
568			case USB_SPEED_FULL:
569				if (max <= 64)
570					break;
571				fallthrough;
572			default:
573				if (max <= 8)
574					break;
575				goto en_done;
576			}
577			break;
578		case USB_ENDPOINT_XFER_ISOC:
579			if (strstr(ep->ep.name, "-bulk")
580				|| strstr(ep->ep.name, "-int"))
581				goto en_done;
582			switch (udc->gadget.speed) {
583			case USB_SPEED_HIGH:
584				if (max <= 1024)
585					break;
586				fallthrough;
587			case USB_SPEED_FULL:
588				if (max <= 1023)
589					break;
590				fallthrough;
591			default:
592				goto en_done;
593			}
594			break;
595		case USB_ENDPOINT_XFER_CONTROL:
596			if (strstr(ep->ep.name, "-iso")
597				|| strstr(ep->ep.name, "-int"))
598				goto en_done;
599			switch (udc->gadget.speed) {
600			case USB_SPEED_HIGH:
601			case USB_SPEED_FULL:
602				switch (max) {
603				case 1:
604				case 2:
605				case 4:
606				case 8:
607				case 16:
608				case 32:
609				case 64:
610					break;
611				default:
612					goto en_done;
613				}
614				fallthrough;
615			case USB_SPEED_LOW:
616				switch (max) {
617				case 1:
618				case 2:
619				case 4:
620				case 8:
621					break;
622				default:
623					goto en_done;
624				}
625			default:
626				goto en_done;
627			}
628			break;
629
630		default:
631			goto en_done;
632		}
633	} /* if ep0*/
634
635	spin_lock_irqsave(&udc->lock, flags);
636
637	/* initialize ep structure */
638	ep->ep.maxpacket = max;
639	ep->tm = (u8)(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
640	ep->ep.desc = desc;
641	ep->stopped = 0;
642	ep->init = 1;
643
644	if (pipe_num == 0) {
645		ep->dir = USB_DIR_BOTH;
646		udc->ep0_dir = USB_DIR_OUT;
647		udc->ep0_state = WAIT_FOR_SETUP;
648	} else	{
649		switch (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) {
650		case USB_DIR_OUT:
651			ep->dir = USB_DIR_OUT;
652			break;
653		case USB_DIR_IN:
			ep->dir = USB_DIR_IN;
			break;
		default:
656			break;
657		}
658	}
659
660	/* hardware special operation */
661	qe_ep_bd_init(udc, pipe_num);
662	if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_OUT)) {
663		reval = qe_ep_rxbd_update(ep);
664		if (reval)
665			goto en_done1;
666	}
667
668	if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_IN)) {
669		ep->txframe = kmalloc(sizeof(*ep->txframe), GFP_ATOMIC);
670		if (!ep->txframe)
671			goto en_done2;
672		qe_frame_init(ep->txframe);
673	}
674
675	qe_ep_register_init(udc, pipe_num);
676
677	/* Now HW will be NAKing transfers to that EP,
678	 * until a buffer is queued to it. */
679	spin_unlock_irqrestore(&udc->lock, flags);
680
681	return 0;
682en_done2:
683	kfree(ep->rxbuffer);
684	kfree(ep->rxframe);
685en_done1:
686	spin_unlock_irqrestore(&udc->lock, flags);
687en_done:
688	dev_err(udc->dev, "failed to initialize %s\n", ep->ep.name);
689	return -ENODEV;
690}
691
692static inline void qe_usb_enable(struct qe_udc *udc)
693{
694	setbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
695}
696
697static inline void qe_usb_disable(struct qe_udc *udc)
698{
699	clrbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
700}
701
702/*----------------------------------------------------------------------------*
 *		USB and EP basic manipulation functions end		      *
704 *----------------------------------------------------------------------------*/
705
706
707/******************************************************************************
708		UDC transmit and receive process
709 ******************************************************************************/
710static void recycle_one_rxbd(struct qe_ep *ep)
711{
712	u32 bdstatus;
713
714	bdstatus = in_be32((u32 __iomem *)ep->e_rxbd);
715	bdstatus = R_I | R_E | (bdstatus & R_W);
716	out_be32((u32 __iomem *)ep->e_rxbd, bdstatus);
717
718	if (bdstatus & R_W)
719		ep->e_rxbd = ep->rxbase;
720	else
721		ep->e_rxbd++;
722}
723
724static void recycle_rxbds(struct qe_ep *ep, unsigned char stopatnext)
725{
726	u32 bdstatus;
727	struct qe_bd __iomem *bd, *nextbd;
728	unsigned char stop = 0;
729
730	nextbd = ep->n_rxbd;
731	bd = ep->e_rxbd;
732	bdstatus = in_be32((u32 __iomem *)bd);
733
734	while (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK) && !stop) {
735		bdstatus = R_E | R_I | (bdstatus & R_W);
736		out_be32((u32 __iomem *)bd, bdstatus);
737
738		if (bdstatus & R_W)
739			bd = ep->rxbase;
740		else
741			bd++;
742
743		bdstatus = in_be32((u32 __iomem *)bd);
744		if (stopatnext && (bd == nextbd))
745			stop = 1;
746	}
747
748	ep->e_rxbd = bd;
749}
750
751static void ep_recycle_rxbds(struct qe_ep *ep)
752{
753	struct qe_bd __iomem *bd = ep->n_rxbd;
754	u32 bdstatus;
755	u8 epnum = ep->epnum;
756	struct qe_udc *udc = ep->udc;
757
758	bdstatus = in_be32((u32 __iomem *)bd);
759	if (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK)) {
760		bd = ep->rxbase +
761				((in_be16(&udc->ep_param[epnum]->rbptr) -
762				  in_be16(&udc->ep_param[epnum]->rbase))
763				 >> 3);
764		bdstatus = in_be32((u32 __iomem *)bd);
765
766		if (bdstatus & R_W)
767			bd = ep->rxbase;
768		else
769			bd++;
770
771		ep->e_rxbd = bd;
772		recycle_rxbds(ep, 0);
773		ep->e_rxbd = ep->n_rxbd;
774	} else
775		recycle_rxbds(ep, 1);
776
777	if (in_be16(&udc->usb_regs->usb_usber) & USB_E_BSY_MASK)
778		out_be16(&udc->usb_regs->usb_usber, USB_E_BSY_MASK);
779
780	if (ep->has_data <= 0 && (!list_empty(&ep->queue)))
781		qe_eprx_normal(ep);
782
783	ep->localnack = 0;
784}
785
786static void setup_received_handle(struct qe_udc *udc,
787					struct usb_ctrlrequest *setup);
788static int qe_ep_rxframe_handle(struct qe_ep *ep);
789static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req);
790/* when BD PID is setup, handle the packet */
791static int ep0_setup_handle(struct qe_udc *udc)
792{
793	struct qe_ep *ep = &udc->eps[0];
794	struct qe_frame *pframe;
795	unsigned int fsize;
796	u8 *cp;
797
798	pframe = ep->rxframe;
799	if ((frame_get_info(pframe) & PID_SETUP)
800			&& (udc->ep0_state == WAIT_FOR_SETUP)) {
801		fsize = frame_get_length(pframe);
802		if (unlikely(fsize != 8))
803			return -EINVAL;
804		cp = (u8 *)&udc->local_setup_buff;
805		memcpy(cp, pframe->data, fsize);
806		ep->data01 = 1;
807
		/* handle the usb command based on the usb_ctrlrequest */
809		setup_received_handle(udc, &udc->local_setup_buff);
810		return 0;
811	}
812	return -EINVAL;
813}
814
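/* walk the ep0 RX BD ring and dispatch each completed frame: SETUP
 * packets go to ep0_setup_handle(), data/status frames to
 * qe_ep_rxframe_handle() */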
815static int qe_ep0_rx(struct qe_udc *udc)
816{
817	struct qe_ep *ep = &udc->eps[0];
818	struct qe_frame *pframe;
819	struct qe_bd __iomem *bd;
820	u32 bdstatus, length;
821	u32 vaddr;
822
823	pframe = ep->rxframe;
824
825	if (ep->dir == USB_DIR_IN) {
826		dev_err(udc->dev, "ep0 not a control endpoint\n");
827		return -EINVAL;
828	}
829
830	bd = ep->n_rxbd;
831	bdstatus = in_be32((u32 __iomem *)bd);
832	length = bdstatus & BD_LENGTH_MASK;
833
834	while (!(bdstatus & R_E) && length) {
835		if ((bdstatus & R_F) && (bdstatus & R_L)
836			&& !(bdstatus & R_ERROR)) {
837			if (length == USB_CRC_SIZE) {
838				udc->ep0_state = WAIT_FOR_SETUP;
839				dev_vdbg(udc->dev,
840					"receive a ZLP in status phase\n");
841			} else {
842				qe_frame_clean(pframe);
843				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
844				frame_set_data(pframe, (u8 *)vaddr);
845				frame_set_length(pframe,
846						(length - USB_CRC_SIZE));
847				frame_set_status(pframe, FRAME_OK);
848				switch (bdstatus & R_PID) {
849				case R_PID_SETUP:
850					frame_set_info(pframe, PID_SETUP);
851					break;
852				case R_PID_DATA1:
853					frame_set_info(pframe, PID_DATA1);
854					break;
855				default:
856					frame_set_info(pframe, PID_DATA0);
857					break;
858				}
859
860				if ((bdstatus & R_PID) == R_PID_SETUP)
861					ep0_setup_handle(udc);
862				else
863					qe_ep_rxframe_handle(ep);
864			}
865		} else {
			dev_err(udc->dev, "Received frame with error!\n");
867		}
868
869		/* note: don't clear the rxbd's buffer address */
870		recycle_one_rxbd(ep);
871
872		/* Get next BD */
873		if (bdstatus & R_W)
874			bd = ep->rxbase;
875		else
876			bd++;
877
878		bdstatus = in_be32((u32 __iomem *)bd);
879		length = bdstatus & BD_LENGTH_MASK;
880
881	}
882
883	ep->n_rxbd = bd;
884
885	return 0;
886}
887
888static int qe_ep_rxframe_handle(struct qe_ep *ep)
889{
890	struct qe_frame *pframe;
891	u8 framepid = 0;
892	unsigned int fsize;
893	u8 *cp;
894	struct qe_req *req;
895
896	pframe = ep->rxframe;
897
898	if (frame_get_info(pframe) & PID_DATA1)
899		framepid = 0x1;
900
901	if (framepid != ep->data01) {
		dev_err(ep->udc->dev, "data01 toggle error!\n");
903		return -EIO;
904	}
905
906	fsize = frame_get_length(pframe);
907	if (list_empty(&ep->queue)) {
		dev_err(ep->udc->dev, "%s has no request queued!\n", ep->name);
909	} else {
910		req = list_entry(ep->queue.next, struct qe_req, queue);
911
912		cp = (u8 *)(req->req.buf) + req->req.actual;
913		if (cp) {
914			memcpy(cp, pframe->data, fsize);
915			req->req.actual += fsize;
916			if ((fsize < ep->ep.maxpacket) ||
917					(req->req.actual >= req->req.length)) {
918				if (ep->epnum == 0)
919					ep0_req_complete(ep->udc, req);
920				else
921					done(ep, req, 0);
922				if (list_empty(&ep->queue) && ep->epnum != 0)
923					qe_eprx_nack(ep);
924			}
925		}
926	}
927
928	qe_ep_toggledata01(ep);
929
930	return 0;
931}
932
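/* deferred receive processing for the non-control endpoints: consume
 * the filled RX BDs, hand each good frame to qe_ep_rxframe_handle()
 * and recycle the BDs */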
933static void ep_rx_tasklet(struct tasklet_struct *t)
934{
935	struct qe_udc *udc = from_tasklet(udc, t, rx_tasklet);
936	struct qe_ep *ep;
937	struct qe_frame *pframe;
938	struct qe_bd __iomem *bd;
939	unsigned long flags;
940	u32 bdstatus, length;
941	u32 vaddr, i;
942
943	spin_lock_irqsave(&udc->lock, flags);
944
945	for (i = 1; i < USB_MAX_ENDPOINTS; i++) {
946		ep = &udc->eps[i];
947
948		if (ep->dir == USB_DIR_IN || ep->enable_tasklet == 0) {
949			dev_dbg(udc->dev,
				"This is a transmit ep or its tasklet is disabled!\n");
951			continue;
952		}
953
954		pframe = ep->rxframe;
955		bd = ep->n_rxbd;
956		bdstatus = in_be32((u32 __iomem *)bd);
957		length = bdstatus & BD_LENGTH_MASK;
958
959		while (!(bdstatus & R_E) && length) {
960			if (list_empty(&ep->queue)) {
961				qe_eprx_nack(ep);
962				dev_dbg(udc->dev,
					"The rx ep has no req queued, has_data %d\n",
964					ep->has_data);
965				break;
966			}
967
968			if ((bdstatus & R_F) && (bdstatus & R_L)
969				&& !(bdstatus & R_ERROR)) {
970				qe_frame_clean(pframe);
971				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
972				frame_set_data(pframe, (u8 *)vaddr);
973				frame_set_length(pframe,
974						(length - USB_CRC_SIZE));
975				frame_set_status(pframe, FRAME_OK);
976				switch (bdstatus & R_PID) {
977				case R_PID_DATA1:
978					frame_set_info(pframe, PID_DATA1);
979					break;
980				case R_PID_SETUP:
981					frame_set_info(pframe, PID_SETUP);
982					break;
983				default:
984					frame_set_info(pframe, PID_DATA0);
985					break;
986				}
987				/* handle the rx frame */
988				qe_ep_rxframe_handle(ep);
989			} else {
990				dev_err(udc->dev,
991					"error in received frame\n");
992			}
993			/* note: don't clear the rxbd's buffer address */
994			/*clear the length */
995			out_be32((u32 __iomem *)bd, bdstatus & BD_STATUS_MASK);
996			ep->has_data--;
997			if (!(ep->localnack))
998				recycle_one_rxbd(ep);
999
1000			/* Get next BD */
1001			if (bdstatus & R_W)
1002				bd = ep->rxbase;
1003			else
1004				bd++;
1005
1006			bdstatus = in_be32((u32 __iomem *)bd);
1007			length = bdstatus & BD_LENGTH_MASK;
1008		}
1009
1010		ep->n_rxbd = bd;
1011
1012		if (ep->localnack)
1013			ep_recycle_rxbds(ep);
1014
1015		ep->enable_tasklet = 0;
1016	} /* for i=1 */
1017
1018	spin_unlock_irqrestore(&udc->lock, flags);
1019}
1020
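/* called from the RX interrupt path: count the BDs the controller has
 * filled, NAK the endpoint when the ring is nearly full or no request
 * is queued; when a request is pending, schedule the rx tasklet to
 * process the received data */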
1021static int qe_ep_rx(struct qe_ep *ep)
1022{
1023	struct qe_udc *udc;
1024	struct qe_frame *pframe;
1025	struct qe_bd __iomem *bd;
1026	u16 swoffs, ucoffs, emptybds;
1027
1028	udc = ep->udc;
1029	pframe = ep->rxframe;
1030
1031	if (ep->dir == USB_DIR_IN) {
1032		dev_err(udc->dev, "transmit ep in rx function\n");
1033		return -EINVAL;
1034	}
1035
1036	bd = ep->n_rxbd;
1037
1038	swoffs = (u16)(bd - ep->rxbase);
1039	ucoffs = (u16)((in_be16(&udc->ep_param[ep->epnum]->rbptr) -
1040			in_be16(&udc->ep_param[ep->epnum]->rbase)) >> 3);
1041	if (swoffs < ucoffs)
1042		emptybds = USB_BDRING_LEN_RX - ucoffs + swoffs;
1043	else
1044		emptybds = swoffs - ucoffs;
1045
1046	if (emptybds < MIN_EMPTY_BDS) {
1047		qe_eprx_nack(ep);
1048		ep->localnack = 1;
1049		dev_vdbg(udc->dev, "%d empty bds, send NACK\n", emptybds);
1050	}
1051	ep->has_data = USB_BDRING_LEN_RX - emptybds;
1052
1053	if (list_empty(&ep->queue)) {
1054		qe_eprx_nack(ep);
		dev_vdbg(udc->dev, "The rx ep has no req queued, %d BDs hold data\n",
1056				ep->has_data);
1057		return 0;
1058	}
1059
1060	tasklet_schedule(&udc->rx_tasklet);
1061	ep->enable_tasklet = 1;
1062
1063	return 0;
1064}
1065
/* send the data carried in a frame, regardless of the tx_req state */
1067static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
1068{
1069	struct qe_udc *udc = ep->udc;
1070	struct qe_bd __iomem *bd;
1071	u16 saveusbmr;
1072	u32 bdstatus, pidmask;
1073	u32 paddr;
1074
1075	if (ep->dir == USB_DIR_OUT) {
1076		dev_err(udc->dev, "receive ep passed to tx function\n");
1077		return -EINVAL;
1078	}
1079
1080	/* Disable the Tx interrupt */
1081	saveusbmr = in_be16(&udc->usb_regs->usb_usbmr);
1082	out_be16(&udc->usb_regs->usb_usbmr,
1083			saveusbmr & ~(USB_E_TXB_MASK | USB_E_TXE_MASK));
1084
1085	bd = ep->n_txbd;
1086	bdstatus = in_be32((u32 __iomem *)bd);
1087
1088	if (!(bdstatus & (T_R | BD_LENGTH_MASK))) {
1089		if (frame_get_length(frame) == 0) {
1090			frame_set_data(frame, udc->nullbuf);
1091			frame_set_length(frame, 2);
1092			frame->info |= (ZLP | NO_CRC);
1093			dev_vdbg(udc->dev, "the frame size = 0\n");
1094		}
1095		paddr = virt_to_phys((void *)frame->data);
1096		out_be32(&bd->buf, paddr);
1097		bdstatus = (bdstatus&T_W);
1098		if (!(frame_get_info(frame) & NO_CRC))
1099			bdstatus |= T_R | T_I | T_L | T_TC
1100					| frame_get_length(frame);
1101		else
1102			bdstatus |= T_R | T_I | T_L | frame_get_length(frame);
1103
1104		/* if the packet is a ZLP in status phase */
1105		if ((ep->epnum == 0) && (udc->ep0_state == DATA_STATE_NEED_ZLP))
1106			ep->data01 = 0x1;
1107
1108		if (ep->data01) {
1109			pidmask = T_PID_DATA1;
1110			frame->info |= PID_DATA1;
1111		} else {
1112			pidmask = T_PID_DATA0;
1113			frame->info |= PID_DATA0;
1114		}
1115		bdstatus |= T_CNF;
1116		bdstatus |= pidmask;
1117		out_be32((u32 __iomem *)bd, bdstatus);
1118		qe_ep_filltxfifo(ep);
1119
1120		/* enable the TX interrupt */
1121		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1122
1123		qe_ep_toggledata01(ep);
1124		if (bdstatus & T_W)
1125			ep->n_txbd = ep->txbase;
1126		else
1127			ep->n_txbd++;
1128
1129		return 0;
1130	} else {
1131		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1132		dev_vdbg(udc->dev, "The tx bd is not ready!\n");
1133		return -EBUSY;
1134	}
1135}
1136
/* when a bd has been transmitted, this function updates the
 * tx_req bookkeeping; ep0 is not handled here */
1139static int txcomplete(struct qe_ep *ep, unsigned char restart)
1140{
1141	if (ep->tx_req != NULL) {
1142		struct qe_req *req = ep->tx_req;
1143		unsigned zlp = 0, last_len = 0;
1144
1145		last_len = min_t(unsigned, req->req.length - ep->sent,
1146				ep->ep.maxpacket);
1147
1148		if (!restart) {
1149			int asent = ep->last;
1150			ep->sent += asent;
1151			ep->last -= asent;
1152		} else {
1153			ep->last = 0;
1154		}
1155
		/* zlp needed when req->req.zero is set */
1157		if (req->req.zero) {
1158			if (last_len == 0 ||
1159				(req->req.length % ep->ep.maxpacket) != 0)
1160				zlp = 0;
1161			else
1162				zlp = 1;
1163		} else
1164			zlp = 0;
1165
		/* the request has been transmitted completely */
1167		if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
1168			done(ep, ep->tx_req, 0);
1169			ep->tx_req = NULL;
1170			ep->last = 0;
1171			ep->sent = 0;
1172		}
1173	}
1174
	/* pick up a new tx_req for this endpoint */
1176	if (ep->tx_req == NULL) {
1177		if (!list_empty(&ep->queue)) {
1178			ep->tx_req = list_entry(ep->queue.next,	struct qe_req,
1179							queue);
1180			ep->last = 0;
1181			ep->sent = 0;
1182		}
1183	}
1184
1185	return 0;
1186}
1187
1188/* give a frame and a tx_req, send some data */
1189static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
1190{
1191	unsigned int size;
1192	u8 *buf;
1193
1194	qe_frame_clean(frame);
1195	size = min_t(u32, (ep->tx_req->req.length - ep->sent),
1196				ep->ep.maxpacket);
1197	buf = (u8 *)ep->tx_req->req.buf + ep->sent;
1198	if (buf && size) {
1199		ep->last = size;
1200		ep->tx_req->req.actual += size;
1201		frame_set_data(frame, buf);
1202		frame_set_length(frame, size);
1203		frame_set_status(frame, FRAME_OK);
1204		frame_set_info(frame, 0);
1205		return qe_ep_tx(ep, frame);
1206	}
1207	return -EIO;
1208}
1209
/* given a frame struct, send a ZLP */
1211static int sendnulldata(struct qe_ep *ep, struct qe_frame *frame, uint infor)
1212{
1213	struct qe_udc *udc = ep->udc;
1214
1215	if (frame == NULL)
1216		return -ENODEV;
1217
1218	qe_frame_clean(frame);
1219	frame_set_data(frame, (u8 *)udc->nullbuf);
1220	frame_set_length(frame, 2);
1221	frame_set_status(frame, FRAME_OK);
1222	frame_set_info(frame, (ZLP | NO_CRC | infor));
1223
1224	return qe_ep_tx(ep, frame);
1225}
1226
1227static int frame_create_tx(struct qe_ep *ep, struct qe_frame *frame)
1228{
1229	struct qe_req *req = ep->tx_req;
1230	int reval;
1231
1232	if (req == NULL)
1233		return -ENODEV;
1234
1235	if ((req->req.length - ep->sent) > 0)
1236		reval = qe_usb_senddata(ep, frame);
1237	else
1238		reval = sendnulldata(ep, frame, 0);
1239
1240	return reval;
1241}
1242
/* if direction is DIR_IN, the status transaction is Device->Host;
 * if direction is DIR_OUT, the status transaction is Host->Device.
 * In the status phase the udc creates a request to collect the status */
1246static int ep0_prime_status(struct qe_udc *udc, int direction)
1247{
1248
1249	struct qe_ep *ep = &udc->eps[0];
1250
1251	if (direction == USB_DIR_IN) {
1252		udc->ep0_state = DATA_STATE_NEED_ZLP;
1253		udc->ep0_dir = USB_DIR_IN;
1254		sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1255	} else {
1256		udc->ep0_dir = USB_DIR_OUT;
1257		udc->ep0_state = WAIT_FOR_OUT_STATUS;
1258	}
1259
1260	return 0;
1261}
1262
1263/* a request complete in ep0, whether gadget request or udc request */
1264static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req)
1265{
1266	struct qe_ep *ep = &udc->eps[0];
	/* the usb and ep status have already been set in ch9setaddress() */
1268
1269	switch (udc->ep0_state) {
1270	case DATA_STATE_XMIT:
1271		done(ep, req, 0);
1272		/* receive status phase */
1273		if (ep0_prime_status(udc, USB_DIR_OUT))
1274			qe_ep0_stall(udc);
1275		break;
1276
1277	case DATA_STATE_NEED_ZLP:
1278		done(ep, req, 0);
1279		udc->ep0_state = WAIT_FOR_SETUP;
1280		break;
1281
1282	case DATA_STATE_RECV:
1283		done(ep, req, 0);
1284		/* send status phase */
1285		if (ep0_prime_status(udc, USB_DIR_IN))
1286			qe_ep0_stall(udc);
1287		break;
1288
1289	case WAIT_FOR_OUT_STATUS:
1290		done(ep, req, 0);
1291		udc->ep0_state = WAIT_FOR_SETUP;
1292		break;
1293
1294	case WAIT_FOR_SETUP:
1295		dev_vdbg(udc->dev, "Unexpected interrupt\n");
1296		break;
1297
1298	default:
1299		qe_ep0_stall(udc);
1300		break;
1301	}
1302}
1303
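/* bookkeeping after an ep0 transmit: a status-phase ZLP only advances
 * the ep0 state; otherwise account for the bytes just sent and complete
 * the request once it has been fully transmitted */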
1304static int ep0_txcomplete(struct qe_ep *ep, unsigned char restart)
1305{
1306	struct qe_req *tx_req = NULL;
1307	struct qe_frame *frame = ep->txframe;
1308
1309	if ((frame_get_info(frame) & (ZLP | NO_REQ)) == (ZLP | NO_REQ)) {
1310		if (!restart)
1311			ep->udc->ep0_state = WAIT_FOR_SETUP;
1312		else
1313			sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1314		return 0;
1315	}
1316
1317	tx_req = ep->tx_req;
1318	if (tx_req != NULL) {
1319		if (!restart) {
1320			int asent = ep->last;
1321			ep->sent += asent;
1322			ep->last -= asent;
1323		} else {
1324			ep->last = 0;
1325		}
1326
		/* the request has been transmitted completely */
1328		if ((ep->tx_req->req.length - ep->sent) <= 0) {
1329			ep->tx_req->req.actual = (unsigned int)ep->sent;
1330			ep0_req_complete(ep->udc, ep->tx_req);
1331			ep->tx_req = NULL;
1332			ep->last = 0;
1333			ep->sent = 0;
1334		}
1335	} else {
		dev_vdbg(ep->udc->dev, "ep0 has no request to complete\n");
1337	}
1338
1339	return 0;
1340}
1341
1342static int ep0_txframe_handle(struct qe_ep *ep)
1343{
	/* on error, transmit again */
1345	if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1346		qe_ep_flushtxfifo(ep);
		dev_vdbg(ep->udc->dev, "The EP0 transmit data has an error!\n");
1348		if (frame_get_info(ep->txframe) & PID_DATA0)
1349			ep->data01 = 0;
1350		else
1351			ep->data01 = 1;
1352
1353		ep0_txcomplete(ep, 1);
1354	} else
1355		ep0_txcomplete(ep, 0);
1356
1357	frame_create_tx(ep, ep->txframe);
1358	return 0;
1359}
1360
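/* confirm transmitted ep0 BDs: recycle each one, record any transmit
 * error in the frame status and let ep0_txframe_handle() decide whether
 * to retransmit or advance the control transfer */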
1361static int qe_ep0_txconf(struct qe_ep *ep)
1362{
1363	struct qe_bd __iomem *bd;
1364	struct qe_frame *pframe;
1365	u32 bdstatus;
1366
1367	bd = ep->c_txbd;
1368	bdstatus = in_be32((u32 __iomem *)bd);
1369	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1370		pframe = ep->txframe;
1371
1372		/* clear and recycle the BD */
1373		out_be32((u32 __iomem *)bd, bdstatus & T_W);
1374		out_be32(&bd->buf, 0);
1375		if (bdstatus & T_W)
1376			ep->c_txbd = ep->txbase;
1377		else
1378			ep->c_txbd++;
1379
1380		if (ep->c_txbd == ep->n_txbd) {
1381			if (bdstatus & DEVICE_T_ERROR) {
1382				frame_set_status(pframe, FRAME_ERROR);
1383				if (bdstatus & T_TO)
1384					pframe->status |= TX_ER_TIMEOUT;
1385				if (bdstatus & T_UN)
1386					pframe->status |= TX_ER_UNDERUN;
1387			}
1388			ep0_txframe_handle(ep);
1389		}
1390
1391		bd = ep->c_txbd;
1392		bdstatus = in_be32((u32 __iomem *)bd);
1393	}
1394
1395	return 0;
1396}
1397
1398static int ep_txframe_handle(struct qe_ep *ep)
1399{
1400	if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1401		qe_ep_flushtxfifo(ep);
		dev_vdbg(ep->udc->dev, "The transmit data has an error!\n");
1403		if (frame_get_info(ep->txframe) & PID_DATA0)
1404			ep->data01 = 0;
1405		else
1406			ep->data01 = 1;
1407
1408		txcomplete(ep, 1);
1409	} else
1410		txcomplete(ep, 0);
1411
1412	frame_create_tx(ep, ep->txframe); /* send the data */
1413	return 0;
1414}
1415
/* confirm the already transmitted bds */
1417static int qe_ep_txconf(struct qe_ep *ep)
1418{
1419	struct qe_bd __iomem *bd;
1420	struct qe_frame *pframe = NULL;
1421	u32 bdstatus;
1422	unsigned char breakonrxinterrupt = 0;
1423
1424	bd = ep->c_txbd;
1425	bdstatus = in_be32((u32 __iomem *)bd);
1426	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1427		pframe = ep->txframe;
1428		if (bdstatus & DEVICE_T_ERROR) {
1429			frame_set_status(pframe, FRAME_ERROR);
1430			if (bdstatus & T_TO)
1431				pframe->status |= TX_ER_TIMEOUT;
1432			if (bdstatus & T_UN)
1433				pframe->status |= TX_ER_UNDERUN;
1434		}
1435
1436		/* clear and recycle the BD */
1437		out_be32((u32 __iomem *)bd, bdstatus & T_W);
1438		out_be32(&bd->buf, 0);
1439		if (bdstatus & T_W)
1440			ep->c_txbd = ep->txbase;
1441		else
1442			ep->c_txbd++;
1443
1444		/* handle the tx frame */
1445		ep_txframe_handle(ep);
1446		bd = ep->c_txbd;
1447		bdstatus = in_be32((u32 __iomem *)bd);
1448	}
1449	if (breakonrxinterrupt)
1450		return -EIO;
1451	else
1452		return 0;
1453}
1454
1455/* Add a request in queue, and try to transmit a packet */
1456static int ep_req_send(struct qe_ep *ep, struct qe_req *req)
1457{
1458	int reval = 0;
1459
1460	if (ep->tx_req == NULL) {
1461		ep->sent = 0;
1462		ep->last = 0;
1463		txcomplete(ep, 0); /* can gain a new tx_req */
1464		reval = frame_create_tx(ep, ep->txframe);
1465	}
1466	return reval;
1467}
1468
/* copy data already received in the BD ring into a newly queued request */
1470static int ep_req_rx(struct qe_ep *ep, struct qe_req *req)
1471{
1472	struct qe_udc *udc = ep->udc;
1473	struct qe_frame *pframe = NULL;
1474	struct qe_bd __iomem *bd;
1475	u32 bdstatus, length;
1476	u32 vaddr, fsize;
1477	u8 *cp;
1478	u8 finish_req = 0;
1479	u8 framepid;
1480
1481	if (list_empty(&ep->queue)) {
		dev_vdbg(udc->dev, "the req has already finished!\n");
1483		return 0;
1484	}
1485	pframe = ep->rxframe;
1486
1487	bd = ep->n_rxbd;
1488	bdstatus = in_be32((u32 __iomem *)bd);
1489	length = bdstatus & BD_LENGTH_MASK;
1490
1491	while (!(bdstatus & R_E) && length) {
1492		if (finish_req)
1493			break;
1494		if ((bdstatus & R_F) && (bdstatus & R_L)
1495					&& !(bdstatus & R_ERROR)) {
1496			qe_frame_clean(pframe);
1497			vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
1498			frame_set_data(pframe, (u8 *)vaddr);
1499			frame_set_length(pframe, (length - USB_CRC_SIZE));
1500			frame_set_status(pframe, FRAME_OK);
1501			switch (bdstatus & R_PID) {
1502			case R_PID_DATA1:
1503				frame_set_info(pframe, PID_DATA1); break;
1504			default:
1505				frame_set_info(pframe, PID_DATA0); break;
1506			}
1507			/* handle the rx frame */
1508
1509			if (frame_get_info(pframe) & PID_DATA1)
1510				framepid = 0x1;
1511			else
1512				framepid = 0;
1513
1514			if (framepid != ep->data01) {
				dev_vdbg(udc->dev, "data01 toggle error!\n");
1516			} else {
1517				fsize = frame_get_length(pframe);
1518
1519				cp = (u8 *)(req->req.buf) + req->req.actual;
1520				if (cp) {
1521					memcpy(cp, pframe->data, fsize);
1522					req->req.actual += fsize;
1523					if ((fsize < ep->ep.maxpacket)
1524						|| (req->req.actual >=
1525							req->req.length)) {
1526						finish_req = 1;
1527						done(ep, req, 0);
1528						if (list_empty(&ep->queue))
1529							qe_eprx_nack(ep);
1530					}
1531				}
1532				qe_ep_toggledata01(ep);
1533			}
1534		} else {
			dev_err(udc->dev, "Received frame with error!\n");
1536		}
1537
		/* note: don't clear the rxbd's buffer address,
		 * only clear the length */
1540		out_be32((u32 __iomem *)bd, (bdstatus & BD_STATUS_MASK));
1541		ep->has_data--;
1542
1543		/* Get next BD */
1544		if (bdstatus & R_W)
1545			bd = ep->rxbase;
1546		else
1547			bd++;
1548
1549		bdstatus = in_be32((u32 __iomem *)bd);
1550		length = bdstatus & BD_LENGTH_MASK;
1551	}
1552
1553	ep->n_rxbd = bd;
1554	ep_recycle_rxbds(ep);
1555
1556	return 0;
1557}
1558
/* the request is already on the queue; just start or resume reception */
1560static int ep_req_receive(struct qe_ep *ep, struct qe_req *req)
1561{
1562	if (ep->state == EP_STATE_NACK) {
1563		if (ep->has_data <= 0) {
1564			/* Enable rx and unmask rx interrupt */
1565			qe_eprx_normal(ep);
1566		} else {
1567			/* Copy the exist BD data */
1568			ep_req_rx(ep, req);
1569		}
1570	}
1571
1572	return 0;
1573}
1574
1575/********************************************************************
	Internally Used Functions End
1577********************************************************************/
1578
1579/*-----------------------------------------------------------------------
1580	Endpoint Management Functions For Gadget
1581 -----------------------------------------------------------------------*/
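/* the gadget layer enables an endpoint through usb_ep_enable(); set up
 * everything the pipe needs via qe_ep_init() */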
1582static int qe_ep_enable(struct usb_ep *_ep,
1583			 const struct usb_endpoint_descriptor *desc)
1584{
1585	struct qe_udc *udc;
1586	struct qe_ep *ep;
1587	int retval = 0;
1588	unsigned char epnum;
1589
1590	ep = container_of(_ep, struct qe_ep, ep);
1591
1592	/* catch various bogus parameters */
1593	if (!_ep || !desc || _ep->name == ep_name[0] ||
1594			(desc->bDescriptorType != USB_DT_ENDPOINT))
1595		return -EINVAL;
1596
1597	udc = ep->udc;
1598	if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
1599		return -ESHUTDOWN;
1600
1601	epnum = (u8)desc->bEndpointAddress & 0xF;
1602
1603	retval = qe_ep_init(udc, epnum, desc);
1604	if (retval != 0) {
1605		cpm_muram_free(cpm_muram_offset(ep->rxbase));
1606		dev_dbg(udc->dev, "enable ep%d failed\n", ep->epnum);
1607		return -EINVAL;
1608	}
1609	dev_dbg(udc->dev, "enable ep%d successful\n", ep->epnum);
1610	return 0;
1611}
1612
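/* disable an endpoint: retire all of its requests, reset the hardware
 * and release its BD rings, receive buffer and frame descriptors */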
1613static int qe_ep_disable(struct usb_ep *_ep)
1614{
1615	struct qe_udc *udc;
1616	struct qe_ep *ep;
1617	unsigned long flags;
1618	unsigned int size;
1619
1620	ep = container_of(_ep, struct qe_ep, ep);
1621	udc = ep->udc;
1622
1623	if (!_ep || !ep->ep.desc) {
1624		dev_dbg(udc->dev, "%s not enabled\n", _ep ? ep->ep.name : NULL);
1625		return -EINVAL;
1626	}
1627
1628	spin_lock_irqsave(&udc->lock, flags);
1629	/* Nuke all pending requests (does flush) */
1630	nuke(ep, -ESHUTDOWN);
1631	ep->ep.desc = NULL;
1632	ep->stopped = 1;
1633	ep->tx_req = NULL;
1634	qe_ep_reset(udc, ep->epnum);
1635	spin_unlock_irqrestore(&udc->lock, flags);
1636
1637	cpm_muram_free(cpm_muram_offset(ep->rxbase));
1638
1639	if (ep->dir == USB_DIR_OUT)
1640		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1641				(USB_BDRING_LEN_RX + 1);
1642	else
1643		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1644				(USB_BDRING_LEN + 1);
1645
1646	if (ep->dir != USB_DIR_IN) {
1647		kfree(ep->rxframe);
1648		if (ep->rxbufmap) {
1649			dma_unmap_single(udc->gadget.dev.parent,
1650					ep->rxbuf_d, size,
1651					DMA_FROM_DEVICE);
1652			ep->rxbuf_d = DMA_ADDR_INVALID;
1653		} else {
1654			dma_sync_single_for_cpu(
1655					udc->gadget.dev.parent,
1656					ep->rxbuf_d, size,
1657					DMA_FROM_DEVICE);
1658		}
1659		kfree(ep->rxbuffer);
1660	}
1661
1662	if (ep->dir != USB_DIR_OUT)
1663		kfree(ep->txframe);
1664
1665	dev_dbg(udc->dev, "disabled %s OK\n", _ep->name);
1666	return 0;
1667}
1668
static struct usb_request *qe_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
1670{
1671	struct qe_req *req;
1672
1673	req = kzalloc(sizeof(*req), gfp_flags);
1674	if (!req)
1675		return NULL;
1676
1677	req->req.dma = DMA_ADDR_INVALID;
1678
1679	INIT_LIST_HEAD(&req->queue);
1680
1681	return &req->req;
1682}
1683
1684static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
1685{
1686	struct qe_req *req;
1687
1688	req = container_of(_req, struct qe_req, req);
1689
1690	if (_req)
1691		kfree(req);
1692}
1693
1694static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req)
1695{
1696	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1697	struct qe_req *req = container_of(_req, struct qe_req, req);
1698	struct qe_udc *udc;
1699	int reval;
1700
1701	udc = ep->udc;
1702	/* catch various bogus parameters */
1703	if (!_req || !req->req.complete || !req->req.buf
1704			|| !list_empty(&req->queue)) {
1705		dev_dbg(udc->dev, "bad params\n");
1706		return -EINVAL;
1707	}
1708	if (!_ep || (!ep->ep.desc && ep_index(ep))) {
1709		dev_dbg(udc->dev, "bad ep\n");
1710		return -EINVAL;
1711	}
1712
1713	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
1714		return -ESHUTDOWN;
1715
1716	req->ep = ep;
1717
1718	/* map virtual address to hardware */
1719	if (req->req.dma == DMA_ADDR_INVALID) {
1720		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
1721					req->req.buf,
1722					req->req.length,
1723					ep_is_in(ep)
1724					? DMA_TO_DEVICE :
1725					DMA_FROM_DEVICE);
1726		req->mapped = 1;
1727	} else {
1728		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
1729					req->req.dma, req->req.length,
1730					ep_is_in(ep)
1731					? DMA_TO_DEVICE :
1732					DMA_FROM_DEVICE);
1733		req->mapped = 0;
1734	}
1735
1736	req->req.status = -EINPROGRESS;
1737	req->req.actual = 0;
1738
1739	list_add_tail(&req->queue, &ep->queue);
	dev_vdbg(udc->dev, "gadget has queued a request on %s, length %d\n",
			ep->name, req->req.length);
1742
1743	/* push the request to device */
1744	if (ep_is_in(ep))
1745		reval = ep_req_send(ep, req);
1746
1747	/* EP0 */
1748	if (ep_index(ep) == 0 && req->req.length > 0) {
1749		if (ep_is_in(ep))
1750			udc->ep0_state = DATA_STATE_XMIT;
1751		else
1752			udc->ep0_state = DATA_STATE_RECV;
1753	}
1754
1755	if (ep->dir == USB_DIR_OUT)
1756		reval = ep_req_receive(ep, req);
1757
1758	return 0;
1759}
1760
1761/* queues (submits) an I/O request to an endpoint */
1762static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1763		       gfp_t gfp_flags)
1764{
1765	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1766	struct qe_udc *udc = ep->udc;
1767	unsigned long flags;
1768	int ret;
1769
1770	spin_lock_irqsave(&udc->lock, flags);
1771	ret = __qe_ep_queue(_ep, _req);
1772	spin_unlock_irqrestore(&udc->lock, flags);
1773	return ret;
1774}
1775
1776/* dequeues (cancels, unlinks) an I/O request from an endpoint */
1777static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1778{
1779	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1780	struct qe_req *req = NULL;
1781	struct qe_req *iter;
1782	unsigned long flags;
1783
1784	if (!_ep || !_req)
1785		return -EINVAL;
1786
1787	spin_lock_irqsave(&ep->udc->lock, flags);
1788
1789	/* make sure it's actually queued on this endpoint */
1790	list_for_each_entry(iter, &ep->queue, queue) {
1791		if (&iter->req != _req)
1792			continue;
1793		req = iter;
1794		break;
1795	}
1796
1797	if (!req) {
1798		spin_unlock_irqrestore(&ep->udc->lock, flags);
1799		return -EINVAL;
1800	}
1801
1802	done(ep, req, -ECONNRESET);
1803
1804	spin_unlock_irqrestore(&ep->udc->lock, flags);
1805	return 0;
1806}
1807
1808/*-----------------------------------------------------------------
1809 * modify the endpoint halt feature
1810 * @ep: the non-isochronous endpoint being stalled
1811 * @value: 1--set halt  0--clear halt
1812 * Returns zero, or a negative error code.
1813*----------------------------------------------------------------*/
1814static int qe_ep_set_halt(struct usb_ep *_ep, int value)
1815{
1816	struct qe_ep *ep;
1817	unsigned long flags;
1818	int status = -EOPNOTSUPP;
1819	struct qe_udc *udc;
1820
1821	ep = container_of(_ep, struct qe_ep, ep);
1822	if (!_ep || !ep->ep.desc) {
1823		status = -EINVAL;
1824		goto out;
1825	}
1826
1827	udc = ep->udc;
	/* Attempts to halt an IN ep will fail if any transfer requests
	 * are still queued */
1830	if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
1831		status = -EAGAIN;
1832		goto out;
1833	}
1834
1835	status = 0;
1836	spin_lock_irqsave(&ep->udc->lock, flags);
1837	qe_eptx_stall_change(ep, value);
1838	qe_eprx_stall_change(ep, value);
1839	spin_unlock_irqrestore(&ep->udc->lock, flags);
1840
1841	if (ep->epnum == 0) {
1842		udc->ep0_state = WAIT_FOR_SETUP;
1843		udc->ep0_dir = 0;
1844	}
1845
1846	/* set data toggle to DATA0 on clear halt */
1847	if (value == 0)
1848		ep->data01 = 0;
	dev_vdbg(udc->dev, "%s %s halt stat %d\n", ep->ep.name,
			value ? "set" : "clear", status);
out:
	return status;
1854}
1855
1856static const struct usb_ep_ops qe_ep_ops = {
1857	.enable = qe_ep_enable,
1858	.disable = qe_ep_disable,
1859
1860	.alloc_request = qe_alloc_request,
1861	.free_request = qe_free_request,
1862
1863	.queue = qe_ep_queue,
1864	.dequeue = qe_ep_dequeue,
1865
1866	.set_halt = qe_ep_set_halt,
1867};
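
/*
 * For illustration only: gadget function drivers do not call these ops
 * directly; they go through the generic gadget API, roughly like the
 * sketch below (my_buffer, my_len and my_complete are hypothetical
 * names belonging to the function driver):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *
 *	req->buf = my_buffer;
 *	req->length = my_len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * usb_ep_queue() lands in qe_ep_queue() above, which takes the udc lock
 * and hands the request to __qe_ep_queue().
 */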
1868
1869/*------------------------------------------------------------------------
1870	Gadget Driver Layer Operations
1871 ------------------------------------------------------------------------*/
1872
1873/* Get the current frame number */
1874static int qe_get_frame(struct usb_gadget *gadget)
1875{
1876	struct qe_udc *udc = container_of(gadget, struct qe_udc, gadget);
1877	u16 tmp;
1878
1879	tmp = in_be16(&udc->usb_param->frame_n);
1880	if (tmp & 0x8000)
1881		return tmp & 0x07ff;
1882	return -EINVAL;
1883}
1884
1885static int fsl_qe_start(struct usb_gadget *gadget,
1886		struct usb_gadget_driver *driver);
1887static int fsl_qe_stop(struct usb_gadget *gadget);
1888
1889/* defined in usb_gadget.h */
1890static const struct usb_gadget_ops qe_gadget_ops = {
1891	.get_frame = qe_get_frame,
1892	.udc_start = fsl_qe_start,
1893	.udc_stop = fsl_qe_stop,
1894};
1895
1896/*-------------------------------------------------------------------------
1897	USB ep0 Setup process in BUS Enumeration
1898 -------------------------------------------------------------------------*/
1899static int udc_reset_ep_queue(struct qe_udc *udc, u8 pipe)
1900{
1901	struct qe_ep *ep = &udc->eps[pipe];
1902
1903	nuke(ep, -ECONNRESET);
1904	ep->tx_req = NULL;
1905	return 0;
1906}
1907
1908static int reset_queues(struct qe_udc *udc)
1909{
1910	u8 pipe;
1911
1912	for (pipe = 0; pipe < USB_MAX_ENDPOINTS; pipe++)
1913		udc_reset_ep_queue(udc, pipe);
1914
1915	/* report disconnect; the driver is already quiesced */
1916	spin_unlock(&udc->lock);
1917	usb_gadget_udc_reset(&udc->gadget, udc->driver);
1918	spin_lock(&udc->lock);
1919
1920	return 0;
1921}
1922
1923static void ch9setaddress(struct qe_udc *udc, u16 value, u16 index,
1924			u16 length)
1925{
1926	/* Save the new address to device struct */
1927	udc->device_address = (u8) value;
1928	/* Update usb state */
1929	udc->usb_state = USB_STATE_ADDRESS;
1930
	/* Status phase, send a ZLP */
1932	if (ep0_prime_status(udc, USB_DIR_IN))
1933		qe_ep0_stall(udc);
1934}
1935
1936static void ownercomplete(struct usb_ep *_ep, struct usb_request *_req)
1937{
1938	struct qe_req *req = container_of(_req, struct qe_req, req);
1939
1940	req->req.buf = NULL;
1941	kfree(req);
1942}
1943
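/* handle a standard GET_STATUS request for the device, an interface or
 * an endpoint and queue the two resulting status bytes on ep0 */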
1944static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
1945			u16 index, u16 length)
1946{
1947	u16 usb_status = 0;
1948	struct qe_req *req;
1949	struct qe_ep *ep;
1950	int status = 0;
1951
1952	ep = &udc->eps[0];
1953	if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1954		/* Get device status */
1955		usb_status = 1 << USB_DEVICE_SELF_POWERED;
1956	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
1957		/* Get interface status */
1958		/* We don't have interface information in udc driver */
1959		usb_status = 0;
1960	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
1961		/* Get endpoint status */
		int pipe = index & USB_ENDPOINT_NUMBER_MASK;
		struct qe_ep *target_ep;
		u16 usep;

		if (pipe >= USB_MAX_ENDPOINTS)
			goto stall;
		target_ep = &udc->eps[pipe];
1967
1968		/* stall if endpoint doesn't exist */
1969		if (!target_ep->ep.desc)
1970			goto stall;
1971
1972		usep = in_be16(&udc->usb_regs->usb_usep[pipe]);
1973		if (index & USB_DIR_IN) {
1974			if (target_ep->dir != USB_DIR_IN)
1975				goto stall;
1976			if ((usep & USB_THS_MASK) == USB_THS_STALL)
1977				usb_status = 1 << USB_ENDPOINT_HALT;
1978		} else {
1979			if (target_ep->dir != USB_DIR_OUT)
1980				goto stall;
1981			if ((usep & USB_RHS_MASK) == USB_RHS_STALL)
1982				usb_status = 1 << USB_ENDPOINT_HALT;
1983		}
1984	}
1985
1986	req = container_of(qe_alloc_request(&ep->ep, GFP_KERNEL),
1987					struct qe_req, req);
1988	req->req.length = 2;
1989	req->req.buf = udc->statusbuf;
1990	*(u16 *)req->req.buf = cpu_to_le16(usb_status);
1991	req->req.status = -EINPROGRESS;
1992	req->req.actual = 0;
1993	req->req.complete = ownercomplete;
1994
1995	udc->ep0_dir = USB_DIR_IN;
1996
1997	/* data phase */
1998	status = __qe_ep_queue(&ep->ep, &req->req);
1999
2000	if (status == 0)
2001		return;
2002stall:
	dev_err(udc->dev, "Can't respond to getstatus request\n");
2004	qe_ep0_stall(udc);
2005}
2006
2007/* only handle the setup request, suppose the device in normal status */
2008static void setup_received_handle(struct qe_udc *udc,
2009				struct usb_ctrlrequest *setup)
2010{
	/* Fix endianness (udc->local_setup_buff is in CPU endianness now) */
2012	u16 wValue = le16_to_cpu(setup->wValue);
2013	u16 wIndex = le16_to_cpu(setup->wIndex);
2014	u16 wLength = le16_to_cpu(setup->wLength);
2015
2016	/* clear the previous request in the ep0 */
2017	udc_reset_ep_queue(udc, 0);
2018
2019	if (setup->bRequestType & USB_DIR_IN)
2020		udc->ep0_dir = USB_DIR_IN;
2021	else
2022		udc->ep0_dir = USB_DIR_OUT;
2023
2024	switch (setup->bRequest) {
2025	case USB_REQ_GET_STATUS:
		/* Data+Status phase from udc */
2027		if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
2028					!= (USB_DIR_IN | USB_TYPE_STANDARD))
2029			break;
2030		ch9getstatus(udc, setup->bRequestType, wValue, wIndex,
2031					wLength);
2032		return;
2033
2034	case USB_REQ_SET_ADDRESS:
2035		/* Status phase from udc */
2036		if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
2037						USB_RECIP_DEVICE))
2038			break;
2039		ch9setaddress(udc, wValue, wIndex, wLength);
2040		return;
2041
2042	case USB_REQ_CLEAR_FEATURE:
2043	case USB_REQ_SET_FEATURE:
2044		/* Requests with no data phase, status phase from udc */
2045		if ((setup->bRequestType & USB_TYPE_MASK)
2046					!= USB_TYPE_STANDARD)
2047			break;
2048
2049		if ((setup->bRequestType & USB_RECIP_MASK)
2050				== USB_RECIP_ENDPOINT) {
2051			int pipe = wIndex & USB_ENDPOINT_NUMBER_MASK;
2052			struct qe_ep *ep;
2053
2054			if (wValue != 0 || wLength != 0
2055				|| pipe >= USB_MAX_ENDPOINTS)
2056				break;
2057			ep = &udc->eps[pipe];
2058
2059			spin_unlock(&udc->lock);
2060			qe_ep_set_halt(&ep->ep,
2061					(setup->bRequest == USB_REQ_SET_FEATURE)
2062						? 1 : 0);
2063			spin_lock(&udc->lock);
2064		}
2065
2066		ep0_prime_status(udc, USB_DIR_IN);
2067
2068		return;
2069
2070	default:
2071		break;
2072	}
2073
2074	if (wLength) {
2075		/* Data phase from gadget, status phase from udc */
2076		if (setup->bRequestType & USB_DIR_IN) {
2077			udc->ep0_state = DATA_STATE_XMIT;
2078			udc->ep0_dir = USB_DIR_IN;
2079		} else {
2080			udc->ep0_state = DATA_STATE_RECV;
2081			udc->ep0_dir = USB_DIR_OUT;
2082		}
2083		spin_unlock(&udc->lock);
2084		if (udc->driver->setup(&udc->gadget,
2085					&udc->local_setup_buff) < 0)
2086			qe_ep0_stall(udc);
2087		spin_lock(&udc->lock);
2088	} else {
2089		/* No data phase, IN status from gadget */
2090		udc->ep0_dir = USB_DIR_IN;
2091		spin_unlock(&udc->lock);
2092		if (udc->driver->setup(&udc->gadget,
2093					&udc->local_setup_buff) < 0)
2094			qe_ep0_stall(udc);
2095		spin_lock(&udc->lock);
2096		udc->ep0_state = DATA_STATE_NEED_ZLP;
2097	}
2098}
2099
2100/*-------------------------------------------------------------------------
2101	USB Interrupt handlers
2102 -------------------------------------------------------------------------*/
2103static void suspend_irq(struct qe_udc *udc)
2104{
2105	udc->resume_state = udc->usb_state;
2106	udc->usb_state = USB_STATE_SUSPENDED;
2107
	/* report suspend to the driver; serial.c does not support this */
2109	if (udc->driver->suspend)
2110		udc->driver->suspend(&udc->gadget);
2111}
2112
2113static void resume_irq(struct qe_udc *udc)
2114{
2115	udc->usb_state = udc->resume_state;
2116	udc->resume_state = 0;
2117
2118	/* report resume to the gadget driver (serial.c does not support this) */
2119	if (udc->driver->resume)
2120		udc->driver->resume(&udc->gadget);
2121}
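
/*
 * Hedged example of how a gadget driver would receive the notifications
 * above; only the usb_gadget_driver fields are real, the callback names
 * are hypothetical:
 *
 *	static struct usb_gadget_driver example_driver = {
 *		...
 *		.suspend = example_suspend,	// invoked from suspend_irq()
 *		.resume  = example_resume,	// invoked from resume_irq()
 *	};
 */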
2122
2123static void idle_irq(struct qe_udc *udc)
2124{
2125	u8 usbs;
2126
2127	usbs = in_8(&udc->usb_regs->usb_usbs);
2128	if (usbs & USB_IDLE_STATUS_MASK) {
2129		if ((udc->usb_state) != USB_STATE_SUSPENDED)
2130			suspend_irq(udc);
2131	} else {
2132		if (udc->usb_state == USB_STATE_SUSPENDED)
2133			resume_irq(udc);
2134	}
2135}
2136
2137static int reset_irq(struct qe_udc *udc)
2138{
2139	unsigned char i;
2140
2141	if (udc->usb_state == USB_STATE_DEFAULT)
2142		return 0;
2143
2144	qe_usb_disable(udc);
2145	out_8(&udc->usb_regs->usb_usadr, 0);
2146
2147	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2148		if (udc->eps[i].init)
2149			qe_ep_reset(udc, i);
2150	}
2151
2152	reset_queues(udc);
2153	udc->usb_state = USB_STATE_DEFAULT;
2154	udc->ep0_state = WAIT_FOR_SETUP;
2155	udc->ep0_dir = USB_DIR_OUT;
2156	qe_usb_enable(udc);
2157	return 0;
2158}
2159
2160static int bsy_irq(struct qe_udc *udc)
2161{
2162	return 0;
2163}
2164
2165static int txe_irq(struct qe_udc *udc)
2166{
2167	return 0;
2168}
2169
2170/* ep0 TX interrupts are also handled here */
2171static int tx_irq(struct qe_udc *udc)
2172{
2173	struct qe_ep *ep;
2174	struct qe_bd __iomem *bd;
2175	int i, res = 0;
2176
2177	if ((udc->usb_state == USB_STATE_ADDRESS)
2178		&& (in_8(&udc->usb_regs->usb_usadr) == 0))
2179		out_8(&udc->usb_regs->usb_usadr, udc->device_address);
2180
2181	for (i = (USB_MAX_ENDPOINTS-1); ((i >= 0) && (res == 0)); i--) {
2182		ep = &udc->eps[i];
2183		if (ep && ep->init && (ep->dir != USB_DIR_OUT)) {
2184			bd = ep->c_txbd;
2185			if (!(in_be32((u32 __iomem *)bd) & T_R)
2186						&& (in_be32(&bd->buf))) {
2187				/* confirm the transmitted bd */
2188				if (ep->epnum == 0)
2189					res = qe_ep0_txconf(ep);
2190				else
2191					res = qe_ep_txconf(ep);
2192			}
2193		}
2194	}
2195	return res;
2196}
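
/*
 * Illustrative sketch of the buffer-descriptor test used in tx_irq(): the
 * first 32-bit word of a QE BD carries the status bits (T_R, the "ready"
 * flag), and a non-zero buffer pointer marks a BD the driver has actually
 * primed.  The helper name is hypothetical.
 */
static inline bool example_tx_bd_completed(struct qe_bd __iomem *bd)
{
	return !(in_be32((u32 __iomem *)bd) & T_R) && in_be32(&bd->buf);
}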
2197
2198
2199/* setup packet RX is also handled in this function */
2200static void rx_irq(struct qe_udc *udc)
2201{
2202	struct qe_ep *ep;
2203	struct qe_bd __iomem *bd;
2204	int i;
2205
2206	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2207		ep = &udc->eps[i];
2208		if (ep && ep->init && (ep->dir != USB_DIR_IN)) {
2209			bd = ep->n_rxbd;
2210			if (!(in_be32((u32 __iomem *)bd) & R_E)
2211						&& (in_be32(&bd->buf))) {
2212				if (ep->epnum == 0) {
2213					qe_ep0_rx(udc);
2214				} else {
2215					/* non-setup packet receive */
2216					qe_ep_rx(ep);
2217				}
2218			}
2219		}
2220	}
2221}
2222
2223static irqreturn_t qe_udc_irq(int irq, void *_udc)
2224{
2225	struct qe_udc *udc = (struct qe_udc *)_udc;
2226	u16 irq_src;
2227	irqreturn_t status = IRQ_NONE;
2228	unsigned long flags;
2229
2230	spin_lock_irqsave(&udc->lock, flags);
2231
2232	irq_src = in_be16(&udc->usb_regs->usb_usber) &
2233		in_be16(&udc->usb_regs->usb_usbmr);
2234	/* Clear notification bits */
2235	out_be16(&udc->usb_regs->usb_usber, irq_src);
2236	/* USB Interrupt */
2237	if (irq_src & USB_E_IDLE_MASK) {
2238		idle_irq(udc);
2239		irq_src &= ~USB_E_IDLE_MASK;
2240		status = IRQ_HANDLED;
2241	}
2242
2243	if (irq_src & USB_E_TXB_MASK) {
2244		tx_irq(udc);
2245		irq_src &= ~USB_E_TXB_MASK;
2246		status = IRQ_HANDLED;
2247	}
2248
2249	if (irq_src & USB_E_RXB_MASK) {
2250		rx_irq(udc);
2251		irq_src &= ~USB_E_RXB_MASK;
2252		status = IRQ_HANDLED;
2253	}
2254
2255	if (irq_src & USB_E_RESET_MASK) {
2256		reset_irq(udc);
2257		irq_src &= ~USB_E_RESET_MASK;
2258		status = IRQ_HANDLED;
2259	}
2260
2261	if (irq_src & USB_E_BSY_MASK) {
2262		bsy_irq(udc);
2263		irq_src &= ~USB_E_BSY_MASK;
2264		status = IRQ_HANDLED;
2265	}
2266
2267	if (irq_src & USB_E_TXE_MASK) {
2268		txe_irq(udc);
2269		irq_src &= ~USB_E_TXE_MASK;
2270		status = IRQ_HANDLED;
2271	}
2272
2273	spin_unlock_irqrestore(&udc->lock, flags);
2274
2275	return status;
2276}
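
/*
 * Sketch of the event handling pattern in qe_udc_irq() (hypothetical
 * helper): only events that are both pending (USBER) and enabled (USBMR)
 * are serviced, and the pending bits are acknowledged by writing them back
 * to USBER.
 */
static inline u16 example_pending_events(struct qe_udc *udc)
{
	u16 pending = in_be16(&udc->usb_regs->usb_usber) &
			in_be16(&udc->usb_regs->usb_usbmr);

	out_be16(&udc->usb_regs->usb_usber, pending);	/* acknowledge */
	return pending;
}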
2277
2278/*-------------------------------------------------------------------------
2279	Gadget driver probe and unregister.
2280 --------------------------------------------------------------------------*/
2281static int fsl_qe_start(struct usb_gadget *gadget,
2282		struct usb_gadget_driver *driver)
2283{
2284	struct qe_udc *udc;
2285	unsigned long flags;
2286
2287	udc = container_of(gadget, struct qe_udc, gadget);
2288	/* a lock is needed; it is unclear whether this lock or another should be used */
2289	spin_lock_irqsave(&udc->lock, flags);
2290
2291	/* hook up the driver */
2292	udc->driver = driver;
2293	udc->gadget.speed = driver->max_speed;
2294
2295	/* Enable the controller and the default event interrupts */
2296	qe_usb_enable(udc);
2297
2298	out_be16(&udc->usb_regs->usb_usber, 0xffff);
2299	out_be16(&udc->usb_regs->usb_usbmr, USB_E_DEFAULT_DEVICE);
2300	udc->usb_state = USB_STATE_ATTACHED;
2301	udc->ep0_state = WAIT_FOR_SETUP;
2302	udc->ep0_dir = USB_DIR_OUT;
2303	spin_unlock_irqrestore(&udc->lock, flags);
2304
2305	return 0;
2306}
2307
2308static int fsl_qe_stop(struct usb_gadget *gadget)
2309{
2310	struct qe_udc *udc;
2311	struct qe_ep *loop_ep;
2312	unsigned long flags;
2313
2314	udc = container_of(gadget, struct qe_udc, gadget);
2315	/* stop usb controller, disable intr */
2316	qe_usb_disable(udc);
2317
2318	/* in fact, not needed */
2319	udc->usb_state = USB_STATE_ATTACHED;
2320	udc->ep0_state = WAIT_FOR_SETUP;
2321	udc->ep0_dir = 0;
2322
2323	/* standard operation */
2324	spin_lock_irqsave(&udc->lock, flags);
2325	udc->gadget.speed = USB_SPEED_UNKNOWN;
2326	nuke(&udc->eps[0], -ESHUTDOWN);
2327	list_for_each_entry(loop_ep, &udc->gadget.ep_list, ep.ep_list)
2328		nuke(loop_ep, -ESHUTDOWN);
2329	spin_unlock_irqrestore(&udc->lock, flags);
2330
2331	udc->driver = NULL;
2332
2333	return 0;
2334}
2335
2336/* allocate and set up the udc structure, including the ep-param allocation */
2337static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
2338{
2339	struct qe_udc *udc;
2340	struct device_node *np = ofdev->dev.of_node;
2341	unsigned long tmp_addr = 0;
2342	struct usb_device_para __iomem *usbpram;
2343	unsigned int i;
2344	u64 size;
2345	u32 offset;
2346
2347	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
2348	if (!udc)
2349		goto cleanup;
2350
2351	udc->dev = &ofdev->dev;
2352
2353	/* get the address of the USB parameter RAM in MURAM from the device tree */
2354	offset = *of_get_address(np, 1, &size, NULL);
2355	udc->usb_param = cpm_muram_addr(offset);
2356	memset_io(udc->usb_param, 0, size);
2357
2358	usbpram = udc->usb_param;
2359	out_be16(&usbpram->frame_n, 0);
2360	out_be32(&usbpram->rstate, 0);
2361
2362	tmp_addr = cpm_muram_alloc((USB_MAX_ENDPOINTS *
2363					sizeof(struct usb_ep_para)),
2364					   USB_EP_PARA_ALIGNMENT);
2365	if (IS_ERR_VALUE(tmp_addr))
2366		goto cleanup;
2367
2368	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2369		out_be16(&usbpram->epptr[i], (u16)tmp_addr);
2370		udc->ep_param[i] = cpm_muram_addr(tmp_addr);
2371		tmp_addr += 32;
2372	}
2373
2374	memset_io(udc->ep_param[0], 0,
2375			USB_MAX_ENDPOINTS * sizeof(struct usb_ep_para));
2376
2377	udc->resume_state = USB_STATE_NOTATTACHED;
2378	udc->usb_state = USB_STATE_POWERED;
2379	udc->ep0_dir = 0;
2380
2381	spin_lock_init(&udc->lock);
2382	return udc;
2383
2384cleanup:
2385	kfree(udc);
2386	return NULL;
2387}
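
/*
 * Sketch of the MURAM helpers used in qe_udc_config() (standalone
 * illustration, the helper name is hypothetical): cpm_muram_alloc()
 * returns an offset inside the multi-user RAM, cpm_muram_addr() turns it
 * into a CPU-mapped pointer, and cpm_muram_offset()/cpm_muram_free()
 * reverse both steps on teardown, as qe_udc_release() does.
 */
static inline void __iomem *example_muram_get(unsigned long size,
					      unsigned long align)
{
	unsigned long off = cpm_muram_alloc(size, align);

	if (IS_ERR_VALUE(off))
		return NULL;
	return cpm_muram_addr(off);
}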
2388
2389/* USB Controller register init */
2390static int qe_udc_reg_init(struct qe_udc *udc)
2391{
2392	struct usb_ctlr __iomem *qe_usbregs = udc->usb_regs;
2394
2395	/* Spec says that we must enable the USB controller to change mode. */
2396	out_8(&qe_usbregs->usb_usmod, 0x01);
2397	/* Mode changed, now disable it, since muram isn't initialized yet. */
2398	out_8(&qe_usbregs->usb_usmod, 0x00);
2399
2400	/* Initialize the rest. */
2401	out_be16(&qe_usbregs->usb_usbmr, 0);
2402	out_8(&qe_usbregs->usb_uscom, 0);
2403	out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR);
2404
2405	return 0;
2406}
2407
2408static int qe_ep_config(struct qe_udc *udc, unsigned char pipe_num)
2409{
2410	struct qe_ep *ep = &udc->eps[pipe_num];
2411
2412	ep->udc = udc;
2413	strcpy(ep->name, ep_name[pipe_num]);
2414	ep->ep.name = ep_name[pipe_num];
2415
2416	if (pipe_num == 0) {
2417		ep->ep.caps.type_control = true;
2418	} else {
2419		ep->ep.caps.type_iso = true;
2420		ep->ep.caps.type_bulk = true;
2421		ep->ep.caps.type_int = true;
2422	}
2423
2424	ep->ep.caps.dir_in = true;
2425	ep->ep.caps.dir_out = true;
2426
2427	ep->ep.ops = &qe_ep_ops;
2428	ep->stopped = 1;
2429	usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
2430	ep->ep.desc = NULL;
2431	ep->dir = 0xff;
2432	ep->epnum = (u8)pipe_num;
2433	ep->sent = 0;
2434	ep->last = 0;
2435	ep->init = 0;
2436	ep->rxframe = NULL;
2437	ep->txframe = NULL;
2438	ep->tx_req = NULL;
2439	ep->state = EP_STATE_IDLE;
2440	ep->has_data = 0;
2441
2442	/* the queue lists any req for this ep */
2443	INIT_LIST_HEAD(&ep->queue);
2444
2445	/* gadget.ep_list is used for ep_autoconfig, so ep0 is excluded */
2446	if (pipe_num != 0)
2447		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
2448
2449	ep->gadget = &udc->gadget;
2450
2451	return 0;
2452}
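
/*
 * Sketch (not part of this driver) of how a function driver claims one of
 * the endpoints configured above: usb_ep_autoconfig() matches a descriptor
 * against the ep.caps flags set in qe_ep_config().  The helper name is
 * hypothetical and the rest of the descriptor (bLength, wMaxPacketSize,
 * ...) is assumed to be filled in by the caller.
 */
static inline struct usb_ep *example_claim_bulk_in(struct usb_gadget *gadget,
				struct usb_endpoint_descriptor *desc)
{
	desc->bmAttributes = USB_ENDPOINT_XFER_BULK;
	desc->bEndpointAddress = USB_DIR_IN;
	return usb_ep_autoconfig(gadget, desc);
}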
2453
2454/*-----------------------------------------------------------------------
2455 *	UDC device driver operation functions
2456 *----------------------------------------------------------------------*/
2457static void qe_udc_release(struct device *dev)
2458{
2459	struct qe_udc *udc = container_of(dev, struct qe_udc, gadget.dev);
2460	int i;
2461
2462	complete(udc->done);
2463	cpm_muram_free(cpm_muram_offset(udc->ep_param[0]));
2464	for (i = 0; i < USB_MAX_ENDPOINTS; i++)
2465		udc->ep_param[i] = NULL;
2466
2467	kfree(udc);
2468}
2469
2470/* Driver probe functions */
2471static const struct of_device_id qe_udc_match[];
2472static int qe_udc_probe(struct platform_device *ofdev)
2473{
2474	struct qe_udc *udc;
2475	struct device_node *np = ofdev->dev.of_node;
2476	struct qe_ep *ep;
2477	unsigned int ret = 0;
2478	unsigned int i;
2479	const void *prop;
2480
2481	prop = of_get_property(np, "mode", NULL);
2482	if (!prop || strcmp(prop, "peripheral"))
2483		return -ENODEV;
2484
2485	/* Initialize the udc structure, including the MURAM parameters and other members */
2486	udc = qe_udc_config(ofdev);
2487	if (!udc) {
2488		dev_err(&ofdev->dev, "failed to initialize\n");
2489		return -ENOMEM;
2490	}
2491
2492	udc->soc_type = (unsigned long)device_get_match_data(&ofdev->dev);
2493	udc->usb_regs = of_iomap(np, 0);
2494	if (!udc->usb_regs) {
2495		ret = -ENOMEM;
2496		goto err1;
2497	}
2498
2499	/* initialize the USB hardware registers, except the per-EP registers;
2500	 * interrupts stay masked until fsl_qe_start() */
2501	qe_udc_reg_init(udc);
2502
2503	/* here come the standard probe operations:
2504	 * set up the qe_udc->gadget.xxx fields */
2505	udc->gadget.ops = &qe_gadget_ops;
2506
2507	/* gadget.ep0 is a pointer */
2508	udc->gadget.ep0 = &udc->eps[0].ep;
2509
2510	INIT_LIST_HEAD(&udc->gadget.ep_list);
2511
2512	/* updated when a gadget driver is registered */
2513	udc->gadget.speed = USB_SPEED_UNKNOWN;
2514
2515	/* name: Identifies the controller hardware type. */
2516	udc->gadget.name = driver_name;
2517	udc->gadget.dev.parent = &ofdev->dev;
2518
2519	/* initialize qe_ep struct */
2520	for (i = 0; i < USB_MAX_ENDPOINTS ; i++) {
2521		/* the ep type isn't decided here, so
2522		 * qe_ep_init() is called from ep_enable() */
2523
2524		/* set up the qe_ep struct and link ep.ep_list
2525		 * into gadget.ep_list */
2526		qe_ep_config(udc, (unsigned char)i);
2527	}
2528
2529	/* ep0 is initialized here */
2530	ret = qe_ep_init(udc, 0, &qe_ep0_desc);
2531	if (ret)
2532		goto err2;
2533
2534	/* create a buffer for sending ZLPs; it must remain zeroed */
2535	udc->nullbuf = devm_kzalloc(&ofdev->dev, 256, GFP_KERNEL);
2536	if (udc->nullbuf == NULL) {
2537		ret = -ENOMEM;
2538		goto err3;
2539	}
2540
2541	/* buffer for the data stage of GET_STATUS requests */
2542	udc->statusbuf = devm_kzalloc(&ofdev->dev, 2, GFP_KERNEL);
2543	if (udc->statusbuf == NULL) {
2544		ret = -ENOMEM;
2545		goto err3;
2546	}
2547
2548	udc->nullp = virt_to_phys((void *)udc->nullbuf);
2549	if (udc->nullp == DMA_ADDR_INVALID) {
2550		udc->nullp = dma_map_single(
2551					udc->gadget.dev.parent,
2552					udc->nullbuf,
2553					256,
2554					DMA_TO_DEVICE);
2555		udc->nullmap = 1;
2556	} else {
2557		dma_sync_single_for_device(udc->gadget.dev.parent,
2558					udc->nullp, 256,
2559					DMA_TO_DEVICE);
2560	}
2561
2562	tasklet_setup(&udc->rx_tasklet, ep_rx_tasklet);
2563	/* request the IRQ; the controller is enabled later in fsl_qe_start() */
2564	udc->usb_irq = irq_of_parse_and_map(np, 0);
2565	if (!udc->usb_irq) {
2566		ret = -EINVAL;
2567		goto err_noirq;
2568	}
2569
2570	ret = request_irq(udc->usb_irq, qe_udc_irq, 0,
2571				driver_name, udc);
2572	if (ret) {
2573		dev_err(udc->dev, "cannot request irq %d err %d\n",
2574				udc->usb_irq, ret);
2575		goto err4;
2576	}
2577
2578	ret = usb_add_gadget_udc_release(&ofdev->dev, &udc->gadget,
2579			qe_udc_release);
2580	if (ret)
2581		goto err5;
2582
2583	platform_set_drvdata(ofdev, udc);
2584	dev_info(udc->dev,
2585			"%s USB controller initialized as device\n",
2586			(udc->soc_type == PORT_QE) ? "QE" : "CPM");
2587	return 0;
2588
2589err5:
2590	free_irq(udc->usb_irq, udc);
2591err4:
2592	irq_dispose_mapping(udc->usb_irq);
2593err_noirq:
2594	if (udc->nullmap) {
2595		dma_unmap_single(udc->gadget.dev.parent,
2596			udc->nullp, 256,
2597				DMA_TO_DEVICE);
2598			udc->nullp = DMA_ADDR_INVALID;
2599	} else {
2600		dma_sync_single_for_cpu(udc->gadget.dev.parent,
2601			udc->nullp, 256,
2602				DMA_TO_DEVICE);
2603	}
2604err3:
2605	ep = &udc->eps[0];
2606	cpm_muram_free(cpm_muram_offset(ep->rxbase));
2607	kfree(ep->rxframe);
2608	kfree(ep->rxbuffer);
2609	kfree(ep->txframe);
2610err2:
2611	iounmap(udc->usb_regs);
2612err1:
2613	kfree(udc);
2614	return ret;
2615}
2616
2617#ifdef CONFIG_PM
2618static int qe_udc_suspend(struct platform_device *dev, pm_message_t state)
2619{
2620	return -ENOTSUPP;
2621}
2622
2623static int qe_udc_resume(struct platform_device *dev)
2624{
2625	return -ENOTSUPP;
2626}
2627#endif
2628
2629static void qe_udc_remove(struct platform_device *ofdev)
2630{
2631	struct qe_udc *udc = platform_get_drvdata(ofdev);
2632	struct qe_ep *ep;
2633	unsigned int size;
2634	DECLARE_COMPLETION_ONSTACK(done);
2635
2636	usb_del_gadget_udc(&udc->gadget);
2637
2638	udc->done = &done;
2639	tasklet_disable(&udc->rx_tasklet);
2640
2641	if (udc->nullmap) {
2642		dma_unmap_single(udc->gadget.dev.parent,
2643			udc->nullp, 256,
2644				DMA_TO_DEVICE);
2645			udc->nullp = DMA_ADDR_INVALID;
2646	} else {
2647		dma_sync_single_for_cpu(udc->gadget.dev.parent,
2648			udc->nullp, 256,
2649				DMA_TO_DEVICE);
2650	}
2651
2652	ep = &udc->eps[0];
2653	cpm_muram_free(cpm_muram_offset(ep->rxbase));
2654	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN + 1);
2655
2656	kfree(ep->rxframe);
2657	if (ep->rxbufmap) {
2658		dma_unmap_single(udc->gadget.dev.parent,
2659				ep->rxbuf_d, size,
2660				DMA_FROM_DEVICE);
2661		ep->rxbuf_d = DMA_ADDR_INVALID;
2662	} else {
2663		dma_sync_single_for_cpu(udc->gadget.dev.parent,
2664				ep->rxbuf_d, size,
2665				DMA_FROM_DEVICE);
2666	}
2667
2668	kfree(ep->rxbuffer);
2669	kfree(ep->txframe);
2670
2671	free_irq(udc->usb_irq, udc);
2672	irq_dispose_mapping(udc->usb_irq);
2673
2674	tasklet_kill(&udc->rx_tasklet);
2675
2676	iounmap(udc->usb_regs);
2677
2678	/* wait for release() of gadget.dev to free udc */
2679	wait_for_completion(&done);
2680}
2681
2682/*-------------------------------------------------------------------------*/
2683static const struct of_device_id qe_udc_match[] = {
2684	{
2685		.compatible = "fsl,mpc8323-qe-usb",
2686		.data = (void *)PORT_QE,
2687	},
2688	{
2689		.compatible = "fsl,mpc8360-qe-usb",
2690		.data = (void *)PORT_QE,
2691	},
2692	{
2693		.compatible = "fsl,mpc8272-cpm-usb",
2694		.data = (void *)PORT_CPM,
2695	},
2696	{},
2697};
2698
2699MODULE_DEVICE_TABLE(of, qe_udc_match);
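
/*
 * Hedged device-tree example (the unit address, reg values and interrupt
 * specifier are illustrative, not taken from a real board): the first reg
 * entry is the controller register block mapped by of_iomap(), the second
 * is the MURAM parameter area used by qe_udc_config(), and the driver only
 * binds when mode = "peripheral".
 *
 *	usb@6c0 {
 *		compatible = "fsl,mpc8360-qe-usb";
 *		reg = <0x6c0 0x40 0x8b00 0x100>;
 *		interrupt-parent = <&qeic>;
 *		interrupts = <11>;
 *		mode = "peripheral";
 *	};
 */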
2700
2701static struct platform_driver udc_driver = {
2702	.driver = {
2703		.name = driver_name,
2704		.of_match_table = qe_udc_match,
2705	},
2706	.probe          = qe_udc_probe,
2707	.remove_new     = qe_udc_remove,
2708#ifdef CONFIG_PM
2709	.suspend        = qe_udc_suspend,
2710	.resume         = qe_udc_resume,
2711#endif
2712};
2713
2714module_platform_driver(udc_driver);
2715
2716MODULE_DESCRIPTION(DRIVER_DESC);
2717MODULE_AUTHOR(DRIVER_AUTHOR);
2718MODULE_LICENSE("GPL");
2719