// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */

#include <common.h>
#include <command.h>
#include <config.h>
#include <cpu_func.h>
#include <net.h>
#include <malloc.h>
#include <wait_bit.h>
#include <asm/byteorder.h>
#include <asm/cache.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/unaligned.h>
#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <usb/ci_udc.h>
#include "../host/ehci.h"
#include "ci_udc.h"

/*
 * Check if the system has overly long cachelines. If the cachelines are
 * longer than 128 bytes, the driver will not be able to flush/invalidate
 * the data cache over separate QH entries. We use 128 bytes because one
 * QH entry is 64 bytes long and there are always two QH list entries for
 * each endpoint.
 */
#if ARCH_DMA_MINALIGN > 128
#error This driver can not work on systems with caches longer than 128b
#endif

/*
 * Every QTD must be individually aligned, since we can program any
 * QTD's address into HW. Cache flushing requires ARCH_DMA_MINALIGN,
 * and the USB HW requires 32-byte alignment. Align to both:
 */
#define ILIST_ALIGN		roundup(ARCH_DMA_MINALIGN, 32)
/* Each QTD is this size */
#define ILIST_ENT_RAW_SZ	sizeof(struct ept_queue_item)
/*
 * Align the size of the QTD too, so we can add this value to each
 * QTD's address to get another aligned address.
 */
#define ILIST_ENT_SZ		roundup(ILIST_ENT_RAW_SZ, ILIST_ALIGN)
/* For each endpoint, we need 2 QTDs, one for each of IN and OUT */
#define ILIST_SZ		(NUM_ENDPOINTS * 2 * ILIST_ENT_SZ)
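
/*
 * Worked example (illustrative, assuming 64-byte cachelines and a
 * 32-byte ept_queue_item): ILIST_ALIGN == 64 and ILIST_ENT_SZ ==
 * roundup(32, 64) == 64, so each endpoint consumes 128 bytes of the
 * statically allocated qTD list.
 */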

#define EP_MAX_LENGTH_TRANSFER	0x4000
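/*
 * A dTD carries five 4 KiB page pointers (page0..page4 below), so
 * capping each dTD at 16 KiB guarantees the data fits within those
 * pages for any starting offset inside the first page.
 */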

#ifndef DEBUG
#define DBG(x...) do {} while (0)
#else
#define DBG(x...) printf(x)
static const char *reqname(unsigned r)
{
	switch (r) {
	case USB_REQ_GET_STATUS: return "GET_STATUS";
	case USB_REQ_CLEAR_FEATURE: return "CLEAR_FEATURE";
	case USB_REQ_SET_FEATURE: return "SET_FEATURE";
	case USB_REQ_SET_ADDRESS: return "SET_ADDRESS";
	case USB_REQ_GET_DESCRIPTOR: return "GET_DESCRIPTOR";
	case USB_REQ_SET_DESCRIPTOR: return "SET_DESCRIPTOR";
	case USB_REQ_GET_CONFIGURATION: return "GET_CONFIGURATION";
	case USB_REQ_SET_CONFIGURATION: return "SET_CONFIGURATION";
	case USB_REQ_GET_INTERFACE: return "GET_INTERFACE";
	case USB_REQ_SET_INTERFACE: return "SET_INTERFACE";
	default: return "*UNKNOWN*";
	}
}
#endif

static struct usb_endpoint_descriptor ep0_desc = {
	.bLength = sizeof(struct usb_endpoint_descriptor),
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
};

static int ci_pullup(struct usb_gadget *gadget, int is_on);
static int ci_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc);
static int ci_ep_disable(struct usb_ep *ep);
static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags);
static int ci_ep_dequeue(struct usb_ep *ep, struct usb_request *req);
static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags);
static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req);

static const struct usb_gadget_ops ci_udc_ops = {
	.pullup = ci_pullup,
};

static const struct usb_ep_ops ci_ep_ops = {
	.enable         = ci_ep_enable,
	.disable        = ci_ep_disable,
	.queue          = ci_ep_queue,
	.dequeue        = ci_ep_dequeue,
	.alloc_request  = ci_ep_alloc_request,
	.free_request   = ci_ep_free_request,
};

__weak void ci_init_after_reset(struct ehci_ctrl *ctrl)
{
}

/* Init values for USB endpoints. */
static const struct usb_ep ci_ep_init[5] = {
	[0] = {	/* EP 0 */
		.maxpacket	= 64,
		.name		= "ep0",
		.ops		= &ci_ep_ops,
	},
	[1] = {
		.maxpacket	= 512,
		.name		= "ep1in-bulk",
		.ops		= &ci_ep_ops,
	},
	[2] = {
		.maxpacket	= 512,
		.name		= "ep2out-bulk",
		.ops		= &ci_ep_ops,
	},
	[3] = {
		.maxpacket	= 512,
		.name		= "ep3in-int",
		.ops		= &ci_ep_ops,
	},
	[4] = {
		.maxpacket	= 512,
		.name		= "ep-",
		.ops		= &ci_ep_ops,
	},
};

static struct ci_drv controller = {
	.gadget	= {
		.name	= "ci_udc",
		.ops	= &ci_udc_ops,
		.is_dualspeed = 1,
		.max_speed = USB_SPEED_HIGH,
	},
};

/**
 * ci_get_qh() - return queue head for endpoint
 * @ep_num:	Endpoint number
 * @dir_in:	Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the QH associated with a particular endpoint
 * and its direction.
 */
static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in)
{
	return &controller.epts[(ep_num * 2) + dir_in];
}

/**
 * ci_get_qtd() - return queue item for endpoint
 * @ep_num:	Endpoint number
 * @dir_in:	Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the qTD associated with a particular endpoint
 * and its direction.
 */
static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in)
{
	int index = (ep_num * 2) + dir_in;
	uint8_t *imem = controller.items_mem + (index * ILIST_ENT_SZ);

	return (struct ept_queue_item *)imem;
}
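
/*
 * Example (illustrative): with the layout above, EP2 IN lives at
 * index (2 * 2) + 1 == 5, i.e. its QH is epts[5] and its static
 * qTD is at items_mem + 5 * ILIST_ENT_SZ.
 */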

/**
 * ci_flush_qh - flush cache over queue head
 * @ep_num:	Endpoint number
 *
 * This function flushes cache over the QH pair (OUT and IN) for a
 * particular endpoint.
 */
static void ci_flush_qh(int ep_num)
{
	struct ept_queue_head *head = ci_get_qh(ep_num, 0);
	const unsigned long start = (unsigned long)head;
	const unsigned long end = start + 2 * sizeof(*head);

	flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qh - invalidate cache over queue head
 * @ep_num:	Endpoint number
 *
 * This function invalidates cache over the QH pair (OUT and IN) for a
 * particular endpoint.
 */
static void ci_invalidate_qh(int ep_num)
{
	struct ept_queue_head *head = ci_get_qh(ep_num, 0);
	const unsigned long start = (unsigned long)head;
	const unsigned long end = start + 2 * sizeof(*head);

	invalidate_dcache_range(start, end);
}

/**
 * ci_flush_qtd - flush cache over queue item
 * @ep_num:	Endpoint number
 *
 * This function flushes cache over the qTD pair for a particular
 * endpoint.
 */
static void ci_flush_qtd(int ep_num)
{
	struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
	const unsigned long start = (unsigned long)item;
	const unsigned long end = start + 2 * ILIST_ENT_SZ;

	flush_dcache_range(start, end);
}

/**
 * ci_flush_td - flush cache over a single transfer descriptor
 * @td:	td pointer
 *
 * This function flushes cache for a particular transfer descriptor.
 */
static void ci_flush_td(struct ept_queue_item *td)
{
	const unsigned long start = (unsigned long)td;
	const unsigned long end = start + ILIST_ENT_SZ;

	flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qtd - invalidate cache over queue item
 * @ep_num:	Endpoint number
 *
 * This function invalidates cache over the qTD pair for a particular
 * endpoint.
 */
static void ci_invalidate_qtd(int ep_num)
{
	struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
	const unsigned long start = (unsigned long)item;
	const unsigned long end = start + 2 * ILIST_ENT_SZ;

	invalidate_dcache_range(start, end);
}

/**
 * ci_invalidate_td - invalidate cache over a single transfer descriptor
 * @td:	td pointer
 *
 * This function invalidates cache for a particular transfer descriptor.
 */
static void ci_invalidate_td(struct ept_queue_item *td)
{
	const unsigned long start = (unsigned long)td;
	const unsigned long end = start + ILIST_ENT_SZ;

	invalidate_dcache_range(start, end);
}

static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	int num = -1;
	struct ci_req *ci_req;

	if (ci_ep->desc)
		num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;

	if (num == 0 && controller.ep0_req)
		return &controller.ep0_req->req;

	ci_req = calloc(1, sizeof(*ci_req));
	if (!ci_req)
		return NULL;

	INIT_LIST_HEAD(&ci_req->queue);

	if (num == 0)
		controller.ep0_req = ci_req;

	return &ci_req->req;
}

static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	struct ci_req *ci_req = container_of(req, struct ci_req, req);
	int num = -1;

	if (ci_ep->desc)
		num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;

	if (num == 0) {
		if (!controller.ep0_req)
			return;
		controller.ep0_req = NULL;
	}

	if (ci_req->b_buf)
		free(ci_req->b_buf);
	free(ci_req);
}

static void ep_enable(int num, int in, int maxpacket)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned n;

	n = readl(&udc->epctrl[num]);
	if (in)
		n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK);
	else
		n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK);

	if (num != 0) {
		struct ept_queue_head *head = ci_get_qh(num, in);

		head->config = CFG_MAX_PKT(maxpacket) | CFG_ZLT;
		ci_flush_qh(num);
	}
	writel(n, &udc->epctrl[num]);
}

static int ci_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	int num, in;

	num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
	ci_ep->desc = desc;
	ep->desc = desc;

	if (num) {
		int max = get_unaligned_le16(&desc->wMaxPacketSize);

		if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL))
			max = 64;
		if (ep->maxpacket != max) {
			DBG("%s: from %d to %d\n", __func__,
			    ep->maxpacket, max);
			ep->maxpacket = max;
		}
	}
	ep_enable(num, in, ep->maxpacket);
	DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket);
	return 0;
}

static int ep_disable(int num, int in)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned int ep_bit, enable_bit;
	int err;

	if (in) {
		ep_bit = EPT_TX(num);
		enable_bit = CTRL_TXE;
	} else {
		ep_bit = EPT_RX(num);
		enable_bit = CTRL_RXE;
	}

	/* clear primed buffers */
	do {
		writel(ep_bit, &udc->epflush);
		err = wait_for_bit_le32(&udc->epflush, ep_bit, false, 1000,
					false);
		if (err)
			return err;
	} while (readl(&udc->epstat) & ep_bit);

	/* clear enable bit */
	clrbits_le32(&udc->epctrl[num], enable_bit);

	return 0;
}

static int ci_ep_disable(struct usb_ep *ep)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	int num, in, err;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;

	err = ep_disable(num, in);
	if (err)
		return err;

	ci_ep->desc = NULL;
	ep->desc = NULL;
	ci_ep->req_primed = false;
	return 0;
}

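/*
 * Prepare req->buf for DMA: if the buffer address and length are
 * already ARCH_DMA_MINALIGN aligned it is used directly; otherwise
 * the data is staged through a cache-line aligned bounce buffer.
 * Either way, the range handed to the hardware gets flushed.
 */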
static int ci_bounce(struct ci_req *ci_req, int in)
{
	struct usb_request *req = &ci_req->req;
	unsigned long addr = (unsigned long)req->buf;
	unsigned long hwaddr;
	uint32_t aligned_used_len;

	/* Input buffer address is not aligned. */
	if (addr & (ARCH_DMA_MINALIGN - 1))
		goto align;

	/* Input buffer length is not aligned. */
	if (req->length & (ARCH_DMA_MINALIGN - 1))
		goto align;

	/* The buffer is well aligned, only flush cache. */
	ci_req->hw_len = req->length;
	ci_req->hw_buf = req->buf;
	goto flush;

align:
	if (ci_req->b_buf && req->length > ci_req->b_len) {
		free(ci_req->b_buf);
		ci_req->b_buf = NULL;
	}
	if (!ci_req->b_buf) {
		ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN);
		ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len);
		if (!ci_req->b_buf)
			return -ENOMEM;
	}
	ci_req->hw_len = ci_req->b_len;
	ci_req->hw_buf = ci_req->b_buf;

	if (in)
		memcpy(ci_req->hw_buf, req->buf, req->length);

flush:
	hwaddr = (unsigned long)ci_req->hw_buf;
	if (!hwaddr)
		return 0;

	aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN);
	flush_dcache_range(hwaddr, hwaddr + aligned_used_len);

	return 0;
}

static void ci_debounce(struct ci_req *ci_req, int in)
{
	struct usb_request *req = &ci_req->req;
	unsigned long addr = (unsigned long)req->buf;
	unsigned long hwaddr = (unsigned long)ci_req->hw_buf;
	uint32_t aligned_used_len;

	if (in || !hwaddr)
		return;

	aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN);
	invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len);

	if (addr == hwaddr)
		return; /* not a bounce */

	memcpy(req->buf, ci_req->hw_buf, req->actual);
}

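/*
 * Build the dTD chain for the request at the head of the endpoint's
 * queue (the endpoint's static dTD plus dynamically allocated ones
 * for anything beyond EP_MAX_LENGTH_TRANSFER), flush it to memory,
 * and prime the endpoint so the controller starts the transfer.
 */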
static void ci_ep_submit_next_request(struct ci_ep *ci_ep)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_item *item;
	struct ept_queue_head *head;
	int bit, num, len, in;
	struct ci_req *ci_req;
	u8 *buf;
	uint32_t len_left, len_this_dtd;
	struct ept_queue_item *dtd, *qtd;

	ci_ep->req_primed = true;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	item = ci_get_qtd(num, in);
	head = ci_get_qh(num, in);

	ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);
	len = ci_req->req.length;

	head->next = (unsigned long)item;
	head->info = 0;

	ci_req->dtd_count = 0;
	buf = ci_req->hw_buf;
	len_left = len;
	dtd = item;

	do {
		len_this_dtd = min(len_left, (unsigned)EP_MAX_LENGTH_TRANSFER);

		dtd->info = INFO_BYTES(len_this_dtd) | INFO_ACTIVE;
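		/*
		 * page0 carries the buffer address including its offset
		 * into the first 4 KiB page; page1..page4 address the
		 * following pages, so a single dTD can span up to five
		 * 4 KiB pages.
		 */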
		dtd->page0 = (unsigned long)buf;
		dtd->page1 = ((unsigned long)buf & 0xfffff000) + 0x1000;
		dtd->page2 = ((unsigned long)buf & 0xfffff000) + 0x2000;
		dtd->page3 = ((unsigned long)buf & 0xfffff000) + 0x3000;
		dtd->page4 = ((unsigned long)buf & 0xfffff000) + 0x4000;

		len_left -= len_this_dtd;
		buf += len_this_dtd;

		if (len_left) {
			qtd = (struct ept_queue_item *)
			       memalign(ILIST_ALIGN, ILIST_ENT_SZ);
			dtd->next = (unsigned long)qtd;
			dtd = qtd;
			memset(dtd, 0, ILIST_ENT_SZ);
		}

		ci_req->dtd_count++;
	} while (len_left);

	item = dtd;
	/*
	 * When sending the data for an IN transaction, the attached host
	 * knows that all data for the IN is sent when one of the following
	 * occurs:
	 * a) A zero-length packet is transmitted.
	 * b) A packet with length that isn't an exact multiple of the ep's
	 *    maxpacket is transmitted.
	 * c) Enough data is sent to exactly fill the host's maximum expected
	 *    IN transaction size.
	 *
	 * One of these conditions MUST apply at the end of an IN transaction,
	 * or the transaction will not be considered complete by the host. If
	 * none of (a)..(c) already applies, then we must force (a) to apply
	 * by explicitly sending an extra zero-length packet.
	 */
	/*  IN    !a     !b                              !c */
	if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) {
		/*
		 * Each endpoint has 2 items allocated, even though typically
		 * only 1 is used at a time since either an IN or an OUT but
		 * not both is queued. For an IN transaction, item currently
		 * points at the second of these items, so we know that we
		 * can use the other to transmit the extra zero-length packet.
		 */
		struct ept_queue_item *other_item = ci_get_qtd(num, 0);

		item->next = (unsigned long)other_item;
		item = other_item;
		item->info = INFO_ACTIVE;
	}

	item->next = TERMINATE;
	item->info |= INFO_IOC;

	ci_flush_qtd(num);

	item = (struct ept_queue_item *)(unsigned long)head->next;
	while (item->next != TERMINATE) {
		ci_flush_td((struct ept_queue_item *)(unsigned long)item->next);
		item = (struct ept_queue_item *)(unsigned long)item->next;
	}

	DBG("ept%d %s queue len %x, req %p, buffer %p\n",
	    num, in ? "in" : "out", len, ci_req, ci_req->hw_buf);
	ci_flush_qh(num);

	if (in)
		bit = EPT_TX(num);
	else
		bit = EPT_RX(num);

	writel(bit, &udc->epprime);
}

static int ci_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct ci_ep *ci_ep = container_of(_ep, struct ci_ep, ep);
	struct ci_req *ci_req;

	list_for_each_entry(ci_req, &ci_ep->queue, queue) {
		if (&ci_req->req == _req)
			break;
	}

	if (&ci_req->req != _req)
		return -EINVAL;

	list_del_init(&ci_req->queue);

	if (ci_req->req.status == -EINPROGRESS) {
		ci_req->req.status = -ECONNRESET;
		if (ci_req->req.complete)
			ci_req->req.complete(_ep, _req);
	}

	return 0;
}

static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	struct ci_req *ci_req = container_of(req, struct ci_req, req);
	int in, ret;
	int __maybe_unused num;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;

	if (!num && ci_ep->req_primed) {
		/*
		 * The flipping of ep0 between IN and OUT relies on
		 * ci_ep_queue consuming the current IN/OUT setting
		 * immediately. If this is deferred to a later point when the
		 * req is pulled out of ci_req->queue, then the IN/OUT setting
		 * may have been changed since the req was queued, and state
		 * will get out of sync. This condition doesn't occur today,
		 * but could if bugs were introduced later, and this error
		 * check will save a lot of debugging time.
		 */
		printf("%s: ep0 transaction already in progress\n", __func__);
		return -EPROTO;
	}

	ret = ci_bounce(ci_req, in);
	if (ret)
		return ret;

	DBG("ept%d %s pre-queue req %p, buffer %p\n",
	    num, in ? "in" : "out", ci_req, ci_req->hw_buf);
	list_add_tail(&ci_req->queue, &ci_ep->queue);

	if (!ci_ep->req_primed)
		ci_ep_submit_next_request(ci_ep);

	return 0;
}

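/*
 * A control transfer's Status Stage always moves a zero-length
 * packet in the direction opposite to the preceding stage, so ep0
 * keeps alternating between IN and OUT; ep0_desc.bEndpointAddress
 * tracks the direction the next ep0 transaction will use.
 */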
static void flip_ep0_direction(void)
{
	if (ep0_desc.bEndpointAddress == USB_DIR_IN) {
		DBG("%s: Flipping ep0 to OUT\n", __func__);
		ep0_desc.bEndpointAddress = 0;
	} else {
		DBG("%s: Flipping ep0 to IN\n", __func__);
		ep0_desc.bEndpointAddress = USB_DIR_IN;
	}
}

static void handle_ep_complete(struct ci_ep *ci_ep)
{
	struct ept_queue_item *item, *next_td;
	int num, in, len, j;
	struct ci_req *ci_req;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	item = ci_get_qtd(num, in);
	ci_invalidate_qtd(num);
	ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);

	next_td = item;
	len = 0;
	for (j = 0; j < ci_req->dtd_count; j++) {
		ci_invalidate_td(next_td);
		item = next_td;
		len += (item->info >> 16) & 0x7fff;
		if (item->info & 0xff)
			printf("EP%d/%s FAIL info=%x pg0=%x\n",
			       num, in ? "in" : "out", item->info, item->page0);
		if (j != ci_req->dtd_count - 1)
			next_td = (struct ept_queue_item *)(unsigned long)
				item->next;
		if (j != 0)
			free(item);
	}

	list_del_init(&ci_req->queue);
	ci_ep->req_primed = false;

	if (!list_empty(&ci_ep->queue))
		ci_ep_submit_next_request(ci_ep);

	ci_req->req.actual = ci_req->req.length - len;
	ci_debounce(ci_req, in);

	DBG("ept%d %s req %p, complete %x\n",
	    num, in ? "in" : "out", ci_req, len);
	if (num != 0 || controller.ep0_data_phase)
		ci_req->req.complete(&ci_ep->ep, &ci_req->req);
	if (num == 0 && controller.ep0_data_phase) {
		/*
		 * Data Stage is complete, so flip ep0 dir for Status Stage,
		 * which always transfers a packet in the opposite direction.
		 */
		DBG("%s: flip ep0 dir for Status Stage\n", __func__);
		flip_ep0_direction();
		controller.ep0_data_phase = false;
		ci_req->req.length = 0;
		usb_ep_queue(&ci_ep->ep, &ci_req->req, 0);
	}
}

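/* Pack bRequestType and bRequest into one value for switch matching */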
#define SETUP(type, request) (((type) << 8) | (request))

static void handle_setup(void)
{
	struct ci_ep *ci_ep = &controller.ep[0];
	struct ci_req *ci_req;
	struct usb_request *req;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_head *head;
	struct usb_ctrlrequest r;
	int status = 0;
	int num, in, _num, _in, i;
	char *buf;

	ci_req = controller.ep0_req;
	req = &ci_req->req;
	head = ci_get_qh(0, 0);	/* EP0 OUT */

	ci_invalidate_qh(0);
	memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest));
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	writel(EPT_RX(0), &udc->epsetupstat);
#else
	writel(EPT_RX(0), &udc->epstat);
#endif
	DBG("handle setup %s, %x, %x index %x value %x length %x\n",
	    reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex,
	    r.wValue, r.wLength);

	/* Set EP0 dir for Data Stage based on Setup Stage data */
	if (r.bRequestType & USB_DIR_IN) {
		DBG("%s: Set ep0 to IN for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = USB_DIR_IN;
	} else {
		DBG("%s: Set ep0 to OUT for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = 0;
	}
	if (r.wLength) {
		controller.ep0_data_phase = true;
	} else {
		/* 0 length -> no Data Stage. Flip dir for Status Stage */
		DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__);
		flip_ep0_direction();
		controller.ep0_data_phase = false;
	}

	list_del_init(&ci_req->queue);
	ci_ep->req_primed = false;

	switch (SETUP(r.bRequestType, r.bRequest)) {
	case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE):
		_num = r.wIndex & 15;
		_in = !!(r.wIndex & 0x80);

		if ((r.wValue == 0) && (r.wLength == 0)) {
			req->length = 0;
			for (i = 0; i < NUM_ENDPOINTS; i++) {
				struct ci_ep *ep = &controller.ep[i];

				if (!ep->desc)
					continue;
				num = ep->desc->bEndpointAddress
						& USB_ENDPOINT_NUMBER_MASK;
				in = (ep->desc->bEndpointAddress
						& USB_DIR_IN) != 0;
				if ((num == _num) && (in == _in)) {
					ep_enable(num, in, ep->ep.maxpacket);
					usb_ep_queue(controller.gadget.ep0,
							req, 0);
					break;
				}
			}
		}
		return;

	case SETUP(USB_RECIP_DEVICE, USB_REQ_SET_ADDRESS):
		/*
		 * write address delayed (will take effect
		 * after the next IN txn)
		 */
		writel((r.wValue << 25) | (1 << 24), &udc->devaddr);
		req->length = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;

	case SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS):
		req->length = 2;
		buf = (char *)req->buf;
		buf[0] = 1 << USB_DEVICE_SELF_POWERED;
		buf[1] = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;
	}
	/* pass request up to the gadget driver */
	if (controller.driver)
		status = controller.driver->setup(&controller.gadget, &r);
	else
		status = -ENODEV;

	if (!status)
		return;
	DBG("STALL reqname %s type %x value %x, index %x\n",
	    reqname(r.bRequest), r.bRequestType, r.wValue, r.wIndex);
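	/* Stall ep0 in both directions: RXS is bit 0, TXS is bit 16 */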
	writel((1 << 16) | (1 << 0), &udc->epctrl[0]);
}

static void stop_activity(void)
{
	int i, num, in;
	struct ept_queue_head *head;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

	writel(readl(&udc->epcomp), &udc->epcomp);
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	writel(readl(&udc->epsetupstat), &udc->epsetupstat);
#endif
	writel(readl(&udc->epstat), &udc->epstat);
	writel(0xffffffff, &udc->epflush);

	/* error out any pending reqs */
	for (i = 0; i < NUM_ENDPOINTS; i++) {
		if (i != 0)
			writel(0, &udc->epctrl[i]);
		if (controller.ep[i].desc) {
			num = controller.ep[i].desc->bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK;
			in = (controller.ep[i].desc->bEndpointAddress
				& USB_DIR_IN) != 0;
			head = ci_get_qh(num, in);
			head->info = INFO_ACTIVE;
			ci_flush_qh(num);
		}
	}
}

void udc_irq(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned n = readl(&udc->usbsts);
	int bit, i, num, in;

	writel(n, &udc->usbsts);

	n &= (STS_SLI | STS_URI | STS_PCI | STS_UI | STS_UEI);
	if (n == 0)
		return;

	if (n & STS_URI) {
		DBG("-- reset --\n");
		stop_activity();
	}
	if (n & STS_SLI)
		DBG("-- suspend --\n");

	if (n & STS_PCI) {
		int max = 64;
		int speed = USB_SPEED_FULL;

#ifdef CONFIG_CI_UDC_HAS_HOSTPC
		bit = (readl(&udc->hostpc1_devlc) >> 25) & 3;
#else
		bit = (readl(&udc->portsc) >> 26) & 3;
#endif
		DBG("-- portchange %x %s\n", bit, (bit == 2) ? "High" : "Full");
		if (bit == 2) {
			speed = USB_SPEED_HIGH;
			max = 512;
		}
		controller.gadget.speed = speed;
		for (i = 1; i < NUM_ENDPOINTS; i++) {
			if (controller.ep[i].ep.maxpacket > max)
				controller.ep[i].ep.maxpacket = max;
		}
	}

	if (n & STS_UEI)
		printf("<UEI %x>\n", readl(&udc->epcomp));

	if ((n & STS_UI) || (n & STS_UEI)) {
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
		n = readl(&udc->epsetupstat);
#else
		n = readl(&udc->epstat);
#endif
		if (n & EPT_RX(0))
			handle_setup();

		n = readl(&udc->epcomp);
		if (n != 0)
			writel(n, &udc->epcomp);

		for (i = 0; i < NUM_ENDPOINTS && n; i++) {
			if (controller.ep[i].desc) {
				num = controller.ep[i].desc->bEndpointAddress
					& USB_ENDPOINT_NUMBER_MASK;
				in = (controller.ep[i].desc->bEndpointAddress
						& USB_DIR_IN) != 0;
				bit = (in) ? EPT_TX(num) : EPT_RX(num);
				if (n & bit)
					handle_ep_complete(&controller.ep[i]);
			}
		}
	}
}

int dm_usb_gadget_handle_interrupts(struct udevice *dev)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	u32 value;

	value = readl(&udc->usbsts);
	if (value)
		udc_irq();

	return value;
}

void udc_disconnect(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

	/* Disable the pullup: stop activity, then clear USBCMD_RUN to detach */
	stop_activity();
	writel(USBCMD_FS2, &udc->usbcmd);
	udelay(800);
	if (controller.driver)
		controller.driver->disconnect(&controller.gadget);
}

static int ci_pullup(struct usb_gadget *gadget, int is_on)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

	if (is_on) {
		/* RESET */
		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd);
		udelay(200);

		ci_init_after_reset(controller.ctrl);

		writel((unsigned long)controller.epts, &udc->epinitaddr);

		/* select DEVICE mode */
		writel(USBMODE_DEVICE, &udc->usbmode);

#if !defined(CONFIG_USB_GADGET_DUALSPEED)
		/* Port force Full-Speed Connect */
		setbits_le32(&udc->portsc, PFSC);
#endif

		writel(0xffffffff, &udc->epflush);

		/* Turn on the USB connection by enabling the pullup resistor */
		setbits_le32(&udc->usbcmd, USBCMD_ITC(MICRO_8FRAME) |
			     USBCMD_RUN);
	} else {
		udc_disconnect();
	}

	return 0;
}

static int ci_udc_probe(void)
{
	struct ept_queue_head *head;
	int i;

	const int num = 2 * NUM_ENDPOINTS;

	const int eplist_min_align = 4096;
	const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN);
	const int eplist_raw_sz = num * sizeof(struct ept_queue_head);
	const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN);

	/* The QH list must be aligned to 4096 bytes. */
	controller.epts = memalign(eplist_align, eplist_sz);
	if (!controller.epts)
		return -ENOMEM;
	memset(controller.epts, 0, eplist_sz);

	controller.items_mem = memalign(ILIST_ALIGN, ILIST_SZ);
	if (!controller.items_mem) {
		free(controller.epts);
		return -ENOMEM;
	}
	memset(controller.items_mem, 0, ILIST_SZ);

	for (i = 0; i < 2 * NUM_ENDPOINTS; i++) {
		/*
		 * Configure the QH for each endpoint. The structure of the
		 * QH list is such that each two subsequent entries, N and
		 * N + 1 where N is even, represent the QHs of one endpoint:
		 * the Nth entry holds the OUT configuration and the N+1th
		 * entry holds the IN configuration of that endpoint.
		 */
		head = controller.epts + i;
		if (i < 2)
			head->config = CFG_MAX_PKT(EP0_MAX_PACKET_SIZE)
				| CFG_ZLT | CFG_IOS;
		else
			head->config = CFG_MAX_PKT(EP_MAX_PACKET_SIZE)
				| CFG_ZLT;
		head->next = TERMINATE;
		head->info = 0;

		if (i & 1) {
			ci_flush_qh(i / 2);
			ci_flush_qtd(i / 2);
		}
	}

	INIT_LIST_HEAD(&controller.gadget.ep_list);

	/* Init EP 0 */
	memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init));
	controller.ep[0].desc = &ep0_desc;
	INIT_LIST_HEAD(&controller.ep[0].queue);
	controller.ep[0].req_primed = false;
	controller.gadget.ep0 = &controller.ep[0].ep;
	INIT_LIST_HEAD(&controller.gadget.ep0->ep_list);

	/* Init EP 1..3 */
	for (i = 1; i < 4; i++) {
		memcpy(&controller.ep[i].ep, &ci_ep_init[i],
		       sizeof(*ci_ep_init));
		INIT_LIST_HEAD(&controller.ep[i].queue);
		controller.ep[i].req_primed = false;
		list_add_tail(&controller.ep[i].ep.ep_list,
			      &controller.gadget.ep_list);
	}

	/* Init EP 4..n, using EP 4 as the template */
	for (i = 4; i < NUM_ENDPOINTS; i++) {
		memcpy(&controller.ep[i].ep, &ci_ep_init[4],
		       sizeof(*ci_ep_init));
		INIT_LIST_HEAD(&controller.ep[i].queue);
		controller.ep[i].req_primed = false;
		list_add_tail(&controller.ep[i].ep.ep_list,
			      &controller.gadget.ep_list);
	}

	ci_ep_alloc_request(&controller.ep[0].ep, 0);
	if (!controller.ep0_req) {
		free(controller.items_mem);
		free(controller.epts);
		return -ENOMEM;
	}

	return 0;
}

int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	int ret;

	if (!driver)
		return -EINVAL;
	if (!driver->bind || !driver->setup || !driver->disconnect)
		return -EINVAL;

#if CONFIG_IS_ENABLED(DM_USB)
	ret = usb_setup_ehci_gadget(&controller.ctrl);
#else
	ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl);
#endif
	if (ret)
		return ret;

	ret = ci_udc_probe();
	if (ret) {
		DBG("udc probe failed, returned %d\n", ret);
		return ret;
	}

	ret = driver->bind(&controller.gadget);
	if (ret) {
		DBG("driver->bind() returned %d\n", ret);
		return ret;
	}
	controller.driver = driver;

	return 0;
}

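/*
 * Illustrative usage sketch (not part of this driver; names are
 * hypothetical): a gadget driver registers itself, services the
 * controller until it is done, then unregisters.
 *
 *	ret = usb_gadget_register_driver(&my_gadget_driver);
 *	if (!ret) {
 *		while (!done)
 *			dm_usb_gadget_handle_interrupts(dev);
 *		usb_gadget_unregister_driver(&my_gadget_driver);
 *	}
 */
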
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	udc_disconnect();

	driver->unbind(&controller.gadget);
	controller.driver = NULL;

	ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req);
	free(controller.items_mem);
	free(controller.epts);

#if CONFIG_IS_ENABLED(DM_USB)
	usb_remove_ehci_gadget(&controller.ctrl);
#else
	usb_lowlevel_stop(0);
	controller.ctrl = NULL;
#endif

	return 0;
}

bool dfu_usb_get_reset(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

	return !!(readl(&udc->usbsts) & STS_URI);
}
1114