/*
 * c67x00-sched.c: Cypress C67X00 USB Host Controller Driver - TD scheduling
 *
 * Copyright (C) 2006-2008 Barco N.V.
 *    Derived from the Cypress cy7c67200/300 ezusb linux driver and
 *    based on multiple host controller drivers inside the linux kernel.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA  02110-1301  USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>

#include "c67x00.h"
#include "c67x00-hcd.h"

/*
 * These are the stages for a control urb; they are kept
 * in both urb->interval and td->privdata.
 */
#define SETUP_STAGE		0
#define DATA_STAGE		1
#define STATUS_STAGE		2

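/*
 * Control transfer state is advanced by c67x00_add_ctrl_urb() and
 * c67x00_handle_successful_td() below: SETUP_STAGE -> DATA_STAGE (only when
 * urb->transfer_buffer_length is non-zero) -> STATUS_STAGE, after which
 * urb->interval is reset to 0 and the urb is given back.
 */
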
/* -------------------------------------------------------------------------- */

/**
 * struct c67x00_ep_data: Host endpoint data structure
 */
struct c67x00_ep_data {
	struct list_head queue;
	struct list_head node;
	struct usb_host_endpoint *hep;
	struct usb_device *dev;
	u16 next_frame;		/* For int/isoc transactions */
};

/**
 * struct c67x00_td
 *
 * Hardware fields are little endian; SW fields are in CPU endianness.
 */
struct c67x00_td {
	/* HW specific part */
	__le16 ly_base_addr;	/* Bytes 0-1 */
	__le16 port_length;	/* Bytes 2-3 */
	u8 pid_ep;		/* Byte 4 */
	u8 dev_addr;		/* Byte 5 */
	u8 ctrl_reg;		/* Byte 6 */
	u8 status;		/* Byte 7 */
	u8 retry_cnt;		/* Byte 8 */
#define TT_OFFSET		2
#define TT_CONTROL		0
#define TT_ISOCHRONOUS		1
#define TT_BULK			2
#define TT_INTERRUPT		3
	u8 residue;		/* Byte 9 */
	__le16 next_td_addr;	/* Bytes 10-11 */
	/* SW part */
	struct list_head td_list;
	u16 td_addr;
	void *data;
	struct urb *urb;
	unsigned long privdata;

	/* These are needed for handling the toggle bits:
	 * an urb can be dequeued while a td is in progress;
	 * after checking the td, the toggle bit might need to
	 * be fixed */
	struct c67x00_ep_data *ep_data;
	unsigned int pipe;
};
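
/*
 * Note: the first CY_TD_SIZE (12) bytes of struct c67x00_td form the TD
 * image that lives in the chip's on-chip memory: c67x00_send_td() writes
 * exactly CY_TD_SIZE bytes starting at td->td_addr, and c67x00_parse_td()
 * reads them back after the frame has run.  The SW part is host-only and
 * is never transferred to the chip.
 */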

struct c67x00_urb_priv {
	struct list_head hep_node;
	struct urb *urb;
	int port;
	int cnt;		/* packet number for isoc */
	int status;
	struct c67x00_ep_data *ep_data;
};

#define td_udev(td)	((td)->ep_data->dev)

#define CY_TD_SIZE		12

#define TD_PIDEP_OFFSET		0x04
#define TD_PIDEPMASK_PID	0xF0
#define TD_PIDEPMASK_EP		0x0F
#define TD_PORTLENMASK_DL	0x03FF
#define TD_PORTLENMASK_PN	0xC000

#define TD_STATUS_OFFSET	0x07
#define TD_STATUSMASK_ACK	0x01
#define TD_STATUSMASK_ERR	0x02
#define TD_STATUSMASK_TMOUT	0x04
#define TD_STATUSMASK_SEQ	0x08
#define TD_STATUSMASK_SETUP	0x10
#define TD_STATUSMASK_OVF	0x20
#define TD_STATUSMASK_NAK	0x40
#define TD_STATUSMASK_STALL	0x80

#define TD_ERROR_MASK		(TD_STATUSMASK_ERR | TD_STATUSMASK_TMOUT | \
				 TD_STATUSMASK_STALL)

#define TD_RETRYCNT_OFFSET	0x08
#define TD_RETRYCNTMASK_ACT_FLG	0x10
#define TD_RETRYCNTMASK_TX_TYPE	0x0C
#define TD_RETRYCNTMASK_RTY_CNT	0x03

#define TD_RESIDUE_OVERFLOW	0x80

#define TD_PID_IN		0x90

/* Residue: signed 8 bits; neg -> OVERFLOW, pos -> UNDERFLOW */
#define td_residue(td)		((__s8)(td->residue))
#define td_ly_base_addr(td)	(__le16_to_cpu((td)->ly_base_addr))
#define td_port_length(td)	(__le16_to_cpu((td)->port_length))
#define td_next_td_addr(td)	(__le16_to_cpu((td)->next_td_addr))

#define td_active(td)		((td)->retry_cnt & TD_RETRYCNTMASK_ACT_FLG)
#define td_length(td)		(td_port_length(td) & TD_PORTLENMASK_DL)

#define td_sequence_ok(td)	(!td->status || \
				 (!(td->status & TD_STATUSMASK_SEQ) ==	\
				  !(td->ctrl_reg & SEQ_SEL)))

#define td_acked(td)		(!td->status || \
				 (td->status & TD_STATUSMASK_ACK))
#define td_actual_bytes(td)	(td_length(td) - td_residue(td))
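
/*
 * Worked example: for an IN TD programmed with td_length() == 64 that
 * receives a 62-byte short packet, td->residue comes back as 2, so
 * td_actual_bytes() yields 64 - 2 = 62.  A negative residue (sign bit ==
 * TD_RESIDUE_OVERFLOW) means more data arrived than was requested.
 */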

/* -------------------------------------------------------------------------- */

#ifdef DEBUG

/**
 * dbg_td - Dump the contents of the TD
 */
static void dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg)
{
	struct device *dev = c67x00_hcd_dev(c67x00);

	dev_dbg(dev, "### %s at 0x%04x\n", msg, td->td_addr);
	dev_dbg(dev, "urb:      0x%p\n", td->urb);
	dev_dbg(dev, "endpoint:   %4d\n", usb_pipeendpoint(td->pipe));
	dev_dbg(dev, "pipeout:    %4d\n", usb_pipeout(td->pipe));
	dev_dbg(dev, "ly_base_addr: 0x%04x\n", td_ly_base_addr(td));
	dev_dbg(dev, "port_length:  0x%04x\n", td_port_length(td));
	dev_dbg(dev, "pid_ep:         0x%02x\n", td->pid_ep);
	dev_dbg(dev, "dev_addr:       0x%02x\n", td->dev_addr);
	dev_dbg(dev, "ctrl_reg:       0x%02x\n", td->ctrl_reg);
	dev_dbg(dev, "status:         0x%02x\n", td->status);
	dev_dbg(dev, "retry_cnt:      0x%02x\n", td->retry_cnt);
	dev_dbg(dev, "residue:        0x%02x\n", td->residue);
	dev_dbg(dev, "next_td_addr: 0x%04x\n", td_next_td_addr(td));
	dev_dbg(dev, "data:");
	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
		       td->data, td_length(td), 1);
}
#else				/* DEBUG */

static inline void
dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg) { }

#endif				/* DEBUG */

/* -------------------------------------------------------------------------- */
/* Helper functions */

static inline u16 c67x00_get_current_frame_number(struct c67x00_hcd *c67x00)
{
	return c67x00_ll_husb_get_frame(c67x00->sie) & HOST_FRAME_MASK;
}

/**
 * frame_add
 * Software wraparound for frame numbers.
 */
static inline u16 frame_add(u16 a, u16 b)
{
	return (a + b) & HOST_FRAME_MASK;
}

/**
 * frame_after - is frame a after frame b
 */
static inline int frame_after(u16 a, u16 b)
{
	return ((HOST_FRAME_MASK + a - b) & HOST_FRAME_MASK) <
	    (HOST_FRAME_MASK / 2);
}

/**
 * frame_after_eq - is frame a after or equal to frame b
 */
static inline int frame_after_eq(u16 a, u16 b)
{
	return ((HOST_FRAME_MASK + 1 + a - b) & HOST_FRAME_MASK) <
	    (HOST_FRAME_MASK / 2);
}
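
/*
 * Worked example, assuming HOST_FRAME_MASK (defined in c67x00-hcd.h) is
 * 0x07FF, i.e. 11-bit USB frame numbers: frame_add(0x07FE, 3) wraps to
 * 0x0001, and frame_after(0x0001, 0x07FE) evaluates
 * (0x07FF + 0x0001 - 0x07FE) & 0x07FF == 2, which is below
 * HOST_FRAME_MASK / 2, so frame 1 counts as "after" frame 0x07FE.  Frames
 * more than half the ring apart compare the other way around.
 */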

/* -------------------------------------------------------------------------- */

/**
 * c67x00_release_urb - remove link from all tds to this urb
 * Disconnects the urb from its tds, so that it can be given back.
 * pre: urb->hcpriv != NULL
 */
static void c67x00_release_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct c67x00_td *td;
	struct c67x00_urb_priv *urbp;

	BUG_ON(!urb);

	c67x00->urb_count--;

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		c67x00->urb_iso_count--;
		if (c67x00->urb_iso_count == 0)
			c67x00->max_frame_bw = MAX_FRAME_BW_STD;
	}

	/* TODO this might not be so efficient when we've got many urbs!
	 * Alternatives:
	 *   * only clear when needed
	 *   * keep a list of tds with each urbp
	 */
	list_for_each_entry(td, &c67x00->td_list, td_list)
		if (urb == td->urb)
			td->urb = NULL;

	urbp = urb->hcpriv;
	urb->hcpriv = NULL;
	list_del(&urbp->hep_node);
	kfree(urbp);
}

/* -------------------------------------------------------------------------- */

static struct c67x00_ep_data *
c67x00_ep_data_alloc(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct usb_host_endpoint *hep = urb->ep;
	struct c67x00_ep_data *ep_data;
	int type;

	c67x00->current_frame = c67x00_get_current_frame_number(c67x00);

	/* Check if endpoint already has a c67x00_ep_data struct allocated */
	if (hep->hcpriv) {
		ep_data = hep->hcpriv;
		if (frame_after(c67x00->current_frame, ep_data->next_frame))
			ep_data->next_frame =
			    frame_add(c67x00->current_frame, 1);
		return hep->hcpriv;
	}

	/* Allocate and initialize a new c67x00 endpoint data structure */
	ep_data = kzalloc(sizeof(*ep_data), GFP_ATOMIC);
	if (!ep_data)
		return NULL;

	INIT_LIST_HEAD(&ep_data->queue);
	INIT_LIST_HEAD(&ep_data->node);
	ep_data->hep = hep;

	/* hold a reference to udev as long as this endpoint lives;
	 * this is needed to possibly fix the data toggle */
	ep_data->dev = usb_get_dev(urb->dev);
	hep->hcpriv = ep_data;

	/* For ISOC and INT endpoints, start ASAP: */
	ep_data->next_frame = frame_add(c67x00->current_frame, 1);

	/* Add the endpoint data to one of the pipe lists; must be added
	   in order of endpoint address */
	type = usb_pipetype(urb->pipe);
	if (list_empty(&ep_data->node)) {
		list_add(&ep_data->node, &c67x00->list[type]);
	} else {
		struct c67x00_ep_data *prev;

		list_for_each_entry(prev, &c67x00->list[type], node) {
			if (prev->hep->desc.bEndpointAddress >
			    hep->desc.bEndpointAddress) {
				list_add(&ep_data->node, prev->node.prev);
				break;
			}
		}
	}

	return ep_data;
}

static int c67x00_ep_data_free(struct usb_host_endpoint *hep)
{
	struct c67x00_ep_data *ep_data = hep->hcpriv;

	if (!ep_data)
		return 0;

	if (!list_empty(&ep_data->queue))
		return -EBUSY;

	usb_put_dev(ep_data->dev);
	list_del(&ep_data->queue);
	list_del(&ep_data->node);

	kfree(ep_data);
	hep->hcpriv = NULL;

	return 0;
}

void c67x00_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
	struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
	unsigned long flags;

	if (!list_empty(&ep->urb_list))
		dev_warn(c67x00_hcd_dev(c67x00), "error: urb list not empty\n");

	spin_lock_irqsave(&c67x00->lock, flags);

	/* loop waiting for all transfers in the endpoint queue to complete */
	while (c67x00_ep_data_free(ep)) {
		/* Drop the lock so we can sleep waiting for the hardware */
		spin_unlock_irqrestore(&c67x00->lock, flags);

		/* it could happen that we reinitialize this completion while
		 * somebody else is waiting for it.  The timeout and the while
		 * loop handle such cases, but this might be improved */
		INIT_COMPLETION(c67x00->endpoint_disable);
		c67x00_sched_kick(c67x00);
		wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ);

		spin_lock_irqsave(&c67x00->lock, flags);
	}

	spin_unlock_irqrestore(&c67x00->lock, flags);
}

/* -------------------------------------------------------------------------- */

static inline int get_root_port(struct usb_device *dev)
{
	while (dev->parent->parent)
		dev = dev->parent;
	return dev->portnum;
}

int c67x00_urb_enqueue(struct usb_hcd *hcd,
		       struct urb *urb, gfp_t mem_flags)
{
	int ret;
	unsigned long flags;
	struct c67x00_urb_priv *urbp;
	struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
	int port = get_root_port(urb->dev)-1;

	spin_lock_irqsave(&c67x00->lock, flags);

	/* Make sure host controller is running */
	if (!HC_IS_RUNNING(hcd->state)) {
		ret = -ENODEV;
		goto err_not_linked;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto err_not_linked;

	/* Allocate and initialize urb private data */
	urbp = kzalloc(sizeof(*urbp), mem_flags);
	if (!urbp) {
		ret = -ENOMEM;
		goto err_urbp;
	}

	INIT_LIST_HEAD(&urbp->hep_node);
	urbp->urb = urb;
	urbp->port = port;

	urbp->ep_data = c67x00_ep_data_alloc(c67x00, urb);

	if (!urbp->ep_data) {
		ret = -ENOMEM;
		goto err_epdata;
	}

	/* TODO claim bandwidth with usb_claim_bandwidth?
	 * also release it somewhere! */

	urb->hcpriv = urbp;

	urb->actual_length = 0;	/* Nothing received/transmitted yet */

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		urb->interval = SETUP_STAGE;
		break;
	case PIPE_INTERRUPT:
		break;
	case PIPE_BULK:
		break;
	case PIPE_ISOCHRONOUS:
		if (c67x00->urb_iso_count == 0)
			c67x00->max_frame_bw = MAX_FRAME_BW_ISO;
		c67x00->urb_iso_count++;
		if (list_empty(&urbp->ep_data->queue))
			urb->start_frame = urbp->ep_data->next_frame;
		else {
			/* Go right after the last one */
			struct urb *last_urb;

			last_urb = list_entry(urbp->ep_data->queue.prev,
					      struct c67x00_urb_priv,
					      hep_node)->urb;
			urb->start_frame =
			    frame_add(last_urb->start_frame,
				      last_urb->number_of_packets *
				      last_urb->interval);
		}
		urbp->cnt = 0;
		break;
	}

	/* Add the URB to the endpoint queue */
	list_add_tail(&urbp->hep_node, &urbp->ep_data->queue);

	/* If this is the only URB, kick start the controller */
	if (!c67x00->urb_count++)
		c67x00_ll_hpi_enable_sofeop(c67x00->sie);

	c67x00_sched_kick(c67x00);
	spin_unlock_irqrestore(&c67x00->lock, flags);

	return 0;

err_epdata:
	kfree(urbp);
err_urbp:
	usb_hcd_unlink_urb_from_ep(hcd, urb);
err_not_linked:
	spin_unlock_irqrestore(&c67x00->lock, flags);

	return ret;
}

int c67x00_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&c67x00->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	c67x00_release_urb(c67x00, urb);
	usb_hcd_unlink_urb_from_ep(hcd, urb);

	spin_unlock(&c67x00->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&c67x00->lock);

	spin_unlock_irqrestore(&c67x00->lock, flags);

	return 0;

 done:
	spin_unlock_irqrestore(&c67x00->lock, flags);
	return rc;
}

/* -------------------------------------------------------------------------- */

/*
 * pre: c67x00 locked, urb unlocked
 */
static void
c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status)
{
	struct c67x00_urb_priv *urbp;

	if (!urb)
		return;

	urbp = urb->hcpriv;
	urbp->status = status;

	list_del_init(&urbp->hep_node);

	c67x00_release_urb(c67x00, urb);
	usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb);
	spin_unlock(&c67x00->lock);
	/* urbp has been freed by c67x00_release_urb(); use the local status */
	usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status);
	spin_lock(&c67x00->lock);
}

/* -------------------------------------------------------------------------- */

static int c67x00_claim_frame_bw(struct c67x00_hcd *c67x00, struct urb *urb,
				 int len, int periodic)
{
	struct c67x00_urb_priv *urbp = urb->hcpriv;
	int bit_time;

	/* According to the C67x00 BIOS user manual, page 3-18,19, the
	 * following calculations provide the full speed bit times for
	 * a transaction.
	 *
	 * FS(in)	= 112.5 +  9.36*BC + HOST_DELAY
	 * FS(in,iso)	=  90.5 +  9.36*BC + HOST_DELAY
	 * FS(out)	= 112.5 +  9.36*BC + HOST_DELAY
	 * FS(out,iso)	=  78.4 +  9.36*BC + HOST_DELAY
	 * LS(in)	= 802.4 + 75.78*BC + HOST_DELAY
	 * LS(out)	= 802.6 + 74.67*BC + HOST_DELAY
	 *
	 * HOST_DELAY == 106 for the c67200 and c67300.
	 */

	/* make calculations in 1/100 bit times to maintain resolution */
	if (urbp->ep_data->dev->speed == USB_SPEED_LOW) {
		/* Low speed pipe */
		if (usb_pipein(urb->pipe))
			bit_time = 80240 + 7578*len;
		else
			bit_time = 80260 + 7467*len;
	} else {
		/* FS pipes */
		if (usb_pipeisoc(urb->pipe))
			bit_time = usb_pipein(urb->pipe) ? 9050 : 7840;
		else
			bit_time = 11250;
		bit_time += 936*len;
	}

	/* Scale back down to integer bit times.  Use a host delay of 106.
	 * (this is the only place it is used) */
	bit_time = ((bit_time+50) / 100) + 106;

	if (unlikely(bit_time + c67x00->bandwidth_allocated >=
		     c67x00->max_frame_bw))
		return -EMSGSIZE;

	if (unlikely(c67x00->next_td_addr + CY_TD_SIZE >=
		     c67x00->td_base_addr + SIE_TD_SIZE))
		return -EMSGSIZE;

	if (unlikely(c67x00->next_buf_addr + len >=
		     c67x00->buf_base_addr + SIE_TD_BUF_SIZE))
		return -EMSGSIZE;

	if (periodic) {
		if (unlikely(bit_time + c67x00->periodic_bw_allocated >=
			     MAX_PERIODIC_BW(c67x00->max_frame_bw)))
			return -EMSGSIZE;
		c67x00->periodic_bw_allocated += bit_time;
	}

	c67x00->bandwidth_allocated += bit_time;
	return 0;
}
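
/*
 * Worked example for the arithmetic above: a 64-byte full-speed bulk OUT
 * costs bit_time = 11250 + 936 * 64 = 71154 hundredths of a bit time,
 * which scales to (71154 + 50) / 100 + 106 = 818 bit times including the
 * host delay, out of the 12000 bit times of a 1 ms full-speed frame.
 */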

/* -------------------------------------------------------------------------- */

/**
 * td_addr and buf_addr must be word aligned
 */
static int c67x00_create_td(struct c67x00_hcd *c67x00, struct urb *urb,
			    void *data, int len, int pid, int toggle,
			    unsigned long privdata)
{
	struct c67x00_td *td;
	struct c67x00_urb_priv *urbp = urb->hcpriv;
	const __u8 active_flag = 1, retry_cnt = 1;
	__u8 cmd = 0;
	int tt = 0;

	if (c67x00_claim_frame_bw(c67x00, urb, len, usb_pipeisoc(urb->pipe)
				  || usb_pipeint(urb->pipe)))
		return -EMSGSIZE;	/* Not really an error, but expected */

	td = kzalloc(sizeof(*td), GFP_ATOMIC);
	if (!td)
		return -ENOMEM;

	td->pipe = urb->pipe;
	td->ep_data = urbp->ep_data;

	if ((td_udev(td)->speed == USB_SPEED_LOW) &&
	    !(c67x00->low_speed_ports & (1 << urbp->port)))
		cmd |= PREAMBLE_EN;

	switch (usb_pipetype(td->pipe)) {
	case PIPE_ISOCHRONOUS:
		tt = TT_ISOCHRONOUS;
		cmd |= ISO_EN;
		break;
	case PIPE_CONTROL:
		tt = TT_CONTROL;
		break;
	case PIPE_BULK:
		tt = TT_BULK;
		break;
	case PIPE_INTERRUPT:
		tt = TT_INTERRUPT;
		break;
	}

	if (toggle)
		cmd |= SEQ_SEL;

	cmd |= ARM_EN;

	/* SW part */
	td->td_addr = c67x00->next_td_addr;
	c67x00->next_td_addr = c67x00->next_td_addr + CY_TD_SIZE;

	/* HW part */
	td->ly_base_addr = __cpu_to_le16(c67x00->next_buf_addr);
	td->port_length = __cpu_to_le16((c67x00->sie->sie_num << 15) |
					(urbp->port << 14) | (len & 0x3FF));
	td->pid_ep = ((pid & 0xF) << TD_PIDEP_OFFSET) |
	    (usb_pipeendpoint(td->pipe) & 0xF);
	td->dev_addr = usb_pipedevice(td->pipe) & 0x7F;
	td->ctrl_reg = cmd;
	td->status = 0;
	td->retry_cnt = (tt << TT_OFFSET) | (active_flag << 4) | retry_cnt;
	td->residue = 0;
	td->next_td_addr = __cpu_to_le16(c67x00->next_td_addr);

	/* SW part */
	td->data = data;
	td->urb = urb;
	td->privdata = privdata;

	c67x00->next_buf_addr += (len + 1) & ~0x01;	/* properly align */

	list_add_tail(&td->td_list, &c67x00->td_list);
	return 0;
}
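
/*
 * Each new TD is linked to its successor in on-chip memory by construction:
 * next_td_addr is set to the already-advanced next_td_addr above, so the
 * chip can walk the chain on its own.  c67x00_send_frame() later clears
 * next_td_addr of the final TD to terminate the list.
 */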

static inline void c67x00_release_td(struct c67x00_td *td)
{
	list_del_init(&td->td_list);
	kfree(td);
}

/* -------------------------------------------------------------------------- */

static int c67x00_add_data_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	int remaining;
	int toggle;
	int pid;
	int ret = 0;
	int maxps;
	int need_empty;

	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			       usb_pipeout(urb->pipe));
	remaining = urb->transfer_buffer_length - urb->actual_length;

	maxps = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));

	need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
	    usb_pipeout(urb->pipe) && !(remaining % maxps);

	while (remaining || need_empty) {
		int len;
		char *td_buf;

		len = (remaining > maxps) ? maxps : remaining;
		if (!len)
			need_empty = 0;

		pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
		td_buf = urb->transfer_buffer + urb->transfer_buffer_length -
		    remaining;
		ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, toggle,
				       DATA_STAGE);
		if (ret)
			return ret;	/* td wasn't created */

		toggle ^= 1;
		remaining -= len;
		if (usb_pipecontrol(urb->pipe))
			break;
	}

	return 0;
}
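
/*
 * Note the early break for control pipes above: the data stage of a control
 * transfer is fed to the chip one packet per scheduled frame, while bulk
 * and interrupt urbs keep queueing packets until the frame's bandwidth or
 * TD/buffer space runs out (c67x00_create_td() then returns -EMSGSIZE).
 */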

/**
 * return 0 in case more bandwidth is available, else an error code
 */
static int c67x00_add_ctrl_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	int ret;
	int pid;

	switch (urb->interval) {
	default:
	case SETUP_STAGE:
		ret = c67x00_create_td(c67x00, urb, urb->setup_packet,
				       8, USB_PID_SETUP, 0, SETUP_STAGE);
		if (ret)
			return ret;
		urb->interval = SETUP_STAGE;
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			      usb_pipeout(urb->pipe), 1);
		break;
	case DATA_STAGE:
		if (urb->transfer_buffer_length) {
			ret = c67x00_add_data_urb(c67x00, urb);
			if (ret)
				return ret;
			break;
		}		/* else fallthrough */
	case STATUS_STAGE:
		pid = !usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
		ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1,
				       STATUS_STAGE);
		if (ret)
			return ret;
		break;
	}

	return 0;
}

/*
 * return 0 in case more bandwidth is available, else an error code
 */
static int c67x00_add_int_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct c67x00_urb_priv *urbp = urb->hcpriv;

	if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
		urbp->ep_data->next_frame =
		    frame_add(urbp->ep_data->next_frame, urb->interval);
		return c67x00_add_data_urb(c67x00, urb);
	}
	return 0;
}

static int c67x00_add_iso_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct c67x00_urb_priv *urbp = urb->hcpriv;

	if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
		char *td_buf;
		int len, pid, ret;

		BUG_ON(urbp->cnt >= urb->number_of_packets);

		td_buf = urb->transfer_buffer +
		    urb->iso_frame_desc[urbp->cnt].offset;
		len = urb->iso_frame_desc[urbp->cnt].length;
		pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;

		ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, 0,
				       urbp->cnt);
		if (ret) {
			printk(KERN_DEBUG "create failed: %d\n", ret);
			urb->iso_frame_desc[urbp->cnt].actual_length = 0;
			urb->iso_frame_desc[urbp->cnt].status = ret;
			if (urbp->cnt + 1 == urb->number_of_packets)
				c67x00_giveback_urb(c67x00, urb, 0);
		}

		urbp->ep_data->next_frame =
		    frame_add(urbp->ep_data->next_frame, urb->interval);
		urbp->cnt++;
	}
	return 0;
}
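
/*
 * Periodic scheduling in a nutshell: ep_data->next_frame records when the
 * endpoint is next due.  Once the current frame reaches it, one interval's
 * worth of work is queued (a single iso packet, or the interrupt data
 * packets) and next_frame is advanced by urb->interval, so each urb paces
 * itself across frames.
 */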

/* -------------------------------------------------------------------------- */

static void c67x00_fill_from_list(struct c67x00_hcd *c67x00, int type,
				  int (*add)(struct c67x00_hcd *, struct urb *))
{
	struct c67x00_ep_data *ep_data;
	struct urb *urb;

	/* traverse every endpoint on the list */
	list_for_each_entry(ep_data, &c67x00->list[type], node) {
		if (!list_empty(&ep_data->queue)) {
			/* and add the first urb */
			/* isochronous transfers rely on this */
			urb = list_entry(ep_data->queue.next,
					 struct c67x00_urb_priv,
					 hep_node)->urb;
			add(c67x00, urb);
		}
	}
}

static void c67x00_fill_frame(struct c67x00_hcd *c67x00)
{
	struct c67x00_td *td, *ttd;

	/* Check if we can proceed */
	if (!list_empty(&c67x00->td_list)) {
		dev_warn(c67x00_hcd_dev(c67x00),
			 "TD list not empty! This should not happen!\n");
		list_for_each_entry_safe(td, ttd, &c67x00->td_list, td_list) {
			dbg_td(c67x00, td, "Unprocessed td");
			c67x00_release_td(td);
		}
	}

	/* Reinitialize variables */
	c67x00->bandwidth_allocated = 0;
	c67x00->periodic_bw_allocated = 0;

	c67x00->next_td_addr = c67x00->td_base_addr;
	c67x00->next_buf_addr = c67x00->buf_base_addr;

	/* Fill the list */
	c67x00_fill_from_list(c67x00, PIPE_ISOCHRONOUS, c67x00_add_iso_urb);
	c67x00_fill_from_list(c67x00, PIPE_INTERRUPT, c67x00_add_int_urb);
	c67x00_fill_from_list(c67x00, PIPE_CONTROL, c67x00_add_ctrl_urb);
	c67x00_fill_from_list(c67x00, PIPE_BULK, c67x00_add_data_urb);
}
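
/*
 * The fill order above implements a fixed per-frame priority scheme:
 * isochronous first (guaranteed bandwidth), then interrupt, then control,
 * and bulk last, which only gets whatever frame time is left over.
 */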

/* -------------------------------------------------------------------------- */

/**
 * Get TD from C67X00
 */
static inline void
c67x00_parse_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
	c67x00_ll_read_mem_le16(c67x00->sie->dev,
				td->td_addr, td, CY_TD_SIZE);

	if (usb_pipein(td->pipe) && td_actual_bytes(td))
		c67x00_ll_read_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
					td->data, td_actual_bytes(td));
}

static int c67x00_td_to_error(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
	if (td->status & TD_STATUSMASK_ERR) {
		dbg_td(c67x00, td, "ERROR_FLAG");
		return -EILSEQ;
	}
	if (td->status & TD_STATUSMASK_STALL) {
		/* dbg_td(c67x00, td, "STALL"); */
		return -EPIPE;
	}
	if (td->status & TD_STATUSMASK_TMOUT) {
		dbg_td(c67x00, td, "TIMEOUT");
		return -ETIMEDOUT;
	}

	return 0;
}

static inline int c67x00_end_of_data(struct c67x00_td *td)
{
	int maxps, need_empty, remaining;
	struct urb *urb = td->urb;
	int act_bytes;

	act_bytes = td_actual_bytes(td);

	if (unlikely(!act_bytes))
		return 1;	/* This was an empty packet */

	maxps = usb_maxpacket(td_udev(td), td->pipe, usb_pipeout(td->pipe));

	if (unlikely(act_bytes < maxps))
		return 1;	/* Smaller than a full packet */

	remaining = urb->transfer_buffer_length - urb->actual_length;
	need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
	    usb_pipeout(urb->pipe) && !(remaining % maxps);

	if (unlikely(!remaining && !need_empty))
		return 1;

	return 0;
}

/* -------------------------------------------------------------------------- */

/* Remove all td's from the list which come
 * after last_td and are meant for the same pipe.
 * This is used when a short packet has occurred */
static inline void c67x00_clear_pipe(struct c67x00_hcd *c67x00,
				     struct c67x00_td *last_td)
{
	struct c67x00_td *td, *tmp;
	td = last_td;
	tmp = last_td;
	while (td->td_list.next != &c67x00->td_list) {
		td = list_entry(td->td_list.next, struct c67x00_td, td_list);
		if (td->pipe == last_td->pipe) {
			c67x00_release_td(td);
			td = tmp;
		}
		tmp = td;
	}
}
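
/*
 * The td/tmp shuffle above keeps the walk alive across deletions: after a
 * td for the matching pipe is released, td is reset to the last surviving
 * entry (tmp) so that td->td_list.next is valid again on the next pass,
 * while tds belonging to other pipes are simply stepped over.
 */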

/* -------------------------------------------------------------------------- */

static void c67x00_handle_successful_td(struct c67x00_hcd *c67x00,
					struct c67x00_td *td)
{
	struct urb *urb = td->urb;

	if (!urb)
		return;

	urb->actual_length += td_actual_bytes(td);

	switch (usb_pipetype(td->pipe)) {
		/* isochronous tds are handled separately */
	case PIPE_CONTROL:
		switch (td->privdata) {
		case SETUP_STAGE:
			urb->interval =
			    urb->transfer_buffer_length ?
			    DATA_STAGE : STATUS_STAGE;
			/* Don't count setup_packet with normal data: */
			urb->actual_length = 0;
			break;

		case DATA_STAGE:
			if (c67x00_end_of_data(td)) {
				urb->interval = STATUS_STAGE;
				c67x00_clear_pipe(c67x00, td);
			}
			break;

		case STATUS_STAGE:
			urb->interval = 0;
			c67x00_giveback_urb(c67x00, urb, 0);
			break;
		}
		break;

	case PIPE_INTERRUPT:
	case PIPE_BULK:
		if (unlikely(c67x00_end_of_data(td))) {
			c67x00_clear_pipe(c67x00, td);
			c67x00_giveback_urb(c67x00, urb, 0);
		}
		break;
	}
}

static void c67x00_handle_isoc(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
	struct urb *urb = td->urb;
	struct c67x00_urb_priv *urbp;
	int cnt;

	if (!urb)
		return;

	urbp = urb->hcpriv;
	cnt = td->privdata;

	if (td->status & TD_ERROR_MASK)
		urb->error_count++;

	urb->iso_frame_desc[cnt].actual_length = td_actual_bytes(td);
	urb->iso_frame_desc[cnt].status = c67x00_td_to_error(c67x00, td);
	if (cnt + 1 == urb->number_of_packets)	/* Last packet */
		c67x00_giveback_urb(c67x00, urb, 0);
}

/* -------------------------------------------------------------------------- */

/**
 * c67x00_check_td_list - handle tds which have been processed by the c67x00
 * pre: current_td == 0
 */
static inline void c67x00_check_td_list(struct c67x00_hcd *c67x00)
{
	struct c67x00_td *td, *tmp;
	struct urb *urb;
	int ack_ok;
	int clear_endpoint;

	list_for_each_entry_safe(td, tmp, &c67x00->td_list, td_list) {
		/* get the TD */
		c67x00_parse_td(c67x00, td);
		urb = td->urb;	/* urb can be NULL! */
		ack_ok = 0;
		clear_endpoint = 1;

		/* Handle isochronous transfers separately */
		if (usb_pipeisoc(td->pipe)) {
			clear_endpoint = 0;
			c67x00_handle_isoc(c67x00, td);
			goto cont;
		}

		/* When an error occurs, all td's for that pipe go into an
		 * inactive state. This state matches successful transfers so
		 * we must make sure not to service them. */
		if (td->status & TD_ERROR_MASK) {
			c67x00_giveback_urb(c67x00, urb,
					    c67x00_td_to_error(c67x00, td));
			goto cont;
		}

		if ((td->status & TD_STATUSMASK_NAK) || !td_sequence_ok(td) ||
		    !td_acked(td))
			goto cont;

		/* Sequence ok and acked, don't need to fix toggle */
		ack_ok = 1;

		if (unlikely(td->status & TD_STATUSMASK_OVF)) {
			if (td_residue(td) & TD_RESIDUE_OVERFLOW) {
				/* Overflow */
				c67x00_giveback_urb(c67x00, urb, -EOVERFLOW);
				goto cont;
			}
		}

		clear_endpoint = 0;
		c67x00_handle_successful_td(c67x00, td);

cont:
		if (clear_endpoint)
			c67x00_clear_pipe(c67x00, td);
		if (ack_ok)
			usb_settoggle(td_udev(td), usb_pipeendpoint(td->pipe),
				      usb_pipeout(td->pipe),
				      !(td->ctrl_reg & SEQ_SEL));
		/* next in list could have been removed, due to clear_pipe! */
		tmp = list_entry(td->td_list.next, typeof(*td), td_list);
		c67x00_release_td(td);
	}
}

/* -------------------------------------------------------------------------- */

static inline int c67x00_all_tds_processed(struct c67x00_hcd *c67x00)
{
	/* If all tds are processed, we can check the previous frame (if
	 * there was any) and start our next frame.
	 */
	return !c67x00_ll_husb_get_current_td(c67x00->sie);
}

/**
 * Send td to C67X00
 */
static void c67x00_send_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
	int len = td_length(td);

	if (len && ((td->pid_ep & TD_PIDEPMASK_PID) != TD_PID_IN))
		c67x00_ll_write_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
					 td->data, len);

	c67x00_ll_write_mem_le16(c67x00->sie->dev,
				 td->td_addr, td, CY_TD_SIZE);
}

static void c67x00_send_frame(struct c67x00_hcd *c67x00)
{
	struct c67x00_td *td;

	if (list_empty(&c67x00->td_list))
		dev_warn(c67x00_hcd_dev(c67x00),
			 "%s: td list should not be empty here!\n",
			 __func__);

	list_for_each_entry(td, &c67x00->td_list, td_list) {
		if (td->td_list.next == &c67x00->td_list)
			td->next_td_addr = 0;	/* Last td in list */

		c67x00_send_td(c67x00, td);
	}

	c67x00_ll_husb_set_current_td(c67x00->sie, c67x00->td_base_addr);
}

/* -------------------------------------------------------------------------- */

/**
 * c67x00_do_work - Scheduler's state machine
 */
static void c67x00_do_work(struct c67x00_hcd *c67x00)
{
	spin_lock(&c67x00->lock);
	/* Make sure all tds are processed */
	if (!c67x00_all_tds_processed(c67x00))
		goto out;

	c67x00_check_td_list(c67x00);

	/* no td's are being processed (current == 0)
	 * and all have been "checked" */
	complete(&c67x00->endpoint_disable);

	if (!list_empty(&c67x00->td_list))
		goto out;

	c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
	if (c67x00->current_frame == c67x00->last_frame)
		goto out;	/* Don't send tds in same frame */
	c67x00->last_frame = c67x00->current_frame;

	/* If no urbs are scheduled, our work is done */
	if (!c67x00->urb_count) {
		c67x00_ll_hpi_disable_sofeop(c67x00->sie);
		goto out;
	}

	c67x00_fill_frame(c67x00);
	if (!list_empty(&c67x00->td_list))
		/* TD's have been added to the frame */
		c67x00_send_frame(c67x00);

 out:
	spin_unlock(&c67x00->lock);
}
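
/*
 * One pass of c67x00_do_work() per tasklet kick: reap the tds of the
 * previous frame, wake anyone waiting in c67x00_endpoint_disable(), and,
 * at most once per frame (current_frame != last_frame), build and send the
 * next frame's td list.  SOF/EOP interrupts are gated off when no urbs
 * remain.
 */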

/* -------------------------------------------------------------------------- */

static void c67x00_sched_tasklet(unsigned long __c67x00)
{
	struct c67x00_hcd *c67x00 = (struct c67x00_hcd *)__c67x00;
	c67x00_do_work(c67x00);
}

void c67x00_sched_kick(struct c67x00_hcd *c67x00)
{
	tasklet_hi_schedule(&c67x00->tasklet);
}

int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00)
{
	tasklet_init(&c67x00->tasklet, c67x00_sched_tasklet,
		     (unsigned long)c67x00);
	return 0;
}

void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00)
{
	tasklet_kill(&c67x00->tasklet);
}