1/*
2 * Universal Host Controller Interface driver for USB (take II).
3 *
4 * (c) 1999-2001 Georg Acher, acher@in.tum.de (executive slave) (base guitar)
5 *               Deti Fliegl, deti@fliegl.de (executive slave) (lead voice)
6 *               Thomas Sailer, sailer@ife.ee.ethz.ch (chief consultant) (cheer leader)
7 *               Roman Weissgaerber, weissg@vienna.at (virt root hub) (studio porter)
8 * (c) 2000      Yggdrasil Computing, Inc. (port of new PCI interface support
9 *               from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
10 * (C) 2000      David Brownell, david-b@pacbell.net (usb-ohci.c)
11 *
 * HW initialization based on material from
13 *
14 * (C) Copyright 1999 Linus Torvalds
15 * (C) Copyright 1999 Johannes Erdfelt
16 * (C) Copyright 1999 Randy Dunlap
17 * (C) Copyright 1999 Gregory P. Smith
18 *
19 * $Id: usb-uhci.c,v 1.1.1.1 2008/10/15 03:27:03 james26_jang Exp $
20 */
21
22#include <linux/config.h>
23#include <linux/module.h>
24#include <linux/pci.h>
25#include <linux/kernel.h>
26#include <linux/delay.h>
27#include <linux/ioport.h>
28#include <linux/sched.h>
29#include <linux/slab.h>
30#include <linux/smp_lock.h>
31#include <linux/errno.h>
32#include <linux/unistd.h>
33#include <linux/interrupt.h>	/* for in_interrupt() */
34#include <linux/init.h>
35#include <linux/version.h>
36#include <linux/pm.h>
37#include <linux/timer.h>
38
39#include <asm/uaccess.h>
40#include <asm/io.h>
41#include <asm/irq.h>
42#include <asm/system.h>
43
44/* This enables more detailed sanity checks in submit_iso */
45//#define ISO_SANITY_CHECK
46
47/* This enables debug printks */
48#define DEBUG
49
50/* This enables all symbols to be exported, to ease debugging oopses */
51//#define DEBUG_SYMBOLS
52
53/* This enables an extra UHCI slab for memory debugging */
54#define DEBUG_SLAB
55
56#define VERSTR "$Revision: 1.1.1.1 $ time " __TIME__ " " __DATE__
57
58#include <linux/usb.h>
59#include "usb-uhci.h"
60#include "usb-uhci-debug.h"
61
62#include "../hcd.h"
63
64/*
65 * Version Information
66 */
67#define DRIVER_VERSION "v1.275"
68#define DRIVER_AUTHOR "Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber"
69#define DRIVER_DESC "USB Universal Host Controller Interface driver"
70
71#undef DEBUG
72#undef dbg
73#define dbg(format, arg...) do {} while (0)
74#define DEBUG_SYMBOLS
75#ifdef DEBUG_SYMBOLS
76	#define _static
77	#ifndef EXPORT_SYMTAB
78		#define EXPORT_SYMTAB
79	#endif
80#else
81	#define _static static
82#endif
83
84#define queue_dbg dbg //err
85#define async_dbg dbg //err
86
87#ifdef DEBUG_SLAB
88	static kmem_cache_t *urb_priv_kmem;
89#endif
90
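/* Allocation flags: use atomic allocations when called from interrupt
 * context or when the current task is not in TASK_RUNNING state,
 * GFP_NOIO/SLAB_NOIO otherwise. */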
91#define SLAB_FLAG     (in_interrupt () || current->state != TASK_RUNNING ? SLAB_ATOMIC : SLAB_NOIO)
92#define KMALLOC_FLAG  (in_interrupt () || current->state != TASK_RUNNING ? GFP_ATOMIC : GFP_NOIO)
93
/* CONFIG_USB_UHCI_HIGH_BANDWIDTH turns on Full Speed Bandwidth
 * Reclamation (FSBR): the schedule is made to loop back on itself while
 * a transfer is going on. With FSBR, USB performance is optimal, but PCI
 * can be slowed down by up to a factor of five, hurting overall system
 * performance (e.g. framebuffer devices).
 */
100#define CONFIG_USB_UHCI_HIGH_BANDWIDTH
101
/* *_DEPTH_FIRST puts the descriptors in depth-first mode. This has a
 * somewhat similar effect to FSBR (higher speed), but does not slow PCI
 * down. OTOH USB performance is slightly lower than with FSBR, and a
 * single device can hog the whole bus, starving other devices.
 */
108#define USE_CTRL_DEPTH_FIRST 0  // 0: Breadth first, 1: Depth first
109#define USE_BULK_DEPTH_FIRST 0  // 0: Breadth first, 1: Depth first
110
/* Turning off both CONFIG_USB_UHCI_HIGH_BANDWIDTH and *_DEPTH_FIRST
 * will lead to <64KB/sec performance over USB for bulk transfers targeting
 * one device's endpoint. You probably do not want to do that.
 */
115
116// stop bandwidth reclamation after (roughly) 50ms
117#define IDLE_TIMEOUT  (HZ/20)
118
119// Suppress HC interrupt error messages for 5s
120#define ERROR_SUPPRESSION_TIME (HZ*5)
121
122_static int rh_submit_urb (struct urb *urb);
123_static int rh_unlink_urb (struct urb *urb);
124_static int delete_qh (uhci_t *s, uhci_desc_t *qh);
125_static int process_transfer (uhci_t *s, struct urb *urb, int mode);
126_static int process_interrupt (uhci_t *s, struct urb *urb);
127_static int process_iso (uhci_t *s, struct urb *urb, int force);
128
// How many URBs with ->next are walked
130#define MAX_NEXT_COUNT 2048
131
132static uhci_t *devs = NULL;
133
134/* used by userspace UHCI data structure dumper */
135uhci_t **uhci_devices = &devs;
136
137static int bcm_dump_pci_status = 0;
138static int bcm_host_error_reset = 0;
139
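/* Debug helper: dump some SoC backplane PCI core error registers
 * (SBIMErrLog/SBIMState), a window of memory controller registers, and the
 * PCI status word of every PCI device on the bus. */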
140_static void dump_pci_status_regs(char *tag)
141{
142    struct pci_dev *dev;
143    __u32 err_log, imstate, *ptr;
144    __u32 off, val;
145
146    printk("%s\n", tag);
147
148    // some backplane status regs
149    ptr = __ioremap(0x18004000+0xeb0, 4, _CACHE_UNCACHED);
150    err_log = readl(ptr);
151    ptr = __ioremap(0x18004000+0xf90, 4, _CACHE_UNCACHED);
152    imstate = readl(ptr);
153    printk("\n*** PCI SBIMErrLog = 0x%08x, PCI SBIMState = 0x%08x\n\n",
154		err_log, imstate);
155
156    printk("Memory Controller registers\n");
157    for (off = 0xe00; off <= 0xffc; off += 4) {
158	if ((off & 0x0f) == 0) {
159	    printk("\n0x%08x: ", off);
160	}
161	ptr = __ioremap(0x18008000+off, 4, _CACHE_UNCACHED);
162	val = readl(ptr);
163	printk("0x%08x,", val);
164    }
165    printk("\n\n");
166
167
168    printk("\tdevfn\tclass\t\tvendor\tdevice\tPCI Status\n");
169    pci_for_each_dev(dev) {
170        unsigned short status;
171
172        pci_read_config_word(dev, 0x06, &status);
173        printk("\t0x%04x\t0x%08x\t0x%04x\t0x%04x\t0x%04x\n",
174                dev->devfn, dev->class, dev->vendor, dev->device, status);
175    }
176
177}
178
179/*-------------------------------------------------------------------*/
180// Cleans up collected QHs, but not more than 100 in one go
181void clean_descs(uhci_t *s, int force)
182{
183	struct list_head *q;
184	uhci_desc_t *qh;
185	int now=UHCI_GET_CURRENT_FRAME(s), n=0;
186
187	q=s->free_desc.prev;
188
189	while (q != &s->free_desc && (force || n<100)) {
190		qh = list_entry (q, uhci_desc_t, horizontal);
191		q=qh->horizontal.prev;
192
193		if ((qh->last_used!=now) || force)
194			delete_qh(s,qh);
195		n++;
196	}
197}
198/*-------------------------------------------------------------------*/
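// Enable the IOC bit on the 1ms skeleton TD while unlinked URBs are waiting
// for cleanup, and on the 32ms TD while URBs with timeouts are queued;
// otherwise keep both interrupt sources off.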
199_static void uhci_switch_timer_int(uhci_t *s)
200{
201
202	if (!list_empty(&s->urb_unlinked))
203		set_td_ioc(s->td1ms);
204	else
205		clr_td_ioc(s->td1ms);
206
207	if (s->timeout_urbs)
208		set_td_ioc(s->td32ms);
209	else
210		clr_td_ioc(s->td32ms);
211	wmb();
212}
213/*-------------------------------------------------------------------*/
214#ifdef CONFIG_USB_UHCI_HIGH_BANDWIDTH
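// FSBR: clear the TERM bit in chain_end's QH link so the HC loops back
// over the control/bulk schedule for as long as this URB needs it.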
215_static void enable_desc_loop(uhci_t *s, struct urb *urb)
216{
217	unsigned long flags;
218
219	if (urb->transfer_flags & USB_NO_FSBR)
220		return;
221
222	spin_lock_irqsave (&s->qh_lock, flags);
223	s->chain_end->hw.qh.head&=cpu_to_le32(~UHCI_PTR_TERM);
224	mb();
225	s->loop_usage++;
226	((urb_priv_t*)urb->hcpriv)->use_loop=1;
227	spin_unlock_irqrestore (&s->qh_lock, flags);
228}
229/*-------------------------------------------------------------------*/
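// Drop this URB's FSBR reference; once the last user is gone, set the
// TERM bit again so the schedule no longer loops.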
230_static void disable_desc_loop(uhci_t *s, struct urb *urb)
231{
232	unsigned long flags;
233
234	if (urb->transfer_flags & USB_NO_FSBR)
235		return;
236
237	spin_lock_irqsave (&s->qh_lock, flags);
238	if (((urb_priv_t*)urb->hcpriv)->use_loop) {
239		s->loop_usage--;
240
241		if (!s->loop_usage) {
242			s->chain_end->hw.qh.head|=cpu_to_le32(UHCI_PTR_TERM);
243			mb();
244		}
245		((urb_priv_t*)urb->hcpriv)->use_loop=0;
246	}
247	spin_unlock_irqrestore (&s->qh_lock, flags);
248}
249#endif
250/*-------------------------------------------------------------------*/
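// Put the URB on the HC's active list, start its timeout clock and, for
// bulk/control transfers, turn on FSBR. Caller must hold urb_list_lock.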
251_static void queue_urb_unlocked (uhci_t *s, struct urb *urb)
252{
253	struct list_head *p=&urb->urb_list;
254#ifdef CONFIG_USB_UHCI_HIGH_BANDWIDTH
255	{
256		int type;
257		type=usb_pipetype (urb->pipe);
258
259		if ((type == PIPE_BULK) || (type == PIPE_CONTROL))
260			enable_desc_loop(s, urb);
261	}
262#endif
263	urb->status = -EINPROGRESS;
264	((urb_priv_t*)urb->hcpriv)->started=jiffies;
265	list_add (p, &s->urb_list);
266	if (urb->timeout)
267		s->timeout_urbs++;
268	uhci_switch_timer_int(s);
269}
270/*-------------------------------------------------------------------*/
271_static void queue_urb (uhci_t *s, struct urb *urb)
272{
273	unsigned long flags=0;
274
275	spin_lock_irqsave (&s->urb_list_lock, flags);
276	queue_urb_unlocked(s,urb);
277	spin_unlock_irqrestore (&s->urb_list_lock, flags);
278}
279/*-------------------------------------------------------------------*/
280_static void dequeue_urb (uhci_t *s, struct urb *urb)
281{
282#ifdef CONFIG_USB_UHCI_HIGH_BANDWIDTH
283	int type;
284
285	type=usb_pipetype (urb->pipe);
286
287	if ((type == PIPE_BULK) || (type == PIPE_CONTROL))
288		disable_desc_loop(s, urb);
289#endif
290
291	list_del (&urb->urb_list);
292	if (urb->timeout && s->timeout_urbs)
293		s->timeout_urbs--;
294
295}
296/*-------------------------------------------------------------------*/
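// Allocate a TD from the descriptor pci_pool; it starts out with a
// terminated link pointer and empty vertical/horizontal lists.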
297_static int alloc_td (uhci_t *s, uhci_desc_t ** new, int flags)
298{
299	dma_addr_t dma_handle;
300
301	*new = pci_pool_alloc(s->desc_pool, GFP_DMA | GFP_ATOMIC, &dma_handle);
302	if (!*new)
303		return -ENOMEM;
304	memset (*new, 0, sizeof (uhci_desc_t));
305	(*new)->dma_addr = dma_handle;
306	set_td_link((*new), UHCI_PTR_TERM | (flags & UHCI_PTR_BITS));	// last by default
307	(*new)->type = TD_TYPE;
308	mb();
309	INIT_LIST_HEAD (&(*new)->vertical);
310	INIT_LIST_HEAD (&(*new)->horizontal);
311
312	return 0;
313}
314/*-------------------------------------------------------------------*/
315// append a qh to td.link physically, the SW linkage is not affected
316_static void append_qh(uhci_t *s, uhci_desc_t *td, uhci_desc_t* qh, int  flags)
317{
318	unsigned long xxx;
319
320	spin_lock_irqsave (&s->td_lock, xxx);
321
322	set_td_link(td, qh->dma_addr | (flags & UHCI_PTR_DEPTH) | UHCI_PTR_QH);
323
324	mb();
325	spin_unlock_irqrestore (&s->td_lock, xxx);
326}
327/*-------------------------------------------------------------------*/
328/* insert td at last position in td-list of qh (vertical) */
329_static int insert_td (uhci_t *s, uhci_desc_t *qh, uhci_desc_t* new, int flags)
330{
331	uhci_desc_t *prev;
332	unsigned long xxx;
333
334	spin_lock_irqsave (&s->td_lock, xxx);
335
336	list_add_tail (&new->vertical, &qh->vertical);
337
338	prev = list_entry (new->vertical.prev, uhci_desc_t, vertical);
339
340	if (qh == prev ) {
341		// virgin qh without any tds
342		set_qh_element(qh, new->dma_addr | UHCI_PTR_TERM);
343	}
344	else {
		// TDs already inserted; implicitly remove the TERM bit of prev
346		set_td_link(prev, new->dma_addr | (flags & UHCI_PTR_DEPTH));
347	}
348	mb();
349	spin_unlock_irqrestore (&s->td_lock, xxx);
350
351	return 0;
352}
353/*-------------------------------------------------------------------*/
354/* insert new_td after td (horizontal) */
355_static int insert_td_horizontal (uhci_t *s, uhci_desc_t *td, uhci_desc_t* new)
356{
357	uhci_desc_t *next;
358	unsigned long flags;
359
360	spin_lock_irqsave (&s->td_lock, flags);
361
362	next = list_entry (td->horizontal.next, uhci_desc_t, horizontal);
363	list_add (&new->horizontal, &td->horizontal);
364	new->hw.td.link = td->hw.td.link;
365	set_td_link(td, new->dma_addr);
366	mb();
367	spin_unlock_irqrestore (&s->td_lock, flags);
368
369	return 0;
370}
371/*-------------------------------------------------------------------*/
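// Take a TD out of its vertical list (or its horizontal list, for TDs not
// queued below a QH); with phys_unlink set, the preceding TD/QH hardware
// link is patched as well so the HC no longer sees it.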
372_static int unlink_td (uhci_t *s, uhci_desc_t *element, int phys_unlink)
373{
374	uhci_desc_t *next, *prev;
375	int dir = 0;
376	unsigned long flags;
377
378	spin_lock_irqsave (&s->td_lock, flags);
379
380	next = list_entry (element->vertical.next, uhci_desc_t, vertical);
381
382	if (next == element) {
383		dir = 1;
384		prev = list_entry (element->horizontal.prev, uhci_desc_t, horizontal);
385	}
386	else
387		prev = list_entry (element->vertical.prev, uhci_desc_t, vertical);
388
389	if (phys_unlink) {
390		// really remove HW linking
391		if (prev->type == TD_TYPE)
392			prev->hw.td.link = element->hw.td.link;
393		else
394			prev->hw.qh.element = element->hw.td.link;
395	}
396
397	mb ();
398
399	if (dir == 0)
400		list_del (&element->vertical);
401	else
402		list_del (&element->horizontal);
403
404	spin_unlock_irqrestore (&s->td_lock, flags);
405
406	return 0;
407}
408
409/*-------------------------------------------------------------------*/
410_static int delete_desc (uhci_t *s, uhci_desc_t *element)
411{
412	pci_pool_free(s->desc_pool, element, element->dma_addr);
413	return 0;
414}
415/*-------------------------------------------------------------------*/
416// Allocates qh element
417_static int alloc_qh (uhci_t *s, uhci_desc_t ** new)
418{
419	dma_addr_t dma_handle;
420
421	*new = pci_pool_alloc(s->desc_pool, GFP_DMA | GFP_ATOMIC, &dma_handle);
422	if (!*new)
423		return -ENOMEM;
424	memset (*new, 0, sizeof (uhci_desc_t));
425	(*new)->dma_addr = dma_handle;
426	set_qh_head(*new, UHCI_PTR_TERM);
427	set_qh_element(*new, UHCI_PTR_TERM);
428	(*new)->type = QH_TYPE;
429
430	mb();
431	INIT_LIST_HEAD (&(*new)->horizontal);
432	INIT_LIST_HEAD (&(*new)->vertical);
433
434	dbg("Allocated qh @ %p", *new);
435
436	return 0;
437}
438/*-------------------------------------------------------------------*/
439// inserts new qh before/after the qh at pos
// order: 0: insert before pos, 1: insert after pos (for low speed transfers)
441_static int insert_qh (uhci_t *s, uhci_desc_t *pos, uhci_desc_t *new, int order)
442{
443	uhci_desc_t *old;
444	unsigned long flags;
445
446	spin_lock_irqsave (&s->qh_lock, flags);
447
448	if (!order) {
449		// (OLD) (POS) -> (OLD) (NEW) (POS)
450		old = list_entry (pos->horizontal.prev, uhci_desc_t, horizontal);
451		list_add_tail (&new->horizontal, &pos->horizontal);
452		set_qh_head(new, MAKE_QH_ADDR (pos)) ;
453		if (!(old->hw.qh.head & cpu_to_le32(UHCI_PTR_TERM)))
454			set_qh_head(old, MAKE_QH_ADDR (new)) ;
455	}
456	else {
457		// (POS) (OLD) -> (POS) (NEW) (OLD)
458		old = list_entry (pos->horizontal.next, uhci_desc_t, horizontal);
459		list_add (&new->horizontal, &pos->horizontal);
460		set_qh_head(new, MAKE_QH_ADDR (old));
461		set_qh_head(pos, MAKE_QH_ADDR (new)) ;
462	}
463
464	mb ();
465
466	spin_unlock_irqrestore (&s->qh_lock, flags);
467
468	return 0;
469}
470
471/*-------------------------------------------------------------------*/
472_static int unlink_qh (uhci_t *s, uhci_desc_t *element)
473{
474	uhci_desc_t  *prev;
475	unsigned long flags;
476
477	spin_lock_irqsave (&s->qh_lock, flags);
478
479	prev = list_entry (element->horizontal.prev, uhci_desc_t, horizontal);
480	prev->hw.qh.head = element->hw.qh.head;
481
482	dbg("unlink qh %p, pqh %p, nxqh %p, to %08x", element, prev,
483	    list_entry (element->horizontal.next, uhci_desc_t, horizontal),le32_to_cpu(element->hw.qh.head) &~15);
484
485	list_del(&element->horizontal);
486
487	mb ();
488	spin_unlock_irqrestore (&s->qh_lock, flags);
489
490	return 0;
491}
492/*-------------------------------------------------------------------*/
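// Free a QH together with every TD still on its vertical list; only the
// software lists are touched, so the caller must make sure the HC no
// longer references any of these descriptors.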
493_static int delete_qh (uhci_t *s, uhci_desc_t *qh)
494{
495	uhci_desc_t *td;
496	struct list_head *p;
497
498	list_del (&qh->horizontal);
499
500	while ((p = qh->vertical.next) != &qh->vertical) {
501		td = list_entry (p, uhci_desc_t, vertical);
502		dbg("unlink td @ %p",td);
503		unlink_td (s, td, 0); // no physical unlink
504		delete_desc (s, td);
505	}
506
507	delete_desc (s, qh);
508
509	return 0;
510}
511/*-------------------------------------------------------------------*/
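// Free a skeleton TD and all TDs linked behind it on its horizontal list.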
512_static void clean_td_chain (uhci_t *s, uhci_desc_t *td)
513{
514	struct list_head *p;
515	uhci_desc_t *td1;
516
517	if (!td)
518		return;
519
520	while ((p = td->horizontal.next) != &td->horizontal) {
521		td1 = list_entry (p, uhci_desc_t, horizontal);
522		delete_desc (s, td1);
523	}
524
525	delete_desc (s, td);
526}
527
528/*-------------------------------------------------------------------*/
529_static void fill_td (uhci_desc_t *td, int status, int info, __u32 buffer)
530{
531	td->hw.td.status = cpu_to_le32(status);
532	td->hw.td.info = cpu_to_le32(info);
533	td->hw.td.buffer = cpu_to_le32(buffer);
534}
535/*-------------------------------------------------------------------*/
536// Removes ALL qhs in chain (paranoia!)
537_static void cleanup_skel (uhci_t *s)
538{
539	unsigned int n;
540	uhci_desc_t *td;
541
542	dbg("cleanup_skel");
543
544	clean_descs(s,1);
545
546
547	if (s->td32ms) {
548
549		unlink_td(s,s->td32ms,1);
550		delete_desc(s, s->td32ms);
551	}
552
553	for (n = 0; n < 8; n++) {
554		td = s->int_chain[n];
555		clean_td_chain (s, td);
556	}
557
558	if (s->iso_td) {
559		for (n = 0; n < 1024; n++) {
560			td = s->iso_td[n];
561			clean_td_chain (s, td);
562		}
563		kfree (s->iso_td);
564	}
565
566	if (s->framelist)
567		pci_free_consistent(s->uhci_pci, PAGE_SIZE,
568				    s->framelist, s->framelist_dma);
569
570	if (s->control_chain) {
571		// completed init_skel?
572		struct list_head *p;
573		uhci_desc_t *qh, *qh1;
574
575		qh = s->control_chain;
576		while ((p = qh->horizontal.next) != &qh->horizontal) {
577			qh1 = list_entry (p, uhci_desc_t, horizontal);
578			delete_qh (s, qh1);
579		}
580
581		delete_qh (s, qh);
582	}
583	else {
584		if (s->ls_control_chain)
585			delete_desc (s, s->ls_control_chain);
586		if (s->control_chain)
587			delete_desc (s, s->control_chain);
588		if (s->bulk_chain)
589			delete_desc (s, s->bulk_chain);
590		if (s->chain_end)
591			delete_desc (s, s->chain_end);
592	}
593
594	if (s->desc_pool) {
595		pci_pool_destroy(s->desc_pool);
596		s->desc_pool = NULL;
597	}
598
599	dbg("cleanup_skel finished");
600}
601/*-------------------------------------------------------------------*/
602// allocates framelist and qh-skeletons
// only HW links provide continuous linking; SW links stay in their domain (ISO/INT)
604_static int init_skel (uhci_t *s)
605{
606	int n, ret;
607	uhci_desc_t *qh, *td;
608
609	dbg("init_skel");
610
611	s->framelist = pci_alloc_consistent(s->uhci_pci, PAGE_SIZE,
612					    &s->framelist_dma);
613
614	if (!s->framelist)
615		return -ENOMEM;
616
617	memset (s->framelist, 0, 4096);
618
619	dbg("creating descriptor pci_pool");
620
621	s->desc_pool = pci_pool_create("uhci_desc", s->uhci_pci,
622				       sizeof(uhci_desc_t), 16, 0,
623				       GFP_DMA | GFP_ATOMIC);
624	if (!s->desc_pool)
625		goto init_skel_cleanup;
626
627	dbg("allocating iso desc pointer list");
628	s->iso_td = (uhci_desc_t **) kmalloc (1024 * sizeof (uhci_desc_t*), GFP_KERNEL);
629
630	if (!s->iso_td)
631		goto init_skel_cleanup;
632
633	s->ls_control_chain = NULL;
634	s->control_chain = NULL;
635	s->bulk_chain = NULL;
636	s->chain_end = NULL;
637
638	dbg("allocating iso descs");
639	for (n = 0; n < 1024; n++) {
640	 	// allocate skeleton iso/irq-tds
641		if (alloc_td (s, &td, 0))
642			goto init_skel_cleanup;
643
644		s->iso_td[n] = td;
645		s->framelist[n] = cpu_to_le32((__u32) td->dma_addr);
646	}
647
648	dbg("allocating qh: chain_end");
649	if (alloc_qh (s, &qh))
650		goto init_skel_cleanup;
651
652	s->chain_end = qh;
653
654	if (alloc_td (s, &td, 0))
655		goto init_skel_cleanup;
656
657	fill_td (td, 0 * TD_CTRL_IOC, 0, 0); // generate 1ms interrupt (enabled on demand)
658	insert_td (s, qh, td, 0);
659	qh->hw.qh.element &= cpu_to_le32(~UHCI_PTR_TERM); // remove TERM bit
660	s->td1ms=td;
661
662	dbg("allocating qh: bulk_chain");
663	if (alloc_qh (s, &qh))
664		goto init_skel_cleanup;
665
666	insert_qh (s, s->chain_end, qh, 0);
667	s->bulk_chain = qh;
668
669	dbg("allocating qh: control_chain");
670	ret = alloc_qh (s, &qh);
671	if (ret)
672		goto init_skel_cleanup;
673
674	insert_qh (s, s->bulk_chain, qh, 0);
675	s->control_chain = qh;
676
677#ifdef	CONFIG_USB_UHCI_HIGH_BANDWIDTH
	// reclamation loop target; disabled via the TERM bit until FSBR is enabled
679	set_qh_head(s->chain_end, s->control_chain->dma_addr | UHCI_PTR_QH | UHCI_PTR_TERM);
680#endif
681
682	dbg("allocating qh: ls_control_chain");
683	if (alloc_qh (s, &qh))
684		goto init_skel_cleanup;
685
686	insert_qh (s, s->control_chain, qh, 0);
687	s->ls_control_chain = qh;
688
689	for (n = 0; n < 8; n++)
690		s->int_chain[n] = 0;
691
692	dbg("allocating skeleton INT-TDs");
693
694	for (n = 0; n < 8; n++) {
695		uhci_desc_t *td;
696
697		if (alloc_td (s, &td, 0))
698			goto init_skel_cleanup;
699
700		s->int_chain[n] = td;
701		if (n == 0) {
702			set_td_link(s->int_chain[0], s->ls_control_chain->dma_addr | UHCI_PTR_QH);
703		}
704		else {
705			set_td_link(s->int_chain[n], s->int_chain[0]->dma_addr);
706		}
707	}
708
709	dbg("Linking skeleton INT-TDs");
710
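	/* Each frame's dummy iso TD points to exactly one interrupt chain:
	 * frame n links to int_chain[o] when (n & (2^o - 1)) == 2^(o-1) - 1,
	 * and to int_chain[0] when (n & 127) == 127.  Since every other chain
	 * head points back to int_chain[0], a TD queued at int_chain[o] is
	 * reached every 2^o-th frame, while int_chain[0] and the QH skeleton
	 * behind it are reached in every frame. */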
711	for (n = 0; n < 1024; n++) {
712		// link all iso-tds to the interrupt chains
713		int m, o;
714		dbg("framelist[%i]=%x",n,le32_to_cpu(s->framelist[n]));
715		if ((n&127)==127)
716			((uhci_desc_t*) s->iso_td[n])->hw.td.link = cpu_to_le32(s->int_chain[0]->dma_addr);
717		else
718			for (o = 1, m = 2; m <= 128; o++, m += m)
719				if ((n & (m - 1)) == ((m - 1) / 2))
720					set_td_link(((uhci_desc_t*) s->iso_td[n]), s->int_chain[o]->dma_addr);
721	}
722
723	if (alloc_td (s, &td, 0))
724		goto init_skel_cleanup;
725
726	fill_td (td, 0 * TD_CTRL_IOC, 0, 0); // generate 32ms interrupt (activated later)
727	s->td32ms=td;
728
729	insert_td_horizontal (s, s->int_chain[5], td);
730
731	mb();
732	//uhci_show_queue(s->control_chain);
733	dbg("init_skel exit");
734	return 0;
735
736      init_skel_cleanup:
737	cleanup_skel (s);
738	return -ENOMEM;
739}
740
741/*-------------------------------------------------------------------*/
742//                         LOW LEVEL STUFF
//          assembles QHs and TDs for control, bulk and iso
744/*-------------------------------------------------------------------*/
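// Build a control transfer: one SETUP TD, data-stage TDs of at most
// maxpacket bytes each (toggling DATA0/DATA1) and a final status TD with
// IOC set; all TDs hang off one newly allocated QH, which is then linked
// into the schedule (low speed ahead of full speed).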
745_static int uhci_submit_control_urb (struct urb *urb)
746{
747	uhci_desc_t *qh, *td;
748	uhci_t *s = (uhci_t*) urb->dev->bus->hcpriv;
749	urb_priv_t *urb_priv = urb->hcpriv;
750	unsigned long destination, status;
751	int maxsze = usb_maxpacket (urb->dev, urb->pipe, usb_pipeout (urb->pipe));
752	unsigned long len;
753	char *data;
754	int depth_first=USE_CTRL_DEPTH_FIRST;  // UHCI descriptor chasing method
755
756	dbg("uhci_submit_control start");
757	if (alloc_qh (s, &qh))		// alloc qh for this request
758		return -ENOMEM;
759
760	if (alloc_td (s, &td, UHCI_PTR_DEPTH * depth_first))		// get td for setup stage
761	{
762		delete_qh (s, qh);
763		return -ENOMEM;
764	}
765
766	/* The "pipe" thing contains the destination in bits 8--18 */
767	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
768
769	/* 3 errors */
770	status = (urb->pipe & TD_CTRL_LS) | TD_CTRL_ACTIVE |
771		(urb->transfer_flags & USB_DISABLE_SPD ? 0 : TD_CTRL_SPD) | (3 << 27);
772
773	/*  Build the TD for the control request, try forever, 8 bytes of data */
774	fill_td (td, status, destination | (7 << 21), urb_priv->setup_packet_dma);
775
776	insert_td (s, qh, td, 0);	// queue 'setup stage'-td in qh
777#if 0
778	{
779		char *sp=urb->setup_packet;
780		dbg("SETUP to pipe %x: %x %x %x %x %x %x %x %x", urb->pipe,
781		    sp[0],sp[1],sp[2],sp[3],sp[4],sp[5],sp[6],sp[7]);
782	}
783	//uhci_show_td(td);
784#endif
785
786	len = urb->transfer_buffer_length;
787	data = urb->transfer_buffer;
788
789	/* If direction is "send", change the frame from SETUP (0x2D)
790	   to OUT (0xE1). Else change it from SETUP to IN (0x69). */
791
792	destination = (urb->pipe & PIPE_DEVEP_MASK) | (usb_pipeout (urb->pipe)?USB_PID_OUT:USB_PID_IN);
793
794	while (len > 0) {
795		int pktsze = len;
796
797		if (alloc_td (s, &td, UHCI_PTR_DEPTH * depth_first))
798			goto fail_unmap_enomem;
799
800		if (pktsze > maxsze)
801			pktsze = maxsze;
802
803		destination ^= 1 << TD_TOKEN_TOGGLE;	// toggle DATA0/1
804
805		// Status, pktsze bytes of data
806		fill_td (td, status, destination | ((pktsze - 1) << 21),
807			 urb_priv->transfer_buffer_dma + (data - (char *)urb->transfer_buffer));
808
809		insert_td (s, qh, td, UHCI_PTR_DEPTH * depth_first);	// queue 'data stage'-td in qh
810
811		data += pktsze;
812		len -= pktsze;
813	}
814
815	/* Build the final TD for control status */
	/* The status stage is IN if the pipe is OUT, or if no data was expected */
817
818	destination &= ~UHCI_PID;
819
820	if (usb_pipeout (urb->pipe) || (urb->transfer_buffer_length == 0))
821		destination |= USB_PID_IN;
822	else
823		destination |= USB_PID_OUT;
824
825	destination |= 1 << TD_TOKEN_TOGGLE;	/* End in Data1 */
826
827	if (alloc_td (s, &td, UHCI_PTR_DEPTH))
828		goto fail_unmap_enomem;
829
830	status &=~TD_CTRL_SPD;
831
832	/* no limit on errors on final packet , 0 bytes of data */
833	fill_td (td, status | TD_CTRL_IOC, destination | (UHCI_NULL_DATA_SIZE << 21),
834		 0);
835
836	insert_td (s, qh, td, UHCI_PTR_DEPTH * depth_first);	// queue status td
837
838	list_add (&qh->desc_list, &urb_priv->desc_list);
839
840	queue_urb (s, urb);	// queue before inserting in desc chain
841
842	qh->hw.qh.element &= cpu_to_le32(~UHCI_PTR_TERM);
843
844	//uhci_show_queue(qh);
845	/* Start it up... put low speed first */
846	if (urb->pipe & TD_CTRL_LS)
847		insert_qh (s, s->control_chain, qh, 0);
848	else
849		insert_qh (s, s->bulk_chain, qh, 0);
850
851	dbg("uhci_submit_control end");
852	return 0;
853
854fail_unmap_enomem:
855	delete_qh(s, qh);
856	return -ENOMEM;
857}
858/*-------------------------------------------------------------------*/
// For queued bulk transfers, two additional helper QHs are allocated (nqh, bqh).
// Because of the linking with other bulk URBs, the caller must hold urb_list_lock!
861
862_static int uhci_submit_bulk_urb (struct urb *urb, struct urb *bulk_urb)
863{
864	uhci_t *s = (uhci_t*) urb->dev->bus->hcpriv;
865	urb_priv_t *urb_priv = urb->hcpriv, *upriv, *bpriv=NULL;
866	uhci_desc_t *qh, *td, *nqh=NULL, *bqh=NULL, *first_td=NULL;
867	unsigned long destination, status;
868	char *data;
869	unsigned int pipe = urb->pipe;
870	int maxsze = usb_maxpacket (urb->dev, pipe, usb_pipeout (pipe));
871	int info, len, last;
872	int depth_first=USE_BULK_DEPTH_FIRST;  // UHCI descriptor chasing method
873
874	if (usb_endpoint_halted (urb->dev, usb_pipeendpoint (pipe), usb_pipeout (pipe)))
875		return -EPIPE;
876
877	queue_dbg("uhci_submit_bulk_urb: urb %p, old %p, pipe %08x, len %i",
878		  urb,bulk_urb,urb->pipe,urb->transfer_buffer_length);
879
880	upriv = (urb_priv_t*)urb->hcpriv;
881
882	if (!bulk_urb) {
883		if (alloc_qh (s, &qh))		// get qh for this request
884			return -ENOMEM;
885
886		if (urb->transfer_flags & USB_QUEUE_BULK) {
887			if (alloc_qh(s, &nqh)) // placeholder for clean unlink
888			{
889				delete_desc (s, qh);
890				return -ENOMEM;
891			}
892			upriv->next_qh = nqh;
893			queue_dbg("new next qh %p",nqh);
894		}
895	}
896	else {
897		bpriv = (urb_priv_t*)bulk_urb->hcpriv;
898		qh = bpriv->bottom_qh;  // re-use bottom qh and next qh
899		nqh = bpriv->next_qh;
900		upriv->next_qh=nqh;
901		upriv->prev_queued_urb=bulk_urb;
902	}
903
904	if (urb->transfer_flags & USB_QUEUE_BULK) {
905		if (alloc_qh (s, &bqh))  // "bottom" QH
906		{
907			if (!bulk_urb) {
908				delete_desc(s, qh);
909				delete_desc(s, nqh);
910			}
911			return -ENOMEM;
912		}
913		set_qh_element(bqh, UHCI_PTR_TERM);
		set_qh_head(bqh, nqh->dma_addr | UHCI_PTR_QH); // link to the next helper qh
915		upriv->bottom_qh = bqh;
916	}
917	queue_dbg("uhci_submit_bulk: qh %p bqh %p nqh %p",qh, bqh, nqh);
918
919	/* The "pipe" thing contains the destination in bits 8--18. */
920	destination = (pipe & PIPE_DEVEP_MASK) | usb_packetid (pipe);
921
922	/* 3 errors */
923	status = (pipe & TD_CTRL_LS) | TD_CTRL_ACTIVE |
924		((urb->transfer_flags & USB_DISABLE_SPD) ? 0 : TD_CTRL_SPD) | (3 << 27);
925
926	/* Build the TDs for the bulk request */
927	len = urb->transfer_buffer_length;
928	data = urb->transfer_buffer;
929
930	do {					// TBD: Really allow zero-length packets?
931		int pktsze = len;
932
933		if (alloc_td (s, &td, UHCI_PTR_DEPTH * depth_first))
934		{
935			delete_qh (s, qh);
936			return -ENOMEM;
937		}
938
939		if (pktsze > maxsze)
940			pktsze = maxsze;
941
942		// pktsze bytes of data
943		info = destination | (((pktsze - 1)&UHCI_NULL_DATA_SIZE) << 21) |
944			(usb_gettoggle (urb->dev, usb_pipeendpoint (pipe), usb_pipeout (pipe)) << TD_TOKEN_TOGGLE);
945
946		fill_td (td, status, info,
947			 urb_priv->transfer_buffer_dma + (data - (char *)urb->transfer_buffer));
948
949		data += pktsze;
950		len -= pktsze;
		// USB_ZERO_PACKET makes bulk OUTs always finish with a zero-length packet
952		last = (len == 0 && (usb_pipein(pipe) || pktsze < maxsze || !(urb->transfer_flags & USB_ZERO_PACKET)));
953
954		if (last)
955			set_td_ioc(td);	// last one generates INT
956
957		insert_td (s, qh, td, UHCI_PTR_DEPTH * depth_first);
958		if (!first_td)
959			first_td=td;
960		usb_dotoggle (urb->dev, usb_pipeendpoint (pipe), usb_pipeout (pipe));
961
962	} while (!last);
963
964	if (bulk_urb && bpriv)   // everything went OK, link with old bulk URB
965		bpriv->next_queued_urb=urb;
966
967	list_add (&qh->desc_list, &urb_priv->desc_list);
968
969	if (urb->transfer_flags & USB_QUEUE_BULK)
970		append_qh(s, td, bqh, UHCI_PTR_DEPTH * depth_first);
971
972	queue_urb_unlocked (s, urb);
973
974	if (urb->transfer_flags & USB_QUEUE_BULK)
975		set_qh_element(qh, first_td->dma_addr);
976	else
977		qh->hw.qh.element &= cpu_to_le32(~UHCI_PTR_TERM);    // arm QH
978
979	if (!bulk_urb) { 					// new bulk queue
980		if (urb->transfer_flags & USB_QUEUE_BULK) {
981			spin_lock (&s->td_lock);		// both QHs in one go
982			insert_qh (s, s->chain_end, qh, 0);	// Main QH
983			insert_qh (s, s->chain_end, nqh, 0);	// Helper QH
984			spin_unlock (&s->td_lock);
985		}
986		else
987			insert_qh (s, s->chain_end, qh, 0);
988	}
989
990	//uhci_show_queue(s->bulk_chain);
991	//dbg("uhci_submit_bulk_urb: exit\n");
992	return 0;
993}
994/*-------------------------------------------------------------------*/
995_static void uhci_clean_iso_step1(uhci_t *s, urb_priv_t *urb_priv)
996{
997	struct list_head *p;
998	uhci_desc_t *td;
999
1000	for (p = urb_priv->desc_list.next; p != &urb_priv->desc_list; p = p->next) {
1001				td = list_entry (p, uhci_desc_t, desc_list);
1002				unlink_td (s, td, 1);
1003	}
1004}
1005/*-------------------------------------------------------------------*/
1006_static void uhci_clean_iso_step2(uhci_t *s, urb_priv_t *urb_priv)
1007{
1008	struct list_head *p;
1009	uhci_desc_t *td;
1010
1011	while ((p = urb_priv->desc_list.next) != &urb_priv->desc_list) {
1012				td = list_entry (p, uhci_desc_t, desc_list);
1013				list_del (p);
1014				delete_desc (s, td);
1015	}
1016}
1017/*-------------------------------------------------------------------*/
1018/* mode: CLEAN_TRANSFER_NO_DELETION: unlink but no deletion mark (step 1 of async_unlink)
1019         CLEAN_TRANSFER_REGULAR: regular (unlink/delete-mark)
1020         CLEAN_TRANSFER_DELETION_MARK: deletion mark for QH (step 2 of async_unlink)
1021 looks a bit complicated because of all the bulk queueing goodies
1022*/
1023
1024_static void uhci_clean_transfer (uhci_t *s, struct urb *urb, uhci_desc_t *qh, int mode)
1025{
1026	uhci_desc_t *bqh, *nqh, *prevqh, *prevtd;
1027	int now;
1028	urb_priv_t *priv=(urb_priv_t*)urb->hcpriv;
1029
1030	now=UHCI_GET_CURRENT_FRAME(s);
1031
1032	bqh=priv->bottom_qh;
1033
1034	if (!priv->next_queued_urb)  { // no more appended bulk queues
1035
1036		queue_dbg("uhci_clean_transfer: No more bulks for urb %p, qh %p, bqh %p, nqh %p", urb, qh, bqh, priv->next_qh);
1037
1038		if (priv->prev_queued_urb && mode != CLEAN_TRANSFER_DELETION_MARK) {  // qh not top of the queue
1039				unsigned long flags;
1040				urb_priv_t* ppriv=(urb_priv_t*)priv->prev_queued_urb->hcpriv;
1041
1042				spin_lock_irqsave (&s->qh_lock, flags);
1043				prevqh = list_entry (ppriv->desc_list.next, uhci_desc_t, desc_list);
1044				prevtd = list_entry (prevqh->vertical.prev, uhci_desc_t, vertical);
1045				set_td_link(prevtd, priv->bottom_qh->dma_addr | UHCI_PTR_QH); // skip current qh
1046				mb();
1047				queue_dbg("uhci_clean_transfer: relink pqh %p, ptd %p",prevqh, prevtd);
1048				spin_unlock_irqrestore (&s->qh_lock, flags);
1049
1050				ppriv->bottom_qh = priv->bottom_qh;
1051				ppriv->next_queued_urb = NULL;
1052			}
1053		else {   // queue is dead, qh is top of the queue
1054
1055			if (mode != CLEAN_TRANSFER_DELETION_MARK)
1056				unlink_qh(s, qh); // remove qh from horizontal chain
1057
			if (bqh) {  // remove the remains of the bulk queue
1059				nqh=priv->next_qh;
1060
1061				if (mode != CLEAN_TRANSFER_DELETION_MARK)
1062					unlink_qh(s, nqh);  // remove nqh from horizontal chain
1063
1064				if (mode != CLEAN_TRANSFER_NO_DELETION) {  // add helper QHs to free desc list
1065					nqh->last_used = bqh->last_used = now;
1066					list_add_tail (&nqh->horizontal, &s->free_desc);
1067					list_add_tail (&bqh->horizontal, &s->free_desc);
1068				}
1069			}
1070		}
1071	}
1072	else { // there are queued urbs following
1073
1074	  queue_dbg("uhci_clean_transfer: urb %p, prevurb %p, nexturb %p, qh %p, bqh %p, nqh %p",
1075		       urb, priv->prev_queued_urb,  priv->next_queued_urb, qh, bqh, priv->next_qh);
1076
1077		if (mode != CLEAN_TRANSFER_DELETION_MARK) {	// no work for cleanup at unlink-completion
1078			struct urb *nurb;
1079			unsigned long flags;
1080
1081			nurb = priv->next_queued_urb;
1082			spin_lock_irqsave (&s->qh_lock, flags);
1083
1084			if (!priv->prev_queued_urb) { // top QH
1085
1086				prevqh = list_entry (qh->horizontal.prev, uhci_desc_t, horizontal);
1087				set_qh_head(prevqh, bqh->dma_addr | UHCI_PTR_QH);
				list_del (&qh->horizontal);  // remove this qh from the horizontal chain
1089				list_add (&bqh->horizontal, &prevqh->horizontal); // insert next bqh in horizontal chain
1090			}
1091			else {		// intermediate QH
1092				urb_priv_t* ppriv=(urb_priv_t*)priv->prev_queued_urb->hcpriv;
1093				urb_priv_t* npriv=(urb_priv_t*)nurb->hcpriv;
1094				uhci_desc_t * bnqh;
1095
1096				bnqh = list_entry (npriv->desc_list.next, uhci_desc_t, desc_list);
1097				ppriv->bottom_qh = bnqh;
1098				ppriv->next_queued_urb = nurb;
1099				prevqh = list_entry (ppriv->desc_list.next, uhci_desc_t, desc_list);
1100				set_qh_head(prevqh, bqh->dma_addr | UHCI_PTR_QH);
1101			}
1102
1103			mb();
1104			((urb_priv_t*)nurb->hcpriv)->prev_queued_urb=priv->prev_queued_urb;
1105			spin_unlock_irqrestore (&s->qh_lock, flags);
1106		}
1107	}
1108
1109	if (mode != CLEAN_TRANSFER_NO_DELETION) {
1110		qh->last_used = now;
1111		list_add_tail (&qh->horizontal, &s->free_desc); // mark qh for later deletion/kfree
1112	}
1113}
1114/*-------------------------------------------------------------------*/
1115// Release bandwidth for Interrupt or Isoc. transfers
1116_static void uhci_release_bandwidth(struct urb *urb)
1117{
1118	if (urb->bandwidth) {
1119		switch (usb_pipetype(urb->pipe)) {
1120		case PIPE_INTERRUPT:
1121			usb_release_bandwidth (urb->dev, urb, 0);
1122			break;
1123		case PIPE_ISOCHRONOUS:
1124			usb_release_bandwidth (urb->dev, urb, 1);
1125			break;
1126		default:
1127			break;
1128		}
1129	}
1130}
1131
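/* Sync/unmap the PCI DMA mappings that uhci_submit_urb() set up for the
 * setup packet and the transfer buffer. */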
1132_static void uhci_urb_dma_sync(uhci_t *s, struct urb *urb, urb_priv_t *urb_priv)
1133{
1134	if (urb_priv->setup_packet_dma)
1135		pci_dma_sync_single(s->uhci_pci, urb_priv->setup_packet_dma,
1136				    sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
1137
1138	if (urb_priv->transfer_buffer_dma)
1139		pci_dma_sync_single(s->uhci_pci, urb_priv->transfer_buffer_dma,
1140				    urb->transfer_buffer_length,
1141				    usb_pipein(urb->pipe) ?
1142				    PCI_DMA_FROMDEVICE :
1143				    PCI_DMA_TODEVICE);
1144}
1145
1146_static void uhci_urb_dma_unmap(uhci_t *s, struct urb *urb, urb_priv_t *urb_priv)
1147{
1148	if (urb_priv->setup_packet_dma) {
1149		pci_unmap_single(s->uhci_pci, urb_priv->setup_packet_dma,
1150				 sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
1151		urb_priv->setup_packet_dma = 0;
1152	}
1153	if (urb_priv->transfer_buffer_dma) {
1154		pci_unmap_single(s->uhci_pci, urb_priv->transfer_buffer_dma,
1155				 urb->transfer_buffer_length,
1156				 usb_pipein(urb->pipe) ?
1157				 PCI_DMA_FROMDEVICE :
1158				 PCI_DMA_TODEVICE);
1159		urb_priv->transfer_buffer_dma = 0;
1160	}
1161}
1162/*-------------------------------------------------------------------*/
1163/* needs urb_list_lock!
1164   mode: UNLINK_ASYNC_STORE_URB: unlink and move URB into unlinked list
1165         UNLINK_ASYNC_DONT_STORE: unlink, don't move URB into unlinked list
1166*/
1167_static int uhci_unlink_urb_async (uhci_t *s,struct urb *urb, int mode)
1168{
1169	uhci_desc_t *qh;
1170	urb_priv_t *urb_priv;
1171
1172	async_dbg("unlink_urb_async called %p",urb);
1173
1174	if ((urb->status == -EINPROGRESS) ||
1175	    ((usb_pipetype (urb->pipe) ==  PIPE_INTERRUPT) && ((urb_priv_t*)urb->hcpriv)->flags))
1176	{
1177		((urb_priv_t*)urb->hcpriv)->started = ~0;  // mark
1178		dequeue_urb (s, urb);
1179
1180		if (mode==UNLINK_ASYNC_STORE_URB)
1181			list_add_tail (&urb->urb_list, &s->urb_unlinked); // store urb
1182
1183		uhci_switch_timer_int(s);
1184       		s->unlink_urb_done = 1;
1185		uhci_release_bandwidth(urb);
1186
1187		urb->status = -ECONNABORTED;	// mark urb as "waiting to be killed"
1188		urb_priv = (urb_priv_t*)urb->hcpriv;
1189
1190		switch (usb_pipetype (urb->pipe)) {
1191		case PIPE_INTERRUPT:
1192			usb_dotoggle (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe));
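			/* fall through: the interrupt TD is unlinked like an iso TD */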
1193
1194		case PIPE_ISOCHRONOUS:
1195			uhci_clean_iso_step1 (s, urb_priv);
1196			break;
1197
1198		case PIPE_BULK:
1199		case PIPE_CONTROL:
1200			qh = list_entry (urb_priv->desc_list.next, uhci_desc_t, desc_list);
1201			uhci_clean_transfer (s, urb, qh, CLEAN_TRANSFER_NO_DELETION);
1202			break;
1203		}
1204		((urb_priv_t*)urb->hcpriv)->started = UHCI_GET_CURRENT_FRAME(s);
1205		return -EINPROGRESS;  // completion will follow
1206	}
1207
1208	return 0;    // URB already dead
1209}
1210/*-------------------------------------------------------------------*/
1211// kills an urb by unlinking descriptors and waiting for at least one frame
1212_static int uhci_unlink_urb_sync (uhci_t *s, struct urb *urb)
1213{
1214	uhci_desc_t *qh;
1215	urb_priv_t *urb_priv;
1216	unsigned long flags=0;
1217	struct usb_device *usb_dev;
1218
1219	spin_lock_irqsave (&s->urb_list_lock, flags);
1220
1221	if (urb->status == -EINPROGRESS) {
1222
1223		// move descriptors out of the running chains, dequeue urb
1224		uhci_unlink_urb_async(s, urb, UNLINK_ASYNC_DONT_STORE);
1225
1226		urb_priv = urb->hcpriv;
		urb->status = -ENOENT;	// prevent double deletion after unlock
1228		spin_unlock_irqrestore (&s->urb_list_lock, flags);
1229
1230		// cleanup the rest
1231		switch (usb_pipetype (urb->pipe)) {
1232
1233		case PIPE_INTERRUPT:
1234		case PIPE_ISOCHRONOUS:
1235			uhci_wait_ms(1);
1236			uhci_clean_iso_step2(s, urb_priv);
1237			break;
1238
1239		case PIPE_BULK:
1240		case PIPE_CONTROL:
1241			qh = list_entry (urb_priv->desc_list.next, uhci_desc_t, desc_list);
1242			uhci_clean_transfer(s, urb, qh, CLEAN_TRANSFER_DELETION_MARK);
1243			uhci_wait_ms(1);
1244		}
1245		urb->status = -ENOENT;	// mark urb as killed
1246
1247		uhci_urb_dma_unmap(s, urb, urb->hcpriv);
1248
1249#ifdef DEBUG_SLAB
1250		kmem_cache_free (urb_priv_kmem, urb->hcpriv);
1251#else
1252		kfree (urb->hcpriv);
1253#endif
1254		usb_dev = urb->dev;
1255		if (urb->complete) {
1256			dbg("unlink_urb: calling completion");
1257			urb->dev = NULL;
1258			urb->complete ((struct urb *) urb);
1259		}
1260		usb_dec_dev_use (usb_dev);
1261	}
1262	else
1263		spin_unlock_irqrestore (&s->urb_list_lock, flags);
1264
1265	return 0;
1266}
1267/*-------------------------------------------------------------------*/
1268// async unlink_urb completion/cleanup work
1269// has to be protected by urb_list_lock!
// If USB_TIMEOUT_KILLED is set in transfer_flags, the resulting status of
// the killed transaction is not overwritten
1272
1273_static void uhci_cleanup_unlink(uhci_t *s, int force)
1274{
1275	struct list_head *q;
1276	struct urb *urb;
1277	struct usb_device *dev;
1278	int now, type;
1279	urb_priv_t *urb_priv;
1280
1281	q=s->urb_unlinked.next;
1282	now=UHCI_GET_CURRENT_FRAME(s);
1283
1284	while (q != &s->urb_unlinked) {
1285
1286		urb = list_entry (q, struct urb, urb_list);
1287
1288		urb_priv = (urb_priv_t*)urb->hcpriv;
1289		q = urb->urb_list.next;
1290
1291		if (!urb_priv) // avoid crash when URB is corrupted
1292			break;
1293
1294		if (force || ((urb_priv->started != ~0) && (urb_priv->started != now))) {
1295			async_dbg("async cleanup %p",urb);
1296			type=usb_pipetype (urb->pipe);
1297
1298			switch (type) { // process descriptors
1299			case PIPE_CONTROL:
1300				process_transfer (s, urb, CLEAN_TRANSFER_DELETION_MARK);  // don't unlink (already done)
1301				break;
1302			case PIPE_BULK:
1303				if (!s->avoid_bulk.counter)
1304					process_transfer (s, urb, CLEAN_TRANSFER_DELETION_MARK); // don't unlink (already done)
1305				else
1306					continue;
1307				break;
1308			case PIPE_ISOCHRONOUS:
1309				process_iso (s, urb, PROCESS_ISO_FORCE); // force, don't unlink
1310				break;
1311			case PIPE_INTERRUPT:
1312				process_interrupt (s, urb);
1313				break;
1314			}
1315
1316			if (!(urb->transfer_flags & USB_TIMEOUT_KILLED))
1317		  		urb->status = -ECONNRESET; // mark as asynchronously killed
1318
1319			dev = urb->dev;	// completion may destroy all...
1320			urb_priv = urb->hcpriv;
1321			list_del (&urb->urb_list);
1322
1323			uhci_urb_dma_sync(s, urb, urb_priv);
1324			if (urb->complete) {
1325				spin_unlock(&s->urb_list_lock);
1326				urb->dev = NULL;
1327				urb->complete ((struct urb *) urb);
1328				spin_lock(&s->urb_list_lock);
1329			}
1330
1331			if (!(urb->transfer_flags & USB_TIMEOUT_KILLED))
1332				urb->status = -ENOENT;  // now the urb is really dead
1333
1334			switch (type) {
1335			case PIPE_ISOCHRONOUS:
1336			case PIPE_INTERRUPT:
1337				uhci_clean_iso_step2(s, urb_priv);
1338				break;
1339			}
1340
1341			uhci_urb_dma_unmap(s, urb, urb_priv);
1342
1343			usb_dec_dev_use (dev);
1344#ifdef DEBUG_SLAB
1345			kmem_cache_free (urb_priv_kmem, urb_priv);
1346#else
1347			kfree (urb_priv);
1348#endif
1349
1350		}
1351	}
1352}
1353
1354/*-------------------------------------------------------------------*/
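// usb_unlink_urb() entry point: root hub URBs go to the virtual hub code;
// everything else is unlinked asynchronously (USB_ASYNC_UNLINK) or
// synchronously.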
1355_static int uhci_unlink_urb (struct urb *urb)
1356{
1357	uhci_t *s;
1358	unsigned long flags=0;
1359	dbg("uhci_unlink_urb called for %p",urb);
1360	if (!urb || !urb->dev)		// you never know...
1361		return -EINVAL;
1362
1363	s = (uhci_t*) urb->dev->bus->hcpriv;
1364
1365	if (usb_pipedevice (urb->pipe) == s->rh.devnum)
1366		return rh_unlink_urb (urb);
1367
1368	if (!urb->hcpriv)
1369		return -EINVAL;
1370
1371	if (urb->transfer_flags & USB_ASYNC_UNLINK) {
1372		int ret;
1373       		spin_lock_irqsave (&s->urb_list_lock, flags);
1374
1375		uhci_release_bandwidth(urb);
1376		ret = uhci_unlink_urb_async(s, urb, UNLINK_ASYNC_STORE_URB);
1377
1378		spin_unlock_irqrestore (&s->urb_list_lock, flags);
1379		return ret;
1380	}
1381	else
1382		return uhci_unlink_urb_sync(s, urb);
1383}
1384/*-------------------------------------------------------------------*/
1385// In case of ASAP iso transfer, search the URB-list for already queued URBs
1386// for this EP and calculate the earliest start frame for the new
1387// URB (easy seamless URB continuation!)
1388_static int find_iso_limits (struct urb *urb, unsigned int *start, unsigned int *end)
1389{
1390	struct urb *u, *last_urb = NULL;
1391	uhci_t *s = (uhci_t*) urb->dev->bus->hcpriv;
1392	struct list_head *p;
1393	int ret=-1;
1394	unsigned long flags;
1395
1396	spin_lock_irqsave (&s->urb_list_lock, flags);
1397	p=s->urb_list.prev;
1398
1399	for (; p != &s->urb_list; p = p->prev) {
1400		u = list_entry (p, struct urb, urb_list);
1401		// look for pending URBs with identical pipe handle
1402		// works only because iso doesn't toggle the data bit!
1403		if ((urb->pipe == u->pipe) && (urb->dev == u->dev) && (u->status == -EINPROGRESS)) {
1404			if (!last_urb)
1405				*start = u->start_frame;
1406			last_urb = u;
1407		}
1408	}
1409
1410	if (last_urb) {
1411		*end = (last_urb->start_frame + last_urb->number_of_packets) & 1023;
1412		ret=0;
1413	}
1414
1415	spin_unlock_irqrestore(&s->urb_list_lock, flags);
1416
1417	return ret;
1418}
1419/*-------------------------------------------------------------------*/
1420// adjust start_frame according to scheduling constraints (ASAP etc)
1421
1422_static int iso_find_start (struct urb *urb)
1423{
1424	uhci_t *s = (uhci_t*) urb->dev->bus->hcpriv;
1425	unsigned int now;
1426	unsigned int start_limit = 0, stop_limit = 0, queued_size;
1427	int limits;
1428
1429	now = UHCI_GET_CURRENT_FRAME (s) & 1023;
1430
1431	if ((unsigned) urb->number_of_packets > 900)
1432		return -EFBIG;
1433
1434	limits = find_iso_limits (urb, &start_limit, &stop_limit);
1435	queued_size = (stop_limit - start_limit) & 1023;
1436
1437	if (urb->transfer_flags & USB_ISO_ASAP) {
		// first iso URB on this pipe (nothing queued yet)
1439		if (limits) {
1440			// 10ms setup should be enough //FIXME!
1441			urb->start_frame = (now + 10) & 1023;
1442		}
1443		else {
1444			urb->start_frame = stop_limit;		//seamless linkage
1445
1446			if (((now - urb->start_frame) & 1023) <= (unsigned) urb->number_of_packets) {
1447				info("iso_find_start: gap in seamless isochronous scheduling");
1448				dbg("iso_find_start: now %u start_frame %u number_of_packets %u pipe 0x%08x",
1449					now, urb->start_frame, urb->number_of_packets, urb->pipe);
1450				urb->start_frame = (now + 5) & 1023;	// 5ms setup should be enough //FIXME!
1451			}
1452		}
1453	}
1454	else {
1455		urb->start_frame &= 1023;
1456		if (((now - urb->start_frame) & 1023) < (unsigned) urb->number_of_packets) {
1457			dbg("iso_find_start: now between start_frame and end");
1458			return -EAGAIN;
1459		}
1460	}
1461
1462	/* check if either start_frame or start_frame+number_of_packets-1 lies between start_limit and stop_limit */
1463	if (limits)
1464		return 0;
1465
1466	if (((urb->start_frame - start_limit) & 1023) < queued_size ||
1467	    ((urb->start_frame + urb->number_of_packets - 1 - start_limit) & 1023) < queued_size) {
1468		dbg("iso_find_start: start_frame %u number_of_packets %u start_limit %u stop_limit %u",
1469			urb->start_frame, urb->number_of_packets, start_limit, stop_limit);
1470		return -EAGAIN;
1471	}
1472
1473	return 0;
1474}
1475/*-------------------------------------------------------------------*/
// submits a USB interrupt transfer (i.e. polling ;-)
// ASAP flag is set implicitly
// if period==0, the transfer is only done once
1479
1480_static int uhci_submit_int_urb (struct urb *urb)
1481{
1482	uhci_t *s = (uhci_t*) urb->dev->bus->hcpriv;
1483	urb_priv_t *urb_priv = urb->hcpriv;
1484	int nint, n;
1485	uhci_desc_t *td;
1486	int status, destination;
1487	int info;
1488	unsigned int pipe = urb->pipe;
1489
1490	if (urb->interval < 0 || urb->interval >= 256)
1491		return -EINVAL;
1492
1493	if (urb->interval == 0)
1494		nint = 0;
1495	else {
1496		for (nint = 0, n = 1; nint <= 8; nint++, n += n)	// round interval down to 2^n
1497		 {
1498			if (urb->interval < n) {
1499				urb->interval = n / 2;
1500				break;
1501			}
1502		}
1503		nint--;
1504	}
1505
1506	dbg("Rounded interval to %i, chain  %i", urb->interval, nint);
1507
1508	urb->start_frame = UHCI_GET_CURRENT_FRAME (s) & 1023;	// remember start frame, just in case...
1509
1510	urb->number_of_packets = 1;
1511
1512	// INT allows only one packet
1513	if (urb->transfer_buffer_length > usb_maxpacket (urb->dev, pipe, usb_pipeout (pipe)))
1514		return -EINVAL;
1515
1516	if (alloc_td (s, &td, UHCI_PTR_DEPTH))
1517		return -ENOMEM;
1518
1519	status = (pipe & TD_CTRL_LS) | TD_CTRL_ACTIVE | TD_CTRL_IOC |
1520		(urb->transfer_flags & USB_DISABLE_SPD ? 0 : TD_CTRL_SPD) | (3 << 27);
1521
1522	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid (urb->pipe) |
1523		(((urb->transfer_buffer_length - 1) & 0x7ff) << 21);
1524
1525
1526	info = destination | (usb_gettoggle (urb->dev, usb_pipeendpoint (pipe), usb_pipeout (pipe)) << TD_TOKEN_TOGGLE);
1527
1528	fill_td (td, status, info, urb_priv->transfer_buffer_dma);
1529	list_add_tail (&td->desc_list, &urb_priv->desc_list);
1530
1531	queue_urb (s, urb);
1532
1533	insert_td_horizontal (s, s->int_chain[nint], td);	// store in INT-TDs
1534
1535	usb_dotoggle (urb->dev, usb_pipeendpoint (pipe), usb_pipeout (pipe));
1536
1537	return 0;
1538}
1539/*-------------------------------------------------------------------*/
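// Schedule an iso URB: allocate one TD per packet, then hook each TD into
// the frame list slot (start_frame + n) & 1023; only the last TD gets IOC.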
1540_static int uhci_submit_iso_urb (struct urb *urb)
1541{
1542	uhci_t *s = (uhci_t*) urb->dev->bus->hcpriv;
1543	urb_priv_t *urb_priv = urb->hcpriv;
1544#ifdef ISO_SANITY_CHECK
1545	int pipe=urb->pipe;
1546	int maxsze = usb_maxpacket (urb->dev, pipe, usb_pipeout (pipe));
1547#endif
1548	int n, ret, last=0;
1549	uhci_desc_t *td, **tdm;
1550	int status, destination;
1551	unsigned long flags;
1552
1553	__save_flags(flags);
1554	__cli();		      // Disable IRQs to schedule all ISO-TDs in time
1555	ret = iso_find_start (urb);	// adjusts urb->start_frame for later use
1556
1557	if (ret)
1558		goto err;
1559
1560	tdm = (uhci_desc_t **) kmalloc (urb->number_of_packets * sizeof (uhci_desc_t*), KMALLOC_FLAG);
1561
1562	if (!tdm) {
1563		ret = -ENOMEM;
1564		goto err;
1565	}
1566
1567	memset(tdm, 0, urb->number_of_packets * sizeof (uhci_desc_t*));
1568
	// First try to get all TDs. Reason: removing already inserted TDs can only be
	// done race-free in three steps: unlink TDs, wait one frame, delete TDs.
	// So this solution seems simpler...
1572
1573	for (n = 0; n < urb->number_of_packets; n++) {
1574		dbg("n:%d urb->iso_frame_desc[n].length:%d", n, urb->iso_frame_desc[n].length);
1575		if (!urb->iso_frame_desc[n].length)
1576			continue;  // allows ISO striping by setting length to zero in iso_descriptor
1577
1578
1579#ifdef ISO_SANITY_CHECK
1580		if(urb->iso_frame_desc[n].length > maxsze) {
1581
1582			err("submit_iso: urb->iso_frame_desc[%d].length(%d)>%d",n , urb->iso_frame_desc[n].length, maxsze);
1583			ret=-EINVAL;
1584		}
1585		else
1586#endif
		if (alloc_td (s, &td, UHCI_PTR_DEPTH)) {
			int i;	// Clean up the TDs allocated so far

			for (i = 0; i < n; i++)
				if (tdm[i])
					 delete_desc(s, tdm[i]);
			kfree (tdm);
			ret = -ENOMEM;
			goto err;
		}
1596		last=n;
1597		tdm[n] = td;
1598	}
1599
1600	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
1601
1602	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid (urb->pipe);
1603
1604	// Queue all allocated TDs
1605	for (n = 0; n < urb->number_of_packets; n++) {
1606		td = tdm[n];
1607		if (!td)
1608			continue;
1609
1610		if (n  == last) {
1611			status |= TD_CTRL_IOC;
1612			queue_urb (s, urb);
1613		}
1614
1615		fill_td (td, status, destination | (((urb->iso_frame_desc[n].length - 1) & 0x7ff) << 21),
1616			 urb_priv->transfer_buffer_dma + urb->iso_frame_desc[n].offset);
1617		list_add_tail (&td->desc_list, &urb_priv->desc_list);
1618
1619		insert_td_horizontal (s, s->iso_td[(urb->start_frame + n) & 1023], td);	// store in iso-tds
1620	}
1621
1622	kfree (tdm);
1623	dbg("ISO-INT# %i, start %i, now %i", urb->number_of_packets, urb->start_frame, UHCI_GET_CURRENT_FRAME (s) & 1023);
1624	ret = 0;
1625
1626      err:
1627	__restore_flags(flags);
1628	return ret;
1629}
1630/*-------------------------------------------------------------------*/
1631// returns: 0 (no transfer queued), urb* (this urb already queued)
1632
1633_static struct urb* search_dev_ep (uhci_t *s, struct urb *urb)
1634{
1635	struct list_head *p;
1636	struct urb *tmp;
1637	unsigned int mask = usb_pipecontrol(urb->pipe) ? (~USB_DIR_IN) : (~0);
1638
1639	dbg("search_dev_ep:");
1640
1641	p=s->urb_list.next;
1642
1643	for (; p != &s->urb_list; p = p->next) {
1644		tmp = list_entry (p, struct urb, urb_list);
1645		dbg("urb: %p", tmp);
		// report a match if this very urb is already queued, or (for non-iso
		// transfers) if another urb is pending for the same device and endpoint
1648		if ((!usb_pipeisoc(urb->pipe) && (tmp->dev == urb->dev) && !((tmp->pipe ^ urb->pipe) & mask)) ||
1649		    (urb == tmp)) {
1650			return tmp;	// found another urb already queued for processing
1651		}
1652	}
1653
1654	return 0;
1655}
1656/*-------------------------------------------------------------------*/
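// Main submission entry point: route root hub requests to the virtual root
// hub, sanity-check the URB, map its buffers for DMA and dispatch to the
// type-specific submit routine. Bulk URBs with USB_QUEUE_BULK may be
// appended to an URB that is already queued on the same pipe.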
1657_static int uhci_submit_urb (struct urb *urb)
1658{
1659	uhci_t *s;
1660	urb_priv_t *urb_priv;
1661	int ret = 0, type;
1662	unsigned long flags;
1663	struct urb *queued_urb=NULL;
1664	int bustime;
1665
1666	if (!urb->dev || !urb->dev->bus)
1667		return -ENODEV;
1668
1669	s = (uhci_t*) urb->dev->bus->hcpriv;
1670	//dbg("submit_urb: %p type %d",urb,usb_pipetype(urb->pipe));
1671
1672	if (!s->running)
1673		return -ENODEV;
1674
1675	type = usb_pipetype (urb->pipe);
1676
1677	if (usb_pipedevice (urb->pipe) == s->rh.devnum)
1678		return rh_submit_urb (urb);	/* virtual root hub */
1679
1680	// Sanity checks
1681	if (usb_maxpacket (urb->dev, urb->pipe, usb_pipeout (urb->pipe)) <= 0) {
1682		err("uhci_submit_urb: pipesize for pipe %x is zero", urb->pipe);
1683		return -EMSGSIZE;
1684	}
1685
1686	if (urb->transfer_buffer_length < 0 && type != PIPE_ISOCHRONOUS) {
1687		err("uhci_submit_urb: Negative transfer length for urb %p", urb);
1688		return -EINVAL;
1689	}
1690
1691	usb_inc_dev_use (urb->dev);
1692
1693	spin_lock_irqsave (&s->urb_list_lock, flags);
1694
1695	queued_urb = search_dev_ep (s, urb); // returns already queued urb for that pipe
1696
1697	if (queued_urb) {
1698
1699		queue_dbg("found bulk urb %p\n", queued_urb);
1700
1701		if (( type != PIPE_BULK) ||
1702		    ((type == PIPE_BULK) &&
1703		     (!(urb->transfer_flags & USB_QUEUE_BULK) || !(queued_urb->transfer_flags & USB_QUEUE_BULK)))) {
1704			spin_unlock_irqrestore (&s->urb_list_lock, flags);
1705			usb_dec_dev_use (urb->dev);
1706			err("ENXIO %08x, flags %x, urb %p, burb %p",urb->pipe,urb->transfer_flags,urb,queued_urb);
1707			return -ENXIO;	// urb already queued
1708		}
1709	}
1710
1711#ifdef DEBUG_SLAB
1712	urb_priv = kmem_cache_alloc(urb_priv_kmem, SLAB_FLAG);
1713#else
1714	urb_priv = kmalloc (sizeof (urb_priv_t), KMALLOC_FLAG);
1715#endif
1716	if (!urb_priv) {
1717		usb_dec_dev_use (urb->dev);
1718		spin_unlock_irqrestore (&s->urb_list_lock, flags);
1719		return -ENOMEM;
1720	}
1721
1722	memset(urb_priv, 0, sizeof(urb_priv_t));
1723	urb->hcpriv = urb_priv;
1724	INIT_LIST_HEAD (&urb_priv->desc_list);
1725
1726	dbg("submit_urb: scheduling %p", urb);
1727
1728	if (type == PIPE_CONTROL)
1729		urb_priv->setup_packet_dma = pci_map_single(s->uhci_pci, urb->setup_packet,
1730							    sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
1731
1732	if (urb->transfer_buffer_length)
1733		urb_priv->transfer_buffer_dma = pci_map_single(s->uhci_pci,
1734							       urb->transfer_buffer,
1735							       urb->transfer_buffer_length,
1736							       usb_pipein(urb->pipe) ?
1737							       PCI_DMA_FROMDEVICE :
1738							       PCI_DMA_TODEVICE);
1739
1740	if (type == PIPE_BULK) {
1741
1742		if (queued_urb) {
1743			while (((urb_priv_t*)queued_urb->hcpriv)->next_queued_urb)  // find last queued bulk
1744				queued_urb=((urb_priv_t*)queued_urb->hcpriv)->next_queued_urb;
1745
1746			((urb_priv_t*)queued_urb->hcpriv)->next_queued_urb=urb;
1747		}
1748		atomic_inc (&s->avoid_bulk);
1749		ret = uhci_submit_bulk_urb (urb, queued_urb);
1750		atomic_dec (&s->avoid_bulk);
1751		spin_unlock_irqrestore (&s->urb_list_lock, flags);
1752	}
1753	else {
1754		spin_unlock_irqrestore (&s->urb_list_lock, flags);
1755		switch (type) {
1756		case PIPE_ISOCHRONOUS:
1757			if (urb->bandwidth == 0) {      /* not yet checked/allocated */
1758				if (urb->number_of_packets <= 0) {
1759					ret = -EINVAL;
1760					break;
1761				}
1762
1763				bustime = usb_check_bandwidth (urb->dev, urb);
1764				if (bustime < 0)
1765					ret = bustime;
1766				else {
1767					ret = uhci_submit_iso_urb(urb);
1768					if (ret == 0)
1769						usb_claim_bandwidth (urb->dev, urb, bustime, 1);
1770				}
1771			} else {        /* bandwidth is already set */
1772				ret = uhci_submit_iso_urb(urb);
1773			}
1774			break;
1775		case PIPE_INTERRUPT:
1776			if (urb->bandwidth == 0) {      /* not yet checked/allocated */
1777				bustime = usb_check_bandwidth (urb->dev, urb);
1778				if (bustime < 0)
1779					ret = bustime;
1780				else {
1781					ret = uhci_submit_int_urb(urb);
1782					if (ret == 0)
1783						usb_claim_bandwidth (urb->dev, urb, bustime, 0);
1784				}
1785			} else {        /* bandwidth is already set */
1786				ret = uhci_submit_int_urb(urb);
1787			}
1788			break;
1789		case PIPE_CONTROL:
1790			ret = uhci_submit_control_urb (urb);
1791			break;
1792		default:
1793			ret = -EINVAL;
1794		}
1795	}
1796
1797	dbg("submit_urb: scheduled with ret: %d", ret);
1798
1799	if (ret != 0) {
1800		uhci_urb_dma_unmap(s, urb, urb_priv);
1801		usb_dec_dev_use (urb->dev);
1802#ifdef DEBUG_SLAB
1803		kmem_cache_free(urb_priv_kmem, urb_priv);
1804#else
1805		kfree (urb_priv);
1806#endif
1807		return ret;
1808	}
1809
1810	return 0;
1811}
1812
1813// Checks for URB timeout and removes bandwidth reclamation if URB idles too long
1814_static void uhci_check_timeouts(uhci_t *s)
1815{
1816	struct list_head *p,*p2;
1817	struct urb *urb;
1818	int type;
1819
1820	p = s->urb_list.prev;
1821
1822	while (p != &s->urb_list) {
1823		urb_priv_t *hcpriv;
1824
1825		p2 = p;
1826		p = p->prev;
1827		urb = list_entry (p2, struct urb, urb_list);
1828		type = usb_pipetype (urb->pipe);
1829
1830		hcpriv = (urb_priv_t*)urb->hcpriv;
1831
1832		if ( urb->timeout && time_after(jiffies, hcpriv->started + urb->timeout)) {
1833			urb->transfer_flags |= USB_TIMEOUT_KILLED | USB_ASYNC_UNLINK;
1834			async_dbg("uhci_check_timeout: timeout for %p",urb);
1835			uhci_unlink_urb_async(s, urb, UNLINK_ASYNC_STORE_URB);
1836		}
1837#ifdef CONFIG_USB_UHCI_HIGH_BANDWIDTH
1838		else if (((type == PIPE_BULK) || (type == PIPE_CONTROL)) &&
1839			 (hcpriv->use_loop) && time_after(jiffies, hcpriv->started + IDLE_TIMEOUT))
1840			disable_desc_loop(s, urb);
1841#endif
1842
1843	}
1844	s->timeout_check=jiffies;
1845}
1846
1847/*-------------------------------------------------------------------
1848 Virtual Root Hub
1849 -------------------------------------------------------------------*/
1850
1851_static __u8 root_hub_dev_des[] =
1852{
1853	0x12,			/*  __u8  bLength; */
1854	0x01,			/*  __u8  bDescriptorType; Device */
1855	0x00,			/*  __u16 bcdUSB; v1.0 */
1856	0x01,
1857	0x09,			/*  __u8  bDeviceClass; HUB_CLASSCODE */
1858	0x00,			/*  __u8  bDeviceSubClass; */
1859	0x00,			/*  __u8  bDeviceProtocol; */
1860	0x08,			/*  __u8  bMaxPacketSize0; 8 Bytes */
1861	0x00,			/*  __u16 idVendor; */
1862	0x00,
1863	0x00,			/*  __u16 idProduct; */
1864	0x00,
1865	0x00,			/*  __u16 bcdDevice; */
1866	0x00,
1867	0x00,			/*  __u8  iManufacturer; */
1868	0x02,			/*  __u8  iProduct; */
1869	0x01,			/*  __u8  iSerialNumber; */
1870	0x01			/*  __u8  bNumConfigurations; */
1871};
1872
1873
1874/* Configuration descriptor */
1875_static __u8 root_hub_config_des[] =
1876{
1877	0x09,			/*  __u8  bLength; */
1878	0x02,			/*  __u8  bDescriptorType; Configuration */
1879	0x19,			/*  __u16 wTotalLength; */
1880	0x00,
1881	0x01,			/*  __u8  bNumInterfaces; */
1882	0x01,			/*  __u8  bConfigurationValue; */
1883	0x00,			/*  __u8  iConfiguration; */
1884	0x40,			/*  __u8  bmAttributes;
1885				   Bit 7: Bus-powered, 6: Self-powered, 5: Remote-wakeup, 4..0: resvd */
1886	0x00,			/*  __u8  MaxPower; */
1887
1888     /* interface */
1889	0x09,			/*  __u8  if_bLength; */
1890	0x04,			/*  __u8  if_bDescriptorType; Interface */
1891	0x00,			/*  __u8  if_bInterfaceNumber; */
1892	0x00,			/*  __u8  if_bAlternateSetting; */
1893	0x01,			/*  __u8  if_bNumEndpoints; */
1894	0x09,			/*  __u8  if_bInterfaceClass; HUB_CLASSCODE */
1895	0x00,			/*  __u8  if_bInterfaceSubClass; */
1896	0x00,			/*  __u8  if_bInterfaceProtocol; */
1897	0x00,			/*  __u8  if_iInterface; */
1898
1899     /* endpoint */
1900	0x07,			/*  __u8  ep_bLength; */
1901	0x05,			/*  __u8  ep_bDescriptorType; Endpoint */
1902	0x81,			/*  __u8  ep_bEndpointAddress; IN Endpoint 1 */
1903	0x03,			/*  __u8  ep_bmAttributes; Interrupt */
1904	0x08,			/*  __u16 ep_wMaxPacketSize; 8 Bytes */
1905	0x00,
1906	0xff			/*  __u8  ep_bInterval; 255 ms */
1907};
1908
1909
1910_static __u8 root_hub_hub_des[] =
1911{
1912	0x09,			/*  __u8  bLength; */
1913	0x29,			/*  __u8  bDescriptorType; Hub-descriptor */
1914	0x02,			/*  __u8  bNbrPorts; */
1915	0x00,			/* __u16  wHubCharacteristics; */
1916	0x00,
1917	0x01,			/*  __u8  bPwrOn2pwrGood; 2ms */
1918	0x00,			/*  __u8  bHubContrCurrent; 0 mA */
1919	0x00,			/*  __u8  DeviceRemovable; *** 7 Ports max *** */
1920	0xff			/*  __u8  PortPwrCtrlMask; *** 7 ports max *** */
1921};
1922
1923/*-------------------------------------------------------------------------*/
1924/* prepare Interrupt pipe transaction data; HUB INTERRUPT ENDPOINT */
1925_static int rh_send_irq (struct urb *urb)
1926{
1927	int len = 1;
1928	int i;
1929	uhci_t *uhci = urb->dev->bus->hcpriv;
1930	unsigned int io_addr = uhci->io_addr;
1931	__u16 data = 0;
1932
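	/* Build the hub interrupt bitmap: bit (i + 1) is set when port i reports
	 * a connect-status or port-enable change (PORTSC bits 1 and 3, mask 0xa). */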
1933	for (i = 0; i < uhci->rh.numports; i++) {
1934		data |= ((inw (io_addr + USBPORTSC1 + i * 2) & 0xa) > 0 ? (1 << (i + 1)) : 0);
1935		len = (i + 1) / 8 + 1;
1936	}
1937
1938	*(__u16 *) urb->transfer_buffer = cpu_to_le16 (data);
1939	urb->actual_length = len;
1940	urb->status = 0;
1941
1942	if ((data > 0) && (uhci->rh.send != 0)) {
1943		dbg("Root-Hub INT complete: port1: %x port2: %x data: %x",
1944		     inw (io_addr + USBPORTSC1), inw (io_addr + USBPORTSC2), data);
1945		urb->complete (urb);
1946	}
1947	return 0;
1948}
1949
1950/*-------------------------------------------------------------------------*/
1951/* Virtual Root Hub INTs are polled by this timer every "interval" ms */
1952_static int rh_init_int_timer (struct urb *urb);
1953
1954_static void rh_int_timer_do (unsigned long ptr)
1955{
1956	int len;
1957	struct urb *urb = (struct urb*) ptr;
1958	uhci_t *uhci = urb->dev->bus->hcpriv;
1959
1960	if (uhci->rh.send) {
1961		len = rh_send_irq (urb);
1962		if (len > 0) {
1963			urb->actual_length = len;
1964			if (urb->complete)
1965				urb->complete (urb);
1966		}
1967	}
1968	rh_init_int_timer (urb);
1969}
1970
1971/*-------------------------------------------------------------------------*/
1972/* Root Hub INTs are polled by this timer, polling interval 20ms */
1973
1974_static int rh_init_int_timer (struct urb *urb)
1975{
1976	uhci_t *uhci = urb->dev->bus->hcpriv;
1977
1978	uhci->rh.interval = urb->interval;
1979	init_timer (&uhci->rh.rh_int_timer);
1980	uhci->rh.rh_int_timer.function = rh_int_timer_do;
1981	uhci->rh.rh_int_timer.data = (unsigned long) urb;
1982	uhci->rh.rh_int_timer.expires = jiffies + (HZ * 20) / 1000;
1983	add_timer (&uhci->rh.rh_int_timer);
1984
1985	return 0;
1986}
1987
1988/*-------------------------------------------------------------------------*/
1989#define OK(x) 			len = (x); break
1990
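/* PORTSC is read-modify-written with mask 0xfff5 so the write-1-to-clear
 * change bits (CSC, bit 1, and PEC, bit 3) are not acknowledged by accident. */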
1991#define CLR_RH_PORTSTAT(x) \
1992		status = inw(io_addr+USBPORTSC1+2*(wIndex-1)); \
1993		status = (status & 0xfff5) & ~(x); \
1994		outw(status, io_addr+USBPORTSC1+2*(wIndex-1))
1995
1996#define SET_RH_PORTSTAT(x) \
1997		status = inw(io_addr+USBPORTSC1+2*(wIndex-1)); \
1998		status = (status & 0xfff5) | (x); \
1999		outw(status, io_addr+USBPORTSC1+2*(wIndex-1))
2000
2001
2002/*-------------------------------------------------------------------------*/
2003/****
2004 ** Root Hub Control Pipe
2005 *************************/
2006
2007
2008_static int rh_submit_urb (struct urb *urb)
2009{
2010	struct usb_device *usb_dev = urb->dev;
2011	uhci_t *uhci = usb_dev->bus->hcpriv;
2012	unsigned int pipe = urb->pipe;
2013	struct usb_ctrlrequest *cmd = (struct usb_ctrlrequest *) urb->setup_packet;
2014	void *data = urb->transfer_buffer;
2015	int leni = urb->transfer_buffer_length;
2016	int len = 0;
2017	int status = 0;
2018	int stat = 0;
2019	int i;
2020	unsigned int io_addr = uhci->io_addr;
2021	__u16 cstatus;
2022
2023	__u16 bmRType_bReq;
2024	__u16 wValue;
2025	__u16 wIndex;
2026	__u16 wLength;
2027
2028	if (usb_pipetype (pipe) == PIPE_INTERRUPT) {
2029		dbg("Root-Hub submit IRQ: every %d ms", urb->interval);
2030		uhci->rh.urb = urb;
2031		uhci->rh.send = 1;
2032		uhci->rh.interval = urb->interval;
2033		rh_init_int_timer (urb);
2034
2035		return 0;
2036	}
2037
2038
2039	bmRType_bReq = cmd->bRequestType | cmd->bRequest << 8;
2040	wValue = le16_to_cpu (cmd->wValue);
2041	wIndex = le16_to_cpu (cmd->wIndex);
2042	wLength = le16_to_cpu (cmd->wLength);
2043
2044	for (i = 0; i < 8; i++)
2045		uhci->rh.c_p_r[i] = 0;
2046
2047	dbg("Root-Hub: adr: %2x cmd(%1x): %04x %04x %04x %04x",
2048	     uhci->rh.devnum, 8, bmRType_bReq, wValue, wIndex, wLength);
2049
2050	switch (bmRType_bReq) {
2051		/* Request Destination:
2052		   without flags: Device,
2053		   RH_INTERFACE: interface,
2054		   RH_ENDPOINT: endpoint,
2055		   RH_CLASS means HUB here,
2056		   RH_OTHER | RH_CLASS  almost always means HUB_PORT here
2057		 */
2058
2059	case RH_GET_STATUS:
2060		*(__u16 *) data = cpu_to_le16 (1);
2061		OK (2);
2062	case RH_GET_STATUS | RH_INTERFACE:
2063		*(__u16 *) data = cpu_to_le16 (0);
2064		OK (2);
2065	case RH_GET_STATUS | RH_ENDPOINT:
2066		*(__u16 *) data = cpu_to_le16 (0);
2067		OK (2);
2068	case RH_GET_STATUS | RH_CLASS:
2069		*(__u32 *) data = cpu_to_le32 (0);
2070		OK (4);		/* hub power ** */
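	/* Remap the UHCI PORTSC register bits into the wPortStatus / wPortChange
	 * layout expected by the USB hub driver. */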
2071	case RH_GET_STATUS | RH_OTHER | RH_CLASS:
2072		status = inw (io_addr + USBPORTSC1 + 2 * (wIndex - 1));
2073		cstatus = ((status & USBPORTSC_CSC) >> (1 - 0)) |
2074			((status & USBPORTSC_PEC) >> (3 - 1)) |
2075			(uhci->rh.c_p_r[wIndex - 1] << (0 + 4));
2076		status = (status & USBPORTSC_CCS) |
2077			((status & USBPORTSC_PE) >> (2 - 1)) |
2078			((status & USBPORTSC_SUSP) >> (12 - 2)) |
2079			((status & USBPORTSC_PR) >> (9 - 4)) |
2080			(1 << 8) |	/* power on ** */
2081			((status & USBPORTSC_LSDA) << (-8 + 9));
2082
2083		*(__u16 *) data = cpu_to_le16 (status);
2084		*(__u16 *) (data + 2) = cpu_to_le16 (cstatus);
2085		OK (4);
2086
2087	case RH_CLEAR_FEATURE | RH_ENDPOINT:
2088		switch (wValue) {
2089		case (RH_ENDPOINT_STALL):
2090			OK (0);
2091		}
2092		break;
2093
2094	case RH_CLEAR_FEATURE | RH_CLASS:
2095		switch (wValue) {
2096		case (RH_C_HUB_OVER_CURRENT):
2097			OK (0);	/* hub power over current ** */
2098		}
2099		break;
2100
2101	case RH_CLEAR_FEATURE | RH_OTHER | RH_CLASS:
2102		switch (wValue) {
2103		case (RH_PORT_ENABLE):
2104			CLR_RH_PORTSTAT (USBPORTSC_PE);
2105			OK (0);
2106		case (RH_PORT_SUSPEND):
2107			CLR_RH_PORTSTAT (USBPORTSC_SUSP);
2108			OK (0);
2109		case (RH_PORT_POWER):
2110			OK (0);	/* port power ** */
2111		case (RH_C_PORT_CONNECTION):
2112			SET_RH_PORTSTAT (USBPORTSC_CSC);
2113			OK (0);
2114		case (RH_C_PORT_ENABLE):
2115			SET_RH_PORTSTAT (USBPORTSC_PEC);
2116			OK (0);
2117		case (RH_C_PORT_SUSPEND):
2118/*** WR_RH_PORTSTAT(RH_PS_PSSC); */
2119			OK (0);
2120		case (RH_C_PORT_OVER_CURRENT):
2121			OK (0);	/* port power over current ** */
2122		case (RH_C_PORT_RESET):
2123			uhci->rh.c_p_r[wIndex - 1] = 0;
2124			OK (0);
2125		}
2126		break;
2127
2128	case RH_SET_FEATURE | RH_OTHER | RH_CLASS:
2129		switch (wValue) {
2130		case (RH_PORT_SUSPEND):
2131			SET_RH_PORTSTAT (USBPORTSC_SUSP);
2132			OK (0);
2133		case (RH_PORT_RESET):
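			/* Port reset sequence: drive reset for ~10 ms, release it,
			 * enable the port and clear the change bits (0xa). */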
2134			SET_RH_PORTSTAT (USBPORTSC_PR);
2135			uhci_wait_ms (10);
2136			uhci->rh.c_p_r[wIndex - 1] = 1;
2137			CLR_RH_PORTSTAT (USBPORTSC_PR);
2138			udelay (10);
2139			SET_RH_PORTSTAT (USBPORTSC_PE);
2140			uhci_wait_ms (10);
2141			SET_RH_PORTSTAT (0xa);
2142			OK (0);
2143		case (RH_PORT_POWER):
2144			OK (0);	/* port power ** */
2145		case (RH_PORT_ENABLE):
2146			SET_RH_PORTSTAT (USBPORTSC_PE);
2147			OK (0);
2148		}
2149		break;
2150
2151	case RH_SET_ADDRESS:
2152		uhci->rh.devnum = wValue;
2153		OK (0);
2154
2155	case RH_GET_DESCRIPTOR:
2156		switch ((wValue & 0xff00) >> 8) {
2157		case (0x01):	/* device descriptor */
2158			len = min_t(unsigned int, leni,
2159				  min_t(unsigned int,
2160				      sizeof (root_hub_dev_des), wLength));
2161			memcpy (data, root_hub_dev_des, len);
2162			OK (len);
2163		case (0x02):	/* configuration descriptor */
2164			len = min_t(unsigned int, leni,
2165				  min_t(unsigned int,
2166				      sizeof (root_hub_config_des), wLength));
2167			memcpy (data, root_hub_config_des, len);
2168			OK (len);
2169		case (0x03):	/* string descriptors */
2170			len = usb_root_hub_string (wValue & 0xff,
2171			        uhci->io_addr, "UHCI",
2172				data, wLength);
2173			if (len > 0) {
2174				OK(min_t(int, leni, len));
2175			} else
2176				stat = -EPIPE;
2177		}
2178		break;
2179
2180	case RH_GET_DESCRIPTOR | RH_CLASS:
2181		root_hub_hub_des[2] = uhci->rh.numports;
2182		len = min_t(unsigned int, leni,
2183			  min_t(unsigned int, sizeof (root_hub_hub_des), wLength));
2184		memcpy (data, root_hub_hub_des, len);
2185		OK (len);
2186
2187	case RH_GET_CONFIGURATION:
2188		*(__u8 *) data = 0x01;
2189		OK (1);
2190
2191	case RH_SET_CONFIGURATION:
2192		OK (0);
2193	default:
2194		stat = -EPIPE;
2195	}
2196
2197	dbg("Root-Hub stat port1: %x port2: %x",
2198	     inw (io_addr + USBPORTSC1), inw (io_addr + USBPORTSC2));
2199
2200	urb->actual_length = len;
2201	urb->status = stat;
2202	urb->dev=NULL;
2203	if (urb->complete)
2204		urb->complete (urb);
2205	return 0;
2206}
2207/*-------------------------------------------------------------------------*/
2208
2209_static int rh_unlink_urb (struct urb *urb)
2210{
2211	uhci_t *uhci = urb->dev->bus->hcpriv;
2212
2213	if (uhci->rh.urb==urb) {
2214		dbg("Root-Hub unlink IRQ");
2215		uhci->rh.send = 0;
2216		del_timer (&uhci->rh.rh_int_timer);
2217	}
2218	return 0;
2219}
2220/*-------------------------------------------------------------------*/
2221
2222/*
2223 * Map status to standard result codes
2224 *
2225 * <status> is (td->status & 0xFE0000) [a.k.a. uhci_status_bits(td->status)]
2226 * <dir_out> is True for output TDs and False for input TDs.
2227 */
2228_static int uhci_map_status (int status, int dir_out)
2229{
2230	if (!status)
2231		return 0;
2232	if (status & TD_CTRL_BITSTUFF)	/* Bitstuff error */
2233		return -EPROTO;
2234	if (status & TD_CTRL_CRCTIMEO) {	/* CRC/Timeout */
2235		if (dir_out)
2236			return -ETIMEDOUT;
2237		else
2238			return -EILSEQ;
2239	}
2240	if (status & TD_CTRL_NAK)	/* NAK */
2241		return -ETIMEDOUT;
2242	if (status & TD_CTRL_BABBLE)	/* Babble */
2243		return -EOVERFLOW;
2244	if (status & TD_CTRL_DBUFERR)	/* Buffer error */
2245		return -ENOSR;
2246	if (status & TD_CTRL_STALLED)	/* Stalled */
2247		return -EPIPE;
2248	if (status & TD_CTRL_ACTIVE)	/* Active */
2249		return 0;
2250
2251	return -EPROTO;
2252}
2253
2254/*
2255 * Only the USB core should call uhci_alloc_dev and uhci_free_dev
2256 */
2257_static int uhci_alloc_dev (struct usb_device *usb_dev)
2258{
2259	return 0;
2260}
2261
2262_static void uhci_unlink_urbs(uhci_t *s, struct usb_device *usb_dev, int remove_all)
2263{
2264	unsigned long flags;
2265	struct list_head *p;
2266	struct list_head *p2;
2267	struct urb *urb;
2268
2269	spin_lock_irqsave (&s->urb_list_lock, flags);
2270	p = s->urb_list.prev;
2271	while (p != &s->urb_list) {
2272		p2 = p;
2273		p = p->prev ;
2274		urb = list_entry (p2, struct urb, urb_list);
2275		dbg("urb: %p, dev %p, %p", urb, usb_dev,urb->dev);
2276
2277		//urb->transfer_flags |=USB_ASYNC_UNLINK;
2278
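		/* Drop the list lock around uhci_unlink_urb() and restart the scan
		 * from the tail afterwards, since the list may have changed. */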
2279		if (remove_all || (usb_dev == urb->dev)) {
2280			spin_unlock_irqrestore (&s->urb_list_lock, flags);
2281			warn("forced removing of queued URB %p due to disconnect",urb);
2282			uhci_unlink_urb(urb);
2283			urb->dev = NULL; // avoid further processing of this URB
2284			spin_lock_irqsave (&s->urb_list_lock, flags);
2285			p = s->urb_list.prev;
2286		}
2287	}
2288	spin_unlock_irqrestore (&s->urb_list_lock, flags);
2289}
2290
2291_static int uhci_free_dev (struct usb_device *usb_dev)
2292{
2293	uhci_t *s;
2294
2295
2296	if(!usb_dev || !usb_dev->bus || !usb_dev->bus->hcpriv)
2297		return -EINVAL;
2298
2299	s=(uhci_t*) usb_dev->bus->hcpriv;
2300	uhci_unlink_urbs(s, usb_dev, 0);
2301
2302	return 0;
2303}
2304
2305/*
2306 * uhci_get_current_frame_number()
2307 *
2308 * returns the current frame number for a USB bus/controller.
2309 */
2310_static int uhci_get_current_frame_number (struct usb_device *usb_dev)
2311{
2312	return UHCI_GET_CURRENT_FRAME ((uhci_t*) usb_dev->bus->hcpriv);
2313}
2314
2315struct usb_operations uhci_device_operations =
2316{
2317	uhci_alloc_dev,
2318	uhci_free_dev,
2319	uhci_get_current_frame_number,
2320	uhci_submit_urb,
2321	uhci_unlink_urb
2322};
2323
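/* A short queued bulk transfer can leave the endpoint's data toggle out of
 * sync with the TDs already prepared for the following queued URBs, so the
 * stored toggle and the toggle bit of every remaining TD are flipped. */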
2324_static void correct_data_toggles(struct urb *urb)
2325{
2326	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe),
2327		       !usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe)));
2328
2329	while(urb) {
2330		urb_priv_t *priv=urb->hcpriv;
2331		uhci_desc_t *qh = list_entry (priv->desc_list.next, uhci_desc_t, desc_list);
2332		struct list_head *p = qh->vertical.next;
2333		uhci_desc_t *td;
2334		dbg("URB to correct %p\n", urb);
2335
2336		for (; p != &qh->vertical; p = p->next) {
2337			td = list_entry (p, uhci_desc_t, vertical);
2338			td->hw.td.info^=cpu_to_le32(1<<TD_TOKEN_TOGGLE);
2339		}
2340		urb=priv->next_queued_urb;
2341	}
2342}
2343
2344/*
2345 * For IN-control transfers, process_transfer gets a bit more complicated,
2346 * since there are devices that return less data (eg. strings) than they
2347 * have announced. This leads to a queue abort due to the short packet, so
2348 * the status stage is not executed. If this happens, the status stage
2349 * is manually re-executed.
2350 * mode: CLEAN_TRANSFER_REGULAR: regular processing (unlink QH)
2351 *       CLEAN_TRANSFER_DELETION_MARK: QHs already unlinked (for async unlink_urb)
2352 */
2353
2354_static int process_transfer (uhci_t *s, struct urb *urb, int mode)
2355{
2356	int ret = 0;
2357	urb_priv_t *urb_priv = urb->hcpriv;
2358	struct list_head *qhl = urb_priv->desc_list.next;
2359	uhci_desc_t *qh = list_entry (qhl, uhci_desc_t, desc_list);
2360	struct list_head *p = qh->vertical.next;
2361	uhci_desc_t *desc= list_entry (urb_priv->desc_list.prev, uhci_desc_t, desc_list);
2362	uhci_desc_t *last_desc = list_entry (desc->vertical.prev, uhci_desc_t, vertical);
2363	int data_toggle = usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe));	// save initial data_toggle
2364	int maxlength; 	// extracted and remapped info from TD
2365	int actual_length;
2366	int status = 0;
2367
2368	//dbg("process_transfer: urb %p, urb_priv %p, qh %p last_desc %p\n",urb,urb_priv, qh, last_desc);
2369
2370	/* if the status phase has been retriggered and the
2371	   queue is empty or the last status-TD is inactive, the retriggered
2372	   status stage is completed
2373	 */
2374
2375	if (urb_priv->flags &&
2376	    ((qh->hw.qh.element == cpu_to_le32(UHCI_PTR_TERM)) || !is_td_active(desc)))
2377		goto transfer_finished;
2378
2379	urb->actual_length=0;
2380
2381	for (; p != &qh->vertical; p = p->next) {
2382		desc = list_entry (p, uhci_desc_t, vertical);
2383
2384		if (is_td_active(desc)) {	// do not process active TDs
2385			if (mode == CLEAN_TRANSFER_DELETION_MARK) // if called from async_unlink
2386				uhci_clean_transfer(s, urb, qh, CLEAN_TRANSFER_DELETION_MARK);
2387			return ret;
2388		}
2389
2390		actual_length = uhci_actual_length(le32_to_cpu(desc->hw.td.status));		// extract transfer parameters from TD
2391		maxlength = (((le32_to_cpu(desc->hw.td.info) >> 21) & 0x7ff) + 1) & 0x7ff;
2392		status = uhci_map_status (uhci_status_bits (le32_to_cpu(desc->hw.td.status)), usb_pipeout (urb->pipe));
2393
2394		if (status == -EPIPE) { 		// see if EP is stalled
2395			// set up stalled condition
2396			usb_endpoint_halt (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe));
2397		}
2398
2399		if (status && (status != -EPIPE)) {	// if any error occurred stop processing of further TDs
2400			// only set ret if status returned an error
2401  is_error:
2402			ret = status;
2403			urb->error_count++;
2404			break;
2405		}
2406		else if ((le32_to_cpu(desc->hw.td.info) & 0xff) != USB_PID_SETUP)
2407			urb->actual_length += actual_length;
2408
2409		// got less data than requested
2410		if ( (actual_length < maxlength)) {
2411			if (urb->transfer_flags & USB_DISABLE_SPD) {
2412				status = -EREMOTEIO;	// treat as real error
2413				dbg("process_transfer: SPD!!");
2414				break;	// exit after this TD because SP was detected
2415			}
2416
2417			// short read during control-IN: re-start status stage
2418			if ((usb_pipetype (urb->pipe) == PIPE_CONTROL)) {
2419				if (uhci_packetid(le32_to_cpu(last_desc->hw.td.info)) == USB_PID_OUT) {
2420
2421					set_qh_element(qh, last_desc->dma_addr);  // re-trigger status stage
2422					dbg("short packet during control transfer, retrigger status stage @ %p",last_desc);
2423					urb_priv->flags = 1; // mark as short control packet
2424					return 0;
2425				}
2426			}
2427			// all other cases: short read is OK
2428			data_toggle = uhci_toggle (le32_to_cpu(desc->hw.td.info));
2429			break;
2430		}
2431		else if (status)
2432			goto is_error;
2433
2434		data_toggle = uhci_toggle (le32_to_cpu(desc->hw.td.info));
2435		queue_dbg("process_transfer: len:%d status:%x mapped:%x toggle:%d", actual_length, le32_to_cpu(desc->hw.td.status),status, data_toggle);
2436
2437	}
2438
2439	if (usb_pipetype (urb->pipe) == PIPE_BULK ) {  /* toggle correction for short bulk transfers (nonqueued/queued) */
2440
2441		urb_priv_t *priv=(urb_priv_t*)urb->hcpriv;
2442		struct urb *next_queued_urb=priv->next_queued_urb;
2443
2444		if (next_queued_urb) {
2445			urb_priv_t *next_priv=(urb_priv_t*)next_queued_urb->hcpriv;
2446			uhci_desc_t *qh = list_entry (next_priv->desc_list.next, uhci_desc_t, desc_list);
2447			uhci_desc_t *first_td=list_entry (qh->vertical.next, uhci_desc_t, vertical);
2448
2449			if (data_toggle == uhci_toggle (le32_to_cpu(first_td->hw.td.info))) {
2450				err("process_transfer: fixed toggle");
2451				correct_data_toggles(next_queued_urb);
2452			}
2453		}
2454		else
2455			usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe), !data_toggle);
2456	}
2457
2458 transfer_finished:
2459
2460	uhci_clean_transfer(s, urb, qh, mode);
2461
2462	urb->status = status;
2463
2464#ifdef CONFIG_USB_UHCI_HIGH_BANDWIDTH
2465	disable_desc_loop(s,urb);
2466#endif
2467
2468	queue_dbg("process_transfer: (end) urb %p, wanted len %d, len %d status %x err %d",
2469		urb,urb->transfer_buffer_length,urb->actual_length, urb->status, urb->error_count);
2470	return ret;
2471}
2472
2473_static int process_interrupt (uhci_t *s, struct urb *urb)
2474{
2475	int ret = -EINPROGRESS;
2476	urb_priv_t *urb_priv = urb->hcpriv;
2477	struct list_head *p;
2478	uhci_desc_t *desc = list_entry (urb_priv->desc_list.prev, uhci_desc_t, desc_list);
2479
2480	int actual_length;
2481	int status = 0;
2482
2483	//dbg("urb contains interrupt request");
2484
2485	// Maybe we allow more than one TD later ;-)
2486	while ((p = urb_priv->desc_list.next) != &urb_priv->desc_list) {
2487
2488		desc = list_entry (p, uhci_desc_t, desc_list);
2489
2490		if (is_td_active(desc)) {
2491			// do not process active TDs
2492			//dbg("TD ACT Status @%p %08x",desc,le32_to_cpu(desc->hw.td.status));
2493			break;
2494		}
2495
2496		if (!(desc->hw.td.status & cpu_to_le32(TD_CTRL_IOC))) {
2497			// do not process one-shot TDs, no recycling
2498			break;
2499		}
2500		// extract transfer parameters from TD
2501
2502		actual_length = uhci_actual_length(le32_to_cpu(desc->hw.td.status));
2503		status = uhci_map_status (uhci_status_bits (le32_to_cpu(desc->hw.td.status)), usb_pipeout (urb->pipe));
2504
2505		// see if EP is stalled
2506		if (status == -EPIPE) {
2507			// set up stalled condition
2508			usb_endpoint_halt (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe));
2509		}
2510
2511		// if any error occurred: ignore this td, and continue
2512		if (status != 0) {
2513			//uhci_show_td (desc);
2514			urb->error_count++;
2515			goto recycle;
2516		}
2517		else
2518			urb->actual_length = actual_length;
2519
2520	recycle:
2521		uhci_urb_dma_sync(s, urb, urb->hcpriv);
2522		if (urb->complete) {
2523			//dbg("process_interrupt: calling completion, status %i",status);
2524			urb->status = status;
2525			((urb_priv_t*)urb->hcpriv)->flags=1; // if unlink_urb is called during completion
2526
2527			spin_unlock(&s->urb_list_lock);
2528
2529			urb->complete ((struct urb *) urb);
2530
2531			spin_lock(&s->urb_list_lock);
2532
2533			((urb_priv_t*)urb->hcpriv)->flags=0;
2534		}
2535
2536		if ((urb->status != -ECONNABORTED) && (urb->status != -ECONNRESET) &&
2537			    (urb->status != -ENOENT)) {
2538
2539			urb->status = -EINPROGRESS;
2540
2541			// Recycle INT-TD if interval!=0, else mark TD as one-shot
2542			if (urb->interval) {
2543
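				/* Rebuild the toggle bit for the recycled TD: on success it
				 * reuses the stored endpoint toggle and the stored value is
				 * advanced; on error the complement is used without advancing. */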
2544				desc->hw.td.info &= cpu_to_le32(~(1 << TD_TOKEN_TOGGLE));
2545				if (status==0) {
2546					((urb_priv_t*)urb->hcpriv)->started=jiffies;
2547					desc->hw.td.info |= cpu_to_le32((usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe),
2548									    usb_pipeout (urb->pipe)) << TD_TOKEN_TOGGLE));
2549					usb_dotoggle (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe));
2550				} else {
2551					desc->hw.td.info |= cpu_to_le32((!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe),
2552									     usb_pipeout (urb->pipe)) << TD_TOKEN_TOGGLE));
2553				}
2554				desc->hw.td.status= cpu_to_le32((urb->pipe & TD_CTRL_LS) | TD_CTRL_ACTIVE | TD_CTRL_IOC |
2555					(urb->transfer_flags & USB_DISABLE_SPD ? 0 : TD_CTRL_SPD) | (3 << 27));
2556				mb();
2557			}
2558			else {
2559				uhci_unlink_urb_async(s, urb, UNLINK_ASYNC_STORE_URB);
2560				uhci_clean_iso_step2(s, urb_priv);
2561				// correct toggle after unlink
2562				usb_dotoggle (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe));
2563				clr_td_ioc(desc); // inactivate TD
2564			}
2565		}
2566	}
2567
2568	return ret;
2569}
2570
2571// mode: PROCESS_ISO_REGULAR: processing only for done TDs, unlink TDs
2572// mode: PROCESS_ISO_FORCE: force processing, don't unlink TDs (already unlinked)
2573
2574_static int process_iso (uhci_t *s, struct urb *urb, int mode)
2575{
2576	int i;
2577	int ret = 0;
2578	urb_priv_t *urb_priv = urb->hcpriv;
2579	struct list_head *p = urb_priv->desc_list.next, *p_tmp;
2580	uhci_desc_t *desc = list_entry (urb_priv->desc_list.prev, uhci_desc_t, desc_list);
2581
2582	dbg("urb contains iso request");
2583	if (is_td_active(desc) && mode==PROCESS_ISO_REGULAR)
2584		return -EXDEV;	// last TD not finished
2585
2586	urb->error_count = 0;
2587	urb->actual_length = 0;
2588	urb->status = 0;
2589	dbg("process iso urb %p, %li, %i, %i, %i %08x",urb,jiffies,UHCI_GET_CURRENT_FRAME(s),
2590	    urb->number_of_packets,mode,le32_to_cpu(desc->hw.td.status));
2591
2592	for (i = 0; p != &urb_priv->desc_list;  i++) {
2593		desc = list_entry (p, uhci_desc_t, desc_list);
2594
2595		//uhci_show_td(desc);
2596		if (is_td_active(desc)) {
2597			// means we have completed the last TD, but not the TDs before
2598			desc->hw.td.status &= cpu_to_le32(~TD_CTRL_ACTIVE);
2599			dbg("TD still active (%x)- grrr. paranoia!", le32_to_cpu(desc->hw.td.status));
2600			ret = -EXDEV;
2601			urb->iso_frame_desc[i].status = ret;
2602			unlink_td (s, desc, 1);
2603			// FIXME: immediate deletion may be dangerous
2604			goto err;
2605		}
2606
2607		if (mode == PROCESS_ISO_REGULAR)
2608			unlink_td (s, desc, 1);
2609
2610		if (urb->number_of_packets <= i) {
2611			dbg("urb->number_of_packets (%d)<=(%d)", urb->number_of_packets, i);
2612			ret = -EINVAL;
2613			goto err;
2614		}
2615
2616		urb->iso_frame_desc[i].actual_length = uhci_actual_length(le32_to_cpu(desc->hw.td.status));
2617		urb->iso_frame_desc[i].status = uhci_map_status (uhci_status_bits (le32_to_cpu(desc->hw.td.status)), usb_pipeout (urb->pipe));
2618		urb->actual_length += urb->iso_frame_desc[i].actual_length;
2619
2620	      err:
2621
2622		if (urb->iso_frame_desc[i].status != 0) {
2623			urb->error_count++;
2624			urb->status = urb->iso_frame_desc[i].status;
2625		}
2626		dbg("process_iso: %i: len:%d %08x status:%x",
2627		     i, urb->iso_frame_desc[i].actual_length, le32_to_cpu(desc->hw.td.status),urb->iso_frame_desc[i].status);
2628
2629		p_tmp = p;
2630		p = p->next;
2631		list_del (p_tmp);
2632		delete_desc (s, desc);
2633	}
2634
2635	dbg("process_iso: exit %i (%d), actual_len %i", i, ret,urb->actual_length);
2636	return ret;
2637}
2638
2639
2640_static int process_urb (uhci_t *s, struct list_head *p)
2641{
2642	int ret = 0;
2643	struct urb *urb;
2644
2645	urb=list_entry (p, struct urb, urb_list);
2646	//dbg("process_urb: found queued urb: %p", urb);
2647
2648	switch (usb_pipetype (urb->pipe)) {
2649	case PIPE_CONTROL:
2650		ret = process_transfer (s, urb, CLEAN_TRANSFER_REGULAR);
2651		break;
2652	case PIPE_BULK:
2653		if (!s->avoid_bulk.counter)
2654			ret = process_transfer (s, urb, CLEAN_TRANSFER_REGULAR);
2655		else
2656			return 0;
2657		break;
2658	case PIPE_ISOCHRONOUS:
2659		ret = process_iso (s, urb, PROCESS_ISO_REGULAR);
2660		break;
2661	case PIPE_INTERRUPT:
2662		ret = process_interrupt (s, urb);
2663		break;
2664	}
2665
2666	if (urb->status != -EINPROGRESS) {
2667		urb_priv_t *urb_priv;
2668		struct usb_device *usb_dev;
2669
2670		usb_dev=urb->dev;
2671
2672		/* Release bandwidth for Interrupt or Iso transfers */
2673		if (urb->bandwidth) {
2674			if (usb_pipetype(urb->pipe)==PIPE_ISOCHRONOUS)
2675				usb_release_bandwidth (urb->dev, urb, 1);
2676			else if (usb_pipetype(urb->pipe)==PIPE_INTERRUPT && urb->interval)
2677				usb_release_bandwidth (urb->dev, urb, 0);
2678		}
2679
2680		dbg("dequeued urb: %p", urb);
2681		dequeue_urb (s, urb);
2682
2683		urb_priv = urb->hcpriv;
2684
2685		uhci_urb_dma_unmap(s, urb, urb_priv);
2686
2687#ifdef DEBUG_SLAB
2688		kmem_cache_free(urb_priv_kmem, urb_priv);
2689#else
2690		kfree (urb_priv);
2691#endif
2692
2693		if ((usb_pipetype (urb->pipe) != PIPE_INTERRUPT)) {  // process_interrupt does completion on its own
2694			struct urb *next_urb = urb->next;
2695			int is_ring = 0;
2696			int contains_killed = 0;
2697			int loop_count=0;
2698
2699			if (next_urb) {
2700				// Find out if the URBs are linked to a ring
2701				while  (next_urb != NULL && next_urb != urb && loop_count < MAX_NEXT_COUNT) {
2702					if (next_urb->status == -ENOENT) {// killed URBs break ring structure & resubmission
2703						contains_killed = 1;
2704						break;
2705					}
2706					next_urb = next_urb->next;
2707					loop_count++;
2708				}
2709
2710				if (loop_count == MAX_NEXT_COUNT)
2711					err("process_urb: Too many linked URBs in ring detection!");
2712
2713				if (next_urb == urb)
2714					is_ring=1;
2715			}
2716
2717			// Submit idle/non-killed URBs linked with urb->next
2718			// Stop before the current URB
2719
2720			next_urb = urb->next;
2721			if (next_urb && !contains_killed) {
2722				int ret_submit;
2723				next_urb = urb->next;
2724
2725				loop_count=0;
2726				while (next_urb != NULL && next_urb != urb && loop_count < MAX_NEXT_COUNT) {
2727					if (next_urb->status != -EINPROGRESS) {
2728
2729						if (next_urb->status == -ENOENT)
2730							break;
2731
2732						spin_unlock(&s->urb_list_lock);
2733
2734						ret_submit=uhci_submit_urb(next_urb);
2735						spin_lock(&s->urb_list_lock);
2736
2737						if (ret_submit)
2738							break;
2739					}
2740					loop_count++;
2741					next_urb = next_urb->next;
2742				}
2743				if (loop_count == MAX_NEXT_COUNT)
2744					err("process_urb: Too many linked URBs in resubmission!");
2745			}
2746
2747			// Completion
2748			if (urb->complete) {
2749				int was_unlinked = (urb->status == -ENOENT);
2750				urb->dev = NULL;
2751				spin_unlock(&s->urb_list_lock);
2752
2753				urb->complete ((struct urb *) urb);
2754
2755				// Re-submit the URB if ring-linked
2756				if (is_ring && !was_unlinked && !contains_killed) {
2757					urb->dev=usb_dev;
2758					uhci_submit_urb (urb);
2759				}
2760				spin_lock(&s->urb_list_lock);
2761			}
2762
2763			usb_dec_dev_use (usb_dev);
2764		}
2765	}
2766
2767	return ret;
2768}
2769
2770_static void reset_hc (uhci_t *s);
2771_static void start_hc (uhci_t *s);
2772
2773_static void uhci_interrupt (int irq, void *__uhci, struct pt_regs *regs)
2774{
2775	uhci_t *s = __uhci;
2776	unsigned int io_addr = s->io_addr;
2777	unsigned short status;
2778	struct list_head *p, *p2;
2779	int restarts, work_done;
2780	static unsigned long last_error_time = 0;
2781	static unsigned long recent_restart_count = 0;
2782	static int uhci_disabled = 0;
2783
2784	/*
2785	 * Read the interrupt status, and write it back to clear the
2786	 * interrupt cause
2787	 */
2788
2789	status = inw (io_addr + USBSTS);
2790
2791	if (!status)		/* shared interrupt, not mine */
2792		return;
2793
2794	if (uhci_disabled)
2795		return;
2796
2797	dbg("interrupt");
2798
2799	if (status != 1) {
2800		// Avoid too many error messages at a time
2801		if (time_after(jiffies, s->last_error_time + ERROR_SUPPRESSION_TIME)) {
2802			warn("interrupt, status %x, frame# %i", status,
2803			     UHCI_GET_CURRENT_FRAME(s));
2804			s->last_error_time = jiffies;
2805		}
2806
2807		// remove host controller halted state
2808		if ((status&0x20) && (s->running)) {
2809			err("Host controller halted, trying to restart.");
2810			if (status & 0x08) {
2812				// PCI Bus error???
2813				if (bcm_dump_pci_status) {
2814				    dump_pci_status_regs("usb uhci host error");
2815				}
2816				if (bcm_host_error_reset) {
2817				    reset_hc(s);
2818				    start_hc(s);
2819				}
2820			}
2821			outw (USBCMD_RS | inw(io_addr + USBCMD), io_addr + USBCMD);
2822			if (time_after(last_error_time + HZ, jiffies)) {
2823				if (recent_restart_count > 10) {
2824					err("We've had more than 10 attempted restarts in the last second, so we'll give up now and disable UHCI until reboot");
2825					uhci_disabled = 1;
2826				}
2827				++recent_restart_count;
2828			} else {
2829				last_error_time = jiffies;
2830				recent_restart_count = 1;
2831			}
2832		}
2833		//uhci_show_status (s);
2834	}
2835	/*
2836	 * traverse the list in *reverse* direction, because new entries
2837	 * may be added at the end.
2838	 * also, because process_urb may unlink the current urb,
2839	 * we need to advance the list pointer before processing the entry.
2840	 * New: check for max. workload and restart count
2841	 */
2842
2843	spin_lock (&s->urb_list_lock);
2844
2845	restarts=0;
2846	work_done=0;
2847
2848restart:
2849	s->unlink_urb_done=0;
2850	p = s->urb_list.prev;
2851
2852	while (p != &s->urb_list && (work_done < 1024)) {
2853		p2 = p;
2854		p = p->prev;
2855
2856		process_urb (s, p2);
2857
2858		work_done++;
2859
2860		if (s->unlink_urb_done) {
2861			s->unlink_urb_done=0;
2862			restarts++;
2863
2864			if (restarts<16)	// avoid endless restarts
2865				goto restart;
2866			else
2867				break;
2868		}
2869	}
2870	if (time_after(jiffies, s->timeout_check + (HZ/30)))
2871		uhci_check_timeouts(s);
2872
2873	clean_descs(s, CLEAN_NOT_FORCED);
2874	uhci_cleanup_unlink(s, CLEAN_NOT_FORCED);
2875	uhci_switch_timer_int(s);
2876
2877	spin_unlock (&s->urb_list_lock);
2878
2879	outw (status, io_addr + USBSTS);
2880
2881	//dbg("uhci_interrupt: done");
2882}
2883
2884_static void reset_hc (uhci_t *s)
2885{
2886	unsigned int io_addr = s->io_addr;
2887
2888	s->apm_state = 0;
2889	/* Global reset for 50ms */
2890	outw (USBCMD_GRESET, io_addr + USBCMD);
2891	uhci_wait_ms (50);
2892	outw (0, io_addr + USBCMD);
2893	uhci_wait_ms (10);
2894}
2895
2896_static void start_hc (uhci_t *s)
2897{
2898	unsigned int io_addr = s->io_addr;
2899	int timeout = 10;
2900
2901	/*
2902	 * Reset the HC - this will force us to get a
2903	 * new notification of any already connected
2904	 * ports due to the virtual disconnect that it
2905	 * implies.
2906	 */
2907	outw (USBCMD_HCRESET, io_addr + USBCMD);
2908
2909	while (inw (io_addr + USBCMD) & USBCMD_HCRESET) {
2910		if (!--timeout) {
2911			err("USBCMD_HCRESET timed out!");
2912			break;
2913		}
2914		udelay(1);
2915	}
2916
2917	/* Turn on all interrupts */
2918	outw (USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP, io_addr + USBINTR);
2919
2920	/* Start at frame 0 */
2921	outw (0, io_addr + USBFRNUM);
2922	outl (s->framelist_dma, io_addr + USBFLBASEADD);
2923
2924	/* Run and mark it configured with a 64-byte max packet */
2925	outw (USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
2926	s->apm_state = 1;
2927	s->running = 1;
2928}
2929
2930/* No __devexit, since it may be called from alloc_uhci() */
2931_static void
2932uhci_pci_remove (struct pci_dev *dev)
2933{
2934	uhci_t *s = pci_get_drvdata(dev);
2935	struct usb_device *root_hub = s->bus->root_hub;
2936
2937	s->running = 0;		    // Don't allow submit_urb
2938
2939	if (root_hub)
2940		usb_disconnect (&root_hub);
2941
2942	reset_hc (s);
2943	wait_ms (1);
2944
2945	uhci_unlink_urbs (s, 0, CLEAN_FORCED);  // Forced unlink of remaining URBs
2946	uhci_cleanup_unlink (s, CLEAN_FORCED);  // force cleanup of async killed URBs
2947
2948	usb_deregister_bus (s->bus);
2949
2950	release_region (s->io_addr, s->io_size);
2951	free_irq (s->irq, s);
2952	usb_free_bus (s->bus);
2953	cleanup_skel (s);
2954	kfree (s);
2955}
2956
2957_static int __init uhci_start_usb (uhci_t *s)
2958{				/* start it up */
2959	/* connect the virtual root hub */
2960	struct usb_device *usb_dev;
2961
2962	usb_dev = usb_alloc_dev (NULL, s->bus);
2963	if (!usb_dev)
2964		return -1;
2965
2966	s->bus->root_hub = usb_dev;
2967	usb_connect (usb_dev);
2968
2969	if (usb_new_device (usb_dev) != 0) {
2970		usb_free_dev (usb_dev);
2971		return -1;
2972	}
2973
2974	return 0;
2975}
2976
2977#ifdef CONFIG_PM
2978_static int
2979uhci_pci_suspend (struct pci_dev *dev, u32 state)
2980{
2981	reset_hc((uhci_t *) pci_get_drvdata(dev));
2982	return 0;
2983}
2984
2985_static int
2986uhci_pci_resume (struct pci_dev *dev)
2987{
2988	start_hc((uhci_t *) pci_get_drvdata(dev));
2989	return 0;
2990}
2991#endif
2992
2993_static int __devinit alloc_uhci (struct pci_dev *dev, int irq, unsigned int io_addr, unsigned int io_size)
2994{
2995	uhci_t *s;
2996	struct usb_bus *bus;
2997	char buf[8], *bufp = buf;
2998
2999#ifndef __sparc__
3000	sprintf(buf, "%d", irq);
3001#else
3002	bufp = __irq_itoa(irq);
3003#endif
3004	printk(KERN_INFO __FILE__ ": USB UHCI at I/O 0x%x, IRQ %s\n",
3005		io_addr, bufp);
3006
3007	s = kmalloc (sizeof (uhci_t), GFP_KERNEL);
3008	if (!s)
3009		return -1;
3010
3011	memset (s, 0, sizeof (uhci_t));
3012	INIT_LIST_HEAD (&s->free_desc);
3013	INIT_LIST_HEAD (&s->urb_list);
3014	INIT_LIST_HEAD (&s->urb_unlinked);
3015	spin_lock_init (&s->urb_list_lock);
3016	spin_lock_init (&s->qh_lock);
3017	spin_lock_init (&s->td_lock);
3018	atomic_set(&s->avoid_bulk, 0);
3019	s->irq = -1;
3020	s->io_addr = io_addr;
3021	s->io_size = io_size;
3022	s->uhci_pci=dev;
3023
3024	bus = usb_alloc_bus (&uhci_device_operations);
3025	if (!bus) {
3026		kfree (s);
3027		return -1;
3028	}
3029
3030	s->bus = bus;
3031	bus->bus_name = dev->slot_name;
3032	bus->hcpriv = s;
3033
3034	/* The UHCI spec says devices must have 2 ports, but goes on to say */
3035	/* they may have more and gives no way to determine how many they */
3036	/* have, so default to 2. */
3037	/* According to the UHCI spec, bit 7 of PORTSC is always set to 1, */
3038	/* so we try to use this to our advantage when counting ports. */
3039
3040	for (s->maxports = 0; s->maxports < (io_size - 0x10) / 2; s->maxports++) {
3041		unsigned int portstatus;
3042
3043		portstatus = inw (io_addr + 0x10 + (s->maxports * 2));
3044		dbg("port %i, adr %x status %x", s->maxports,
3045			io_addr + 0x10 + (s->maxports * 2), portstatus);
3046		if (!(portstatus & 0x0080))
3047			break;
3048	}
3049	warn("Detected %d ports", s->maxports);
3050
3051	/* This is experimental so anything less than 2 or greater than 8 is */
3052	/*  something weird and we'll ignore it */
3053	if (s->maxports < 2 || s->maxports > 8) {
3054		dbg("Port count misdetected, forcing to 2 ports");
3055		s->maxports = 2;
3056	}
3057
3058	s->rh.numports = s->maxports;
3059	s->loop_usage=0;
3060	if (init_skel (s)) {
3061		usb_free_bus (bus);
3062		kfree(s);
3063		return -1;
3064	}
3065
3066	request_region (s->io_addr, io_size, MODNAME);
3067	reset_hc (s);
3068	usb_register_bus (s->bus);
3069
3070	start_hc (s);
3071
3072	if (request_irq (irq, uhci_interrupt, SA_SHIRQ, MODNAME, s)) {
3073		err("request_irq %d failed!",irq);
3074		usb_free_bus (bus);
3075		reset_hc (s);
3076		release_region (s->io_addr, s->io_size);
3077		cleanup_skel(s);
3078		kfree(s);
3079		return -1;
3080	}
3081
3082	/* Enable PIRQ */
3083	pci_write_config_word (dev, USBLEGSUP, USBLEGSUP_DEFAULT);
3084
3085	s->irq = irq;
3086
3087	if(uhci_start_usb (s) < 0) {
3088		uhci_pci_remove(dev);
3089		return -1;
3090	}
3091
3092	//chain new uhci device into global list
3093	pci_set_drvdata(dev, s);
3094	devs=s;
3095
3096	return 0;
3097}
3098
3099_static int __devinit
3100uhci_pci_probe (struct pci_dev *dev, const struct pci_device_id *id)
3101{
3102	int i;
3103
3104	if (pci_enable_device(dev) < 0)
3105		return -ENODEV;
3106
3107	if (!dev->irq) {
3108		err("found UHCI device with no IRQ assigned. check BIOS settings!");
3109		return -ENODEV;
3110	}
3111
3112	pci_set_master(dev);
3113
3114	{
3115		u8 misc_reg;
3116		u32 vendor_id;
3117
3118		pci_read_config_dword (dev, PCI_VENDOR_ID, &vendor_id);
3119		printk(KERN_INFO "UHCI PCI device %x found.\n", vendor_id);
3120
3121		if (vendor_id == 0x30381106) {
3122			/* VIA 6212 */
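			/* Vendor-specific workaround: clear bit 4 of PCI config
			 * register 0x41 on the VIA VT6212 before using it. */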
3123			pci_read_config_byte(dev, 0x41, &misc_reg);
3124			printk(KERN_INFO "UHCI reg 0x41 = %x\n", misc_reg);
3125			misc_reg &= ~0x10;
3126			pci_write_config_byte(dev, 0x41, misc_reg);
3127			pci_read_config_byte(dev, 0x41, &misc_reg);
3128			printk(KERN_INFO "UHCI reg 0x41 changed to %x\n", misc_reg);
3129		}
3130	}
3131
3132	/* Search for the IO base address.. */
3133	for (i = 0; i < 6; i++) {
3134
3135		unsigned int io_addr = pci_resource_start(dev, i);
3136		unsigned int io_size = pci_resource_len(dev, i);
3137		if (!(pci_resource_flags(dev,i) & IORESOURCE_IO))
3138			continue;
3139
3140		/* Is it already in use? */
3141		if (check_region (io_addr, io_size))
3142			break;
3143		/* disable legacy emulation */
3144		pci_write_config_word (dev, USBLEGSUP, 0);
3145
3146		return alloc_uhci(dev, dev->irq, io_addr, io_size);
3147	}
3148	return -ENODEV;
3149}
3150
3151/*-------------------------------------------------------------------------*/
3152
3153static const struct pci_device_id __devinitdata uhci_pci_ids [] = { {
3154
3155	/* handle any USB UHCI controller */
3156	class: 		((PCI_CLASS_SERIAL_USB << 8) | 0x00),
3157	class_mask: 	~0,
3158
3159	/* no matter who makes it */
3160	vendor:		PCI_ANY_ID,
3161	device:		PCI_ANY_ID,
3162	subvendor:	PCI_ANY_ID,
3163	subdevice:	PCI_ANY_ID,
3164
3165	}, { /* end: all zeroes */ }
3166};
3167
3168MODULE_DEVICE_TABLE (pci, uhci_pci_ids);
3169
3170static struct pci_driver uhci_pci_driver = {
3171	name:		"usb-uhci",
3172	id_table:	&uhci_pci_ids [0],
3173
3174	probe:		uhci_pci_probe,
3175	remove:		uhci_pci_remove,
3176
3177#ifdef	CONFIG_PM
3178	suspend:	uhci_pci_suspend,
3179	resume:		uhci_pci_resume,
3180#endif	/* PM */
3181
3182};
3183
3184/*-------------------------------------------------------------------------*/
3185
3186static int __init uhci_hcd_init (void)
3187{
3188	int retval;
3189
3190#ifdef DEBUG_SLAB
3191	urb_priv_kmem = kmem_cache_create("urb_priv", sizeof(urb_priv_t), 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
3192
3193	if(!urb_priv_kmem) {
3194		err("kmem_cache_create for urb_priv_t failed (out of memory)");
3195		return -ENOMEM;
3196	}
3197#endif
3198	info(VERSTR);
3199
3200#ifdef CONFIG_USB_UHCI_HIGH_BANDWIDTH
3201	info("High bandwidth mode enabled");
3202#endif
3203
3204	retval = pci_module_init (&uhci_pci_driver);
3205
3206{
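	/* Broadcom debug hooks: these nvram variables enable PCI status dumps
	 * and a host-controller reset on bus errors in uhci_interrupt(). */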
3207	extern const char *nvram_get(const char *);
3208	if (nvram_get("bcm_debug_uhci_dump") != NULL) {
3209	    bcm_dump_pci_status = 1;
3210	    dump_pci_status_regs("uhci_hcd_init");
3211	}
3212	if (nvram_get("bcm_debug_uhci_reset") != NULL) {
3213	    bcm_host_error_reset = 1;
3214	}
3215}
3216
3217#ifdef DEBUG_SLAB
3218	if (retval < 0 ) {
3219		if (kmem_cache_destroy(urb_priv_kmem))
3220			err("urb_priv_kmem remained");
3221	}
3222#endif
3223
3224	info(DRIVER_VERSION ":" DRIVER_DESC);
3225
3226	return retval;
3227}
3228
3229static void __exit uhci_hcd_cleanup (void)
3230{
3231	pci_unregister_driver (&uhci_pci_driver);
3232
3233#ifdef DEBUG_SLAB
3234	if(kmem_cache_destroy(urb_priv_kmem))
3235		err("urb_priv_kmem remained");
3236#endif
3237}
3238
3239module_init (uhci_hcd_init);
3240module_exit (uhci_hcd_cleanup);
3241
3242
3243MODULE_AUTHOR( DRIVER_AUTHOR );
3244MODULE_DESCRIPTION( DRIVER_DESC );
3245MODULE_LICENSE("GPL");
3246