1/*
2 * ohci1394.c - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 *                        Gord Peters <GordPeters@smarttech.com>
5 *              2001      Ben Collins <bcollins@debian.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22/*
23 * Things known to be working:
24 * . Async Request Transmit
25 * . Async Response Receive
26 * . Async Request Receive
27 * . Async Response Transmit
28 * . Iso Receive
29 * . DMA mmap for iso receive
30 * . Config ROM generation
31 *
32 * Things implemented, but still in test phase:
33 * . Iso Transmit
34 * . Async Stream Packets Transmit (Receive done via Iso interface)
35 *
36 * Things not implemented:
37 * . DMA error recovery
38 *
39 * Known bugs:
40 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41 *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
42 */
43
44/*
45 * Acknowledgments:
46 *
47 * Adam J Richter <adam@yggdrasil.com>
48 *  . Use of pci_class to find device
49 *
50 * Emilie Chung	<emilie.chung@axis.com>
51 *  . Tip on Async Request Filter
52 *
53 * Pascal Drolet <pascal.drolet@informission.ca>
54 *  . Various tips for optimization and functionnalities
55 *
56 * Robert Ficklin <rficklin@westengineering.com>
57 *  . Loop in irq_handler
58 *
59 * James Goodwin <jamesg@Filanet.com>
60 *  . Various tips on initialization, self-id reception, etc.
61 *
62 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63 *  . Apple PowerBook detection
64 *
65 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66 *  . Reset the board properly before leaving + misc cleanups
67 *
68 * Leon van Stuivenberg <leonvs@iae.nl>
69 *  . Bug fixes
70 *
71 * Ben Collins <bcollins@debian.org>
72 *  . Working big-endian support
73 *  . Updated to 2.4.x module scheme (PCI aswell)
74 *  . Config ROM generation
75 *
76 * Manfred Weihs <weihs@ict.tuwien.ac.at>
77 *  . Reworked code for initiating bus resets
78 *    (long, short, with or without hold-off)
79 *
80 * Nandu Santhi <contactnandu@users.sourceforge.net>
81 *  . Added support for nVidia nForce2 onboard Firewire chipset
82 *
83 */
84
85#include <linux/kernel.h>
86#include <linux/list.h>
87#include <linux/slab.h>
88#include <linux/interrupt.h>
89#include <linux/wait.h>
90#include <linux/errno.h>
91#include <linux/module.h>
92#include <linux/moduleparam.h>
93#include <linux/pci.h>
94#include <linux/fs.h>
95#include <linux/poll.h>
96#include <asm/byteorder.h>
97#include <asm/atomic.h>
98#include <asm/uaccess.h>
99#include <linux/delay.h>
100#include <linux/spinlock.h>
101
102#include <asm/pgtable.h>
103#include <asm/page.h>
104#include <asm/irq.h>
105#include <linux/types.h>
106#include <linux/vmalloc.h>
107#include <linux/init.h>
108
109#ifdef CONFIG_PPC_PMAC
110#include <asm/machdep.h>
111#include <asm/pmac_feature.h>
112#include <asm/prom.h>
113#include <asm/pci-bridge.h>
114#endif
115
116#include "csr1212.h"
117#include "ieee1394.h"
118#include "ieee1394_types.h"
119#include "hosts.h"
120#include "dma.h"
121#include "iso.h"
122#include "ieee1394_core.h"
123#include "highlevel.h"
124#include "ohci1394.h"
125
126#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
127#define OHCI1394_DEBUG
128#endif
129
130#ifdef DBGMSG
131#undef DBGMSG
132#endif
133
134#ifdef OHCI1394_DEBUG
135#define DBGMSG(fmt, args...) \
136printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
137#else
138#define DBGMSG(fmt, args...) do {} while (0)
139#endif
140
141#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
142#define OHCI_DMA_ALLOC(fmt, args...) \
143	HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
144		++global_outstanding_dmas, ## args)
145#define OHCI_DMA_FREE(fmt, args...) \
146	HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
147		--global_outstanding_dmas, ## args)
148static int global_outstanding_dmas = 0;
149#else
150#define OHCI_DMA_ALLOC(fmt, args...) do {} while (0)
151#define OHCI_DMA_FREE(fmt, args...) do {} while (0)
152#endif
153
154/* print general (card independent) information */
155#define PRINT_G(level, fmt, args...) \
156printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
157
158/* print card specific information */
159#define PRINT(level, fmt, args...) \
160printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
161
162/* Module Parameters */
163static int phys_dma = 1;
164module_param(phys_dma, int, 0444);
165MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
166
167static void dma_trm_tasklet(unsigned long data);
168static void dma_trm_reset(struct dma_trm_ctx *d);
169
170static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
171			     enum context_type type, int ctx, int num_desc,
172			     int buf_size, int split_buf_size, int context_base);
173static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
174static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
175
176static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
177			     enum context_type type, int ctx, int num_desc,
178			     int context_base);
179
180static void ohci1394_pci_remove(struct pci_dev *pdev);
181
182#ifndef __LITTLE_ENDIAN
183static const size_t hdr_sizes[] = {
184	3,	/* TCODE_WRITEQ */
185	4,	/* TCODE_WRITEB */
186	3,	/* TCODE_WRITE_RESPONSE */
187	0,	/* reserved */
188	3,	/* TCODE_READQ */
189	4,	/* TCODE_READB */
190	3,	/* TCODE_READQ_RESPONSE */
191	4,	/* TCODE_READB_RESPONSE */
192	1,	/* TCODE_CYCLE_START */
193	4,	/* TCODE_LOCK_REQUEST */
194	2,	/* TCODE_ISO_DATA */
195	4,	/* TCODE_LOCK_RESPONSE */
196		/* rest is reserved or link-internal */
197};
198
199static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode)
200{
201	size_t size;
202
203	if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes)))
204		return;
205
206	size = hdr_sizes[tcode];
207	while (size--)
208		data[size] = le32_to_cpu(data[size]);
209}
210#else
211#define header_le32_to_cpu(w,x) do {} while (0)
212#endif /* !LITTLE_ENDIAN */
213
214/***********************************
215 * IEEE-1394 functionality section *
216 ***********************************/
217
218static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
219{
220	int i;
221	unsigned long flags;
222	quadlet_t r;
223
224	spin_lock_irqsave (&ohci->phy_reg_lock, flags);
225
226	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
227
228	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
229		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
230			break;
231
232		mdelay(1);
233	}
234
235	r = reg_read(ohci, OHCI1394_PhyControl);
236
237	if (i >= OHCI_LOOP_COUNT)
238		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
239		       r, r & 0x80000000, i);
240
241	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
242
243	return (r & 0x00ff0000) >> 16;
244}
245
246static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
247{
248	int i;
249	unsigned long flags;
250	u32 r = 0;
251
252	spin_lock_irqsave (&ohci->phy_reg_lock, flags);
253
254	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
255
256	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
257		r = reg_read(ohci, OHCI1394_PhyControl);
258		if (!(r & 0x00004000))
259			break;
260
261		mdelay(1);
262	}
263
264	if (i == OHCI_LOOP_COUNT)
265		PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
266		       r, r & 0x00004000, i);
267
268	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
269
270	return;
271}
272
273/* Or's our value into the current value */
274static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
275{
276	u8 old;
277
278	old = get_phy_reg (ohci, addr);
279	old |= data;
280	set_phy_reg (ohci, addr, old);
281
282	return;
283}
284
285static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
286				int phyid, int isroot)
287{
288	quadlet_t *q = ohci->selfid_buf_cpu;
289	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
290	size_t size;
291	quadlet_t q0, q1;
292
293	/* Check status of self-id reception */
294
295	if (ohci->selfid_swap)
296		q0 = le32_to_cpu(q[0]);
297	else
298		q0 = q[0];
299
300	if ((self_id_count & 0x80000000) ||
301	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
302		PRINT(KERN_ERR,
303		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
304		      self_id_count, q0, ohci->self_id_errors);
305
306		/* Tip by James Goodwin <jamesg@Filanet.com>:
307		 * We had an error, generate another bus reset in response.  */
308		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
309			set_phy_reg_mask (ohci, 1, 0x40);
310			ohci->self_id_errors++;
311		} else {
312			PRINT(KERN_ERR,
313			      "Too many errors on SelfID error reception, giving up!");
314		}
315		return;
316	}
317
318	/* SelfID Ok, reset error counter. */
319	ohci->self_id_errors = 0;
320
321	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
322	q++;
323
324	while (size > 0) {
325		if (ohci->selfid_swap) {
326			q0 = le32_to_cpu(q[0]);
327			q1 = le32_to_cpu(q[1]);
328		} else {
329			q0 = q[0];
330			q1 = q[1];
331		}
332
333		if (q0 == ~q1) {
334			DBGMSG ("SelfID packet 0x%x received", q0);
335			hpsb_selfid_received(host, cpu_to_be32(q0));
336			if (((q0 & 0x3f000000) >> 24) == phyid)
337				DBGMSG ("SelfID for this node is 0x%08x", q0);
338		} else {
339			PRINT(KERN_ERR,
340			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
341		}
342		q += 2;
343		size -= 2;
344	}
345
346	DBGMSG("SelfID complete");
347
348	return;
349}
350
351static void ohci_soft_reset(struct ti_ohci *ohci) {
352	int i;
353
354	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
355
356	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
357		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
358			break;
359		mdelay(1);
360	}
361	DBGMSG ("Soft reset finished");
362}
363
364
365/* Generate the dma receive prgs and start the context */
366static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
367{
368	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
369	int i;
370
371	ohci1394_stop_context(ohci, d->ctrlClear, NULL);
372
373	for (i=0; i<d->num_desc; i++) {
374		u32 c;
375
376		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
377		if (generate_irq)
378			c |= DMA_CTL_IRQ;
379
380		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);
381
382		/* End of descriptor list? */
383		if (i + 1 < d->num_desc) {
384			d->prg_cpu[i]->branchAddress =
385				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
386		} else {
387			d->prg_cpu[i]->branchAddress =
388				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
389		}
390
391		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
392		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
393	}
394
395        d->buf_ind = 0;
396        d->buf_offset = 0;
397
398	if (d->type == DMA_CTX_ISO) {
399		/* Clear contextControl */
400		reg_write(ohci, d->ctrlClear, 0xffffffff);
401
402		/* Set bufferFill, isochHeader, multichannel for IR context */
403		reg_write(ohci, d->ctrlSet, 0xd0000000);
404
405		/* Set the context match register to match on all tags */
406		reg_write(ohci, d->ctxtMatch, 0xf0000000);
407
408		/* Clear the multi channel mask high and low registers */
409		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
410		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);
411
412		/* Set up isoRecvIntMask to generate interrupts */
413		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
414	}
415
416	/* Tell the controller where the first AR program is */
417	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);
418
419	/* Run context */
420	reg_write(ohci, d->ctrlSet, 0x00008000);
421
422	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
423}
424
425/* Initialize the dma transmit context */
426static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
427{
428	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
429
430	/* Stop the context */
431	ohci1394_stop_context(ohci, d->ctrlClear, NULL);
432
433        d->prg_ind = 0;
434	d->sent_ind = 0;
435	d->free_prgs = d->num_desc;
436        d->branchAddrPtr = NULL;
437	INIT_LIST_HEAD(&d->fifo_list);
438	INIT_LIST_HEAD(&d->pending_list);
439
440	if (d->type == DMA_CTX_ISO) {
441		/* enable interrupts */
442		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
443	}
444
445	DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
446}
447
448/* Count the number of available iso contexts */
449static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
450{
451	int i,ctx=0;
452	u32 tmp;
453
454	reg_write(ohci, reg, 0xffffffff);
455	tmp = reg_read(ohci, reg);
456
457	DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
458
459	/* Count the number of contexts */
460	for (i=0; i<32; i++) {
461	    	if (tmp & 1) ctx++;
462		tmp >>= 1;
463	}
464	return ctx;
465}
466
467/* Global initialization */
468static void ohci_initialize(struct ti_ohci *ohci)
469{
470	quadlet_t buf;
471	int num_ports, i;
472
473	spin_lock_init(&ohci->phy_reg_lock);
474
475	/* Put some defaults to these undefined bus options */
476	buf = reg_read(ohci, OHCI1394_BusOptions);
477	buf |=  0x60000000; /* Enable CMC and ISC */
478	if (hpsb_disable_irm)
479		buf &= ~0x80000000;
480	else
481		buf |=  0x80000000; /* Enable IRMC */
482	buf &= ~0x00ff0000;
483	buf &= ~0x18000000; /* Disable PMC and BMC */
484	reg_write(ohci, OHCI1394_BusOptions, buf);
485
486	/* Set the bus number */
487	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
488
489	/* Enable posted writes */
490	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);
491
492	/* Clear link control register */
493	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
494
495	/* Enable cycle timer and cycle master and set the IRM
496	 * contender bit in our self ID packets if appropriate. */
497	reg_write(ohci, OHCI1394_LinkControlSet,
498		  OHCI1394_LinkControl_CycleTimerEnable |
499		  OHCI1394_LinkControl_CycleMaster);
500	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
501	if (hpsb_disable_irm)
502		i &= ~PHY_04_CONTENDER;
503	else
504		i |= PHY_04_CONTENDER;
505	set_phy_reg(ohci, 4, i);
506
507	/* Set up self-id dma buffer */
508	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);
509
510	/* enable self-id */
511	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID);
512
513	/* Set the Config ROM mapping register */
514	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);
515
516	/* Now get our max packet size */
517	ohci->max_packet_size =
518		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
519
520	/* Clear the interrupt mask */
521	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
522	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
523
524	/* Clear the interrupt mask */
525	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
526	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
527
528	/* Initialize AR dma */
529	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
530	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);
531
532	/* Initialize AT dma */
533	initialize_dma_trm_ctx(&ohci->at_req_context);
534	initialize_dma_trm_ctx(&ohci->at_resp_context);
535
536	/* Initialize IR Legacy DMA channel mask */
537	ohci->ir_legacy_channels = 0;
538
539	/* Accept AR requests from all nodes */
540	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
541
542	/* Set the address range of the physical response unit.
543	 * Most controllers do not implement it as a writable register though.
544	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
545	 * register content.
546	 * To actually enable physical responses is the job of our interrupt
547	 * handler which programs the physical request filter. */
548	reg_write(ohci, OHCI1394_PhyUpperBound,
549		  OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);
550
551	DBGMSG("physUpperBoundOffset=%08x",
552	       reg_read(ohci, OHCI1394_PhyUpperBound));
553
554	/* Specify AT retries */
555	reg_write(ohci, OHCI1394_ATRetries,
556		  OHCI1394_MAX_AT_REQ_RETRIES |
557		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
558		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
559
560	/* We don't want hardware swapping */
561	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);
562
563	/* Enable interrupts */
564	reg_write(ohci, OHCI1394_IntMaskSet,
565		  OHCI1394_unrecoverableError |
566		  OHCI1394_masterIntEnable |
567		  OHCI1394_busReset |
568		  OHCI1394_selfIDComplete |
569		  OHCI1394_RSPkt |
570		  OHCI1394_RQPkt |
571		  OHCI1394_respTxComplete |
572		  OHCI1394_reqTxComplete |
573		  OHCI1394_isochRx |
574		  OHCI1394_isochTx |
575		  OHCI1394_postedWriteErr |
576		  OHCI1394_cycleTooLong |
577		  OHCI1394_cycleInconsistent);
578
579	/* Enable link */
580	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
581
582	buf = reg_read(ohci, OHCI1394_Version);
583	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d]  "
584	      "MMIO=[%llx-%llx]  Max Packet=[%d]  IR/IT contexts=[%d/%d]",
585	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
586	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
587	      (unsigned long long)pci_resource_start(ohci->dev, 0),
588	      (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
589	      ohci->max_packet_size,
590	      ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);
591
592	/* Check all of our ports to make sure that if anything is
593	 * connected, we enable that port. */
594	num_ports = get_phy_reg(ohci, 2) & 0xf;
595	for (i = 0; i < num_ports; i++) {
596		unsigned int status;
597
598		set_phy_reg(ohci, 7, i);
599		status = get_phy_reg(ohci, 8);
600
601		if (status & 0x20)
602			set_phy_reg(ohci, 8, status & ~1);
603	}
604
605        /* Serial EEPROM Sanity check. */
606        if ((ohci->max_packet_size < 512) ||
607	    (ohci->max_packet_size > 4096)) {
608		/* Serial EEPROM contents are suspect, set a sane max packet
609		 * size and print the raw contents for bug reports if verbose
610		 * debug is enabled. */
611#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
612		int i;
613#endif
614
615		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
616                      "attempting to set max_packet_size to 512 bytes");
617		reg_write(ohci, OHCI1394_BusOptions,
618			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
619		ohci->max_packet_size = 512;
620#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
621		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
622		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
623		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);
624
625		for (i = 0;
626		     ((i < 1000) &&
627		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
628			udelay(10);
629
630		for (i = 0; i < 0x20; i++) {
631			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
632			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
633			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
634		}
635#endif
636	}
637}
638
639static void insert_packet(struct ti_ohci *ohci,
640			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
641{
642	u32 cycleTimer;
643	int idx = d->prg_ind;
644
645	DBGMSG("Inserting packet for node " NODE_BUS_FMT
646	       ", tlabel=%d, tcode=0x%x, speed=%d",
647	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
648	       packet->tcode, packet->speed_code);
649
650	d->prg_cpu[idx]->begin.address = 0;
651	d->prg_cpu[idx]->begin.branchAddress = 0;
652
653	if (d->type == DMA_CTX_ASYNC_RESP) {
654		/*
655		 * For response packets, we need to put a timeout value in
656		 * the 16 lower bits of the status... let's try 1 sec timeout
657		 */
658		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
659		d->prg_cpu[idx]->begin.status = cpu_to_le32(
660			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
661			((cycleTimer&0x01fff000)>>12));
662
663		DBGMSG("cycleTimer: %08x timeStamp: %08x",
664		       cycleTimer, d->prg_cpu[idx]->begin.status);
665	} else
666		d->prg_cpu[idx]->begin.status = 0;
667
668        if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {
669
670                if (packet->type == hpsb_raw) {
671			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
672                        d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
673                        d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
674                } else {
675                        d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
676                                (packet->header[0] & 0xFFFF);
677
678			if (packet->tcode == TCODE_ISO_DATA) {
679				/* Sending an async stream packet */
680				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
681			} else {
682				/* Sending a normal async request or response */
683				d->prg_cpu[idx]->data[1] =
684					(packet->header[1] & 0xFFFF) |
685					(packet->header[0] & 0xFFFF0000);
686				d->prg_cpu[idx]->data[2] = packet->header[2];
687				d->prg_cpu[idx]->data[3] = packet->header[3];
688			}
689			header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
690                }
691
692                if (packet->data_size) { /* block transmit */
693			if (packet->tcode == TCODE_STREAM_DATA){
694				d->prg_cpu[idx]->begin.control =
695					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
696						    DMA_CTL_IMMEDIATE | 0x8);
697			} else {
698				d->prg_cpu[idx]->begin.control =
699					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
700						    DMA_CTL_IMMEDIATE | 0x10);
701			}
702                        d->prg_cpu[idx]->end.control =
703                                cpu_to_le32(DMA_CTL_OUTPUT_LAST |
704					    DMA_CTL_IRQ |
705					    DMA_CTL_BRANCH |
706					    packet->data_size);
707                        d->prg_cpu[idx]->end.address = cpu_to_le32(
708                                pci_map_single(ohci->dev, packet->data,
709                                               packet->data_size,
710                                               PCI_DMA_TODEVICE));
711			OHCI_DMA_ALLOC("single, block transmit packet");
712
713                        d->prg_cpu[idx]->end.branchAddress = 0;
714                        d->prg_cpu[idx]->end.status = 0;
715                        if (d->branchAddrPtr)
716                                *(d->branchAddrPtr) =
717					cpu_to_le32(d->prg_bus[idx] | 0x3);
718                        d->branchAddrPtr =
719                                &(d->prg_cpu[idx]->end.branchAddress);
720                } else { /* quadlet transmit */
721                        if (packet->type == hpsb_raw)
722                                d->prg_cpu[idx]->begin.control =
723					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
724						    DMA_CTL_IMMEDIATE |
725						    DMA_CTL_IRQ |
726						    DMA_CTL_BRANCH |
727						    (packet->header_size + 4));
728                        else
729                                d->prg_cpu[idx]->begin.control =
730					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
731						    DMA_CTL_IMMEDIATE |
732						    DMA_CTL_IRQ |
733						    DMA_CTL_BRANCH |
734						    packet->header_size);
735
736                        if (d->branchAddrPtr)
737                                *(d->branchAddrPtr) =
738					cpu_to_le32(d->prg_bus[idx] | 0x2);
739                        d->branchAddrPtr =
740                                &(d->prg_cpu[idx]->begin.branchAddress);
741                }
742
743        } else { /* iso packet */
744                d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
745                        (packet->header[0] & 0xFFFF);
746                d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
747		header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
748
749                d->prg_cpu[idx]->begin.control =
750			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
751				    DMA_CTL_IMMEDIATE | 0x8);
752                d->prg_cpu[idx]->end.control =
753			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
754				    DMA_CTL_UPDATE |
755				    DMA_CTL_IRQ |
756				    DMA_CTL_BRANCH |
757				    packet->data_size);
758                d->prg_cpu[idx]->end.address = cpu_to_le32(
759				pci_map_single(ohci->dev, packet->data,
760				packet->data_size, PCI_DMA_TODEVICE));
761		OHCI_DMA_ALLOC("single, iso transmit packet");
762
763                d->prg_cpu[idx]->end.branchAddress = 0;
764                d->prg_cpu[idx]->end.status = 0;
765                DBGMSG("Iso xmit context info: header[%08x %08x]\n"
766                       "                       begin=%08x %08x %08x %08x\n"
767                       "                             %08x %08x %08x %08x\n"
768                       "                       end  =%08x %08x %08x %08x",
769                       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
770                       d->prg_cpu[idx]->begin.control,
771                       d->prg_cpu[idx]->begin.address,
772                       d->prg_cpu[idx]->begin.branchAddress,
773                       d->prg_cpu[idx]->begin.status,
774                       d->prg_cpu[idx]->data[0],
775                       d->prg_cpu[idx]->data[1],
776                       d->prg_cpu[idx]->data[2],
777                       d->prg_cpu[idx]->data[3],
778                       d->prg_cpu[idx]->end.control,
779                       d->prg_cpu[idx]->end.address,
780                       d->prg_cpu[idx]->end.branchAddress,
781                       d->prg_cpu[idx]->end.status);
782                if (d->branchAddrPtr)
783  		        *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
784                d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
785        }
786	d->free_prgs--;
787
788	/* queue the packet in the appropriate context queue */
789	list_add_tail(&packet->driver_list, &d->fifo_list);
790	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
791}
792
793/*
794 * This function fills the FIFO with the (eventual) pending packets
795 * and runs or wakes up the DMA prg if necessary.
796 *
797 * The function MUST be called with the d->lock held.
798 */
799static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
800{
801	struct hpsb_packet *packet, *ptmp;
802	int idx = d->prg_ind;
803	int z = 0;
804
805	/* insert the packets into the dma fifo */
806	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
807		if (!d->free_prgs)
808			break;
809
810		/* For the first packet only */
811		if (!z)
812			z = (packet->data_size) ? 3 : 2;
813
814		/* Insert the packet */
815		list_del_init(&packet->driver_list);
816		insert_packet(ohci, d, packet);
817	}
818
819	/* Nothing must have been done, either no free_prgs or no packets */
820	if (z == 0)
821		return;
822
823	/* Is the context running ? (should be unless it is
824	   the first packet to be sent in this context) */
825	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
826		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
827
828		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
829		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);
830
831		/* Check that the node id is valid, and not 63 */
832		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
833			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
834		else
835			reg_write(ohci, d->ctrlSet, 0x8000);
836	} else {
837		/* Wake up the dma context if necessary */
838		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
839			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);
840
841		/* do this always, to avoid race condition */
842		reg_write(ohci, d->ctrlSet, 0x1000);
843	}
844
845	return;
846}
847
848/* Transmission of an async or iso packet */
849static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
850{
851	struct ti_ohci *ohci = host->hostdata;
852	struct dma_trm_ctx *d;
853	unsigned long flags;
854
855	if (packet->data_size > ohci->max_packet_size) {
856		PRINT(KERN_ERR,
857		      "Transmit packet size %Zd is too big",
858		      packet->data_size);
859		return -EOVERFLOW;
860	}
861
862	/* Decide whether we have an iso, a request, or a response packet */
863	if (packet->type == hpsb_raw)
864		d = &ohci->at_req_context;
865	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
866		/* The legacy IT DMA context is initialized on first
867		 * use.  However, the alloc cannot be run from
868		 * interrupt context, so we bail out if that is the
869		 * case. I don't see anyone sending ISO packets from
870		 * interrupt context anyway... */
871
872		if (ohci->it_legacy_context.ohci == NULL) {
873			if (in_interrupt()) {
874				PRINT(KERN_ERR,
875				      "legacy IT context cannot be initialized during interrupt");
876				return -EINVAL;
877			}
878
879			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
880					      DMA_CTX_ISO, 0, IT_NUM_DESC,
881					      OHCI1394_IsoXmitContextBase) < 0) {
882				PRINT(KERN_ERR,
883				      "error initializing legacy IT context");
884				return -ENOMEM;
885			}
886
887			initialize_dma_trm_ctx(&ohci->it_legacy_context);
888		}
889
890		d = &ohci->it_legacy_context;
891	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
892		d = &ohci->at_resp_context;
893	else
894		d = &ohci->at_req_context;
895
896	spin_lock_irqsave(&d->lock,flags);
897
898	list_add_tail(&packet->driver_list, &d->pending_list);
899
900	dma_trm_flush(ohci, d);
901
902	spin_unlock_irqrestore(&d->lock,flags);
903
904	return 0;
905}
906
907static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
908{
909	struct ti_ohci *ohci = host->hostdata;
910	int retval = 0;
911	unsigned long flags;
912	int phy_reg;
913
914	switch (cmd) {
915	case RESET_BUS:
916		switch (arg) {
917		case SHORT_RESET:
918			phy_reg = get_phy_reg(ohci, 5);
919			phy_reg |= 0x40;
920			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
921			break;
922		case LONG_RESET:
923			phy_reg = get_phy_reg(ohci, 1);
924			phy_reg |= 0x40;
925			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
926			break;
927		case SHORT_RESET_NO_FORCE_ROOT:
928			phy_reg = get_phy_reg(ohci, 1);
929			if (phy_reg & 0x80) {
930				phy_reg &= ~0x80;
931				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
932			}
933
934			phy_reg = get_phy_reg(ohci, 5);
935			phy_reg |= 0x40;
936			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
937			break;
938		case LONG_RESET_NO_FORCE_ROOT:
939			phy_reg = get_phy_reg(ohci, 1);
940			phy_reg &= ~0x80;
941			phy_reg |= 0x40;
942			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
943			break;
944		case SHORT_RESET_FORCE_ROOT:
945			phy_reg = get_phy_reg(ohci, 1);
946			if (!(phy_reg & 0x80)) {
947				phy_reg |= 0x80;
948				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
949			}
950
951			phy_reg = get_phy_reg(ohci, 5);
952			phy_reg |= 0x40;
953			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
954			break;
955		case LONG_RESET_FORCE_ROOT:
956			phy_reg = get_phy_reg(ohci, 1);
957			phy_reg |= 0xc0;
958			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
959			break;
960		default:
961			retval = -1;
962		}
963		break;
964
965	case GET_CYCLE_COUNTER:
966		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
967		break;
968
969	case SET_CYCLE_COUNTER:
970		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
971		break;
972
973	case SET_BUS_ID:
974		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
975		break;
976
977	case ACT_CYCLE_MASTER:
978		if (arg) {
979			/* check if we are root and other nodes are present */
980			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
981			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
982				/*
983				 * enable cycleTimer, cycleMaster
984				 */
985				DBGMSG("Cycle master enabled");
986				reg_write(ohci, OHCI1394_LinkControlSet,
987					  OHCI1394_LinkControl_CycleTimerEnable |
988					  OHCI1394_LinkControl_CycleMaster);
989			}
990		} else {
991			/* disable cycleTimer, cycleMaster, cycleSource */
992			reg_write(ohci, OHCI1394_LinkControlClear,
993				  OHCI1394_LinkControl_CycleTimerEnable |
994				  OHCI1394_LinkControl_CycleMaster |
995				  OHCI1394_LinkControl_CycleSource);
996		}
997		break;
998
999	case CANCEL_REQUESTS:
1000		DBGMSG("Cancel request received");
1001		dma_trm_reset(&ohci->at_req_context);
1002		dma_trm_reset(&ohci->at_resp_context);
1003		break;
1004
1005	case ISO_LISTEN_CHANNEL:
1006        {
1007		u64 mask;
1008		struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
1009		int ir_legacy_active;
1010
1011		if (arg<0 || arg>63) {
1012			PRINT(KERN_ERR,
1013			      "%s: IS0 listen channel %d is out of range",
1014			      __FUNCTION__, arg);
1015			return -EFAULT;
1016		}
1017
1018		mask = (u64)0x1<<arg;
1019
1020                spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1021
1022		if (ohci->ISO_channel_usage & mask) {
1023			PRINT(KERN_ERR,
1024			      "%s: IS0 listen channel %d is already used",
1025			      __FUNCTION__, arg);
1026			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1027			return -EFAULT;
1028		}
1029
1030		ir_legacy_active = ohci->ir_legacy_channels;
1031
1032		ohci->ISO_channel_usage |= mask;
1033		ohci->ir_legacy_channels |= mask;
1034
1035                spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1036
1037		if (!ir_legacy_active) {
1038			if (ohci1394_register_iso_tasklet(ohci,
1039					  &ohci->ir_legacy_tasklet) < 0) {
1040				PRINT(KERN_ERR, "No IR DMA context available");
1041				return -EBUSY;
1042			}
1043
1044			/* the IR context can be assigned to any DMA context
1045			 * by ohci1394_register_iso_tasklet */
1046			d->ctx = ohci->ir_legacy_tasklet.context;
1047			d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
1048				32*d->ctx;
1049			d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
1050				32*d->ctx;
1051			d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
1052			d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
1053
1054			initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1055
1056			if (printk_ratelimit())
1057				DBGMSG("IR legacy activated");
1058		}
1059
1060                spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1061
1062		if (arg>31)
1063			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1064				  1<<(arg-32));
1065		else
1066			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1067				  1<<arg);
1068
1069                spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1070                DBGMSG("Listening enabled on channel %d", arg);
1071                break;
1072        }
1073	case ISO_UNLISTEN_CHANNEL:
1074        {
1075		u64 mask;
1076
1077		if (arg<0 || arg>63) {
1078			PRINT(KERN_ERR,
1079			      "%s: IS0 unlisten channel %d is out of range",
1080			      __FUNCTION__, arg);
1081			return -EFAULT;
1082		}
1083
1084		mask = (u64)0x1<<arg;
1085
1086                spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1087
1088		if (!(ohci->ISO_channel_usage & mask)) {
1089			PRINT(KERN_ERR,
1090			      "%s: IS0 unlisten channel %d is not used",
1091			      __FUNCTION__, arg);
1092			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1093			return -EFAULT;
1094		}
1095
1096		ohci->ISO_channel_usage &= ~mask;
1097		ohci->ir_legacy_channels &= ~mask;
1098
1099		if (arg>31)
1100			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1101				  1<<(arg-32));
1102		else
1103			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1104				  1<<arg);
1105
1106                spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1107                DBGMSG("Listening disabled on channel %d", arg);
1108
1109		if (ohci->ir_legacy_channels == 0) {
1110			stop_dma_rcv_ctx(&ohci->ir_legacy_context);
1111			DBGMSG("ISO legacy receive context stopped");
1112		}
1113
1114                break;
1115        }
1116	default:
1117		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1118			cmd);
1119		break;
1120	}
1121	return retval;
1122}
1123
1124/***********************************
1125 * rawiso ISO reception            *
1126 ***********************************/
1127
1128/*
1129  We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1130  buffer is split into "blocks" (regions described by one DMA
1131  descriptor). Each block must be one page or less in size, and
1132  must not cross a page boundary.
1133
1134  There is one little wrinkle with buffer-fill mode: a packet that
1135  starts in the final block may wrap around into the first block. But
1136  the user API expects all packets to be contiguous. Our solution is
1137  to keep the very last page of the DMA buffer in reserve - if a
1138  packet spans the gap, we copy its tail into this page.
1139*/
1140
1141struct ohci_iso_recv {
1142	struct ti_ohci *ohci;
1143
1144	struct ohci1394_iso_tasklet task;
1145	int task_active;
1146
1147	enum { BUFFER_FILL_MODE = 0,
1148	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;
1149
1150	/* memory and PCI mapping for the DMA descriptors */
1151	struct dma_prog_region prog;
1152	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */
1153
1154	/* how many DMA blocks fit in the buffer */
1155	unsigned int nblocks;
1156
1157	/* stride of DMA blocks */
1158	unsigned int buf_stride;
1159
1160	/* number of blocks to batch between interrupts */
1161	int block_irq_interval;
1162
1163	/* block that DMA will finish next */
1164	int block_dma;
1165
1166	/* (buffer-fill only) block that the reader will release next */
1167	int block_reader;
1168
1169	/* (buffer-fill only) bytes of buffer the reader has released,
1170	   less than one block */
1171	int released_bytes;
1172
1173	/* (buffer-fill only) buffer offset at which the next packet will appear */
1174	int dma_offset;
1175
1176	/* OHCI DMA context control registers */
1177	u32 ContextControlSet;
1178	u32 ContextControlClear;
1179	u32 CommandPtr;
1180	u32 ContextMatch;
1181};
1182
1183static void ohci_iso_recv_task(unsigned long data);
1184static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1185static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1186static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1187static void ohci_iso_recv_program(struct hpsb_iso *iso);
1188
1189static int ohci_iso_recv_init(struct hpsb_iso *iso)
1190{
1191	struct ti_ohci *ohci = iso->host->hostdata;
1192	struct ohci_iso_recv *recv;
1193	int ctx;
1194	int ret = -ENOMEM;
1195
1196	recv = kmalloc(sizeof(*recv), GFP_KERNEL);
1197	if (!recv)
1198		return -ENOMEM;
1199
1200	iso->hostdata = recv;
1201	recv->ohci = ohci;
1202	recv->task_active = 0;
1203	dma_prog_region_init(&recv->prog);
1204	recv->block = NULL;
1205
1206	/* use buffer-fill mode, unless irq_interval is 1
1207	   (note: multichannel requires buffer-fill) */
1208
1209	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
1210	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
1211		recv->dma_mode = PACKET_PER_BUFFER_MODE;
1212	} else {
1213		recv->dma_mode = BUFFER_FILL_MODE;
1214	}
1215
1216	/* set nblocks, buf_stride, block_irq_interval */
1217
1218	if (recv->dma_mode == BUFFER_FILL_MODE) {
1219		recv->buf_stride = PAGE_SIZE;
1220
1221		/* one block per page of data in the DMA buffer, minus the final guard page */
1222		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
1223		if (recv->nblocks < 3) {
1224			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
1225			goto err;
1226		}
1227
1228		/* iso->irq_interval is in packets - translate that to blocks */
1229		if (iso->irq_interval == 1)
1230			recv->block_irq_interval = 1;
1231		else
1232			recv->block_irq_interval = iso->irq_interval *
1233							((recv->nblocks+1)/iso->buf_packets);
1234		if (recv->block_irq_interval*4 > recv->nblocks)
1235			recv->block_irq_interval = recv->nblocks/4;
1236		if (recv->block_irq_interval < 1)
1237			recv->block_irq_interval = 1;
1238
1239	} else {
1240		int max_packet_size;
1241
1242		recv->nblocks = iso->buf_packets;
1243		recv->block_irq_interval = iso->irq_interval;
1244		if (recv->block_irq_interval * 4 > iso->buf_packets)
1245			recv->block_irq_interval = iso->buf_packets / 4;
1246		if (recv->block_irq_interval < 1)
1247		recv->block_irq_interval = 1;
1248
1249		/* choose a buffer stride */
1250		/* must be a power of 2, and <= PAGE_SIZE */
1251
1252		max_packet_size = iso->buf_size / iso->buf_packets;
1253
1254		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
1255		    recv->buf_stride *= 2);
1256
1257		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
1258		   recv->buf_stride > PAGE_SIZE) {
1259			/* this shouldn't happen, but anyway... */
1260			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
1261			goto err;
1262		}
1263	}
1264
1265	recv->block_reader = 0;
1266	recv->released_bytes = 0;
1267	recv->block_dma = 0;
1268	recv->dma_offset = 0;
1269
1270	/* size of DMA program = one descriptor per block */
1271	if (dma_prog_region_alloc(&recv->prog,
1272				 sizeof(struct dma_cmd) * recv->nblocks,
1273				 recv->ohci->dev))
1274		goto err;
1275
1276	recv->block = (struct dma_cmd*) recv->prog.kvirt;
1277
1278	ohci1394_init_iso_tasklet(&recv->task,
1279				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
1280				                       OHCI_ISO_RECEIVE,
1281				  ohci_iso_recv_task, (unsigned long) iso);
1282
1283	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
1284		ret = -EBUSY;
1285		goto err;
1286	}
1287
1288	recv->task_active = 1;
1289
1290	/* recv context registers are spaced 32 bytes apart */
1291	ctx = recv->task.context;
1292	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
1293	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
1294	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
1295	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
1296
1297	if (iso->channel == -1) {
1298		/* clear multi-channel selection mask */
1299		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
1300		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
1301	}
1302
1303	/* write the DMA program */
1304	ohci_iso_recv_program(iso);
1305
1306	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
1307	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
1308	       recv->dma_mode == BUFFER_FILL_MODE ?
1309	       "buffer-fill" : "packet-per-buffer",
1310	       iso->buf_size/PAGE_SIZE, iso->buf_size,
1311	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);
1312
1313	return 0;
1314
1315err:
1316	ohci_iso_recv_shutdown(iso);
1317	return ret;
1318}
1319
1320static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1321{
1322	struct ohci_iso_recv *recv = iso->hostdata;
1323
1324	/* disable interrupts */
1325	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1326
1327	/* halt DMA */
1328	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1329}
1330
1331static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1332{
1333	struct ohci_iso_recv *recv = iso->hostdata;
1334
1335	if (recv->task_active) {
1336		ohci_iso_recv_stop(iso);
1337		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1338		recv->task_active = 0;
1339	}
1340
1341	dma_prog_region_free(&recv->prog);
1342	kfree(recv);
1343	iso->hostdata = NULL;
1344}
1345
1346/* set up a "gapped" ring buffer DMA program */
1347static void ohci_iso_recv_program(struct hpsb_iso *iso)
1348{
1349	struct ohci_iso_recv *recv = iso->hostdata;
1350	int blk;
1351
1352	/* address of 'branch' field in previous DMA descriptor */
1353	u32 *prev_branch = NULL;
1354
1355	for (blk = 0; blk < recv->nblocks; blk++) {
1356		u32 control;
1357
1358		/* the DMA descriptor */
1359		struct dma_cmd *cmd = &recv->block[blk];
1360
1361		/* offset of the DMA descriptor relative to the DMA prog buffer */
1362		unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1363
1364		/* offset of this packet's data within the DMA buffer */
1365		unsigned long buf_offset = blk * recv->buf_stride;
1366
1367		if (recv->dma_mode == BUFFER_FILL_MODE) {
1368			control = 2 << 28; /* INPUT_MORE */
1369		} else {
1370			control = 3 << 28; /* INPUT_LAST */
1371		}
1372
1373		control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1374
1375		/* interrupt on last block, and at intervals */
1376		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1377			control |= 3 << 20; /* want interrupt */
1378		}
1379
1380		control |= 3 << 18; /* enable branch to address */
1381		control |= recv->buf_stride;
1382
1383		cmd->control = cpu_to_le32(control);
1384		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1385		cmd->branchAddress = 0; /* filled in on next loop */
1386		cmd->status = cpu_to_le32(recv->buf_stride);
1387
1388		/* link the previous descriptor to this one */
1389		if (prev_branch) {
1390			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1391		}
1392
1393		prev_branch = &cmd->branchAddress;
1394	}
1395
1396	/* the final descriptor's branch address and Z should be left at 0 */
1397}
1398
1399/* listen or unlisten to a specific channel (multi-channel mode only) */
1400static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1401{
1402	struct ohci_iso_recv *recv = iso->hostdata;
1403	int reg, i;
1404
1405	if (channel < 32) {
1406		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1407		i = channel;
1408	} else {
1409		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1410		i = channel - 32;
1411	}
1412
1413	reg_write(recv->ohci, reg, (1 << i));
1414
1415	/* issue a dummy read to force all PCI writes to be posted immediately */
1416	mb();
1417	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1418}
1419
1420static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1421{
1422	struct ohci_iso_recv *recv = iso->hostdata;
1423	int i;
1424
1425	for (i = 0; i < 64; i++) {
1426		if (mask & (1ULL << i)) {
1427			if (i < 32)
1428				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1429			else
1430				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1431		} else {
1432			if (i < 32)
1433				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1434			else
1435				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1436		}
1437	}
1438
1439	/* issue a dummy read to force all PCI writes to be posted immediately */
1440	mb();
1441	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1442}
1443
1444static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
1445{
1446	struct ohci_iso_recv *recv = iso->hostdata;
1447	struct ti_ohci *ohci = recv->ohci;
1448	u32 command, contextMatch;
1449
1450	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
1451	wmb();
1452
1453	/* always keep ISO headers */
1454	command = (1 << 30);
1455
1456	if (recv->dma_mode == BUFFER_FILL_MODE)
1457		command |= (1 << 31);
1458
1459	reg_write(recv->ohci, recv->ContextControlSet, command);
1460
1461	/* match on specified tags */
1462	contextMatch = tag_mask << 28;
1463
1464	if (iso->channel == -1) {
1465		/* enable multichannel reception */
1466		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
1467	} else {
1468		/* listen on channel */
1469		contextMatch |= iso->channel;
1470	}
1471
1472	if (cycle != -1) {
1473		u32 seconds;
1474
1475		/* enable cycleMatch */
1476		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));
1477
1478		/* set starting cycle */
1479		cycle &= 0x1FFF;
1480
1481		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
1482		   just snarf them from the current time */
1483		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
1484
1485		/* advance one second to give some extra time for DMA to start */
1486		seconds += 1;
1487
1488		cycle |= (seconds & 3) << 13;
1489
1490		contextMatch |= cycle << 12;
1491	}
1492
1493	if (sync != -1) {
1494		/* set sync flag on first DMA descriptor */
1495		struct dma_cmd *cmd = &recv->block[recv->block_dma];
1496		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);
1497
1498		/* match sync field */
1499		contextMatch |= (sync&0xf)<<8;
1500	}
1501
1502	reg_write(recv->ohci, recv->ContextMatch, contextMatch);
1503
1504	/* address of first descriptor block */
1505	command = dma_prog_region_offset_to_bus(&recv->prog,
1506						recv->block_dma * sizeof(struct dma_cmd));
1507	command |= 1; /* Z=1 */
1508
1509	reg_write(recv->ohci, recv->CommandPtr, command);
1510
1511	/* enable interrupts */
1512	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);
1513
1514	wmb();
1515
1516	/* run */
1517	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);
1518
1519	/* issue a dummy read of the cycle timer register to force
1520	   all PCI writes to be posted immediately */
1521	mb();
1522	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1523
1524	/* check RUN */
1525	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
1526		PRINT(KERN_ERR,
1527		      "Error starting IR DMA (ContextControl 0x%08x)\n",
1528		      reg_read(recv->ohci, recv->ContextControlSet));
1529		return -1;
1530	}
1531
1532	return 0;
1533}
1534
1535static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1536{
1537	/* re-use the DMA descriptor for the block */
1538	/* by linking the previous descriptor to it */
1539
1540	int next_i = block;
1541	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1542
1543	struct dma_cmd *next = &recv->block[next_i];
1544	struct dma_cmd *prev = &recv->block[prev_i];
1545
1546	/* ignore out-of-range requests */
1547	if ((block < 0) || (block > recv->nblocks))
1548		return;
1549
1550	/* 'next' becomes the new end of the DMA chain,
1551	   so disable branch and enable interrupt */
1552	next->branchAddress = 0;
1553	next->control |= cpu_to_le32(3 << 20);
1554	next->status = cpu_to_le32(recv->buf_stride);
1555
1556	/* link prev to next */
1557	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1558									sizeof(struct dma_cmd) * next_i)
1559					  | 1); /* Z=1 */
1560
1561	/* disable interrupt on previous DMA descriptor, except at intervals */
1562	if ((prev_i % recv->block_irq_interval) == 0) {
1563		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1564	} else {
1565		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1566	}
1567	wmb();
1568
1569	/* wake up DMA in case it fell asleep */
1570	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1571}
1572
1573static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1574					     struct hpsb_iso_packet_info *info)
1575{
1576	/* release the memory where the packet was */
1577	recv->released_bytes += info->total_len;
1578
1579	/* have we released enough memory for one block? */
1580	while (recv->released_bytes > recv->buf_stride) {
1581		ohci_iso_recv_release_block(recv, recv->block_reader);
1582		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1583		recv->released_bytes -= recv->buf_stride;
1584	}
1585}
1586
1587static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1588{
1589	struct ohci_iso_recv *recv = iso->hostdata;
1590	if (recv->dma_mode == BUFFER_FILL_MODE) {
1591		ohci_iso_recv_bufferfill_release(recv, info);
1592	} else {
1593		ohci_iso_recv_release_block(recv, info - iso->infos);
1594	}
1595}
1596
1597/* parse all packets from blocks that have been fully received */
1598static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1599{
1600	int wake = 0;
1601	int runaway = 0;
1602	struct ti_ohci *ohci = recv->ohci;
1603
1604	while (1) {
1605		/* we expect the next parsable packet to begin at recv->dma_offset */
1606		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */
1607
1608		unsigned int offset;
1609		unsigned short len, cycle, total_len;
1610		unsigned char channel, tag, sy;
1611
1612		unsigned char *p = iso->data_buf.kvirt;
1613
1614		unsigned int this_block = recv->dma_offset/recv->buf_stride;
1615
1616		/* don't loop indefinitely */
1617		if (runaway++ > 100000) {
1618			atomic_inc(&iso->overflows);
1619			PRINT(KERN_ERR,
1620			      "IR DMA error - Runaway during buffer parsing!\n");
1621			break;
1622		}
1623
1624		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
1625		if (this_block == recv->block_dma)
1626			break;
1627
1628		wake = 1;
1629
1630		/* parse data length, tag, channel, and sy */
1631
1632		/* note: we keep our own local copies of 'len' and 'offset'
1633		   so the user can't mess with them by poking in the mmap area */
1634
1635		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);
1636
1637		if (len > 4096) {
1638			PRINT(KERN_ERR,
1639			      "IR DMA error - bogus 'len' value %u\n", len);
1640		}
1641
1642		channel = p[recv->dma_offset+1] & 0x3F;
1643		tag = p[recv->dma_offset+1] >> 6;
1644		sy = p[recv->dma_offset+0] & 0xF;
1645
1646		/* advance to data payload */
1647		recv->dma_offset += 4;
1648
1649		/* check for wrap-around */
1650		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1651			recv->dma_offset -= recv->buf_stride*recv->nblocks;
1652		}
1653
1654		/* dma_offset now points to the first byte of the data payload */
1655		offset = recv->dma_offset;
1656
1657		/* advance to xferStatus/timeStamp */
1658		recv->dma_offset += len;
1659
1660		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
1661		/* payload is padded to 4 bytes */
1662		if (len % 4) {
1663			recv->dma_offset += 4 - (len%4);
1664			total_len += 4 - (len%4);
1665		}
1666
1667		/* check for wrap-around */
1668		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1669			/* uh oh, the packet data wraps from the last
1670                           to the first DMA block - make the packet
1671                           contiguous by copying its "tail" into the
1672                           guard page */
1673
1674			int guard_off = recv->buf_stride*recv->nblocks;
1675			int tail_len = len - (guard_off - offset);
1676
1677			if (tail_len > 0  && tail_len < recv->buf_stride) {
1678				memcpy(iso->data_buf.kvirt + guard_off,
1679				       iso->data_buf.kvirt,
1680				       tail_len);
1681			}
1682
1683			recv->dma_offset -= recv->buf_stride*recv->nblocks;
1684		}
1685
1686		/* parse timestamp */
1687		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
1688		cycle &= 0x1FFF;
1689
1690		/* advance to next packet */
1691		recv->dma_offset += 4;
1692
1693		/* check for wrap-around */
1694		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1695			recv->dma_offset -= recv->buf_stride*recv->nblocks;
1696		}
1697
1698		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
1699	}
1700
1701	if (wake)
1702		hpsb_iso_wake(iso);
1703}
1704
1705static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1706{
1707	int loop;
1708	struct ti_ohci *ohci = recv->ohci;
1709
1710	/* loop over all blocks */
1711	for (loop = 0; loop < recv->nblocks; loop++) {
1712
1713		/* check block_dma to see if it's done */
1714		struct dma_cmd *im = &recv->block[recv->block_dma];
1715
1716		/* check the DMA descriptor for new writes to xferStatus */
1717		u16 xferstatus = le32_to_cpu(im->status) >> 16;
1718
1719		/* rescount is the number of bytes *remaining to be written* in the block */
1720		u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1721
1722		unsigned char event = xferstatus & 0x1F;
1723
1724		if (!event) {
1725			/* nothing has happened to this block yet */
1726			break;
1727		}
1728
1729		if (event != 0x11) {
1730			atomic_inc(&iso->overflows);
1731			PRINT(KERN_ERR,
1732			      "IR DMA error - OHCI error code 0x%02x\n", event);
1733		}
1734
1735		if (rescount != 0) {
1736			/* the card is still writing to this block;
1737			   we can't touch it until it's done */
1738			break;
1739		}
1740
1741		/* OK, the block is finished... */
1742
1743		/* sync our view of the block */
1744		dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1745
1746		/* reset the DMA descriptor */
1747		im->status = recv->buf_stride;
1748
1749		/* advance block_dma */
1750		recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1751
1752		if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1753			atomic_inc(&iso->overflows);
1754			DBGMSG("ISO reception overflow - "
1755			       "ran out of DMA blocks");
1756		}
1757	}
1758
1759	/* parse any packets that have arrived */
1760	ohci_iso_recv_bufferfill_parse(iso, recv);
1761}
1762
1763static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1764{
1765	int count;
1766	int wake = 0;
1767	struct ti_ohci *ohci = recv->ohci;
1768
1769	/* loop over the entire buffer */
1770	for (count = 0; count < recv->nblocks; count++) {
1771		u32 packet_len = 0;
1772
1773		/* pointer to the DMA descriptor */
1774		struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1775
1776		/* check the DMA descriptor for new writes to xferStatus */
1777		u16 xferstatus = le32_to_cpu(il->status) >> 16;
1778		u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1779
1780		unsigned char event = xferstatus & 0x1F;
1781
1782		if (!event) {
1783			/* this packet hasn't come in yet; we are done for now */
1784			goto out;
1785		}
1786
1787		if (event == 0x11) {
1788			/* packet received successfully! */
1789
1790			/* rescount is the number of bytes *remaining* in the packet buffer,
1791			   after the packet was written */
1792			packet_len = recv->buf_stride - rescount;
1793
1794		} else if (event == 0x02) {
1795			PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1796		} else if (event) {
1797			PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1798		}
1799
1800		/* sync our view of the buffer */
1801		dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1802
1803		/* record the per-packet info */
1804		{
1805			/* iso header is 8 bytes ahead of the data payload */
1806			unsigned char *hdr;
1807
1808			unsigned int offset;
1809			unsigned short cycle;
1810			unsigned char channel, tag, sy;
1811
1812			offset = iso->pkt_dma * recv->buf_stride;
1813			hdr = iso->data_buf.kvirt + offset;
1814
1815			/* skip iso header */
1816			offset += 8;
1817			packet_len -= 8;
1818
1819			cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1820			channel = hdr[5] & 0x3F;
1821			tag = hdr[5] >> 6;
1822			sy = hdr[4] & 0xF;
1823
1824			hpsb_iso_packet_received(iso, offset, packet_len,
1825					recv->buf_stride, cycle, channel, tag, sy);
1826		}
1827
1828		/* reset the DMA descriptor */
1829		il->status = recv->buf_stride;
1830
1831		wake = 1;
1832		recv->block_dma = iso->pkt_dma;
1833	}
1834
1835out:
1836	if (wake)
1837		hpsb_iso_wake(iso);
1838}
1839
1840static void ohci_iso_recv_task(unsigned long data)
1841{
1842	struct hpsb_iso *iso = (struct hpsb_iso*) data;
1843	struct ohci_iso_recv *recv = iso->hostdata;
1844
1845	if (recv->dma_mode == BUFFER_FILL_MODE)
1846		ohci_iso_recv_bufferfill_task(iso, recv);
1847	else
1848		ohci_iso_recv_packetperbuf_task(iso, recv);
1849}
1850
1851/***********************************
1852 * rawiso ISO transmission         *
1853 ***********************************/
1854
1855struct ohci_iso_xmit {
1856	struct ti_ohci *ohci;
1857	struct dma_prog_region prog;
1858	struct ohci1394_iso_tasklet task;
1859	int task_active;
1860
1861	u32 ContextControlSet;
1862	u32 ContextControlClear;
1863	u32 CommandPtr;
1864};
1865
1866/* transmission DMA program:
1867   one OUTPUT_MORE_IMMEDIATE for the IT header
1868   one OUTPUT_LAST for the buffer data */
1869
1870struct iso_xmit_cmd {
1871	struct dma_cmd output_more_immediate;
1872	u8 iso_hdr[8];
1873	u32 unused[2];
1874	struct dma_cmd output_last;
1875};
1876
1877static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1878static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1879static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1880static void ohci_iso_xmit_task(unsigned long data);
1881
1882static int ohci_iso_xmit_init(struct hpsb_iso *iso)
1883{
1884	struct ohci_iso_xmit *xmit;
1885	unsigned int prog_size;
1886	int ctx;
1887	int ret = -ENOMEM;
1888
1889	xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
1890	if (!xmit)
1891		return -ENOMEM;
1892
1893	iso->hostdata = xmit;
1894	xmit->ohci = iso->host->hostdata;
1895	xmit->task_active = 0;
1896
1897	dma_prog_region_init(&xmit->prog);
1898
1899	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
1900
1901	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
1902		goto err;
1903
1904	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
1905				  ohci_iso_xmit_task, (unsigned long) iso);
1906
1907	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
1908		ret = -EBUSY;
1909		goto err;
1910	}
1911
1912	xmit->task_active = 1;
1913
1914	/* xmit context registers are spaced 16 bytes apart */
1915	ctx = xmit->task.context;
1916	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
1917	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
1918	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
1919
1920	return 0;
1921
1922err:
1923	ohci_iso_xmit_shutdown(iso);
1924	return ret;
1925}
1926
1927static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1928{
1929	struct ohci_iso_xmit *xmit = iso->hostdata;
1930	struct ti_ohci *ohci = xmit->ohci;
1931
1932	/* disable interrupts */
1933	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1934
1935	/* halt DMA */
1936	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1937		PRINT(KERN_ERR,
1938		      "you probably exceeded the OHCI card's bandwidth limit - "
1939		      "reload the module and reduce xmit bandwidth");
1940	}
1941}
1942
1943static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1944{
1945	struct ohci_iso_xmit *xmit = iso->hostdata;
1946
1947	if (xmit->task_active) {
1948		ohci_iso_xmit_stop(iso);
1949		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1950		xmit->task_active = 0;
1951	}
1952
1953	dma_prog_region_free(&xmit->prog);
1954	kfree(xmit);
1955	iso->hostdata = NULL;
1956}
1957
1958static void ohci_iso_xmit_task(unsigned long data)
1959{
1960	struct hpsb_iso *iso = (struct hpsb_iso*) data;
1961	struct ohci_iso_xmit *xmit = iso->hostdata;
1962	struct ti_ohci *ohci = xmit->ohci;
1963	int wake = 0;
1964	int count;
1965
1966	/* check the whole buffer if necessary, starting at pkt_dma */
1967	for (count = 0; count < iso->buf_packets; count++) {
1968		int cycle;
1969
1970		/* DMA descriptor */
1971		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
1972
1973		/* check for new writes to xferStatus */
1974		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
1975		u8  event = xferstatus & 0x1F;
1976
1977		if (!event) {
1978			/* packet hasn't been sent yet; we are done for now */
1979			break;
1980		}
1981
1982		if (event != 0x11)
1983			PRINT(KERN_ERR,
1984			      "IT DMA error - OHCI error code 0x%02x\n", event);
1985
1986		/* at least one packet went out, so wake up the writer */
1987		wake = 1;
1988
1989		/* parse cycle */
1990		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
1991
1992		/* tell the subsystem the packet has gone out */
1993		hpsb_iso_packet_sent(iso, cycle, event != 0x11);
1994
1995		/* reset the DMA descriptor for next time */
1996		cmd->output_last.status = 0;
1997	}
1998
1999	if (wake)
2000		hpsb_iso_wake(iso);
2001}
2002
2003static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
2004{
2005	struct ohci_iso_xmit *xmit = iso->hostdata;
2006	struct ti_ohci *ohci = xmit->ohci;
2007
2008	int next_i, prev_i;
2009	struct iso_xmit_cmd *next, *prev;
2010
2011	unsigned int offset;
2012	unsigned short len;
2013	unsigned char tag, sy;
2014
2015	/* check that the packet doesn't cross a page boundary
2016	   (we could allow this if we added OUTPUT_MORE descriptor support) */
2017	if (cross_bound(info->offset, info->len)) {
2018		PRINT(KERN_ERR,
2019		      "rawiso xmit: packet %u crosses a page boundary",
2020		      iso->first_packet);
2021		return -EINVAL;
2022	}
2023
2024	offset = info->offset;
2025	len = info->len;
2026	tag = info->tag;
2027	sy = info->sy;
2028
2029	/* sync up the card's view of the buffer */
2030	dma_region_sync_for_device(&iso->data_buf, offset, len);
2031
2032	/* append first_packet to the DMA chain */
2033	/* by linking the previous descriptor to it */
2034	/* (next will become the new end of the DMA chain) */
2035
2036	next_i = iso->first_packet;
2037	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
2038
2039	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
2040	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
2041
2042	/* set up the OUTPUT_MORE_IMMEDIATE descriptor */
2043	memset(next, 0, sizeof(struct iso_xmit_cmd));
2044	next->output_more_immediate.control = cpu_to_le32(0x02000008);
2045
2046	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
2047
2048	/* tcode = 0xA, and sy */
2049	next->iso_hdr[0] = 0xA0 | (sy & 0xF);
2050
2051	/* tag and channel number */
2052	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
2053
2054	/* transmission speed */
2055	next->iso_hdr[2] = iso->speed & 0x7;
2056
2057	/* payload size */
2058	next->iso_hdr[6] = len & 0xFF;
2059	next->iso_hdr[7] = len >> 8;
2060
2061	/* set up the OUTPUT_LAST */
2062	next->output_last.control = cpu_to_le32(1 << 28);
2063	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
2064	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
2065	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
2066	next->output_last.control |= cpu_to_le32(len);
2067
2068	/* payload bus address */
2069	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
2070
2071	/* leave branchAddress at zero for now */
2072
2073	/* re-write the previous DMA descriptor to chain to this one */
2074
2075	/* set prev branch address to point to next (Z=3) */
2076	prev->output_last.branchAddress = cpu_to_le32(
2077		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
2078
2079	/* disable interrupt, unless required by the IRQ interval */
2080	if (prev_i % iso->irq_interval) {
2081		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
2082	} else {
2083		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
2084	}
2085
2086	wmb();
2087
2088	/* wake DMA in case it is sleeping */
2089	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
2090
2091	/* issue a dummy read of the cycle timer to force all PCI
2092	   writes to be posted immediately */
2093	mb();
2094	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
2095
2096	return 0;
2097}
2098
2099static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
2100{
2101	struct ohci_iso_xmit *xmit = iso->hostdata;
2102	struct ti_ohci *ohci = xmit->ohci;
2103
2104	/* clear out the control register */
2105	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2106	wmb();
2107
2108	/* address and length of first descriptor block (Z=3) */
2109	reg_write(xmit->ohci, xmit->CommandPtr,
2110		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2111
2112	/* cycle match */
2113	if (cycle != -1) {
2114		u32 start = cycle & 0x1FFF;
2115
2116		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2117		   just snarf them from the current time */
2118		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2119
2120		/* advance one second to give some extra time for DMA to start */
2121		seconds += 1;
2122
2123		start |= (seconds & 3) << 13;
2124
2125		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
2126	}
2127
2128	/* enable interrupts */
2129	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2130
2131	/* run */
2132	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2133	mb();
2134
2135	/* wait 100 usec to give the card time to go active */
2136	udelay(100);
2137
2138	/* check the RUN bit */
2139	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2140		PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
2141		      reg_read(xmit->ohci, xmit->ContextControlSet));
2142		return -1;
2143	}
2144
2145	return 0;
2146}
2147
2148static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2149{
2150
2151	switch(cmd) {
2152	case XMIT_INIT:
2153		return ohci_iso_xmit_init(iso);
2154	case XMIT_START:
2155		return ohci_iso_xmit_start(iso, arg);
2156	case XMIT_STOP:
2157		ohci_iso_xmit_stop(iso);
2158		return 0;
2159	case XMIT_QUEUE:
2160		return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2161	case XMIT_SHUTDOWN:
2162		ohci_iso_xmit_shutdown(iso);
2163		return 0;
2164
2165	case RECV_INIT:
2166		return ohci_iso_recv_init(iso);
2167	case RECV_START: {
2168		int *args = (int*) arg;
2169		return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2170	}
2171	case RECV_STOP:
2172		ohci_iso_recv_stop(iso);
2173		return 0;
2174	case RECV_RELEASE:
2175		ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2176		return 0;
2177	case RECV_FLUSH:
2178		ohci_iso_recv_task((unsigned long) iso);
2179		return 0;
2180	case RECV_SHUTDOWN:
2181		ohci_iso_recv_shutdown(iso);
2182		return 0;
2183	case RECV_LISTEN_CHANNEL:
2184		ohci_iso_recv_change_channel(iso, arg, 1);
2185		return 0;
2186	case RECV_UNLISTEN_CHANNEL:
2187		ohci_iso_recv_change_channel(iso, arg, 0);
2188		return 0;
2189	case RECV_SET_CHANNEL_MASK:
2190		ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2191		return 0;
2192
2193	default:
2194		PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2195			cmd);
2196		break;
2197	}
2198	return -EINVAL;
2199}
2200
2201/***************************************
2202 * IEEE-1394 functionality section END *
2203 ***************************************/
2204
2205
2206/********************************************************
2207 * Global stuff (interrupt handler, init/shutdown code) *
2208 ********************************************************/
2209
2210static void dma_trm_reset(struct dma_trm_ctx *d)
2211{
2212	unsigned long flags;
2213	LIST_HEAD(packet_list);
2214	struct ti_ohci *ohci = d->ohci;
2215	struct hpsb_packet *packet, *ptmp;
2216
2217	ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2218
2219	/* Lock the context, reset it and release it. Move the packets
2220	 * that were pending in the context to packet_list and free
2221	 * them after releasing the lock. */
2222
2223	spin_lock_irqsave(&d->lock, flags);
2224
2225	list_splice(&d->fifo_list, &packet_list);
2226	list_splice(&d->pending_list, &packet_list);
2227	INIT_LIST_HEAD(&d->fifo_list);
2228	INIT_LIST_HEAD(&d->pending_list);
2229
2230	d->branchAddrPtr = NULL;
2231	d->sent_ind = d->prg_ind;
2232	d->free_prgs = d->num_desc;
2233
2234	spin_unlock_irqrestore(&d->lock, flags);
2235
2236	if (list_empty(&packet_list))
2237		return;
2238
2239	PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2240
2241	/* Now process subsystem callbacks for the packets from this
2242	 * context. */
2243	list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2244		list_del_init(&packet->driver_list);
2245		hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
2246	}
2247}
2248
2249static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2250				       quadlet_t rx_event,
2251				       quadlet_t tx_event)
2252{
2253	struct ohci1394_iso_tasklet *t;
2254	unsigned long mask;
2255	unsigned long flags;
2256
2257	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2258
2259	list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2260		mask = 1 << t->context;
2261
2262		if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2263			tasklet_schedule(&t->tasklet);
2264		else if (rx_event & mask)
2265			tasklet_schedule(&t->tasklet);
2266	}
2267
2268	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
2269}
2270
2271static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
2272{
2273	quadlet_t event, node_id;
2274	struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2275	struct hpsb_host *host = ohci->host;
2276	int phyid = -1, isroot = 0;
2277	unsigned long flags;
2278
2279	/* Read and clear the interrupt event register.  Don't clear
2280	 * the busReset event, though. This is done when we get the
2281	 * selfIDComplete interrupt. */
2282	spin_lock_irqsave(&ohci->event_lock, flags);
2283	event = reg_read(ohci, OHCI1394_IntEventClear);
2284	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2285	spin_unlock_irqrestore(&ohci->event_lock, flags);
2286
2287	if (!event)
2288		return IRQ_NONE;
2289
2290	/* If event is ~(u32)0 cardbus card was ejected.  In this case
2291	 * we just return, and clean up in the ohci1394_pci_remove
2292	 * function. */
2293	if (event == ~(u32) 0) {
2294		DBGMSG("Device removed.");
2295		return IRQ_NONE;
2296	}
2297
2298	DBGMSG("IntEvent: %08x", event);
2299
2300	if (event & OHCI1394_unrecoverableError) {
2301		int ctx;
2302		PRINT(KERN_ERR, "Unrecoverable error!");
2303
2304		if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2305			PRINT(KERN_ERR, "Async Req Tx Context died: "
2306				"ctrl[%08x] cmdptr[%08x]",
2307				reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2308				reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2309
2310		if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2311			PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2312				"ctrl[%08x] cmdptr[%08x]",
2313				reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2314				reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2315
2316		if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2317			PRINT(KERN_ERR, "Async Req Rcv Context died: "
2318				"ctrl[%08x] cmdptr[%08x]",
2319				reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2320				reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2321
2322		if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2323			PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2324				"ctrl[%08x] cmdptr[%08x]",
2325				reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2326				reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2327
2328		for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2329			if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2330				PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2331					"ctrl[%08x] cmdptr[%08x]", ctx,
2332					reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2333					reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2334		}
2335
2336		for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2337			if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2338				PRINT(KERN_ERR, "Iso Recv %d Context died: "
2339					"ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2340					reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2341					reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2342					reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2343		}
2344
2345		event &= ~OHCI1394_unrecoverableError;
2346	}
2347	if (event & OHCI1394_postedWriteErr) {
2348		PRINT(KERN_ERR, "physical posted write error");
2349		/* no recovery strategy yet, had to involve protocol drivers */
2350		event &= ~OHCI1394_postedWriteErr;
2351	}
2352	if (event & OHCI1394_cycleTooLong) {
2353		if(printk_ratelimit())
2354			PRINT(KERN_WARNING, "isochronous cycle too long");
2355		else
2356			DBGMSG("OHCI1394_cycleTooLong");
2357		reg_write(ohci, OHCI1394_LinkControlSet,
2358			  OHCI1394_LinkControl_CycleMaster);
2359		event &= ~OHCI1394_cycleTooLong;
2360	}
2361	if (event & OHCI1394_cycleInconsistent) {
2362		/* We subscribe to the cycleInconsistent event only to
2363		 * clear the corresponding event bit... otherwise,
2364		 * isochronous cycleMatch DMA won't work. */
2365		DBGMSG("OHCI1394_cycleInconsistent");
2366		event &= ~OHCI1394_cycleInconsistent;
2367	}
2368	if (event & OHCI1394_busReset) {
2369		/* The busReset event bit can't be cleared during the
2370		 * selfID phase, so we disable busReset interrupts, to
2371		 * avoid burying the cpu in interrupt requests. */
2372		spin_lock_irqsave(&ohci->event_lock, flags);
2373		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2374
2375		if (ohci->check_busreset) {
2376			int loop_count = 0;
2377
2378			udelay(10);
2379
2380			while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2381				reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2382
2383				spin_unlock_irqrestore(&ohci->event_lock, flags);
2384				udelay(10);
2385				spin_lock_irqsave(&ohci->event_lock, flags);
2386
2387				/* The loop counter check is to prevent the driver
2388				 * from remaining in this state forever. For the
2389				 * initial bus reset, the loop continues for ever
2390				 * and the system hangs, until some device is plugged-in
2391				 * or out manually into a port! The forced reset seems
2392				 * to solve this problem. This mainly effects nForce2. */
2393				if (loop_count > 10000) {
2394					ohci_devctl(host, RESET_BUS, LONG_RESET);
2395					DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2396					loop_count = 0;
2397				}
2398
2399				loop_count++;
2400			}
2401		}
2402		spin_unlock_irqrestore(&ohci->event_lock, flags);
2403		if (!host->in_bus_reset) {
2404			DBGMSG("irq_handler: Bus reset requested");
2405
2406			/* Subsystem call */
2407			hpsb_bus_reset(ohci->host);
2408		}
2409		event &= ~OHCI1394_busReset;
2410	}
2411	if (event & OHCI1394_reqTxComplete) {
2412		struct dma_trm_ctx *d = &ohci->at_req_context;
2413		DBGMSG("Got reqTxComplete interrupt "
2414		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
2415		if (reg_read(ohci, d->ctrlSet) & 0x800)
2416			ohci1394_stop_context(ohci, d->ctrlClear,
2417					      "reqTxComplete");
2418		else
2419			dma_trm_tasklet((unsigned long)d);
2420			//tasklet_schedule(&d->task);
2421		event &= ~OHCI1394_reqTxComplete;
2422	}
2423	if (event & OHCI1394_respTxComplete) {
2424		struct dma_trm_ctx *d = &ohci->at_resp_context;
2425		DBGMSG("Got respTxComplete interrupt "
2426		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
2427		if (reg_read(ohci, d->ctrlSet) & 0x800)
2428			ohci1394_stop_context(ohci, d->ctrlClear,
2429					      "respTxComplete");
2430		else
2431			tasklet_schedule(&d->task);
2432		event &= ~OHCI1394_respTxComplete;
2433	}
2434	if (event & OHCI1394_RQPkt) {
2435		struct dma_rcv_ctx *d = &ohci->ar_req_context;
2436		DBGMSG("Got RQPkt interrupt status=0x%08X",
2437		       reg_read(ohci, d->ctrlSet));
2438		if (reg_read(ohci, d->ctrlSet) & 0x800)
2439			ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2440		else
2441			tasklet_schedule(&d->task);
2442		event &= ~OHCI1394_RQPkt;
2443	}
2444	if (event & OHCI1394_RSPkt) {
2445		struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2446		DBGMSG("Got RSPkt interrupt status=0x%08X",
2447		       reg_read(ohci, d->ctrlSet));
2448		if (reg_read(ohci, d->ctrlSet) & 0x800)
2449			ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2450		else
2451			tasklet_schedule(&d->task);
2452		event &= ~OHCI1394_RSPkt;
2453	}
2454	if (event & OHCI1394_isochRx) {
2455		quadlet_t rx_event;
2456
2457		rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2458		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2459		ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2460		event &= ~OHCI1394_isochRx;
2461	}
2462	if (event & OHCI1394_isochTx) {
2463		quadlet_t tx_event;
2464
2465		tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2466		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2467		ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2468		event &= ~OHCI1394_isochTx;
2469	}
2470	if (event & OHCI1394_selfIDComplete) {
2471		if (host->in_bus_reset) {
2472			node_id = reg_read(ohci, OHCI1394_NodeID);
2473
2474			if (!(node_id & 0x80000000)) {
2475				PRINT(KERN_ERR,
2476				      "SelfID received, but NodeID invalid "
2477				      "(probably new bus reset occurred): %08X",
2478				      node_id);
2479				goto selfid_not_valid;
2480			}
2481
2482			phyid =  node_id & 0x0000003f;
2483			isroot = (node_id & 0x40000000) != 0;
2484
2485			DBGMSG("SelfID interrupt received "
2486			      "(phyid %d, %s)", phyid,
2487			      (isroot ? "root" : "not root"));
2488
2489			handle_selfid(ohci, host, phyid, isroot);
2490
2491			/* Clear the bus reset event and re-enable the
2492			 * busReset interrupt.  */
2493			spin_lock_irqsave(&ohci->event_lock, flags);
2494			reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2495			reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2496			spin_unlock_irqrestore(&ohci->event_lock, flags);
2497
2498			/* Turn on phys dma reception.
2499			 *
2500			 * TODO: Enable some sort of filtering management.
2501			 */
2502			if (phys_dma) {
2503				reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
2504					  0xffffffff);
2505				reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
2506					  0xffffffff);
2507			}
2508
2509			DBGMSG("PhyReqFilter=%08x%08x",
2510			       reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
2511			       reg_read(ohci, OHCI1394_PhyReqFilterLoSet));
2512
2513			hpsb_selfid_complete(host, phyid, isroot);
2514		} else
2515			PRINT(KERN_ERR,
2516			      "SelfID received outside of bus reset sequence");
2517
2518selfid_not_valid:
2519		event &= ~OHCI1394_selfIDComplete;
2520	}
2521
2522	/* Make sure we handle everything, just in case we accidentally
2523	 * enabled an interrupt that we didn't write a handler for.  */
2524	if (event)
2525		PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2526		      event);
2527
2528	return IRQ_HANDLED;
2529}
2530
2531/* Put the buffer back into the dma context */
2532static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2533{
2534	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2535	DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2536
2537	d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2538	d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2539	idx = (idx + d->num_desc - 1 ) % d->num_desc;
2540	d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2541
2542	/* To avoid a race, ensure 1394 interface hardware sees the inserted
2543	 * context program descriptors before it sees the wakeup bit set. */
2544	wmb();
2545
2546	/* wake up the dma context if necessary */
2547	if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2548		PRINT(KERN_INFO,
2549		      "Waking dma ctx=%d ... processing is probably too slow",
2550		      d->ctx);
2551	}
2552
2553	/* do this always, to avoid race condition */
2554	reg_write(ohci, d->ctrlSet, 0x1000);
2555}
2556
2557#define cond_le32_to_cpu(data, noswap) \
2558	(noswap ? data : le32_to_cpu(data))
2559
2560static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2561			    -1, 0, -1, 0, -1, -1, 16, -1};
2562
2563/*
2564 * Determine the length of a packet in the buffer
2565 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2566 */
2567static inline int packet_length(struct dma_rcv_ctx *d, int idx,
2568				quadlet_t *buf_ptr, int offset,
2569				unsigned char tcode, int noswap)
2570{
2571	int length = -1;
2572
2573	if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2574		length = TCODE_SIZE[tcode];
2575		if (length == 0) {
2576			if (offset + 12 >= d->buf_size) {
2577				length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2578						[3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2579			} else {
2580				length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2581			}
2582			length += 20;
2583		}
2584	} else if (d->type == DMA_CTX_ISO) {
2585		/* Assumption: buffer fill mode with header/trailer */
2586		length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2587	}
2588
2589	if (length > 0 && length % 4)
2590		length += 4 - (length % 4);
2591
2592	return length;
2593}
2594
2595/* Tasklet that processes dma receive buffers */
2596static void dma_rcv_tasklet (unsigned long data)
2597{
2598	struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2599	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2600	unsigned int split_left, idx, offset, rescount;
2601	unsigned char tcode;
2602	int length, bytes_left, ack;
2603	unsigned long flags;
2604	quadlet_t *buf_ptr;
2605	char *split_ptr;
2606	char msg[256];
2607
2608	spin_lock_irqsave(&d->lock, flags);
2609
2610	idx = d->buf_ind;
2611	offset = d->buf_offset;
2612	buf_ptr = d->buf_cpu[idx] + offset/4;
2613
2614	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2615	bytes_left = d->buf_size - rescount - offset;
2616
2617	while (bytes_left > 0) {
2618		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2619
2620		/* packet_length() will return < 4 for an error */
2621		length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2622
2623		if (length < 4) { /* something is wrong */
2624			sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2625				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2626				d->ctx, length);
2627			ohci1394_stop_context(ohci, d->ctrlClear, msg);
2628			spin_unlock_irqrestore(&d->lock, flags);
2629			return;
2630		}
2631
2632		/* The first case is where we have a packet that crosses
2633		 * over more than one descriptor. The next case is where
2634		 * it's all in the first descriptor.  */
2635		if ((offset + length) > d->buf_size) {
2636			DBGMSG("Split packet rcv'd");
2637			if (length > d->split_buf_size) {
2638				ohci1394_stop_context(ohci, d->ctrlClear,
2639					     "Split packet size exceeded");
2640				d->buf_ind = idx;
2641				d->buf_offset = offset;
2642				spin_unlock_irqrestore(&d->lock, flags);
2643				return;
2644			}
2645
2646			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2647			    == d->buf_size) {
2648				/* Other part of packet not written yet.
2649				 * this should never happen I think
2650				 * anyway we'll get it on the next call.  */
2651				PRINT(KERN_INFO,
2652				      "Got only half a packet!");
2653				d->buf_ind = idx;
2654				d->buf_offset = offset;
2655				spin_unlock_irqrestore(&d->lock, flags);
2656				return;
2657			}
2658
2659			split_left = length;
2660			split_ptr = (char *)d->spb;
2661			memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2662			split_left -= d->buf_size-offset;
2663			split_ptr += d->buf_size-offset;
2664			insert_dma_buffer(d, idx);
2665			idx = (idx+1) % d->num_desc;
2666			buf_ptr = d->buf_cpu[idx];
2667			offset=0;
2668
2669			while (split_left >= d->buf_size) {
2670				memcpy(split_ptr,buf_ptr,d->buf_size);
2671				split_ptr += d->buf_size;
2672				split_left -= d->buf_size;
2673				insert_dma_buffer(d, idx);
2674				idx = (idx+1) % d->num_desc;
2675				buf_ptr = d->buf_cpu[idx];
2676			}
2677
2678			if (split_left > 0) {
2679				memcpy(split_ptr, buf_ptr, split_left);
2680				offset = split_left;
2681				buf_ptr += offset/4;
2682			}
2683		} else {
2684			DBGMSG("Single packet rcv'd");
2685			memcpy(d->spb, buf_ptr, length);
2686			offset += length;
2687			buf_ptr += length/4;
2688			if (offset==d->buf_size) {
2689				insert_dma_buffer(d, idx);
2690				idx = (idx+1) % d->num_desc;
2691				buf_ptr = d->buf_cpu[idx];
2692				offset=0;
2693			}
2694		}
2695
2696		/* We get one phy packet to the async descriptor for each
2697		 * bus reset. We always ignore it.  */
2698		if (tcode != OHCI1394_TCODE_PHY) {
2699			if (!ohci->no_swap_incoming)
2700				header_le32_to_cpu(d->spb, tcode);
2701			DBGMSG("Packet received from node"
2702				" %d ack=0x%02X spd=%d tcode=0x%X"
2703				" length=%d ctx=%d tlabel=%d",
2704				(d->spb[1]>>16)&0x3f,
2705				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2706				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2707				tcode, length, d->ctx,
2708				(d->spb[0]>>10)&0x3f);
2709
2710			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2711				== 0x11) ? 1 : 0;
2712
2713			hpsb_packet_received(ohci->host, d->spb,
2714					     length-4, ack);
2715		}
2716#ifdef OHCI1394_DEBUG
2717		else
2718			PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
2719			       d->ctx);
2720#endif
2721
2722	       	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2723
2724		bytes_left = d->buf_size - rescount - offset;
2725
2726	}
2727
2728	d->buf_ind = idx;
2729	d->buf_offset = offset;
2730
2731	spin_unlock_irqrestore(&d->lock, flags);
2732}
2733
2734/* Bottom half that processes sent packets */
2735static void dma_trm_tasklet (unsigned long data)
2736{
2737	struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2738	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2739	struct hpsb_packet *packet, *ptmp;
2740	unsigned long flags;
2741	u32 status, ack;
2742        size_t datasize;
2743
2744	spin_lock_irqsave(&d->lock, flags);
2745
2746	list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2747                datasize = packet->data_size;
2748		if (datasize && packet->type != hpsb_raw)
2749			status = le32_to_cpu(
2750				d->prg_cpu[d->sent_ind]->end.status) >> 16;
2751		else
2752			status = le32_to_cpu(
2753				d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2754
2755		if (status == 0)
2756			/* this packet hasn't been sent yet*/
2757			break;
2758
2759#ifdef OHCI1394_DEBUG
2760		if (datasize)
2761			if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2762				DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2763				       "ack=0x%X spd=%d dataLength=%d ctx=%d",
2764				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2765				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2766				       status&0x1f, (status>>5)&0x3,
2767				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2768				       d->ctx);
2769			else
2770				DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2771				       "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
2772				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2773				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2774				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2775				       status&0x1f, (status>>5)&0x3,
2776				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2777				       d->ctx);
2778		else
2779			DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2780			       "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
2781                                (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2782                                        >>16)&0x3f,
2783                                (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2784                                        >>4)&0xf,
2785                                (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2786                                        >>10)&0x3f,
2787                                status&0x1f, (status>>5)&0x3,
2788                                le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2789                                d->ctx);
2790#endif
2791
2792		if (status & 0x10) {
2793			ack = status & 0xf;
2794		} else {
2795			switch (status & 0x1f) {
2796			case EVT_NO_STATUS: /* that should never happen */
2797			case EVT_RESERVED_A: /* that should never happen */
2798			case EVT_LONG_PACKET: /* that should never happen */
2799				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2800				ack = ACKX_SEND_ERROR;
2801				break;
2802			case EVT_MISSING_ACK:
2803				ack = ACKX_TIMEOUT;
2804				break;
2805			case EVT_UNDERRUN:
2806				ack = ACKX_SEND_ERROR;
2807				break;
2808			case EVT_OVERRUN: /* that should never happen */
2809				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2810				ack = ACKX_SEND_ERROR;
2811				break;
2812			case EVT_DESCRIPTOR_READ:
2813			case EVT_DATA_READ:
2814			case EVT_DATA_WRITE:
2815				ack = ACKX_SEND_ERROR;
2816				break;
2817			case EVT_BUS_RESET: /* that should never happen */
2818				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2819				ack = ACKX_SEND_ERROR;
2820				break;
2821			case EVT_TIMEOUT:
2822				ack = ACKX_TIMEOUT;
2823				break;
2824			case EVT_TCODE_ERR:
2825				ack = ACKX_SEND_ERROR;
2826				break;
2827			case EVT_RESERVED_B: /* that should never happen */
2828			case EVT_RESERVED_C: /* that should never happen */
2829				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2830				ack = ACKX_SEND_ERROR;
2831				break;
2832			case EVT_UNKNOWN:
2833			case EVT_FLUSHED:
2834				ack = ACKX_SEND_ERROR;
2835				break;
2836			default:
2837				PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2838				ack = ACKX_SEND_ERROR;
2839				BUG();
2840			}
2841		}
2842
2843		list_del_init(&packet->driver_list);
2844		hpsb_packet_sent(ohci->host, packet, ack);
2845
2846		if (datasize) {
2847			pci_unmap_single(ohci->dev,
2848					 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
2849					 datasize, PCI_DMA_TODEVICE);
2850			OHCI_DMA_FREE("single Xmit data packet");
2851		}
2852
2853		d->sent_ind = (d->sent_ind+1)%d->num_desc;
2854		d->free_prgs++;
2855	}
2856
2857	dma_trm_flush(ohci, d);
2858
2859	spin_unlock_irqrestore(&d->lock, flags);
2860}
2861
2862static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2863{
2864	if (d->ctrlClear) {
2865		ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2866
2867		if (d->type == DMA_CTX_ISO) {
2868			/* disable interrupts */
2869			reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2870			ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2871		} else {
2872			tasklet_kill(&d->task);
2873		}
2874	}
2875}
2876
2877
2878static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2879{
2880	int i;
2881	struct ti_ohci *ohci = d->ohci;
2882
2883	if (ohci == NULL)
2884		return;
2885
2886	DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2887
2888	if (d->buf_cpu) {
2889		for (i=0; i<d->num_desc; i++)
2890			if (d->buf_cpu[i] && d->buf_bus[i]) {
2891				pci_free_consistent(
2892					ohci->dev, d->buf_size,
2893					d->buf_cpu[i], d->buf_bus[i]);
2894				OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2895			}
2896		kfree(d->buf_cpu);
2897		kfree(d->buf_bus);
2898	}
2899	if (d->prg_cpu) {
2900		for (i=0; i<d->num_desc; i++)
2901			if (d->prg_cpu[i] && d->prg_bus[i]) {
2902				pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2903				OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2904			}
2905		pci_pool_destroy(d->prg_pool);
2906		OHCI_DMA_FREE("dma_rcv prg pool");
2907		kfree(d->prg_cpu);
2908		kfree(d->prg_bus);
2909	}
2910	kfree(d->spb);
2911
2912	/* Mark this context as freed. */
2913	d->ohci = NULL;
2914}
2915
2916static int
2917alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2918		  enum context_type type, int ctx, int num_desc,
2919		  int buf_size, int split_buf_size, int context_base)
2920{
2921	int i, len;
2922	static int num_allocs;
2923	static char pool_name[20];
2924
2925	d->ohci = ohci;
2926	d->type = type;
2927	d->ctx = ctx;
2928
2929	d->num_desc = num_desc;
2930	d->buf_size = buf_size;
2931	d->split_buf_size = split_buf_size;
2932
2933	d->ctrlSet = 0;
2934	d->ctrlClear = 0;
2935	d->cmdPtr = 0;
2936
2937	d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2938	d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2939
2940	if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2941		PRINT(KERN_ERR, "Failed to allocate dma buffer");
2942		free_dma_rcv_ctx(d);
2943		return -ENOMEM;
2944	}
2945
2946	d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2947	d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2948
2949	if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2950		PRINT(KERN_ERR, "Failed to allocate dma prg");
2951		free_dma_rcv_ctx(d);
2952		return -ENOMEM;
2953	}
2954
2955	d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2956
2957	if (d->spb == NULL) {
2958		PRINT(KERN_ERR, "Failed to allocate split buffer");
2959		free_dma_rcv_ctx(d);
2960		return -ENOMEM;
2961	}
2962
2963	len = sprintf(pool_name, "ohci1394_rcv_prg");
2964	sprintf(pool_name+len, "%d", num_allocs);
2965	d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2966				sizeof(struct dma_cmd), 4, 0);
2967	if(d->prg_pool == NULL)
2968	{
2969		PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2970		free_dma_rcv_ctx(d);
2971		return -ENOMEM;
2972	}
2973	num_allocs++;
2974
2975	OHCI_DMA_ALLOC("dma_rcv prg pool");
2976
2977	for (i=0; i<d->num_desc; i++) {
2978		d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
2979						     d->buf_size,
2980						     d->buf_bus+i);
2981		OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
2982
2983		if (d->buf_cpu[i] != NULL) {
2984			memset(d->buf_cpu[i], 0, d->buf_size);
2985		} else {
2986			PRINT(KERN_ERR,
2987			      "Failed to allocate dma buffer");
2988			free_dma_rcv_ctx(d);
2989			return -ENOMEM;
2990		}
2991
2992		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
2993		OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
2994
2995                if (d->prg_cpu[i] != NULL) {
2996                        memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2997		} else {
2998			PRINT(KERN_ERR,
2999			      "Failed to allocate dma prg");
3000			free_dma_rcv_ctx(d);
3001			return -ENOMEM;
3002		}
3003	}
3004
3005        spin_lock_init(&d->lock);
3006
3007	if (type == DMA_CTX_ISO) {
3008		ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3009					  OHCI_ISO_MULTICHANNEL_RECEIVE,
3010					  dma_rcv_tasklet, (unsigned long) d);
3011	} else {
3012		d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3013		d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3014		d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3015
3016		tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3017	}
3018
3019	return 0;
3020}
3021
3022static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3023{
3024	int i;
3025	struct ti_ohci *ohci = d->ohci;
3026
3027	if (ohci == NULL)
3028		return;
3029
3030	DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3031
3032	if (d->prg_cpu) {
3033		for (i=0; i<d->num_desc; i++)
3034			if (d->prg_cpu[i] && d->prg_bus[i]) {
3035				pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3036				OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3037			}
3038		pci_pool_destroy(d->prg_pool);
3039		OHCI_DMA_FREE("dma_trm prg pool");
3040		kfree(d->prg_cpu);
3041		kfree(d->prg_bus);
3042	}
3043
3044	/* Mark this context as freed. */
3045	d->ohci = NULL;
3046}
3047
3048static int
3049alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3050		  enum context_type type, int ctx, int num_desc,
3051		  int context_base)
3052{
3053	int i, len;
3054	static char pool_name[20];
3055	static int num_allocs=0;
3056
3057	d->ohci = ohci;
3058	d->type = type;
3059	d->ctx = ctx;
3060	d->num_desc = num_desc;
3061	d->ctrlSet = 0;
3062	d->ctrlClear = 0;
3063	d->cmdPtr = 0;
3064
3065	d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
3066	d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
3067
3068	if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3069		PRINT(KERN_ERR, "Failed to allocate at dma prg");
3070		free_dma_trm_ctx(d);
3071		return -ENOMEM;
3072	}
3073
3074	len = sprintf(pool_name, "ohci1394_trm_prg");
3075	sprintf(pool_name+len, "%d", num_allocs);
3076	d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3077				sizeof(struct at_dma_prg), 4, 0);
3078	if (d->prg_pool == NULL) {
3079		PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3080		free_dma_trm_ctx(d);
3081		return -ENOMEM;
3082	}
3083	num_allocs++;
3084
3085	OHCI_DMA_ALLOC("dma_rcv prg pool");
3086
3087	for (i = 0; i < d->num_desc; i++) {
3088		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
3089		OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3090
3091                if (d->prg_cpu[i] != NULL) {
3092                        memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3093		} else {
3094			PRINT(KERN_ERR,
3095			      "Failed to allocate at dma prg");
3096			free_dma_trm_ctx(d);
3097			return -ENOMEM;
3098		}
3099	}
3100
3101        spin_lock_init(&d->lock);
3102
3103	/* initialize tasklet */
3104	if (type == DMA_CTX_ISO) {
3105		ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3106					  dma_trm_tasklet, (unsigned long) d);
3107		if (ohci1394_register_iso_tasklet(ohci,
3108						  &ohci->it_legacy_tasklet) < 0) {
3109			PRINT(KERN_ERR, "No IT DMA context available");
3110			free_dma_trm_ctx(d);
3111			return -EBUSY;
3112		}
3113
3114		/* IT can be assigned to any context by register_iso_tasklet */
3115		d->ctx = ohci->it_legacy_tasklet.context;
3116		d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3117		d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3118		d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3119	} else {
3120		d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3121		d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3122		d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3123		tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3124	}
3125
3126	return 0;
3127}
3128
3129static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
3130{
3131	struct ti_ohci *ohci = host->hostdata;
3132
3133	reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
3134	reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
3135
3136	memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
3137}
3138
3139
3140static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3141                                 quadlet_t data, quadlet_t compare)
3142{
3143	struct ti_ohci *ohci = host->hostdata;
3144	int i;
3145
3146	reg_write(ohci, OHCI1394_CSRData, data);
3147	reg_write(ohci, OHCI1394_CSRCompareData, compare);
3148	reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3149
3150	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3151		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3152			break;
3153
3154		mdelay(1);
3155	}
3156
3157	return reg_read(ohci, OHCI1394_CSRData);
3158}
3159
3160static struct hpsb_host_driver ohci1394_driver = {
3161	.owner =		THIS_MODULE,
3162	.name =			OHCI1394_DRIVER_NAME,
3163	.set_hw_config_rom =	ohci_set_hw_config_rom,
3164	.transmit_packet =	ohci_transmit,
3165	.devctl =		ohci_devctl,
3166	.isoctl =               ohci_isoctl,
3167	.hw_csr_reg =		ohci_hw_csr_reg,
3168};
3169
3170/***********************************
3171 * PCI Driver Interface functions  *
3172 ***********************************/
3173
3174#define FAIL(err, fmt, args...)			\
3175do {						\
3176	PRINT_G(KERN_ERR, fmt , ## args);	\
3177        ohci1394_pci_remove(dev);               \
3178	return err;				\
3179} while (0)
3180
3181static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3182					const struct pci_device_id *ent)
3183{
3184	struct hpsb_host *host;
3185	struct ti_ohci *ohci;	/* shortcut to currently handled device */
3186	resource_size_t ohci_base;
3187
3188#ifdef CONFIG_PPC_PMAC
3189	/* Necessary on some machines if ohci1394 was loaded/ unloaded before */
3190	if (machine_is(powermac)) {
3191		struct device_node *ofn = pci_device_to_OF_node(dev);
3192
3193		if (ofn) {
3194			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
3195			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3196		}
3197	}
3198#endif /* CONFIG_PPC_PMAC */
3199
3200        if (pci_enable_device(dev))
3201		FAIL(-ENXIO, "Failed to enable OHCI hardware");
3202        pci_set_master(dev);
3203
3204	host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3205	if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
3206
3207	ohci = host->hostdata;
3208	ohci->dev = dev;
3209	ohci->host = host;
3210	ohci->init_state = OHCI_INIT_ALLOC_HOST;
3211	host->pdev = dev;
3212	pci_set_drvdata(dev, ohci);
3213
3214	/* We don't want hardware swapping */
3215	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3216
3217	/* Some oddball Apple controllers do not order the selfid
3218	 * properly, so we make up for it here.  */
3219#ifndef __LITTLE_ENDIAN
3220	if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3221	    dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3222		ohci->no_swap_incoming = 1;
3223		ohci->selfid_swap = 0;
3224	} else
3225		ohci->selfid_swap = 1;
3226#endif
3227
3228
3229#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3230#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3231#endif
3232
3233	/* These chipsets require a bit of extra care when checking after
3234	 * a busreset.  */
3235	if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3236	     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3237	    (dev->vendor ==  PCI_VENDOR_ID_NVIDIA &&
3238	     dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3239		ohci->check_busreset = 1;
3240
3241	/* We hardwire the MMIO length, since some CardBus adaptors
3242	 * fail to report the right length.  Anyway, the ohci spec
3243	 * clearly says it's 2kb, so this shouldn't be a problem. */
3244	ohci_base = pci_resource_start(dev, 0);
3245	if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
3246		PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
3247		      (unsigned long long)pci_resource_len(dev, 0));
3248
3249	if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
3250				OHCI1394_DRIVER_NAME))
3251		FAIL(-ENOMEM, "MMIO resource (0x%llx - 0x%llx) unavailable",
3252			(unsigned long long)ohci_base,
3253			(unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
3254	ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3255
3256	ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3257	if (ohci->registers == NULL)
3258		FAIL(-ENXIO, "Failed to remap registers - card not accessible");
3259	ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3260	DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3261
3262	/* csr_config rom allocation */
3263	ohci->csr_config_rom_cpu =
3264		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3265				     &ohci->csr_config_rom_bus);
3266	OHCI_DMA_ALLOC("consistent csr_config_rom");
3267	if (ohci->csr_config_rom_cpu == NULL)
3268		FAIL(-ENOMEM, "Failed to allocate buffer config rom");
3269	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3270
3271	/* self-id dma buffer allocation */
3272	ohci->selfid_buf_cpu =
3273		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3274                      &ohci->selfid_buf_bus);
3275	OHCI_DMA_ALLOC("consistent selfid_buf");
3276
3277	if (ohci->selfid_buf_cpu == NULL)
3278		FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
3279	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3280
3281	if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3282		PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
3283		      "8Kb boundary... may cause problems on some CXD3222 chip",
3284		      ohci->selfid_buf_cpu);
3285
3286	/* No self-id errors at startup */
3287	ohci->self_id_errors = 0;
3288
3289	ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3290	/* AR DMA request context allocation */
3291	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3292			      DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3293			      AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3294			      OHCI1394_AsReqRcvContextBase) < 0)
3295		FAIL(-ENOMEM, "Failed to allocate AR Req context");
3296
3297	/* AR DMA response context allocation */
3298	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3299			      DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3300			      AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3301			      OHCI1394_AsRspRcvContextBase) < 0)
3302		FAIL(-ENOMEM, "Failed to allocate AR Resp context");
3303
3304	/* AT DMA request context */
3305	if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3306			      DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3307			      OHCI1394_AsReqTrContextBase) < 0)
3308		FAIL(-ENOMEM, "Failed to allocate AT Req context");
3309
3310	/* AT DMA response context */
3311	if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3312			      DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3313			      OHCI1394_AsRspTrContextBase) < 0)
3314		FAIL(-ENOMEM, "Failed to allocate AT Resp context");
3315
3316	/* Start off with a soft reset, to clear everything to a sane
3317	 * state. */
3318	ohci_soft_reset(ohci);
3319
3320	/* Now enable LPS, which we need in order to start accessing
3321	 * most of the registers.  In fact, on some cards (ALI M5251),
3322	 * accessing registers in the SClk domain without LPS enabled
3323	 * will lock up the machine.  Wait 50msec to make sure we have
3324	 * full link enabled.  */
3325	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3326
3327	/* Disable and clear interrupts */
3328	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3329	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3330
3331	mdelay(50);
3332
3333	/* Determine the number of available IR and IT contexts. */
3334	ohci->nb_iso_rcv_ctx =
3335		get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3336	ohci->nb_iso_xmit_ctx =
3337		get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3338
3339	/* Set the usage bits for non-existent contexts so they can't
3340	 * be allocated */
3341	ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3342	ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
3343
3344	INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3345	spin_lock_init(&ohci->iso_tasklet_list_lock);
3346	ohci->ISO_channel_usage = 0;
3347        spin_lock_init(&ohci->IR_channel_lock);
3348
3349	/* Allocate the IR DMA context right here so we don't have
3350	 * to do it in interrupt path - note that this doesn't
3351	 * waste much memory and avoids the jugglery required to
3352	 * allocate it in IRQ path. */
3353	if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
3354			      DMA_CTX_ISO, 0, IR_NUM_DESC,
3355			      IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
3356			      OHCI1394_IsoRcvContextBase) < 0) {
3357		FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
3358	}
3359
3360	/* We hopefully don't have to pre-allocate IT DMA like we did
3361	 * for IR DMA above. Allocate it on-demand and mark inactive. */
3362	ohci->it_legacy_context.ohci = NULL;
3363	spin_lock_init(&ohci->event_lock);
3364
3365	/*
3366	 * interrupts are disabled, all right, but... due to IRQF_SHARED we
3367	 * might get called anyway.  We'll see no event, of course, but
3368	 * we need to get to that "no event", so enough should be initialized
3369	 * by that point.
3370	 */
3371	if (request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
3372			 OHCI1394_DRIVER_NAME, ohci))
3373		FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
3374
3375	ohci->init_state = OHCI_INIT_HAVE_IRQ;
3376	ohci_initialize(ohci);
3377
3378	/* Set certain csr values */
3379	host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3380	host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3381	host->csr.cyc_clk_acc = 100;  /* how do we determine clk accuracy? */
3382	host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3383	host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3384
3385	if (phys_dma) {
3386		host->low_addr_space =
3387			(u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
3388		if (!host->low_addr_space)
3389			host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
3390	}
3391	host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
3392
3393	/* Tell the highlevel this host is ready */
3394	if (hpsb_add_host(host))
3395		FAIL(-ENOMEM, "Failed to register host with highlevel");
3396
3397	ohci->init_state = OHCI_INIT_DONE;
3398
3399	return 0;
3400#undef FAIL
3401}
3402
3403static void ohci1394_pci_remove(struct pci_dev *pdev)
3404{
3405	struct ti_ohci *ohci;
3406	struct device *dev;
3407
3408	ohci = pci_get_drvdata(pdev);
3409	if (!ohci)
3410		return;
3411
3412	dev = get_device(&ohci->host->device);
3413
3414	switch (ohci->init_state) {
3415	case OHCI_INIT_DONE:
3416		hpsb_remove_host(ohci->host);
3417
3418		/* Clear out BUS Options */
3419		reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3420		reg_write(ohci, OHCI1394_BusOptions,
3421			  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3422			  0x00ff0000);
3423		memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3424
3425	case OHCI_INIT_HAVE_IRQ:
3426		/* Clear interrupt registers */
3427		reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3428		reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3429		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3430		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3431		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3432		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3433
3434		/* Disable IRM Contender */
3435		set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3436
3437		/* Clear link control register */
3438		reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3439
3440		/* Let all other nodes know to ignore us */
3441		ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3442
3443		/* Soft reset before we start - this disables
3444		 * interrupts and clears linkEnable and LPS. */
3445		ohci_soft_reset(ohci);
3446		free_irq(ohci->dev->irq, ohci);
3447
3448	case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3449		/* The ohci_soft_reset() stops all DMA contexts, so we
3450		 * dont need to do this.  */
3451		free_dma_rcv_ctx(&ohci->ar_req_context);
3452		free_dma_rcv_ctx(&ohci->ar_resp_context);
3453		free_dma_trm_ctx(&ohci->at_req_context);
3454		free_dma_trm_ctx(&ohci->at_resp_context);
3455		free_dma_rcv_ctx(&ohci->ir_legacy_context);
3456		free_dma_trm_ctx(&ohci->it_legacy_context);
3457
3458	case OHCI_INIT_HAVE_SELFID_BUFFER:
3459		pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3460				    ohci->selfid_buf_cpu,
3461				    ohci->selfid_buf_bus);
3462		OHCI_DMA_FREE("consistent selfid_buf");
3463
3464	case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3465		pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3466				    ohci->csr_config_rom_cpu,
3467				    ohci->csr_config_rom_bus);
3468		OHCI_DMA_FREE("consistent csr_config_rom");
3469
3470	case OHCI_INIT_HAVE_IOMAPPING:
3471		iounmap(ohci->registers);
3472
3473	case OHCI_INIT_HAVE_MEM_REGION:
3474		release_mem_region(pci_resource_start(ohci->dev, 0),
3475				   OHCI1394_REGISTER_SIZE);
3476
3477#ifdef CONFIG_PPC_PMAC
3478	/* On UniNorth, power down the cable and turn off the chip clock
3479	 * to save power on laptops */
3480	if (machine_is(powermac)) {
3481		struct device_node* ofn = pci_device_to_OF_node(ohci->dev);
3482
3483		if (ofn) {
3484			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3485			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3486		}
3487	}
3488#endif /* CONFIG_PPC_PMAC */
3489
3490	case OHCI_INIT_ALLOC_HOST:
3491		pci_set_drvdata(ohci->dev, NULL);
3492	}
3493
3494	if (dev)
3495		put_device(dev);
3496}
3497
3498#ifdef CONFIG_PM
3499static int ohci1394_pci_suspend(struct pci_dev *pdev, pm_message_t state)
3500{
3501	int err;
3502	struct ti_ohci *ohci = pci_get_drvdata(pdev);
3503
3504	if (!ohci) {
3505		printk(KERN_ERR "%s: tried to suspend nonexisting host\n",
3506		       OHCI1394_DRIVER_NAME);
3507		return -ENXIO;
3508	}
3509	DBGMSG("suspend called");
3510
3511	/* Clear the async DMA contexts and stop using the controller */
3512	hpsb_bus_reset(ohci->host);
3513
3514	/* See ohci1394_pci_remove() for comments on this sequence */
3515	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3516	reg_write(ohci, OHCI1394_BusOptions,
3517		  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3518		  0x00ff0000);
3519	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3520	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3521	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3522	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3523	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3524	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3525	set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3526	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3527	ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3528	ohci_soft_reset(ohci);
3529
3530	err = pci_save_state(pdev);
3531	if (err) {
3532		PRINT(KERN_ERR, "pci_save_state failed with %d", err);
3533		return err;
3534	}
3535	err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
3536	if (err)
3537		DBGMSG("pci_set_power_state failed with %d", err);
3538
3539/* PowerMac suspend code comes last */
3540#ifdef CONFIG_PPC_PMAC
3541	if (machine_is(powermac)) {
3542		struct device_node *ofn = pci_device_to_OF_node(pdev);
3543
3544		if (ofn)
3545			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3546	}
3547#endif /* CONFIG_PPC_PMAC */
3548
3549	return 0;
3550}
3551
3552static int ohci1394_pci_resume(struct pci_dev *pdev)
3553{
3554	int err;
3555	struct ti_ohci *ohci = pci_get_drvdata(pdev);
3556
3557	if (!ohci) {
3558		printk(KERN_ERR "%s: tried to resume nonexisting host\n",
3559		       OHCI1394_DRIVER_NAME);
3560		return -ENXIO;
3561	}
3562	DBGMSG("resume called");
3563
3564/* PowerMac resume code comes first */
3565#ifdef CONFIG_PPC_PMAC
3566	if (machine_is(powermac)) {
3567		struct device_node *ofn = pci_device_to_OF_node(pdev);
3568
3569		if (ofn)
3570			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3571	}
3572#endif /* CONFIG_PPC_PMAC */
3573
3574	pci_set_power_state(pdev, PCI_D0);
3575	pci_restore_state(pdev);
3576	err = pci_enable_device(pdev);
3577	if (err) {
3578		PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
3579		return err;
3580	}
3581
3582	/* See ohci1394_pci_probe() for comments on this sequence */
3583	ohci_soft_reset(ohci);
3584	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3585	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3586	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3587	mdelay(50);
3588	ohci_initialize(ohci);
3589
3590	hpsb_resume_host(ohci->host);
3591	return 0;
3592}
3593#endif /* CONFIG_PM */
3594
3595static struct pci_device_id ohci1394_pci_tbl[] = {
3596	{
3597		.class = 	PCI_CLASS_SERIAL_FIREWIRE_OHCI,
3598		.class_mask = 	PCI_ANY_ID,
3599		.vendor =	PCI_ANY_ID,
3600		.device =	PCI_ANY_ID,
3601		.subvendor =	PCI_ANY_ID,
3602		.subdevice =	PCI_ANY_ID,
3603	},
3604	{ 0, },
3605};
3606
3607MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3608
3609static struct pci_driver ohci1394_pci_driver = {
3610	.name =		OHCI1394_DRIVER_NAME,
3611	.id_table =	ohci1394_pci_tbl,
3612	.probe =	ohci1394_pci_probe,
3613	.remove =	ohci1394_pci_remove,
3614#ifdef CONFIG_PM
3615	.resume =	ohci1394_pci_resume,
3616	.suspend =	ohci1394_pci_suspend,
3617#endif
3618};
3619
3620/***********************************
3621 * OHCI1394 Video Interface        *
3622 ***********************************/
3623
3624/* essentially the only purpose of this code is to allow another
3625   module to hook into ohci's interrupt handler */
3626
3627/* returns zero if successful, one if DMA context is locked up */
3628int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3629{
3630	int i=0;
3631
3632	/* stop the channel program if it's still running */
3633	reg_write(ohci, reg, 0x8000);
3634
3635	/* Wait until it effectively stops */
3636	while (reg_read(ohci, reg) & 0x400) {
3637		i++;
3638		if (i>5000) {
3639			PRINT(KERN_ERR,
3640			      "Runaway loop while stopping context: %s...", msg ? msg : "");
3641			return 1;
3642		}
3643
3644		mb();
3645		udelay(10);
3646	}
3647	if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3648	return 0;
3649}
3650
3651void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3652			       void (*func)(unsigned long), unsigned long data)
3653{
3654	tasklet_init(&tasklet->tasklet, func, data);
3655	tasklet->type = type;
3656	/* We init the tasklet->link field, so we can list_del() it
3657	 * without worrying whether it was added to the list or not. */
3658	INIT_LIST_HEAD(&tasklet->link);
3659}
3660
3661int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3662				  struct ohci1394_iso_tasklet *tasklet)
3663{
3664	unsigned long flags, *usage;
3665	int n, i, r = -EBUSY;
3666
3667	if (tasklet->type == OHCI_ISO_TRANSMIT) {
3668		n = ohci->nb_iso_xmit_ctx;
3669		usage = &ohci->it_ctx_usage;
3670	}
3671	else {
3672		n = ohci->nb_iso_rcv_ctx;
3673		usage = &ohci->ir_ctx_usage;
3674
3675		/* only one receive context can be multichannel (OHCI sec 10.4.1) */
3676		if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3677			if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3678				return r;
3679			}
3680		}
3681	}
3682
3683	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3684
3685	for (i = 0; i < n; i++)
3686		if (!test_and_set_bit(i, usage)) {
3687			tasklet->context = i;
3688			list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3689			r = 0;
3690			break;
3691		}
3692
3693	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3694
3695	return r;
3696}
3697
3698void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3699				     struct ohci1394_iso_tasklet *tasklet)
3700{
3701	unsigned long flags;
3702
3703	tasklet_kill(&tasklet->tasklet);
3704
3705	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3706
3707	if (tasklet->type == OHCI_ISO_TRANSMIT)
3708		clear_bit(tasklet->context, &ohci->it_ctx_usage);
3709	else {
3710		clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3711
3712		if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3713			clear_bit(0, &ohci->ir_multichannel_used);
3714		}
3715	}
3716
3717	list_del(&tasklet->link);
3718
3719	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3720}
3721
3722EXPORT_SYMBOL(ohci1394_stop_context);
3723EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3724EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3725EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3726
3727/***********************************
3728 * General module initialization   *
3729 ***********************************/
3730
3731MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3732MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3733MODULE_LICENSE("GPL");
3734
3735static void __exit ohci1394_cleanup (void)
3736{
3737	pci_unregister_driver(&ohci1394_pci_driver);
3738}
3739
3740static int __init ohci1394_init(void)
3741{
3742	return pci_register_driver(&ohci1394_pci_driver);
3743}
3744
3745/* Register before most other device drivers.
3746 * Useful for remote debugging via physical DMA, e.g. using firescope. */
3747fs_initcall(ohci1394_init);
3748module_exit(ohci1394_cleanup);
3749