/*
 * IEEE 1394 for Linux
 *
 * Core support: hpsb_packet management, packet handling and forwarding to
 *               highlevel or lowlevel code
 *
 * Copyright (C) 1999, 2000 Andreas E. Bombe
 *                     2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 *
 *
 * Contributions:
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *        loopback functionality in hpsb_send_packet
 *        allow highlevel drivers to disable automatic response generation
 *              and to generate responses themselves (deferred)
 *
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/kdev_t.h>
#include <linux/suspend.h>
#include <linux/kthread.h>
#include <linux/preempt.h>
#include <linux/time.h>

#include <asm/system.h>
#include <asm/byteorder.h>

#include "ieee1394_types.h"
#include "ieee1394.h"
#include "hosts.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "ieee1394_transactions.h"
#include "csr.h"
#include "nodemgr.h"
#include "dma.h"
#include "iso.h"
#include "config_roms.h"

/*
 * Disable the nodemgr detection and config rom reading functionality.
 */
static int disable_nodemgr;
module_param(disable_nodemgr, int, 0444);
MODULE_PARM_DESC(disable_nodemgr, "Disable nodemgr functionality.");

/* Disable Isochronous Resource Manager functionality */
int hpsb_disable_irm = 0;
module_param_named(disable_irm, hpsb_disable_irm, bool, 0444);
MODULE_PARM_DESC(disable_irm,
		 "Disable Isochronous Resource Manager functionality.");
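
/*
 * Usage sketch (illustrative only): both parameters have 0444 permissions,
 * i.e. they are readable via sysfs but can only be set at module load time,
 * for instance
 *
 *	modprobe ieee1394 disable_nodemgr=1 disable_irm=1
 *
 * Note that disabling nodemgr also forces IRM functionality off, see
 * ieee1394_init() below.
 */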

/* We are GPL, so treat us special */
MODULE_LICENSE("GPL");

/* Some globals used */
const char *hpsb_speedto_str[] = { "S100", "S200", "S400", "S800", "S1600", "S3200" };
struct class *hpsb_protocol_class;

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
static void dump_packet(const char *text, quadlet_t *data, int size, int speed)
{
	int i;

	size /= 4;
	size = (size > 4 ? 4 : size);

	printk(KERN_DEBUG "ieee1394: %s", text);
	if (speed > -1 && speed < 6)
		printk(" at %s", hpsb_speedto_str[speed]);
	printk(":");
	for (i = 0; i < size; i++)
		printk(" %08x", data[i]);
	printk("\n");
}
#else
#define dump_packet(a,b,c,d) do {} while (0)
#endif

static void abort_requests(struct hpsb_host *host);
static void queue_packet_complete(struct hpsb_packet *packet);


/**
 * hpsb_set_packet_complete_task - set task that runs when a packet completes
 * @packet: the packet whose completion we want the task added to
 * @routine: function to call
 * @data: data (if any) to pass to the above function
 *
 * Set the task that runs when a packet completes. You cannot call this more
 * than once on a single packet before it is sent.
 *
 * Typically, the complete @routine is responsible for calling
 * hpsb_free_packet().
 */
void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
				   void (*routine)(void *), void *data)
{
	WARN_ON(packet->complete_routine != NULL);
	packet->complete_routine = routine;
	packet->complete_data = data;
	return;
}
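
/*
 * Usage sketch (illustrative only, not part of this file): a highlevel
 * driver sending a packet asynchronously typically installs a completion
 * routine that checks the result and releases the packet, e.g.
 *
 *	static void my_complete(void *data)
 *	{
 *		struct hpsb_packet *p = data;
 *
 *		if (hpsb_packet_success(p) != 0)
 *			... handle error ...
 *		hpsb_free_packet(p);
 *	}
 *
 *	hpsb_set_packet_complete_task(packet, my_complete, packet);
 *	if (hpsb_send_packet(packet) < 0)
 *		hpsb_free_packet(packet);	caller frees on send failure
 */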

/**
 * hpsb_alloc_packet - allocate new packet structure
 * @data_size: size of the data block to be allocated, in bytes
 *
 * This function allocates, initializes and returns a new &struct hpsb_packet.
 * It can be used in interrupt context.  A header block is always included and
 * initialized with zeros.  Its size is big enough to contain all possible 1394
 * headers.  The data block is only allocated if @data_size is not zero.
 *
 * For packets for which responses will be received, @data_size has to be big
 * enough to contain the response's data block since no further allocation
 * occurs at response matching time.
 *
 * The packet's generation value is initialized to an invalid value (-1).
 * Remember to set it to the bus generation you recorded (for example via
 * get_hpsb_generation()) before sending, otherwise hpsb_send_packet() will
 * reject the packet.
 *
 * Return value: A pointer to a &struct hpsb_packet or NULL on allocation
 * failure.
 */
struct hpsb_packet *hpsb_alloc_packet(size_t data_size)
{
	struct hpsb_packet *packet;

	data_size = ((data_size + 3) & ~3);

	packet = kzalloc(sizeof(*packet) + data_size, GFP_ATOMIC);
	if (!packet)
		return NULL;

	packet->state = hpsb_unused;
	packet->generation = -1;
	INIT_LIST_HEAD(&packet->driver_list);
	INIT_LIST_HEAD(&packet->queue);
	atomic_set(&packet->refcnt, 1);

	if (data_size) {
		packet->data = packet->embedded_data;
		packet->allocated_data_size = data_size;
	}
	return packet;
}
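
/*
 * Sizing sketch (illustrative only): a block read request carries no payload
 * itself, but its response does, so the response buffer has to be allocated
 * up front.  For a 64-byte block read one would do
 *
 *	struct hpsb_packet *packet = hpsb_alloc_packet(64);
 *
 * and the response data later ends up in packet->data.  @data_size is
 * rounded up to quadlet (4 byte) alignment above.
 */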

/**
 * hpsb_free_packet - free packet and data associated with it
 * @packet: packet to free (is NULL safe)
 *
 * Frees @packet->data only if it was allocated through hpsb_alloc_packet().
 */
void hpsb_free_packet(struct hpsb_packet *packet)
{
	if (packet && atomic_dec_and_test(&packet->refcnt)) {
		BUG_ON(!list_empty(&packet->driver_list) ||
		       !list_empty(&packet->queue));
		kfree(packet);
	}
}

/**
 * hpsb_reset_bus - initiate bus reset on the given host
 * @host: host controller whose bus to reset
 * @type: one of enum reset_types
 *
 * Returns 1 if bus reset already in progress, 0 otherwise.
 */
int hpsb_reset_bus(struct hpsb_host *host, int type)
{
	if (!host->in_bus_reset) {
		host->driver->devctl(host, RESET_BUS, type);
		return 0;
	} else {
		return 1;
	}
}

/**
 * hpsb_read_cycle_timer - read cycle timer register and system time
 * @host: host whose isochronous cycle timer register is read
 * @cycle_timer: address of bitfield to return the register contents
 * @local_time: address to return the system time
 *
 * The format of @cycle_timer is described in OHCI 1.1 clause 5.13.  This
 * format is also read from non-OHCI controllers.  @local_time contains the
 * system time in microseconds since the Epoch, read at the moment when the
 * cycle timer was read.
 *
 * Return value: 0 for success or error number otherwise.
 */
int hpsb_read_cycle_timer(struct hpsb_host *host, u32 *cycle_timer,
			  u64 *local_time)
{
	int ctr;
	struct timeval tv;
	unsigned long flags;

	if (!host || !cycle_timer || !local_time)
		return -EINVAL;

	preempt_disable();
	local_irq_save(flags);

	ctr = host->driver->devctl(host, GET_CYCLE_COUNTER, 0);
	if (ctr)
		do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	if (!ctr)
		return -EIO;
	*cycle_timer = ctr;
	*local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	return 0;
}
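
/*
 * Decoding sketch (assumes the OHCI 1.1 clause 5.13 layout mentioned above):
 * the 32-bit value splits into seconds, 8 kHz cycles and 24.576 MHz offset:
 *
 *	u32 ct;
 *	u64 usecs;
 *
 *	if (hpsb_read_cycle_timer(host, &ct, &usecs) == 0) {
 *		unsigned int sec    = ct >> 25;             7 bits
 *		unsigned int cycles = (ct >> 12) & 0x1fff;  13 bits, 0..7999
 *		unsigned int offset = ct & 0xfff;           12 bits, 0..3071
 *		...
 *	}
 */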

/**
 * hpsb_bus_reset - notify a bus reset to the core
 *
 * For host driver module usage.  Safe to use in interrupt context, although
 * it is quite complex, so you may want to run it from a bottom half rather
 * than the top half.
 *
 * Returns 1 if bus reset already in progress, 0 otherwise.
 */
int hpsb_bus_reset(struct hpsb_host *host)
{
	if (host->in_bus_reset) {
		HPSB_NOTICE("%s called while bus reset already in progress",
			    __func__);
		return 1;
	}

	abort_requests(host);
	host->in_bus_reset = 1;
	host->irm_id = -1;
	host->is_irm = 0;
	host->busmgr_id = -1;
	host->is_busmgr = 0;
	host->is_cycmst = 0;
	host->node_count = 0;
	host->selfid_count = 0;

	return 0;
}


/*
 * Verify the stored SelfIDs, count the nodes and update host->node_count.
 * Returns 1 if the SelfIDs are consistent, 0 if verification failed.
 */
static int check_selfids(struct hpsb_host *host)
{
	int nodeid = -1;
	int rest_of_selfids = host->selfid_count;
	struct selfid *sid = (struct selfid *)host->topology_map;
	struct ext_selfid *esid;
	int esid_seq = 23;

	host->nodes_active = 0;

	while (rest_of_selfids--) {
		if (!sid->extended) {
			nodeid++;
			esid_seq = 0;

			if (sid->phy_id != nodeid) {
				HPSB_INFO("SelfIDs failed monotonicity check with "
					  "%d", sid->phy_id);
				return 0;
			}

			if (sid->link_active) {
				host->nodes_active++;
				if (sid->contender)
					host->irm_id = LOCAL_BUS | sid->phy_id;
			}
		} else {
			esid = (struct ext_selfid *)sid;

			if ((esid->phy_id != nodeid)
			    || (esid->seq_nr != esid_seq)) {
				HPSB_INFO("SelfIDs failed monotonicity check with "
					  "%d/%d", esid->phy_id, esid->seq_nr);
				return 0;
			}
			esid_seq++;
		}
		sid++;
	}

	esid = (struct ext_selfid *)(sid - 1);
	while (esid->extended) {
		if ((esid->porta == SELFID_PORT_PARENT) ||
		    (esid->portb == SELFID_PORT_PARENT) ||
		    (esid->portc == SELFID_PORT_PARENT) ||
		    (esid->portd == SELFID_PORT_PARENT) ||
		    (esid->porte == SELFID_PORT_PARENT) ||
		    (esid->portf == SELFID_PORT_PARENT) ||
		    (esid->portg == SELFID_PORT_PARENT) ||
		    (esid->porth == SELFID_PORT_PARENT)) {
			HPSB_INFO("SelfIDs failed root check on "
				  "extended SelfID");
			return 0;
		}
		esid--;
	}

	sid = (struct selfid *)esid;
	if ((sid->port0 == SELFID_PORT_PARENT) ||
	    (sid->port1 == SELFID_PORT_PARENT) ||
	    (sid->port2 == SELFID_PORT_PARENT)) {
		HPSB_INFO("SelfIDs failed root check");
		return 0;
	}

	host->node_count = nodeid + 1;
	return 1;
}

static void build_speed_map(struct hpsb_host *host, int nodecount)
{
	u8 cldcnt[nodecount];
	u8 *map = host->speed_map;
	u8 *speedcap = host->speed;
	struct selfid *sid;
	struct ext_selfid *esid;
	int i, j, n;

	for (i = 0; i < (nodecount * 64); i += 64) {
		for (j = 0; j < nodecount; j++) {
			map[i+j] = IEEE1394_SPEED_MAX;
		}
	}

	for (i = 0; i < nodecount; i++) {
		cldcnt[i] = 0;
	}

	/* find direct children count and speed */
	for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1],
		     n = nodecount - 1;
	     (void *)sid >= (void *)host->topology_map; sid--) {
		if (sid->extended) {
			esid = (struct ext_selfid *)sid;

			if (esid->porta == SELFID_PORT_CHILD) cldcnt[n]++;
			if (esid->portb == SELFID_PORT_CHILD) cldcnt[n]++;
			if (esid->portc == SELFID_PORT_CHILD) cldcnt[n]++;
			if (esid->portd == SELFID_PORT_CHILD) cldcnt[n]++;
			if (esid->porte == SELFID_PORT_CHILD) cldcnt[n]++;
			if (esid->portf == SELFID_PORT_CHILD) cldcnt[n]++;
			if (esid->portg == SELFID_PORT_CHILD) cldcnt[n]++;
			if (esid->porth == SELFID_PORT_CHILD) cldcnt[n]++;
		} else {
			if (sid->port0 == SELFID_PORT_CHILD) cldcnt[n]++;
			if (sid->port1 == SELFID_PORT_CHILD) cldcnt[n]++;
			if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++;

			speedcap[n] = sid->speed;
			n--;
		}
	}

	/* set self mapping */
	for (i = 0; i < nodecount; i++) {
		map[64*i + i] = speedcap[i];
	}

	/* fix up direct children count to total children count;
	 * also fix up speedcaps for sibling and parent communication */
	for (i = 1; i < nodecount; i++) {
		for (j = cldcnt[i], n = i - 1; j > 0; j--) {
			cldcnt[i] += cldcnt[n];
			speedcap[n] = min(speedcap[n], speedcap[i]);
			n -= cldcnt[n] + 1;
		}
	}

	for (n = 0; n < nodecount; n++) {
		for (i = n - cldcnt[n]; i <= n; i++) {
			for (j = 0; j < (n - cldcnt[n]); j++) {
				map[j*64 + i] = map[i*64 + j] =
					min(map[i*64 + j], speedcap[n]);
			}
			for (j = n + 1; j < nodecount; j++) {
				map[j*64 + i] = map[i*64 + j] =
					min(map[i*64 + j], speedcap[n]);
			}
		}
	}

#if SELFID_SPEED_UNKNOWN != IEEE1394_SPEED_MAX
	/* assume maximum speed for 1394b PHYs, nodemgr will correct it */
	for (n = 0; n < nodecount; n++)
		if (speedcap[n] == SELFID_SPEED_UNKNOWN)
			speedcap[n] = IEEE1394_SPEED_MAX;
#endif
}


/**
 * hpsb_selfid_received - hand over received selfid packet to the core
 *
 * For host driver module usage.  Safe to use in interrupt context.
 *
 * The host driver should have done a successful complement check (second
 * quadlet is complement of first) beforehand.
 */
void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
{
	if (host->in_bus_reset) {
		HPSB_VERBOSE("Including SelfID 0x%x", sid);
		host->topology_map[host->selfid_count++] = sid;
	} else {
		HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from bus %d",
			    sid, NODEID_TO_BUS(host->node_id));
	}
}

/**
 * hpsb_selfid_complete - notify completion of SelfID stage to the core
 *
 * For host driver module usage.  Safe to use in interrupt context, although
 * it is quite complex, so you may want to run it from a bottom half rather
 * than the top half.
 *
 * Notify completion of SelfID stage to the core and report new physical ID
 * and whether host is root now.
 */
void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
{
	if (!host->in_bus_reset)
		HPSB_NOTICE("SelfID completion called outside of bus reset!");

	host->node_id = LOCAL_BUS | phyid;
	host->is_root = isroot;

	if (!check_selfids(host)) {
		if (host->reset_retries++ < 20) {
			/* selfid stage did not complete without error */
			HPSB_NOTICE("Error in SelfID stage, resetting");
			host->in_bus_reset = 0;
			/* this should work from ohci1394 now... */
			hpsb_reset_bus(host, LONG_RESET);
			return;
		} else {
			HPSB_NOTICE("Stopping out-of-control reset loop");
			HPSB_NOTICE("Warning - topology map and speed map will not be valid");
			host->reset_retries = 0;
		}
	} else {
		host->reset_retries = 0;
		build_speed_map(host, host->node_count);
	}

	HPSB_VERBOSE("selfid_complete called with successful SelfID stage "
		     "... irm_id: 0x%X node_id: 0x%X", host->irm_id, host->node_id);

	/* irm_id is kept up to date by check_selfids() */
	if (host->irm_id == host->node_id) {
		host->is_irm = 1;
	} else {
		host->is_busmgr = 0;
		host->is_irm = 0;
	}

	if (isroot) {
		host->driver->devctl(host, ACT_CYCLE_MASTER, 1);
		host->is_cycmst = 1;
	}
	atomic_inc(&host->generation);
	host->in_bus_reset = 0;
	highlevel_host_reset(host);
}

static DEFINE_SPINLOCK(pending_packets_lock);

/**
 * hpsb_packet_sent - notify core of sending a packet
 *
 * For host driver module usage.  Safe to call from within a transmit packet
 * routine.
 *
 * Notify core of sending a packet.  Ackcode is the ack code returned for async
 * transmits or ACKX_SEND_ERROR if the transmission failed completely; ACKX_NONE
 * for other cases (internal errors that don't justify a panic).
 */
void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
		      int ackcode)
{
	unsigned long flags;

	spin_lock_irqsave(&pending_packets_lock, flags);

	packet->ack_code = ackcode;

	if (packet->no_waiter || packet->state == hpsb_complete) {
		/* if packet->no_waiter, must not have a tlabel allocated */
		spin_unlock_irqrestore(&pending_packets_lock, flags);
		hpsb_free_packet(packet);
		return;
	}

	atomic_dec(&packet->refcnt);	/* drop HC's reference */
	/* here the packet must be on the host->pending_packets queue */

	if (ackcode != ACK_PENDING || !packet->expect_response) {
		packet->state = hpsb_complete;
		list_del_init(&packet->queue);
		spin_unlock_irqrestore(&pending_packets_lock, flags);
		queue_packet_complete(packet);
		return;
	}

	packet->state = hpsb_pending;
	packet->sendtime = jiffies;

	spin_unlock_irqrestore(&pending_packets_lock, flags);

	mod_timer(&host->timeout, jiffies + host->timeout_interval);
}

/**
 * hpsb_send_phy_config - transmit a PHY configuration packet on the bus
 * @host: host that PHY config packet gets sent through
 * @rootid: root whose force_root bit should get set (-1 = don't set force_root)
 * @gapcnt: gap count value to set (-1 = don't set gap count)
 *
 * This function sends a PHY config packet on the bus through the specified
 * host.
 *
 * Return value: 0 for success or negative error number otherwise.
 */
int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
{
	struct hpsb_packet *packet;
	quadlet_t d = 0;
	int retval = 0;

	if (rootid >= ALL_NODES || rootid < -1 || gapcnt > 0x3f || gapcnt < -1 ||
	   (rootid == -1 && gapcnt == -1)) {
		HPSB_DEBUG("Invalid Parameter: rootid = %d   gapcnt = %d",
			   rootid, gapcnt);
		return -EINVAL;
	}

	if (rootid != -1)
		d |= PHYPACKET_PHYCONFIG_R | rootid << PHYPACKET_PORT_SHIFT;
	if (gapcnt != -1)
		d |= PHYPACKET_PHYCONFIG_T | gapcnt << PHYPACKET_GAPCOUNT_SHIFT;

	packet = hpsb_make_phypacket(host, d);
	if (!packet)
		return -ENOMEM;

	packet->generation = get_hpsb_generation(host);
	retval = hpsb_send_packet_and_wait(packet);
	hpsb_free_packet(packet);

	return retval;
}
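
/*
 * Usage sketch (illustrative only, assumes the SHORT_RESET_FORCE_ROOT reset
 * type from hosts.h): asking the local node to become root while leaving the
 * gap count alone could look like
 *
 *	if (hpsb_send_phy_config(host, NODEID_TO_NODE(host->node_id), -1) == 0)
 *		hpsb_reset_bus(host, SHORT_RESET_FORCE_ROOT);
 *
 * The force_root setting only takes effect at the following bus reset.
 */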

/**
 * hpsb_send_packet - transmit a packet on the bus
 * @packet: packet to send
 *
 * The packet is sent through the host specified in the packet->host field.
 * Before sending, the packet's transmit speed is automatically determined
 * using the local speed map when it is an async, non-broadcast packet.
 *
 * Possibilities for failure are that host is either not initialized, in bus
 * reset, the packet's generation number doesn't match the current generation
 * number or the host reports a transmit error.
 *
 * Return value: 0 on success, negative errno on failure.
 */
int hpsb_send_packet(struct hpsb_packet *packet)
{
	struct hpsb_host *host = packet->host;

	if (host->is_shutdown)
		return -EINVAL;
	if (host->in_bus_reset ||
	    (packet->generation != get_hpsb_generation(host)))
		return -EAGAIN;

	packet->state = hpsb_queued;

	/* no_waiter combined with expect_response makes no sense: there would
	 * be nobody left to collect the response */
	WARN_ON(packet->no_waiter && packet->expect_response);

	if (!packet->no_waiter || packet->expect_response) {
		unsigned long flags;

		atomic_inc(&packet->refcnt);
		/* Set the initial "sendtime" to 10 seconds from now, to
		   prevent premature expiry.  If a packet takes more than
		   10 seconds to hit the wire, we have bigger problems :) */
		packet->sendtime = jiffies + 10 * HZ;
		spin_lock_irqsave(&pending_packets_lock, flags);
		list_add_tail(&packet->queue, &host->pending_packets);
		spin_unlock_irqrestore(&pending_packets_lock, flags);
	}

	if (packet->node_id == host->node_id) {
		/* it is a local request, so handle it locally */

		quadlet_t *data;
		size_t size = packet->data_size + packet->header_size;

		data = kmalloc(size, GFP_ATOMIC);
		if (!data) {
			HPSB_ERR("unable to allocate memory for concatenating header and data");
			return -ENOMEM;
		}

		memcpy(data, packet->header, packet->header_size);

		if (packet->data_size)
			memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size);

		dump_packet("send packet local", packet->header, packet->header_size, -1);

		hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE);
		hpsb_packet_received(host, data, size, 0);

		kfree(data);

		return 0;
	}

	if (packet->type == hpsb_async &&
	    NODEID_TO_NODE(packet->node_id) != ALL_NODES)
		packet->speed_code =
			host->speed[NODEID_TO_NODE(packet->node_id)];

	dump_packet("send packet", packet->header, packet->header_size, packet->speed_code);

	return host->driver->transmit_packet(host, packet);
}

/* We could just use complete() directly as the packet complete
 * callback, but this is more typesafe, in the sense that we get a
 * compiler error if the prototype for complete() changes. */

static void complete_packet(void *data)
{
	complete((struct completion *) data);
}

/**
 * hpsb_send_packet_and_wait - enqueue packet, block until transaction completes
 * @packet: packet to send
 *
 * Return value: 0 on success, negative errno on failure.
 */
int hpsb_send_packet_and_wait(struct hpsb_packet *packet)
{
	struct completion done;
	int retval;

	init_completion(&done);
	hpsb_set_packet_complete_task(packet, complete_packet, &done);
	retval = hpsb_send_packet(packet);
	if (retval == 0)
		wait_for_completion(&done);

	return retval;
}
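
/*
 * Synchronous usage sketch (illustrative only, using the helpers from
 * ieee1394_transactions.c): a quadlet read from a remote node might look like
 *
 *	quadlet_t value;
 *	struct hpsb_packet *packet =
 *		hpsb_make_readpacket(host, node_id, addr, 4);
 *
 *	if (packet) {
 *		packet->generation = get_hpsb_generation(host);
 *		if (hpsb_send_packet_and_wait(packet) == 0 &&
 *		    hpsb_packet_success(packet) == 0)
 *			value = packet->header[3];
 *		hpsb_free_tlabel(packet);
 *		hpsb_free_packet(packet);
 *	}
 *
 * Most drivers use the higher-level hpsb_read()/hpsb_write() wrappers instead.
 */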

static void send_packet_nocare(struct hpsb_packet *packet)
{
	if (hpsb_send_packet(packet) < 0) {
		hpsb_free_packet(packet);
	}
}

static size_t packet_size_to_data_size(size_t packet_size, size_t header_size,
				       size_t buffer_size, int tcode)
{
	size_t ret = packet_size <= header_size ? 0 : packet_size - header_size;

	if (unlikely(ret > buffer_size))
		ret = buffer_size;

	if (unlikely(ret + header_size != packet_size))
		HPSB_ERR("unexpected packet size %zd (tcode %d), bug?",
			 packet_size, tcode);
	return ret;
}

static void handle_packet_response(struct hpsb_host *host, int tcode,
				   quadlet_t *data, size_t size)
{
	struct hpsb_packet *packet;
	int tlabel = (data[0] >> 10) & 0x3f;
	size_t header_size;
	unsigned long flags;

	spin_lock_irqsave(&pending_packets_lock, flags);

	list_for_each_entry(packet, &host->pending_packets, queue)
		if (packet->tlabel == tlabel &&
		    packet->node_id == (data[1] >> 16))
			goto found;

	spin_unlock_irqrestore(&pending_packets_lock, flags);
	HPSB_DEBUG("unsolicited response packet received - %s",
		   "no tlabel match");
	dump_packet("contents", data, 16, -1);
	return;

found:
	switch (packet->tcode) {
	case TCODE_WRITEQ:
	case TCODE_WRITEB:
		if (unlikely(tcode != TCODE_WRITE_RESPONSE))
			break;
		header_size = 12;
		size = 0;
		goto dequeue;

	case TCODE_READQ:
		if (unlikely(tcode != TCODE_READQ_RESPONSE))
			break;
		header_size = 16;
		size = 0;
		goto dequeue;

	case TCODE_READB:
		if (unlikely(tcode != TCODE_READB_RESPONSE))
			break;
		header_size = 16;
		size = packet_size_to_data_size(size, header_size,
						packet->allocated_data_size,
						tcode);
		goto dequeue;

	case TCODE_LOCK_REQUEST:
		if (unlikely(tcode != TCODE_LOCK_RESPONSE))
			break;
		header_size = 16;
		size = packet_size_to_data_size(min(size, (size_t)(16 + 8)),
						header_size,
						packet->allocated_data_size,
						tcode);
		goto dequeue;
	}

	spin_unlock_irqrestore(&pending_packets_lock, flags);
	HPSB_DEBUG("unsolicited response packet received - %s",
		   "tcode mismatch");
	dump_packet("contents", data, 16, -1);
	return;

dequeue:
	list_del_init(&packet->queue);
	spin_unlock_irqrestore(&pending_packets_lock, flags);

	if (packet->state == hpsb_queued) {
		packet->sendtime = jiffies;
		packet->ack_code = ACK_PENDING;
	}
	packet->state = hpsb_complete;

	memcpy(packet->header, data, header_size);
	if (size)
		memcpy(packet->data, data + 4, size);

	queue_packet_complete(packet);
}


static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
					       quadlet_t *data, size_t dsize)
{
	struct hpsb_packet *p;

	p = hpsb_alloc_packet(dsize);
	if (unlikely(p == NULL)) {
		HPSB_ERR("out of memory, cannot send response packet");
		return NULL;
	}

	p->type = hpsb_async;
	p->state = hpsb_unused;
	p->host = host;
	p->node_id = data[1] >> 16;
	p->tlabel = (data[0] >> 10) & 0x3f;
	p->no_waiter = 1;

	p->generation = get_hpsb_generation(host);

	if (dsize % 4)
		p->data[dsize / 4] = 0;

	return p;
}

#define PREP_ASYNC_HEAD_RCODE(tc) \
	packet->tcode = tc; \
	packet->header[0] = (packet->node_id << 16) | (packet->tlabel << 10) \
		| (1 << 8) | (tc << 4); \
	packet->header[1] = (packet->host->node_id << 16) | (rcode << 12); \
	packet->header[2] = 0

static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
			      quadlet_t data)
{
	PREP_ASYNC_HEAD_RCODE(TCODE_READQ_RESPONSE);
	packet->header[3] = data;
	packet->header_size = 16;
	packet->data_size = 0;
}

static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
			       int length)
{
	if (rcode != RCODE_COMPLETE)
		length = 0;

	PREP_ASYNC_HEAD_RCODE(TCODE_READB_RESPONSE);
	packet->header[3] = length << 16;
	packet->header_size = 16;
	packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
}

static void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
{
	PREP_ASYNC_HEAD_RCODE(TCODE_WRITE_RESPONSE);
	packet->header_size = 12;
	packet->data_size = 0;
}

static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode,
			  int length)
{
	if (rcode != RCODE_COMPLETE)
		length = 0;

	PREP_ASYNC_HEAD_RCODE(TCODE_LOCK_RESPONSE);
	packet->header[3] = (length << 16) | extcode;
	packet->header_size = 16;
	packet->data_size = length;
}

static void handle_incoming_packet(struct hpsb_host *host, int tcode,
				   quadlet_t *data, size_t size,
				   int write_acked)
{
	struct hpsb_packet *packet;
	int length, rcode, extcode;
	quadlet_t buffer;
	nodeid_t source = data[1] >> 16;
	nodeid_t dest = data[0] >> 16;
	u16 flags = (u16) data[0];
	u64 addr;

	switch (tcode) {
	case TCODE_WRITEQ:
		addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
		rcode = highlevel_write(host, source, dest, data + 3,
					addr, 4, flags);
		goto handle_write_request;

	case TCODE_WRITEB:
		addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
		rcode = highlevel_write(host, source, dest, data + 4,
					addr, data[3] >> 16, flags);
handle_write_request:
		if (rcode < 0 || write_acked ||
		    NODEID_TO_NODE(data[0] >> 16) == NODE_MASK)
			return;
		/* not a broadcast write, reply */
		packet = create_reply_packet(host, data, 0);
		if (packet) {
			fill_async_write_resp(packet, rcode);
			send_packet_nocare(packet);
		}
		return;

	case TCODE_READQ:
		addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
		rcode = highlevel_read(host, source, &buffer, addr, 4, flags);
		if (rcode < 0)
			return;

		packet = create_reply_packet(host, data, 0);
		if (packet) {
			fill_async_readquad_resp(packet, rcode, buffer);
			send_packet_nocare(packet);
		}
		return;

	case TCODE_READB:
		length = data[3] >> 16;
		packet = create_reply_packet(host, data, length);
		if (!packet)
			return;

		addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
		rcode = highlevel_read(host, source, packet->data, addr,
				       length, flags);
		if (rcode < 0) {
			hpsb_free_packet(packet);
			return;
		}
		fill_async_readblock_resp(packet, rcode, length);
		send_packet_nocare(packet);
		return;

	case TCODE_LOCK_REQUEST:
		length = data[3] >> 16;
		extcode = data[3] & 0xffff;
		addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];

		packet = create_reply_packet(host, data, 8);
		if (!packet)
			return;

		if (extcode == 0 || extcode >= 7) {
			/* let switch default handle error */
			length = 0;
		}

		switch (length) {
		case 4:
			rcode = highlevel_lock(host, source, packet->data, addr,
					       data[4], 0, extcode, flags);
			fill_async_lock_resp(packet, rcode, extcode, 4);
			break;
		case 8:
			if (extcode != EXTCODE_FETCH_ADD &&
			    extcode != EXTCODE_LITTLE_ADD) {
				rcode = highlevel_lock(host, source,
						       packet->data, addr,
						       data[5], data[4],
						       extcode, flags);
				fill_async_lock_resp(packet, rcode, extcode, 4);
			} else {
				rcode = highlevel_lock64(host, source,
					     (octlet_t *)packet->data, addr,
					     *(octlet_t *)(data + 4), 0ULL,
					     extcode, flags);
				fill_async_lock_resp(packet, rcode, extcode, 8);
			}
			break;
		case 16:
			rcode = highlevel_lock64(host, source,
						 (octlet_t *)packet->data, addr,
						 *(octlet_t *)(data + 6),
						 *(octlet_t *)(data + 4),
						 extcode, flags);
			fill_async_lock_resp(packet, rcode, extcode, 8);
			break;
		default:
			rcode = RCODE_TYPE_ERROR;
			fill_async_lock_resp(packet, rcode, extcode, 0);
		}

		if (rcode < 0)
			hpsb_free_packet(packet);
		else
			send_packet_nocare(packet);
		return;
	}
}

/**
 * hpsb_packet_received - hand over received packet to the core
 *
 * For host driver module usage.
 *
 * The contents of @data are expected to be the full packet but with the CRCs
 * left out (data block follows header immediately), with the header (i.e. the
 * first four quadlets) in machine byte order and the data block in big endian.
 * The buffer at @data can be safely overwritten after this call.
 *
 * If the packet is a write request, @write_acked is to be set to true if it was
 * ack_complete'd already, false otherwise.  This argument is ignored for any
 * other packet type.
 */
void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
			  int write_acked)
{
	int tcode;

	if (unlikely(host->in_bus_reset)) {
		HPSB_DEBUG("received packet during reset; ignoring");
		return;
	}

	dump_packet("received packet", data, size, -1);

	tcode = (data[0] >> 4) & 0xf;

	switch (tcode) {
	case TCODE_WRITE_RESPONSE:
	case TCODE_READQ_RESPONSE:
	case TCODE_READB_RESPONSE:
	case TCODE_LOCK_RESPONSE:
		handle_packet_response(host, tcode, data, size);
		break;

	case TCODE_WRITEQ:
	case TCODE_WRITEB:
	case TCODE_READQ:
	case TCODE_READB:
	case TCODE_LOCK_REQUEST:
		handle_incoming_packet(host, tcode, data, size, write_acked);
		break;

	case TCODE_ISO_DATA:
		highlevel_iso_receive(host, data, size);
		break;

	case TCODE_CYCLE_START:
		/* simply ignore this packet if it is passed on */
		break;

	default:
		HPSB_DEBUG("received packet with bogus transaction code %d",
			   tcode);
		break;
	}
}

static void abort_requests(struct hpsb_host *host)
{
	struct hpsb_packet *packet, *p;
	struct list_head tmp;
	unsigned long flags;

	host->driver->devctl(host, CANCEL_REQUESTS, 0);

	INIT_LIST_HEAD(&tmp);
	spin_lock_irqsave(&pending_packets_lock, flags);
	list_splice_init(&host->pending_packets, &tmp);
	spin_unlock_irqrestore(&pending_packets_lock, flags);

	list_for_each_entry_safe(packet, p, &tmp, queue) {
		list_del_init(&packet->queue);
		packet->state = hpsb_complete;
		packet->ack_code = ACKX_ABORTED;
		queue_packet_complete(packet);
	}
}

void abort_timedouts(unsigned long __opaque)
{
	struct hpsb_host *host = (struct hpsb_host *)__opaque;
	struct hpsb_packet *packet, *p;
	struct list_head tmp;
	unsigned long flags, expire, j;

	spin_lock_irqsave(&host->csr.lock, flags);
	expire = host->csr.expire;
	spin_unlock_irqrestore(&host->csr.lock, flags);

	j = jiffies;
	INIT_LIST_HEAD(&tmp);
	spin_lock_irqsave(&pending_packets_lock, flags);

	list_for_each_entry_safe(packet, p, &host->pending_packets, queue) {
		if (time_before(packet->sendtime + expire, j))
			list_move_tail(&packet->queue, &tmp);
		else
			/* Since packets are added to the tail, the oldest
			 * ones are first, always. When we get to one that
			 * isn't timed out, the rest aren't either. */
			break;
	}
	if (!list_empty(&host->pending_packets))
		mod_timer(&host->timeout, j + host->timeout_interval);

	spin_unlock_irqrestore(&pending_packets_lock, flags);

	list_for_each_entry_safe(packet, p, &tmp, queue) {
		list_del_init(&packet->queue);
		packet->state = hpsb_complete;
		packet->ack_code = ACKX_TIMEOUT;
		queue_packet_complete(packet);
	}
}

static struct task_struct *khpsbpkt_thread;
static LIST_HEAD(hpsbpkt_queue);

static void queue_packet_complete(struct hpsb_packet *packet)
{
	unsigned long flags;

	if (packet->no_waiter) {
		hpsb_free_packet(packet);
		return;
	}
	if (packet->complete_routine != NULL) {
		spin_lock_irqsave(&pending_packets_lock, flags);
		list_add_tail(&packet->queue, &hpsbpkt_queue);
		spin_unlock_irqrestore(&pending_packets_lock, flags);
		wake_up_process(khpsbpkt_thread);
	}
	return;
}

/*
 * Kernel thread which handles packets that are completed.  This way the
 * packet's "complete" function is asynchronously run in process context.
 * Only packets which have a "complete" function may be sent here.
 */
static int hpsbpkt_thread(void *__hi)
{
	struct hpsb_packet *packet, *p;
	struct list_head tmp;
	int may_schedule;

	current->flags |= PF_NOFREEZE;

	while (!kthread_should_stop()) {

		INIT_LIST_HEAD(&tmp);
		spin_lock_irq(&pending_packets_lock);
		list_splice_init(&hpsbpkt_queue, &tmp);
		spin_unlock_irq(&pending_packets_lock);

		list_for_each_entry_safe(packet, p, &tmp, queue) {
			list_del_init(&packet->queue);
			packet->complete_routine(packet->complete_data);
		}

		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irq(&pending_packets_lock);
		may_schedule = list_empty(&hpsbpkt_queue);
		spin_unlock_irq(&pending_packets_lock);
		if (may_schedule)
			schedule();
		__set_current_state(TASK_RUNNING);
	}
	return 0;
}

static int __init ieee1394_init(void)
{
	int i, ret;

	/* non-fatal error */
	if (hpsb_init_config_roms()) {
		HPSB_ERR("Failed to initialize some config rom entries.");
		HPSB_ERR("Some features may not be available");
	}

	khpsbpkt_thread = kthread_run(hpsbpkt_thread, NULL, "khpsbpkt");
	if (IS_ERR(khpsbpkt_thread)) {
		HPSB_ERR("Failed to start hpsbpkt thread!");
		ret = PTR_ERR(khpsbpkt_thread);
		goto exit_cleanup_config_roms;
	}

	if (register_chrdev_region(IEEE1394_CORE_DEV, 256, "ieee1394")) {
		HPSB_ERR("unable to register character device major %d!", IEEE1394_MAJOR);
		ret = -ENODEV;
		goto exit_release_kernel_thread;
	}

	ret = bus_register(&ieee1394_bus_type);
	if (ret < 0) {
		HPSB_INFO("bus register failed");
		goto release_chrdev;
	}

	for (i = 0; fw_bus_attrs[i]; i++) {
		ret = bus_create_file(&ieee1394_bus_type, fw_bus_attrs[i]);
		if (ret < 0) {
			while (--i >= 0) {
				bus_remove_file(&ieee1394_bus_type,
						fw_bus_attrs[i]);
			}
			bus_unregister(&ieee1394_bus_type);
			goto release_chrdev;
		}
	}

	ret = class_register(&hpsb_host_class);
	if (ret < 0)
		goto release_all_bus;

	hpsb_protocol_class = class_create(THIS_MODULE, "ieee1394_protocol");
	if (IS_ERR(hpsb_protocol_class)) {
		ret = PTR_ERR(hpsb_protocol_class);
		goto release_class_host;
	}

	ret = init_csr();
	if (ret) {
		HPSB_INFO("init csr failed");
		ret = -ENOMEM;
		goto release_class_protocol;
	}

	if (disable_nodemgr) {
		HPSB_INFO("nodemgr and IRM functionality disabled");
		/* We shouldn't contend for IRM with nodemgr disabled, since
		   nodemgr implements functionality required of ieee1394a-2000
		   IRMs */
		hpsb_disable_irm = 1;

		return 0;
	}

	if (hpsb_disable_irm) {
		HPSB_INFO("IRM functionality disabled");
	}

	ret = init_ieee1394_nodemgr();
	if (ret < 0) {
		HPSB_INFO("init nodemgr failed");
		goto cleanup_csr;
	}

	return 0;

cleanup_csr:
	cleanup_csr();
release_class_protocol:
	class_destroy(hpsb_protocol_class);
release_class_host:
	class_unregister(&hpsb_host_class);
release_all_bus:
	for (i = 0; fw_bus_attrs[i]; i++)
		bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
	bus_unregister(&ieee1394_bus_type);
release_chrdev:
	unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
exit_release_kernel_thread:
	kthread_stop(khpsbpkt_thread);
exit_cleanup_config_roms:
	hpsb_cleanup_config_roms();
	return ret;
}

static void __exit ieee1394_cleanup(void)
{
	int i;

	if (!disable_nodemgr)
		cleanup_ieee1394_nodemgr();

	cleanup_csr();

	class_destroy(hpsb_protocol_class);
	class_unregister(&hpsb_host_class);
	for (i = 0; fw_bus_attrs[i]; i++)
		bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
	bus_unregister(&ieee1394_bus_type);

	kthread_stop(khpsbpkt_thread);

	hpsb_cleanup_config_roms();

	unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
}

fs_initcall(ieee1394_init); /* same as ohci1394 */
module_exit(ieee1394_cleanup);

/* Exported symbols */

/** hosts.c **/
EXPORT_SYMBOL(hpsb_alloc_host);
EXPORT_SYMBOL(hpsb_add_host);
EXPORT_SYMBOL(hpsb_resume_host);
EXPORT_SYMBOL(hpsb_remove_host);
EXPORT_SYMBOL(hpsb_update_config_rom_image);

/** ieee1394_core.c **/
EXPORT_SYMBOL(hpsb_speedto_str);
EXPORT_SYMBOL(hpsb_protocol_class);
EXPORT_SYMBOL(hpsb_set_packet_complete_task);
EXPORT_SYMBOL(hpsb_alloc_packet);
EXPORT_SYMBOL(hpsb_free_packet);
EXPORT_SYMBOL(hpsb_send_packet);
EXPORT_SYMBOL(hpsb_reset_bus);
EXPORT_SYMBOL(hpsb_read_cycle_timer);
EXPORT_SYMBOL(hpsb_bus_reset);
EXPORT_SYMBOL(hpsb_selfid_received);
EXPORT_SYMBOL(hpsb_selfid_complete);
EXPORT_SYMBOL(hpsb_packet_sent);
EXPORT_SYMBOL(hpsb_packet_received);
EXPORT_SYMBOL_GPL(hpsb_disable_irm);

/** ieee1394_transactions.c **/
EXPORT_SYMBOL(hpsb_get_tlabel);
EXPORT_SYMBOL(hpsb_free_tlabel);
EXPORT_SYMBOL(hpsb_make_readpacket);
EXPORT_SYMBOL(hpsb_make_writepacket);
EXPORT_SYMBOL(hpsb_make_streampacket);
EXPORT_SYMBOL(hpsb_make_lockpacket);
EXPORT_SYMBOL(hpsb_make_lock64packet);
EXPORT_SYMBOL(hpsb_make_phypacket);
EXPORT_SYMBOL(hpsb_make_isopacket);
EXPORT_SYMBOL(hpsb_read);
EXPORT_SYMBOL(hpsb_write);
EXPORT_SYMBOL(hpsb_packet_success);

/** highlevel.c **/
EXPORT_SYMBOL(hpsb_register_highlevel);
EXPORT_SYMBOL(hpsb_unregister_highlevel);
EXPORT_SYMBOL(hpsb_register_addrspace);
EXPORT_SYMBOL(hpsb_unregister_addrspace);
EXPORT_SYMBOL(hpsb_allocate_and_register_addrspace);
EXPORT_SYMBOL(hpsb_listen_channel);
EXPORT_SYMBOL(hpsb_unlisten_channel);
EXPORT_SYMBOL(hpsb_get_hostinfo);
EXPORT_SYMBOL(hpsb_create_hostinfo);
EXPORT_SYMBOL(hpsb_destroy_hostinfo);
EXPORT_SYMBOL(hpsb_set_hostinfo_key);
EXPORT_SYMBOL(hpsb_get_hostinfo_bykey);
EXPORT_SYMBOL(hpsb_set_hostinfo);

/** nodemgr.c **/
EXPORT_SYMBOL(hpsb_node_fill_packet);
EXPORT_SYMBOL(hpsb_node_write);
EXPORT_SYMBOL(__hpsb_register_protocol);
EXPORT_SYMBOL(hpsb_unregister_protocol);

/** csr.c **/
EXPORT_SYMBOL(hpsb_update_config_rom);

/** dma.c **/
EXPORT_SYMBOL(dma_prog_region_init);
EXPORT_SYMBOL(dma_prog_region_alloc);
EXPORT_SYMBOL(dma_prog_region_free);
EXPORT_SYMBOL(dma_region_init);
EXPORT_SYMBOL(dma_region_alloc);
EXPORT_SYMBOL(dma_region_free);
EXPORT_SYMBOL(dma_region_sync_for_cpu);
EXPORT_SYMBOL(dma_region_sync_for_device);
EXPORT_SYMBOL(dma_region_mmap);
EXPORT_SYMBOL(dma_region_offset_to_bus);

/** iso.c **/
EXPORT_SYMBOL(hpsb_iso_xmit_init);
EXPORT_SYMBOL(hpsb_iso_recv_init);
EXPORT_SYMBOL(hpsb_iso_xmit_start);
EXPORT_SYMBOL(hpsb_iso_recv_start);
EXPORT_SYMBOL(hpsb_iso_recv_listen_channel);
EXPORT_SYMBOL(hpsb_iso_recv_unlisten_channel);
EXPORT_SYMBOL(hpsb_iso_recv_set_channel_mask);
EXPORT_SYMBOL(hpsb_iso_stop);
EXPORT_SYMBOL(hpsb_iso_shutdown);
EXPORT_SYMBOL(hpsb_iso_xmit_queue_packet);
EXPORT_SYMBOL(hpsb_iso_xmit_sync);
EXPORT_SYMBOL(hpsb_iso_recv_release_packets);
EXPORT_SYMBOL(hpsb_iso_n_ready);
EXPORT_SYMBOL(hpsb_iso_packet_sent);
EXPORT_SYMBOL(hpsb_iso_packet_received);
EXPORT_SYMBOL(hpsb_iso_wake);
EXPORT_SYMBOL(hpsb_iso_recv_flush);

/** csr1212.c **/
EXPORT_SYMBOL(csr1212_attach_keyval_to_directory);
EXPORT_SYMBOL(csr1212_detach_keyval_from_directory);
EXPORT_SYMBOL(csr1212_get_keyval);
EXPORT_SYMBOL(csr1212_new_directory);
EXPORT_SYMBOL(csr1212_parse_keyval);
EXPORT_SYMBOL(csr1212_read);
EXPORT_SYMBOL(csr1212_release_keyval);
1384