/* NOTE: extracted from a web code-browser listing of
 * asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/net/typhoon.c
 */
/* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 */
static int rx_copybreak = 200;	/* bytes; frames <= this are copied on Rx */

/* Should we use MMIO or Port IO?
 * 0: Port IO
 * 1: MMIO
 * 2: Try MMIO, fallback to Port IO
 */
static unsigned int use_mmio = 2;

/* end user-configurable values */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 * Above this count the card is switched to all-multicast mode instead.
 */
static const int multicast_filter_limit = 32;
20
21/* Operational parameters that are set at compile time. */
22
23/* Keep the ring sizes a power of two for compile efficiency.
24 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
25 * Making the Tx ring too large decreases the effectiveness of channel
26 * bonding and packet priority.
27 * There are no ill effects from too-large receive rings.
28 *
29 * We don't currently use the Hi Tx ring so, don't make it very big.
30 *
31 * Beware that if we start using the Hi Tx ring, we will need to change
32 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
33 */
34#define TXHI_ENTRIES		2
35#define TXLO_ENTRIES		128
36#define RX_ENTRIES		32
37#define COMMAND_ENTRIES		16
38#define RESPONSE_ENTRIES	32
39
40#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
41#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
42
43/* The 3XP will preload and remove 64 entries from the free buffer
44 * list, and we need one entry to keep the ring from wrapping, so
45 * to keep this a power of two, we use 128 entries.
46 */
47#define RXFREE_ENTRIES		128
48#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
49
50/* Operational parameters that usually are not changed. */
51
52/* Time in jiffies before concluding the transmitter is hung. */
53#define TX_TIMEOUT  (2*HZ)
54
55#define PKT_BUF_SZ		1536
56#define FIRMWARE_NAME		"3com/typhoon.bin"
57
58#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
59
60#include <linux/module.h>
61#include <linux/kernel.h>
62#include <linux/sched.h>
63#include <linux/string.h>
64#include <linux/timer.h>
65#include <linux/errno.h>
66#include <linux/ioport.h>
67#include <linux/interrupt.h>
68#include <linux/pci.h>
69#include <linux/netdevice.h>
70#include <linux/etherdevice.h>
71#include <linux/skbuff.h>
72#include <linux/mm.h>
73#include <linux/init.h>
74#include <linux/delay.h>
75#include <linux/ethtool.h>
76#include <linux/if_vlan.h>
77#include <linux/crc32.h>
78#include <linux/bitops.h>
79#include <asm/processor.h>
80#include <asm/io.h>
81#include <asm/uaccess.h>
82#include <linux/in6.h>
83#include <linux/dma-mapping.h>
84#include <linux/firmware.h>
85#include <generated/utsrelease.h>
86
87#include "typhoon.h"
88
/* Module metadata and user-visible parameters (see tunables above). */
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_VERSION(UTS_RELEASE);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
			       "the buffer given back to the NIC. Default "
			       "is 200.");
MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
			   "Default is to try MMIO and fallback to PIO.");
/* NOTE(review): use_mmio is declared unsigned int but registered as int —
 * matches upstream, but worth confirming against module_param type checks.
 */
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);
101
102#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
103#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
104#undef NETIF_F_TSO
105#endif
106
107#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
108#error TX ring too small!
109#endif
110
/* Static per-model description: marketing name plus a bitmask of the
 * TYPHOON_CRYPTO_*/TYPHOON_FIBER/TYPHOON_WAKEUP_* capability flags below.
 */
struct typhoon_card_info {
	const char *name;		/* human-readable board name */
	const int capabilities;		/* TYPHOON_* capability flag mask */
};
115
116#define TYPHOON_CRYPTO_NONE		0x00
117#define TYPHOON_CRYPTO_DES		0x01
118#define TYPHOON_CRYPTO_3DES		0x02
119#define	TYPHOON_CRYPTO_VARIABLE		0x04
120#define TYPHOON_FIBER			0x08
121#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
122
/* Index into typhoon_card_info[]; also stored as driver_data in the
 * PCI device table below.
 */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
129
/* directly indexed by enum typhoon_cards, above -- keep the two in sync */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
	 	TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
159
/* Notes on the new subsystem numbering scheme:
 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
 * bit 4 indicates if this card has secured firmware (we don't support it)
 * bit 8 indicates if this is a (0) copper or (1) fiber card
 * bits 12-16 indicate card type: (0) client and (1) server
 */
/* PCI match table; driver_data carries the enum typhoon_cards index used
 * to look up capabilities in typhoon_card_info[].
 */
static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
196
/* Define the shared memory area
 * Align everything the 3XP will normally be using.
 * We'll need to move/align txHi if we start using that ring.
 */
#define __3xp_aligned	____cacheline_aligned
/* Single DMA-coherent allocation shared with the 3XP: the interface
 * block, the host/card index exchange area, and all descriptor rings.
 * Layout is part of the host/firmware contract -- hence __packed.
 */
struct typhoon_shared {
	struct typhoon_interface	iface;
	struct typhoon_indexes		indexes			__3xp_aligned;
	struct tx_desc			txLo[TXLO_ENTRIES] 	__3xp_aligned;
	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
	u32				zeroWord;
	struct tx_desc			txHi[TXHI_ENTRIES];
} __packed;
214
/* Host-side bookkeeping for one Rx buffer: the skb and the DMA address
 * it was mapped at, so it can be unmapped on completion.
 */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t	dma_addr;
};
219
/* Per-adapter driver state.  Fields are grouped by the hot path that
 * touches them (Tx, Irq/Rx, general) and cacheline-aligned per group to
 * limit false sharing between those paths.
 */
struct typhoon {
	/* Tx cache line section */
	struct transmit_ring 	txLoRing	____cacheline_aligned;
	struct pci_dev *	tx_pdev;
	void __iomem		*tx_ioaddr;
	u32			txlo_dma_addr;	/* DMA base of the txLo ring */

	/* Irq/Rx cache line section */
	void __iomem		*ioaddr		____cacheline_aligned;
	struct typhoon_indexes *indexes;	/* points into shared area */
	u8			awaiting_resp;	/* a command response is pending */
	u8			duplex;		/* DUPLEX_FULL/DUPLEX_HALF from stats */
	u8			speed;		/* SPEED_100/SPEED_10 from stats */
	u8			card_state;	/* enum state_values below */
	struct basic_ring	rxLoRing;
	struct pci_dev *	pdev;
	struct net_device *	dev;
	struct napi_struct	napi;
	spinlock_t		state_lock;
	struct vlan_group *	vlgrp;
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];

	/* general section */
	spinlock_t		command_lock	____cacheline_aligned;
	struct basic_ring	cmdRing;
	struct basic_ring	respRing;
	struct net_device_stats	stats;		/* live counters */
	struct net_device_stats	stats_saved;	/* counters saved across sleep */
	struct typhoon_shared *	shared;
	dma_addr_t		shared_dma;
	__le16			xcvr_select;
	__le16			wol_events;
	__le32			offload;	/* TYPHOON_OFFLOAD_* mask sent to card */

	/* unused stuff (future use) */
	int			capabilities;
	struct transmit_ring 	txHiRing;
};
260
/* How long typhoon_reset() may wait, and whether it may sleep doing so. */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};
264
/* These are the values for the typhoon.card_state variable.
 * These determine where the statistics will come from in get_stats().
 * The sleep image does not support the statistics we need.
 */
enum state_values {
	Sleeping = 0, Running,
};
272
273/* PCI writes are not guaranteed to be posted in order, but outstanding writes
274 * cannot pass a read, so this forces current writes to post.
275 */
276#define typhoon_post_pci_writes(x) \
277	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
278
279/* We'll wait up to six seconds for a reset, and half a second normally.
280 */
281#define TYPHOON_UDELAY			50
282#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
283#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
284#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
285
286#if defined(NETIF_F_TSO)
287#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
288#define TSO_NUM_DESCRIPTORS	2
289#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
290#else
291#define NETIF_F_TSO 		0
292#define skb_tso_size(x) 	0
293#define TSO_NUM_DESCRIPTORS	0
294#define TSO_OFFLOAD_ON		0
295#endif
296
297static inline void
298typhoon_inc_index(u32 *index, const int count, const int num_entries)
299{
300	/* Increment a ring index -- we can use this for all rings execept
301	 * the Rx rings, as they use different size descriptors
302	 * otherwise, everything is the same size as a cmd_desc
303	 */
304	*index += count * sizeof(struct cmd_desc);
305	*index %= num_entries * sizeof(struct cmd_desc);
306}
307
308static inline void
309typhoon_inc_cmd_index(u32 *index, const int count)
310{
311	typhoon_inc_index(index, count, COMMAND_ENTRIES);
312}
313
314static inline void
315typhoon_inc_resp_index(u32 *index, const int count)
316{
317	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
318}
319
320static inline void
321typhoon_inc_rxfree_index(u32 *index, const int count)
322{
323	typhoon_inc_index(index, count, RXFREE_ENTRIES);
324}
325
326static inline void
327typhoon_inc_tx_index(u32 *index, const int count)
328{
329	/* if we start using the Hi Tx ring, this needs updateing */
330	typhoon_inc_index(index, count, TXLO_ENTRIES);
331}
332
333static inline void
334typhoon_inc_rx_index(u32 *index, const int count)
335{
336	/* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
337	*index += count * sizeof(struct rx_desc);
338	*index %= RX_ENTRIES * sizeof(struct rx_desc);
339}
340
/* Hard-reset the 3XP and wait for it to come back up.
 * @ioaddr: mapped register base
 * @wait_type: NoWait (fire and forget), WaitNoSleep (busy-wait), or
 *             WaitSleep (may schedule while polling).
 * Returns 0 once the card reports WAITING_FOR_HOST, -ETIMEDOUT otherwise.
 * Interrupts are masked and acknowledged around the reset so a stale
 * status can't fire into the host.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if(wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* mask and ack all interrupts before yanking the card */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if(wait_type != NoWait) {
		for(i = 0; i < timeout; i++) {
			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if(wait_type == WaitSleep)
				schedule_timeout_uninterruptible(1);
			else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* The 3XP seems to need a little extra time to complete the load
	 * of the sleep image before we can reliably boot it. Failure to
	 * do this occasionally results in a hung adapter after boot in
	 * typhoon_init_one() while trying to read the MAC address or
	 * putting the card to sleep. 3Com's driver waits 5ms, but
	 * that seems to be overkill. However, if we can sleep, we might
	 * as well give it that much time. Otherwise, we'll give it 500us,
	 * which should be enough (I've see it work well at 100us, but still
	 * saw occasional problems.)
	 */
	if(wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
395
396static int
397typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
398{
399	int i, err = 0;
400
401	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
402		if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
403			goto out;
404		udelay(TYPHOON_UDELAY);
405	}
406
407	err = -ETIMEDOUT;
408
409out:
410	return err;
411}
412
413static inline void
414typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
415{
416	if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
417		netif_carrier_off(dev);
418	else
419		netif_carrier_on(dev);
420}
421
/* Answer a HELLO request from the card by posting a HELLO_RESP command.
 * Called from response processing; takes command_lock opportunistically.
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
	struct basic_ring *ring = &tp->cmdRing;
	struct cmd_desc *cmd;

	/* We only get a hello request if we've not sent anything to the
	 * card in a long while. If the lock is held, then we're in the
	 * process of issuing a command, so we don't need to respond.
	 */
	if(spin_trylock(&tp->command_lock)) {
		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
		typhoon_inc_cmd_index(&ring->lastWrite, 1);

		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
		/* descriptor must be visible before the doorbell write */
		wmb();
		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
		spin_unlock(&tp->command_lock);
	}
}
442
/* Drain the response ring, from respCleared up to the card's respReady.
 * @resp_save: if non-NULL, the first sequenced response (resp->seqNo set)
 *             of up to @resp_size descriptors is copied here (handling
 *             ring wrap); unsolicited responses (media status, hello) are
 *             dispatched inline, anything else is logged and dropped.
 * Returns nonzero if the awaited response was captured (resp_save
 * consumed), zero if the caller is still waiting.
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
				struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while(cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		/* a response spans numDesc + 1 descriptors */
		count = resp->numDesc + 1;
		if(resp_save && resp->seqNo) {
			if(count > resp_size) {
				/* caller's buffer is too small; flag and drop */
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			/* copy out, splitting at the ring wrap if needed */
			wrap_len = 0;
			len = count * sizeof(*resp);
			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if(unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* mark the awaited response as captured */
			resp_save = NULL;
		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			netdev_err(tp->dev,
				   "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
				   le16_to_cpu(resp->cmd),
				   resp->numDesc, resp->flags,
				   le16_to_cpu(resp->parm1),
				   le32_to_cpu(resp->parm2),
				   le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	/* hand the consumed descriptors back to the card */
	indexes->respCleared = cpu_to_le32(cleared);
	wmb();
	return (resp_save == NULL);
}
501
502static inline int
503typhoon_num_free(int lastWrite, int lastRead, int ringSize)
504{
505	/* this works for all descriptors but rx_desc, as they are a
506	 * different size than the cmd_desc -- everyone else is the same
507	 */
508	lastWrite /= sizeof(struct cmd_desc);
509	lastRead /= sizeof(struct cmd_desc);
510	return (ringSize + lastRead - lastWrite - 1) % ringSize;
511}
512
513static inline int
514typhoon_num_free_cmd(struct typhoon *tp)
515{
516	int lastWrite = tp->cmdRing.lastWrite;
517	int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
518
519	return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
520}
521
522static inline int
523typhoon_num_free_resp(struct typhoon *tp)
524{
525	int respReady = le32_to_cpu(tp->indexes->respReady);
526	int respCleared = le32_to_cpu(tp->indexes->respCleared);
527
528	return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
529}
530
531static inline int
532typhoon_num_free_tx(struct transmit_ring *ring)
533{
534	/* if we start using the Hi Tx ring, this needs updating */
535	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
536}
537
/* Post @num_cmd command descriptors to the 3XP and, if the command asks
 * for a response (TYPHOON_CMD_RESPOND), busy-wait for up to ~500ms and
 * copy up to @num_resp response descriptors into @resp.
 * Returns 0 on success, -ENOMEM if either ring lacks space, -ETIMEDOUT
 * if no response arrives, -EIO if the card flags the response as an
 * error.  Serialized by tp->command_lock; may spin for several ms, so
 * avoid response-bearing commands where possible (see comment below).
 */
static int
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
		      int num_resp, struct resp_desc *resp)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct basic_ring *ring = &tp->cmdRing;
	struct resp_desc local_resp;
	int i, err = 0;
	int got_resp;
	int freeCmd, freeResp;
	int len, wrap_len;

	spin_lock(&tp->command_lock);

	freeCmd = typhoon_num_free_cmd(tp);
	freeResp = typhoon_num_free_resp(tp);

	if(freeCmd < num_cmd || freeResp < num_resp) {
		netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
			   freeCmd, num_cmd, freeResp, num_resp);
		err = -ENOMEM;
		goto out;
	}

	if(cmd->flags & TYPHOON_CMD_RESPOND) {
		/* If we're expecting a response, but the caller hasn't given
		 * us a place to put it, we'll provide one.
		 */
		tp->awaiting_resp = 1;
		if(resp == NULL) {
			resp = &local_resp;
			num_resp = 1;
		}
	}

	/* copy the command(s) into the ring, splitting at the wrap point */
	wrap_len = 0;
	len = num_cmd * sizeof(*cmd);
	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
		len = COMMAND_RING_SIZE - ring->lastWrite;
	}

	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
	if(unlikely(wrap_len)) {
		struct cmd_desc *wrap_ptr = cmd;
		wrap_ptr += len / sizeof(*cmd);
		memcpy(ring->ringBase, wrap_ptr, wrap_len);
	}

	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);

	/* "I feel a presence... another warrior is on the mesa."
	 */
	wmb();	/* descriptors must be visible before the doorbell */
	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
	typhoon_post_pci_writes(tp->ioaddr);

	if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
		goto out;

	/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
	 * preempt or do anything other than take interrupts. So, don't
	 * wait for a response unless you have to.
	 *
	 * I've thought about trying to sleep here, but we're called
	 * from many contexts that don't allow that. Also, given the way
	 * 3Com has implemented irq coalescing, we would likely timeout --
	 * this has been observed in real life!
	 *
	 * The big killer is we have to wait to get stats from the card,
	 * though we could go to a periodic refresh of those if we don't
	 * mind them getting somewhat stale. The rest of the waiting
	 * commands occur during open/close/suspend/resume, so they aren't
	 * time critical. Creating SAs in the future will also have to
	 * wait here.
	 */
	got_resp = 0;
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
		if(indexes->respCleared != indexes->respReady)
			got_resp = typhoon_process_response(tp, num_resp,
								resp);
		udelay(TYPHOON_UDELAY);
	}

	if(!got_resp) {
		err = -ETIMEDOUT;
		goto out;
	}

	/* Collect the error response even if we don't care about the
	 * rest of the response
	 */
	if(resp->flags & TYPHOON_RESP_ERROR)
		err = -EIO;

out:
	if(tp->awaiting_resp) {
		tp->awaiting_resp = 0;
		smp_wmb();

		/* Ugh. If a response was added to the ring between
		 * the call to typhoon_process_response() and the clearing
		 * of tp->awaiting_resp, we could have missed the interrupt
		 * and it could hang in the ring an indeterminate amount of
		 * time. So, check for it, and interrupt ourselves if this
		 * is the case.
		 */
		if(indexes->respCleared != indexes->respReady)
			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
	}

	spin_unlock(&tp->command_lock);
	return err;
}
652
/* VLAN core callback: record the new vlan_group and, when VLAN handling
 * transitions on<->off, update the card's offload task mask.  Note the
 * state_lock is deliberately dropped around typhoon_issue_command() --
 * the command path is slow and takes its own lock.
 */
static void
typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	int err;

	spin_lock_bh(&tp->state_lock);
	/* "!a != !b" is a boolean XOR: did enabled-ness actually change? */
	if(!tp->vlgrp != !grp) {
		/* We've either been turned on for the first time, or we've
		 * been turned off. Update the 3XP.
		 */
		if(grp)
			tp->offload |= TYPHOON_OFFLOAD_VLAN;
		else
			tp->offload &= ~TYPHOON_OFFLOAD_VLAN;

		/* If the interface is up, the runtime is running -- and we
		 * must be up for the vlan core to call us.
		 *
		 * Do the command outside of the spin lock, as it is slow.
		 */
		INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
					TYPHOON_CMD_SET_OFFLOAD_TASKS);
		xp_cmd.parm2 = tp->offload;
		xp_cmd.parm3 = tp->offload;
		spin_unlock_bh(&tp->state_lock);
		err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
		if(err < 0)
			netdev_err(tp->dev, "vlan offload error %d\n", -err);
		spin_lock_bh(&tp->state_lock);
	}

	/* now make the change visible */
	tp->vlgrp = grp;
	spin_unlock_bh(&tp->state_lock);
}
690
/* Write a TSO option descriptor for @skb into the next Tx ring slot.
 * @ring_dma is the DMA base of the ring, used so the card can write the
 * completion status back into this descriptor's bytesTx field.
 * Advances txRing->lastWrite past the descriptor it fills.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
			u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	/* DMA address the card should respond to: this descriptor's
	 * bytesTx field within the ring
	 */
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
711
/* ndo_start_xmit: map @skb for DMA and post it on the low-priority Tx
 * ring as a header descriptor, optional TSO option descriptor, and one
 * fragment descriptor per data segment, then ring the Tx doorbell.
 * Stops the queue if a worst-case packet would no longer fit.
 * Always returns NETDEV_TX_OK (may briefly spin if the ring race below
 * is hit).
 */
static netdev_tx_t
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTRIES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* stash the skb pointer in the (unused-for-header) address field so
	 * Tx completion can find it to free/unmap
	 */
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* linear skb: one fragment descriptor covers it all */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* headlen first, then one descriptor per page fragment */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
				         PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();	/* descriptors must be visible before the doorbell write */
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
859
/* ndo_set_rx_mode: program the card's Rx filter from dev->flags and the
 * multicast list.  Promiscuous and all-multicast map to filter bits;
 * otherwise a 64-bit CRC hash of the multicast addresses is uploaded.
 * Commands are issued without waiting for a response.
 */
static void
typhoon_set_rx_mode(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	u32 mc_filter[2];	/* 64-bit multicast hash table */
	__le16 filter;

	/* NOTE(review): TYPHOON_RX_FILTER_* constants are presumably
	 * already little-endian in typhoon.h -- confirm before changing.
	 */
	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(dev->flags & IFF_PROMISC) {
		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* low 6 bits of the Ethernet CRC select a hash bit */
			int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
		}

		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
					 TYPHOON_CMD_SET_MULTICAST_HASH);
		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
898
899static int
900typhoon_do_get_stats(struct typhoon *tp)
901{
902	struct net_device_stats *stats = &tp->stats;
903	struct net_device_stats *saved = &tp->stats_saved;
904	struct cmd_desc xp_cmd;
905	struct resp_desc xp_resp[7];
906	struct stats_resp *s = (struct stats_resp *) xp_resp;
907	int err;
908
909	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
910	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
911	if(err < 0)
912		return err;
913
914	/* 3Com's Linux driver uses txMultipleCollisions as it's
915	 * collisions value, but there is some other collision info as well...
916	 *
917	 * The extra status reported would be a good candidate for
918	 * ethtool_ops->get_{strings,stats}()
919	 */
920	stats->tx_packets = le32_to_cpu(s->txPackets);
921	stats->tx_bytes = le64_to_cpu(s->txBytes);
922	stats->tx_errors = le32_to_cpu(s->txCarrierLost);
923	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
924	stats->collisions = le32_to_cpu(s->txMultipleCollisions);
925	stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
926	stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
927	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
928	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
929			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
930	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
931	stats->rx_length_errors = le32_to_cpu(s->rxOversized);
932	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
933			SPEED_100 : SPEED_10;
934	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
935			DUPLEX_FULL : DUPLEX_HALF;
936
937	/* add in the saved statistics
938	 */
939	stats->tx_packets += saved->tx_packets;
940	stats->tx_bytes += saved->tx_bytes;
941	stats->tx_errors += saved->tx_errors;
942	stats->collisions += saved->collisions;
943	stats->rx_packets += saved->rx_packets;
944	stats->rx_bytes += saved->rx_bytes;
945	stats->rx_fifo_errors += saved->rx_fifo_errors;
946	stats->rx_errors += saved->rx_errors;
947	stats->rx_crc_errors += saved->rx_crc_errors;
948	stats->rx_length_errors += saved->rx_length_errors;
949
950	return 0;
951}
952
953static struct net_device_stats *
954typhoon_get_stats(struct net_device *dev)
955{
956	struct typhoon *tp = netdev_priv(dev);
957	struct net_device_stats *stats = &tp->stats;
958	struct net_device_stats *saved = &tp->stats_saved;
959
960	smp_rmb();
961	if(tp->card_state == Sleeping)
962		return saved;
963
964	if(typhoon_do_get_stats(tp) < 0) {
965		netdev_err(dev, "error getting stats\n");
966		return saved;
967	}
968
969	return stats;
970}
971
972static int
973typhoon_set_mac_address(struct net_device *dev, void *addr)
974{
975	struct sockaddr *saddr = (struct sockaddr *) addr;
976
977	if(netif_running(dev))
978		return -EBUSY;
979
980	memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
981	return 0;
982}
983
984static void
985typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
986{
987	struct typhoon *tp = netdev_priv(dev);
988	struct pci_dev *pci_dev = tp->pdev;
989	struct cmd_desc xp_cmd;
990	struct resp_desc xp_resp[3];
991
992	smp_rmb();
993	if(tp->card_state == Sleeping) {
994		strcpy(info->fw_version, "Sleep image");
995	} else {
996		INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
997		if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
998			strcpy(info->fw_version, "Unknown runtime");
999		} else {
1000			u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
1001			snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1002				 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1003				 sleep_ver & 0xfff);
1004		}
1005	}
1006
1007	strcpy(info->driver, KBUILD_MODNAME);
1008	strcpy(info->version, UTS_RELEASE);
1009	strcpy(info->bus_info, pci_name(pci_dev));
1010}
1011
1012static int
1013typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1014{
1015	struct typhoon *tp = netdev_priv(dev);
1016
1017	cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1018				SUPPORTED_Autoneg;
1019
1020	switch (tp->xcvr_select) {
1021	case TYPHOON_XCVR_10HALF:
1022		cmd->advertising = ADVERTISED_10baseT_Half;
1023		break;
1024	case TYPHOON_XCVR_10FULL:
1025		cmd->advertising = ADVERTISED_10baseT_Full;
1026		break;
1027	case TYPHOON_XCVR_100HALF:
1028		cmd->advertising = ADVERTISED_100baseT_Half;
1029		break;
1030	case TYPHOON_XCVR_100FULL:
1031		cmd->advertising = ADVERTISED_100baseT_Full;
1032		break;
1033	case TYPHOON_XCVR_AUTONEG:
1034		cmd->advertising = ADVERTISED_10baseT_Half |
1035					    ADVERTISED_10baseT_Full |
1036					    ADVERTISED_100baseT_Half |
1037					    ADVERTISED_100baseT_Full |
1038					    ADVERTISED_Autoneg;
1039		break;
1040	}
1041
1042	if(tp->capabilities & TYPHOON_FIBER) {
1043		cmd->supported |= SUPPORTED_FIBRE;
1044		cmd->advertising |= ADVERTISED_FIBRE;
1045		cmd->port = PORT_FIBRE;
1046	} else {
1047		cmd->supported |= SUPPORTED_10baseT_Half |
1048		    			SUPPORTED_10baseT_Full |
1049					SUPPORTED_TP;
1050		cmd->advertising |= ADVERTISED_TP;
1051		cmd->port = PORT_TP;
1052	}
1053
1054	/* need to get stats to make these link speed/duplex valid */
1055	typhoon_do_get_stats(tp);
1056	cmd->speed = tp->speed;
1057	cmd->duplex = tp->duplex;
1058	cmd->phy_address = 0;
1059	cmd->transceiver = XCVR_INTERNAL;
1060	if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1061		cmd->autoneg = AUTONEG_ENABLE;
1062	else
1063		cmd->autoneg = AUTONEG_DISABLE;
1064	cmd->maxtxpkt = 1;
1065	cmd->maxrxpkt = 1;
1066
1067	return 0;
1068}
1069
1070static int
1071typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1072{
1073	struct typhoon *tp = netdev_priv(dev);
1074	struct cmd_desc xp_cmd;
1075	__le16 xcvr;
1076	int err;
1077
1078	err = -EINVAL;
1079	if(cmd->autoneg == AUTONEG_ENABLE) {
1080		xcvr = TYPHOON_XCVR_AUTONEG;
1081	} else {
1082		if(cmd->duplex == DUPLEX_HALF) {
1083			if(cmd->speed == SPEED_10)
1084				xcvr = TYPHOON_XCVR_10HALF;
1085			else if(cmd->speed == SPEED_100)
1086				xcvr = TYPHOON_XCVR_100HALF;
1087			else
1088				goto out;
1089		} else if(cmd->duplex == DUPLEX_FULL) {
1090			if(cmd->speed == SPEED_10)
1091				xcvr = TYPHOON_XCVR_10FULL;
1092			else if(cmd->speed == SPEED_100)
1093				xcvr = TYPHOON_XCVR_100FULL;
1094			else
1095				goto out;
1096		} else
1097			goto out;
1098	}
1099
1100	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1101	xp_cmd.parm1 = xcvr;
1102	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1103	if(err < 0)
1104		goto out;
1105
1106	tp->xcvr_select = xcvr;
1107	if(cmd->autoneg == AUTONEG_ENABLE) {
1108		tp->speed = 0xff;	/* invalid */
1109		tp->duplex = 0xff;	/* invalid */
1110	} else {
1111		tp->speed = cmd->speed;
1112		tp->duplex = cmd->duplex;
1113	}
1114
1115out:
1116	return err;
1117}
1118
1119static void
1120typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1121{
1122	struct typhoon *tp = netdev_priv(dev);
1123
1124	wol->supported = WAKE_PHY | WAKE_MAGIC;
1125	wol->wolopts = 0;
1126	if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1127		wol->wolopts |= WAKE_PHY;
1128	if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1129		wol->wolopts |= WAKE_MAGIC;
1130	memset(&wol->sopass, 0, sizeof(wol->sopass));
1131}
1132
1133static int
1134typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1135{
1136	struct typhoon *tp = netdev_priv(dev);
1137
1138	if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1139		return -EINVAL;
1140
1141	tp->wol_events = 0;
1142	if(wol->wolopts & WAKE_PHY)
1143		tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1144	if(wol->wolopts & WAKE_MAGIC)
1145		tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1146
1147	return 0;
1148}
1149
1150static u32
1151typhoon_get_rx_csum(struct net_device *dev)
1152{
1153	/* For now, we don't allow turning off RX checksums.
1154	 */
1155	return 1;
1156}
1157
1158static void
1159typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1160{
1161	ering->rx_max_pending = RXENT_ENTRIES;
1162	ering->rx_mini_max_pending = 0;
1163	ering->rx_jumbo_max_pending = 0;
1164	ering->tx_max_pending = TXLO_ENTRIES - 1;
1165
1166	ering->rx_pending = RXENT_ENTRIES;
1167	ering->rx_mini_pending = 0;
1168	ering->rx_jumbo_pending = 0;
1169	ering->tx_pending = TXLO_ENTRIES - 1;
1170}
1171
/* ethtool entry points. TX checksum, scatter-gather, and TSO toggles
 * use the generic helpers; RX checksum reports always-on (see
 * typhoon_get_rx_csum()).
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_rx_csum		= typhoon_get_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
};
1185
1186static int
1187typhoon_wait_interrupt(void __iomem *ioaddr)
1188{
1189	int i, err = 0;
1190
1191	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1192		if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1193		   TYPHOON_INTR_BOOTCMD)
1194			goto out;
1195		udelay(TYPHOON_UDELAY);
1196	}
1197
1198	err = -ETIMEDOUT;
1199
1200out:
1201	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1202	return err;
1203}
1204
1205#define shared_offset(x)	offsetof(struct typhoon_shared, x)
1206
/* Fill in the typhoon_interface structure in host-shared memory with
 * the bus address and size of every ring, and set up the host-side
 * ring bookkeeping in *tp. The shared area's address is later handed
 * to the 3XP as its boot record (see typhoon_boot_3XP()).
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* host-side virtual-address views of the same rings */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	/* runtime image not loaded yet -- card starts out asleep */
	tp->card_state = Sleeping;

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);

	/* Force the writes to the shared memory area out before continuing. */
	wmb();
}
1276
1277static void
1278typhoon_init_rings(struct typhoon *tp)
1279{
1280	memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1281
1282	tp->txLoRing.lastWrite = 0;
1283	tp->txHiRing.lastWrite = 0;
1284	tp->rxLoRing.lastWrite = 0;
1285	tp->rxHiRing.lastWrite = 0;
1286	tp->rxBuffRing.lastWrite = 0;
1287	tp->cmdRing.lastWrite = 0;
1288	tp->cmdRing.lastWrite = 0;
1289
1290	tp->txLoRing.lastRead = 0;
1291	tp->txHiRing.lastRead = 0;
1292}
1293
/* Runtime firmware image, shared by all typhoon devices and cached
 * after the first successful typhoon_request_firmware().
 */
static const struct firmware *typhoon_fw;
1295
1296static int
1297typhoon_request_firmware(struct typhoon *tp)
1298{
1299	const struct typhoon_file_header *fHdr;
1300	const struct typhoon_section_header *sHdr;
1301	const u8 *image_data;
1302	u32 numSections;
1303	u32 section_len;
1304	u32 remaining;
1305	int err;
1306
1307	if (typhoon_fw)
1308		return 0;
1309
1310	err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1311	if (err) {
1312		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1313			   FIRMWARE_NAME);
1314		return err;
1315	}
1316
1317	image_data = (u8 *) typhoon_fw->data;
1318	remaining = typhoon_fw->size;
1319	if (remaining < sizeof(struct typhoon_file_header))
1320		goto invalid_fw;
1321
1322	fHdr = (struct typhoon_file_header *) image_data;
1323	if (memcmp(fHdr->tag, "TYPHOON", 8))
1324		goto invalid_fw;
1325
1326	numSections = le32_to_cpu(fHdr->numSections);
1327	image_data += sizeof(struct typhoon_file_header);
1328	remaining -= sizeof(struct typhoon_file_header);
1329
1330	while (numSections--) {
1331		if (remaining < sizeof(struct typhoon_section_header))
1332			goto invalid_fw;
1333
1334		sHdr = (struct typhoon_section_header *) image_data;
1335		image_data += sizeof(struct typhoon_section_header);
1336		section_len = le32_to_cpu(sHdr->len);
1337
1338		if (remaining < section_len)
1339			goto invalid_fw;
1340
1341		image_data += section_len;
1342		remaining -= section_len;
1343	}
1344
1345	return 0;
1346
1347invalid_fw:
1348	netdev_err(tp->dev, "Invalid firmware image\n");
1349	release_firmware(typhoon_fw);
1350	typhoon_fw = NULL;
1351	return -EINVAL;
1352}
1353
/* Download the runtime image to the 3XP, one PAGE_SIZE chunk at a
 * time, through a single coherent DMA bounce page. The image layout
 * was already validated by typhoon_request_firmware(). The card is
 * driven via the boot-command interrupt handshake: each chunk is
 * announced with TYPHOON_BOOTCMD_SEG_AVAILABLE after the card reports
 * WAITING_FOR_SEGMENT. Returns 0 or a negative errno.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = (u8 *) typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is vmalloc()'d and may not be physically contiguous,
	 * so we allocate some consistent memory to copy the sections into.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		netdev_err(tp->dev, "no DMA mem for firmware\n");
		goto err_out;
	}

	/* enable (and unmask) only the boot-command interrupt for the
	 * duration of the download; restored in err_out_irq below */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(tp->dev, "card ready timeout\n");
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* hand the card the image start address and the five-word HMAC
	 * digest from the file header, then kick off the download */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while(section_len) {
			/* copy at most one bounce page per handshake */
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				netdev_err(tp->dev, "segment ready timeout\n");
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
								   dpage, len,
								   0));

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
					ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			/* no card does 64-bit DAC; high DMA bits stay 0 */
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
					ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		netdev_err(tp->dev, "final segment ready timeout\n");
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the original interrupt mask/enable state */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1494
/* Boot the 3XP: wait for it to reach initial_status, hand it the boot
 * record (the shared memory area set up in typhoon_init_interface()),
 * then issue the boot command. Returns 0 or -ETIMEDOUT.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		netdev_err(tp->dev, "boot ready timeout\n");
		goto out_timeout;
	}

	/* no card does 64-bit DAC; the high address bits stay zero */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1530
/* Walk the Tx ring from txRing->lastRead up to *index (the card's
 * cleared position), releasing completed work. A packet is described
 * by a TYPHOON_TX_DESC (which carries the skb pointer, presumably
 * stashed by the transmit path -- the descriptor write is outside this
 * view) followed by TYPHOON_FRAG_DESCs carrying the DMA mappings.
 * Returns the new lastRead value; the caller is responsible for
 * storing it back (see typhoon_tx_complete()).
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.
			 */
			unsigned long ptr = tx->tx_addr;
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		/* mark the descriptor slot as free */
		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1566
/* Reap completed Tx descriptors on the low-priority ring and wake the
 * queue once there is room for a worst-case packet (one descriptor per
 * fragment plus the header descriptor, with a little slack).
 */
static void
typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead;
	int numDesc = MAX_SKB_FRAGS + 1;

	/* This will need changing if we start to use the Hi Tx ring. */
	lastRead = typhoon_clean_tx(tp, txRing, index);
	if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
				lastRead, TXLO_ENTRIES) > (numDesc + 2))
		netif_wake_queue(tp->dev);

	txRing->lastRead = lastRead;
	/* publish lastRead before other CPUs re-check ring occupancy */
	smp_wmb();
}
1583
/* Put the rx buffer at idx back on the free-buffer ring, reusing its
 * existing skb and DMA mapping. If the free ring has no room (one
 * slot is always kept empty so head != tail), the skb is dropped
 * instead and the slot left empty for typhoon_fill_free_ring().
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared)) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1610
/* Allocate a fresh skb for rx slot idx, DMA-map it, and post it on the
 * free-buffer ring for the 3XP to fill. Returns 0 on success, -ENOMEM
 * if the free ring is full or the allocation fails.
 */
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	rxb->skb = NULL;

	/* one slot stays empty so a full ring is distinguishable from
	 * an empty one */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared))
		return -ENOMEM;

	skb = dev_alloc_skb(PKT_BUF_SZ);
	if(!skb)
		return -ENOMEM;


	skb->dev = tp->dev;
	/* NOTE(review): pci_map_single() result is not checked with
	 * pci_dma_mapping_error() -- consider verifying on fix-up */
	dma_addr = pci_map_single(tp->pdev, skb->data,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Since no card does 64 bit DAC, the high bits will never
	 * change from zero.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1651
/* Process received packets on one rx ring, from *cleared up to *ready,
 * limited by budget. Small frames (< rx_copybreak) are copied into a
 * fresh skb and the original buffer recycled; larger frames are passed
 * up directly and the slot refilled. Hardware checksum results are
 * honored only when both the IP and the TCP-or-UDP checksum bits say
 * good. Returns the number of packets delivered to the stack.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* rx->addr carries the rxbuffers[] slot index we posted */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		if(rx->flags & TYPHOON_RX_ERROR) {
			/* errored frame: give the buffer back untouched */
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* 2-byte reserve aligns the IP header */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
		   csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			new_skb->ip_summed = CHECKSUM_NONE;

		/* state_lock guards tp->vlgrp against concurrent changes */
		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		received++;
		budget--;
	}
	/* publish how far we got so the card can reuse the descriptors */
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1731
1732static void
1733typhoon_fill_free_ring(struct typhoon *tp)
1734{
1735	u32 i;
1736
1737	for(i = 0; i < RXENT_ENTRIES; i++) {
1738		struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1739		if(rxb->skb)
1740			continue;
1741		if(typhoon_alloc_rx_skb(tp, i) < 0)
1742			break;
1743	}
1744}
1745
/* NAPI poll: drain command responses and Tx completions, then receive
 * from the high- and low-priority rx rings within the given budget
 * (the Hi ring is served first; the Lo ring gets what remains). The
 * free-buffer ring is refilled if it ran dry. When less than the full
 * budget was used, polling is completed and the device interrupt
 * unmasked again (see typhoon_interrupt()).
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* make sure we see the card's latest index updates */
	rmb();
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
			typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
			   		&indexes->rxHiCleared, budget);
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable interrupts now that polling is done */
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1786
/* Interrupt handler: acknowledge the interrupt, mask further device
 * interrupts, and hand processing off to NAPI (typhoon_poll(), which
 * unmasks them again when done). Returns IRQ_NONE for interrupts that
 * are not ours on a shared line.
 */
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct typhoon *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->ioaddr;
	u32 intr_status;

	intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if(!(intr_status & TYPHOON_INTR_HOST_INT))
		return IRQ_NONE;

	/* ack everything we saw */
	iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if (napi_schedule_prep(&tp->napi)) {
		iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__napi_schedule(&tp->napi);
	} else {
		netdev_err(dev, "Error, poll already scheduled\n");
	}
	return IRQ_HANDLED;
}
1810
1811static void
1812typhoon_free_rx_rings(struct typhoon *tp)
1813{
1814	u32 i;
1815
1816	for(i = 0; i < RXENT_ENTRIES; i++) {
1817		struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1818		if(rxb->skb) {
1819			pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1820				       PCI_DMA_FROMDEVICE);
1821			dev_kfree_skb(rxb->skb);
1822			rxb->skb = NULL;
1823		}
1824	}
1825}
1826
/* Put the 3XP into its sleep image with the given wake events armed,
 * then move the PCI device into the requested low-power state.
 * Returns 0 or a negative errno.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	/* arm the wake events (see typhoon_set_wol()) before sleeping */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
			   err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1863
/* Wake the 3XP from its sleep image: restore PCI power/config state
 * and issue the wakeup command. Returns 0, or the result of a full
 * reset when one is required (see comment below), or -ETIMEDOUT from
 * that reset.
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Post 2.x.x versions of the Sleep Image require a reset before
	 * we can download the Runtime Image. But let's not make users of
	 * the old firmware pay for the reset.
	 */
	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
			(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1884
/* Bring the 3XP runtime up: reset rings, download and boot the runtime
 * image, then configure it (MTU, MAC, coalescing, transceiver, VLAN
 * ethertype, offloads, rx filters) and enable Tx/Rx. On any failure
 * the card is reset and the rings reinitialized. Returns 0 or a
 * negative errno.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		netdev_err(tp->dev, "cannot load runtime on 3XP\n");
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "cannot boot 3XP\n");
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* station address goes to the card as two big-endian words */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
	 * us some more information on how to control it.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* state_lock guards tp->offload against concurrent updates */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	spin_lock_bh(&tp->state_lock);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	spin_unlock_bh(&tp->state_lock);
	if(err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* publish Running before interrupts start arriving; pairs with
	 * the smp_rmb() in the stats/drvinfo paths */
	tp->card_state = Running;
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
1978
/* Shut the 3XP runtime down: disable Rx, wait briefly for outstanding
 * Tx to drain, disable Tx, snapshot the statistics, halt the card, and
 * reset it. Any Tx work still pending after the reset is cleaned up
 * host-side. Returns 0 or -ETIMEDOUT if the reset fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		netdev_err(tp->dev, "unable to reset 3XP\n");
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2039
2040static void
2041typhoon_tx_timeout(struct net_device *dev)
2042{
2043	struct typhoon *tp = netdev_priv(dev);
2044
2045	if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2046		netdev_warn(dev, "could not reset in tx timeout\n");
2047		goto truly_dead;
2048	}
2049
2050	/* If we ever start using the Hi ring, it will need cleaning too */
2051	typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2052	typhoon_free_rx_rings(tp);
2053
2054	if(typhoon_start_runtime(tp) < 0) {
2055		netdev_err(dev, "could not start runtime in tx timeout\n");
2056		goto truly_dead;
2057        }
2058
2059	netif_wake_queue(dev);
2060	return;
2061
2062truly_dead:
2063	/* Reset the hardware, and turn off carrier to avoid more timeouts */
2064	typhoon_reset(tp->ioaddr, NoWait);
2065	netif_carrier_off(dev);
2066}
2067
/* net_device open routine -- bring the interface up.
 *
 * Loads the runtime firmware, wakes the 3XP from its sleep image,
 * grabs the (shared) IRQ, enables NAPI, and starts the runtime.  On
 * failure the steps are unwound in reverse and the card is rebooted
 * into its sleep image and put back into D3hot, so a later open can
 * start from a known state.
 *
 * Returns 0 on success or a negative errno from the failing step.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_request_firmware(tp);
	if (err)
		goto out;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		netdev_err(dev, "unable to wakeup device\n");
		goto out_sleep;
	}

	err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
				dev->name, dev);
	if(err < 0)
		goto out_sleep;

	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if(err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* Try to return the card to its sleep image before sleeping it;
	 * if even that fails, just hold it in reset.
	 */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to reboot into sleep img\n");
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to go back to sleep\n");

out:
	return err;
}
2116
/* net_device stop routine -- take the interface down.
 *
 * Stops the queue and NAPI, halts the runtime image, releases the IRQ
 * and Rx resources, then reboots the sleep image and puts the card
 * into D3hot.  Always returns 0; failures along the way are only
 * logged, since close cannot be refused.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		netdev_err(dev, "unable to stop runtime\n");

	/* Make sure there is no irq handler running on a different CPU. */
	free_irq(dev->irq, dev);

	/* Rings are re-initialized now so the next open finds them clean. */
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		netdev_err(dev, "unable to boot sleep image\n");

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to put card to sleep\n");

	return 0;
}
2142
2143#ifdef CONFIG_PM
/* PCI resume hook -- wake the card and restart the runtime image.
 *
 * If the interface is down nothing needs doing; it will be brought up
 * fully on the next open.  Returns 0 on success, or -EBUSY with the
 * card left in reset (and the netdev detached) if it could not be
 * revived.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* If we're down, resume when we are upped.
	 */
	if(!netif_running(dev))
		return 0;

	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "critical: could not wake up in resume\n");
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "critical: could not start runtime in resume\n");
		goto reset;
	}

	netif_device_attach(dev);
	return 0;

reset:
	typhoon_reset(tp->ioaddr, NoWait);
	return -EBUSY;
}
2172
/* PCI suspend hook -- halt the runtime and arm the sleep image.
 *
 * Refuses (-EBUSY) when magic-packet wake is requested while VLANs are
 * registered -- NOTE(review): presumably the sleep image cannot match
 * magic packets inside VLAN-tagged frames; confirm against the 3XP
 * docs.  Otherwise stops the runtime, boots the sleep image,
 * programs it with the station address and a minimal Rx filter (so
 * wake-up frames can be recognized), and drops into the requested PCI
 * power state with tp->wol_events armed.  On any failure the device is
 * resumed and -EBUSY returned.
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "unable to stop runtime\n");
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to boot sleep image\n");
		goto need_resume;
	}

	/* Hand the station address to the sleep image. */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set mac address in suspend\n");
		goto need_resume;
	}

	/* Only directed and broadcast frames are interesting while asleep. */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set rx filter in suspend\n");
		goto need_resume;
	}

	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		netdev_err(dev, "unable to put card to sleep\n");
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2234#endif
2235
/* Probe whether MMIO (BAR 1) actually works on this board.
 *
 * Maps the MMIO BAR and tries to trigger a self-interrupt through it;
 * if the self-interrupt status bit then reads back set, MMIO works.
 * Bails out (falling back to port IO) unless the 3XP is still in its
 * WAITING_FOR_HOST pre-boot state.
 *
 * Returns 1 if MMIO should be used, 0 to use port IO.
 */
static int __devinit
typhoon_test_mmio(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if(!ioaddr)
		goto out;

	if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	/* mask and ack all interrupts, then enable them for the test */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Ok, see if we can change our interrupt status register by
	 * sending ourselves an interrupt. If so, then MMIO works.
	 * The 50usec delay is arbitrary -- it could probably be smaller.
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		/* read back to flush the posted write before the delay */
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if(val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* restore: mask and ack everything, leave interrupts disabled */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if(!mode)
		pr_info("%s: falling back to port IO\n", pci_name(pdev));
	return mode;
}
2281
/* net_device callbacks; installed on the netdev in typhoon_init_one() */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open		= typhoon_open,
	.ndo_stop		= typhoon_close,
	.ndo_start_xmit		= typhoon_start_tx,
	.ndo_set_multicast_list	= typhoon_set_rx_mode,
	.ndo_tx_timeout		= typhoon_tx_timeout,
	.ndo_get_stats		= typhoon_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= typhoon_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_vlan_rx_register	= typhoon_vlan_rx_register,
};
2294
2295static int __devinit
2296typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2297{
2298	struct net_device *dev;
2299	struct typhoon *tp;
2300	int card_id = (int) ent->driver_data;
2301	void __iomem *ioaddr;
2302	void *shared;
2303	dma_addr_t shared_dma;
2304	struct cmd_desc xp_cmd;
2305	struct resp_desc xp_resp[3];
2306	int err = 0;
2307	const char *err_msg;
2308
2309	dev = alloc_etherdev(sizeof(*tp));
2310	if(dev == NULL) {
2311		err_msg = "unable to alloc new net device";
2312		err = -ENOMEM;
2313		goto error_out;
2314	}
2315	SET_NETDEV_DEV(dev, &pdev->dev);
2316
2317	err = pci_enable_device(pdev);
2318	if(err < 0) {
2319		err_msg = "unable to enable device";
2320		goto error_out_dev;
2321	}
2322
2323	err = pci_set_mwi(pdev);
2324	if(err < 0) {
2325		err_msg = "unable to set MWI";
2326		goto error_out_disable;
2327	}
2328
2329	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2330	if(err < 0) {
2331		err_msg = "No usable DMA configuration";
2332		goto error_out_mwi;
2333	}
2334
2335	/* sanity checks on IO and MMIO BARs
2336	 */
2337	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2338		err_msg = "region #1 not a PCI IO resource, aborting";
2339		err = -ENODEV;
2340		goto error_out_mwi;
2341	}
2342	if(pci_resource_len(pdev, 0) < 128) {
2343		err_msg = "Invalid PCI IO region size, aborting";
2344		err = -ENODEV;
2345		goto error_out_mwi;
2346	}
2347	if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2348		err_msg = "region #1 not a PCI MMIO resource, aborting";
2349		err = -ENODEV;
2350		goto error_out_mwi;
2351	}
2352	if(pci_resource_len(pdev, 1) < 128) {
2353		err_msg = "Invalid PCI MMIO region size, aborting";
2354		err = -ENODEV;
2355		goto error_out_mwi;
2356	}
2357
2358	err = pci_request_regions(pdev, KBUILD_MODNAME);
2359	if(err < 0) {
2360		err_msg = "could not request regions";
2361		goto error_out_mwi;
2362	}
2363
2364	/* map our registers
2365	 */
2366	if(use_mmio != 0 && use_mmio != 1)
2367		use_mmio = typhoon_test_mmio(pdev);
2368
2369	ioaddr = pci_iomap(pdev, use_mmio, 128);
2370	if (!ioaddr) {
2371		err_msg = "cannot remap registers, aborting";
2372		err = -EIO;
2373		goto error_out_regions;
2374	}
2375
2376	/* allocate pci dma space for rx and tx descriptor rings
2377	 */
2378	shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2379				      &shared_dma);
2380	if(!shared) {
2381		err_msg = "could not allocate DMA memory";
2382		err = -ENOMEM;
2383		goto error_out_remap;
2384	}
2385
2386	dev->irq = pdev->irq;
2387	tp = netdev_priv(dev);
2388	tp->shared = (struct typhoon_shared *) shared;
2389	tp->shared_dma = shared_dma;
2390	tp->pdev = pdev;
2391	tp->tx_pdev = pdev;
2392	tp->ioaddr = ioaddr;
2393	tp->tx_ioaddr = ioaddr;
2394	tp->dev = dev;
2395
2396	/* Init sequence:
2397	 * 1) Reset the adapter to clear any bad juju
2398	 * 2) Reload the sleep image
2399	 * 3) Boot the sleep image
2400	 * 4) Get the hardware address.
2401	 * 5) Put the card to sleep.
2402	 */
2403	if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2404		err_msg = "could not reset 3XP";
2405		err = -EIO;
2406		goto error_out_dma;
2407	}
2408
2409	/* Now that we've reset the 3XP and are sure it's not going to
2410	 * write all over memory, enable bus mastering, and save our
2411	 * state for resuming after a suspend.
2412	 */
2413	pci_set_master(pdev);
2414	pci_save_state(pdev);
2415
2416	typhoon_init_interface(tp);
2417	typhoon_init_rings(tp);
2418
2419	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2420		err_msg = "cannot boot 3XP sleep image";
2421		err = -EIO;
2422		goto error_out_reset;
2423	}
2424
2425	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2426	if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2427		err_msg = "cannot read MAC address";
2428		err = -EIO;
2429		goto error_out_reset;
2430	}
2431
2432	*(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2433	*(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2434
2435	if(!is_valid_ether_addr(dev->dev_addr)) {
2436		err_msg = "Could not obtain valid ethernet address, aborting";
2437		goto error_out_reset;
2438	}
2439
2440	/* Read the Sleep Image version last, so the response is valid
2441	 * later when we print out the version reported.
2442	 */
2443	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2444	if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2445		err_msg = "Could not get Sleep Image version";
2446		goto error_out_reset;
2447	}
2448
2449	tp->capabilities = typhoon_card_info[card_id].capabilities;
2450	tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2451
2452	/* Typhoon 1.0 Sleep Images return one response descriptor to the
2453	 * READ_VERSIONS command. Those versions are OK after waking up
2454	 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2455	 * seem to need a little extra help to get started. Since we don't
2456	 * know how to nudge it along, just kick it.
2457	 */
2458	if(xp_resp[0].numDesc != 0)
2459		tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2460
2461	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2462		err_msg = "cannot put adapter to sleep";
2463		err = -EIO;
2464		goto error_out_reset;
2465	}
2466
2467	/* The chip-specific entries in the device structure. */
2468	dev->netdev_ops		= &typhoon_netdev_ops;
2469	netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2470	dev->watchdog_timeo	= TX_TIMEOUT;
2471
2472	SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2473
2474	/* We can handle scatter gather, up to 16 entries, and
2475	 * we can do IP checksumming (only version 4, doh...)
2476	 */
2477	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2478	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2479	dev->features |= NETIF_F_TSO;
2480
2481	if(register_netdev(dev) < 0) {
2482		err_msg = "unable to register netdev";
2483		goto error_out_reset;
2484	}
2485
2486	pci_set_drvdata(pdev, dev);
2487
2488	netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2489		    typhoon_card_info[card_id].name,
2490		    use_mmio ? "MMIO" : "IO",
2491		    (unsigned long long)pci_resource_start(pdev, use_mmio),
2492		    dev->dev_addr);
2493
2494	/* xp_resp still contains the response to the READ_VERSIONS command.
2495	 * For debugging, let the user know what version he has.
2496	 */
2497	if(xp_resp[0].numDesc == 0) {
2498		/* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2499		 * of version is Month/Day of build.
2500		 */
2501		u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2502		netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2503			    monthday >> 8, monthday & 0xff);
2504	} else if(xp_resp[0].numDesc == 2) {
2505		/* This is the Typhoon 1.1+ type Sleep Image
2506		 */
2507		u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2508		u8 *ver_string = (u8 *) &xp_resp[1];
2509		ver_string[25] = 0;
2510		netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2511			    sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2512			    sleep_ver & 0xfff, ver_string);
2513	} else {
2514		netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2515			    xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
2516	}
2517
2518	return 0;
2519
2520error_out_reset:
2521	typhoon_reset(ioaddr, NoWait);
2522
2523error_out_dma:
2524	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2525			    shared, shared_dma);
2526error_out_remap:
2527	pci_iounmap(pdev, ioaddr);
2528error_out_regions:
2529	pci_release_regions(pdev);
2530error_out_mwi:
2531	pci_clear_mwi(pdev);
2532error_out_disable:
2533	pci_disable_device(pdev);
2534error_out_dev:
2535	free_netdev(dev);
2536error_out:
2537	pr_err("%s: %s\n", pci_name(pdev), err_msg);
2538	return err;
2539}
2540
/* PCI remove hook -- tear down everything typhoon_init_one() set up,
 * in reverse order.  The card is brought back to D0 and reset first so
 * it cannot DMA into memory that is about to be freed.
 */
static void __devexit
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	unregister_netdev(dev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	typhoon_reset(tp->ioaddr, NoWait);
	pci_iounmap(pdev, tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
2560
/* PCI driver glue; suspend/resume hooks only when CONFIG_PM is set */
static struct pci_driver typhoon_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
#endif
};
2571
2572static int __init
2573typhoon_init(void)
2574{
2575	return pci_register_driver(&typhoon_driver);
2576}
2577
2578static void __exit
2579typhoon_cleanup(void)
2580{
2581	if (typhoon_fw)
2582		release_firmware(typhoon_fw);
2583	pci_unregister_driver(&typhoon_driver);
2584}
2585
/* module entry/exit registration */
module_init(typhoon_init);
module_exit(typhoon_cleanup);
2588