/*
 * NOTE: the lines below were navigation residue from a web source browser
 * (asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/
 * linux-2.6.36/drivers/net/tulip/de2104x.c) and are preserved here only
 * as provenance, wrapped in a comment so the file remains compilable.
 */
1/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
2/*
3	Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
4
5	Copyright 1994, 1995 Digital Equipment Corporation.	    [de4x5.c]
6	Written/copyright 1994-2001 by Donald Becker.		    [tulip.c]
7
8	This software may be used and distributed according to the terms of
9	the GNU General Public License (GPL), incorporated herein by reference.
10	Drivers based on or derived from this code fall under the GPL and must
11	retain the authorship, copyright and license notice.  This file is not
12	a complete program and may only be used when the entire operating
13	system is licensed under the GPL.
14
15	See the file COPYING in this distribution for more information.
16
17	TODO, in rough priority order:
18	* Support forcing media type with a module parameter,
19	  like dl2k.c/sundance.c
20	* Constants (module parms?) for Rx work limit
21	* Complete reset on PciErr
22	* Jumbo frames / dev->change_mtu
23	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25	* Implement Tx software interrupt mitigation via
26	  Tx descriptor bit
27
28 */
29
30#define DRV_NAME		"de2104x"
31#define DRV_VERSION		"0.7"
32#define DRV_RELDATE		"Mar 17, 2004"
33
34#include <linux/module.h>
35#include <linux/kernel.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/init.h>
39#include <linux/pci.h>
40#include <linux/delay.h>
41#include <linux/ethtool.h>
42#include <linux/compiler.h>
43#include <linux/rtnetlink.h>
44#include <linux/crc32.h>
45#include <linux/slab.h>
46
47#include <asm/io.h>
48#include <asm/irq.h>
49#include <asm/uaccess.h>
50#include <asm/unaligned.h>
51
52/* These identify the driver base version and may not be removed. */
53static char version[] =
54KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
55
56MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
57MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
58MODULE_LICENSE("GPL");
59MODULE_VERSION(DRV_VERSION);
60
61static int debug = -1;
62module_param (debug, int, 0);
63MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
64
65/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
66#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
67	defined(CONFIG_SPARC) || defined(__ia64__) || defined(__sh__) || defined(__mips__)
68static int rx_copybreak = 1518;
69#else
70static int rx_copybreak = 100;
71#endif
72module_param (rx_copybreak, int, 0);
73MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
74
75#define PFX			DRV_NAME ": "
76
77#define DE_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
78				 NETIF_MSG_PROBE 	| \
79				 NETIF_MSG_LINK		| \
80				 NETIF_MSG_IFDOWN	| \
81				 NETIF_MSG_IFUP		| \
82				 NETIF_MSG_RX_ERR	| \
83				 NETIF_MSG_TX_ERR)
84
85/* Descriptor skip length in 32 bit longwords. */
86#ifndef CONFIG_DE2104X_DSL
87#define DSL			0
88#else
89#define DSL			CONFIG_DE2104X_DSL
90#endif
91
92#define DE_RX_RING_SIZE		64
93#define DE_TX_RING_SIZE		64
94#define DE_RING_BYTES		\
95		((sizeof(struct de_desc) * DE_RX_RING_SIZE) +	\
96		(sizeof(struct de_desc) * DE_TX_RING_SIZE))
97#define NEXT_TX(N)		(((N) + 1) & (DE_TX_RING_SIZE - 1))
98#define NEXT_RX(N)		(((N) + 1) & (DE_RX_RING_SIZE - 1))
99#define TX_BUFFS_AVAIL(CP)					\
100	(((CP)->tx_tail <= (CP)->tx_head) ?			\
101	  (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head :	\
102	  (CP)->tx_tail - (CP)->tx_head - 1)
103
104#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
105#define RX_OFFSET		2
106
107#define DE_SETUP_SKB		((struct sk_buff *) 1)
108#define DE_DUMMY_SKB		((struct sk_buff *) 2)
109#define DE_SETUP_FRAME_WORDS	96
110#define DE_EEPROM_WORDS		256
111#define DE_EEPROM_SIZE		(DE_EEPROM_WORDS * sizeof(u16))
112#define DE_MAX_MEDIA		5
113
114#define DE_MEDIA_TP_AUTO	0
115#define DE_MEDIA_BNC		1
116#define DE_MEDIA_AUI		2
117#define DE_MEDIA_TP		3
118#define DE_MEDIA_TP_FD		4
119#define DE_MEDIA_INVALID	DE_MAX_MEDIA
120#define DE_MEDIA_FIRST		0
121#define DE_MEDIA_LAST		(DE_MAX_MEDIA - 1)
122#define DE_AUI_BNC		(SUPPORTED_AUI | SUPPORTED_BNC)
123
124#define DE_TIMER_LINK		(60 * HZ)
125#define DE_TIMER_NO_LINK	(5 * HZ)
126
127#define DE_NUM_REGS		16
128#define DE_REGS_SIZE		(DE_NUM_REGS * sizeof(u32))
129#define DE_REGS_VER		1
130
131/* Time in jiffies before concluding the transmitter is hung. */
132#define TX_TIMEOUT		(6*HZ)
133
134/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
135   to support a pre-NWay full-duplex signaling mechanism using short frames.
136   No one knows what it should be, but if left at its default value some
137   10base2(!) packets trigger a full-duplex-request interrupt. */
138#define FULL_DUPLEX_MAGIC	0x6969
139
140enum {
141	/* NIC registers */
142	BusMode			= 0x00,
143	TxPoll			= 0x08,
144	RxPoll			= 0x10,
145	RxRingAddr		= 0x18,
146	TxRingAddr		= 0x20,
147	MacStatus		= 0x28,
148	MacMode			= 0x30,
149	IntrMask		= 0x38,
150	RxMissed		= 0x40,
151	ROMCmd			= 0x48,
152	CSR11			= 0x58,
153	SIAStatus		= 0x60,
154	CSR13			= 0x68,
155	CSR14			= 0x70,
156	CSR15			= 0x78,
157	PCIPM			= 0x40,
158
159	/* BusMode bits */
160	CmdReset		= (1 << 0),
161	CacheAlign16		= 0x00008000,
162	BurstLen4		= 0x00000400,
163	DescSkipLen		= (DSL << 2),
164
165	/* Rx/TxPoll bits */
166	NormalTxPoll		= (1 << 0),
167	NormalRxPoll		= (1 << 0),
168
169	/* Tx/Rx descriptor status bits */
170	DescOwn			= (1 << 31),
171	RxError			= (1 << 15),
172	RxErrLong		= (1 << 7),
173	RxErrCRC		= (1 << 1),
174	RxErrFIFO		= (1 << 0),
175	RxErrRunt		= (1 << 11),
176	RxErrFrame		= (1 << 14),
177	RingEnd			= (1 << 25),
178	FirstFrag		= (1 << 29),
179	LastFrag		= (1 << 30),
180	TxError			= (1 << 15),
181	TxFIFOUnder		= (1 << 1),
182	TxLinkFail		= (1 << 2) | (1 << 10) | (1 << 11),
183	TxMaxCol		= (1 << 8),
184	TxOWC			= (1 << 9),
185	TxJabber		= (1 << 14),
186	SetupFrame		= (1 << 27),
187	TxSwInt			= (1 << 31),
188
189	/* MacStatus bits */
190	IntrOK			= (1 << 16),
191	IntrErr			= (1 << 15),
192	RxIntr			= (1 << 6),
193	RxEmpty			= (1 << 7),
194	TxIntr			= (1 << 0),
195	TxEmpty			= (1 << 2),
196	PciErr			= (1 << 13),
197	TxState			= (1 << 22) | (1 << 21) | (1 << 20),
198	RxState			= (1 << 19) | (1 << 18) | (1 << 17),
199	LinkFail		= (1 << 12),
200	LinkPass		= (1 << 4),
201	RxStopped		= (1 << 8),
202	TxStopped		= (1 << 1),
203
204	/* MacMode bits */
205	TxEnable		= (1 << 13),
206	RxEnable		= (1 << 1),
207	RxTx			= TxEnable | RxEnable,
208	FullDuplex		= (1 << 9),
209	AcceptAllMulticast	= (1 << 7),
210	AcceptAllPhys		= (1 << 6),
211	BOCnt			= (1 << 5),
212	MacModeClear		= (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
213				  RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
214
215	/* ROMCmd bits */
216	EE_SHIFT_CLK		= 0x02,	/* EEPROM shift clock. */
217	EE_CS			= 0x01,	/* EEPROM chip select. */
218	EE_DATA_WRITE		= 0x04,	/* Data from the Tulip to EEPROM. */
219	EE_WRITE_0		= 0x01,
220	EE_WRITE_1		= 0x05,
221	EE_DATA_READ		= 0x08,	/* Data from the EEPROM chip. */
222	EE_ENB			= (0x4800 | EE_CS),
223
224	/* The EEPROM commands include the alway-set leading bit. */
225	EE_READ_CMD		= 6,
226
227	/* RxMissed bits */
228	RxMissedOver		= (1 << 16),
229	RxMissedMask		= 0xffff,
230
231	/* SROM-related bits */
232	SROMC0InfoLeaf		= 27,
233	MediaBlockMask		= 0x3f,
234	MediaCustomCSRs		= (1 << 6),
235
236	/* PCIPM bits */
237	PM_Sleep		= (1 << 31),
238	PM_Snooze		= (1 << 30),
239	PM_Mask			= PM_Sleep | PM_Snooze,
240
241	/* SIAStatus bits */
242	NWayState		= (1 << 14) | (1 << 13) | (1 << 12),
243	NWayRestart		= (1 << 12),
244	NonselPortActive	= (1 << 9),
245	SelPortActive		= (1 << 8),
246	LinkFailStatus		= (1 << 2),
247	NetCxnErr		= (1 << 1),
248};
249
250static const u32 de_intr_mask =
251	IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
252	LinkPass | LinkFail | PciErr;
253
254/*
255 * Set the programmable burst length to 4 longwords for all:
256 * DMA errors result without these values. Cache align 16 long.
257 */
258static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
259
260struct de_srom_media_block {
261	u8			opts;
262	u16			csr13;
263	u16			csr14;
264	u16			csr15;
265} __packed;
266
267struct de_srom_info_leaf {
268	u16			default_media;
269	u8			n_blocks;
270	u8			unused;
271} __packed;
272
273struct de_desc {
274	__le32			opts1;
275	__le32			opts2;
276	__le32			addr1;
277	__le32			addr2;
278#if DSL
279	__le32			skip[DSL];
280#endif
281};
282
283struct media_info {
284	u16			type;	/* DE_MEDIA_xxx */
285	u16			csr13;
286	u16			csr14;
287	u16			csr15;
288};
289
290struct ring_info {
291	struct sk_buff		*skb;
292	dma_addr_t		mapping;
293};
294
/* Per-adapter driver state, stored as netdev_priv(dev). */
struct de_private {
	unsigned		tx_head;	/* next Tx slot to fill (producer) */
	unsigned		tx_tail;	/* next Tx slot to reclaim (consumer) */
	unsigned		rx_tail;	/* next Rx slot to examine */

	void			__iomem *regs;	/* mapped CSR register window */
	struct net_device	*dev;
	spinlock_t		lock;		/* protects Tx ring / media state */

	struct de_desc		*rx_ring;	/* Rx descriptor ring (DMA memory) */
	struct de_desc		*tx_ring;	/* Tx descriptor ring (DMA memory) */
	struct ring_info	tx_skb[DE_TX_RING_SIZE];	/* skb+mapping per Tx slot */
	struct ring_info	rx_skb[DE_RX_RING_SIZE];	/* skb+mapping per Rx slot */
	unsigned		rx_buf_sz;	/* size of each Rx buffer */
	dma_addr_t		ring_dma;	/* bus address of the combined rings */

	u32			msg_enable;	/* netif_msg_* bitmap */

	struct net_device_stats net_stats;

	struct pci_dev		*pdev;

	/* Setup frame used to program the chip's address filter; only the
	 * low shortword of each 32-bit slot is significant. */
	u16			setup_frame[DE_SETUP_FRAME_WORDS];

	u32			media_type;	/* current DE_MEDIA_xxx */
	u32			media_supported;	/* SUPPORTED_* bitmap */
	u32			media_advertise;	/* ADVERTISED_* bitmap */
	struct media_info	media[DE_MAX_MEDIA];	/* per-media CSR13/14/15 values */
	struct timer_list	media_timer;	/* link poll / media-switch timer */

	u8			*ee_data;	/* cached EEPROM contents (21041) */
	unsigned		board_idx;
	unsigned		de21040 : 1;	/* chip is a 21040, not 21041 */
	unsigned		media_lock : 1;	/* user forced media; don't autoswitch */
};
330
331
332static void de_set_rx_mode (struct net_device *dev);
333static void de_tx (struct de_private *de);
334static void de_clean_rings (struct de_private *de);
335static void de_media_interrupt (struct de_private *de, u32 status);
336static void de21040_media_timer (unsigned long data);
337static void de21041_media_timer (unsigned long data);
338static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
339
340
341static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
342	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
343	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
344	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
345	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
346	{ },
347};
348MODULE_DEVICE_TABLE(pci, de_pci_tbl);
349
350static const char * const media_name[DE_MAX_MEDIA] = {
351	"10baseT auto",
352	"BNC",
353	"AUI",
354	"10baseT-HD",
355	"10baseT-FD"
356};
357
358/* 21040 transceiver register settings:
359 * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
360static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
361static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
362static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
363
364/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
365static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
366static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
367/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
368static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
369static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
370
371
372#define dr32(reg)	ioread32(de->regs + (reg))
373#define dw32(reg, val)	iowrite32((val), de->regs + (reg))
374
375
/* Account an Rx error for ring slot @rx_tail.  @status is the raw opts1
 * descriptor word; @len the chip-reported length.  Only updates the
 * net_stats error counters — the caller recycles the buffer.
 */
static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
			    u32 status, u32 len)
{
	if (netif_msg_rx_err (de))
		printk (KERN_DEBUG
			"%s: rx err, slot %d status 0x%x len %d\n",
			de->dev->name, rx_tail, status, len);

	if ((status & 0x38000300) != 0x0300) {
		/* Frame spanned multiple descriptors (first/last bits not
		 * both set here).  Ignore earlier buffers. */
		if ((status & 0xffff) != 0x7fff) {
			if (netif_msg_rx_err(de))
				dev_warn(&de->dev->dev,
					 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
					 status);
			de->net_stats.rx_length_errors++;
		}
	} else if (status & RxError) {
		/* There was a fatal error. */
		de->net_stats.rx_errors++; /* end of a packet.*/
		/* 0x0890: runt / too-long length indications (bits 4, 7, 11
		 * — presumably per 21041 descriptor spec; confirm vs datasheet) */
		if (status & 0x0890) de->net_stats.rx_length_errors++;
		if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
		if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
	}
}
401
/* Receive-path worker: walks the Rx ring from de->rx_tail, delivers
 * completed frames via netif_rx(), and hands each descriptor back to
 * the chip.  Small packets (<= rx_copybreak) are copied into a fresh
 * skb so the original DMA buffer can be reused; larger packets are
 * unmapped and replaced by a newly mapped buffer.  Called from the
 * interrupt handler without de->lock held.
 */
static void de_rx (struct de_private *de)
{
	unsigned rx_tail = de->rx_tail;
	unsigned rx_work = DE_RX_RING_SIZE;	/* bounded work per invocation */
	unsigned drop = 0;			/* once set, drop the rest of this batch */
	int rc;

	while (--rx_work) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *copy_skb;
		unsigned copying_skb, buflen;

		skb = de->rx_skb[rx_tail].skb;
		BUG_ON(!skb);
		rmb();	/* read opts1 only after seeing the slot populated */
		status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
		if (status & DescOwn)	/* chip still owns it: ring drained */
			break;

		/* length field minus 4-byte FCS */
		len = ((status >> 16) & 0x7ff) - 4;
		mapping = de->rx_skb[rx_tail].mapping;

		if (unlikely(drop)) {
			de->net_stats.rx_dropped++;
			goto rx_next;
		}

		/* anything other than a clean single-descriptor frame */
		if (unlikely((status & 0x38008300) != 0x0300)) {
			de_rx_err_acct(de, rx_tail, status, len);
			goto rx_next;
		}

		copying_skb = (len <= rx_copybreak);

		if (unlikely(netif_msg_rx_status(de)))
			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
			       de->dev->name, rx_tail, status, len,
			       copying_skb);

		buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
		copy_skb = dev_alloc_skb (buflen);
		if (unlikely(!copy_skb)) {
			/* Out of memory: drop, and shrink the remaining work
			 * budget so we exit this pass quickly. */
			de->net_stats.rx_dropped++;
			drop = 1;
			rx_work = 100;
			goto rx_next;
		}

		if (!copying_skb) {
			/* Hand the full-size buffer up; install the new one
			 * in the ring slot. */
			pci_unmap_single(de->pdev, mapping,
					 buflen, PCI_DMA_FROMDEVICE);
			skb_put(skb, len);

			mapping =
			de->rx_skb[rx_tail].mapping =
				pci_map_single(de->pdev, copy_skb->data,
					       buflen, PCI_DMA_FROMDEVICE);
			de->rx_skb[rx_tail].skb = copy_skb;
		} else {
			/* Copy the small frame out; the original buffer stays
			 * mapped and goes back to the chip. */
			pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
			skb_reserve(copy_skb, RX_OFFSET);
			skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
						  len);
			pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		skb->protocol = eth_type_trans (skb, de->dev);

		de->net_stats.rx_packets++;
		de->net_stats.rx_bytes += skb->len;
		rc = netif_rx (skb);
		if (rc == NET_RX_DROP)
			drop = 1;

rx_next:
		/* Re-arm the descriptor: buffer size (+ RingEnd on the last
		 * slot), buffer address, then ownership — in that order. */
		if (rx_tail == (DE_RX_RING_SIZE - 1))
			de->rx_ring[rx_tail].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
		wmb();	/* descriptor fields must be visible before DescOwn */
		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
		rx_tail = NEXT_RX(rx_tail);
	}

	if (!rx_work)
		dev_warn(&de->dev->dev, "rx work limit reached\n");

	de->rx_tail = rx_tail;
}
497
/* Interrupt handler.  Acknowledges MacStatus, services Rx outside the
 * lock, Tx completion and link-change handling under de->lock, and
 * clears/report PCI bus errors.  Returns IRQ_NONE when the interrupt
 * was not ours (shared IRQ line).
 */
static irqreturn_t de_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct de_private *de = netdev_priv(dev);
	u32 status;

	status = dr32(MacStatus);
	/* Not our interrupt, or device gone.  NOTE(review): a dead 32-bit
	 * readl returns 0xFFFFFFFF, so the 0xFFFF comparison looks like it
	 * can never match a removed device — confirm intent upstream. */
	if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
		return IRQ_NONE;

	if (netif_msg_intr(de))
		printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
		       dev->name, status, dr32(MacMode),
		       de->rx_tail, de->tx_head, de->tx_tail);

	/* ack everything we saw (write-1-to-clear) */
	dw32(MacStatus, status);

	if (status & (RxIntr | RxEmpty)) {
		de_rx(de);
		if (status & RxEmpty)	/* ring was empty; kick Rx DMA again */
			dw32(RxPoll, NormalRxPoll);
	}

	spin_lock(&de->lock);

	if (status & (TxIntr | TxEmpty))
		de_tx(de);

	if (status & (LinkPass | LinkFail))
		de_media_interrupt(de, status);

	spin_unlock(&de->lock);

	if (status & PciErr) {
		u16 pci_status;

		/* read-then-write clears the latched PCI error bits */
		pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
		dev_err(&de->dev->dev,
			"PCI bus error, status=%08x, PCI status=%04x\n",
			status, pci_status);
	}

	return IRQ_HANDLED;
}
543
/* Tx completion: reclaims finished descriptors from tx_tail up to
 * tx_head, unmaps buffers, accounts statistics, frees skbs, and wakes
 * the queue once enough slots are free.  Caller holds de->lock.
 */
static void de_tx (struct de_private *de)
{
	unsigned tx_head = de->tx_head;
	unsigned tx_tail = de->tx_tail;

	while (tx_tail != tx_head) {
		struct sk_buff *skb;
		u32 status;

		rmb();	/* pair with the producer's wmb() before DescOwn */
		status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
		if (status & DescOwn)	/* chip hasn't finished this one yet */
			break;

		skb = de->tx_skb[tx_tail].skb;
		BUG_ON(!skb);
		/* sentinel: dummy descriptor queued to work around errata */
		if (unlikely(skb == DE_DUMMY_SKB))
			goto next;

		/* sentinel: setup frame used to program the Rx filter */
		if (unlikely(skb == DE_SETUP_SKB)) {
			pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
					 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
			goto next;
		}

		pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
				 skb->len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & TxError) {
				if (netif_msg_tx_err(de))
					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
					       de->dev->name, status);
				de->net_stats.tx_errors++;
				if (status & TxOWC)
					de->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					de->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					de->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					de->net_stats.tx_fifo_errors++;
			} else {
				de->net_stats.tx_packets++;
				de->net_stats.tx_bytes += skb->len;
				if (netif_msg_tx_done(de))
					printk(KERN_DEBUG "%s: tx done, slot %d\n",
					       de->dev->name, tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

next:
		de->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	de->tx_tail = tx_tail;

	/* restart the queue once a quarter of the ring is free again */
	if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
		netif_wake_queue(de->dev);
}
607
/* ndo_start_xmit: queue one skb as a single-fragment descriptor at
 * tx_head, then kick the Tx DMA engine.  Returns NETDEV_TX_BUSY (and
 * stops the queue) when the ring is full.
 */
static netdev_tx_t de_start_xmit (struct sk_buff *skb,
					struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	unsigned int entry, tx_free;
	u32 mapping, len, flags = FirstFrag | LastFrag;
	struct de_desc *txd;

	spin_lock_irq(&de->lock);

	tx_free = TX_BUFFS_AVAIL(de);
	if (tx_free == 0) {
		netif_stop_queue(dev);
		spin_unlock_irq(&de->lock);
		return NETDEV_TX_BUSY;
	}
	tx_free--;	/* account for the slot we're about to consume */

	entry = de->tx_head;

	txd = &de->tx_ring[entry];

	len = skb->len;
	mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (entry == (DE_TX_RING_SIZE - 1))
		flags |= RingEnd;
	/* Request a Tx interrupt when the ring is full or half-full, to
	 * bound completion latency without interrupting per packet. */
	if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
		flags |= TxSwInt;
	flags |= len;
	txd->opts2 = cpu_to_le32(flags);
	txd->addr1 = cpu_to_le32(mapping);

	de->tx_skb[entry].skb = skb;
	de->tx_skb[entry].mapping = mapping;
	wmb();	/* descriptor body before ownership transfer */

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	de->tx_head = NEXT_TX(entry);
	if (netif_msg_tx_queued(de))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);

	if (tx_free == 0)
		netif_stop_queue(dev);

	spin_unlock_irq(&de->lock);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

	return NETDEV_TX_OK;
}
662
/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling de->setup_frame.  This is non-deterministic
   when re-entered but still correct. */

#undef set_bit_le
/* Set bit @i in the little-endian bitmap at @p (byte-granular, so it
 * behaves identically on big-endian hosts). */
#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)

/* Fill @setup_frm with a 512-bit hash-table filter built from the
 * device's multicast list, plus the broadcast entry and our own unicast
 * address in the final perfect-filter slot.  Each 16-bit value is
 * written twice because the chip reads only the low shortword of each
 * 32-bit setup-frame slot.
 */
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u16 hash_table[32];
	struct netdev_hw_addr *ha;
	int i;
	u16 *eaddrs;

	memset(hash_table, 0, sizeof(hash_table));
	set_bit_le(255, hash_table); 			/* Broadcast entry */
	/* This should work on big-endian machines as well. */
	netdev_for_each_mc_addr(ha, dev) {
		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;

		set_bit_le(index, hash_table);
	}

	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	setup_frm = &de->setup_frame[13*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
700
/* Fill @setup_frm with a 16-entry perfect filter: one entry per
 * multicast address (caller guarantees <= 14), unused entries padded
 * with the broadcast address, and our unicast address last.  Values are
 * doubled because the chip reads only the low shortword of each slot.
 */
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u16 *eaddrs;

	/* We have <= 14 addresses so we can use the wonderful
	   16 address perfect filtering of the Tulip. */
	netdev_for_each_mc_addr(ha, dev) {
		eaddrs = (u16 *) ha->addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
	setup_frm = &de->setup_frame[15*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
725
726
/* Program the Rx filter.  Promiscuous/all-multi modes are handled with
 * MacMode bits alone; otherwise a setup frame (hash or perfect filter)
 * is built and queued on the Tx ring for the chip to consume.  Caller
 * must hold de->lock.
 */
static void __de_set_rx_mode (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u32 macmode;
	unsigned int entry;
	u32 mapping;
	struct de_desc *txd;
	struct de_desc *dummy_txd = NULL;

	macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		macmode |= AcceptAllMulticast | AcceptAllPhys;
		goto out;
	}

	if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		macmode |= AcceptAllMulticast;
		goto out;
	}

	/* Note that only the low-address shortword of setup_frame is valid!
	   The values are doubled for big-endian architectures. */
	if (netdev_mc_count(dev) > 14)	/* Must use a multicast hash table. */
		build_setup_frame_hash (de->setup_frame, dev);
	else
		build_setup_frame_perfect (de->setup_frame, dev);

	/*
	 * Now add this frame to the Tx list.
	 */

	entry = de->tx_head;

	/* Avoid a chip errata by prefixing a dummy entry. */
	if (entry != 0) {
		de->tx_skb[entry].skb = DE_DUMMY_SKB;

		dummy_txd = &de->tx_ring[entry];
		dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
				   cpu_to_le32(RingEnd) : 0;
		dummy_txd->addr1 = 0;

		/* Must set DescOwned later to avoid race with chip */

		entry = NEXT_TX(entry);
	}

	de->tx_skb[entry].skb = DE_SETUP_SKB;
	de->tx_skb[entry].mapping = mapping =
	    pci_map_single (de->pdev, de->setup_frame,
			    sizeof (de->setup_frame), PCI_DMA_TODEVICE);

	/* Put the setup frame on the Tx list. */
	txd = &de->tx_ring[entry];
	if (entry == (DE_TX_RING_SIZE - 1))
		txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
	else
		txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
	txd->addr1 = cpu_to_le32(mapping);
	wmb();	/* descriptor body before handing ownership to the chip */

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	/* only now may the chip see the (already-owned) dummy entry */
	if (dummy_txd) {
		dummy_txd->opts1 = cpu_to_le32(DescOwn);
		wmb();
	}

	de->tx_head = NEXT_TX(entry);

	if (TX_BUFFS_AVAIL(de) == 0)
		netif_stop_queue(dev);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

out:
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}
810
811static void de_set_rx_mode (struct net_device *dev)
812{
813	unsigned long flags;
814	struct de_private *de = netdev_priv(dev);
815
816	spin_lock_irqsave (&de->lock, flags);
817	__de_set_rx_mode(dev);
818	spin_unlock_irqrestore (&de->lock, flags);
819}
820
821static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
822{
823	if (unlikely(rx_missed & RxMissedOver))
824		de->net_stats.rx_missed_errors += RxMissedMask;
825	else
826		de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
827}
828
829static void __de_get_stats(struct de_private *de)
830{
831	u32 tmp = dr32(RxMissed); /* self-clearing */
832
833	de_rx_missed(de, tmp);
834}
835
836static struct net_device_stats *de_get_stats(struct net_device *dev)
837{
838	struct de_private *de = netdev_priv(dev);
839
840	/* The chip only need report frame silently dropped. */
841	spin_lock_irq(&de->lock);
842 	if (netif_running(dev) && netif_device_present(dev))
843 		__de_get_stats(de);
844	spin_unlock_irq(&de->lock);
845
846	return &de->net_stats;
847}
848
849static inline int de_is_running (struct de_private *de)
850{
851	return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
852}
853
854static void de_stop_rxtx (struct de_private *de)
855{
856	u32 macmode;
857	unsigned int i = 1300/100;
858
859	macmode = dr32(MacMode);
860	if (macmode & RxTx) {
861		dw32(MacMode, macmode & ~RxTx);
862		dr32(MacMode);
863	}
864
865	/* wait until in-flight frame completes.
866	 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
867	 * Typically expect this loop to end in < 50 us on 100BT.
868	 */
869	while (--i) {
870		if (!de_is_running(de))
871			return;
872		udelay(100);
873	}
874
875	dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
876}
877
878static inline void de_start_rxtx (struct de_private *de)
879{
880	u32 macmode;
881
882	macmode = dr32(MacMode);
883	if ((macmode & RxTx) != RxTx) {
884		dw32(MacMode, macmode | RxTx);
885		dr32(MacMode);
886	}
887}
888
889static void de_stop_hw (struct de_private *de)
890{
891
892	udelay(5);
893	dw32(IntrMask, 0);
894
895	de_stop_rxtx(de);
896
897	dw32(MacStatus, dr32(MacStatus));
898
899	udelay(10);
900
901	de->rx_tail = 0;
902	de->tx_head = de->tx_tail = 0;
903}
904
905static void de_link_up(struct de_private *de)
906{
907	if (!netif_carrier_ok(de->dev)) {
908		netif_carrier_on(de->dev);
909		if (netif_msg_link(de))
910			dev_info(&de->dev->dev, "link up, media %s\n",
911				 media_name[de->media_type]);
912	}
913}
914
915static void de_link_down(struct de_private *de)
916{
917	if (netif_carrier_ok(de->dev)) {
918		netif_carrier_off(de->dev);
919		if (netif_msg_link(de))
920			dev_info(&de->dev->dev, "link down\n");
921	}
922}
923
/* Program the SIA (CSR13/14/15) for the currently selected media type
 * and set/clear the MacMode full-duplex bit accordingly.  The chip's
 * DMA engines should be stopped before calling this.
 */
static void de_set_media (struct de_private *de)
{
	unsigned media = de->media_type;
	u32 macmode = dr32(MacMode);

	if (de_is_running(de))
		dev_warn(&de->dev->dev,
			 "chip is running while changing media!\n");

	if (de->de21040)
		dw32(CSR11, FULL_DUPLEX_MAGIC);	/* see FULL_DUPLEX_MAGIC note */
	dw32(CSR13, 0); /* Reset phy */
	dw32(CSR14, de->media[media].csr14);
	dw32(CSR15, de->media[media].csr15);
	dw32(CSR13, de->media[media].csr13);	/* re-enable SIA last */

	/* must delay 10ms before writing to other registers,
	 * especially CSR6
	 */
	mdelay(10);

	if (media == DE_MEDIA_TP_FD)
		macmode |= FullDuplex;
	else
		macmode &= ~FullDuplex;

	if (netif_msg_link(de)) {
		dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
		dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
			 dr32(MacMode), dr32(SIAStatus),
			 dr32(CSR13), dr32(CSR14), dr32(CSR15));

		dev_info(&de->dev->dev,
			 "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
			 macmode, de->media[media].csr13,
			 de->media[media].csr14, de->media[media].csr15);
	}
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}
964
965static void de_next_media (struct de_private *de, u32 *media,
966			   unsigned int n_media)
967{
968	unsigned int i;
969
970	for (i = 0; i < n_media; i++) {
971		if (de_ok_to_advertise(de, media[i])) {
972			de->media_type = media[i];
973			return;
974		}
975	}
976}
977
/* 21040 link-poll timer.  With carrier: re-arm the long timer and mark
 * the link up.  Without carrier: mark the link down and, unless the
 * user locked the media, toggle between TP and AUI, reprogram the SIA,
 * and re-arm the short timer.
 */
static void de21040_media_timer (unsigned long data)
{
	struct de_private *de = (struct de_private *) data;
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;
	unsigned long flags;

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		/* TP media additionally requires the link-fail bit clear */
		if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
			goto no_link_yet;

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			if (netif_msg_timer(de))
				dev_info(&dev->dev, "%s link ok, status %x\n",
					 media_name[de->media_type], status);
		return;
	}

	de_link_down(de);

	/* user pinned the media type: keep polling, don't switch */
	if (de->media_lock)
		return;

	/* alternate between the 21040's two usable ports: TP <-> AUI */
	if (de->media_type == DE_MEDIA_AUI) {
		u32 next_state = DE_MEDIA_TP;
		de_next_media(de, &next_state, 1);
	} else {
		u32 next_state = DE_MEDIA_AUI;
		de_next_media(de, &next_state, 1);
	}

	spin_lock_irqsave(&de->lock, flags);
	de_stop_rxtx(de);
	spin_unlock_irqrestore(&de->lock, flags);
	de_set_media(de);
	de_start_rxtx(de);

	/* fall through: re-arm the short no-link timer */
no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	if (netif_msg_timer(de))
		dev_info(&dev->dev, "no link, trying media %s, status %x\n",
			 media_name[de->media_type], status);
}
1030
1031static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1032{
1033	switch (new_media) {
1034	case DE_MEDIA_TP_AUTO:
1035		if (!(de->media_advertise & ADVERTISED_Autoneg))
1036			return 0;
1037		if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1038			return 0;
1039		break;
1040	case DE_MEDIA_BNC:
1041		if (!(de->media_advertise & ADVERTISED_BNC))
1042			return 0;
1043		break;
1044	case DE_MEDIA_AUI:
1045		if (!(de->media_advertise & ADVERTISED_AUI))
1046			return 0;
1047		break;
1048	case DE_MEDIA_TP:
1049		if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1050			return 0;
1051		break;
1052	case DE_MEDIA_TP_FD:
1053		if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1054			return 0;
1055		break;
1056	}
1057
1058	return 1;
1059}
1060
/* 21041 link-poll timer.  With carrier: re-arm the long timer and mark
 * the link up.  Without carrier: mark the link down and, unless the
 * media is locked, pick a new media type — first from the chip's
 * port-activity hint, otherwise by cycling through the advertised
 * media — then reprogram the SIA and re-arm the short timer.
 */
static void de21041_media_timer (unsigned long data)
{
	struct de_private *de = (struct de_private *) data;
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;
	unsigned long flags;

	/* clear port active bits */
	dw32(SIAStatus, NonselPortActive | SelPortActive);

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		/* TP-family media additionally requires link-fail clear */
		if ((de->media_type == DE_MEDIA_TP_AUTO ||
		     de->media_type == DE_MEDIA_TP ||
		     de->media_type == DE_MEDIA_TP_FD) &&
		    (status & LinkFailStatus))
			goto no_link_yet;

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			if (netif_msg_timer(de))
				dev_info(&dev->dev,
					 "%s link ok, mode %x status %x\n",
					 media_name[de->media_type],
					 dr32(MacMode), status);
		return;
	}

	de_link_down(de);

	/* if media type locked, don't switch media */
	if (de->media_lock)
		goto set_media;

	/* if activity detected, use that as hint for new media type */
	if (status & NonselPortActive) {
		unsigned int have_media = 1;

		/* if AUI/BNC selected, then activity is on TP port */
		if (de->media_type == DE_MEDIA_AUI ||
		    de->media_type == DE_MEDIA_BNC) {
			if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
				de->media_type = DE_MEDIA_TP_AUTO;
			else
				have_media = 0;
		}

		/* TP selected.  If there is only TP and BNC, then it's BNC */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
			 de_ok_to_advertise(de, DE_MEDIA_BNC))
			de->media_type = DE_MEDIA_BNC;

		/* TP selected.  If there is only TP and AUI, then it's AUI */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
			 de_ok_to_advertise(de, DE_MEDIA_AUI))
			de->media_type = DE_MEDIA_AUI;

		/* otherwise, ignore the hint */
		else
			have_media = 0;

		if (have_media)
			goto set_media;
	}

	/*
	 * Absent or ambiguous activity hint, move to next advertised
	 * media state.  If de->media_type is left unchanged, this
	 * simply resets the PHY and reloads the current media settings.
	 */
	if (de->media_type == DE_MEDIA_AUI) {
		u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else if (de->media_type == DE_MEDIA_BNC) {
		u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else {
		u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	}

set_media:
	spin_lock_irqsave(&de->lock, flags);
	de_stop_rxtx(de);
	spin_unlock_irqrestore(&de->lock, flags);
	de_set_media(de);
	de_start_rxtx(de);

	/* fall through: re-arm the short no-link timer */
no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	if (netif_msg_timer(de))
		dev_info(&dev->dev, "no link, trying media %s, status %x\n",
			 media_name[de->media_type], status);
}
1162
/* React to SIA LinkPass / LinkFail interrupt status.
 *
 * LinkPass: a TP link was seen -- switch AUI/BNC over to TP autodetect
 * when that is allowed and advertised, then raise the carrier.
 * LinkFail: only meaningful while on TP media, so AUI/BNC keep their
 * carrier state untouched.
 */
static void de_media_interrupt (struct de_private *de, u32 status)
{
	if (status & LinkPass) {
		/* Ignore if current media is AUI or BNC and we can't use TP */
		if ((de->media_type == DE_MEDIA_AUI ||
		     de->media_type == DE_MEDIA_BNC) &&
		    (de->media_lock ||
		     !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
			return;
		/* If current media is not TP, change it to TP */
		if ((de->media_type == DE_MEDIA_AUI ||
		     de->media_type == DE_MEDIA_BNC)) {
			de->media_type = DE_MEDIA_TP_AUTO;
			de_stop_rxtx(de);
			de_set_media(de);
			de_start_rxtx(de);
		}
		de_link_up(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
		return;
	}

	/* anything other than LinkPass must be LinkFail */
	BUG_ON(!(status & LinkFail));
	/* Mark the link as down only if current media is TP */
	if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
	    de->media_type != DE_MEDIA_BNC) {
		de_link_down(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
	}
}
1193
/* Soft-reset the MAC and verify the chip responds.
 *
 * Returns 0 on success; -EBUSY if the bus reads all-ones before the
 * reset or the Rx/Tx state machines are still active afterwards;
 * -ENODEV if MacStatus reads all-ones (device gone).
 */
static int de_reset_mac (struct de_private *de)
{
	u32 status, tmp;

	/*
	 * Reset MAC.  de4x5.c and tulip.c examined for "advice"
	 * in this area.
	 */

	if (dr32(BusMode) == 0xffffffff)
		return -EBUSY;

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	dw32 (BusMode, CmdReset);
	mdelay (1);

	dw32 (BusMode, de_bus_mode);
	mdelay (1);

	/* a few dummy reads, with delays, to let the reset settle */
	for (tmp = 0; tmp < 5; tmp++) {
		dr32 (BusMode);
		mdelay (1);
	}

	mdelay (1);

	status = dr32(MacStatus);
	if (status & (RxState | TxState))
		return -EBUSY;
	if (status == 0xffffffff)
		return -ENODEV;
	return 0;
}
1227
1228static void de_adapter_wake (struct de_private *de)
1229{
1230	u32 pmctl;
1231
1232	if (de->de21040)
1233		return;
1234
1235	pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1236	if (pmctl & PM_Mask) {
1237		pmctl &= ~PM_Mask;
1238		pci_write_config_dword(de->pdev, PCIPM, pmctl);
1239
1240		/* de4x5.c delays, so we do too */
1241		msleep(10);
1242	}
1243}
1244
1245static void de_adapter_sleep (struct de_private *de)
1246{
1247	u32 pmctl;
1248
1249	if (de->de21040)
1250		return;
1251
1252	dw32(CSR13, 0); /* Reset phy */
1253	pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1254	pmctl |= PM_Sleep;
1255	pci_write_config_dword(de->pdev, PCIPM, pmctl);
1256}
1257
/* Bring the chip from reset to running: wake it, reset the MAC,
 * program the media, point it at the descriptor rings, start Rx/Tx
 * and unmask interrupts.  Returns 0 or the de_reset_mac() error.
 */
static int de_init_hw (struct de_private *de)
{
	struct net_device *dev = de->dev;
	u32 macmode;
	int rc;

	de_adapter_wake(de);

	/* remember mode bits we want to keep; the reset clears them */
	macmode = dr32(MacMode) & ~MacModeClear;

	rc = de_reset_mac(de);
	if (rc)
		return rc;

	de_set_media(de); /* reset phy */

	dw32(RxRingAddr, de->ring_dma);
	/* Tx ring lives right after the Rx ring in the same DMA area */
	dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));

	dw32(MacMode, RxTx | macmode);

	dr32(RxMissed); /* self-clearing */

	dw32(IntrMask, de_intr_mask);

	de_set_rx_mode(dev);

	return 0;
}
1287
1288static int de_refill_rx (struct de_private *de)
1289{
1290	unsigned i;
1291
1292	for (i = 0; i < DE_RX_RING_SIZE; i++) {
1293		struct sk_buff *skb;
1294
1295		skb = dev_alloc_skb(de->rx_buf_sz);
1296		if (!skb)
1297			goto err_out;
1298
1299		skb->dev = de->dev;
1300
1301		de->rx_skb[i].mapping = pci_map_single(de->pdev,
1302			skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1303		de->rx_skb[i].skb = skb;
1304
1305		de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1306		if (i == (DE_RX_RING_SIZE - 1))
1307			de->rx_ring[i].opts2 =
1308				cpu_to_le32(RingEnd | de->rx_buf_sz);
1309		else
1310			de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1311		de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1312		de->rx_ring[i].addr2 = 0;
1313	}
1314
1315	return 0;
1316
1317err_out:
1318	de_clean_rings(de);
1319	return -ENOMEM;
1320}
1321
1322static int de_init_rings (struct de_private *de)
1323{
1324	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1325	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1326
1327	de->rx_tail = 0;
1328	de->tx_head = de->tx_tail = 0;
1329
1330	return de_refill_rx (de);
1331}
1332
1333static int de_alloc_rings (struct de_private *de)
1334{
1335	de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1336	if (!de->rx_ring)
1337		return -ENOMEM;
1338	de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1339	return de_init_rings(de);
1340}
1341
1342static void de_clean_rings (struct de_private *de)
1343{
1344	unsigned i;
1345
1346	memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
1347	de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1348	wmb();
1349	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1350	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1351	wmb();
1352
1353	for (i = 0; i < DE_RX_RING_SIZE; i++) {
1354		if (de->rx_skb[i].skb) {
1355			pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
1356					 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1357			dev_kfree_skb(de->rx_skb[i].skb);
1358		}
1359	}
1360
1361	for (i = 0; i < DE_TX_RING_SIZE; i++) {
1362		struct sk_buff *skb = de->tx_skb[i].skb;
1363		if ((skb) && (skb != DE_DUMMY_SKB)) {
1364			if (skb != DE_SETUP_SKB) {
1365				de->net_stats.tx_dropped++;
1366				pci_unmap_single(de->pdev,
1367					de->tx_skb[i].mapping,
1368					skb->len, PCI_DMA_TODEVICE);
1369				dev_kfree_skb(skb);
1370			} else {
1371				pci_unmap_single(de->pdev,
1372					de->tx_skb[i].mapping,
1373					sizeof(de->setup_frame),
1374					PCI_DMA_TODEVICE);
1375			}
1376		}
1377	}
1378
1379	memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
1380	memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
1381}
1382
/* Release all ring buffers, then the coherent descriptor memory itself. */
static void de_free_rings (struct de_private *de)
{
	de_clean_rings(de);
	pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
	de->rx_ring = NULL;
	de->tx_ring = NULL;
}
1390
/* ndo_open: allocate rings, claim the IRQ, program the hardware and
 * start the media poll timer.  Errors unwind in reverse order.
 */
static int de_open (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	int rc;

	if (netif_msg_ifup(de))
		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);

	/* standard frames use PKT_BUF_SZ; bigger MTUs get 32 slack bytes */
	de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	rc = de_alloc_rings(de);
	if (rc) {
		dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
		return rc;
	}

	/* mask chip interrupts before attaching to the (shared) IRQ line */
	dw32(IntrMask, 0);

	rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
			dev->irq, rc);
		goto err_out_free;
	}

	rc = de_init_hw(de);
	if (rc) {
		dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
		goto err_out_free_irq;
	}

	netif_start_queue(dev);
	mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);

	return 0;

err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_free:
	de_free_rings(de);
	return rc;
}
1433
/* ndo_stop: quiesce the media timer and hardware, release the IRQ and
 * rings, and put the adapter back to sleep.
 */
static int de_close (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	unsigned long flags;

	if (netif_msg_ifdown(de))
		printk(KERN_DEBUG "%s: disabling interface\n", dev->name);

	/* the timer must not re-arm the hardware while we tear it down */
	del_timer_sync(&de->media_timer);

	spin_lock_irqsave(&de->lock, flags);
	de_stop_hw(de);
	netif_stop_queue(dev);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&de->lock, flags);

	free_irq(dev->irq, dev);

	de_free_rings(de);
	de_adapter_sleep(de);
	return 0;
}
1456
/* ndo_tx_timeout: dump chip/ring state, fully stop the NIC with the
 * IRQ disabled and the lock held, drop every queued buffer, then
 * rebuild the rings, restart the hardware and re-enable the queue.
 */
static void de_tx_timeout (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);

	printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
	       dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
	       de->rx_tail, de->tx_head, de->tx_tail);

	del_timer_sync(&de->media_timer);

	disable_irq(dev->irq);
	spin_lock_irq(&de->lock);

	de_stop_hw(de);
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	spin_unlock_irq(&de->lock);
	enable_irq(dev->irq);

	/* Update the error counts. */
	__de_get_stats(de);

	/* no handler may still be running when the rings are recycled */
	synchronize_irq(dev->irq);
	de_clean_rings(de);

	de_init_rings(de);

	de_init_hw(de);

	netif_wake_queue(dev);
}
1489
1490static void __de_get_regs(struct de_private *de, u8 *buf)
1491{
1492	int i;
1493	u32 *rbuf = (u32 *)buf;
1494
1495	/* read all CSRs */
1496	for (i = 0; i < DE_NUM_REGS; i++)
1497		rbuf[i] = dr32(i * 8);
1498
1499	/* handle self-clearing RxMissed counter, CSR8 */
1500	de_rx_missed(de, rbuf[8]);
1501}
1502
1503static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1504{
1505	ecmd->supported = de->media_supported;
1506	ecmd->transceiver = XCVR_INTERNAL;
1507	ecmd->phy_address = 0;
1508	ecmd->advertising = de->media_advertise;
1509
1510	switch (de->media_type) {
1511	case DE_MEDIA_AUI:
1512		ecmd->port = PORT_AUI;
1513		ecmd->speed = 5;
1514		break;
1515	case DE_MEDIA_BNC:
1516		ecmd->port = PORT_BNC;
1517		ecmd->speed = 2;
1518		break;
1519	default:
1520		ecmd->port = PORT_TP;
1521		ecmd->speed = SPEED_10;
1522		break;
1523	}
1524
1525	if (dr32(MacMode) & FullDuplex)
1526		ecmd->duplex = DUPLEX_FULL;
1527	else
1528		ecmd->duplex = DUPLEX_HALF;
1529
1530	if (de->media_lock)
1531		ecmd->autoneg = AUTONEG_DISABLE;
1532	else
1533		ecmd->autoneg = AUTONEG_ENABLE;
1534
1535	/* ignore maxtxpkt, maxrxpkt for now */
1536
1537	return 0;
1538}
1539
/* Apply new ethtool settings; caller holds de->lock.
 *
 * The request is validated against chip capabilities (the 21040 has no
 * BNC port), mapped onto a DE_MEDIA_* type, and -- only if something
 * actually changed -- the link is dropped, the media reprogrammed and
 * rx/tx restarted (when the interface is running).
 */
static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
{
	u32 new_media;
	unsigned int media_lock;

	/* speeds 5 and 2 are the AUI/BNC encodings used by get_settings */
	if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
		return -EINVAL;
	if (de->de21040 && ecmd->speed == 2)
		return -EINVAL;
	if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
		return -EINVAL;
	if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
		return -EINVAL;
	if (de->de21040 && ecmd->port == PORT_BNC)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_INTERNAL)
		return -EINVAL;
	if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	if (ecmd->advertising & ~de->media_supported)
		return -EINVAL;
	if (ecmd->autoneg == AUTONEG_ENABLE &&
	    (!(ecmd->advertising & ADVERTISED_Autoneg)))
		return -EINVAL;

	/* translate requested port/autoneg/duplex to a media type */
	switch (ecmd->port) {
	case PORT_AUI:
		new_media = DE_MEDIA_AUI;
		if (!(ecmd->advertising & ADVERTISED_AUI))
			return -EINVAL;
		break;
	case PORT_BNC:
		new_media = DE_MEDIA_BNC;
		if (!(ecmd->advertising & ADVERTISED_BNC))
			return -EINVAL;
		break;
	default:
		if (ecmd->autoneg == AUTONEG_ENABLE)
			new_media = DE_MEDIA_TP_AUTO;
		else if (ecmd->duplex == DUPLEX_FULL)
			new_media = DE_MEDIA_TP_FD;
		else
			new_media = DE_MEDIA_TP;
		if (!(ecmd->advertising & ADVERTISED_TP))
			return -EINVAL;
		if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
			return -EINVAL;
		break;
	}

	/* a fixed setting locks out the media-hunt timer */
	media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;

	if ((new_media == de->media_type) &&
	    (media_lock == de->media_lock) &&
	    (ecmd->advertising == de->media_advertise))
		return 0; /* nothing to change */

	de_link_down(de);
	mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
	de_stop_rxtx(de);

	de->media_type = new_media;
	de->media_lock = media_lock;
	de->media_advertise = ecmd->advertising;
	de_set_media(de);
	if (netif_running(de->dev))
		de_start_rxtx(de);

	return 0;
}
1610
1611static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1612{
1613	struct de_private *de = netdev_priv(dev);
1614
1615	strcpy (info->driver, DRV_NAME);
1616	strcpy (info->version, DRV_VERSION);
1617	strcpy (info->bus_info, pci_name(de->pdev));
1618	info->eedump_len = DE_EEPROM_SIZE;
1619}
1620
/* ethtool get_regs_len: size of the CSR snapshot buffer. */
static int de_get_regs_len(struct net_device *dev)
{
	return DE_REGS_SIZE;
}
1625
1626static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1627{
1628	struct de_private *de = netdev_priv(dev);
1629	int rc;
1630
1631	spin_lock_irq(&de->lock);
1632	rc = __de_get_settings(de, ecmd);
1633	spin_unlock_irq(&de->lock);
1634
1635	return rc;
1636}
1637
1638static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1639{
1640	struct de_private *de = netdev_priv(dev);
1641	int rc;
1642
1643	spin_lock_irq(&de->lock);
1644	rc = __de_set_settings(de, ecmd);
1645	spin_unlock_irq(&de->lock);
1646
1647	return rc;
1648}
1649
1650static u32 de_get_msglevel(struct net_device *dev)
1651{
1652	struct de_private *de = netdev_priv(dev);
1653
1654	return de->msg_enable;
1655}
1656
1657static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1658{
1659	struct de_private *de = netdev_priv(dev);
1660
1661	de->msg_enable = msglvl;
1662}
1663
1664static int de_get_eeprom(struct net_device *dev,
1665			 struct ethtool_eeprom *eeprom, u8 *data)
1666{
1667	struct de_private *de = netdev_priv(dev);
1668
1669	if (!de->ee_data)
1670		return -EOPNOTSUPP;
1671	if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1672	    (eeprom->len != DE_EEPROM_SIZE))
1673		return -EINVAL;
1674	memcpy(data, de->ee_data, eeprom->len);
1675
1676	return 0;
1677}
1678
1679static int de_nway_reset(struct net_device *dev)
1680{
1681	struct de_private *de = netdev_priv(dev);
1682	u32 status;
1683
1684	if (de->media_type != DE_MEDIA_TP_AUTO)
1685		return -EINVAL;
1686	if (netif_carrier_ok(de->dev))
1687		de_link_down(de);
1688
1689	status = dr32(SIAStatus);
1690	dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1691	if (netif_msg_link(de))
1692		dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
1693			 status, dr32(SIAStatus));
1694	return 0;
1695}
1696
1697static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1698			void *data)
1699{
1700	struct de_private *de = netdev_priv(dev);
1701
1702	regs->version = (DE_REGS_VER << 2) | de->de21040;
1703
1704	spin_lock_irq(&de->lock);
1705	__de_get_regs(de, data);
1706	spin_unlock_irq(&de->lock);
1707}
1708
/* ethtool entry points; the settings/regs accessors take de->lock
 * internally.
 */
static const struct ethtool_ops de_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= de_get_drvinfo,
	.get_regs_len		= de_get_regs_len,
	.get_settings		= de_get_settings,
	.set_settings		= de_set_settings,
	.get_msglevel		= de_get_msglevel,
	.set_msglevel		= de_set_msglevel,
	.get_eeprom		= de_get_eeprom,
	.nway_reset		= de_nway_reset,
	.get_regs		= de_get_regs,
};
1721
/* Read the 6-byte station address from the 21040's serial address ROM.
 * A negative dr32(ROMCmd) value means the next byte is not ready yet,
 * so each byte is polled with a bounded busy-wait.
 */
static void __devinit de21040_get_mac_address (struct de_private *de)
{
	unsigned i;

	dw32 (ROMCmd, 0);	/* Reset the pointer with a dummy write. */
	udelay(5);

	for (i = 0; i < 6; i++) {
		int value, boguscnt = 100000;
		do {
			value = dr32(ROMCmd);
			rmb();
		} while (value < 0 && --boguscnt > 0);
		de->dev->dev_addr[i] = value;
		udelay(1);
		if (boguscnt <= 0)
			pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
	}
}
1741
1742static void __devinit de21040_get_media_info(struct de_private *de)
1743{
1744	unsigned int i;
1745
1746	de->media_type = DE_MEDIA_TP;
1747	de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1748			       SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1749	de->media_advertise = de->media_supported;
1750
1751	for (i = 0; i < DE_MAX_MEDIA; i++) {
1752		switch (i) {
1753		case DE_MEDIA_AUI:
1754		case DE_MEDIA_TP:
1755		case DE_MEDIA_TP_FD:
1756			de->media[i].type = i;
1757			de->media[i].csr13 = t21040_csr13[i];
1758			de->media[i].csr14 = t21040_csr14[i];
1759			de->media[i].csr15 = t21040_csr15[i];
1760			break;
1761		default:
1762			de->media[i].type = DE_MEDIA_INVALID;
1763			break;
1764		}
1765	}
1766}
1767
/* Note: this routine returns extra data bits for size detection. */
/* Bit-bang one word out of the serial EEPROM at 'location': the read
 * command (opcode + address) is clocked out MSB first, then 16 data
 * bits are clocked back in.  The extra readl() after each writel()
 * paces the access.  NOTE(review): assumed to be the usual microwire
 * protocol shared by the tulip drivers -- confirm against the chip
 * datasheet before changing any of the read/write ordering.
 */
static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
{
	int i;
	unsigned retval = 0;
	void __iomem *ee_addr = regs + ROMCmd;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	/* select the chip */
	writel(EE_ENB & ~EE_CS, ee_addr);
	writel(EE_ENB, ee_addr);

	/* Shift the read command bits out. */
	for (i = 4 + addr_len; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writel(EE_ENB | dataval, ee_addr);
		readl(ee_addr);
		writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
	}
	writel(EE_ENB, ee_addr);
	readl(ee_addr);

	/* clock the 16 data bits in, MSB first */
	for (i = 16; i > 0; i--) {
		writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
		writel(EE_ENB, ee_addr);
		readl(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
1803
/* Read and parse the 21041 SROM: station address, default media type
 * and optional per-media CSR13-15 overrides from the controller-0
 * info leaf.  Any inconsistency jumps to bad_srom, which assumes all
 * media are supported; fill_defaults then supplies table values for
 * any CSR left at the 0xffff sentinel.
 */
static void __devinit de21041_get_srom_info (struct de_private *de)
{
	unsigned i, sa_offset = 0, ofs;
	u8 ee_data[DE_EEPROM_SIZE + 6] = {};
	/* probe 6- vs 8-bit EEPROM addressing via the extra data bits */
	unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
	struct de_srom_info_leaf *il;
	void *bufp;

	/* download entire eeprom */
	for (i = 0; i < DE_EEPROM_WORDS; i++)
		((__le16 *)ee_data)[i] =
			cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));

	/* DEC now has a specification but early board makers
	   just put the address in the first EEPROM locations. */
	/* This does  memcmp(eedata, eedata+16, 8) */

#ifndef CONFIG_MIPS_COBALT

	for (i = 0; i < 8; i ++)
		if (ee_data[i] != ee_data[16+i])
			sa_offset = 20;

#endif

	/* store MAC address */
	for (i = 0; i < 6; i ++)
		de->dev->dev_addr[i] = ee_data[i + sa_offset];

	/* get offset of controller 0 info leaf.  ignore 2nd byte. */
	ofs = ee_data[SROMC0InfoLeaf];
	/* leaf plus at least one media block must fit inside the image */
	if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
		goto bad_srom;

	/* get pointer to info leaf */
	il = (struct de_srom_info_leaf *) &ee_data[ofs];

	/* paranoia checks */
	if (il->n_blocks == 0)
		goto bad_srom;
	if ((sizeof(ee_data) - ofs) <
	    (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
		goto bad_srom;

	/* get default media type */
	switch (get_unaligned(&il->default_media)) {
	case 0x0001:  de->media_type = DE_MEDIA_BNC; break;
	case 0x0002:  de->media_type = DE_MEDIA_AUI; break;
	case 0x0204:  de->media_type = DE_MEDIA_TP_FD; break;
	default: de->media_type = DE_MEDIA_TP_AUTO; break;
	}

	if (netif_msg_probe(de))
		pr_info("de%d: SROM leaf offset %u, default media %s\n",
		       de->board_idx, ofs, media_name[de->media_type]);

	/* init SIA register values to defaults */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		de->media[i].type = DE_MEDIA_INVALID;
		de->media[i].csr13 = 0xffff;
		de->media[i].csr14 = 0xffff;
		de->media[i].csr15 = 0xffff;
	}

	/* parse media blocks to see what medias are supported,
	 * and if any custom CSR values are provided
	 */
	bufp = ((void *)il) + sizeof(*il);
	for (i = 0; i < il->n_blocks; i++) {
		struct de_srom_media_block *ib = bufp;
		unsigned idx;

		/* index based on media type in media block */
		switch(ib->opts & MediaBlockMask) {
		case 0: /* 10baseT */
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
					  | SUPPORTED_Autoneg;
			idx = DE_MEDIA_TP;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
			break;
		case 1: /* BNC */
			de->media_supported |= SUPPORTED_BNC;
			idx = DE_MEDIA_BNC;
			break;
		case 2: /* AUI */
			de->media_supported |= SUPPORTED_AUI;
			idx = DE_MEDIA_AUI;
			break;
		case 4: /* 10baseT-FD */
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
					  | SUPPORTED_Autoneg;
			idx = DE_MEDIA_TP_FD;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
			break;
		default:
			goto bad_srom;
		}

		de->media[idx].type = idx;

		if (netif_msg_probe(de))
			pr_info("de%d:   media block #%u: %s",
				de->board_idx, i,
				media_name[de->media[idx].type]);

		bufp += sizeof (ib->opts);

		if (ib->opts & MediaCustomCSRs) {
			de->media[idx].csr13 = get_unaligned(&ib->csr13);
			de->media[idx].csr14 = get_unaligned(&ib->csr14);
			de->media[idx].csr15 = get_unaligned(&ib->csr15);
			/* blocks carrying custom CSRs are three words longer */
			bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
				sizeof(ib->csr15);

			if (netif_msg_probe(de))
				pr_cont(" (%x,%x,%x)\n",
					de->media[idx].csr13,
					de->media[idx].csr14,
					de->media[idx].csr15);

		} else if (netif_msg_probe(de))
			pr_cont("\n");

		/* stop before running off the end of the image */
		if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
			break;
	}

	de->media_advertise = de->media_supported;

fill_defaults:
	/* fill in defaults, for cases where custom CSRs not used */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		if (de->media[i].csr13 == 0xffff)
			de->media[i].csr13 = t21041_csr13[i];
		if (de->media[i].csr14 == 0xffff) {
			/* autonegotiation is broken at least on some chip
			   revisions - rev. 0x21 works, 0x11 does not */
			if (de->pdev->revision < 0x20)
				de->media[i].csr14 = t21041_csr14_brk[i];
			else
				de->media[i].csr14 = t21041_csr14[i];
		}
		if (de->media[i].csr15 == 0xffff)
			de->media[i].csr15 = t21041_csr15[i];
	}

	/* cache the raw image for the ethtool eeprom-dump path */
	de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);

	return;

bad_srom:
	/* for error cases, it's ok to assume we support all these */
	for (i = 0; i < DE_MAX_MEDIA; i++)
		de->media[i].type = i;
	de->media_supported =
		SUPPORTED_10baseT_Half |
		SUPPORTED_10baseT_Full |
		SUPPORTED_Autoneg |
		SUPPORTED_TP |
		SUPPORTED_AUI |
		SUPPORTED_BNC;
	goto fill_defaults;
}
1967
/* Net device entry points; MTU and MAC-address handling use the
 * generic ethernet helpers.
 */
static const struct net_device_ops de_netdev_ops = {
	.ndo_open		= de_open,
	.ndo_stop		= de_close,
	.ndo_set_multicast_list = de_set_rx_mode,
	.ndo_start_xmit		= de_start_xmit,
	.ndo_get_stats		= de_get_stats,
	.ndo_tx_timeout 	= de_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
1979
/* PCI probe: allocate the netdev, map the chip's MMIO CSR window
 * (BAR 1), reset the MAC, read the station address and media info
 * (SROM on the 21041; address ROM + fixed tables on the 21040), then
 * register the interface and put the chip to sleep until it is opened.
 * Errors unwind via the goto chain in reverse acquisition order.
 */
static int __devinit de_init_one (struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct de_private *de;
	int rc;
	void __iomem *regs;
	unsigned long pciaddr;
	static int board_idx = -1;

	board_idx++;

#ifndef MODULE
	if (board_idx == 0)
		printk("%s", version);
#endif

	/* allocate a new ethernet device structure, and fill in defaults */
	dev = alloc_etherdev(sizeof(struct de_private));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &de_netdev_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->ethtool_ops = &de_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	de = netdev_priv(dev);
	/* driver_data 0 identifies a 21040 */
	de->de21040 = ent->driver_data == 0 ? 1 : 0;
	de->pdev = pdev;
	de->dev = dev;
	de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
	de->board_idx = board_idx;
	spin_lock_init (&de->lock);
	init_timer(&de->media_timer);
	if (de->de21040)
		de->media_timer.function = de21040_media_timer;
	else
		de->media_timer.function = de21041_media_timer;
	de->media_timer.data = (unsigned long) de;

	netif_carrier_off(dev);
	netif_stop_queue(dev);

	/* wake up device, assign resources */
	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	/* reserve PCI resources to ensure driver atomicity */
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable;

	/* check for invalid IRQ value */
	if (pdev->irq < 2) {
		rc = -EIO;
		pr_err(PFX "invalid irq (%d) for pci dev %s\n",
		       pdev->irq, pci_name(pdev));
		goto err_out_res;
	}

	dev->irq = pdev->irq;

	/* obtain and check validity of PCI I/O address */
	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
		rc = -EIO;
		pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
		       (unsigned long long)pci_resource_len(pdev, 1),
		       pci_name(pdev));
		goto err_out_res;
	}

	/* remap CSR registers */
	regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
		       (unsigned long long)pci_resource_len(pdev, 1),
		       pciaddr, pci_name(pdev));
		goto err_out_res;
	}
	dev->base_addr = (unsigned long) regs;
	de->regs = regs;

	de_adapter_wake(de);

	/* make sure hardware is not running */
	rc = de_reset_mac(de);
	if (rc) {
		pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
		goto err_out_iomap;
	}

	/* get MAC address, initialize default media type and
	 * get list of supported media
	 */
	if (de->de21040) {
		de21040_get_mac_address(de);
		de21040_get_media_info(de);
	} else {
		de21041_get_srom_info(de);
	}

	/* register new network interface with kernel */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	/* print info about board and interface just registered */
	dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
		 de->de21040 ? "21040" : "21041",
		 dev->base_addr,
		 dev->dev_addr,
		 dev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable busmastering */
	pci_set_master(pdev);

	/* put adapter to sleep */
	de_adapter_sleep(de);

	return 0;

err_out_iomap:
	kfree(de->ee_data);
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);
	return rc;
}
2123
/* PCI unplug / module unload: undo everything de_init_one set up,
 * in reverse order.
 */
static void __devexit de_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct de_private *de = netdev_priv(dev);

	BUG_ON(!dev);
	unregister_netdev(dev);
	kfree(de->ee_data);
	iounmap(de->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
2138
2139#ifdef CONFIG_PM
2140
/* PCI suspend: if the interface is up, stop the hardware and timer,
 * detach the net device, drop the ring buffers and put the chip to
 * sleep; otherwise just detach.  RTNL serializes against open/close.
 */
static int de_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct de_private *de = netdev_priv(dev);

	rtnl_lock();
	if (netif_running (dev)) {
		del_timer_sync(&de->media_timer);

		disable_irq(dev->irq);
		spin_lock_irq(&de->lock);

		de_stop_hw(de);
		netif_stop_queue(dev);
		netif_device_detach(dev);
		netif_carrier_off(dev);

		spin_unlock_irq(&de->lock);
		enable_irq(dev->irq);

		/* Update the error counts. */
		__de_get_stats(de);

		/* no IRQ may be in flight when the rings are dropped */
		synchronize_irq(dev->irq);
		de_clean_rings(de);

		de_adapter_sleep(de);
		pci_disable_device(pdev);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}
2175
2176static int de_resume (struct pci_dev *pdev)
2177{
2178	struct net_device *dev = pci_get_drvdata (pdev);
2179	struct de_private *de = netdev_priv(dev);
2180	int retval = 0;
2181
2182	rtnl_lock();
2183	if (netif_device_present(dev))
2184		goto out;
2185	if (!netif_running(dev))
2186		goto out_attach;
2187	if ((retval = pci_enable_device(pdev))) {
2188		dev_err(&dev->dev, "pci_enable_device failed in resume\n");
2189		goto out;
2190	}
2191	pci_set_master(pdev);
2192	de_init_rings(de);
2193	de_init_hw(de);
2194out_attach:
2195	netif_device_attach(dev);
2196out:
2197	rtnl_unlock();
2198	return 0;
2199}
2200
2201#endif /* CONFIG_PM */
2202
/* PCI driver glue; suspend/resume are only wired up under CONFIG_PM. */
static struct pci_driver de_driver = {
	.name		= DRV_NAME,
	.id_table	= de_pci_tbl,
	.probe		= de_init_one,
	.remove		= __devexit_p(de_remove_one),
#ifdef CONFIG_PM
	.suspend	= de_suspend,
	.resume		= de_resume,
#endif
};
2213
/* Module init: print the version banner (modular builds only; the
 * built-in case prints it from de_init_one) and register the driver.
 */
static int __init de_init (void)
{
#ifdef MODULE
	printk("%s", version);
#endif
	return pci_register_driver(&de_driver);
}
2221
/* Module exit: detach the PCI driver. */
static void __exit de_exit (void)
{
	pci_unregister_driver (&de_driver);
}
2226
/* Standard module entry/exit hooks. */
module_init(de_init);
module_exit(de_exit);
2229