1/* winbond-840.c: A Linux PCI network adapter device driver. */
2/*
3	Written 1998-2001 by Donald Becker.
4
5	This software may be used and distributed according to the terms of
6	the GNU General Public License (GPL), incorporated herein by reference.
7	Drivers based on or derived from this code fall under the GPL and must
8	retain the authorship, copyright and license notice.  This file is not
9	a complete program and may only be used when the entire operating
10	system is licensed under the GPL.
11
12	The author may be reached as becker@scyld.com, or C/O
13	Scyld Computing Corporation
14	410 Severn Ave., Suite 210
15	Annapolis MD 21403
16
17	Support and updates available at
18	http://www.scyld.com/network/drivers.html
19
20	Do not remove the copyright information.
21	Do not change the version information unless an improvement has been made.
22	Merely removing my name, as Compex has done in the past, does not count
23	as an improvement.
24
25	Changelog:
26	* ported to 2.4
27		???
28	* spin lock update, memory barriers, new style dma mappings
29		limit each tx buffer to < 1024 bytes
		remove DescIntr from Rx descriptors (that's a Tx flag)
31		remove next pointer from Tx descriptors
32		synchronize tx_q_bytes
33		software reset in tx_timeout
34			Copyright (C) 2000 Manfred Spraul
35	* further cleanups
36		power management.
37		support for big endian descriptors
38			Copyright (C) 2001 Manfred Spraul
39  	* ethtool support (jgarzik)
40	* Replace some MII-related magic numbers with constants (jgarzik)
41
42	TODO:
43	* enable pci_power_off
44	* Wake-On-LAN
45*/
46
47#define DRV_NAME	"winbond-840"
48#define DRV_VERSION	"1.01-e"
49#define DRV_RELDATE	"Sep-11-2006"
50
51
52/* Automatically extracted configuration info:
53probe-func: winbond840_probe
54config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
55
56c-help-name: Winbond W89c840 PCI Ethernet support
57c-help-symbol: CONFIG_WINBOND_840
58c-help: This driver is for the Winbond W89c840 chip.  It also works with
59c-help: the TX9882 chip on the Compex RL100-ATX board.
60c-help: More specific information and updates are available from
61c-help: http://www.scyld.com/network/drivers.html
62*/
63
64/* The user-configurable values.
65   These may be modified when a driver module is loaded.*/
66
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Upper bound on events processed in one pass of the interrupt handler
   (see MODULE_PARM_DESC below). */
static int max_interrupt_work = 20;
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The '840 uses a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'.
*/
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
85
86/* Operational parameters that are set at compile time. */
87
88/* Keep the ring sizes a power of two for compile efficiency.
89   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
90   Making the Tx ring too large decreases the effectiveness of channel
91   bonding and packet priority.
92   There are no ill effects from too-large receive rings. */
93#define TX_QUEUE_LEN	10		/* Limit ring entries actually used.  */
94#define TX_QUEUE_LEN_RESTART	5
95
96#define TX_BUFLIMIT	(1024-128)
97
98/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
99   To avoid overflowing we don't queue again until we have room for a
100   full-size packet.
101 */
102#define TX_FIFO_SIZE (2048)
103#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
104
105
106/* Operational parameters that usually are not changed. */
107/* Time in jiffies before concluding the transmitter is hung. */
108#define TX_TIMEOUT  (2*HZ)
109
110#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
111
112/* Include files, designed to support most kernel versions 2.0.0 and later. */
113#include <linux/module.h>
114#include <linux/kernel.h>
115#include <linux/string.h>
116#include <linux/timer.h>
117#include <linux/errno.h>
118#include <linux/ioport.h>
119#include <linux/slab.h>
120#include <linux/interrupt.h>
121#include <linux/pci.h>
122#include <linux/dma-mapping.h>
123#include <linux/netdevice.h>
124#include <linux/etherdevice.h>
125#include <linux/skbuff.h>
126#include <linux/init.h>
127#include <linux/delay.h>
128#include <linux/ethtool.h>
129#include <linux/mii.h>
130#include <linux/rtnetlink.h>
131#include <linux/crc32.h>
132#include <linux/bitops.h>
133#include <asm/uaccess.h>
134#include <asm/processor.h>		/* Processor type for cache alignment. */
135#include <asm/io.h>
136#include <asm/irq.h>
137
138#include "tulip.h"
139
140/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE "  Donald Becker <becker@scyld.com>\n"
KERN_INFO "  http://www.scyld.com/network/drivers.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Module parameters.  Permission 0 means they are set at load time only
   and not exposed through sysfs. */
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
162
163/*
164				Theory of Operation
165
166I. Board Compatibility
167
168This driver is for the Winbond w89c840 chip.
169
170II. Board-specific settings
171
172None.
173
174III. Driver operation
175
176This chip is very similar to the Digital 21*4* "Tulip" family.  The first
177twelve registers and the descriptor format are nearly identical.  Read a
178Tulip manual for operational details.
179
180A significant difference is that the multicast filter and station address are
181stored in registers rather than loaded through a pseudo-transmit packet.
182
183Unlike the Tulip, transmit buffers are limited to 1KB.  To transmit a
184full-sized packet we must use both data buffers in a descriptor.  Thus the
185driver uses ring mode where descriptors are implicitly sequential in memory,
186rather than using the second descriptor address as a chain pointer to
187subsequent descriptors.
188
189IV. Notes
190
191If you are going to almost clone a Tulip, why not go all the way and avoid
192the need for a new driver?
193
194IVb. References
195
196http://www.scyld.com/expert/100mbps.html
197http://www.scyld.com/expert/NWay.html
198http://www.winbond.com.tw/
199
200IVc. Errata
201
202A horrible bug exists in the transmit FIFO.  Apparently the chip doesn't
203correctly detect a full FIFO, and queuing more than 2048 bytes may result in
204silent data corruption.
205
206Test with 'ping -s 10000' on a fast computer.
207
208*/
209
210
211
212/*
213  PCI probe table.
214*/
/* Per-board capability bits stored in pci_id_tbl[].drv_flags. */
enum chip_capability_flags {
	CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};
218
/* PCI IDs handled by this driver.  The last field (driver_data) indexes
   the matching entry in pci_id_tbl[] below. */
static const struct pci_device_id w840_pci_tbl[] = {
	{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153,     0, 0, 0 },	/* subsystem 0x8153 variant */
	{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },	/* generic W89c840 */
	{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },	/* Compex RL100-ATX */
	{ }
};
225MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
226
/* Length of the register window mapped from the PCI BAR (pci_iomap). */
enum {
	netdev_res_size		= 128,	/* size of PCI BAR resource */
};
230
/* Static per-board description, one entry per w840_pci_tbl driver_data. */
struct pci_id_info {
        const char *name;
        int drv_flags;		/* Driver use, intended as capability flags. */
};
235
/* Board names and capabilities, indexed by w840_pci_tbl[].driver_data. */
static const struct pci_id_info pci_id_tbl[] __devinitdata = {
	{ 				/* Sometime a Level-One switch card. */
	  "Winbond W89c840",	CanHaveMII | HasBrokenTx | FDXOnNoMII},
	{ "Winbond W89c840",	CanHaveMII | HasBrokenTx},
	{ "Compex RL100-ATX",	CanHaveMII | HasBrokenTx},
	{ }	/* terminate list. */
};
243
244/* This driver was written to use PCI memory space, however some x86 systems
245   work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
246*/
247
248/* Offsets to the Command and Status Registers, "CSRs".
249   While similar to the Tulip, these registers are longword aligned.
250   Note: It's not useful to define symbolic names for every register bit in
251   the device.  The name can only partially document the semantics and make
252   the driver longer and more difficult to read.
253*/
/* Longword-aligned CSR offsets from the mapped BAR base (see the
   comment block above for the Tulip relationship). */
enum w840_offsets {
	PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
	RxRingPtr=0x0C, TxRingPtr=0x10,
	IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
	RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
	CurRxDescAddr=0x30, CurRxBufAddr=0x34,			/* Debug use */
	MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};
263
264/* Bits in the NetworkConfig register. */
/* Rx filter mode bits, OR-ed into the NetworkConfig (CSR6) value. */
enum rx_mode_bits {
	AcceptErr=0x80,
	RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
	RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};
270
/* Control bits in the MIICtrl register, used to bit-bang the serial
   MDIO protocol (see mdio_read/mdio_write below). */
enum mii_reg_bits {
	MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
	MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};
275
276/* The Tulip Rx and Tx buffer descriptors. */
struct w840_rx_desc {
	s32 status;	/* DescOwned gives the buffer to the chip (see init_rxtx_rings) */
	s32 length;	/* buffer size; DescEndRing marks the last ring entry */
	u32 buffer1;	/* DMA address of the receive buffer */
	u32 buffer2;	/* unused for Rx: the driver runs in ring mode (see header comment) */
};
283
struct w840_tx_desc {
	s32 status;		/* DescOwned while the chip owns the packet */
	s32 length;		/* DescWholePkt | buffer lengths | DescEndRing on last entry */
	u32 buffer1, buffer2;	/* DMA addresses; buffer2 carries the tail past TX_BUFLIMIT */
};
289
#define MII_CNT		1 /* winbond only supports one MII */
/* Per-device driver state, stored in the net_device private area. */
struct netdev_private {
	struct w840_rx_desc *rx_ring;		/* Rx descriptors; Tx ring follows in the same allocation */
	dma_addr_t	rx_addr[RX_RING_SIZE];	/* mapped DMA addresses of the Rx buffers */
	struct w840_tx_desc *tx_ring;		/* points just past rx_ring (see init_rxtx_rings) */
	dma_addr_t	tx_addr[TX_RING_SIZE];	/* mapped DMA addresses of queued Tx packets */
	dma_addr_t ring_dma_addr;		/* bus address of the combined ring allocation */
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;			/* protects ring state and CSR6 updates */
	int chip_id, drv_flags;			/* index into pci_id_tbl[] and its capability flags */
	struct pci_dev *pci_dev;
	int csr6;				/* software shadow of the NetworkConfig register */
	struct w840_rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
	unsigned int cur_tx, dirty_tx;
	unsigned int tx_q_bytes;		/* bytes queued, for the Tx-FIFO-overflow workaround */
	unsigned int tx_full;				/* The Tx queue is full. */
	/* MII transceiver section. */
	int mii_cnt;						/* MII device addresses. */
	unsigned char phys[MII_CNT];		/* MII device addresses, but only the first is used */
	u32 mii;				/* PHY ID: (PHYSID1 << 16) | PHYSID2 */
	struct mii_if_info mii_if;
	void __iomem *base_addr;		/* mapped register window from the BAR */
};
321
/* Forward declarations; definitions appear below in roughly this order. */
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static int  update_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static int alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static int  start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int  netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  netdev_close(struct net_device *dev);
344
345
346
347static int __devinit w840_probe1 (struct pci_dev *pdev,
348				  const struct pci_device_id *ent)
349{
350	struct net_device *dev;
351	struct netdev_private *np;
352	static int find_cnt;
353	int chip_idx = ent->driver_data;
354	int irq;
355	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
356	void __iomem *ioaddr;
357
358	i = pci_enable_device(pdev);
359	if (i) return i;
360
361	pci_set_master(pdev);
362
363	irq = pdev->irq;
364
365	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
366		printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
367		       pci_name(pdev));
368		return -EIO;
369	}
370	dev = alloc_etherdev(sizeof(*np));
371	if (!dev)
372		return -ENOMEM;
373	SET_MODULE_OWNER(dev);
374	SET_NETDEV_DEV(dev, &pdev->dev);
375
376	if (pci_request_regions(pdev, DRV_NAME))
377		goto err_out_netdev;
378
379	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
380	if (!ioaddr)
381		goto err_out_free_res;
382
383	for (i = 0; i < 3; i++)
384		((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
385
386	/* Reset the chip to erase previous misconfiguration.
387	   No hold time required! */
388	iowrite32(0x00000001, ioaddr + PCIBusCfg);
389
390	dev->base_addr = (unsigned long)ioaddr;
391	dev->irq = irq;
392
393	np = netdev_priv(dev);
394	np->pci_dev = pdev;
395	np->chip_id = chip_idx;
396	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
397	spin_lock_init(&np->lock);
398	np->mii_if.dev = dev;
399	np->mii_if.mdio_read = mdio_read;
400	np->mii_if.mdio_write = mdio_write;
401	np->base_addr = ioaddr;
402
403	pci_set_drvdata(pdev, dev);
404
405	if (dev->mem_start)
406		option = dev->mem_start;
407
408	/* The lower four bits are the media type. */
409	if (option > 0) {
410		if (option & 0x200)
411			np->mii_if.full_duplex = 1;
412		if (option & 15)
413			printk(KERN_INFO "%s: ignoring user supplied media type %d",
414				dev->name, option & 15);
415	}
416	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
417		np->mii_if.full_duplex = 1;
418
419	if (np->mii_if.full_duplex)
420		np->mii_if.force_media = 1;
421
422	/* The chip-specific entries in the device structure. */
423	dev->open = &netdev_open;
424	dev->hard_start_xmit = &start_tx;
425	dev->stop = &netdev_close;
426	dev->get_stats = &get_stats;
427	dev->set_multicast_list = &set_rx_mode;
428	dev->do_ioctl = &netdev_ioctl;
429	dev->ethtool_ops = &netdev_ethtool_ops;
430	dev->tx_timeout = &tx_timeout;
431	dev->watchdog_timeo = TX_TIMEOUT;
432
433	i = register_netdev(dev);
434	if (i)
435		goto err_out_cleardev;
436
437	printk(KERN_INFO "%s: %s at %p, ",
438		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
439	for (i = 0; i < 5; i++)
440			printk("%2.2x:", dev->dev_addr[i]);
441	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
442
443	if (np->drv_flags & CanHaveMII) {
444		int phy, phy_idx = 0;
445		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
446			int mii_status = mdio_read(dev, phy, MII_BMSR);
447			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
448				np->phys[phy_idx++] = phy;
449				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
450				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
451						mdio_read(dev, phy, MII_PHYSID2);
452				printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status "
453					   "0x%4.4x advertising %4.4x.\n",
454					   dev->name, np->mii, phy, mii_status, np->mii_if.advertising);
455			}
456		}
457		np->mii_cnt = phy_idx;
458		np->mii_if.phy_id = np->phys[0];
459		if (phy_idx == 0) {
460				printk(KERN_WARNING "%s: MII PHY not found -- this device may "
461					   "not operate correctly.\n", dev->name);
462		}
463	}
464
465	find_cnt++;
466	return 0;
467
468err_out_cleardev:
469	pci_set_drvdata(pdev, NULL);
470	pci_iounmap(pdev, ioaddr);
471err_out_free_res:
472	pci_release_regions(pdev);
473err_out_netdev:
474	free_netdev (dev);
475	return -ENODEV;
476}
477
478
479/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
480   often serial bit streams generated by the host processor.
481   The example below is for the common 93c46 EEPROM, 64 16 bit words. */
482
483/* Delay between EEPROM clock transitions.
484   No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
485   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
486   made udelay() unreliable.
487   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated.
489*/
490#define eeprom_delay(ee_addr)	ioread32(ee_addr)
491
492enum EEPROM_Ctrl_Bits {
493	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
494	EE_ChipSelect=0x801, EE_DataIn=0x08,
495};
496
/* The EEPROM commands include the always-set leading bit. */
498enum EEPROM_Cmds {
499	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
500};
501
502static int eeprom_read(void __iomem *addr, int location)
503{
504	int i;
505	int retval = 0;
506	void __iomem *ee_addr = addr + EECtrl;
507	int read_cmd = location | EE_ReadCmd;
508	iowrite32(EE_ChipSelect, ee_addr);
509
510	/* Shift the read command bits out. */
511	for (i = 10; i >= 0; i--) {
512		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
513		iowrite32(dataval, ee_addr);
514		eeprom_delay(ee_addr);
515		iowrite32(dataval | EE_ShiftClk, ee_addr);
516		eeprom_delay(ee_addr);
517	}
518	iowrite32(EE_ChipSelect, ee_addr);
519	eeprom_delay(ee_addr);
520
521	for (i = 16; i > 0; i--) {
522		iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
523		eeprom_delay(ee_addr);
524		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
525		iowrite32(EE_ChipSelect, ee_addr);
526		eeprom_delay(ee_addr);
527	}
528
529	/* Terminate the EEPROM access. */
530	iowrite32(0, ee_addr);
531	return retval;
532}
533
534/*  MII transceiver control section.
535	Read and write the MII registers using software-generated serial
536	MDIO protocol.  See the MII specifications or DP83840A data sheet
537	for details.
538
539	The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
540	met by back-to-back 33Mhz PCI cycles. */
541#define mdio_delay(mdio_addr) ioread32(mdio_addr)
542
/* Set iff a MII transceiver on any interface requires mdio preamble.
   This is only set with older transceivers, so the extra
   code size of a per-interface flag is not worthwhile. */
546static char mii_preamble_required = 1;
547
548#define MDIO_WRITE0 (MDIO_EnbOutput)
549#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
550
551/* Generate the preamble required for initial synchronization and
552   a few older transceivers. */
553static void mdio_sync(void __iomem *mdio_addr)
554{
555	int bits = 32;
556
557	/* Establish sync by sending at least 32 logic ones. */
558	while (--bits >= 0) {
559		iowrite32(MDIO_WRITE1, mdio_addr);
560		mdio_delay(mdio_addr);
561		iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
562		mdio_delay(mdio_addr);
563	}
564}
565
/* Read MII register @location from PHY @phy_id by bit-banging the MDIO
   protocol through the MIICtrl register.  Returns the 16-bit register
   value. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	/* Frame: start + read opcode (0xf6) + PHY address + register address. */
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 20; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Drop the trailing idle bit and mask to the 16 data bits. */
	return (retval>>1) & 0xffff;
}
595
596static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
597{
598	struct netdev_private *np = netdev_priv(dev);
599	void __iomem *mdio_addr = np->base_addr + MIICtrl;
600	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
601	int i;
602
603	if (location == 4  &&  phy_id == np->phys[0])
604		np->mii_if.advertising = value;
605
606	if (mii_preamble_required)
607		mdio_sync(mdio_addr);
608
609	/* Shift the command bits out. */
610	for (i = 31; i >= 0; i--) {
611		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
612
613		iowrite32(dataval, mdio_addr);
614		mdio_delay(mdio_addr);
615		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
616		mdio_delay(mdio_addr);
617	}
618	/* Clear out extra bits. */
619	for (i = 2; i > 0; i--) {
620		iowrite32(MDIO_EnbIn, mdio_addr);
621		mdio_delay(mdio_addr);
622		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
623		mdio_delay(mdio_addr);
624	}
625	return;
626}
627
628
/* Bring the interface up: reset the chip, grab the (shared) IRQ,
   allocate and initialize the descriptor rings, program the registers
   and start the link-monitoring timer.  Returns 0 or a negative errno. */
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	iowrite32(0x00000001, ioaddr + PCIBusCfg);		/* Reset */

	/* Mark the device absent during setup; reattached under np->lock
	   below once the rings and registers are ready. */
	netif_device_detach(dev);
	i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		goto out_err;

	if (debug > 1)
		printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
			   dev->name, dev->irq);

	if((i=alloc_ringdesc(dev)))
		goto out_err;

	spin_lock_irq(&np->lock);
	netif_device_attach(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 1*HZ;	/* first check after one second */
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;				/* timer handler */
	add_timer(&np->timer);
	return 0;
out_err:
	netif_device_attach(dev);
	return i;
}
669
670#define MII_DAVICOM_DM9101	0x0181b800
671
/* Query the MII transceiver and compute the NetworkConfig (CSR6) value
   matching the current link: returns np->csr6 with the full-duplex
   (0x200) and 100MBit (0x20000000) bits updated.  Also keeps the
   netif_carrier state in sync.  Visible callers hold np->lock. */
static int update_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int duplex, fasteth, result, mii_reg;

	/* BMSR */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);

	if (mii_reg == 0xffff)		/* PHY unreadable: keep current settings */
		return np->csr6;
	/* reread: the link status bit is sticky */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
	if (!(mii_reg & 0x4)) {		/* link-status bit clear: no link */
		if (netif_carrier_ok(dev)) {
			if (debug)
				printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n",
					dev->name, np->phys[0]);
			netif_carrier_off(dev);
		}
		return np->csr6;
	}
	if (!netif_carrier_ok(dev)) {
		if (debug)
			printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n",
				dev->name, np->phys[0]);
		netif_carrier_on(dev);
	}

	if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
		/* If the link partner doesn't support autonegotiation
		 * the MII detects its abilities with the "parallel detection".
		 * Some MIIs update the LPA register to the result of the parallel
		 * detection, some don't.
		 * The Davicom PHY [at least 0181b800] doesn't.
		 * Instead bit 9 and 13 of the BMCR are updated to the result
		 * of the negotiation..
		 */
		mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
		duplex = mii_reg & BMCR_FULLDPLX;
		fasteth = mii_reg & BMCR_SPEED100;
	} else {
		/* Normal case: intersect our advertised abilities with the
		   link partner's (LPA) to find the negotiated mode. */
		int negotiated;
		mii_reg	= mdio_read(dev, np->phys[0], MII_LPA);
		negotiated = mii_reg & np->mii_if.advertising;

		duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
		fasteth = negotiated & 0x380;
	}
	duplex |= np->mii_if.force_media;
	/* remove fastether and fullduplex */
	result = np->csr6 & ~0x20000200;
	if (duplex)
		result |= 0x200;
	if (fasteth)
		result |= 0x20000000;
	if (result != np->csr6 && debug)
		printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
				 dev->name, fasteth ? 100 : 10,
			   	duplex ? "full" : "half", np->phys[0]);
	return result;
}
733
734#define RXTX_TIMEOUT	2000
/* Safely change NetworkConfig (CSR6) to @new: stop the Tx and Rx
   processes, poll IntrStatus until both state machines report
   stopped/suspended, then write the new value so they restart with the
   new configuration.  Callers hold np->lock. */
static inline void update_csr6(struct net_device *dev, int new)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int limit = RXTX_TIMEOUT;	/* bounded 1us polls so we never spin forever */

	if (!netif_device_present(dev))
		new = 0;		/* device detached: force everything off */
	if (new==np->csr6)
		return;
	/* stop both Tx and Rx processes */
	iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
	/* wait until they have really stopped */
	for (;;) {
		int csr5 = ioread32(ioaddr + IntrStatus);
		int t;

		/* State-machine fields in IntrStatus: 0 or 1 means
		   stopped/suspended. */
		t = (csr5 >> 17) & 0x07;
		if (t==0||t==1) {
			/* rx stopped */
			t = (csr5 >> 20) & 0x07;
			if (t==0||t==1)
				break;		/* tx stopped as well */
		}

		limit--;
		if(!limit) {
			printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n",
					dev->name, csr5);
			break;
		}
		udelay(1);
	}
	np->csr6 = new;
	/* and restart them with the new configuration */
	iowrite32(np->csr6, ioaddr + NetworkConfig);
	if (new & 0x200)
		np->mii_if.full_duplex = 1;
}
774
/* Periodic link monitor: re-reads the PHY via update_link() and
   reprograms CSR6 if speed/duplex changed, then rearms itself for
   10 seconds later. */
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
			   "config %8.8x.\n",
			   dev->name, ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
	spin_lock_irq(&np->lock);
	update_csr6(dev, update_link(dev));
	spin_unlock_irq(&np->lock);
	np->timer.expires = jiffies + 10*HZ;
	add_timer(&np->timer);
}
792
/* Reset both descriptor rings to their initial state, allocate and map
   the Rx buffers, and tell the chip where the rings live.  The Tx ring
   sits directly after the Rx ring in the same DMA allocation. */
static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];

	/* Initial all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
					np->rx_buf_sz,PCI_DMA_FROMDEVICE);

		np->rx_ring[i].buffer1 = np->rx_addr[i];
		/* Set DescOwned last: this hands the buffer to the chip. */
		np->rx_ring[i].status = DescOwned;
	}

	np->cur_rx = 0;
	/* Zero when every buffer was allocated; otherwise records (as an
	   unsigned wrap) how many entries still need buffers. */
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	/* Give the chip the bus addresses of both rings. */
	iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
	iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		np->base_addr + TxRingPtr);

}
839
840static void free_rxtx_rings(struct netdev_private* np)
841{
842	int i;
843	/* Free all the skbuffs in the Rx queue. */
844	for (i = 0; i < RX_RING_SIZE; i++) {
845		np->rx_ring[i].status = 0;
846		if (np->rx_skbuff[i]) {
847			pci_unmap_single(np->pci_dev,
848						np->rx_addr[i],
849						np->rx_skbuff[i]->len,
850						PCI_DMA_FROMDEVICE);
851			dev_kfree_skb(np->rx_skbuff[i]);
852		}
853		np->rx_skbuff[i] = NULL;
854	}
855	for (i = 0; i < TX_RING_SIZE; i++) {
856		if (np->tx_skbuff[i]) {
857			pci_unmap_single(np->pci_dev,
858						np->tx_addr[i],
859						np->tx_skbuff[i]->len,
860						PCI_DMA_TODEVICE);
861			dev_kfree_skb(np->tx_skbuff[i]);
862		}
863		np->tx_skbuff[i] = NULL;
864	}
865}
866
/* Program the station address, PCI bus configuration, operating mode
   (CSR6) and interrupt mask.  Visible callers (netdev_open, tx_timeout)
   hold np->lock and have already initialized the rings. */
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
#ifdef __BIG_ENDIAN
	i = (1<<20);	/* Big-endian descriptors */
#else
	i = 0;
#endif
	i |= (0x04<<2);		/* skip length 4 u32 */
	i |= 0x02;		/* give Rx priority */

	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword cache alignment, 8 longword burst.
	   586: Set 16 longword cache alignment, no burst limit.
	   Cache alignment bits 15:14	     Burst length 13:8
		0000	<not allowed> 		0000 align to cache	0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32  longwords		0400 4 longwords */

#if defined(__i386__) && !defined(MODULE)
	if (boot_cpu_data.x86 <= 4) {
		i |= 0x4800;
		printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
			   "alignment to 8 longwords.\n", dev->name);
	} else {
		i |= 0xE000;
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || \
	defined(__ia64__) || defined(__x86_64__)
	i |= 0xE000;
#elif defined(CONFIG_SPARC) || defined(CONFIG_PARISC)
	i |= 0x4800;
#else
#warning Processor architecture undefined
	i |= 0x4800;
#endif
	iowrite32(i, ioaddr + PCIBusCfg);

	/* Start from a clean CSR6 shadow so update_csr6 always programs
	   the chip. */
	np->csr6 = 0;
	/* 128 byte Tx threshold;
		Transmit on; Receive on; */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(0x1A0F5, ioaddr + IntrStatus);
	iowrite32(0x1A0F5, ioaddr + IntrEnable);

	/* Poke the chip to start polling the Rx ring. */
	iowrite32(0, ioaddr + RxStartDemand);
}
924
/* Watchdog handler: the transmitter has been quiet for TX_TIMEOUT
   jiffies.  Dump diagnostic state, then software-reset the chip and
   rebuild the rings and registers from scratch. */
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
		   " resetting...\n", dev->name, ioread32(ioaddr + IntrStatus));

	/* Dump the descriptor status words of both rings for debugging. */
	{
		int i;
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
		printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %8.8x", np->tx_ring[i].status);
		printk("\n");
	}
	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
				np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",ioread32(ioaddr+0x4C));

	/* Keep the interrupt handler out while we tear everything down. */
	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	/*
	 * Under high load dirty_tx and the internal tx descriptor pointer
	 * come out of sync, thus perform a software reset and reinitialize
	 * everything.
	 */

	iowrite32(1, np->base_addr+PCIBusCfg);	/* software reset */
	udelay(1);

	free_rxtx_rings(np);
	init_rxtx_rings(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	netif_wake_queue(dev);
	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	return;
}
969
970/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
971static int alloc_ringdesc(struct net_device *dev)
972{
973	struct netdev_private *np = netdev_priv(dev);
974
975	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
976
977	np->rx_ring = pci_alloc_consistent(np->pci_dev,
978			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
979			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
980			&np->ring_dma_addr);
981	if(!np->rx_ring)
982		return -ENOMEM;
983	init_rxtx_rings(dev);
984	return 0;
985}
986
987static void free_ringdesc(struct netdev_private *np)
988{
989	pci_free_consistent(np->pci_dev,
990			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
991			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
992			np->rx_ring, np->ring_dma_addr);
993
994}
995
/* Queue one packet for transmission.  Always accepts the packet
   (returns 0); afterwards the queue is stopped if the ring is full or,
   on chips with the Tx FIFO bug, too many bytes are outstanding. */
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	/* Map the packet for DMA; the address is kept in tx_addr so
	   netdev_tx_done() can unmap it later. */
	np->tx_addr[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);
	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
	if (skb->len < TX_BUFLIMIT) {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
	} else {
		/* Each Tx buffer is limited to < TX_BUFLIMIT bytes (see the
		   changelog at the top of the file), so longer packets are
		   split across buffer1 and buffer2 of the same descriptor. */
		int len = skb->len - TX_BUFLIMIT;

		np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}
	/* Last slot carries the end-of-ring marker so the chip wraps. */
	if(entry == TX_RING_SIZE-1)
		np->tx_ring[entry].length |= DescEndRing;

	/* Now acquire the irq spinlock.
	 * The difficult race is the ordering between
	 * increasing np->cur_tx and setting DescOwned:
	 * - if np->cur_tx is increased first the interrupt
	 *   handler could consider the packet as transmitted
	 *   since DescOwned is cleared.
	 * - If DescOwned is set first the NIC could report the
	 *   packet as sent, but the interrupt handler would ignore it
	 *   since the np->cur_tx was not yet increased.
	 */
	spin_lock_irq(&np->lock);
	np->cur_tx++;

	wmb(); /* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = DescOwned;
	wmb(); /* flush status and kick the hardware */
	iowrite32(0, np->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;
	/* Stop the queue when the ring is full, or when a chip with the
	   broken-Tx bug has exceeded its outstanding-byte budget. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
		((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
		wmb();
		np->tx_full = 1;
	}
	spin_unlock_irq(&np->lock);

	dev->trans_start = jiffies;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			   dev->name, np->cur_tx, entry);
	}
	return 0;
}
1057
/* Reclaim Tx descriptors the chip has finished with: gather
   error/byte statistics, unmap and free the skbs, and restart the
   queue once enough ring slots and FIFO byte budget are free again.
   Caller holds np->lock (called from the interrupt handler). */
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		int tx_status = np->tx_ring[entry].status;

		/* Negative status: the ownership bit is still set, the chip
		   has not finished with this descriptor yet. */
		if (tx_status < 0)
			break;
		if (tx_status & 0x8000) { 	/* There was an error, log it. */
#ifndef final_version
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
					   dev->name, tx_status);
#endif
			np->stats.tx_errors++;
			if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
			if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
			if (tx_status & 0x0200) np->stats.tx_window_errors++;
			if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
				np->stats.tx_heartbeat_errors++;
		} else {
#ifndef final_version
			if (debug > 3)
				printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n",
					   dev->name, entry, tx_status);
#endif
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.collisions += (tx_status >> 3) & 15;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		pci_unmap_single(np->pci_dev,np->tx_addr[entry],
					np->tx_skbuff[entry]->len,
					PCI_DMA_TODEVICE);
		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (np->tx_full &&
		np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
		np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
		/* The ring is no longer full, clear tbusy. */
		np->tx_full = 0;
		wmb();
		netif_wake_queue(dev);
	}
}
1107
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int work_limit = max_interrupt_work;
	int handled = 0;

	/* Device detached (e.g. suspended): not our interrupt. */
	if (!netif_device_present(dev))
		return IRQ_NONE;
	do {
		u32 intr_status = ioread32(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
			break;

		handled = 1;

		if (intr_status & (RxIntr | RxNoBuf))
			netdev_rx(dev);
		if (intr_status & RxNoBuf)
			iowrite32(0, ioaddr + RxStartDemand);

		/* Reclaim completed Tx descriptors under np->lock. */
		if (intr_status & (TxNoBuf | TxIntr) &&
			np->cur_tx != np->dirty_tx) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
						   TimerInt | TxDied))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=0x%4.4x.\n", dev->name, intr_status);
			/* Set the timer to re-enable the other interrupts after
			   10*82usec ticks. */
			spin_lock(&np->lock);
			if (netif_device_present(dev)) {
				iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
				iowrite32(10, ioaddr + GPTimer);
			}
			spin_unlock(&np->lock);
			break;
		}
	} while (1);

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread32(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
1172
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	/* Process at most the descriptors currently owned by the driver. */
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   status);
		/* Negative status: ownership bit still set, chip owns it. */
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
						   "multiple buffers, entry %#x status %4.4x!\n",
						   dev->name, np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				np->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   " status %x.\n", pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
							    np->rx_skbuff[entry]->len,
							    PCI_DMA_FROMDEVICE);
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
							       np->rx_skbuff[entry]->len,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Large packet (or copy-skb allocation failed):
				   hand the ring buffer itself up the stack; the
				   emptied slot is refilled in the loop below. */
				pci_unmap_single(np->pci_dev,np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version				    /* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (debug > 5)
				printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
					   "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
					   "%d.%d.%d.%d.\n",
					   skb->data[0], skb->data[1], skb->data[2], skb->data[3],
					   skb->data[4], skb->data[5], skb->data[6], skb->data[7],
					   skb->data[8], skb->data[9], skb->data[10],
					   skb->data[11], skb->data[12], skb->data[13],
					   skb->data[14], skb->data[15], skb->data[16],
					   skb->data[17]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;			/* Better luck next round. */
			np->rx_addr[entry] = pci_map_single(np->pci_dev,
							skb->data,
							np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
		}
		/* Buffer fields must be visible before ownership is handed
		   back to the chip. */
		wmb();
		np->rx_ring[entry].status = DescOwned;
	}

	return 0;
}
1288
1289static void netdev_error(struct net_device *dev, int intr_status)
1290{
1291	struct netdev_private *np = netdev_priv(dev);
1292	void __iomem *ioaddr = np->base_addr;
1293
1294	if (debug > 2)
1295		printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
1296			   dev->name, intr_status);
1297	if (intr_status == 0xffffffff)
1298		return;
1299	spin_lock(&np->lock);
1300	if (intr_status & TxFIFOUnderflow) {
1301		int new;
1302		/* Bump up the Tx threshold */
1303		new = (np->csr6 >> 14)&0x7f;
1304		if (new < 64)
1305			new *= 2;
1306		 else
1307		 	new = 127; /* load full packet before starting */
1308		new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1309		printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n",
1310			   dev->name, new);
1311		update_csr6(dev, new);
1312	}
1313	if (intr_status & RxDied) {		/* Missed a Rx frame. */
1314		np->stats.rx_errors++;
1315	}
1316	if (intr_status & TimerInt) {
1317		/* Re-enable other interrupts. */
1318		if (netif_device_present(dev))
1319			iowrite32(0x1A0F5, ioaddr + IntrEnable);
1320	}
1321	np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1322	iowrite32(0, ioaddr + RxStartDemand);
1323	spin_unlock(&np->lock);
1324}
1325
/* Return the accumulated statistics, first folding in the chip's
   missed-frame counter — but only while the interface is up and the
   hardware has not been detached (e.g. during suspend). */
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	/* The chip only need report frame silently dropped. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && netif_device_present(dev))
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	spin_unlock_irq(&np->lock);

	return &np->stats;
}
1339
1340
1341static u32 __set_rx_mode(struct net_device *dev)
1342{
1343	struct netdev_private *np = netdev_priv(dev);
1344	void __iomem *ioaddr = np->base_addr;
1345	u32 mc_filter[2];			/* Multicast hash filter */
1346	u32 rx_mode;
1347
1348	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1349		memset(mc_filter, 0xff, sizeof(mc_filter));
1350		rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
1351			| AcceptMyPhys;
1352	} else if ((dev->mc_count > multicast_filter_limit)
1353			   ||  (dev->flags & IFF_ALLMULTI)) {
1354		/* Too many to match, or accept all multicasts. */
1355		memset(mc_filter, 0xff, sizeof(mc_filter));
1356		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1357	} else {
1358		struct dev_mc_list *mclist;
1359		int i;
1360		memset(mc_filter, 0, sizeof(mc_filter));
1361		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1362			 i++, mclist = mclist->next) {
1363			int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
1364			filterbit &= 0x3f;
1365			mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1366		}
1367		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1368	}
1369	iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1370	iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1371	return rx_mode;
1372}
1373
1374static void set_rx_mode(struct net_device *dev)
1375{
1376	struct netdev_private *np = netdev_priv(dev);
1377	u32 rx_mode = __set_rx_mode(dev);
1378	spin_lock_irq(&np->lock);
1379	update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
1380	spin_unlock_irq(&np->lock);
1381}
1382
1383static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1384{
1385	struct netdev_private *np = netdev_priv(dev);
1386
1387	strcpy (info->driver, DRV_NAME);
1388	strcpy (info->version, DRV_VERSION);
1389	strcpy (info->bus_info, pci_name(np->pci_dev));
1390}
1391
/* ethtool: report the current MII settings (speed, duplex, autoneg).
   np->lock serializes the MDIO access with the driver's other users. */
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}
1403
/* ethtool: apply new MII settings (speed, duplex, autoneg) via the
   generic MII helper, serialized by np->lock. */
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}
1415
/* ethtool: restart autonegotiation on the attached MII PHY. */
static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}
1421
/* ethtool: report link state as seen through the MII PHY. */
static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}
1427
/* ethtool: report the debug level.  'debug' is a driver-wide variable,
   so the value is shared by all devices handled by this driver. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}
1432
/* ethtool: set the driver-wide debug level (affects all devices). */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
1437
/* ethtool operations: driver info, MII-backed settings/link/nway,
   and the shared debug message level. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_sg			= ethtool_op_get_sg,
	.get_tx_csum		= ethtool_op_get_tx_csum,
};
1449
1450static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1451{
1452	struct mii_ioctl_data *data = if_mii(rq);
1453	struct netdev_private *np = netdev_priv(dev);
1454
1455	switch(cmd) {
1456	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
1457		data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;
1458		/* Fall Through */
1459
1460	case SIOCGMIIREG:		/* Read MII PHY register. */
1461		spin_lock_irq(&np->lock);
1462		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
1463		spin_unlock_irq(&np->lock);
1464		return 0;
1465
1466	case SIOCSMIIREG:		/* Write MII PHY register. */
1467		if (!capable(CAP_NET_ADMIN))
1468			return -EPERM;
1469		spin_lock_irq(&np->lock);
1470		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1471		spin_unlock_irq(&np->lock);
1472		return 0;
1473	default:
1474		return -EOPNOTSUPP;
1475	}
1476}
1477
/* Shut the interface down: stop the queue and the chip, free the irq,
   optionally dump ring state for debugging, and release the rings. */
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	netif_stop_queue(dev);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
			   "Config %8.8x.\n", dev->name, ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

 	/* Stop the chip's Tx and Rx processes. */
	spin_lock_irq(&np->lock);
	/* Detach while holding the lock so a concurrent interrupt cannot
	   re-enable anything after csr6 and IntrEnable are cleared. */
	netif_device_detach(dev);
	update_csr6(dev, 0);
	iowrite32(0x0000, ioaddr + IntrEnable);
	spin_unlock_irq(&np->lock);

	free_irq(dev->irq, dev);
	wmb();
	netif_device_attach(dev);

	/* All-ones read means the chip is no longer responding. */
	if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

#ifdef __i386__
	if (debug > 2) {
		int i;

		printk(KERN_DEBUG"  Tx ring at %8.8x:\n",
			   (int)np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n",
				   i, np->tx_ring[i].length,
				   np->tx_ring[i].status, np->tx_ring[i].buffer1);
		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].length,
				   np->rx_ring[i].status, np->rx_ring[i].buffer1);
		}
	}
#endif /* __i386__ debugging only */

	del_timer_sync(&np->timer);

	free_rxtx_rings(np);
	free_ringdesc(np);

	return 0;
}
1534
/* PCI removal: unregister the netdev and release the PCI resources
   acquired at probe time, then clear the drvdata pointer. */
static void __devexit w840_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		pci_release_regions(pdev);
		pci_iounmap(pdev, np->base_addr);
		free_netdev(dev);
	}

	pci_set_drvdata(pdev, NULL);
}
1549
1550#ifdef CONFIG_PM
1551
1552/*
1553 * suspend/resume synchronization:
1554 * - open, close, do_ioctl:
1555 * 	rtnl_lock, & netif_device_detach after the rtnl_unlock.
1556 * - get_stats:
1557 * 	spin_lock_irq(np->lock), doesn't touch hw if not present
1558 * - hard_start_xmit:
1559 * 	synchronize_irq + netif_tx_disable;
1560 * - tx_timeout:
1561 * 	netif_device_detach + netif_tx_disable;
1562 * - set_multicast_list
1563 * 	netif_device_detach + netif_tx_disable;
1564 * - interrupt handler
1565 * 	doesn't touch hw if not present, synchronize_irq waits for
1566 * 	running instances of the interrupt handler.
1567 *
1568 * Disabling hw requires clearing csr6 & IntrEnable.
 * update_csr6 & all functions that write IntrEnable check netif_device_present
 * before setting any bits.
1571 *
 * Detach must occur while holding the lock (inside spin_lock_irq()/
 * spin_unlock_irq()); interrupts from a detached device would cause an
 * irq storm.
1574 */
1575static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
1576{
1577	struct net_device *dev = pci_get_drvdata (pdev);
1578	struct netdev_private *np = netdev_priv(dev);
1579	void __iomem *ioaddr = np->base_addr;
1580
1581	rtnl_lock();
1582	if (netif_running (dev)) {
1583		del_timer_sync(&np->timer);
1584
1585		spin_lock_irq(&np->lock);
1586		netif_device_detach(dev);
1587		update_csr6(dev, 0);
1588		iowrite32(0, ioaddr + IntrEnable);
1589		spin_unlock_irq(&np->lock);
1590
1591		synchronize_irq(dev->irq);
1592		netif_tx_disable(dev);
1593
1594		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1595
1596		/* no more hardware accesses behind this line. */
1597
1598		BUG_ON(np->csr6);
1599		if (ioread32(ioaddr + IntrEnable)) BUG();
1600
1601		/* pci_power_off(pdev, -1); */
1602
1603		free_rxtx_rings(np);
1604	} else {
1605		netif_device_detach(dev);
1606	}
1607	rtnl_unlock();
1608	return 0;
1609}
1610
/* Power-management resume: re-enable the PCI device and, if the
   interface was running at suspend time, software-reset the chip and
   rebuild the rings and registers before restarting the queue. */
static int w840_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	int retval = 0;

	rtnl_lock();
	if (netif_device_present(dev))
		goto out; /* device not suspended */
	if (netif_running(dev)) {
		if ((retval = pci_enable_device(pdev))) {
			printk (KERN_ERR
				"%s: pci_enable_device failed in resume\n",
				dev->name);
			goto out;
		}
		spin_lock_irq(&np->lock);
		/* Software reset; the read flushes the posted write
		   before the settling delay. */
		iowrite32(1, np->base_addr+PCIBusCfg);
		ioread32(np->base_addr+PCIBusCfg);
		udelay(1);
		netif_device_attach(dev);
		init_rxtx_rings(dev);
		init_registers(dev);
		spin_unlock_irq(&np->lock);

		netif_wake_queue(dev);

		mod_timer(&np->timer, jiffies + 1*HZ);
	} else {
		netif_device_attach(dev);
	}
out:
	rtnl_unlock();
	return retval;
}
1646#endif
1647
/* PCI driver glue: probe/remove plus optional suspend/resume. */
static struct pci_driver w840_driver = {
	.name		= DRV_NAME,
	.id_table	= w840_pci_tbl,
	.probe		= w840_probe1,
	.remove		= __devexit_p(w840_remove1),
#ifdef CONFIG_PM
	.suspend	= w840_suspend,
	.resume		= w840_resume,
#endif
};
1658
1659static int __init w840_init(void)
1660{
1661	printk(version);
1662	return pci_register_driver(&w840_driver);
1663}
1664
/* Module unload: unregister the PCI driver (detaches all devices). */
static void __exit w840_exit(void)
{
	pci_unregister_driver(&w840_driver);
}
1669
/* Hook the driver into module load/unload. */
module_init(w840_init);
module_exit(w840_exit);
1672