/* winbond-840.c: A Linux PCI network adapter device driver. */
/*
	Written 1998-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/drivers.html

	Do not remove the copyright information.
	Do not change the version information unless an improvement has been made.
	Merely removing my name, as Compex has done in the past, does not count
	as an improvement.

	Changelog:
	* ported to 2.4
		???
	* spin lock update, memory barriers, new style dma mappings
		limit each tx buffer to < 1024 bytes
		remove DescIntr from Rx descriptors (that's a Tx flag)
		remove next pointer from Tx descriptors
		synchronize tx_q_bytes
		software reset in tx_timeout
			Copyright (C) 2000 Manfred Spraul
	* further cleanups
		power management.
		support for big endian descriptors
			Copyright (C) 2001 Manfred Spraul
	* ethtool support (jgarzik)
	* Replace some MII-related magic numbers with constants (jgarzik)

	TODO:
	* enable pci_power_off
	* Wake-On-LAN
*/

#define DRV_NAME	"winbond-840"
#define DRV_VERSION	"1.01-d"
#define DRV_RELDATE	"Nov-17-2001"


/* Automatically extracted configuration info:
probe-func: winbond840_probe
config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840

c-help-name: Winbond W89c840 PCI Ethernet support
c-help-symbol: CONFIG_WINBOND_840
c-help: This driver is for the Winbond W89c840 chip.  It also works with
c-help: the TX9882 chip on the Compex RL100-ATX board.
c-help: More specific information and updates are available from
c-help: http://www.scyld.com/network/drivers.html
*/

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The '840 uses a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'.
*/
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10		/* Limit ring entries actually used.  */
#define TX_QUEUE_LEN_RESTART	5
#define RX_RING_SIZE	32

#define TX_BUFLIMIT	(1024-128)

/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
   To avoid overflowing we don't queue again until we have room for a
   full-size packet.
 */
#define TX_FIFO_SIZE (2048)
#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
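/* (i.e. the 2048 byte Tx FIFO, less one maximum-size 1514 byte frame,
   less a little slack.) */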


/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/

#ifndef __KERNEL__
#define __KERNEL__
#endif
#if !defined(__OPTIMIZE__)
#warning  You must compile this file with the correct options!
#warning  See the last lines of the source file.
#error You must compile this driver with "-O".
#endif

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <asm/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE "  Donald Becker <becker@scyld.com>\n"
KERN_INFO "  http://www.scyld.com/network/drivers.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");

MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(multicast_filter_limit, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");

/*
				Theory of Operation

I. Board Compatibility

This driver is for the Winbond w89c840 chip.

II. Board-specific settings

None.

III. Driver operation

This chip is very similar to the Digital 21*4* "Tulip" family.  The first
twelve registers and the descriptor format are nearly identical.  Read a
Tulip manual for operational details.

A significant difference is that the multicast filter and station address are
stored in registers rather than loaded through a pseudo-transmit packet.

Unlike the Tulip, transmit buffers are limited to 1KB.  To transmit a
full-sized packet we must use both data buffers in a descriptor.  Thus the
driver uses ring mode where descriptors are implicitly sequential in memory,
rather than using the second descriptor address as a chain pointer to
subsequent descriptors.
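For example, a full-sized 1514 byte frame is sent with buffer1 covering the
first TX_BUFLIMIT (896) bytes and buffer2 the remaining 618 bytes, both
described by a single descriptor (see start_tx() below).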

IV. Notes

If you are going to almost clone a Tulip, why not go all the way and avoid
the need for a new driver?

IVb. References

http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
http://www.winbond.com.tw/

IVc. Errata

A horrible bug exists in the transmit FIFO.  Apparently the chip doesn't
correctly detect a full FIFO, and queuing more than 2048 bytes may result in
silent data corruption.

Test with 'ping -s 10000' on a fast computer.

*/



/*
  PCI probe table.
*/
enum pci_id_flags_bits {
        /* Set PCI command register bits before calling probe1(). */
        PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
        /* Read and map the single following PCI BAR. */
        PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
        PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
};
enum chip_capability_flags {
	CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,};
#ifdef USE_IO_OPS
#define W840_FLAGS (PCI_USES_IO | PCI_ADDR0 | PCI_USES_MASTER)
#else
#define W840_FLAGS (PCI_USES_MEM | PCI_ADDR1 | PCI_USES_MASTER)
#endif

static struct pci_device_id w840_pci_tbl[] __devinitdata = {
	{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153,     0, 0, 0 },
	{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);

struct pci_id_info {
        const char *name;
        struct match_info {
                int     pci, pci_mask, subsystem, subsystem_mask;
                int revision, revision_mask;                            /* Only 8 bits. */
        } id;
        enum pci_id_flags_bits pci_flags;
        int io_size;                            /* Needed for I/O region check or ioremap(). */
        int drv_flags;                          /* Driver use, intended as capability flags. */
};
static struct pci_id_info pci_id_tbl[] = {
	{"Winbond W89c840",			/* Sometimes a Level-One switch card. */
	 { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 },
	 W840_FLAGS, 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
	{"Winbond W89c840", { 0x08401050, 0xffffffff, },
	 W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
	{"Compex RL100-ATX", { 0x201111F6, 0xffffffff,},
	 W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
	{0,},						/* 0 terminated list. */
};

/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses.  Pass -DUSE_IO_OPS to use PCI I/O space
   accesses instead of memory space. */

#ifdef USE_IO_OPS
#undef readb
#undef readw
#undef readl
#undef writeb
#undef writew
#undef writel
#define readb inb
#define readw inw
#define readl inl
#define writeb outb
#define writew outw
#define writel outl
#endif

/* Offsets to the Command and Status Registers, "CSRs".
   While similar to the Tulip, these registers are longword aligned.
   Note: It's not useful to define symbolic names for every register bit in
   the device.  A name can only partially document the semantics, and would
   make the driver longer and more difficult to read.
*/
enum w840_offsets {
	PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
	RxRingPtr=0x0C, TxRingPtr=0x10,
	IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
	RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
	CurRxDescAddr=0x30, CurRxBufAddr=0x34,			/* Debug use */
	MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};

/* Bits in the interrupt status/enable registers, mostly interrupt sources. */
enum intr_status_bits {
	NormalIntr=0x10000, AbnormalIntr=0x8000,
	IntrPCIErr=0x2000, TimerInt=0x800,
	IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40,
	TxFIFOUnderflow=0x20, RxErrIntr=0x10,
	TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01,
};

/* Bits in the NetworkConfig register. */
enum rx_mode_bits {
	AcceptErr=0x80, AcceptRunt=0x40,
	AcceptBroadcast=0x20, AcceptMulticast=0x10,
	AcceptAllPhys=0x08, AcceptMyPhys=0x02,
};

enum mii_reg_bits {
	MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
	MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};

/* The Tulip Rx and Tx buffer descriptors. */
struct w840_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1;
	u32 buffer2;
};

struct w840_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};

/* Bits in network_desc.status */
enum desc_status_bits {
	DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
	DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
	DescIntr=0x80000000,
};
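/* Note: DescOwn applies to the status word, while DescIntr (the same bit
   value) is a Tx-only flag set in the length word, presumably the
   Tulip-style interrupt-on-completion bit (see the changelog above);
   hence the duplicated value. */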

#define PRIV_ALIGN	15 	/* Required alignment mask */
#define MII_CNT		1 /* winbond only supports one MII */
struct netdev_private {
	struct w840_rx_desc *rx_ring;
	dma_addr_t	rx_addr[RX_RING_SIZE];
	struct w840_tx_desc *tx_ring;
	dma_addr_t	tx_addr[TX_RING_SIZE];
	dma_addr_t ring_dma_addr;
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	int csr6;
	struct w840_rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
	unsigned int cur_tx, dirty_tx;
	unsigned int tx_q_bytes;
	unsigned int tx_full;				/* The Tx queue is full. */
	/* MII transceiver section. */
	int mii_cnt;						/* MII device addresses. */
	unsigned char phys[MII_CNT];		/* MII device addresses, but only the first is used */
	u32 mii;
	struct mii_if_info mii_if;
};

static int  eeprom_read(long ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static int  update_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static int alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static int  start_tx(struct sk_buff *skb, struct net_device *dev);
static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void netdev_error(struct net_device *dev, int intr_status);
static int  netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);



static int __devinit w840_probe1 (struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int find_cnt;
	int chip_idx = ent->driver_data;
	int irq;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	long ioaddr;

	i = pci_enable_device(pdev);
	if (i) return i;

	pci_set_master(pdev);

	irq = pdev->irq;

	if (pci_set_dma_mask(pdev,0xFFFFffff)) {
		printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
		       pdev->slot_name);
		return -EIO;
	}
	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

#ifdef USE_IO_OPS
	ioaddr = pci_resource_start(pdev, 0);
#else
	ioaddr = pci_resource_start(pdev, 1);
	ioaddr = (long) ioremap (ioaddr, pci_id_tbl[chip_idx].io_size);
	if (!ioaddr)
		goto err_out_free_res;
#endif

	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));

	/* Reset the chip to erase previous misconfiguration.
	   No hold time required! */
	writel(0x00000001, ioaddr + PCIBusCfg);

	dev->base_addr = ioaddr;
	dev->irq = irq;

	np = dev->priv;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
	spin_lock_init(&np->lock);
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;

	pci_set_drvdata(pdev, dev);

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii_if.full_duplex = 1;
		if (option & 15)
			printk(KERN_INFO "%s: ignoring user supplied media type %d.\n",
				dev->name, option & 15);
	}
	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	i = register_netdev(dev);
	if (i)
		goto err_out_cleardev;

	printk(KERN_INFO "%s: %s at 0x%lx, ",
		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
	for (i = 0; i < 5; i++)
			printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

	if (np->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
						mdio_read(dev, phy, MII_PHYSID2);
				printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status "
					   "0x%4.4x advertising %4.4x.\n",
					   dev->name, np->mii, phy, mii_status, np->mii_if.advertising);
			}
		}
		np->mii_cnt = phy_idx;
		np->mii_if.phy_id = np->phys[0];
		if (phy_idx == 0) {
				printk(KERN_WARNING "%s: MII PHY not found -- this device may "
					   "not operate correctly.\n", dev->name);
		}
	}

	find_cnt++;
	return 0;

err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
#ifndef USE_IO_OPS
	iounmap((void *)ioaddr);
err_out_free_res:
#endif
	pci_release_regions(pdev);
err_out_netdev:
	kfree (dev);
	return -ENODEV;
}


/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
   often serial bit streams generated by the host processor.
   The example below is for the common 93c46 EEPROM, 64 16-bit words. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33MHz PCI, but future 66MHz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated.
*/
#define eeprom_delay(ee_addr)	readl(ee_addr)

enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
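/* For example, reading word 2 shifts out the 11-bit pattern 00 110 000010:
   two pad bits, the start bit with the 10 read opcode, then the 6-bit
   address (see eeprom_read() below). */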

static int eeprom_read(long addr, int location)
{
	int i;
	int retval = 0;
	long ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;
	writel(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		writel(dataval, ee_addr);
		eeprom_delay(ee_addr);
		writel(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	writel(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	for (i = 16; i > 0; i--) {
		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
		writel(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(0, ee_addr);
	return retval;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33MHz PCI cycles. */
#define mdio_delay(mdio_addr) readl(mdio_addr)

/* Set iff an MII transceiver on any interface requires mdio preamble.
   This is only set for older transceivers, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 1;

#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(long mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		writel(MDIO_WRITE1, mdio_addr);
		mdio_delay(mdio_addr);
		writel(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long mdio_addr = dev->base_addr + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
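	/* (0xf6 << 10) packs two leading idle bits, the 01 start and 10
	   read-opcode bits ahead of the PHY and register addresses; only
	   the low 16 bits are shifted out below. */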
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		writel(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		writel(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition bits, the 16 data bits, and the wire-idle bits. */
	for (i = 20; i > 0; i--) {
		writel(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((readl(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = dev->priv;
	long mdio_addr = dev->base_addr + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
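	/* (0x5002 << 16) supplies the 01 start and 01 write-opcode bits plus
	   the 10 turnaround pattern; the PHY address, register address and
	   data word fill the remaining fields of the 32-bit frame. */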
	int i;

	if (location == 4  &&  phy_id == np->phys[0])
		np->mii_if.advertising = value;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		writel(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		writel(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		writel(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return;
}


static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	writel(0x00000001, ioaddr + PCIBusCfg);		/* Reset */

	netif_device_detach(dev);
	i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
	if (i)
		goto out_err;

	if (debug > 1)
		printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
			   dev->name, dev->irq);

	if((i=alloc_ringdesc(dev)))
		goto out_err;

	spin_lock_irq(&np->lock);
	netif_device_attach(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 1*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;				/* timer handler */
	add_timer(&np->timer);
	return 0;
out_err:
	netif_device_attach(dev);
	return i;
}

#define MII_DAVICOM_DM9101	0x0181b800

static int update_link(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int duplex, fasteth, result, mii_reg;

	/* BMSR */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);

	if (mii_reg == 0xffff)
		return np->csr6;
	/* reread: the link status bit is sticky */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
	if (!(mii_reg & 0x4)) {
		if (netif_carrier_ok(dev)) {
			if (debug)
				printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n",
					dev->name, np->phys[0]);
			netif_carrier_off(dev);
		}
		return np->csr6;
	}
	if (!netif_carrier_ok(dev)) {
		if (debug)
			printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n",
				dev->name, np->phys[0]);
		netif_carrier_on(dev);
	}

	if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
		/* If the link partner doesn't support autonegotiation,
		 * the MII detects its abilities with "parallel detection".
		 * Some MIIs update the LPA register to the result of the parallel
		 * detection, some don't.
		 * The Davicom PHY [at least 0181b800] doesn't.
		 * Instead, bits 8 and 13 of the BMCR are updated to the result
		 * of the negotiation.
		 */
		mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
		duplex = mii_reg & BMCR_FULLDPLX;
		fasteth = mii_reg & BMCR_SPEED100;
	} else {
		int negotiated;
		mii_reg	= mdio_read(dev, np->phys[0], MII_LPA);
		negotiated = mii_reg & np->mii_if.advertising;

		duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
		fasteth = negotiated & 0x380;
	}
	duplex |= np->mii_if.force_media;
	/* clear the fast-ethernet and full-duplex bits */
	result = np->csr6 & ~0x20000200;
	if (duplex)
		result |= 0x200;
	if (fasteth)
		result |= 0x20000000;
	if (result != np->csr6 && debug)
		printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
				 dev->name, fasteth ? 100 : 10,
			   	duplex ? "full" : "half", np->phys[0]);
	return result;
}

#define RXTX_TIMEOUT	2000
static inline void update_csr6(struct net_device *dev, int new)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int limit = RXTX_TIMEOUT;

	if (!netif_device_present(dev))
		new = 0;
	if (new==np->csr6)
		return;
	/* stop both Tx and Rx processes */
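	/* (0x2002 covers the Tx-start and Rx-start bits, assuming the
	   Tulip-style csr6 layout: bit 13 = Tx on, bit 1 = Rx on.) */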
	writel(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
	/* wait until they have really stopped */
	for (;;) {
		int csr5 = readl(ioaddr + IntrStatus);
		int t;

		t = (csr5 >> 17) & 0x07;
		if (t==0||t==1) {
			/* rx stopped */
			t = (csr5 >> 20) & 0x07;
			if (t==0||t==1)
				break;
		}

		limit--;
		if(!limit) {
			printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n",
					dev->name, csr5);
			break;
		}
		udelay(1);
	}
	np->csr6 = new;
	/* and restart them with the new configuration */
	writel(np->csr6, ioaddr + NetworkConfig);
	if (new & 0x200)
		np->mii_if.full_duplex = 1;
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;

	if (debug > 2)
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
			   "config %8.8x.\n",
			   dev->name, (int)readl(ioaddr + IntrStatus),
			   (int)readl(ioaddr + NetworkConfig));
	spin_lock_irq(&np->lock);
	update_csr6(dev, update_link(dev));
	spin_unlock_irq(&np->lock);
	np->timer.expires = jiffies + 10*HZ;
	add_timer(&np->timer);
}

static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;			/* Mark as being used by this device. */
		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->tail,
					skb->len,PCI_DMA_FROMDEVICE);

		np->rx_ring[i].buffer1 = np->rx_addr[i];
		np->rx_ring[i].status = DescOwn;
	}

	np->cur_rx = 0;
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = 0;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	writel(np->ring_dma_addr, dev->base_addr + RxRingPtr);
	writel(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		dev->base_addr + TxRingPtr);

}

static void free_rxtx_rings(struct netdev_private* np)
{
	int i;
	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
						np->rx_addr[i],
						np->rx_skbuff[i]->len,
						PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
						np->tx_addr[i],
						np->tx_skbuff[i]->len,
						PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = 0;
	}
}

static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		writeb(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
#ifdef __BIG_ENDIAN
	i = (1<<20);	/* Big-endian descriptors */
#else
	i = 0;
#endif
	i |= (0x04<<2);		/* skip length 4 u32 */
	i |= 0x02;		/* give Rx priority */

	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword cache alignment, 8 longword burst.
	   586: Set 16 longword cache alignment, no burst limit.
	   Cache alignment bits 15:14	     Burst length 13:8
		0000	<not allowed> 		0000 align to cache	0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32  longwords		0400 4 longwords */
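	/* Decoding the values used below against the table: 0x4800 is
	   8-longword alignment (0x4000) plus 8-longword bursts (0x0800), and
	   0xE000 is 32-longword alignment (0xC000) plus 32-longword bursts
	   (0x2000). */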

#if defined(__i386__) && !defined(MODULE)
	/* When not a module we can work around broken '486 PCI boards. */
	if (boot_cpu_data.x86 <= 4) {
		i |= 0x4800;
		printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
			   "alignment to 8 longwords.\n", dev->name);
	} else {
		i |= 0xE000;
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
	i |= 0xE000;
#elif defined(__sparc__)
	i |= 0x4800;
#else
#warning Processor architecture undefined
	i |= 0x4800;
#endif
	writel(i, ioaddr + PCIBusCfg);

	np->csr6 = 0;
	/* 128 byte Tx threshold;
		Transmit on; Receive on; */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Clear and Enable interrupts by setting the interrupt mask. */
	writel(0x1A0F5, ioaddr + IntrStatus);
	writel(0x1A0F5, ioaddr + IntrEnable);

	writel(0, ioaddr + RxStartDemand);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
		   " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));

	{
		int i;
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
		printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %8.8x", np->tx_ring[i].status);
		printk("\n");
	}
	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
				np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",readl(ioaddr+0x4C));

	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	/*
	 * Under high load dirty_tx and the internal tx descriptor pointer
	 * come out of sync, thus perform a software reset and reinitialize
	 * everything.
	 */

	writel(1, dev->base_addr+PCIBusCfg);
	udelay(1);

	free_rxtx_rings(np);
	init_rxtx_rings(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	netif_wake_queue(dev);
	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	return;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int alloc_ringdesc(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	np->rx_ring = pci_alloc_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			&np->ring_dma_addr);
	if(!np->rx_ring)
		return -ENOMEM;
	init_rxtx_rings(dev);
	return 0;
}

static void free_ringdesc(struct netdev_private *np)
{
	pci_free_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			np->rx_ring, np->ring_dma_addr);

}

static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_addr[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);
	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
	if (skb->len < TX_BUFLIMIT) {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
	} else {
		int len = skb->len - TX_BUFLIMIT;

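		/* Tulip-style length word: buffer2 size in bits 21:11 and
		   buffer1 size in bits 10:0 (an assumption carried over from
		   the 21x4x descriptor format). */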
		np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}
	if(entry == TX_RING_SIZE-1)
		np->tx_ring[entry].length |= DescEndRing;

	/* Now acquire the irq spinlock.
	 * The difficult race is the ordering between
	 * increasing np->cur_tx and setting DescOwn:
	 * - if np->cur_tx is increased first the interrupt
	 *   handler could consider the packet as transmitted
	 *   since DescOwn is cleared.
	 * - If DescOwn is set first the NIC could report the
	 *   packet as sent, but the interrupt handler would ignore it
	 *   since the np->cur_tx was not yet increased.
	 */
	spin_lock_irq(&np->lock);
	np->cur_tx++;

	wmb(); /* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = DescOwn;
	wmb(); /* flush status and kick the hardware */
	writel(0, dev->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;
	/* Work around horrible bug in the chip by marking the queue as full
	   when we do not have FIFO room for a maximum sized packet. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
		((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
		wmb();
		np->tx_full = 1;
	}
	spin_unlock_irq(&np->lock);

	dev->trans_start = jiffies;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			   dev->name, np->cur_tx, entry);
	}
	return 0;
}

static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		int tx_status = np->tx_ring[entry].status;

		if (tx_status < 0)
			break;
		if (tx_status & 0x8000) { 	/* There was an error, log it. */
#ifndef final_version
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
					   dev->name, tx_status);
#endif
			np->stats.tx_errors++;
			if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
			if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
			if (tx_status & 0x0200) np->stats.tx_window_errors++;
			if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
				np->stats.tx_heartbeat_errors++;
		} else {
#ifndef final_version
			if (debug > 3)
				printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n",
					   dev->name, entry, tx_status);
#endif
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.collisions += (tx_status >> 3) & 15;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		pci_unmap_single(np->pci_dev,np->tx_addr[entry],
					np->tx_skbuff[entry]->len,
					PCI_DMA_TODEVICE);
		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = 0;
	}
	if (np->tx_full &&
		np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
		np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
		/* The ring is no longer full, clear tbusy. */
		np->tx_full = 0;
		wmb();
		netif_wake_queue(dev);
	}
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int work_limit = max_interrupt_work;

	if (!netif_device_present(dev))
		return;
	do {
		u32 intr_status = readl(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		writel(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
			break;

		if (intr_status & (IntrRxDone | RxNoBuf))
			netdev_rx(dev);
		if (intr_status & RxNoBuf)
			writel(0, ioaddr + RxStartDemand);

		if (intr_status & (TxIdle | IntrTxDone) &&
			np->cur_tx != np->dirty_tx) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
						   TimerInt | IntrTxStopped))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=0x%4.4x.\n", dev->name, intr_status);
			/* Set the timer to re-enable the other interrupts after
			   10*82usec ticks. */
			spin_lock(&np->lock);
			if (netif_device_present(dev)) {
				writel(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
				writel(10, ioaddr + GPTimer);
			}
			spin_unlock(&np->lock);
			break;
		}
	} while (1);

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, (int)readl(ioaddr + IntrStatus));
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   status);
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
						   "multiple buffers, entry %#x status %4.4x!\n",
						   dev->name, np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				np->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   " status %x.\n", pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(np->pci_dev,np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							PCI_DMA_FROMDEVICE);
				/* Call copy + cksum if available. */
#if HAS_IP_COPYSUM
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
					   pkt_len);
#endif
			} else {
				pci_unmap_single(np->pci_dev,np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version				    /* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (debug > 5)
				printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
					   "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
					   "%d.%d.%d.%d.\n",
					   skb->data[0], skb->data[1], skb->data[2], skb->data[3],
					   skb->data[4], skb->data[5], skb->data[6], skb->data[7],
					   skb->data[8], skb->data[9], skb->data[10],
					   skb->data[11], skb->data[12], skb->data[13],
					   skb->data[14], skb->data[15], skb->data[16],
					   skb->data[17]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;			/* Better luck next round. */
			skb->dev = dev;			/* Mark as being used by this device. */
			np->rx_addr[entry] = pci_map_single(np->pci_dev,
							skb->tail,
							skb->len, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
		}
		wmb();
		np->rx_ring[entry].status = DescOwn;
	}

	return 0;
}

static void netdev_error(struct net_device *dev, int intr_status)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;

	if (debug > 2)
		printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
			   dev->name, intr_status);
	if (intr_status == 0xffffffff)
		return;
	spin_lock(&np->lock);
	if (intr_status & TxFIFOUnderflow) {
		int new;
		/* Bump up the Tx threshold */
		new = (np->csr6 >> 14)&0x7f;
		if (new < 64)
			new *= 2;
		 else
		 	new = 127; /* load full packet before starting */
		new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
		printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n",
			   dev->name, new);
		update_csr6(dev, new);
	}
	if (intr_status & IntrRxDied) {		/* Missed a Rx frame. */
		np->stats.rx_errors++;
	}
	if (intr_status & TimerInt) {
		/* Re-enable other interrupts. */
		if (netif_device_present(dev))
			writel(0x1A0F5, ioaddr + IntrEnable);
	}
	np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
	writel(0, ioaddr + RxStartDemand);
	spin_unlock(&np->lock);
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;

	/* Only the missed-frame count must be read from the chip; the other
	   statistics are maintained in software. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && netif_device_present(dev))
		np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
	spin_unlock_irq(&np->lock);

	return &np->stats;
}


static u32 __set_rx_mode(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	u32 mc_filter[2];			/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
			| AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
			   ||  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
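			/* The upper 6 bits of the Ethernet CRC select one of the
			   64 filter bits; the ^ 0x3F apparently adjusts for the
			   chip's bit ordering. */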
			set_bit((ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F,
					mc_filter);
		}
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	}
	writel(mc_filter[0], ioaddr + MulticastFilter0);
	writel(mc_filter[1], ioaddr + MulticastFilter1);
	return rx_mode;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	u32 rx_mode = __set_rx_mode(dev);
	spin_lock_irq(&np->lock);
	update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
	spin_unlock_irq(&np->lock);
}

static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
{
	struct netdev_private *np = dev->priv;
	u32 ethcmd;

	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
		return -EFAULT;

	switch (ethcmd) {
	case ETHTOOL_GDRVINFO: {
		struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
		strcpy(info.driver, DRV_NAME);
		strcpy(info.version, DRV_VERSION);
		strcpy(info.bus_info, np->pci_dev->slot_name);
		if (copy_to_user(useraddr, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}

	/* get settings */
	case ETHTOOL_GSET: {
		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
		spin_lock_irq(&np->lock);
		mii_ethtool_gset(&np->mii_if, &ecmd);
		spin_unlock_irq(&np->lock);
		if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
			return -EFAULT;
		return 0;
	}
	/* set settings */
	case ETHTOOL_SSET: {
		int r;
		struct ethtool_cmd ecmd;
		if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
			return -EFAULT;
		spin_lock_irq(&np->lock);
		r = mii_ethtool_sset(&np->mii_if, &ecmd);
		spin_unlock_irq(&np->lock);
		return r;
	}
	/* restart autonegotiation */
	case ETHTOOL_NWAY_RST: {
		return mii_nway_restart(&np->mii_if);
	}
	/* get link status */
	case ETHTOOL_GLINK: {
		struct ethtool_value edata = {ETHTOOL_GLINK};
		edata.data = mii_link_ok(&np->mii_if);
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}

	/* get message-level */
	case ETHTOOL_GMSGLVL: {
		struct ethtool_value edata = {ETHTOOL_GMSGLVL};
		edata.data = debug;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}
	/* set message-level */
	case ETHTOOL_SMSGLVL: {
		struct ethtool_value edata;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		debug = edata.data;
		return 0;
	}
	}

	return -EOPNOTSUPP;
}

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
	struct netdev_private *np = dev->priv;

	switch(cmd) {
	case SIOCETHTOOL:
		return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
	case SIOCDEVPRIVATE:		/* for binary compat, remove in 2.5 */
		data->phy_id = np->phys[0] & 0x1f;
		/* Fall Through */

	case SIOCGMIIREG:		/* Read MII PHY register. */
	case SIOCDEVPRIVATE+1:		/* for binary compat, remove in 2.5 */
		spin_lock_irq(&np->lock);
		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
		spin_unlock_irq(&np->lock);
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
	case SIOCDEVPRIVATE+2:		/* for binary compat, remove in 2.5 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		spin_lock_irq(&np->lock);
		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		spin_unlock_irq(&np->lock);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int netdev_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;

	netif_stop_queue(dev);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
			   "Config %8.8x.\n", dev->name, (int)readl(ioaddr + IntrStatus),
			   (int)readl(ioaddr + NetworkConfig));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

 	/* Stop the chip's Tx and Rx processes. */
	spin_lock_irq(&np->lock);
	netif_device_detach(dev);
	update_csr6(dev, 0);
	writel(0x0000, ioaddr + IntrEnable);
	spin_unlock_irq(&np->lock);

	free_irq(dev->irq, dev);
	wmb();
	netif_device_attach(dev);

	if (readl(ioaddr + NetworkConfig) != 0xffffffff)
		np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;

#ifdef __i386__
	if (debug > 2) {
		int i;

		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
			   (int)np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" #%d desc. %4.4x %4.4x %8.8x.\n",
				   i, np->tx_ring[i].length,
				   np->tx_ring[i].status, np->tx_ring[i].buffer1);
		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].length,
				   np->rx_ring[i].status, np->rx_ring[i].buffer1);
		}
	}
#endif /* __i386__ debugging only */

	del_timer_sync(&np->timer);

	free_rxtx_rings(np);
	free_ringdesc(np);

	return 0;
}

static void __devexit w840_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
	if (dev) {
		unregister_netdev(dev);
		pci_release_regions(pdev);
#ifndef USE_IO_OPS
		iounmap((char *)(dev->base_addr));
#endif
		kfree(dev);
	}

	pci_set_drvdata(pdev, NULL);
}

#ifdef CONFIG_PM

/*
 * suspend/resume synchronization:
 * - open, close, do_ioctl:
 * 	rtnl_lock, & netif_device_detach after the rtnl_unlock.
 * - get_stats:
 * 	spin_lock_irq(np->lock), doesn't touch hw if not present
 * - hard_start_xmit:
 * 	netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
 * - tx_timeout:
 * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
 * - set_multicast_list
 * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
 * - interrupt handler
 * 	doesn't touch hw if not present, synchronize_irq waits for
 * 	running instances of the interrupt handler.
 *
 * Disabling hw requires clearing csr6 & IntrEnable.
 * update_csr6 and all functions that write IntrEnable check
 * netif_device_present() before setting any bits.
 *
 * Detach must occur under spin_lock_irq(); interrupts from a detached
 * device would cause an irq storm.
 */
static int w840_suspend (struct pci_dev *pdev, u32 state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;

	rtnl_lock();
	if (netif_running (dev)) {
		del_timer_sync(&np->timer);

		spin_lock_irq(&np->lock);
		netif_device_detach(dev);
		update_csr6(dev, 0);
		writel(0, ioaddr + IntrEnable);
		netif_stop_queue(dev);
		spin_unlock_irq(&np->lock);

		spin_unlock_wait(&dev->xmit_lock);
		synchronize_irq();

		np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;

		/* no more hardware accesses behind this line. */

		if (np->csr6) BUG();
		if (readl(ioaddr + IntrEnable)) BUG();

		/* pci_power_off(pdev, -1); */

		free_rxtx_rings(np);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}


static int w840_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = dev->priv;

	rtnl_lock();
	if (netif_device_present(dev))
		goto out; /* device not suspended */
	if (netif_running(dev)) {
		pci_enable_device(pdev);
	/*	pci_power_on(pdev); */

		spin_lock_irq(&np->lock);
		writel(1, dev->base_addr+PCIBusCfg);
		readl(dev->base_addr+PCIBusCfg);
		udelay(1);
		netif_device_attach(dev);
		init_rxtx_rings(dev);
		init_registers(dev);
		spin_unlock_irq(&np->lock);

		netif_wake_queue(dev);

		np->timer.expires = jiffies + 1*HZ;
		add_timer(&np->timer);
	} else {
		netif_device_attach(dev);
	}
out:
	rtnl_unlock();
	return 0;
}
#endif

static struct pci_driver w840_driver = {
	name:		DRV_NAME,
	id_table:	w840_pci_tbl,
	probe:		w840_probe1,
	remove:		__devexit_p(w840_remove1),
#ifdef CONFIG_PM
	suspend:	w840_suspend,
	resume:		w840_resume,
#endif
};

static int __init w840_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&w840_driver);
}

static void __exit w840_exit(void)
{
	pci_unregister_driver(&w840_driver);
}

module_init(w840_init);
module_exit(w840_exit);