1/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2/*
3	Written 1998-2001 by Donald Becker.
4
5	Current Maintainer: Roger Luethi <rl@hellgate.ch>
6
7	This software may be used and distributed according to the terms of
8	the GNU General Public License (GPL), incorporated herein by reference.
9	Drivers based on or derived from this code fall under the GPL and must
10	retain the authorship, copyright and license notice.  This file is not
11	a complete program and may only be used when the entire operating
12	system is licensed under the GPL.
13
14	This driver is designed for the VIA VT86C100A Rhine-I.
15	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16	and management NIC 6105M).
17
18	The author may be reached as becker@scyld.com, or C/O
19	Scyld Computing Corporation
20	410 Severn Ave., Suite 210
21	Annapolis MD 21403
22
23
24	This driver contains some changes from the original Donald Becker
25	version. He may or may not be interested in bug reports on this
26	code. You can find his versions at:
27	http://www.scyld.com/network/via-rhine.html
28	[link no longer provides useful info -jgarzik]
29
30*/
31
32#define DRV_NAME	"via-rhine"
33#define DRV_VERSION	"1.4.3"
34#define DRV_RELDATE	"2007-03-06"
35
36
37/* A few user-configurable values.
38   These may be modified when a driver module is loaded. */
39
40static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
41static int max_interrupt_work = 20;
42
43/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
44   Setting to > 1518 effectively disables this feature. */
45static int rx_copybreak;
46
47static int avoid_D3;
48
49/*
50 * In case you are looking for 'options[]' or 'full_duplex[]', they
51 * are gone. Use ethtool(8) instead.
52 */
53
54/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
55   The Rhine has a 64 element 8390-like hash table. */
56static const int multicast_filter_limit = 32;
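/*
 * For reference, the mapping into this 64-bit hash is the one used in
 * rhine_set_rx_mode() below: the top six bits of the Ethernet CRC select
 * one of the 64 filter bits (illustrative sketch only, not extra driver
 * code):
 *
 *	int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */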
57
58
59/* Operational parameters that are set at compile time. */
60
61/* Keep the ring sizes a power of two for compile efficiency.
62   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
63   Making the Tx ring too large decreases the effectiveness of channel
64   bonding and packet priority.
65   There are no ill effects from too-large receive rings. */
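/*
 * E.g. the "entry = rp->cur_tx % TX_RING_SIZE" computations used below
 * reduce to "& (TX_RING_SIZE - 1)" masks when the sizes are powers of two
 * (illustrative note only).
 */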
66#define TX_RING_SIZE	16
67#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
68#ifdef CONFIG_VIA_RHINE_NAPI
69#define RX_RING_SIZE	64
70#else
71#define RX_RING_SIZE	16
72#endif
73
74
75/* Operational parameters that usually are not changed. */
76
77/* Time in jiffies before concluding the transmitter is hung. */
78#define TX_TIMEOUT	(2*HZ)
79
80#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
81
82#include <linux/module.h>
83#include <linux/moduleparam.h>
84#include <linux/kernel.h>
85#include <linux/string.h>
86#include <linux/timer.h>
87#include <linux/errno.h>
88#include <linux/ioport.h>
89#include <linux/slab.h>
90#include <linux/interrupt.h>
91#include <linux/pci.h>
92#include <linux/dma-mapping.h>
93#include <linux/netdevice.h>
94#include <linux/etherdevice.h>
95#include <linux/skbuff.h>
96#include <linux/init.h>
97#include <linux/delay.h>
98#include <linux/mii.h>
99#include <linux/ethtool.h>
100#include <linux/crc32.h>
101#include <linux/bitops.h>
102#include <asm/processor.h>	/* Processor type for cache alignment. */
103#include <asm/io.h>
104#include <asm/irq.h>
105#include <asm/uaccess.h>
106#include <linux/dmi.h>
107
108/* These identify the driver base version and may not be removed. */
109static char version[] __devinitdata =
110KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
111
112/* This driver was written to use PCI memory space. Some early versions
113   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#endif
118
119MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
120MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
121MODULE_LICENSE("GPL");
122
123module_param(max_interrupt_work, int, 0);
124module_param(debug, int, 0);
125module_param(rx_copybreak, int, 0);
126module_param(avoid_D3, bool, 0);
127MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
128MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
129MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
130MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
131
132/*
133		Theory of Operation
134
135I. Board Compatibility
136
This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller. It also supports the later Rhine-II (VT6102) and Rhine-III
(VT6105/VT6105M) chips.
139
140II. Board-specific settings
141
142Boards with this chip are functional only in a bus-master PCI slot.
143
144Many operational settings are loaded from the EEPROM to the Config word at
145offset 0x78. For most of these settings, this driver assumes that they are
146correct.
If this driver is compiled to use PCI memory space operations, the EEPROM
must be configured to enable memory ops.
149
150III. Driver operation
151
152IIIa. Ring buffers
153
154This driver uses two statically allocated fixed-size descriptor lists
155formed into rings by a branch from the final descriptor to the beginning of
156the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
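
As a simplified illustration (the real initialization is done in
alloc_rbufs() and alloc_tbufs() further down), the wrap-around link
amounts to:

	for (i = 0; i < RX_RING_SIZE; i++)
		rp->rx_ring[i].next_desc =
			cpu_to_le32(rp->rx_ring_dma +
				    ((i + 1) % RX_RING_SIZE) *
				    sizeof(struct rx_desc));

so the last descriptor's next_desc points back at the first one, closing
the ring that the chip follows.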
157
158IIIb/c. Transmit/Receive Structure
159
160This driver attempts to use a zero-copy receive and transmit scheme.
161
162Alas, all data buffers are required to start on a 32 bit boundary, so
163the driver must often copy transmit packets into bounce buffers.
164
165The driver allocates full frame size skbuffs for the Rx ring buffers at
166open() time and passes the skb->data field to the chip as receive data
167buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
168a fresh skbuff is allocated and the frame is copied to the new skbuff.
169When the incoming frame is larger, the skbuff is passed directly up the
170protocol stack. Buffers consumed this way are replaced by newly allocated
171skbuffs in the last phase of rhine_rx().
172
The RX_COPYBREAK value is chosen to trade off the memory wasted by
174using a full-sized skbuff for small frames vs. the copying costs of larger
175frames. New boards are typically used in generously configured machines
176and the underfilled buffers have negligible impact compared to the benefit of
177a single allocation size, so the default value of zero results in never
178copying packets. When copying is done, the cost is usually mitigated by using
179a combined copy/checksum routine. Copying also preloads the cache, which is
180most useful with small frames.
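
In code, the per-frame decision in rhine_rx() is roughly the following
(illustrative sketch; the DMA syncs, unmapping and ring refill are
omitted, and pkt_len already excludes the trailing CRC):

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);
		eth_copy_and_sum(skb, rp->rx_skbuff[entry]->data, pkt_len, 0);
		skb_put(skb, pkt_len);
	} else {
		skb = rp->rx_skbuff[entry];
		rp->rx_skbuff[entry] = NULL;
		skb_put(skb, pkt_len);
	}

The skb_reserve(skb, 2) in the copy case is what restores 16-byte
alignment of the IP header.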
181
182Since the VIA chips are only able to transfer data to buffers on 32 bit
183boundaries, the IP header at offset 14 in an ethernet frame isn't
184longword aligned for further processing. Copying these unaligned buffers
185has the beneficial effect of 16-byte aligning the IP header.
186
187IIId. Synchronization
188
189The driver runs as two independent, single-threaded flows of control. One
190is the send-packet routine, which enforces single-threaded use by the
191dev->priv->lock spinlock. The other thread is the interrupt handler, which
192is single threaded by the hardware and interrupt handling software.
193
194The send packet thread has partial control over the Tx ring. It locks the
195dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
196is not available it stops the transmit queue by calling netif_stop_queue.
197
198The interrupt handler has exclusive control over the Rx ring and records stats
199from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available, the transmit queue is woken up if it was stopped.
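
Condensed from rhine_start_tx() and rhine_tx(), the queue flow control is
simply (illustrative only):

	after queueing a packet:
		if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
			netif_stop_queue(dev);

	after reclaiming finished descriptors:
		if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
			netif_wake_queue(dev);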
202
203IV. Notes
204
205IVb. References
206
207Preliminary VT86C100A manual from http://www.via.com.tw/
208http://www.scyld.com/expert/100mbps.html
209http://www.scyld.com/expert/NWay.html
210ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
211ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
212
213
214IVc. Errata
215
The VT86C100A manual is not a reliable source of information.
217The 3043 chip does not handle unaligned transmit or receive buffers, resulting
218in significant performance degradation for bounce buffer copies on transmit
219and unaligned IP headers on receive.
220The chip does not pad to minimum transmit length.
221
222*/
223
224
225/* This table drives the PCI probe routines. It's mostly boilerplate in all
226   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
229*/
230
231enum rhine_revs {
232	VT86C100A	= 0x00,
233	VTunknown0	= 0x20,
234	VT6102		= 0x40,
235	VT8231		= 0x50,	/* Integrated MAC */
236	VT8233		= 0x60,	/* Integrated MAC */
237	VT8235		= 0x74,	/* Integrated MAC */
238	VT8237		= 0x78,	/* Integrated MAC */
239	VTunknown1	= 0x7C,
240	VT6105		= 0x80,
241	VT6105_B0	= 0x83,
242	VT6105L		= 0x8A,
243	VT6107		= 0x8C,
244	VTunknown2	= 0x8E,
245	VT6105M		= 0x90,	/* Management adapter */
246};
247
248enum rhine_quirks {
249	rqWOL		= 0x0001,	/* Wake-On-LAN support */
250	rqForceReset	= 0x0002,
251	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
252	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
253	rqRhineI	= 0x0100,	/* See comment below */
254};
255/*
256 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
257 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
259 */
260
261/* Beware of PCI posted writes */
262#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
263
264static const struct pci_device_id rhine_pci_tbl[] = {
265	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
266	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
267	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
268	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
269	{ }	/* terminate list */
270};
271MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
272
273
274/* Offsets to the device registers. */
275enum register_offsets {
276	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
277	ChipCmd1=0x09,
278	IntrStatus=0x0C, IntrEnable=0x0E,
279	MulticastFilter0=0x10, MulticastFilter1=0x14,
280	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
281	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
282	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
283	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
284	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
285	StickyHW=0x83, IntrStatus2=0x84,
286	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
287	WOLcrClr1=0xA6, WOLcgClr=0xA7,
288	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
289};
290
291/* Bits in ConfigD */
292enum backoff_bits {
293	BackOptional=0x01, BackModify=0x02,
294	BackCaptureEffect=0x04, BackRandom=0x08
295};
296
297#ifdef USE_MMIO
/* Registers we check to verify that MMIO and PIO accesses return the same values. */
299static const int mmio_verify_registers[] = {
300	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
301	0
302};
303#endif
304
305/* Bits in the interrupt status/mask registers. */
306enum intr_status_bits {
307	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
308	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
309	IntrPCIErr=0x0040,
310	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
311	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
312	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
313	IntrRxWakeUp=0x8000,
314	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
315	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
316	IntrTxErrSummary=0x082218,
317};
318
319/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
320enum wol_bits {
321	WOLucast	= 0x10,
322	WOLmagic	= 0x20,
323	WOLbmcast	= 0x30,
324	WOLlnkon	= 0x40,
325	WOLlnkoff	= 0x80,
326};
327
328/* The Rx and Tx buffer descriptors. */
329struct rx_desc {
330	s32 rx_status;
331	u32 desc_length; /* Chain flag, Buffer/frame length */
332	u32 addr;
333	u32 next_desc;
334};
335struct tx_desc {
336	s32 tx_status;
337	u32 desc_length; /* Chain flag, Tx Config, Frame length */
338	u32 addr;
339	u32 next_desc;
340};
341
342/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
343#define TXDESC		0x00e08000
344
345enum rx_status_bits {
346	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
347};
348
349/* Bits in *_desc.*_status */
350enum desc_status_bits {
351	DescOwn=0x80000000
352};
353
354/* Bits in ChipCmd. */
355enum chip_cmd_bits {
356	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
357	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
358	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
359	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
360};
361
362struct rhine_private {
363	/* Descriptor rings */
364	struct rx_desc *rx_ring;
365	struct tx_desc *tx_ring;
366	dma_addr_t rx_ring_dma;
367	dma_addr_t tx_ring_dma;
368
369	/* The addresses of receive-in-place skbuffs. */
370	struct sk_buff *rx_skbuff[RX_RING_SIZE];
371	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
372
373	/* The saved address of a sent-in-place packet/buffer, for later free(). */
374	struct sk_buff *tx_skbuff[TX_RING_SIZE];
375	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
376
377	/* Tx bounce buffers (Rhine-I only) */
378	unsigned char *tx_buf[TX_RING_SIZE];
379	unsigned char *tx_bufs;
380	dma_addr_t tx_bufs_dma;
381
382	struct pci_dev *pdev;
383	long pioaddr;
384	struct net_device_stats stats;
385	spinlock_t lock;
386
387	/* Frequently used values: keep some adjacent for cache effect. */
388	u32 quirks;
389	struct rx_desc *rx_head_desc;
390	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
391	unsigned int cur_tx, dirty_tx;
392	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
393	u8 wolopts;
394
395	u8 tx_thresh, rx_thresh;
396
397	struct mii_if_info mii_if;
398	void __iomem *base;
399};
400
401static int  mdio_read(struct net_device *dev, int phy_id, int location);
402static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
403static int  rhine_open(struct net_device *dev);
404static void rhine_tx_timeout(struct net_device *dev);
405static int  rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
406static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
407static void rhine_tx(struct net_device *dev);
408static int rhine_rx(struct net_device *dev, int limit);
409static void rhine_error(struct net_device *dev, int intr_status);
410static void rhine_set_rx_mode(struct net_device *dev);
411static struct net_device_stats *rhine_get_stats(struct net_device *dev);
412static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
413static const struct ethtool_ops netdev_ethtool_ops;
414static int  rhine_close(struct net_device *dev);
415static void rhine_shutdown (struct pci_dev *pdev);
416
417#define RHINE_WAIT_FOR(condition) do {					\
418	int i=1024;							\
419	while (!(condition) && --i)					\
420		;							\
421	if (debug > 1 && i < 512)					\
422		printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",	\
423				DRV_NAME, 1024-i, __func__, __LINE__);	\
424} while(0)
425
426static inline u32 get_intr_status(struct net_device *dev)
427{
428	struct rhine_private *rp = netdev_priv(dev);
429	void __iomem *ioaddr = rp->base;
430	u32 intr_status;
431
432	intr_status = ioread16(ioaddr + IntrStatus);
433	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
434	if (rp->quirks & rqStatusWBRace)
435		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
436	return intr_status;
437}
438
439/*
440 * Get power related registers into sane state.
441 * Notify user about past WOL event.
442 */
443static void rhine_power_init(struct net_device *dev)
444{
445	struct rhine_private *rp = netdev_priv(dev);
446	void __iomem *ioaddr = rp->base;
447	u16 wolstat;
448
449	if (rp->quirks & rqWOL) {
450		/* Make sure chip is in power state D0 */
451		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
452
453		/* Disable "force PME-enable" */
454		iowrite8(0x80, ioaddr + WOLcgClr);
455
456		/* Clear power-event config bits (WOL) */
457		iowrite8(0xFF, ioaddr + WOLcrClr);
458		/* More recent cards can manage two additional patterns */
459		if (rp->quirks & rq6patterns)
460			iowrite8(0x03, ioaddr + WOLcrClr1);
461
462		/* Save power-event status bits */
463		wolstat = ioread8(ioaddr + PwrcsrSet);
464		if (rp->quirks & rq6patterns)
465			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
466
467		/* Clear power-event status bits */
468		iowrite8(0xFF, ioaddr + PwrcsrClr);
469		if (rp->quirks & rq6patterns)
470			iowrite8(0x03, ioaddr + PwrcsrClr1);
471
472		if (wolstat) {
473			char *reason;
474			switch (wolstat) {
475			case WOLmagic:
476				reason = "Magic packet";
477				break;
478			case WOLlnkon:
479				reason = "Link went up";
480				break;
481			case WOLlnkoff:
482				reason = "Link went down";
483				break;
484			case WOLucast:
485				reason = "Unicast packet";
486				break;
487			case WOLbmcast:
488				reason = "Multicast/broadcast packet";
489				break;
490			default:
491				reason = "Unknown";
492			}
493			printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
494			       DRV_NAME, reason);
495		}
496	}
497}
498
499static void rhine_chip_reset(struct net_device *dev)
500{
501	struct rhine_private *rp = netdev_priv(dev);
502	void __iomem *ioaddr = rp->base;
503
504	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
505	IOSYNC;
506
507	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
508		printk(KERN_INFO "%s: Reset not complete yet. "
509			"Trying harder.\n", DRV_NAME);
510
511		/* Force reset */
512		if (rp->quirks & rqForceReset)
513			iowrite8(0x40, ioaddr + MiscCmd);
514
515		/* Reset can take somewhat longer (rare) */
516		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
517	}
518
519	if (debug > 1)
520		printk(KERN_INFO "%s: Reset %s.\n", dev->name,
521			(ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
522			"failed" : "succeeded");
523}
524
525#ifdef USE_MMIO
526static void enable_mmio(long pioaddr, u32 quirks)
527{
528	int n;
529	if (quirks & rqRhineI) {
530		/* More recent docs say that this bit is reserved ... */
531		n = inb(pioaddr + ConfigA) | 0x20;
532		outb(n, pioaddr + ConfigA);
533	} else {
534		n = inb(pioaddr + ConfigD) | 0x80;
535		outb(n, pioaddr + ConfigD);
536	}
537}
538#endif
539
540/*
541 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
542 * (plus 0x6C for Rhine-I/II)
543 */
544static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
545{
546	struct rhine_private *rp = netdev_priv(dev);
547	void __iomem *ioaddr = rp->base;
548
549	outb(0x20, pioaddr + MACRegEEcsr);
550	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
551
552#ifdef USE_MMIO
553	/*
554	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
555	 * MMIO. If reloading EEPROM was done first this could be avoided, but
556	 * it is not known if that still works with the "win98-reboot" problem.
557	 */
558	enable_mmio(pioaddr, rp->quirks);
559#endif
560
561	/* Turn off EEPROM-controlled wake-up (magic packet) */
562	if (rp->quirks & rqWOL)
563		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
564
565}
566
567#ifdef CONFIG_NET_POLL_CONTROLLER
568static void rhine_poll(struct net_device *dev)
569{
570	disable_irq(dev->irq);
571	rhine_interrupt(dev->irq, (void *)dev);
572	enable_irq(dev->irq);
573}
574#endif
575
576#ifdef CONFIG_VIA_RHINE_NAPI
577static int rhine_napipoll(struct net_device *dev, int *budget)
578{
579	struct rhine_private *rp = netdev_priv(dev);
580	void __iomem *ioaddr = rp->base;
581	int done, limit = min(dev->quota, *budget);
582
583	done = rhine_rx(dev, limit);
584	*budget -= done;
585	dev->quota -= done;
586
587	if (done < limit) {
588		netif_rx_complete(dev);
589
590		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
591			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
592			  IntrTxDone | IntrTxError | IntrTxUnderrun |
593			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
594			  ioaddr + IntrEnable);
595		return 0;
596	}
597	else
598		return 1;
599}
600#endif
601
602static void rhine_hw_init(struct net_device *dev, long pioaddr)
603{
604	struct rhine_private *rp = netdev_priv(dev);
605
606	/* Reset the chip to erase previous misconfiguration. */
607	rhine_chip_reset(dev);
608
609	/* Rhine-I needs extra time to recuperate before EEPROM reload */
610	if (rp->quirks & rqRhineI)
611		msleep(5);
612
613	/* Reload EEPROM controlled bytes cleared by soft reset */
614	rhine_reload_eeprom(pioaddr, dev);
615}
616
617static int __devinit rhine_init_one(struct pci_dev *pdev,
618				    const struct pci_device_id *ent)
619{
620	struct net_device *dev;
621	struct rhine_private *rp;
622	int i, rc;
623	u8 pci_rev;
624	u32 quirks;
625	long pioaddr;
626	long memaddr;
627	void __iomem *ioaddr;
628	int io_size, phy_id;
629	const char *name;
630#ifdef USE_MMIO
631	int bar = 1;
632#else
633	int bar = 0;
634#endif
635
636/* when built into the kernel, we only print version if device is found */
637#ifndef MODULE
638	static int printed_version;
639	if (!printed_version++)
640		printk(version);
641#endif
642
643	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
644
645	io_size = 256;
646	phy_id = 0;
647	quirks = 0;
648	name = "Rhine";
649	if (pci_rev < VTunknown0) {
650		quirks = rqRhineI;
651		io_size = 128;
652	}
653	else if (pci_rev >= VT6102) {
654		quirks = rqWOL | rqForceReset;
655		if (pci_rev < VT6105) {
656			name = "Rhine II";
657			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
658		}
659		else {
660			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
661			if (pci_rev >= VT6105_B0)
662				quirks |= rq6patterns;
663			if (pci_rev < VT6105M)
664				name = "Rhine III";
665			else
666				name = "Rhine III (Management Adapter)";
667		}
668	}
669
670	rc = pci_enable_device(pdev);
671	if (rc)
672		goto err_out;
673
674	/* this should always be supported */
675	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
676	if (rc) {
677		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
678		       "the card!?\n");
679		goto err_out;
680	}
681
682	/* sanity check */
683	if ((pci_resource_len(pdev, 0) < io_size) ||
684	    (pci_resource_len(pdev, 1) < io_size)) {
685		rc = -EIO;
686		printk(KERN_ERR "Insufficient PCI resources, aborting\n");
687		goto err_out;
688	}
689
690	pioaddr = pci_resource_start(pdev, 0);
691	memaddr = pci_resource_start(pdev, 1);
692
693	pci_set_master(pdev);
694
695	dev = alloc_etherdev(sizeof(struct rhine_private));
696	if (!dev) {
697		rc = -ENOMEM;
698		printk(KERN_ERR "alloc_etherdev failed\n");
699		goto err_out;
700	}
701	SET_MODULE_OWNER(dev);
702	SET_NETDEV_DEV(dev, &pdev->dev);
703
704	rp = netdev_priv(dev);
705	rp->quirks = quirks;
706	rp->pioaddr = pioaddr;
707	rp->pdev = pdev;
708
709	rc = pci_request_regions(pdev, DRV_NAME);
710	if (rc)
711		goto err_out_free_netdev;
712
713	ioaddr = pci_iomap(pdev, bar, io_size);
714	if (!ioaddr) {
715		rc = -EIO;
716		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
717		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
718		goto err_out_free_res;
719	}
720
721#ifdef USE_MMIO
722	enable_mmio(pioaddr, quirks);
723
724	/* Check that selected MMIO registers match the PIO ones */
725	i = 0;
726	while (mmio_verify_registers[i]) {
727		int reg = mmio_verify_registers[i++];
728		unsigned char a = inb(pioaddr+reg);
729		unsigned char b = readb(ioaddr+reg);
730		if (a != b) {
731			rc = -EIO;
			printk(KERN_ERR "MMIO does not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
734			goto err_out_unmap;
735		}
736	}
737#endif /* USE_MMIO */
738
739	dev->base_addr = (unsigned long)ioaddr;
740	rp->base = ioaddr;
741
742	/* Get chip registers into a sane state */
743	rhine_power_init(dev);
744	rhine_hw_init(dev, pioaddr);
745
746	for (i = 0; i < 6; i++)
747		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
748	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
749
750	if (!is_valid_ether_addr(dev->perm_addr)) {
751		rc = -EIO;
752		printk(KERN_ERR "Invalid MAC address\n");
753		goto err_out_unmap;
754	}
755
756	/* For Rhine-I/II, phy_id is loaded from EEPROM */
757	if (!phy_id)
758		phy_id = ioread8(ioaddr + 0x6C);
759
760	dev->irq = pdev->irq;
761
762	spin_lock_init(&rp->lock);
763	rp->mii_if.dev = dev;
764	rp->mii_if.mdio_read = mdio_read;
765	rp->mii_if.mdio_write = mdio_write;
766	rp->mii_if.phy_id_mask = 0x1f;
767	rp->mii_if.reg_num_mask = 0x1f;
768
769	/* The chip-specific entries in the device structure. */
770	dev->open = rhine_open;
771	dev->hard_start_xmit = rhine_start_tx;
772	dev->stop = rhine_close;
773	dev->get_stats = rhine_get_stats;
774	dev->set_multicast_list = rhine_set_rx_mode;
775	dev->do_ioctl = netdev_ioctl;
776	dev->ethtool_ops = &netdev_ethtool_ops;
777	dev->tx_timeout = rhine_tx_timeout;
778	dev->watchdog_timeo = TX_TIMEOUT;
779#ifdef CONFIG_NET_POLL_CONTROLLER
780	dev->poll_controller = rhine_poll;
781#endif
782#ifdef CONFIG_VIA_RHINE_NAPI
783	dev->poll = rhine_napipoll;
784	dev->weight = 64;
785#endif
786	if (rp->quirks & rqRhineI)
787		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
788
789	/* dev->name not defined before register_netdev()! */
790	rc = register_netdev(dev);
791	if (rc)
792		goto err_out_unmap;
793
794	printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
795	       dev->name, name,
796#ifdef USE_MMIO
797		memaddr
798#else
799		(long)ioaddr
800#endif
801		 );
802
803	for (i = 0; i < 5; i++)
804		printk("%2.2x:", dev->dev_addr[i]);
805	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);
806
807	pci_set_drvdata(pdev, dev);
808
809	{
810		u16 mii_cmd;
811		int mii_status = mdio_read(dev, phy_id, 1);
812		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
813		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
814		if (mii_status != 0xffff && mii_status != 0x0000) {
815			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
816			printk(KERN_INFO "%s: MII PHY found at address "
817			       "%d, status 0x%4.4x advertising %4.4x "
818			       "Link %4.4x.\n", dev->name, phy_id,
819			       mii_status, rp->mii_if.advertising,
820			       mdio_read(dev, phy_id, 5));
821
822			/* set IFF_RUNNING */
823			if (mii_status & BMSR_LSTATUS)
824				netif_carrier_on(dev);
825			else
826				netif_carrier_off(dev);
827
828		}
829	}
830	rp->mii_if.phy_id = phy_id;
831	if (debug > 1 && avoid_D3)
832		printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
833		       dev->name);
834
835	return 0;
836
837err_out_unmap:
838	pci_iounmap(pdev, ioaddr);
839err_out_free_res:
840	pci_release_regions(pdev);
841err_out_free_netdev:
842	free_netdev(dev);
843err_out:
844	return rc;
845}
846
847static int alloc_ring(struct net_device* dev)
848{
849	struct rhine_private *rp = netdev_priv(dev);
850	void *ring;
851	dma_addr_t ring_dma;
852
853	ring = pci_alloc_consistent(rp->pdev,
854				    RX_RING_SIZE * sizeof(struct rx_desc) +
855				    TX_RING_SIZE * sizeof(struct tx_desc),
856				    &ring_dma);
857	if (!ring) {
858		printk(KERN_ERR "Could not allocate DMA memory.\n");
859		return -ENOMEM;
860	}
861	if (rp->quirks & rqRhineI) {
862		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
863						   PKT_BUF_SZ * TX_RING_SIZE,
864						   &rp->tx_bufs_dma);
865		if (rp->tx_bufs == NULL) {
866			pci_free_consistent(rp->pdev,
867				    RX_RING_SIZE * sizeof(struct rx_desc) +
868				    TX_RING_SIZE * sizeof(struct tx_desc),
869				    ring, ring_dma);
870			return -ENOMEM;
871		}
872	}
873
874	rp->rx_ring = ring;
875	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
876	rp->rx_ring_dma = ring_dma;
877	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
878
879	return 0;
880}
881
882static void free_ring(struct net_device* dev)
883{
884	struct rhine_private *rp = netdev_priv(dev);
885
886	pci_free_consistent(rp->pdev,
887			    RX_RING_SIZE * sizeof(struct rx_desc) +
888			    TX_RING_SIZE * sizeof(struct tx_desc),
889			    rp->rx_ring, rp->rx_ring_dma);
890	rp->tx_ring = NULL;
891
892	if (rp->tx_bufs)
893		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
894				    rp->tx_bufs, rp->tx_bufs_dma);
895
896	rp->tx_bufs = NULL;
897
898}
899
900static void alloc_rbufs(struct net_device *dev)
901{
902	struct rhine_private *rp = netdev_priv(dev);
903	dma_addr_t next;
904	int i;
905
906	rp->dirty_rx = rp->cur_rx = 0;
907
908	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
909	rp->rx_head_desc = &rp->rx_ring[0];
910	next = rp->rx_ring_dma;
911
912	/* Init the ring entries */
913	for (i = 0; i < RX_RING_SIZE; i++) {
914		rp->rx_ring[i].rx_status = 0;
915		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
916		next += sizeof(struct rx_desc);
917		rp->rx_ring[i].next_desc = cpu_to_le32(next);
918		rp->rx_skbuff[i] = NULL;
919	}
920	/* Mark the last entry as wrapping the ring. */
921	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
922
923	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
924	for (i = 0; i < RX_RING_SIZE; i++) {
925		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
926		rp->rx_skbuff[i] = skb;
927		if (skb == NULL)
928			break;
929		skb->dev = dev;                 /* Mark as being used by this device. */
930
931		rp->rx_skbuff_dma[i] =
932			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
933				       PCI_DMA_FROMDEVICE);
934
935		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
936		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
937	}
938	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
939}
940
941static void free_rbufs(struct net_device* dev)
942{
943	struct rhine_private *rp = netdev_priv(dev);
944	int i;
945
946	/* Free all the skbuffs in the Rx queue. */
947	for (i = 0; i < RX_RING_SIZE; i++) {
948		rp->rx_ring[i].rx_status = 0;
949		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
950		if (rp->rx_skbuff[i]) {
951			pci_unmap_single(rp->pdev,
952					 rp->rx_skbuff_dma[i],
953					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
954			dev_kfree_skb(rp->rx_skbuff[i]);
955		}
956		rp->rx_skbuff[i] = NULL;
957	}
958}
959
960static void alloc_tbufs(struct net_device* dev)
961{
962	struct rhine_private *rp = netdev_priv(dev);
963	dma_addr_t next;
964	int i;
965
966	rp->dirty_tx = rp->cur_tx = 0;
967	next = rp->tx_ring_dma;
968	for (i = 0; i < TX_RING_SIZE; i++) {
969		rp->tx_skbuff[i] = NULL;
970		rp->tx_ring[i].tx_status = 0;
971		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
972		next += sizeof(struct tx_desc);
973		rp->tx_ring[i].next_desc = cpu_to_le32(next);
974		if (rp->quirks & rqRhineI)
975			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
976	}
977	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
978
979}
980
981static void free_tbufs(struct net_device* dev)
982{
983	struct rhine_private *rp = netdev_priv(dev);
984	int i;
985
986	for (i = 0; i < TX_RING_SIZE; i++) {
987		rp->tx_ring[i].tx_status = 0;
988		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
989		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
990		if (rp->tx_skbuff[i]) {
991			if (rp->tx_skbuff_dma[i]) {
992				pci_unmap_single(rp->pdev,
993						 rp->tx_skbuff_dma[i],
994						 rp->tx_skbuff[i]->len,
995						 PCI_DMA_TODEVICE);
996			}
997			dev_kfree_skb(rp->tx_skbuff[i]);
998		}
999		rp->tx_skbuff[i] = NULL;
1000		rp->tx_buf[i] = NULL;
1001	}
1002}
1003
1004static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1005{
1006	struct rhine_private *rp = netdev_priv(dev);
1007	void __iomem *ioaddr = rp->base;
1008
1009	mii_check_media(&rp->mii_if, debug, init_media);
1010
1011	if (rp->mii_if.full_duplex)
1012	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1013		   ioaddr + ChipCmd1);
1014	else
1015	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1016		   ioaddr + ChipCmd1);
1017	if (debug > 1)
1018		printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
1019			rp->mii_if.force_media, netif_carrier_ok(dev));
1020}
1021
1022/* Called after status of force_media possibly changed */
1023static void rhine_set_carrier(struct mii_if_info *mii)
1024{
1025	if (mii->force_media) {
1026		/* autoneg is off: Link is always assumed to be up */
1027		if (!netif_carrier_ok(mii->dev))
1028			netif_carrier_on(mii->dev);
1029	}
	else	/* Let the MII library update carrier status */
1031		rhine_check_media(mii->dev, 0);
1032	if (debug > 1)
1033		printk(KERN_INFO "%s: force_media %d, carrier %d\n",
1034		       mii->dev->name, mii->force_media,
1035		       netif_carrier_ok(mii->dev));
1036}
1037
1038static void init_registers(struct net_device *dev)
1039{
1040	struct rhine_private *rp = netdev_priv(dev);
1041	void __iomem *ioaddr = rp->base;
1042	int i;
1043
1044	for (i = 0; i < 6; i++)
1045		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1046
1047	/* Initialize other registers. */
1048	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1049	/* Configure initial FIFO thresholds. */
1050	iowrite8(0x20, ioaddr + TxConfig);
1051	rp->tx_thresh = 0x20;
1052	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1053
1054	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1055	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1056
1057	rhine_set_rx_mode(dev);
1058
1059	netif_poll_enable(dev);
1060
1061	/* Enable interrupts by setting the interrupt mask. */
1062	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
1063	       IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
1064	       IntrTxDone | IntrTxError | IntrTxUnderrun |
1065	       IntrPCIErr | IntrStatsMax | IntrLinkChange,
1066	       ioaddr + IntrEnable);
1067
1068	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1069	       ioaddr + ChipCmd);
1070	rhine_check_media(dev, 1);
1071}
1072
1073/* Enable MII link status auto-polling (required for IntrLinkChange) */
1074static void rhine_enable_linkmon(void __iomem *ioaddr)
1075{
1076	iowrite8(0, ioaddr + MIICmd);
1077	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1078	iowrite8(0x80, ioaddr + MIICmd);
1079
1080	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
1081
1082	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1083}
1084
1085/* Disable MII link status auto-polling (required for MDIO access) */
1086static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
1087{
1088	iowrite8(0, ioaddr + MIICmd);
1089
1090	if (quirks & rqRhineI) {
1091		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
1092
1093		/* Can be called from ISR. Evil. */
1094		mdelay(1);
1095
1096		/* 0x80 must be set immediately before turning it off */
1097		iowrite8(0x80, ioaddr + MIICmd);
1098
1099		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
1100
1101		/* Heh. Now clear 0x80 again. */
1102		iowrite8(0, ioaddr + MIICmd);
1103	}
1104	else
1105		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
1106}
1107
1108/* Read and write over the MII Management Data I/O (MDIO) interface. */
1109
1110static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1111{
1112	struct rhine_private *rp = netdev_priv(dev);
1113	void __iomem *ioaddr = rp->base;
1114	int result;
1115
1116	rhine_disable_linkmon(ioaddr, rp->quirks);
1117
1118	/* rhine_disable_linkmon already cleared MIICmd */
1119	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1120	iowrite8(regnum, ioaddr + MIIRegAddr);
1121	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1122	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
1123	result = ioread16(ioaddr + MIIData);
1124
1125	rhine_enable_linkmon(ioaddr);
1126	return result;
1127}
1128
1129static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1130{
1131	struct rhine_private *rp = netdev_priv(dev);
1132	void __iomem *ioaddr = rp->base;
1133
1134	rhine_disable_linkmon(ioaddr, rp->quirks);
1135
1136	/* rhine_disable_linkmon already cleared MIICmd */
1137	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1138	iowrite8(regnum, ioaddr + MIIRegAddr);
1139	iowrite16(value, ioaddr + MIIData);
1140	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1141	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
1142
1143	rhine_enable_linkmon(ioaddr);
1144}
1145
1146static int rhine_open(struct net_device *dev)
1147{
1148	struct rhine_private *rp = netdev_priv(dev);
1149	void __iomem *ioaddr = rp->base;
1150	int rc;
1151
1152	rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
1153			dev);
1154	if (rc)
1155		return rc;
1156
1157	if (debug > 1)
1158		printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
1159		       dev->name, rp->pdev->irq);
1160
1161	rc = alloc_ring(dev);
1162	if (rc) {
1163		free_irq(rp->pdev->irq, dev);
1164		return rc;
1165	}
1166	alloc_rbufs(dev);
1167	alloc_tbufs(dev);
1168	rhine_chip_reset(dev);
1169	init_registers(dev);
1170	if (debug > 2)
1171		printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
1172		       "MII status: %4.4x.\n",
1173		       dev->name, ioread16(ioaddr + ChipCmd),
1174		       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1175
1176	netif_start_queue(dev);
1177
1178	return 0;
1179}
1180
1181static void rhine_tx_timeout(struct net_device *dev)
1182{
1183	struct rhine_private *rp = netdev_priv(dev);
1184	void __iomem *ioaddr = rp->base;
1185
1186	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1187	       "%4.4x, resetting...\n",
1188	       dev->name, ioread16(ioaddr + IntrStatus),
1189	       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1190
1191	/* protect against concurrent rx interrupts */
1192	disable_irq(rp->pdev->irq);
1193
1194	spin_lock(&rp->lock);
1195
1196	/* clear all descriptors */
1197	free_tbufs(dev);
1198	free_rbufs(dev);
1199	alloc_tbufs(dev);
1200	alloc_rbufs(dev);
1201
1202	/* Reinitialize the hardware. */
1203	rhine_chip_reset(dev);
1204	init_registers(dev);
1205
1206	spin_unlock(&rp->lock);
1207	enable_irq(rp->pdev->irq);
1208
1209	dev->trans_start = jiffies;
1210	rp->stats.tx_errors++;
1211	netif_wake_queue(dev);
1212}
1213
1214static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1215{
1216	struct rhine_private *rp = netdev_priv(dev);
1217	void __iomem *ioaddr = rp->base;
1218	unsigned entry;
1219
1220	/* Caution: the write order is important here, set the field
1221	   with the "ownership" bits last. */
1222
1223	/* Calculate the next Tx descriptor entry. */
1224	entry = rp->cur_tx % TX_RING_SIZE;
1225
1226	if (skb_padto(skb, ETH_ZLEN))
1227		return 0;
1228
1229	rp->tx_skbuff[entry] = skb;
1230
1231	if ((rp->quirks & rqRhineI) &&
1232	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1233		/* Must use alignment buffer. */
1234		if (skb->len > PKT_BUF_SZ) {
1235			/* packet too long, drop it */
1236			dev_kfree_skb(skb);
1237			rp->tx_skbuff[entry] = NULL;
1238			rp->stats.tx_dropped++;
1239			return 0;
1240		}
1241
1242		/* Padding is not copied and so must be redone. */
1243		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1244		if (skb->len < ETH_ZLEN)
1245			memset(rp->tx_buf[entry] + skb->len, 0,
1246			       ETH_ZLEN - skb->len);
1247		rp->tx_skbuff_dma[entry] = 0;
1248		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1249						      (rp->tx_buf[entry] -
1250						       rp->tx_bufs));
1251	} else {
1252		rp->tx_skbuff_dma[entry] =
1253			pci_map_single(rp->pdev, skb->data, skb->len,
1254				       PCI_DMA_TODEVICE);
1255		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1256	}
1257
1258	rp->tx_ring[entry].desc_length =
1259		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1260
1261	/* lock eth irq */
1262	spin_lock_irq(&rp->lock);
1263	wmb();
1264	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1265	wmb();
1266
1267	rp->cur_tx++;
1268
1269	/* Non-x86 Todo: explicitly flush cache lines here. */
1270
1271	/* Wake the potentially-idle transmit channel */
1272	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1273	       ioaddr + ChipCmd1);
1274	IOSYNC;
1275
1276	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1277		netif_stop_queue(dev);
1278
1279	dev->trans_start = jiffies;
1280
1281	spin_unlock_irq(&rp->lock);
1282
1283	if (debug > 4) {
1284		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1285		       dev->name, rp->cur_tx-1, entry);
1286	}
1287	return 0;
1288}
1289
1290/* The interrupt handler does all of the Rx thread work and cleans up
1291   after the Tx thread. */
1292static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1293{
1294	struct net_device *dev = dev_instance;
1295	struct rhine_private *rp = netdev_priv(dev);
1296	void __iomem *ioaddr = rp->base;
1297	u32 intr_status;
1298	int boguscnt = max_interrupt_work;
1299	int handled = 0;
1300
1301	while ((intr_status = get_intr_status(dev))) {
1302		handled = 1;
1303
1304		/* Acknowledge all of the current interrupt sources ASAP. */
1305		if (intr_status & IntrTxDescRace)
1306			iowrite8(0x08, ioaddr + IntrStatus2);
1307		iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
1308		IOSYNC;
1309
1310		if (debug > 4)
1311			printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
1312			       dev->name, intr_status);
1313
1314		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1315				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
1316#ifdef CONFIG_VIA_RHINE_NAPI
1317			iowrite16(IntrTxAborted |
1318				  IntrTxDone | IntrTxError | IntrTxUnderrun |
1319				  IntrPCIErr | IntrStatsMax | IntrLinkChange,
1320				  ioaddr + IntrEnable);
1321
1322			netif_rx_schedule(dev);
1323#else
1324			rhine_rx(dev, RX_RING_SIZE);
1325#endif
1326		}
1327
1328		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1329			if (intr_status & IntrTxErrSummary) {
1330				/* Avoid scavenging before Tx engine turned off */
1331				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
1332				if (debug > 2 &&
1333				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
					printk(KERN_WARNING "%s: "
					       "rhine_interrupt() Tx engine "
					       "still on.\n", dev->name);
1337			}
1338			rhine_tx(dev);
1339		}
1340
		/* Abnormal error summary/uncommon event handlers. */
1342		if (intr_status & (IntrPCIErr | IntrLinkChange |
1343				   IntrStatsMax | IntrTxError | IntrTxAborted |
1344				   IntrTxUnderrun | IntrTxDescRace))
1345			rhine_error(dev, intr_status);
1346
1347		if (--boguscnt < 0) {
1348			printk(KERN_WARNING "%s: Too much work at interrupt, "
1349			       "status=%#8.8x.\n",
1350			       dev->name, intr_status);
1351			break;
1352		}
1353	}
1354
1355	if (debug > 3)
1356		printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
1357		       dev->name, ioread16(ioaddr + IntrStatus));
1358	return IRQ_RETVAL(handled);
1359}
1360
1361/* This routine is logically part of the interrupt handler, but isolated
1362   for clarity. */
1363static void rhine_tx(struct net_device *dev)
1364{
1365	struct rhine_private *rp = netdev_priv(dev);
1366	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1367
1368	spin_lock(&rp->lock);
1369
1370	/* find and cleanup dirty tx descriptors */
1371	while (rp->dirty_tx != rp->cur_tx) {
1372		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1373		if (debug > 6)
1374			printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
1375			       entry, txstatus);
1376		if (txstatus & DescOwn)
1377			break;
1378		if (txstatus & 0x8000) {
1379			if (debug > 1)
1380				printk(KERN_DEBUG "%s: Transmit error, "
1381				       "Tx status %8.8x.\n",
1382				       dev->name, txstatus);
1383			rp->stats.tx_errors++;
1384			if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
1385			if (txstatus & 0x0200) rp->stats.tx_window_errors++;
1386			if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
1387			if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
1388			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1389			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
1390				rp->stats.tx_fifo_errors++;
1391				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1392				break; /* Keep the skb - we try again */
1393			}
1394			/* Transmitter restarted in 'abnormal' handler. */
1395		} else {
1396			if (rp->quirks & rqRhineI)
1397				rp->stats.collisions += (txstatus >> 3) & 0x0F;
1398			else
1399				rp->stats.collisions += txstatus & 0x0F;
1400			if (debug > 6)
1401				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
1402				       (txstatus >> 3) & 0xF,
1403				       txstatus & 0xF);
1404			rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1405			rp->stats.tx_packets++;
1406		}
1407		/* Free the original skb. */
1408		if (rp->tx_skbuff_dma[entry]) {
1409			pci_unmap_single(rp->pdev,
1410					 rp->tx_skbuff_dma[entry],
1411					 rp->tx_skbuff[entry]->len,
1412					 PCI_DMA_TODEVICE);
1413		}
1414		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1415		rp->tx_skbuff[entry] = NULL;
1416		entry = (++rp->dirty_tx) % TX_RING_SIZE;
1417	}
1418	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1419		netif_wake_queue(dev);
1420
1421	spin_unlock(&rp->lock);
1422}
1423
1424/* Process up to limit frames from receive ring */
1425static int rhine_rx(struct net_device *dev, int limit)
1426{
1427	struct rhine_private *rp = netdev_priv(dev);
1428	int count;
1429	int entry = rp->cur_rx % RX_RING_SIZE;
1430
1431	if (debug > 4) {
1432		printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
1433		       dev->name, entry,
1434		       le32_to_cpu(rp->rx_head_desc->rx_status));
1435	}
1436
1437	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1438	for (count = 0; count < limit; ++count) {
1439		struct rx_desc *desc = rp->rx_head_desc;
1440		u32 desc_status = le32_to_cpu(desc->rx_status);
1441		int data_size = desc_status >> 16;
1442
1443		if (desc_status & DescOwn)
1444			break;
1445
1446		if (debug > 4)
1447			printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
1448			       desc_status);
1449
1450		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1451			if ((desc_status & RxWholePkt) != RxWholePkt) {
1452				printk(KERN_WARNING "%s: Oversized Ethernet "
1453				       "frame spanned multiple buffers, entry "
1454				       "%#x length %d status %8.8x!\n",
1455				       dev->name, entry, data_size,
1456				       desc_status);
1457				printk(KERN_WARNING "%s: Oversized Ethernet "
1458				       "frame %p vs %p.\n", dev->name,
1459				       rp->rx_head_desc, &rp->rx_ring[entry]);
1460				rp->stats.rx_length_errors++;
1461			} else if (desc_status & RxErr) {
				/* There was an error. */
1463				if (debug > 2)
1464					printk(KERN_DEBUG "rhine_rx() Rx "
1465					       "error was %8.8x.\n",
1466					       desc_status);
1467				rp->stats.rx_errors++;
1468				if (desc_status & 0x0030) rp->stats.rx_length_errors++;
1469				if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
1470				if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
1471				if (desc_status & 0x0002) {
1472					/* this can also be updated outside the interrupt handler */
1473					spin_lock(&rp->lock);
1474					rp->stats.rx_crc_errors++;
1475					spin_unlock(&rp->lock);
1476				}
1477			}
1478		} else {
1479			struct sk_buff *skb;
1480			/* Length should omit the CRC */
1481			int pkt_len = data_size - 4;
1482
1483			/* Check if the packet is long enough to accept without
1484			   copying to a minimally-sized skbuff. */
1485			if (pkt_len < rx_copybreak &&
1486				(skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1487				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1488				pci_dma_sync_single_for_cpu(rp->pdev,
1489							    rp->rx_skbuff_dma[entry],
1490							    rp->rx_buf_sz,
1491							    PCI_DMA_FROMDEVICE);
1492
1493				eth_copy_and_sum(skb,
1494						 rp->rx_skbuff[entry]->data,
1495						 pkt_len, 0);
1496				skb_put(skb, pkt_len);
1497				pci_dma_sync_single_for_device(rp->pdev,
1498							       rp->rx_skbuff_dma[entry],
1499							       rp->rx_buf_sz,
1500							       PCI_DMA_FROMDEVICE);
1501			} else {
1502				skb = rp->rx_skbuff[entry];
1503				if (skb == NULL) {
1504					printk(KERN_ERR "%s: Inconsistent Rx "
1505					       "descriptor chain.\n",
1506					       dev->name);
1507					break;
1508				}
1509				rp->rx_skbuff[entry] = NULL;
1510				skb_put(skb, pkt_len);
1511				pci_unmap_single(rp->pdev,
1512						 rp->rx_skbuff_dma[entry],
1513						 rp->rx_buf_sz,
1514						 PCI_DMA_FROMDEVICE);
1515			}
1516			skb->protocol = eth_type_trans(skb, dev);
1517#ifdef CONFIG_VIA_RHINE_NAPI
1518			netif_receive_skb(skb);
1519#else
1520			netif_rx(skb);
1521#endif
1522			dev->last_rx = jiffies;
1523			rp->stats.rx_bytes += pkt_len;
1524			rp->stats.rx_packets++;
1525		}
1526		entry = (++rp->cur_rx) % RX_RING_SIZE;
1527		rp->rx_head_desc = &rp->rx_ring[entry];
1528	}
1529
1530	/* Refill the Rx ring buffers. */
1531	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1532		struct sk_buff *skb;
1533		entry = rp->dirty_rx % RX_RING_SIZE;
1534		if (rp->rx_skbuff[entry] == NULL) {
1535			skb = dev_alloc_skb(rp->rx_buf_sz);
1536			rp->rx_skbuff[entry] = skb;
1537			if (skb == NULL)
1538				break;	/* Better luck next round. */
1539			skb->dev = dev;	/* Mark as being used by this device. */
1540			rp->rx_skbuff_dma[entry] =
1541				pci_map_single(rp->pdev, skb->data,
1542					       rp->rx_buf_sz,
1543					       PCI_DMA_FROMDEVICE);
1544			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1545		}
1546		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1547	}
1548
1549	return count;
1550}
1551
1552/*
1553 * Clears the "tally counters" for CRC errors and missed frames(?).
1554 * It has been reported that some chips need a write of 0 to clear
1555 * these, for others the counters are set to 1 when written to and
1556 * instead cleared when read. So we clear them both ways ...
1557 */
1558static inline void clear_tally_counters(void __iomem *ioaddr)
1559{
1560	iowrite32(0, ioaddr + RxMissed);
1561	ioread16(ioaddr + RxCRCErrs);
1562	ioread16(ioaddr + RxMissed);
1563}
1564
1565static void rhine_restart_tx(struct net_device *dev) {
1566	struct rhine_private *rp = netdev_priv(dev);
1567	void __iomem *ioaddr = rp->base;
1568	int entry = rp->dirty_tx % TX_RING_SIZE;
1569	u32 intr_status;
1570
1571	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
1573	 * In that case the ISR will be back here RSN anyway.
1574	 */
1575	intr_status = get_intr_status(dev);
1576
1577	if ((intr_status & IntrTxErrSummary) == 0) {
1578
1579		/* We know better than the chip where it should continue. */
1580		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1581		       ioaddr + TxRingPtr);
1582
1583		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1584		       ioaddr + ChipCmd);
1585		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1586		       ioaddr + ChipCmd1);
1587		IOSYNC;
1588	}
1589	else {
1590		/* This should never happen */
1591		if (debug > 1)
1592			printk(KERN_WARNING "%s: rhine_restart_tx() "
			       "Another error occurred %8.8x.\n",
1594			       dev->name, intr_status);
1595	}
1596
1597}
1598
1599static void rhine_error(struct net_device *dev, int intr_status)
1600{
1601	struct rhine_private *rp = netdev_priv(dev);
1602	void __iomem *ioaddr = rp->base;
1603
1604	spin_lock(&rp->lock);
1605
1606	if (intr_status & IntrLinkChange)
1607		rhine_check_media(dev, 0);
1608	if (intr_status & IntrStatsMax) {
1609		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1610		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1611		clear_tally_counters(ioaddr);
1612	}
1613	if (intr_status & IntrTxAborted) {
1614		if (debug > 1)
1615			printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
1616			       dev->name, intr_status);
1617	}
1618	if (intr_status & IntrTxUnderrun) {
1619		if (rp->tx_thresh < 0xE0)
1620			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1621		if (debug > 1)
1622			printk(KERN_INFO "%s: Transmitter underrun, Tx "
1623			       "threshold now %2.2x.\n",
1624			       dev->name, rp->tx_thresh);
1625	}
1626	if (intr_status & IntrTxDescRace) {
1627		if (debug > 2)
1628			printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
1629			       dev->name);
1630	}
1631	if ((intr_status & IntrTxError) &&
1632	    (intr_status & (IntrTxAborted |
1633	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
1634		if (rp->tx_thresh < 0xE0) {
1635			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1636		}
1637		if (debug > 1)
1638			printk(KERN_INFO "%s: Unspecified error. Tx "
1639			       "threshold now %2.2x.\n",
1640			       dev->name, rp->tx_thresh);
1641	}
1642	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1643			   IntrTxError))
1644		rhine_restart_tx(dev);
1645
1646	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1647			    IntrTxError | IntrTxAborted | IntrNormalSummary |
1648			    IntrTxDescRace)) {
1649		if (debug > 1)
1650			printk(KERN_ERR "%s: Something Wicked happened! "
1651			       "%8.8x.\n", dev->name, intr_status);
1652	}
1653
1654	spin_unlock(&rp->lock);
1655}
1656
1657static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1658{
1659	struct rhine_private *rp = netdev_priv(dev);
1660	void __iomem *ioaddr = rp->base;
1661	unsigned long flags;
1662
1663	spin_lock_irqsave(&rp->lock, flags);
1664	rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1665	rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1666	clear_tally_counters(ioaddr);
1667	spin_unlock_irqrestore(&rp->lock, flags);
1668
1669	return &rp->stats;
1670}
1671
1672static void rhine_set_rx_mode(struct net_device *dev)
1673{
1674	struct rhine_private *rp = netdev_priv(dev);
1675	void __iomem *ioaddr = rp->base;
1676	u32 mc_filter[2];	/* Multicast hash filter */
1677	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */
1678
1679	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
1680		rx_mode = 0x1C;
1681		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1682		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1683	} else if ((dev->mc_count > multicast_filter_limit)
1684		   || (dev->flags & IFF_ALLMULTI)) {
1685		/* Too many to match, or accept all multicasts. */
1686		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1687		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1688		rx_mode = 0x0C;
1689	} else {
1690		struct dev_mc_list *mclist;
1691		int i;
1692		memset(mc_filter, 0, sizeof(mc_filter));
1693		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1694		     i++, mclist = mclist->next) {
1695			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1696
1697			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1698		}
1699		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1700		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1701		rx_mode = 0x0C;
1702	}
1703	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
1704}
1705
1706static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1707{
1708	struct rhine_private *rp = netdev_priv(dev);
1709
1710	strcpy(info->driver, DRV_NAME);
1711	strcpy(info->version, DRV_VERSION);
1712	strcpy(info->bus_info, pci_name(rp->pdev));
1713}
1714
1715static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1716{
1717	struct rhine_private *rp = netdev_priv(dev);
1718	int rc;
1719
1720	spin_lock_irq(&rp->lock);
1721	rc = mii_ethtool_gset(&rp->mii_if, cmd);
1722	spin_unlock_irq(&rp->lock);
1723
1724	return rc;
1725}
1726
1727static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1728{
1729	struct rhine_private *rp = netdev_priv(dev);
1730	int rc;
1731
1732	spin_lock_irq(&rp->lock);
1733	rc = mii_ethtool_sset(&rp->mii_if, cmd);
1734	spin_unlock_irq(&rp->lock);
1735	rhine_set_carrier(&rp->mii_if);
1736
1737	return rc;
1738}
1739
1740static int netdev_nway_reset(struct net_device *dev)
1741{
1742	struct rhine_private *rp = netdev_priv(dev);
1743
1744	return mii_nway_restart(&rp->mii_if);
1745}
1746
1747static u32 netdev_get_link(struct net_device *dev)
1748{
1749	struct rhine_private *rp = netdev_priv(dev);
1750
1751	return mii_link_ok(&rp->mii_if);
1752}
1753
1754static u32 netdev_get_msglevel(struct net_device *dev)
1755{
1756	return debug;
1757}
1758
1759static void netdev_set_msglevel(struct net_device *dev, u32 value)
1760{
1761	debug = value;
1762}
1763
1764static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1765{
1766	struct rhine_private *rp = netdev_priv(dev);
1767
1768	if (!(rp->quirks & rqWOL))
1769		return;
1770
1771	spin_lock_irq(&rp->lock);
1772	wol->supported = WAKE_PHY | WAKE_MAGIC |
1773			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
1774	wol->wolopts = rp->wolopts;
1775	spin_unlock_irq(&rp->lock);
1776}
1777
1778static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1779{
1780	struct rhine_private *rp = netdev_priv(dev);
1781	u32 support = WAKE_PHY | WAKE_MAGIC |
1782		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
1783
1784	if (!(rp->quirks & rqWOL))
1785		return -EINVAL;
1786
1787	if (wol->wolopts & ~support)
1788		return -EINVAL;
1789
1790	spin_lock_irq(&rp->lock);
1791	rp->wolopts = wol->wolopts;
1792	spin_unlock_irq(&rp->lock);
1793
1794	return 0;
1795}
1796
1797static const struct ethtool_ops netdev_ethtool_ops = {
1798	.get_drvinfo		= netdev_get_drvinfo,
1799	.get_settings		= netdev_get_settings,
1800	.set_settings		= netdev_set_settings,
1801	.nway_reset		= netdev_nway_reset,
1802	.get_link		= netdev_get_link,
1803	.get_msglevel		= netdev_get_msglevel,
1804	.set_msglevel		= netdev_set_msglevel,
1805	.get_wol		= rhine_get_wol,
1806	.set_wol		= rhine_set_wol,
1807	.get_sg			= ethtool_op_get_sg,
1808	.get_tx_csum		= ethtool_op_get_tx_csum,
1809	.get_perm_addr		= ethtool_op_get_perm_addr,
1810};
1811
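/*
 * MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) are handed to the
 * generic MII layer; the interface must be up for them to succeed.
 */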
1812static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1813{
1814	struct rhine_private *rp = netdev_priv(dev);
1815	int rc;
1816
1817	if (!netif_running(dev))
1818		return -EINVAL;
1819
1820	spin_lock_irq(&rp->lock);
1821	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
1822	spin_unlock_irq(&rp->lock);
1823	rhine_set_carrier(&rp->mii_if);
1824
1825	return rc;
1826}
1827
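/*
 * Bring the interface down: stop the queue and polling, switch the chip to
 * loopback, mask interrupts, stop Tx/Rx, then release the IRQ and the rings.
 */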
1828static int rhine_close(struct net_device *dev)
1829{
1830	struct rhine_private *rp = netdev_priv(dev);
1831	void __iomem *ioaddr = rp->base;
1832
1833	spin_lock_irq(&rp->lock);
1834
1835	netif_stop_queue(dev);
1836	netif_poll_disable(dev);
1837
1838	if (debug > 1)
1839		printk(KERN_DEBUG "%s: Shutting down ethercard, "
1840		       "status was %4.4x.\n",
1841		       dev->name, ioread16(ioaddr + ChipCmd));
1842
1843	/* Switch to loopback mode to avoid hardware races. */
1844	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
1845
1846	/* Disable interrupts by clearing the interrupt mask. */
1847	iowrite16(0x0000, ioaddr + IntrEnable);
1848
1849	/* Stop the chip's Tx and Rx processes. */
1850	iowrite16(CmdStop, ioaddr + ChipCmd);
1851
1852	spin_unlock_irq(&rp->lock);
1853
1854	free_irq(rp->pdev->irq, dev);
1855	free_rbufs(dev);
1856	free_tbufs(dev);
1857	free_ring(dev);
1858
1859	return 0;
1860}
1861
1862
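/* PCI removal: unregister the netdev, unmap MMIO, release the regions and free everything. */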
1863static void __devexit rhine_remove_one(struct pci_dev *pdev)
1864{
1865	struct net_device *dev = pci_get_drvdata(pdev);
1866	struct rhine_private *rp = netdev_priv(dev);
1867
1868	unregister_netdev(dev);
1869
1870	pci_iounmap(pdev, rp->base);
1871	pci_release_regions(pdev);
1872
1873	free_netdev(dev);
1874	pci_disable_device(pdev);
1875	pci_set_drvdata(pdev, NULL);
1876}
1877
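/*
 * Arm Wake-on-LAN at shutdown: program the requested wake events, enable
 * legacy WOL for old motherboards, and drop into D3 unless avoid_D3 is set.
 */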
1878static void rhine_shutdown (struct pci_dev *pdev)
1879{
1880	struct net_device *dev = pci_get_drvdata(pdev);
1881	struct rhine_private *rp = netdev_priv(dev);
1882	void __iomem *ioaddr = rp->base;
1883
1884	if (!(rp->quirks & rqWOL))
1885		return; /* Nothing to do for non-WOL adapters */
1886
1887	rhine_power_init(dev);
1888
1889	/* Make sure we use pattern 0, 1 and not 4, 5 */
1890	if (rp->quirks & rq6patterns)
1891		iowrite8(0x04, ioaddr + 0xA7);
1892
1893	if (rp->wolopts & WAKE_MAGIC) {
1894		iowrite8(WOLmagic, ioaddr + WOLcrSet);
1895		/*
1896		 * Turn EEPROM-controlled wake-up back on -- some hardware may
1897		 * not cooperate otherwise.
1898		 */
1899		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
1900	}
1901
1902	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
1903		iowrite8(WOLbmcast, ioaddr + WOLcgSet);
1904
1905	if (rp->wolopts & WAKE_PHY)
1906		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
1907
1908	if (rp->wolopts & WAKE_UCAST)
1909		iowrite8(WOLucast, ioaddr + WOLcrSet);
1910
1911	if (rp->wolopts) {
1912		/* Enable legacy WOL (for old motherboards) */
1913		iowrite8(0x01, ioaddr + PwcfgSet);
1914		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
1915	}
1916
1917	/* Hit power state D3 (sleep) */
1918	if (!avoid_D3)
1919		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
1920
1921	/* TODO: Check use of pci_enable_wake() */
1922
1923}
1924
1925#ifdef CONFIG_PM
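/*
 * Suspend: detach the device, save PCI state, arm WOL via rhine_shutdown()
 * and release the IRQ.
 */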
1926static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
1927{
1928	struct net_device *dev = pci_get_drvdata(pdev);
1929	struct rhine_private *rp = netdev_priv(dev);
1930	unsigned long flags;
1931
1932	if (!netif_running(dev))
1933		return 0;
1934
1935	netif_device_detach(dev);
1936	pci_save_state(pdev);
1937
1938	spin_lock_irqsave(&rp->lock, flags);
1939	rhine_shutdown(pdev);
1940	spin_unlock_irqrestore(&rp->lock, flags);
1941
1942	free_irq(dev->irq, dev);
1943	return 0;
1944}
1945
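/*
 * Resume: reacquire the IRQ, return the chip to D0, restore PCI state,
 * rebuild the rings and reprogram the registers before reattaching.
 */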
1946static int rhine_resume(struct pci_dev *pdev)
1947{
1948	struct net_device *dev = pci_get_drvdata(pdev);
1949	struct rhine_private *rp = netdev_priv(dev);
1950	unsigned long flags;
1951	int ret;
1952
1953	if (!netif_running(dev))
1954		return 0;
1955
1956	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
1957		printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
1958
1959	ret = pci_set_power_state(pdev, PCI_D0);
1960	if (debug > 1)
1961		printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
1962			dev->name, ret ? "failed" : "succeeded", ret);
1963
1964	pci_restore_state(pdev);
1965
1966	spin_lock_irqsave(&rp->lock, flags);
1967#ifdef USE_MMIO
1968	enable_mmio(rp->pioaddr, rp->quirks);
1969#endif
1970	rhine_power_init(dev);
1971	free_tbufs(dev);
1972	free_rbufs(dev);
1973	alloc_tbufs(dev);
1974	alloc_rbufs(dev);
1975	init_registers(dev);
1976	spin_unlock_irqrestore(&rp->lock, flags);
1977
1978	netif_device_attach(dev);
1979
1980	return 0;
1981}
1982#endif /* CONFIG_PM */
1983
1984static struct pci_driver rhine_driver = {
1985	.name		= DRV_NAME,
1986	.id_table	= rhine_pci_tbl,
1987	.probe		= rhine_init_one,
1988	.remove		= __devexit_p(rhine_remove_one),
1989#ifdef CONFIG_PM
1990	.suspend	= rhine_suspend,
1991	.resume		= rhine_resume,
1992#endif /* CONFIG_PM */
1993	.shutdown	= rhine_shutdown,
1994};
1995
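/* Boards whose BIOS fails PXE boot with the chip left in D3; a match forces avoid_D3. */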
1996static struct dmi_system_id __initdata rhine_dmi_table[] = {
1997	{
1998		.ident = "EPIA-M",
1999		.matches = {
2000			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2001			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2002		},
2003	},
2004	{
2005		.ident = "KV7",
2006		.matches = {
2007			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2008			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2009		},
2010	},
2011	{ NULL }
2012};
2013
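/*
 * avoid_D3 can also be forced by hand on boards missing from the table
 * above; assuming the usual module-parameter setup for this driver, e.g.:
 *	modprobe via-rhine avoid_D3=1 debug=2
 */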
2014static int __init rhine_init(void)
2015{
2016/* When built as a module, this is printed whether or not any devices are found during probe. */
2017#ifdef MODULE
2018	printk(version);
2019#endif
2020	if (dmi_check_system(rhine_dmi_table)) {
2021		/* these BIOSes fail at PXE boot if the chip is in D3 */
2022		avoid_D3 = 1;
2023		printk(KERN_WARNING "%s: Broken BIOS detected, avoid_D3 "
2024				    "enabled.\n",
2025		       DRV_NAME);
2026	}
2027	else if (avoid_D3)
2028		printk(KERN_INFO "%s: avoid_D3 set.\n", DRV_NAME);
2029
2030	return pci_register_driver(&rhine_driver);
2031}
2032
2033
2034static void __exit rhine_cleanup(void)
2035{
2036	pci_unregister_driver(&rhine_driver);
2037}
2038
2039
2040module_init(rhine_init);
2041module_exit(rhine_cleanup);
2042