/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the 6102 Rhine-II, and 6105/6105M Rhine-III.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html


	Linux kernel version history:

	LK1.1.0:
	- Jeff Garzik: softnet 'n stuff

	LK1.1.1:
	- Justin Guyett: softnet and locking fixes
	- Jeff Garzik: use PCI interface

	LK1.1.2:
	- Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions

	LK1.1.3:
	- Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
			 code) update "Theory of Operation" with
			 softnet/locking changes
	- Dave Miller: PCI DMA and endian fixups
	- Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation

	LK1.1.4:
	- Urban Widmark: fix gcc 2.95.2 problem and
	                 remove writel's to fixed address 0x7c

	LK1.1.5:
	- Urban Widmark: mdio locking, bounce buffer changes
	                 merges from Beckers 1.05 version
	                 added netif_running_on/off support

	LK1.1.6:
	- Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio)
	                 set netif_running_on/off on startup, del_timer_sync

	LK1.1.7:
	- Manfred Spraul: added reset into tx_timeout

	LK1.1.9:
	- Urban Widmark: merges from Beckers 1.10 version
	                 (media selection + eeprom reload)
	- David Vrabel:  merges from D-Link "1.11" version
	                 (disable WOL and PME on startup)

	LK1.1.10:
	- Manfred Spraul: use "singlecopy" for unaligned buffers
	                  don't allocate bounce buffers for !ReqTxAlign cards

	LK1.1.11:
	- David Woodhouse: Set dev->base_addr before the first time we call
					   wait_for_reset(). It's a lot happier that way.
					   Free np->tx_bufs only if we actually allocated it.

	LK1.1.12:
	- Martin Eriksson: Allow Memory-Mapped IO to be enabled.

	LK1.1.13 (jgarzik):
	- Add ethtool support
	- Replace some MII-related magic numbers with constants

	LK1.1.14 (Ivan G.):
	- fixes comments for Rhine-III
	- removes W_MAX_TIMEOUT (unused)
	- adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
	  is R-I and has Davicom chip, flag is referenced in kernel driver)
	- sends chip_id as a parameter to wait_for_reset since np is not
	  initialized on first call
	- changes mmio "else if (chip_id==VT6102)" to "else" so it will work
	  for Rhine-III's (documentation says same bit is correct)
	- transmit frame queue message is off by one - fixed
	- adds IntrNormalSummary to "Something Wicked" exclusion list
	  so normal interrupts will not trigger the message (src: Donald Becker)
	(Roger Luethi)
	- show confused chip where to continue after Tx error
	- location of collision counter is chip specific
	- allow selecting backoff algorithm (module parameter)

*/

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.1.14"
#define DRV_RELDATE	"May-3-2002"


/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Select a backoff algorithm (Ethernet capture effect) */
static int backoff;
/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'.
   The default is autonegotiation for speed and duplex.
     This should rarely be overridden.
   Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
   Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
   Use option values 0x20 and 0x200 for forcing full duplex operation.
*/
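/* For example (a hypothetical invocation, following the bit layout above):
   "modprobe via-rhine options=0x200,0x10" would force the first card to
   100Mbps full duplex and the second to 10Mbps half duplex. */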
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table.  */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10		/* Limit ring entries actually used.  */
#define RX_RING_SIZE	16
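
/* As a concrete illustration of the power-of-two note above: with
   TX_RING_SIZE == 16, the index computation used throughout this driver,
       entry = np->cur_tx % TX_RING_SIZE;
   compiles down to a single mask operation,
       entry = np->cur_tx & (TX_RING_SIZE - 1);
   which is why non-power-of-two ring sizes would be more expensive. */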


/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/

#if !defined(__OPTIMIZE__)  ||  !defined(__KERNEL__)
#warning  You must compile this file with the correct options!
#warning  See the last lines of the source file.
#error  You must compile this driver with "-O".
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION "  " DRV_RELDATE "  Written by Donald Becker\n"
KERN_INFO "  http://www.scyld.com/network/via-rhine.html\n";

static char shortname[] = DRV_NAME;


/* This driver was written to use PCI memory space, however most versions
   of the Rhine only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MEM
#else
#define USE_IO
#undef readb
#undef readw
#undef readl
#undef writeb
#undef writew
#undef writel
#define readb inb
#define readw inw
#define readl inl
#define writeb outb
#define writew outw
#define writel outl
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(backoff, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(backoff, "VIA Rhine: Bits 0-3: backoff algorithm");
MODULE_PARM_DESC(options, "VIA Rhine: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "VIA Rhine full duplex setting(s) (1)");

/*
				Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller. It also supports the VT6102 Rhine-II and the VT6105/VT6105M
Rhine-III.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
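
As a sketch of the wrap-around link, this mirrors what alloc_rbufs() and
alloc_tbufs() below actually do for the Rx ring:

    for (i = 0; i < RX_RING_SIZE; i++)
        rx_ring[i].next_desc = cpu_to_le32(rx_ring_dma +
                                           (i + 1) * sizeof(struct rx_desc));
    rx_ring[RX_RING_SIZE - 1].next_desc = cpu_to_le32(rx_ring_dma);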

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of via_rhine_rx().

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing.  Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.
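
Concretely, the copy path in via_rhine_rx() below reserves two bytes at the
head of the freshly allocated skbuff before copying, so the 14-byte Ethernet
header pushes the IP header onto a 16-byte boundary:

    skb = dev_alloc_skb(pkt_len + 2);
    skb_reserve(skb, 2);        (16 byte align the IP header)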

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->priv->lock spinlock. The other thread is the interrupt handler, which
is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
is not available it stops the transmit queue by calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx queue are available, the transmit queue is woken up if it was stopped.
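
A condensed sketch of the send-side locking pattern described above, as it
appears in via_rhine_start_tx() below:

    spin_lock_irq(&np->lock);
    np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
    np->cur_tx++;
    if (np->cur_tx == np->dirty_tx + TX_QUEUE_LEN)
        netif_stop_queue(dev);
    spin_unlock_irq(&np->lock);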

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/


/* This table drives the PCI probe routines.  It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum pci_flags_bit {
	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
	PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
};

enum via_rhine_chips {
	VT86C100A = 0,
	VT6102,
	VT6105,
	VT6105M
};

struct via_rhine_chip_info {
	const char *name;
	u16 pci_flags;
	int io_size;
	int drv_flags;
};


enum chip_capability_flags {
	CanHaveMII=1, HasESIPhy=2, HasDavicomPhy=4,
	ReqTxAlign=0x10, HasWOL=0x20, };

#ifdef USE_MEM
#define RHINE_IOTYPE (PCI_USES_MEM | PCI_USES_MASTER | PCI_ADDR1)
#else
#define RHINE_IOTYPE (PCI_USES_IO  | PCI_USES_MASTER | PCI_ADDR0)
#endif

/* directly indexed by enum via_rhine_chips, above */
static struct via_rhine_chip_info via_rhine_chip_info[] __devinitdata =
{
	{ "VIA VT86C100A Rhine", RHINE_IOTYPE, 128,
	  CanHaveMII | ReqTxAlign | HasDavicomPhy },
	{ "VIA VT6102 Rhine-II", RHINE_IOTYPE, 256,
	  CanHaveMII | HasWOL },
	{ "VIA VT6105 Rhine-III", RHINE_IOTYPE, 256,
	  CanHaveMII | HasWOL },
	{ "VIA VT6105M Rhine-III", RHINE_IOTYPE, 256,
	  CanHaveMII | HasWOL },
};

static struct pci_device_id via_rhine_pci_tbl[] __devinitdata =
{
	{0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT86C100A},
	{0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6102},
	{0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6105},
	{0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6105M},
	{0,}			/* terminate list */
};
MODULE_DEVICE_TABLE(pci, via_rhine_pci_tbl);


/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E,
	StickyHW=0x83, WOLcrClr=0xA4, WOLcgClr=0xA7, PwrcsrClr=0xAC,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};
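
/* For example, loading the module with the (hypothetical) parameter setting
   "backoff=0x08" requests the BackRandom algorithm; via_rhine_init_one()
   below applies these bits to the ConfigD register. */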

#ifdef USE_MEM
/* Registers checked at startup to verify that MMIO and PIO accesses
   return the same values. */
static int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0010,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100, IntrMIIChange=0x0200,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
};

/* MII interface, status flags.
   Not to be confused with the MIIStatus register ... */
enum mii_status_bits {
	MIICap100T4			= 0x8000,
	MIICap10100HdFd		= 0x7800,
	MIIPreambleSupr		= 0x0040,
	MIIAutoNegCompleted	= 0x0020,
	MIIRemoteFault		= 0x0010,
	MIICapAutoNeg		= 0x0008,
	MIILink				= 0x0004,
	MIIJabber			= 0x0002,
	MIIExtended			= 0x0001
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	s32 rx_status;
	u32 desc_length; /* Chain flag, Buffer/frame length */
	u32 addr;
	u32 next_desc;
};
struct tx_desc {
	s32 tx_status;
	u32 desc_length; /* Chain flag, Tx Config, Frame length */
	u32 addr;
	u32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC 0x00e08000
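
/* For example, via_rhine_start_tx() below builds the descriptor word for a
   1514-byte frame as
       desc_length = cpu_to_le32(TXDESC | 1514);
   i.e. the frame length occupies bits 0-10 and TXDESC supplies the control
   bits above it. */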

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008,
	CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040,
	CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400,
	CmdNoTxPoll=0x0800, CmdReset=0x8000,
};

#define MAX_MII_CNT	4
struct netdev_private {
	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	spinlock_t lock;

	/* Frequently used values: keep some adjacent for cache effect. */
	int chip_id, drv_flags;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
	u16 chip_cmd;						/* Current setting for ChipCmd */

	/* These values keep track of the transceiver/media in use. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
	u8 tx_thresh, rx_thresh;

	/* MII transceiver section. */
	unsigned char phys[MAX_MII_CNT];			/* MII device addresses. */
	unsigned int mii_cnt;			/* number of MIIs found, but only the first one is used */
	u16 mii_status;						/* last read MII status */
	struct mii_if_info mii_if;
};

static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  via_rhine_open(struct net_device *dev);
static void via_rhine_check_duplex(struct net_device *dev);
static void via_rhine_timer(unsigned long data);
static void via_rhine_tx_timeout(struct net_device *dev);
static int  via_rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static void via_rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void via_rhine_tx(struct net_device *dev);
static void via_rhine_rx(struct net_device *dev);
static void via_rhine_error(struct net_device *dev, int intr_status);
static void via_rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *via_rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  via_rhine_close(struct net_device *dev);
static inline void clear_tally_counters(long ioaddr);
static inline void via_restart_tx(struct net_device *dev);

static void wait_for_reset(struct net_device *dev, int chip_id, char *name)
{
	long ioaddr = dev->base_addr;
	int i;

	/* VT86C100A may need long delay after reset (dlink) */
	if (chip_id == VT86C100A)
		udelay(100);

	i = 0;
	do {
		udelay(5);
		i++;
		if(i > 2000) {
			printk(KERN_ERR "%s: reset did not complete in 10 ms.\n", name);
			break;
		}
	} while(readw(ioaddr + ChipCmd) & CmdReset);
	if (debug > 1)
		printk(KERN_INFO "%s: reset finished after %d microseconds.\n",
			   name, 5*i);
}

#ifdef USE_MEM
static void __devinit enable_mmio(long ioaddr, int chip_id)
{
	int n;
	if (chip_id == VT86C100A) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(ioaddr + ConfigA) | 0x20;
		outb(n, ioaddr + ConfigA);
	} else {
		n = inb(ioaddr + ConfigD) | 0x80;
		outb(n, ioaddr + ConfigD);
	}
}
#endif

static void __devinit reload_eeprom(long ioaddr)
{
	int i;
	outb(0x20, ioaddr + MACRegEEcsr);
	/* Typically 2 cycles to reload. */
	for (i = 0; i < 150; i++)
		if (! (inb(ioaddr + MACRegEEcsr) & 0x20))
			break;
}

static int __devinit via_rhine_init_one (struct pci_dev *pdev,
					 const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	int i, option;
	int chip_id = (int) ent->driver_data;
	static int card_idx = -1;
	long ioaddr;
	long memaddr;
	int io_size;
	int pci_flags;
#ifdef USE_MEM
	long ioaddr0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	card_idx++;
	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
	io_size = via_rhine_chip_info[chip_id].io_size;
	pci_flags = via_rhine_chip_info[chip_id].pci_flags;

	if (pci_enable_device (pdev))
		goto err_out;

	/* this should always be supported */
	if (pci_set_dma_mask(pdev, 0xffffffff)) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len (pdev, 0) < io_size) ||
	    (pci_resource_len (pdev, 1) < io_size)) {
		printk (KERN_ERR "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	ioaddr = pci_resource_start (pdev, 0);
	memaddr = pci_resource_start (pdev, 1);

	if (pci_flags & PCI_USES_MASTER)
		pci_set_master (pdev);

	dev = alloc_etherdev(sizeof(*np));
	if (dev == NULL) {
		printk (KERN_ERR "init_ethernet failed for card #%d\n", card_idx);
		goto err_out;
	}
	SET_MODULE_OWNER(dev);

	if (pci_request_regions(pdev, shortname))
		goto err_out_free_netdev;

#ifdef USE_MEM
	ioaddr0 = ioaddr;
	enable_mmio(ioaddr0, chip_id);

	ioaddr = (long) ioremap (memaddr, io_size);
	if (!ioaddr) {
		printk (KERN_ERR "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
				pdev->slot_name, io_size, memaddr);
		goto err_out_free_res;
	}

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(ioaddr0+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			printk (KERN_ERR "MMIO do not match PIO [%02x] (%02x != %02x)\n",
					reg, a, b);
			goto err_out_unmap;
		}
	}
#endif

	/* D-Link provided reset code (with comment additions) */
	if (via_rhine_chip_info[chip_id].drv_flags & HasWOL) {
		unsigned char byOrgValue;

		/* clear sticky bit before reset & read ethernet address */
		byOrgValue = readb(ioaddr + StickyHW);
		byOrgValue = byOrgValue & 0xFC;
		writeb(byOrgValue, ioaddr + StickyHW);

		/* (bits written are cleared?) */
		/* disable force PME-enable */
		writeb(0x80, ioaddr + WOLcgClr);
		/* disable power-event config bit */
		writeb(0xFF, ioaddr + WOLcrClr);
		/* clear power status (undocumented in vt6102 docs?) */
		writeb(0xFF, ioaddr + PwrcsrClr);
	}

	/* Reset the chip to erase previous misconfiguration. */
	writew(CmdReset, ioaddr + ChipCmd);

	dev->base_addr = ioaddr;
	wait_for_reset(dev, chip_id, shortname);

	/* Reload the station address from the EEPROM. */
#ifdef USE_IO
	reload_eeprom(ioaddr);
#else
	reload_eeprom(ioaddr0);
	/* Reloading from eeprom overwrites cfgA-D, so we must re-enable MMIO.
	   If reload_eeprom() was done first this could be avoided, but it is
	   not known if that still works with the "win98-reboot" problem. */
	enable_mmio(ioaddr0, chip_id);
#endif

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		printk(KERN_ERR "Invalid MAC address for card #%d\n", card_idx);
		goto err_out_unmap;
	}

	if (chip_id == VT6102) {
		/*
		 * For the 3065D, reloading the EEPROM turns on bit 0 in
		 * MAC_REG_CFGA, which makes the MAC receive magic packets
		 * automatically. So we turn it off. (D-Link)
		 */
		writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);
	}

	/* Select backoff algorithm */
	if (backoff)
		writeb(readb(ioaddr + ConfigD) & (0xF0 | backoff),
			ioaddr + ConfigD);

	dev->irq = pdev->irq;

	np = dev->priv;
	spin_lock_init (&np->lock);
	np->chip_id = chip_id;
	np->drv_flags = via_rhine_chip_info[chip_id].drv_flags;
	np->pdev = pdev;
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii_if.full_duplex = 1;
		np->default_port = option & 15;
	}
	if (card_idx < MAX_UNITS  &&  full_duplex[card_idx] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex) {
		printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
			   " disabled.\n", dev->name);
		np->mii_if.force_media = 1;
	}

	/* The chip-specific entries in the device structure. */
	dev->open = via_rhine_open;
	dev->hard_start_xmit = via_rhine_start_tx;
	dev->stop = via_rhine_close;
	dev->get_stats = via_rhine_get_stats;
	dev->set_multicast_list = via_rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->tx_timeout = via_rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	if (np->drv_flags & ReqTxAlign)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap;

	printk(KERN_INFO "%s: %s at 0x%lx, ",
		   dev->name, via_rhine_chip_info[chip_id].name,
		   (pci_flags & PCI_USES_IO) ? ioaddr : memaddr);

	for (i = 0; i < 5; i++)
			printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);

	pci_set_drvdata(pdev, dev);

	if (np->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		np->phys[0] = 1;		/* Standard for this chip. */
		for (phy = 1; phy < 32 && phy_idx < MAX_MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, 1);
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, 4);
				printk(KERN_INFO "%s: MII PHY found at address %d, status "
					   "0x%4.4x advertising %4.4x Link %4.4x.\n",
					   dev->name, phy, mii_status, np->mii_if.advertising,
					   mdio_read(dev, phy, 5));

				/* set IFF_RUNNING */
				if (mii_status & MIILink)
					netif_carrier_on(dev);
				else
					netif_carrier_off(dev);
			}
		}
		np->mii_cnt = phy_idx;
		np->mii_if.phy_id = np->phys[0];
	}

	/* Allow forcing the media type. */
	if (option > 0) {
		if (option & 0x220)
			np->mii_if.full_duplex = 1;
		np->default_port = option & 0x3ff;
		if (np->default_port & 0x330) {
			/* np->medialock = 1; */
			printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
				   (option & 0x300 ? 100 : 10),
				   (option & 0x220 ? "full" : "half"));
			if (np->mii_cnt)
				mdio_write(dev, np->phys[0], MII_BMCR,
						   ((option & 0x300) ? 0x2000 : 0) |  /* 100mbps? */
						   ((option & 0x220) ? 0x0100 : 0));  /* Full duplex? */
		}
	}

	return 0;

err_out_unmap:
#ifdef USE_MEM
	iounmap((void *)ioaddr);
err_out_free_res:
#endif
	pci_release_regions(pdev);
err_out_free_netdev:
	kfree (dev);
err_out:
	return -ENODEV;
}

static int alloc_ring(struct net_device* dev)
{
	struct netdev_private *np = dev->priv;
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(np->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		printk(KERN_ERR "Could not allocate DMA memory.\n");
		return -ENOMEM;
	}
	if (np->drv_flags & ReqTxAlign) {
		np->tx_bufs = pci_alloc_consistent(np->pdev, PKT_BUF_SZ * TX_RING_SIZE,
								   &np->tx_bufs_dma);
		if (np->tx_bufs == NULL) {
			pci_free_consistent(np->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    ring, ring_dma);
			return -ENOMEM;
		}
	}

	np->rx_ring = ring;
	np->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	np->rx_ring_dma = ring_dma;
	np->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}

static void free_ring(struct net_device* dev)
{
	struct netdev_private *np = dev->priv;

	pci_free_consistent(np->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    np->rx_ring, np->rx_ring_dma);
	np->tx_ring = NULL;

	if (np->tx_bufs)
		pci_free_consistent(np->pdev, PKT_BUF_SZ * TX_RING_SIZE,
							np->tx_bufs, np->tx_bufs_dma);

	np->tx_bufs = NULL;

}

static void alloc_rbufs(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	dma_addr_t next;
	int i;

	np->dirty_rx = np->cur_rx = 0;

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	np->rx_head_desc = &np->rx_ring[0];
	next = np->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rx_status = 0;
		np->rx_ring[i].desc_length = cpu_to_le32(np->rx_buf_sz);
		next += sizeof(struct rx_desc);
		np->rx_ring[i].next_desc = cpu_to_le32(next);
		np->rx_skbuff[i] = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].next_desc = cpu_to_le32(np->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;                 /* Mark as being used by this device. */

		np->rx_skbuff_dma[i] =
			pci_map_single(np->pdev, skb->tail, np->rx_buf_sz,
						   PCI_DMA_FROMDEVICE);

		np->rx_ring[i].addr = cpu_to_le32(np->rx_skbuff_dma[i]);
		np->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}

static void free_rbufs(struct net_device* dev)
{
	struct netdev_private *np = dev->priv;
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rx_status = 0;
		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pdev,
							 np->rx_skbuff_dma[i],
							 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = 0;
	}
}

static void alloc_tbufs(struct net_device* dev)
{
	struct netdev_private *np = dev->priv;
	dma_addr_t next;
	int i;

	np->dirty_tx = np->cur_tx = 0;
	next = np->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = 0;
		np->tx_ring[i].tx_status = 0;
		np->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		np->tx_ring[i].next_desc = cpu_to_le32(next);
		np->tx_buf[i] = &np->tx_bufs[i * PKT_BUF_SZ];
	}
	np->tx_ring[i-1].next_desc = cpu_to_le32(np->tx_ring_dma);

}

static void free_tbufs(struct net_device* dev)
{
	struct netdev_private *np = dev->priv;
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].tx_status = 0;
		np->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		np->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (np->tx_skbuff[i]) {
			if (np->tx_skbuff_dma[i]) {
				pci_unmap_single(np->pdev,
								 np->tx_skbuff_dma[i],
								 np->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = 0;
		np->tx_buf[i] = 0;
	}
}

static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		writeb(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	writew(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	writeb(0x20, ioaddr + TxConfig);
	np->tx_thresh = 0x20;
	np->rx_thresh = 0x60;			/* Written in via_rhine_set_rx_mode(). */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	writel(np->rx_ring_dma, ioaddr + RxRingPtr);
	writel(np->tx_ring_dma, ioaddr + TxRingPtr);

	via_rhine_set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
		   IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
		   IntrTxDone | IntrTxError | IntrTxUnderrun |
		   IntrPCIErr | IntrStatsMax | IntrLinkChange | IntrMIIChange,
		   ioaddr + IntrEnable);

	np->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
	if (np->mii_if.force_media)
		np->chip_cmd |= CmdFDuplex;
	writew(np->chip_cmd, ioaddr + ChipCmd);

	via_rhine_check_duplex(dev);

	/* The LED outputs of various MII xcvrs should be configured.  */
	/* For NS or Mison phys, turn on bit 1 in register 0x17 */
	/* For ESI phys, turn on bit 7 in register 0x17. */
	mdio_write(dev, np->phys[0], 0x17, mdio_read(dev, np->phys[0], 0x17) |
			   ((np->drv_flags & HasESIPhy) ? 0x0080 : 0x0001));
}

/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	long ioaddr = dev->base_addr;
	int boguscnt = 1024;

	/* Wait for a previous command to complete. */
	while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
		;
	writeb(0x00, ioaddr + MIICmd);
	writeb(phy_id, ioaddr + MIIPhyAddr);
	writeb(regnum, ioaddr + MIIRegAddr);
	writeb(0x40, ioaddr + MIICmd);			/* Trigger read */
	boguscnt = 1024;
	while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)
		;
	return readw(ioaddr + MIIData);
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int boguscnt = 1024;

	if (phy_id == np->phys[0]) {
		switch (regnum) {
		case MII_BMCR:					/* Is user forcing speed/duplex? */
			if (value & 0x9000)			/* Autonegotiation. */
				np->mii_if.force_media = 0;
			else
				np->mii_if.full_duplex = (value & 0x0100) ? 1 : 0;
			break;
		case MII_ADVERTISE:
			np->mii_if.advertising = value;
			break;
		}
	}

	/* Wait for a previous command to complete. */
	while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
		;
	writeb(0x00, ioaddr + MIICmd);
	writeb(phy_id, ioaddr + MIIPhyAddr);
	writeb(regnum, ioaddr + MIIRegAddr);
	writew(value, ioaddr + MIIData);
	writeb(0x20, ioaddr + MIICmd);			/* Trigger write. */
}


static int via_rhine_open(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	/* Reset the chip. */
	writew(CmdReset, ioaddr + ChipCmd);

	i = request_irq(np->pdev->irq, &via_rhine_interrupt, SA_SHIRQ, dev->name, dev);
	if (i)
		return i;

	if (debug > 1)
		printk(KERN_DEBUG "%s: via_rhine_open() irq %d.\n",
			   dev->name, np->pdev->irq);

	i = alloc_ring(dev);
	if (i)
		return i;
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	wait_for_reset(dev, np->chip_id, dev->name);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done via_rhine_open(), status %4.4x "
			   "MII status: %4.4x.\n",
			   dev->name, readw(ioaddr + ChipCmd),
			   mdio_read(dev, np->phys[0], MII_BMSR));

	netif_start_queue(dev);

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 2;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &via_rhine_timer;				/* timer handler */
	add_timer(&np->timer);

	return 0;
}

static void via_rhine_check_duplex(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	if (np->mii_if.force_media  ||  mii_lpa == 0xffff)
		return;
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (debug)
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
				   " partner capability of %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], mii_lpa);
		if (duplex)
			np->chip_cmd |= CmdFDuplex;
		else
			np->chip_cmd &= ~CmdFDuplex;
		writew(np->chip_cmd, ioaddr + ChipCmd);
	}
}


static void via_rhine_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 10*HZ;
	int mii_status;

	if (debug > 3) {
		printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
			   dev->name, readw(ioaddr + IntrStatus));
	}

	spin_lock_irq (&np->lock);

	via_rhine_check_duplex(dev);

	/* make IFF_RUNNING follow the MII status bit "Link established" */
	mii_status = mdio_read(dev, np->phys[0], MII_BMSR);
	if ( (mii_status & MIILink) != (np->mii_status & MIILink) ) {
		if (mii_status & MIILink)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);
	}
	np->mii_status = mii_status;

	spin_unlock_irq (&np->lock);

	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}


static void via_rhine_tx_timeout (struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;

	printk (KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
		"%4.4x, resetting...\n",
		dev->name, readw (ioaddr + IntrStatus),
		mdio_read (dev, np->phys[0], MII_BMSR));

	dev->if_port = 0;

	/* protect against concurrent rx interrupts */
	disable_irq(np->pdev->irq);

	spin_lock(&np->lock);

	/* Reset the chip. */
	writew(CmdReset, ioaddr + ChipCmd);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	wait_for_reset(dev, np->chip_id, dev->name);
	init_registers(dev);

	spin_unlock(&np->lock);
	enable_irq(np->pdev->irq);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	netif_wake_queue(dev);
}

static int via_rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;

	if ((np->drv_flags & ReqTxAlign) &&
		(((long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)
		) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			np->tx_skbuff[entry] = NULL;
			np->stats.tx_dropped++;
			return 0;
		}
		skb_copy_and_csum_dev(skb, np->tx_buf[entry]);
		np->tx_skbuff_dma[entry] = 0;
		np->tx_ring[entry].addr = cpu_to_le32(np->tx_bufs_dma +
										  (np->tx_buf[entry] - np->tx_bufs));
	} else {
		np->tx_skbuff_dma[entry] =
			pci_map_single(np->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
		np->tx_ring[entry].addr = cpu_to_le32(np->tx_skbuff_dma[entry]);
	}

	np->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq (&np->lock);
	wmb();
	np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	np->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel. */
	writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);

	if (np->cur_tx == np->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq (&np->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			   dev->name, np->cur_tx-1, entry);
	}
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void via_rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	long ioaddr;
	u32 intr_status;
	int boguscnt = max_interrupt_work;

	ioaddr = dev->base_addr;

	while ((intr_status = readw(ioaddr + IntrStatus))) {
		/* Acknowledge all of the current interrupt sources ASAP. */
		writew(intr_status & 0xffff, ioaddr + IntrStatus);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
						   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
			via_rhine_rx(dev);

		if (intr_status & (IntrTxDone | IntrTxError | IntrTxUnderrun |
						   IntrTxAborted))
			via_rhine_tx(dev);

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange | IntrMIIChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun))
			via_rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=0x%4.4x.\n",
				   dev->name, intr_status);
			break;
		}
	}

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%4.4x.\n",
			   dev->name, readw(ioaddr + IntrStatus));
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void via_rhine_tx(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int txstatus = 0, entry = np->dirty_tx % TX_RING_SIZE;

	spin_lock (&np->lock);

	/* find and cleanup dirty tx descriptors */
	while (np->dirty_tx != np->cur_tx) {
		txstatus = le32_to_cpu(np->tx_ring[entry].tx_status);
		if (debug > 6)
			printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n",
				   entry, txstatus);
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
					   dev->name, txstatus);
			np->stats.tx_errors++;
			if (txstatus & 0x0400) np->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) np->stats.tx_window_errors++;
			if (txstatus & 0x0100) np->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) np->stats.tx_heartbeat_errors++;
			if (((np->chip_id == VT86C100A) && txstatus & 0x0002) ||
				(txstatus & 0x0800) || (txstatus & 0x1000)) {
				np->stats.tx_fifo_errors++;
				np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (np->chip_id == VT86C100A)
				np->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				np->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
					(txstatus >> 3) & 0xF,
					txstatus & 0xF);
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (np->tx_skbuff_dma[entry]) {
			pci_unmap_single(np->pdev,
							 np->tx_skbuff_dma[entry],
							 np->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
		entry = (++np->dirty_tx) % TX_RING_SIZE;
	}
	if ((np->cur_tx - np->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue (dev);

	spin_unlock (&np->lock);
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity and better register allocation. */
static void via_rhine_rx(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG " In via_rhine_rx(), entry %d status %8.8x.\n",
			   entry, le32_to_cpu(np->rx_head_desc->rx_status));
	}

	/* While the chip has released ownership of the next entry, it holds a
	   newly completed receive. Send it up. */
	while ( ! (np->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
		struct rx_desc *desc = np->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (debug > 4)
			printk(KERN_DEBUG "  via_rhine_rx() status is %8.8x.\n",
				   desc_status);
		if (--boguscnt < 0)
			break;
		if ( (desc_status & (RxWholePkt | RxErr)) !=  RxWholePkt) {
			if ((desc_status & RxWholePkt) !=  RxWholePkt) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					   "multiple buffers, entry %#x length %d status %8.8x!\n",
					   dev->name, entry, data_size, desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
					   dev->name, np->rx_head_desc, &np->rx_ring[entry]);
				np->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				if (debug > 2)
					printk(KERN_DEBUG "  via_rhine_rx() Rx error was %8.8x.\n",
						   desc_status);
				np->stats.rx_errors++;
				if (desc_status & 0x0030) np->stats.rx_length_errors++;
				if (desc_status & 0x0048) np->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) np->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock (&np->lock);
					np->stats.rx_crc_errors++;
					spin_unlock (&np->lock);
				}
			}
		} else {
			struct sk_buff *skb;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
				(skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(np->pdev, np->rx_skbuff_dma[entry],
						    np->rx_buf_sz, PCI_DMA_FROMDEVICE);

				/* *_IP_COPYSUM isn't defined anywhere and eth_copy_and_sum
				   is memcpy for all archs so this is kind of pointless right
				   now ... or? */
#if HAS_IP_COPYSUM                         /* Call copy + cksum if available. */
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
					   pkt_len);
#endif
			} else {
				skb = np->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
						   dev->name);
					break;
				}
				np->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(np->pdev, np->rx_skbuff_dma[entry],
								 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_bytes += pkt_len;
			np->stats.rx_packets++;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;			/* Better luck next round. */
			skb->dev = dev;			/* Mark as being used by this device. */
			np->rx_skbuff_dma[entry] =
				pci_map_single(np->pdev, skb->tail, np->rx_buf_sz,
							   PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].addr = cpu_to_le32(np->rx_skbuff_dma[entry]);
		}
		np->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}

	/* Pre-emptively restart Rx engine. */
	writew(CmdRxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
}

static inline void via_restart_tx(struct net_device *dev) {
	struct netdev_private *np = dev->priv;
	int entry = np->dirty_tx % TX_RING_SIZE;

	/* We know better than the chip where it should continue */
	writel(np->tx_ring_dma + entry * sizeof(struct tx_desc),
		   dev->base_addr + TxRingPtr);

	writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
}

static void via_rhine_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;

	spin_lock (&np->lock);

	if (intr_status & (IntrMIIChange | IntrLinkChange)) {
		if (readb(ioaddr + MIIStatus) & 0x02) {
			/* Link failed, restart autonegotiation. */
			if (np->drv_flags & HasDavicomPhy)
				mdio_write(dev, np->phys[0], MII_BMCR, 0x3300);
		} else
			via_rhine_check_duplex(dev);
		if (debug)
			printk(KERN_ERR "%s: MII status changed: Autonegotiation "
				   "advertising %4.4x  partner %4.4x.\n", dev->name,
			   mdio_read(dev, np->phys[0], MII_ADVERTISE),
			   mdio_read(dev, np->phys[0], MII_LPA));
	}
	if (intr_status & IntrStatsMax) {
		np->stats.rx_crc_errors	+= readw(ioaddr + RxCRCErrs);
		np->stats.rx_missed_errors	+= readw(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxError) {
		if (debug > 1)
			printk(KERN_INFO "%s: Abort %4.4x, frame dropped.\n",
				   dev->name, intr_status);
		via_restart_tx(dev);
	}
	if (intr_status & IntrTxUnderrun) {
		if (np->tx_thresh < 0xE0)
			writeb(np->tx_thresh += 0x20, ioaddr + TxConfig);
		if (debug > 1)
			printk(KERN_INFO "%s: Transmitter underrun, Tx "
				   "threshold now %2.2x.\n",
				   dev->name, np->tx_thresh);
		via_restart_tx(dev);
	}
	if (intr_status & ~( IntrLinkChange | IntrStatsMax |
						 IntrTxError | IntrTxAborted | IntrNormalSummary)) {
		if (debug > 1)
			printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* Recovery for other fault sources not known. */
		writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
	}

	spin_unlock (&np->lock);
}

static struct net_device_stats *via_rhine_get_stats(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);
	np->stats.rx_crc_errors	+= readw(ioaddr + RxCRCErrs);
	np->stats.rx_missed_errors	+= readw(ioaddr + RxMissed);
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&np->lock, flags);

	return &np->stats;
}

/* Clears the "tally counters" for CRC errors and missed frames(?).
   It has been reported that some chips need a write of 0 to clear
   these, for others the counters are set to 1 when written to and
   instead cleared when read. So we clear them both ways ... */
static inline void clear_tally_counters(const long ioaddr)
{
	writel(0, ioaddr + RxMissed);
	readw(ioaddr + RxCRCErrs);
	readw(ioaddr + RxMissed);
}

static void via_rhine_set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	u32 mc_filter[2];			/* Multicast hash filter */
	u8 rx_mode;					/* Note: 0x02=accept runt, 0x01=accept errs */

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		rx_mode = 0x1C;
	} else if ((dev->mc_count > multicast_filter_limit)
			   ||  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		writel(0xffffffff, ioaddr + MulticastFilter0);
		writel(0xffffffff, ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
		}
		writel(mc_filter[0], ioaddr + MulticastFilter0);
		writel(mc_filter[1], ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	}
	writeb(np->rx_thresh | rx_mode, ioaddr + RxConfig);
}

static int netdev_ethtool_ioctl (struct net_device *dev, void *useraddr)
{
	struct netdev_private *np = dev->priv;
	u32 ethcmd;

	if (get_user(ethcmd, (u32 *)useraddr))
		return -EFAULT;

	switch (ethcmd) {
	case ETHTOOL_GDRVINFO: {
		struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
		strcpy (info.driver, DRV_NAME);
		strcpy (info.version, DRV_VERSION);
		strcpy (info.bus_info, np->pdev->slot_name);
		if (copy_to_user (useraddr, &info, sizeof (info)))
			return -EFAULT;
		return 0;
	}

	/* get settings */
	case ETHTOOL_GSET: {
		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
		if (!(np->drv_flags & CanHaveMII))
			break;
		spin_lock_irq(&np->lock);
		mii_ethtool_gset(&np->mii_if, &ecmd);
		spin_unlock_irq(&np->lock);
		if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
			return -EFAULT;
		return 0;
	}
	/* set settings */
	case ETHTOOL_SSET: {
		int r;
		struct ethtool_cmd ecmd;
		if (!(np->drv_flags & CanHaveMII))
			break;
		if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
			return -EFAULT;
		spin_lock_irq(&np->lock);
		r = mii_ethtool_sset(&np->mii_if, &ecmd);
		spin_unlock_irq(&np->lock);
		return r;
	}
	/* restart autonegotiation */
	case ETHTOOL_NWAY_RST: {
		if (!(np->drv_flags & CanHaveMII))
			break;
		return mii_nway_restart(&np->mii_if);
	}
	/* get link status */
	case ETHTOOL_GLINK: {
		struct ethtool_value edata = {ETHTOOL_GLINK};
		if (!(np->drv_flags & CanHaveMII))
			break;
		edata.data = mii_link_ok(&np->mii_if);
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}

	/* get message-level */
	case ETHTOOL_GMSGLVL: {
		struct ethtool_value edata = {ETHTOOL_GMSGLVL};
		edata.data = debug;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}
	/* set message-level */
	case ETHTOOL_SMSGLVL: {
		struct ethtool_value edata;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		debug = edata.data;
		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = dev->priv;
	struct mii_ioctl_data *data = (struct mii_ioctl_data *) & rq->ifr_data;
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCETHTOOL)
		rc = netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);

	else {
		spin_lock_irq(&np->lock);
		rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
		spin_unlock_irq(&np->lock);
	}

	return rc;
}

static int via_rhine_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;

	del_timer_sync(&np->timer);

	spin_lock_irq(&np->lock);

	netif_stop_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
			   dev->name, readw(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	writeb(np->tx_thresh | 0x02, ioaddr + TxConfig);

	/* Disable interrupts by clearing the interrupt mask. */
	writew(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	writew(CmdStop, ioaddr + ChipCmd);

	spin_unlock_irq(&np->lock);

	free_irq(np->pdev->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}


static void __devexit via_rhine_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	unregister_netdev(dev);

	pci_release_regions(pdev);

#ifdef USE_MEM
	iounmap((char *)(dev->base_addr));
#endif

	kfree(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}


static struct pci_driver via_rhine_driver = {
	name:		"via-rhine",
	id_table:	via_rhine_pci_tbl,
	probe:		via_rhine_init_one,
	remove:		__devexit_p(via_rhine_remove_one),
};


static int __init via_rhine_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init (&via_rhine_driver);
}


static void __exit via_rhine_cleanup (void)
{
	pci_unregister_driver (&via_rhine_driver);
}


module_init(via_rhine_init);
module_exit(via_rhine_cleanup);


/*
 * Local variables:
 *  compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c via-rhine.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */