1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2/*
3	Written 1999-2000 by Donald Becker.
4
5	This software may be used and distributed according to the terms of
6	the GNU General Public License (GPL), incorporated herein by reference.
7	Drivers based on or derived from this code fall under the GPL and must
8	retain the authorship, copyright and license notice.  This file is not
9	a complete program and may only be used when the entire operating
10	system is licensed under the GPL.
11
12	The author may be reached as becker@scyld.com, or C/O
13	Scyld Computing Corporation
14	410 Severn Ave., Suite 210
15	Annapolis MD 21403
16
17	Support and updates available at
18	http://www.scyld.com/network/sundance.html
19
20
21	Version LK1.01a (jgarzik):
22	- Replace some MII-related magic numbers with constants
23
24	Version LK1.02 (D-Link):
25	- Add new board to PCI ID list
26	- Fix multicast bug
27
28	Version LK1.03 (D-Link):
29	- New Rx scheme, reduce Rx congestion
30	- Option to disable flow control
31
32	Version LK1.04 (D-Link):
33	- Tx timeout recovery
34	- More support for ethtool.
35
36	Version LK1.04a:
37	- Remove unused/constant members from struct pci_id_info
38	(which then allows removal of 'drv_flags' from private struct)
39	(jgarzik)
40	- If no phy is found, fail to load that board (jgarzik)
41	- Always start phy id scan at id 1 to avoid problems (Donald Becker)
	- Autodetect where mii_preamble_required is needed,
43	default to not needed.  (Donald Becker)
44
45	Version LK1.04b:
46	- Remove mii_preamble_required module parameter (Donald Becker)
47	- Add per-interface mii_preamble_required (setting is autodetected)
48	  (Donald Becker)
49	- Remove unnecessary cast from void pointer (jgarzik)
50	- Re-align comments in private struct (jgarzik)
51
52	Version LK1.04c (jgarzik):
53	- Support bitmapped message levels (NETIF_MSG_xxx), and the
54	  two ethtool ioctls that get/set them
55	- Don't hand-code MII ethtool support, use standard API/lib
56
57	Version LK1.04d:
58	- Merge from Donald Becker's sundance.c: (Jason Lunz)
59		* proper support for variably-sized MTUs
60		* default to PIO, to fix chip bugs
61	- Add missing unregister_netdev (Jason Lunz)
62	- Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
63	- Better rx buf size calculation (Donald Becker)
64
65	Version LK1.05 (D-Link):
66	- Fix DFE-580TX packet drop issue (for DL10050C)
67	- Fix reset_tx logic
68
69	Version LK1.06 (D-Link):
70	- Fix crash while unloading driver
71
	Version LK1.06b (D-Link):
73	- New tx scheme, adaptive tx_coalesce
74
75*/
76
77#define DRV_NAME	"sundance"
78#define DRV_VERSION	"1.01+LK1.06b"
79#define DRV_RELDATE	"6-Nov-2002"
80
81
82/* The user-configurable values.
83   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 0: quiet, 1: normal messages, up to 7: verbose. */
85/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
86static int max_interrupt_work = 0;
87/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
88   Typical is a 64 element hash table based on the Ethernet CRC.  */
89static int multicast_filter_limit = 32;
90
91/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
92   Setting to > 1518 effectively disables this feature.
93   This chip can receive into offset buffers, so the Alpha does not
94   need a copy-align. */
95static int rx_copybreak;
96static int flowctrl=1;
97
98/* media[] specifies the media type the NIC operates at.
99		 autosense	Autosensing active media.
100		 10mbps_hd 	10Mbps half duplex.
101		 10mbps_fd 	10Mbps full duplex.
102		 100mbps_hd 	100Mbps half duplex.
103		 100mbps_fd 	100Mbps full duplex.
104		 0		Autosensing active media.
105		 1	 	10Mbps half duplex.
106		 2	 	10Mbps full duplex.
107		 3	 	100Mbps half duplex.
108		 4	 	100Mbps full duplex.
109*/
110#define MAX_UNITS 8
111static char *media[MAX_UNITS];
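/* Example (illustrative only; option values follow the table above):
	modprobe sundance media=100mbps_fd,autosense debug=1
   forces the first board to 100Mbps full duplex and leaves a second board
   autosensing. */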
112
113
114/* Operational parameters that are set at compile time. */
115
116/* Keep the ring sizes a power of two for compile efficiency.
117   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
118   Making the Tx ring too large decreases the effectiveness of channel
119   bonding and packet priority, and more than 128 requires modifying the
120   Tx error recovery.
121   Large receive rings merely waste memory. */
122#define TX_RING_SIZE	32
123#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
124#define RX_RING_SIZE	64
125#define RX_BUDGET	32
126#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
127#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
128
129/* Operational parameters that usually are not changed. */
130/* Time in jiffies before concluding the transmitter is hung. */
131#define TX_TIMEOUT  (4*HZ)
132
133#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
134
135#ifndef __KERNEL__
136#define __KERNEL__
137#endif
138#if !defined(__OPTIMIZE__)
139#warning  You must compile this file with the correct options!
140#warning  See the last lines of the source file.
141#error You must compile this driver with "-O".
142#endif
143
144/* Include files, designed to support most kernel versions 2.0.0 and later. */
145#include <linux/module.h>
146#include <linux/kernel.h>
147#include <linux/string.h>
148#include <linux/timer.h>
149#include <linux/errno.h>
150#include <linux/ioport.h>
151#include <linux/slab.h>
152#include <linux/interrupt.h>
153#include <linux/pci.h>
154#include <linux/netdevice.h>
155#include <linux/etherdevice.h>
156#include <linux/skbuff.h>
157#include <linux/init.h>
158#include <asm/uaccess.h>
159#include <asm/processor.h>		/* Processor type for cache alignment. */
160#include <asm/bitops.h>
161#include <asm/io.h>
162#include <linux/delay.h>
163#include <linux/spinlock.h>
164#ifndef _COMPAT_WITH_OLD_KERNEL
165#include <linux/crc32.h>
166#include <linux/ethtool.h>
167#include <linux/mii.h>
168#else
169#include "crc32.h"
170#include "ethtool.h"
171#include "mii.h"
172#include "compat.h"
173#endif
174
175/* These identify the driver base version and may not be removed. */
176static char version[] __devinitdata =
177KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "  Written by Donald Becker\n"
178KERN_INFO "  http://www.scyld.com/network/sundance.html\n";
179
180MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
181MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
182MODULE_LICENSE("GPL");
183
184MODULE_PARM(max_interrupt_work, "i");
185MODULE_PARM(debug, "i");
186MODULE_PARM(rx_copybreak, "i");
187MODULE_PARM(media, "1-" __MODULE_STRING(MAX_UNITS) "s");
188MODULE_PARM(flowctrl, "i");
189MODULE_PARM_DESC(max_interrupt_work, "Sundance Alta maximum events handled per interrupt");
190MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
191MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
192MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
193
194/*
195				Theory of Operation
196
197I. Board Compatibility
198
199This driver is designed for the Sundance Technologies "Alta" ST201 chip.
200
201II. Board-specific settings
202
203III. Driver operation
204
205IIIa. Ring buffers
206
207This driver uses two statically allocated fixed-size descriptor lists
208formed into rings by a branch from the final descriptor to the beginning of
209the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
210Some chips explicitly use only 2^N sized rings, while others use a
211'next descriptor' pointer that the driver forms into rings.
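
As a sketch (mirroring init_ring() below), each descriptor's next_desc simply
points at the following slot, with the final entry wrapping back to the base
of the ring:

	rx_ring[i].next_desc = cpu_to_le32(rx_ring_dma +
		((i + 1) % RX_RING_SIZE) * sizeof(struct netdev_desc));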
212
213IIIb/c. Transmit/Receive Structure
214
215This driver uses a zero-copy receive and transmit scheme.
216The driver allocates full frame size skbuffs for the Rx ring buffers at
217open() time and passes the skb->data field to the chip as receive data
218buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
219a fresh skbuff is allocated and the frame is copied to the new skbuff.
220When the incoming frame is larger, the skbuff is passed directly up the
221protocol stack.  Buffers consumed this way are replaced by newly allocated
222skbuffs in a later phase of receives.
223
224The RX_COPYBREAK value is chosen to trade-off the memory wasted by
225using a full-sized skbuff for small frames vs. the copying costs of larger
226frames.  New boards are typically used in generously configured machines
227and the underfilled buffers have negligible impact compared to the benefit of
228a single allocation size, so the default value of zero results in never
229copying packets.  When copying is done, the cost is usually mitigated by using
230a combined copy/checksum routine.  Copying also preloads the cache, which is
231most useful with small frames.
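
The receive path (rx_poll() below) therefore reduces to roughly the following
sketch, with the zero-copy branch taken whenever the frame exceeds
rx_copybreak:

	if (pkt_len < rx_copybreak
		&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);		copy small frames to a fresh skbuff
		eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
	} else {
		skb = np->rx_skbuff[entry];	hand the full buffer up, zero-copy
		np->rx_skbuff[entry] = NULL;
	}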
232
233A subtle aspect of the operation is that the IP header at offset 14 in an
234ethernet frame isn't longword aligned for further processing.
235Unaligned buffers are permitted by the Sundance hardware, so
236frames are received into the skbuff at an offset of "+2", 16-byte aligning
237the IP header.
238
239IIId. Synchronization
240
241The driver runs as two independent, single-threaded flows of control.  One
242is the send-packet routine, which enforces single-threaded use by the
243dev->tbusy flag.  The other thread is the interrupt handler, which is single
244threaded by the hardware and interrupt handling software.
245
246The send packet thread has partial control over the Tx ring and 'dev->tbusy'
247flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
248queue slot is empty, it clears the tbusy flag when finished otherwise it sets
249the 'lp->tx_full' flag.
250
251The interrupt handler has exclusive control over the Rx ring and records stats
252from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
253empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
254clears both the tx_full and tbusy flags.
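
Concretely, the reclaim loop in intr_handler() below advances dirty_tx past
every completed entry, roughly (DMA unmapping omitted):

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		if (!(np->tx_ring[entry].status & 0x00010000))
			break;				not yet completed
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}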
255
256IV. Notes
257
258IVb. References
259
260The Sundance ST201 datasheet, preliminary version.
261http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
262http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
263
264IVc. Errata
265
266*/
267
268/* Work-around for Kendin chip bugs. */
269#ifndef CONFIG_SUNDANCE_MMIO
270#define USE_IO_OPS 1
271#endif
272
273static struct pci_device_id sundance_pci_tbl[] __devinitdata = {
274	{0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0},
275	{0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1},
276	{0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2},
277	{0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
278	{0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
279	{0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
280	{0,}
281};
282MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
283
284enum {
285	netdev_io_size = 128
286};
287
288struct pci_id_info {
289        const char *name;
290};
291static struct pci_id_info pci_id_tbl[] = {
292	{"D-Link DFE-550TX FAST Ethernet Adapter"},
293	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
294	{"D-Link DFE-580TX 4 port Server Adapter"},
295	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
296	{"D-Link DL10050-based FAST Ethernet Adapter"},
297	{"Sundance Technology Alta"},
298	{0,},			/* 0 terminated list. */
299};
300
/* This driver was written to use PCI memory space; however, x86-oriented
   hardware often uses I/O space accesses. */
303#ifdef USE_IO_OPS
304#undef readb
305#undef readw
306#undef readl
307#undef writeb
308#undef writew
309#undef writel
310#define readb inb
311#define readw inw
312#define readl inl
313#define writeb outb
314#define writew outw
315#define writel outl
316#endif
317
318/* Offsets to the device registers.
319   Unlike software-only systems, device drivers interact with complex hardware.
320   It's not useful to define symbolic names for every register bit in the
321   device.  The name can only partially document the semantics and make
322   the driver longer and more difficult to read.
323   In general, only the important configuration values or bits changed
324   multiple times should be defined symbolically.
325*/
326enum alta_offsets {
327	DMACtrl = 0x00,
328	TxListPtr = 0x04,
329	TxDMABurstThresh = 0x08,
330	TxDMAUrgentThresh = 0x09,
331	TxDMAPollPeriod = 0x0a,
332	RxDMAStatus = 0x0c,
333	RxListPtr = 0x10,
334	DebugCtrl0 = 0x1a,
335	DebugCtrl1 = 0x1c,
336	RxDMABurstThresh = 0x14,
337	RxDMAUrgentThresh = 0x15,
338	RxDMAPollPeriod = 0x16,
339	LEDCtrl = 0x1a,
340	ASICCtrl = 0x30,
341	EEData = 0x34,
342	EECtrl = 0x36,
343	TxStartThresh = 0x3c,
344	RxEarlyThresh = 0x3e,
345	FlashAddr = 0x40,
346	FlashData = 0x44,
347	TxStatus = 0x46,
348	TxFrameId = 0x47,
349	DownCounter = 0x18,
350	IntrClear = 0x4a,
351	IntrEnable = 0x4c,
352	IntrStatus = 0x4e,
353	MACCtrl0 = 0x50,
354	MACCtrl1 = 0x52,
355	StationAddr = 0x54,
356	MaxFrameSize = 0x5A,
357	RxMode = 0x5c,
358	MIICtrl = 0x5e,
359	MulticastFilter0 = 0x60,
360	MulticastFilter1 = 0x64,
361	RxOctetsLow = 0x68,
362	RxOctetsHigh = 0x6a,
363	TxOctetsLow = 0x6c,
364	TxOctetsHigh = 0x6e,
365	TxFramesOK = 0x70,
366	RxFramesOK = 0x72,
367	StatsCarrierError = 0x74,
368	StatsLateColl = 0x75,
369	StatsMultiColl = 0x76,
370	StatsOneColl = 0x77,
371	StatsTxDefer = 0x78,
372	RxMissed = 0x79,
373	StatsTxXSDefer = 0x7a,
374	StatsTxAbort = 0x7b,
375	StatsBcastTx = 0x7c,
376	StatsBcastRx = 0x7d,
377	StatsMcastTx = 0x7e,
378	StatsMcastRx = 0x7f,
379	/* Aliased and bogus values! */
380	RxStatus = 0x0c,
381};
382enum ASICCtrl_HiWord_bit {
383	GlobalReset = 0x0001,
384	RxReset = 0x0002,
385	TxReset = 0x0004,
386	DMAReset = 0x0008,
387	FIFOReset = 0x0010,
388	NetworkReset = 0x0020,
389	HostReset = 0x0040,
390	ResetBusy = 0x0400,
391};
392
393/* Bits in the interrupt status/mask registers. */
394enum intr_status_bits {
395	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
396	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
397	IntrDrvRqst=0x0040,
398	StatsMax=0x0080, LinkChange=0x0100,
399	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
400};
401
402/* Bits in the RxMode register. */
403enum rx_mode_bits {
404	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
405	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
406};
407/* Bits in MACCtrl. */
408enum mac_ctrl0_bits {
409	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
410	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
411};
412enum mac_ctrl1_bits {
413	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
414	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
415	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
416};
417
418/* The Rx and Tx buffer descriptors. */
419/* Note that using only 32 bit fields simplifies conversion to big-endian
420   architectures. */
421struct netdev_desc {
422	u32 next_desc;
423	u32 status;
424	struct desc_frag { u32 addr, length; } frag[1];
425};
426
427/* Bits in netdev_desc.status */
428enum desc_status_bits {
429	DescOwn=0x8000,
430	DescEndPacket=0x4000,
431	DescEndRing=0x2000,
432	LastFrag=0x80000000,
433	DescIntrOnTx=0x8000,
434	DescIntrOnDMADone=0x80000000,
435	DisableAlign = 0x00000001,
436};
437
438#define PRIV_ALIGN	15 	/* Required alignment mask */
439/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
440   within the structure. */
441#define MII_CNT		4
442struct netdev_private {
443	/* Descriptor rings first for alignment. */
444	struct netdev_desc *rx_ring;
445	struct netdev_desc *tx_ring;
446	struct sk_buff* rx_skbuff[RX_RING_SIZE];
447	struct sk_buff* tx_skbuff[TX_RING_SIZE];
448        dma_addr_t tx_ring_dma;
449        dma_addr_t rx_ring_dma;
450	struct net_device_stats stats;
451	struct timer_list timer;		/* Media monitoring timer. */
452	/* Frequently used values: keep some adjacent for cache effect. */
453	spinlock_t lock;
454	spinlock_t rx_lock;			/* Group with Tx control cache line. */
455	int msg_enable;
456	int chip_id;
457	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
458	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
459	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
460	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
462	unsigned int flowctrl:1;
463	unsigned int default_port:4;		/* Last dev->if_port value. */
464	unsigned int an_enable:1;
465	unsigned int speed;
466	struct tasklet_struct rx_tasklet;
467	struct tasklet_struct tx_tasklet;
468	int budget;
469	int cur_task;
470	/* Multicast and receive mode. */
471	spinlock_t mcastlock;			/* SMP lock multicast updates. */
472	u16 mcast_filter[4];
473	/* MII transceiver section. */
474	struct mii_if_info mii_if;
475	int mii_preamble_required;
476	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
477	struct pci_dev *pci_dev;
478	unsigned char pci_rev_id;
479};
480
481/* The station address location in the EEPROM. */
482#define EEPROM_SA_OFFSET	0x10
483#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
484			IntrDrvRqst | IntrTxDone | StatsMax | \
485			LinkChange)
486
487static int  change_mtu(struct net_device *dev, int new_mtu);
488static int  eeprom_read(long ioaddr, int location);
489static int  mdio_read(struct net_device *dev, int phy_id, int location);
490static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
491static int  netdev_open(struct net_device *dev);
492static void check_duplex(struct net_device *dev);
493static void netdev_timer(unsigned long data);
494static void tx_timeout(struct net_device *dev);
495static void init_ring(struct net_device *dev);
496static int  start_tx(struct sk_buff *skb, struct net_device *dev);
497static int reset_tx (struct net_device *dev);
498static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
499static void rx_poll(unsigned long data);
500static void tx_poll(unsigned long data);
501static void refill_rx (struct net_device *dev);
502static void netdev_error(struct net_device *dev, int intr_status);
504static void set_rx_mode(struct net_device *dev);
505static struct net_device_stats *get_stats(struct net_device *dev);
506static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
507static int  netdev_close(struct net_device *dev);
508
509
510
511static int __devinit sundance_probe1 (struct pci_dev *pdev,
512				      const struct pci_device_id *ent)
513{
514	struct net_device *dev;
515	struct netdev_private *np;
516	static int card_idx;
517	int chip_idx = ent->driver_data;
518	int irq;
519	int i;
520	long ioaddr;
521	u16 mii_ctl;
522	void *ring_space;
523	dma_addr_t ring_dma;
524
525
526/* when built into the kernel, we only print version if device is found */
527#ifndef MODULE
528	static int printed_version;
529	if (!printed_version++)
530		printk(version);
531#endif
532
533	if (pci_enable_device(pdev))
534		return -EIO;
535	pci_set_master(pdev);
536
537	irq = pdev->irq;
538
539	dev = alloc_etherdev(sizeof(*np));
540	if (!dev)
541		return -ENOMEM;
542	SET_MODULE_OWNER(dev);
543
544	if (pci_request_regions(pdev, DRV_NAME))
545		goto err_out_netdev;
546
547#ifdef USE_IO_OPS
548	ioaddr = pci_resource_start(pdev, 0);
549#else
550	ioaddr = pci_resource_start(pdev, 1);
551	ioaddr = (long) ioremap (ioaddr, netdev_io_size);
552	if (!ioaddr)
553		goto err_out_res;
554#endif
555
556	for (i = 0; i < 3; i++)
557		((u16 *)dev->dev_addr)[i] =
558			le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
559
560	dev->base_addr = ioaddr;
561	dev->irq = irq;
562
563	np = dev->priv;
564	np->pci_dev = pdev;
565	np->chip_id = chip_idx;
566	np->msg_enable = (1 << debug) - 1;
567	spin_lock_init(&np->lock);
568	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
569	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
570
571	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
572	if (!ring_space)
573		goto err_out_cleardev;
574	np->tx_ring = (struct netdev_desc *)ring_space;
575	np->tx_ring_dma = ring_dma;
576
577	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
578	if (!ring_space)
579		goto err_out_unmap_tx;
580	np->rx_ring = (struct netdev_desc *)ring_space;
581	np->rx_ring_dma = ring_dma;
582
583	np->mii_if.dev = dev;
584	np->mii_if.mdio_read = mdio_read;
585	np->mii_if.mdio_write = mdio_write;
586	np->mii_if.phy_id_mask = 0x1f;
587	np->mii_if.reg_num_mask = 0x1f;
588
589	/* The chip-specific entries in the device structure. */
590	dev->open = &netdev_open;
591	dev->hard_start_xmit = &start_tx;
592	dev->stop = &netdev_close;
593	dev->get_stats = &get_stats;
594	dev->set_multicast_list = &set_rx_mode;
595	dev->do_ioctl = &netdev_ioctl;
596	dev->tx_timeout = &tx_timeout;
597	dev->watchdog_timeo = TX_TIMEOUT;
598	dev->change_mtu = &change_mtu;
599	pci_set_drvdata(pdev, dev);
600
601	pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
602
603	i = register_netdev(dev);
604	if (i)
605		goto err_out_unmap_rx;
606
607	printk(KERN_INFO "%s: %s at 0x%lx, ",
608		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
609	for (i = 0; i < 5; i++)
610			printk("%2.2x:", dev->dev_addr[i]);
611	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
612
613	if (1) {
614		int phy, phy_idx = 0;
615		np->phys[0] = 1;		/* Default setting */
616		np->mii_preamble_required++;
617		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
618			int mii_status = mdio_read(dev, phy, MII_BMSR);
619			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
620				np->phys[phy_idx++] = phy;
621				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
622				if ((mii_status & 0x0040) == 0)
623					np->mii_preamble_required++;
624				printk(KERN_INFO "%s: MII PHY found at address %d, status "
625					   "0x%4.4x advertising %4.4x.\n",
626					   dev->name, phy, mii_status, np->mii_if.advertising);
627			}
628		}
629		np->mii_preamble_required--;
630
631		if (phy_idx == 0) {
632			printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
633				   dev->name, readl(ioaddr + ASICCtrl));
634			goto err_out_unregister;
635		}
636
637		np->mii_if.phy_id = np->phys[0];
638	}
639
640	/* Parse override configuration */
641	np->an_enable = 1;
642	if (card_idx < MAX_UNITS) {
643		if (media[card_idx] != NULL) {
644			np->an_enable = 0;
645			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
646			    strcmp (media[card_idx], "4") == 0) {
647				np->speed = 100;
648				np->mii_if.full_duplex = 1;
649			} else if (strcmp (media[card_idx], "100mbps_hd") == 0
650				   || strcmp (media[card_idx], "3") == 0) {
651				np->speed = 100;
652				np->mii_if.full_duplex = 0;
653			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
654				   strcmp (media[card_idx], "2") == 0) {
655				np->speed = 10;
656				np->mii_if.full_duplex = 1;
657			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
658				   strcmp (media[card_idx], "1") == 0) {
659				np->speed = 10;
660				np->mii_if.full_duplex = 0;
661			} else {
662				np->an_enable = 1;
663			}
664		}
665		if (flowctrl == 0)
666			np->flowctrl = 0;
667	}
668
669	/* Fibre PHY? */
670	if (readl (ioaddr + ASICCtrl) & 0x80) {
671		/* Default 100Mbps Full */
672		if (np->an_enable) {
673			np->speed = 100;
674			np->mii_if.full_duplex = 1;
675			np->an_enable = 0;
676		}
677	}
678	/* Reset PHY */
679	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
680	mdelay (300);
681	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
682	/* Force media type */
683	if (!np->an_enable) {
684		mii_ctl = 0;
685		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
686		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
687		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
688		printk (KERN_INFO "Override speed=%d, %s duplex\n",
689			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
690
691	}
692
693	/* Perhaps move the reset here? */
694	/* Reset the chip to erase previous misconfiguration. */
695	if (netif_msg_hw(np))
696		printk("ASIC Control is %x.\n", readl(ioaddr + ASICCtrl));
697	writew(0x007f, ioaddr + ASICCtrl + 2);
698	if (netif_msg_hw(np))
699		printk("ASIC Control is now %x.\n", readl(ioaddr + ASICCtrl));
700
701	card_idx++;
702	return 0;
703
704err_out_unregister:
705	unregister_netdev(dev);
706err_out_unmap_rx:
707        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
708err_out_unmap_tx:
709        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
710err_out_cleardev:
711	pci_set_drvdata(pdev, NULL);
712#ifndef USE_IO_OPS
713	iounmap((void *)ioaddr);
714err_out_res:
715#endif
716	pci_release_regions(pdev);
717err_out_netdev:
718	kfree (dev);
719	return -ENODEV;
720}
721
722static int change_mtu(struct net_device *dev, int new_mtu)
723{
724	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
725		return -EINVAL;
726	if (netif_running(dev))
727		return -EBUSY;
728	dev->mtu = new_mtu;
729	return 0;
730}
731
732/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
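/* As implemented in eeprom_read() below: a read is started by writing the
   read opcode (0x02 in the high byte) plus the word address to EECtrl, then
   polling until the busy bit (0x8000) clears, at which point EEData holds
   the result. */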
733static int __devinit eeprom_read(long ioaddr, int location)
734{
735	int boguscnt = 1000;		/* Typical 190 ticks. */
736	writew(0x0200 | (location & 0xff), ioaddr + EECtrl);
737	do {
738		if (! (readw(ioaddr + EECtrl) & 0x8000)) {
739			return readw(ioaddr + EEData);
740		}
741	} while (--boguscnt > 0);
742	return 0;
743}
744
745/*  MII transceiver control section.
746	Read and write the MII registers using software-generated serial
747	MDIO protocol.  See the MII specifications or DP83840A data sheet
748	for details.
749
	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
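/* Each bit is clocked out with two register writes, as in mdio_sync() and
   mdio_read() below (illustrative sketch, not a separate code path):

	writeb(dataval, mdio_addr);			present the data bit
	mdio_delay();
	writeb(dataval | MDIO_ShiftClk, mdio_addr);	raise MDC to latch it
	mdio_delay();
*/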
752#define mdio_delay() readb(mdio_addr)
753
754enum mii_reg_bits {
755	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
756};
757#define MDIO_EnbIn  (0)
758#define MDIO_WRITE0 (MDIO_EnbOutput)
759#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
760
761/* Generate the preamble required for initial synchronization and
762   a few older transceivers. */
763static void mdio_sync(long mdio_addr)
764{
765	int bits = 32;
766
767	/* Establish sync by sending at least 32 logic ones. */
768	while (--bits >= 0) {
769		writeb(MDIO_WRITE1, mdio_addr);
770		mdio_delay();
771		writeb(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
772		mdio_delay();
773	}
774}
775
776static int mdio_read(struct net_device *dev, int phy_id, int location)
777{
778	struct netdev_private *np = dev->priv;
779	long mdio_addr = dev->base_addr + MIICtrl;
780	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
781	int i, retval = 0;
782
783	if (np->mii_preamble_required)
784		mdio_sync(mdio_addr);
785
786	/* Shift the read command bits out. */
787	for (i = 15; i >= 0; i--) {
788		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
789
790		writeb(dataval, mdio_addr);
791		mdio_delay();
792		writeb(dataval | MDIO_ShiftClk, mdio_addr);
793		mdio_delay();
794	}
795	/* Read the two transition, 16 data, and wire-idle bits. */
796	for (i = 19; i > 0; i--) {
797		writeb(MDIO_EnbIn, mdio_addr);
798		mdio_delay();
799		retval = (retval << 1) | ((readb(mdio_addr) & MDIO_Data) ? 1 : 0);
800		writeb(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
801		mdio_delay();
802	}
803	return (retval>>1) & 0xffff;
804}
805
806static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
807{
808	struct netdev_private *np = dev->priv;
809	long mdio_addr = dev->base_addr + MIICtrl;
810	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
811	int i;
812
813	if (np->mii_preamble_required)
814		mdio_sync(mdio_addr);
815
816	/* Shift the command bits out. */
817	for (i = 31; i >= 0; i--) {
818		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
819
820		writeb(dataval, mdio_addr);
821		mdio_delay();
822		writeb(dataval | MDIO_ShiftClk, mdio_addr);
823		mdio_delay();
824	}
825	/* Clear out extra bits. */
826	for (i = 2; i > 0; i--) {
827		writeb(MDIO_EnbIn, mdio_addr);
828		mdio_delay();
829		writeb(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
830		mdio_delay();
831	}
832	return;
833}
834
835static int netdev_open(struct net_device *dev)
836{
837	struct netdev_private *np = dev->priv;
838	long ioaddr = dev->base_addr;
839	int i;
840
841	/* Do we need to reset the chip??? */
842
843	i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
844	if (i)
845		return i;
846
847	if (netif_msg_ifup(np))
848		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
849			   dev->name, dev->irq);
850
851	init_ring(dev);
852
853	writel(np->rx_ring_dma, ioaddr + RxListPtr);
854	/* The Tx list pointer is written as packets are queued. */
855
856	for (i = 0; i < 6; i++)
857		writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
858
859	/* Initialize other registers. */
860	writew(dev->mtu + 14, ioaddr + MaxFrameSize);
861	if (dev->mtu > 2047)
862		writel(readl(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
863
864	/* Configure the PCI bus bursts and FIFO thresholds. */
865
866	if (dev->if_port == 0)
867		dev->if_port = np->default_port;
868
869	np->mcastlock = (spinlock_t) SPIN_LOCK_UNLOCKED;
870
871	set_rx_mode(dev);
872	writew(0, ioaddr + IntrEnable);
873	writew(0, ioaddr + DownCounter);
874	/* Set the chip to poll every N*320nsec. */
875	writeb(100, ioaddr + RxDMAPollPeriod);
876	writeb(127, ioaddr + TxDMAPollPeriod);
877	/* Fix DFE-580TX packet drop issue */
878	if (np->pci_rev_id >= 0x14)
879		writeb(0x01, ioaddr + DebugCtrl1);
880	netif_start_queue(dev);
881
882	writew(StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
883
884	if (netif_msg_ifup(np))
885		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
886			   "MAC Control %x, %4.4x %4.4x.\n",
887			   dev->name, readl(ioaddr + RxStatus), readb(ioaddr + TxStatus),
888			   readl(ioaddr + MACCtrl0),
889			   readw(ioaddr + MACCtrl1), readw(ioaddr + MACCtrl0));
890
891	/* Set the timer to check for link beat. */
892	init_timer(&np->timer);
893	np->timer.expires = jiffies + 3*HZ;
894	np->timer.data = (unsigned long)dev;
895	np->timer.function = &netdev_timer;				/* timer handler */
896	add_timer(&np->timer);
897
898	/* Enable interrupts by setting the interrupt mask. */
899	writew(DEFAULT_INTR, ioaddr + IntrEnable);
900
901	return 0;
902}
903
904static void check_duplex(struct net_device *dev)
905{
906	struct netdev_private *np = dev->priv;
907	long ioaddr = dev->base_addr;
908	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
909	int negotiated = mii_lpa & np->mii_if.advertising;
910	int duplex;
911
912	/* Force media */
913	if (!np->an_enable || mii_lpa == 0xffff) {
914		if (np->mii_if.full_duplex)
915			writew (readw (ioaddr + MACCtrl0) | EnbFullDuplex,
916				ioaddr + MACCtrl0);
917		return;
918	}
919
920	/* Autonegotiation */
921	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
922	if (np->mii_if.full_duplex != duplex) {
923		np->mii_if.full_duplex = duplex;
924		if (netif_msg_link(np))
925			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
926				   "negotiated capability %4.4x.\n", dev->name,
927				   duplex ? "full" : "half", np->phys[0], negotiated);
928		writew(duplex ? 0x20 : 0, ioaddr + MACCtrl0);
929	}
930}
931
932static void netdev_timer(unsigned long data)
933{
934	struct net_device *dev = (struct net_device *)data;
935	struct netdev_private *np = dev->priv;
936	long ioaddr = dev->base_addr;
937	int next_tick = 10*HZ;
938
939	if (netif_msg_timer(np)) {
940		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
941			   "Tx %x Rx %x.\n",
942			   dev->name, readw(ioaddr + IntrEnable),
943			   readb(ioaddr + TxStatus), readl(ioaddr + RxStatus));
944	}
945	check_duplex(dev);
946	np->timer.expires = jiffies + next_tick;
947	add_timer(&np->timer);
948}
949
950static void tx_timeout(struct net_device *dev)
951{
952	struct netdev_private *np = dev->priv;
953	long ioaddr = dev->base_addr;
	unsigned long flag;
955
956	netif_stop_queue(dev);
957	tasklet_disable(&np->tx_tasklet);
958	writew(0, ioaddr + IntrEnable);
959	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
960		   "TxFrameId %2.2x,"
961		   " resetting...\n", dev->name, readb(ioaddr + TxStatus),
962		   readb(ioaddr + TxFrameId));
963
964	{
965		int i;
966		for (i=0; i<TX_RING_SIZE; i++) {
967			printk(KERN_DEBUG "%02x %08x %08x %08x(%02x) %08x %08x\n", i,
968				np->tx_ring_dma + i*sizeof(*np->tx_ring),
969				np->tx_ring[i].next_desc,
970				np->tx_ring[i].status,
971				(np->tx_ring[i].status >> 2) & 0xff,
972				np->tx_ring[i].frag[0].addr,
973				np->tx_ring[i].frag[0].length);
974		}
975		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
976			readl(dev->base_addr + TxListPtr),
977			netif_queue_stopped(dev));
978		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
979			np->cur_tx, np->cur_tx % TX_RING_SIZE,
980			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
981		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
982		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
983	}
984	spin_lock_irqsave(&np->lock, flag);
985
	/* Stop and restart the chip's Tx processes. */
987	reset_tx(dev);
988	spin_unlock_irqrestore(&np->lock, flag);
989
990	dev->if_port = 0;
991
992	dev->trans_start = jiffies;
993	np->stats.tx_errors++;
994	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
995		netif_wake_queue(dev);
996	}
997	writew(DEFAULT_INTR, ioaddr + IntrEnable);
998	tasklet_enable(&np->tx_tasklet);
999}
1000
1001
1002/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1003static void init_ring(struct net_device *dev)
1004{
1005	struct netdev_private *np = dev->priv;
1006	int i;
1007
1008	np->cur_rx = np->cur_tx = 0;
1009	np->dirty_rx = np->dirty_tx = 0;
1010	np->cur_task = 0;
1011
1012	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
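	/* The 16 bytes of slack added above cover the 14-byte Ethernet header
	   plus the 2-byte alignment offset reserved when the buffer is posted. */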
1013
1014	/* Initialize all Rx descriptors. */
1015	for (i = 0; i < RX_RING_SIZE; i++) {
1016		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1017			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1018		np->rx_ring[i].status = 0;
1019		np->rx_ring[i].frag[0].length = 0;
1020		np->rx_skbuff[i] = 0;
1021	}
1022
1023	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1024	for (i = 0; i < RX_RING_SIZE; i++) {
1025		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1026		np->rx_skbuff[i] = skb;
1027		if (skb == NULL)
1028			break;
1029		skb->dev = dev;		/* Mark as being used by this device. */
1030		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1031		np->rx_ring[i].frag[0].addr = cpu_to_le32(
1032			pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz,
1033				PCI_DMA_FROMDEVICE));
1034		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1035	}
1036	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1037
1038	for (i = 0; i < TX_RING_SIZE; i++) {
1039		np->tx_skbuff[i] = 0;
1040		np->tx_ring[i].status = 0;
1041	}
1042	return;
1043}
1044
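/* tx_poll() runs as a tasklet: it links descriptors queued by start_tx()
   into the hardware list by filling in next_desc, marks the newest one to
   raise an interrupt on completion, and writes TxListPtr only when the
   chip's current list is idle. */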
1045static void tx_poll (unsigned long data)
1046{
1047	struct net_device *dev = (struct net_device *)data;
1048	struct netdev_private *np = dev->priv;
1049	unsigned head = np->cur_task % TX_RING_SIZE;
1050	struct netdev_desc *txdesc =
1051		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1052
1053	/* Chain the next pointer */
1054	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1055		int entry = np->cur_task % TX_RING_SIZE;
1056		txdesc = &np->tx_ring[entry];
1057		if (np->last_tx) {
1058			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1059				entry*sizeof(struct netdev_desc));
1060		}
1061		np->last_tx = txdesc;
1062	}
	/* Mark the latest descriptor in the Tx ring. */
1064	txdesc->status |= cpu_to_le32(DescIntrOnTx);
1065
1066	if (readl (dev->base_addr + TxListPtr) == 0)
1067		writel (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1068			dev->base_addr + TxListPtr);
1069	return;
1070}
1071
1072static int
1073start_tx (struct sk_buff *skb, struct net_device *dev)
1074{
1075	struct netdev_private *np = dev->priv;
1076	struct netdev_desc *txdesc;
1077	unsigned entry;
1078
1079	/* Calculate the next Tx descriptor entry. */
1080	entry = np->cur_tx % TX_RING_SIZE;
1081	np->tx_skbuff[entry] = skb;
1082	txdesc = &np->tx_ring[entry];
1083
1084	txdesc->next_desc = 0;
1085	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1086	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1087							skb->len,
1088							PCI_DMA_TODEVICE));
1089	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1090
1091	/* Increment cur_tx before tasklet_schedule() */
1092	np->cur_tx++;
1093	mb();
1094	/* Schedule a tx_poll() task */
1095	tasklet_schedule(&np->tx_tasklet);
1096
1097	/* On some architectures: explicitly flush cache lines here. */
1098	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
1099			&& !netif_queue_stopped(dev)) {
1100		/* do nothing */
1101	} else {
1102		netif_stop_queue (dev);
1103	}
1104	dev->trans_start = jiffies;
1105	if (netif_msg_tx_queued(np)) {
1106		printk (KERN_DEBUG
1107			"%s: Transmit frame #%d queued in slot %d.\n",
1108			dev->name, np->cur_tx, entry);
1109	}
1110	return 0;
1111}
1112
/* Reset the hardware Tx logic and free all queued Tx buffers. */
1114static int
1115reset_tx (struct net_device *dev)
1116{
1117	struct netdev_private *np = (struct netdev_private*) dev->priv;
1118	long ioaddr = dev->base_addr;
1119	struct sk_buff *skb;
1120	int i;
1121	int irq = in_interrupt();
1122
1123	/* Reset tx logic, TxListPtr will be cleaned */
1124	writew (TxDisable, ioaddr + MACCtrl1);
1125	writew (TxReset | DMAReset | FIFOReset | NetworkReset,
1126			ioaddr + ASICCtrl + 2);
1127	for (i=50; i > 0; i--) {
1128		if ((readw(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
1129			break;
1130		mdelay(1);
1131	}
1132	/* free all tx skbuff */
1133	for (i = 0; i < TX_RING_SIZE; i++) {
1134		skb = np->tx_skbuff[i];
1135		if (skb) {
1136			pci_unmap_single(np->pci_dev,
1137				np->tx_ring[i].frag[0].addr, skb->len,
1138				PCI_DMA_TODEVICE);
1139			if (irq)
1140				dev_kfree_skb_irq (skb);
1141			else
1142				dev_kfree_skb (skb);
1143			np->tx_skbuff[i] = 0;
1144			np->stats.tx_dropped++;
1145		}
1146	}
1147	np->cur_tx = np->dirty_tx = 0;
1148	np->cur_task = 0;
1149	writew (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1150	return 0;
1151}
1152
/* The interrupt handler cleans up after the Tx thread
   and schedules Rx work via the rx_tasklet. */
1155static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1156{
1157	struct net_device *dev = (struct net_device *)dev_instance;
1158	struct netdev_private *np;
1159	long ioaddr;
1160	int boguscnt = max_interrupt_work;
1161	int hw_frame_id;
1162	int tx_cnt;
1163	int tx_status;
1164
1165	ioaddr = dev->base_addr;
1166	np = dev->priv;
1167
1168	do {
1169		int intr_status = readw(ioaddr + IntrStatus);
1170		writew(intr_status, ioaddr + IntrStatus);
1171
1172		if (netif_msg_intr(np))
1173			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1174				   dev->name, intr_status);
1175
1176		if (!(intr_status & DEFAULT_INTR))
1177			break;
1178
1179		if (intr_status & (IntrRxDMADone)) {
1180			writew(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1181					ioaddr + IntrEnable);
1182			if (np->budget < 0)
1183				np->budget = RX_BUDGET;
1184			tasklet_schedule(&np->rx_tasklet);
1185		}
1186		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1187			tx_status = readw (ioaddr + TxStatus);
1188			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1189				if (netif_msg_tx_done(np))
1190					printk
1191					    ("%s: Transmit status is %2.2x.\n",
1192				     	dev->name, tx_status);
1193				if (tx_status & 0x1e) {
1194					np->stats.tx_errors++;
1195					if (tx_status & 0x10)
1196						np->stats.tx_fifo_errors++;
1197					if (tx_status & 0x08)
1198						np->stats.collisions++;
1199					if (tx_status & 0x02)
1200						np->stats.tx_window_errors++;
					/* This reset has not been verified! */
1202					if (tx_status & 0x10) {	/* Reset the Tx. */
1203						np->stats.tx_fifo_errors++;
1204						spin_lock(&np->lock);
1205						reset_tx(dev);
1206						spin_unlock(&np->lock);
1207					}
1208					if (tx_status & 0x1e)	/* Restart the Tx. */
1209						writew (TxEnable,
1210							ioaddr + MACCtrl1);
1211				}
1212				/* Yup, this is a documentation bug.  It cost me *hours*. */
1213				writew (0, ioaddr + TxStatus);
1214				tx_status = readw (ioaddr + TxStatus);
1215				if (tx_cnt < 0)
1216					break;
1217			}
1218			hw_frame_id = (tx_status >> 8) & 0xff;
1219		} else 	{
1220			hw_frame_id = readb(ioaddr + TxFrameId);
1221		}
1222
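		/* For newer parts (PCI rev >= 0x14, e.g. the DL10050C), descriptors
		   are reclaimed only once the frame id reported by the chip has
		   caught up with the id start_tx() stored in the status word. */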
1223		if (np->pci_rev_id >= 0x14) {
1224			spin_lock(&np->lock);
1225			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1226				int entry = np->dirty_tx % TX_RING_SIZE;
1227				struct sk_buff *skb;
1228				int sw_frame_id;
				sw_frame_id = (np->tx_ring[entry].status >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(np->tx_ring[entry].status & 0x00010000))
					break;
				if (sw_frame_id == (hw_frame_id + 1) % TX_RING_SIZE)
					break;
1235				skb = np->tx_skbuff[entry];
1236				/* Free the original skb. */
1237				pci_unmap_single(np->pci_dev,
1238					np->tx_ring[entry].frag[0].addr,
1239					skb->len, PCI_DMA_TODEVICE);
1240				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1241				np->tx_skbuff[entry] = 0;
1242				np->tx_ring[entry].frag[0].addr = 0;
1243				np->tx_ring[entry].frag[0].length = 0;
1244			}
1245			spin_unlock(&np->lock);
1246		} else {
1247			spin_lock(&np->lock);
1248			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1249				int entry = np->dirty_tx % TX_RING_SIZE;
1250				struct sk_buff *skb;
1251				if (!(np->tx_ring[entry].status & 0x00010000))
1252					break;
1253				skb = np->tx_skbuff[entry];
1254				/* Free the original skb. */
1255				pci_unmap_single(np->pci_dev,
1256					np->tx_ring[entry].frag[0].addr,
1257					skb->len, PCI_DMA_TODEVICE);
1258				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1259				np->tx_skbuff[entry] = 0;
1260				np->tx_ring[entry].frag[0].addr = 0;
1261				np->tx_ring[entry].frag[0].length = 0;
1262			}
1263			spin_unlock(&np->lock);
1264		}
1265
1266		if (netif_queue_stopped(dev) &&
1267			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1268			/* The ring is no longer full, clear busy flag. */
1269			netif_wake_queue (dev);
1270		}
1271		/* Abnormal error summary/uncommon events handlers. */
1272		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1273			netdev_error(dev, intr_status);
1274		if (--boguscnt < 0) {
1275			get_stats(dev);
1276			if (netif_msg_hw(np))
1277				printk(KERN_WARNING "%s: Too much work at interrupt, "
1278				   "status=0x%4.4x / 0x%4.4x.\n",
1279				   dev->name, intr_status, readw(ioaddr + IntrClear));
1280			break;
1281		}
1282	} while (1);
1283	if (netif_msg_intr(np))
1284		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1285			   dev->name, readw(ioaddr + IntrStatus));
1286	writel(5000, ioaddr + DownCounter);
1287
1288}
1289
1290static void rx_poll(unsigned long data)
1291{
1292	struct net_device *dev = (struct net_device *)data;
1293	struct netdev_private *np = dev->priv;
1294	int entry = np->cur_rx % RX_RING_SIZE;
1295	int boguscnt = np->budget;
1296	long ioaddr = dev->base_addr;
1297	int received = 0;
1298
1299	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1300	while (1) {
1301		struct netdev_desc *desc = &(np->rx_ring[entry]);
1302		u32 frame_status = le32_to_cpu(desc->status);
1303		int pkt_len;
1304
1305		if (--boguscnt < 0) {
1306			goto not_done;
1307		}
1308		if (!(frame_status & DescOwn))
1309			break;
1310		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
1311		if (netif_msg_rx_status(np))
1312			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1313				   frame_status);
1314		pci_dma_sync_single(np->pci_dev, desc->frag[0].addr,
1315			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1316
1317		if (frame_status & 0x001f4000) {
			/* There was an error. */
1319			if (netif_msg_rx_err(np))
1320				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1321					   frame_status);
1322			np->stats.rx_errors++;
1323			if (frame_status & 0x00100000) np->stats.rx_length_errors++;
1324			if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
1325			if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
1326			if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
1327			if (frame_status & 0x00100000) {
1328				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1329					   " status %8.8x.\n",
1330					   dev->name, frame_status);
1331			}
1332		} else {
1333			struct sk_buff *skb;
1334#ifndef final_version
1335			if (netif_msg_rx_status(np))
1336				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1337					   ", bogus_cnt %d.\n",
1338					   pkt_len, boguscnt);
1339#endif
1340			/* Check if the packet is long enough to accept without copying
1341			   to a minimally-sized skbuff. */
1342			if (pkt_len < rx_copybreak
1343				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1344				skb->dev = dev;
1345				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1346				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
1347				skb_put(skb, pkt_len);
1348			} else {
1349				pci_unmap_single(np->pci_dev,
1350					desc->frag[0].addr,
1351					np->rx_buf_sz,
1352					PCI_DMA_FROMDEVICE);
1353				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1354				np->rx_skbuff[entry] = NULL;
1355			}
1356			skb->protocol = eth_type_trans(skb, dev);
1357			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1358			netif_rx(skb);
1359			dev->last_rx = jiffies;
1360		}
1361		entry = (entry + 1) % RX_RING_SIZE;
1362		received++;
1363	}
1364	np->cur_rx = entry;
1365	refill_rx (dev);
1366	np->budget -= received;
1367	writew(DEFAULT_INTR, ioaddr + IntrEnable);
1368	return;
1369
1370not_done:
1371	np->cur_rx = entry;
1372	refill_rx (dev);
1373	if (!received)
1374		received = 1;
1375	np->budget -= received;
1376	if (np->budget <= 0)
1377		np->budget = RX_BUDGET;
1378	tasklet_schedule(&np->rx_tasklet);
1379	return;
1380}
1381
1382static void refill_rx (struct net_device *dev)
1383{
1384	struct netdev_private *np = dev->priv;
1385	int entry;
1386	int cnt = 0;
1387
1388	/* Refill the Rx ring buffers. */
1389	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1390		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1391		struct sk_buff *skb;
1392		entry = np->dirty_rx % RX_RING_SIZE;
1393		if (np->rx_skbuff[entry] == NULL) {
1394			skb = dev_alloc_skb(np->rx_buf_sz);
1395			np->rx_skbuff[entry] = skb;
1396			if (skb == NULL)
1397				break;		/* Better luck next round. */
1398			skb->dev = dev;		/* Mark as being used by this device. */
1399			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1400			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1401				pci_map_single(np->pci_dev, skb->tail,
1402					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1403		}
1404		/* Perhaps we need not reset this field. */
1405		np->rx_ring[entry].frag[0].length =
1406			cpu_to_le32(np->rx_buf_sz | LastFrag);
1407		np->rx_ring[entry].status = 0;
1408		cnt++;
1409	}
1410	return;
1411}
1412static void netdev_error(struct net_device *dev, int intr_status)
1413{
1414	long ioaddr = dev->base_addr;
1415	struct netdev_private *np = dev->priv;
1416	u16 mii_ctl, mii_advertise, mii_lpa;
1417	int speed;
1418
1419	if (intr_status & LinkChange) {
1420		if (np->an_enable) {
1421			mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
1422			mii_lpa= mdio_read (dev, np->phys[0], MII_LPA);
1423			mii_advertise &= mii_lpa;
1424			printk (KERN_INFO "%s: Link changed: ", dev->name);
1425			if (mii_advertise & ADVERTISE_100FULL) {
1426				np->speed = 100;
1427				printk ("100Mbps, full duplex\n");
1428			} else if (mii_advertise & ADVERTISE_100HALF) {
1429				np->speed = 100;
1430				printk ("100Mbps, half duplex\n");
1431			} else if (mii_advertise & ADVERTISE_10FULL) {
1432				np->speed = 10;
1433				printk ("10Mbps, full duplex\n");
1434			} else if (mii_advertise & ADVERTISE_10HALF) {
1435				np->speed = 10;
1436				printk ("10Mbps, half duplex\n");
1437			} else
1438				printk ("\n");
1439
1440		} else {
1441			mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
1442			speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1443			np->speed = speed;
			printk (KERN_INFO "%s: Link changed: %dMbps, ",
1445				dev->name, speed);
1446			printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
1447				"full" : "half");
1448		}
1449		check_duplex (dev);
1450		if (np->flowctrl == 0)
1451			writew(readw(ioaddr + MACCtrl0) & ~EnbFlowCtrl,
1452				ioaddr + MACCtrl0);
1453	}
1454	if (intr_status & StatsMax) {
1455		get_stats(dev);
1456	}
1457	if (intr_status & IntrPCIErr) {
1458		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1459			   dev->name, intr_status);
1460		/* We must do a global reset of DMA to continue. */
1461	}
1462}
1463
1464static struct net_device_stats *get_stats(struct net_device *dev)
1465{
1466	long ioaddr = dev->base_addr;
1467	struct netdev_private *np = dev->priv;
1468	int i;
1469
1470	/* We should lock this segment of code for SMP eventually, although
1471	   the vulnerability window is very small and statistics are
1472	   non-critical. */
	/* The chip only needs to report frames it silently dropped. */
1474	np->stats.rx_missed_errors	+= readb(ioaddr + RxMissed);
1475	np->stats.tx_packets += readw(ioaddr + TxFramesOK);
1476	np->stats.rx_packets += readw(ioaddr + RxFramesOK);
1477	np->stats.collisions += readb(ioaddr + StatsLateColl);
1478	np->stats.collisions += readb(ioaddr + StatsMultiColl);
1479	np->stats.collisions += readb(ioaddr + StatsOneColl);
1480	readb(ioaddr + StatsCarrierError);
1481	readb(ioaddr + StatsTxDefer);
1482	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1483		readb(ioaddr + i);
1484	np->stats.tx_bytes += readw(ioaddr + TxOctetsLow);
1485	np->stats.tx_bytes += readw(ioaddr + TxOctetsHigh) << 16;
1486	np->stats.rx_bytes += readw(ioaddr + RxOctetsLow);
1487	np->stats.rx_bytes += readw(ioaddr + RxOctetsHigh) << 16;
1488
1489	return &np->stats;
1490}
1491
1492static void set_rx_mode(struct net_device *dev)
1493{
1494	long ioaddr = dev->base_addr;
1495	u16 mc_filter[4];			/* Multicast hash filter */
1496	u32 rx_mode;
1497	int i;
1498
1499	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1500		/* Unconditionally log net taps. */
1501		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1502		memset(mc_filter, 0xff, sizeof(mc_filter));
1503		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1504	} else if ((dev->mc_count > multicast_filter_limit)
1505			   ||  (dev->flags & IFF_ALLMULTI)) {
1506		/* Too many to match, or accept all multicasts. */
1507		memset(mc_filter, 0xff, sizeof(mc_filter));
1508		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1509	} else if (dev->mc_count) {
1510		struct dev_mc_list *mclist;
1511		int bit;
1512		int index;
1513		int crc;
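		/* Hash filtering: the top 6 bits of the little-endian CRC-32 of
		   each address index a 64-bit table held in the four 16-bit
		   MulticastFilter registers, as computed below. */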
1514		memset (mc_filter, 0, sizeof (mc_filter));
1515		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1516		     i++, mclist = mclist->next) {
1517			crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1518			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1519				if (crc & 0x80000000) index |= 1 << bit;
1520			mc_filter[index/16] |= (1 << (index % 16));
1521		}
1522		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1523	} else {
1524		writeb(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1525		return;
1526	}
1527	for (i = 0; i < 4; i++)
1528		writew(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1529	writeb(rx_mode, ioaddr + RxMode);
1530}
1531
1532static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
1533{
1534	struct netdev_private *np = dev->priv;
1535	u32 ethcmd;
1536
1537	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
1538		return -EFAULT;
1539
1540        switch (ethcmd) {
1541		/* get constant driver settings/info */
1542        	case ETHTOOL_GDRVINFO: {
1543			struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
1544			strcpy(info.driver, DRV_NAME);
1545			strcpy(info.version, DRV_VERSION);
1546			strcpy(info.bus_info, np->pci_dev->slot_name);
1547			memset(&info.fw_version, 0, sizeof(info.fw_version));
1548			if (copy_to_user(useraddr, &info, sizeof(info)))
1549				return -EFAULT;
1550			return 0;
1551		}
1552
1553		/* get media settings */
1554		case ETHTOOL_GSET: {
1555			struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1556			spin_lock_irq(&np->lock);
1557			mii_ethtool_gset(&np->mii_if, &ecmd);
1558			spin_unlock_irq(&np->lock);
1559			if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
1560				return -EFAULT;
1561			return 0;
1562		}
1563		/* set media settings */
1564		case ETHTOOL_SSET: {
1565			int r;
1566			struct ethtool_cmd ecmd;
1567			if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
1568				return -EFAULT;
1569			spin_lock_irq(&np->lock);
1570			r = mii_ethtool_sset(&np->mii_if, &ecmd);
1571			spin_unlock_irq(&np->lock);
1572			return r;
1573		}
1574
1575		/* restart autonegotiation */
1576		case ETHTOOL_NWAY_RST: {
1577			return mii_nway_restart(&np->mii_if);
1578		}
1579
1580		/* get link status */
1581		case ETHTOOL_GLINK: {
1582			struct ethtool_value edata = {ETHTOOL_GLINK};
1583			edata.data = mii_link_ok(&np->mii_if);
1584			if (copy_to_user(useraddr, &edata, sizeof(edata)))
1585				return -EFAULT;
1586			return 0;
1587		}
1588
1589		/* get message-level */
1590		case ETHTOOL_GMSGLVL: {
1591			struct ethtool_value edata = {ETHTOOL_GMSGLVL};
1592			edata.data = np->msg_enable;
1593			if (copy_to_user(useraddr, &edata, sizeof(edata)))
1594				return -EFAULT;
1595			return 0;
1596		}
1597		/* set message-level */
1598		case ETHTOOL_SMSGLVL: {
1599			struct ethtool_value edata;
1600			if (copy_from_user(&edata, useraddr, sizeof(edata)))
1601				return -EFAULT;
1602			np->msg_enable = edata.data;
1603			return 0;
1604		}
1605
1606		default:
1607		return -EOPNOTSUPP;
1608
1609        }
1610}
1611
1612static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1613{
1614	struct netdev_private *np = dev->priv;
1615	struct mii_ioctl_data *data = (struct mii_ioctl_data *) & rq->ifr_data;
1616	int rc;
1617	int i;
1618
1619	if (!netif_running(dev))
1620		return -EINVAL;
1621
1622	if (cmd == SIOCETHTOOL)
1623		rc = netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
1624
1625	else {
1626		spin_lock_irq(&np->lock);
1627		rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
1628		spin_unlock_irq(&np->lock);
1629	}
1630	switch (cmd) {
1631		case SIOCDEVPRIVATE:
1632		for (i=0; i<TX_RING_SIZE; i++) {
1633			printk(KERN_DEBUG "%02x %08x %08x %08x(%02x) %08x %08x\n", i,
1634				np->tx_ring_dma + i*sizeof(*np->tx_ring),
1635				np->tx_ring[i].next_desc,
1636				np->tx_ring[i].status,
1637				(np->tx_ring[i].status >> 2) & 0xff,
1638				np->tx_ring[i].frag[0].addr,
1639				np->tx_ring[i].frag[0].length);
1640		}
1641		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
1642			readl(dev->base_addr + TxListPtr),
1643			netif_queue_stopped(dev));
1644		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1645			np->cur_tx, np->cur_tx % TX_RING_SIZE,
1646			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1647		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1648		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1649			return 0;
1650	}
1651
1652
1653	return rc;
1654}
1655
1656static int netdev_close(struct net_device *dev)
1657{
1658	long ioaddr = dev->base_addr;
1659	struct netdev_private *np = dev->priv;
1660	struct sk_buff *skb;
1661	int i;
1662
1663	netif_stop_queue(dev);
1664
1665	if (netif_msg_ifdown(np)) {
1666		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1667			   "Rx %4.4x Int %2.2x.\n",
1668			   dev->name, readb(ioaddr + TxStatus),
1669			   readl(ioaddr + RxStatus), readw(ioaddr + IntrStatus));
1670		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1671			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1672	}
1673
1674	/* Disable interrupts by clearing the interrupt mask. */
1675	writew(0x0000, ioaddr + IntrEnable);
1676
1677	/* Stop the chip's Tx and Rx processes. */
1678	writew(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1679
1680	/* Wait and kill tasklet */
1681	tasklet_kill(&np->rx_tasklet);
1682	tasklet_kill(&np->tx_tasklet);
1683
1684#ifdef __i386__
1685	if (netif_msg_hw(np)) {
1686		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
1687			   (int)(np->tx_ring_dma));
1688		for (i = 0; i < TX_RING_SIZE; i++)
1689			printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
1690				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1691				   np->tx_ring[i].frag[0].length);
1692		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
1693			   (int)(np->rx_ring_dma));
1694		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1695			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1696				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1697				   np->rx_ring[i].frag[0].length);
1698		}
1699	}
1700#endif /* __i386__ debugging only */
1701
1702	free_irq(dev->irq, dev);
1703
1704	del_timer_sync(&np->timer);
1705
1706	/* Free all the skbuffs in the Rx queue. */
1707	for (i = 0; i < RX_RING_SIZE; i++) {
1708		np->rx_ring[i].status = 0;
1709		np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
1710		skb = np->rx_skbuff[i];
1711		if (skb) {
1712			pci_unmap_single(np->pci_dev,
1713				np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
1714				PCI_DMA_FROMDEVICE);
1715			dev_kfree_skb(skb);
1716			np->rx_skbuff[i] = 0;
1717		}
1718	}
1719	for (i = 0; i < TX_RING_SIZE; i++) {
1720		skb = np->tx_skbuff[i];
1721		if (skb) {
1722			pci_unmap_single(np->pci_dev,
1723				np->tx_ring[i].frag[0].addr, skb->len,
1724				PCI_DMA_TODEVICE);
1725			dev_kfree_skb(skb);
1726			np->tx_skbuff[i] = 0;
1727		}
1728	}
1729
1730	return 0;
1731}
1732
1733static void __devexit sundance_remove1 (struct pci_dev *pdev)
1734{
1735	struct net_device *dev = pci_get_drvdata(pdev);
1736
1737	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
1738	if (dev) {
1739		struct netdev_private *np = dev->priv;
1740
1741		unregister_netdev(dev);
1742        	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1743			np->rx_ring_dma);
1744	        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1745			np->tx_ring_dma);
1746		pci_release_regions(pdev);
1747#ifndef USE_IO_OPS
1748		iounmap((char *)(dev->base_addr));
1749#endif
1750		kfree(dev);
1751		pci_set_drvdata(pdev, NULL);
1752	}
1753}
1754
1755static struct pci_driver sundance_driver = {
1756	name:		DRV_NAME,
1757	id_table:	sundance_pci_tbl,
1758	probe:		sundance_probe1,
1759	remove:		__devexit_p(sundance_remove1),
1760};
1761
1762static int __init sundance_init(void)
1763{
1764/* when a module, this is printed whether or not devices are found in probe */
1765#ifdef MODULE
1766	printk(version);
1767#endif
1768	return pci_module_init(&sundance_driver);
1769}
1770
1771static void __exit sundance_exit(void)
1772{
1773	pci_unregister_driver(&sundance_driver);
1774}
1775
1776module_init(sundance_init);
1777module_exit(sundance_exit);
1778
1779
1780