1/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
2/*
3	Written 1996-1999 by Donald Becker.
4
5	The driver also contains updates by different kernel developers
6	(see incomplete list below).
7	Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
8	Please use this email address and linux-kernel mailing list for bug reports.
9
10	This software may be used and distributed according to the terms
11	of the GNU General Public License, incorporated herein by reference.
12
13	This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
14	It should work with all i82557/558/559 boards.
15
16	Version history:
17	1998 Apr - 2000 Feb  Andrey V. Savochkin <saw@saw.sw.com.sg>
18		Serious fixes for multicast filter list setting, TX timeout routine;
19		RX ring refilling logic;  other stuff
20	2000 Feb  Jeff Garzik <jgarzik@pobox.com>
21		Convert to new PCI driver interface
22	2000 Mar 24  Dragan Stancevic <visitor@valinux.com>
		Disabled FC and ER, to avoid lockups when we get FCP interrupts.
24	2000 Jul 17 Goutham Rao <goutham.rao@intel.com>
		PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
26	2000 Aug 31 David Mosberger <davidm@hpl.hp.com>
27		rx_align support: enables rx DMA without causing unaligned accesses.
28*/
29
30static const char * const version =
31"eepro100.c:v1.09j-t 9/29/99 Donald Becker\n"
32"eepro100.c: $Revision: 1.1.1.1 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
33
34/* A few user-configurable values that apply to all boards.
35   First set is undocumented and spelled per Intel recommendations. */
36
37static int congenb /* = 0 */; /* Enable congestion control in the DP83840. */
38static int txfifo = 8;		/* Tx FIFO threshold in 4 byte units, 0-15 */
39static int rxfifo = 8;		/* Rx FIFO threshold, default 32 bytes. */
40/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
41static int txdmacount = 128;
42static int rxdmacount /* = 0 */;
43
44#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) \
45	|| defined(__arm__)
46  /* align rx buffers to 2 bytes so that IP header is aligned */
47# define rx_align(skb)		skb_reserve((skb), 2)
48# define RxFD_ALIGNMENT		__attribute__ ((aligned (2), packed))
49#else
50# define rx_align(skb)
51# define RxFD_ALIGNMENT
52#endif
53
54/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
55   Lower values use more memory, but are faster. */
56static int rx_copybreak = 200;
57
58/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
59static int max_interrupt_work = 20;
60
61/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
62static int multicast_filter_limit = 64;
63
64/* 'options' is used to pass a transceiver override or full-duplex flag
65   e.g. "options=16" for FD, "options=32" for 100mbps-only. */
66static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
67static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
68
69/* A few values that may be tweaked. */
70/* The ring sizes should be a power of two for efficiency. */
71#define TX_RING_SIZE	64
72#define RX_RING_SIZE	64
/* How many slots the multicast filter setup may take.
   Do not decrease without changing the set_rx_mode() implementation. */
75#define TX_MULTICAST_SIZE   2
76#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
77/* Actual number of TX packets queued, must be
78   <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
79#define TX_QUEUE_LIMIT  (TX_RING_SIZE-TX_MULTICAST_RESERV)
80/* Hysteresis marking queue as no longer full. */
81#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
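/* With the defaults above, TX_MULTICAST_RESERV = 4, TX_QUEUE_LIMIT = 60 and
   TX_QUEUE_UNFULL = 56: the queue is stopped once 60 packets are outstanding
   and woken again once it drains below 56. */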
82
83/* Operational parameters that usually are not changed. */
84
85/* Time in jiffies before concluding the transmitter is hung. */
86#define TX_TIMEOUT		(2*HZ)
/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
88#define PKT_BUF_SZ		1536
89
90#include <linux/module.h>
91
92#include <linux/kernel.h>
93#include <linux/string.h>
94#include <linux/errno.h>
95#include <linux/ioport.h>
96#include <linux/slab.h>
97#include <linux/interrupt.h>
98#include <linux/timer.h>
99#include <linux/pci.h>
100#include <linux/spinlock.h>
101#include <linux/init.h>
102#include <linux/mii.h>
103#include <linux/delay.h>
104#include <linux/bitops.h>
105
106#include <asm/io.h>
107#include <asm/uaccess.h>
108#include <asm/irq.h>
109
110#include <linux/netdevice.h>
111#include <linux/etherdevice.h>
112#include <linux/rtnetlink.h>
113#include <linux/skbuff.h>
114#include <linux/ethtool.h>
115
116static int use_io;
117static int debug = -1;
118#define DEBUG_DEFAULT		(NETIF_MSG_DRV		| \
119				 NETIF_MSG_HW		| \
120				 NETIF_MSG_RX_ERR	| \
121				 NETIF_MSG_TX_ERR)
122#define DEBUG			((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
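/* For example, loading with debug=3 gives (1<<3)-1 = 0x7, i.e.
   NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK; a negative (unset) debug
   value falls back to DEBUG_DEFAULT above. */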
123
124
125MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
126MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
127MODULE_LICENSE("GPL");
128module_param(use_io, int, 0);
129module_param(debug, int, 0);
130module_param_array(options, int, NULL, 0);
131module_param_array(full_duplex, int, NULL, 0);
132module_param(congenb, int, 0);
133module_param(txfifo, int, 0);
134module_param(rxfifo, int, 0);
135module_param(txdmacount, int, 0);
136module_param(rxdmacount, int, 0);
137module_param(rx_copybreak, int, 0);
138module_param(max_interrupt_work, int, 0);
139module_param(multicast_filter_limit, int, 0);
140MODULE_PARM_DESC(debug, "debug level (0-6)");
141MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
142MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
143MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
144MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
145MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
146MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
147MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
148MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
149MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
150MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
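/* Typical module load (illustrative values only):
 *	modprobe eepro100 debug=3 options=0x30
 * enables driver/probe/link messages and forces the first board to 100 Mbps
 * full duplex (bit 5 = 100 Mbps, bit 4 = full duplex). */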
151
152#define RUN_AT(x) (jiffies + (x))
153
154#define netdevice_start(dev)
155#define netdevice_stop(dev)
156#define netif_set_tx_timeout(dev, tf, tm) \
157								do { \
158									(dev)->tx_timeout = (tf); \
159									(dev)->watchdog_timeo = (tm); \
160								} while(0)
161
162
163
164/*
165				Theory of Operation
166
167I. Board Compatibility
168
169This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
170single-chip fast Ethernet controller for PCI, as used on the Intel
171EtherExpress Pro 100 adapter.
172
173II. Board-specific settings
174
175PCI bus devices are configured by the system at boot time, so no jumpers
176need to be set on the board.  The system BIOS should be set to assign the
177PCI INTA signal to an otherwise unused system IRQ line.  While it's
178possible to share PCI interrupt lines, it negatively impacts performance and
179only recent kernels support it.
180
181III. Driver operation
182
183IIIA. General
184The Speedo3 is very similar to other Intel network chips, that is to say
"apparently designed on a different planet".  This chip retains the complex
Rx and Tx descriptors and multiple buffer pointers of previous chips, but
187also has simplified Tx and Rx buffer modes.  This driver uses the "flexible"
188Tx mode, but in a simplified lower-overhead manner: it associates only a
189single buffer descriptor with each frame descriptor.
190
191Despite the extra space overhead in each receive skbuff, the driver must use
192the simplified Rx buffer mode to assure that only a single data buffer is
193associated with each RxFD. The driver implements this by reserving space
194for the Rx descriptor at the head of each Rx skbuff.
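A rough sketch of the resulting buffer layout (one skbuff per ring slot, as
built by speedo_init_rx_ring() below):

	skb head:   +----------------------+
	            |  struct RxFD         |  <- rx_ringp[i], DMA-mapped
	            +----------------------+
	            |  packet data area,   |  <- skb->data after skb_reserve()
	            |  up to PKT_BUF_SZ    |
	            +----------------------+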
195
196The Speedo-3 has receive and command unit base addresses that are added to
197almost all descriptor pointers.  The driver sets these to zero, so that all
198pointer fields are absolute addresses.
199
200The System Control Block (SCB) of some previous Intel chips exists on the
201chip in both PCI I/O and memory space.  This driver uses the I/O space
202registers, but might switch to memory mapped mode to better support non-x86
203processors.
204
205IIIB. Transmit structure
206
207The driver must use the complex Tx command+descriptor mode in order to
have an indirect pointer to the skbuff data section.  Each Tx command block
(TxCB) is associated with two immediately appended Tx Buffer Descriptors
(TxBDs).  A fixed ring of these TxCB+TxBD pairs is kept as part of the
211speedo_private data structure for each adapter instance.
212
213The newer i82558 explicitly supports this structure, and can read the two
214TxBDs in the same PCI burst as the TxCB.
215
216This ring structure is used for all normal transmit packets, but the
217transmit packet descriptors aren't long enough for most non-Tx commands such
218as CmdConfigure.  This is complicated by the possibility that the chip has
219already loaded the link address in the previous descriptor.  So for these
220commands we convert the next free descriptor on the ring to a NoOp, and point
221that descriptor's link to the complex command.
222
An additional complexity of these non-transmit commands is that they may be
added asynchronously to the normal transmit queue, so we disable interrupts
225whenever the Tx descriptor ring is manipulated.
226
227A notable aspect of these special configure commands is that they do
228work with the normal Tx ring entry scavenge method.  The Tx ring scavenge
229is done at interrupt time using the 'dirty_tx' index, and checking for the
command-complete bit.  While the setup frames may have the NoOp command on the
Tx ring marked as complete before the setup command itself has completed, this
is not a problem.  The tx_ring entry can still be safely reused, as the
233tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
234
235Commands may have bits set e.g. CmdSuspend in the command word to either
236suspend or stop the transmit/command unit.  This driver always flags the last
237command with CmdSuspend, erases the CmdSuspend in the previous command, and
238then issues a CU_RESUME.
239Note: Watch out for the potential race condition here: imagine
240	erasing the previous suspend
241		the chip processes the previous command
242		the chip processes the final command, and suspends
243	doing the CU_RESUME
244		the chip processes the next-yet-valid post-final-command.
245So blindly sending a CU_RESUME is only safe if we do it immediately after
erasing the previous CmdSuspend, without the possibility of an
247intervening delay.  Thus the resume command is always within the
248interrupts-disabled region.  This is a timing dependence, but handling this
249condition in a timing-independent way would considerably complicate the code.
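In code, this boils down to the short interrupts-disabled sequence used by
speedo_start_xmit() further below (sketch only, details omitted):

	spin_lock_irqsave(&sp->lock, flags);
	/* ... queue the new command with CmdSuspend set ... */
	wait_for_cmd_done(dev, sp);
	clear_suspend(sp->last_cmd);	/* un-suspend the previous command */
	iowrite8(CUResume, ioaddr + SCBCmd);
	sp->last_cmd = /* the just-queued command */;
	spin_unlock_irqrestore(&sp->lock, flags);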
250
251Note: In previous generation Intel chips, restarting the command unit was a
252notoriously slow process.  This is presumably no longer true.
253
254IIIC. Receive structure
255
256Because of the bus-master support on the Speedo3 this driver uses the new
257SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
258This scheme allocates full-sized skbuffs as receive buffers.  The value
259SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
trade off the memory wasted by passing the full-sized skbuff to the queue
261layer for all frames vs. the copying cost of copying a frame to a
262correctly-sized skbuff.
263
264For small frames the copying cost is negligible (esp. considering that we
265are pre-loading the cache with immediately useful header information), so we
266allocate a new, minimally-sized skbuff.  For large frames the copying cost
267is non-trivial, and the larger copy might flush the cache of useful data, so
268we pass up the skbuff the packet was received into.
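Roughly, the receive-path decision looks like this (sketch only; the actual
code lives in speedo_rx()):

	if (pkt_len < rx_copybreak) {
		/* copy the frame into a freshly allocated small skbuff and
		   leave the original full-sized buffer on the ring */
	} else {
		/* unmap the buffer, pass the original skbuff up the stack and
		   later refill that ring slot with a new full-sized skbuff */
	}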
269
270IV. Notes
271
272Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
273that stated that I could disclose the information.  But I still resent
274having to sign an Intel NDA when I'm helping Intel sell their own product!
275
276*/
277
278static int speedo_found1(struct pci_dev *pdev, void __iomem *ioaddr, int fnd_cnt, int acpi_idle_state);
279
280/* Offsets to the various registers.
   Accesses need not be longword aligned. */
282enum speedo_offsets {
283	SCBStatus = 0, SCBCmd = 2,	/* Rx/Command Unit command and status. */
284	SCBIntmask = 3,
285	SCBPointer = 4,				/* General purpose pointer. */
286	SCBPort = 8,				/* Misc. commands and operands.  */
287	SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
288	SCBCtrlMDI = 16,			/* MDI interface control. */
289	SCBEarlyRx = 20,			/* Early receive byte count. */
290};
291/* Commands that can be put in a command list entry. */
292enum commands {
293	CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
294	CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
295	CmdDump = 0x60000, CmdDiagnose = 0x70000,
296	CmdSuspend = 0x40000000,	/* Suspend after completion. */
297	CmdIntr = 0x20000000,		/* Interrupt after completion. */
298	CmdTxFlex = 0x00080000,		/* Use "Flexible mode" for CmdTx command. */
299};
/* Clear CmdSuspend (1<<30), avoiding interference with the card's access to the
301   status bits.  Previous driver versions used separate 16 bit fields for
302   commands and statuses.  --SAW
303 */
304#if defined(__alpha__)
305# define clear_suspend(cmd)  clear_bit(30, &(cmd)->cmd_status);
306#else
307# if defined(__LITTLE_ENDIAN)
308#  define clear_suspend(cmd)  ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
309# elif defined(__BIG_ENDIAN)
310#  define clear_suspend(cmd)  ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
311# else
312#  error Unsupported byteorder
313# endif
314#endif
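/* Why 0x4000 vs. 0x0040: cmd_status is stored little-endian, so CmdSuspend
   (bit 30) lives in memory byte 3.  Halfword index [1] covers bytes 2-3, and
   that byte reads back as 0x4000 on a little-endian CPU but as 0x0040 on a
   big-endian one. */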
315
316enum SCBCmdBits {
317	SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
318	SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
319	SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
320	/* The rest are Rx and Tx commands. */
321	CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
322	CUCmdBase=0x0060,	/* CU Base address (set to zero) . */
323	CUDumpStats=0x0070, /* Dump then reset stats counters. */
324	RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
325	RxResumeNoResources=0x0007,
326};
327
328enum SCBPort_cmds {
329	PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
330};
331
332/* The Speedo3 Rx and Tx frame/buffer descriptors. */
333struct descriptor {			    /* A generic descriptor. */
334	volatile s32 cmd_status;	/* All command and status fields. */
335	u32 link;				    /* struct descriptor *  */
336	unsigned char params[0];
337};
338
339/* The Speedo3 Rx and Tx buffer descriptors. */
340struct RxFD {					/* Receive frame descriptor. */
341	volatile s32 status;
342	u32 link;					/* struct RxFD * */
343	u32 rx_buf_addr;			/* void * */
344	u32 count;
345} RxFD_ALIGNMENT;
346
347/* Selected elements of the Tx/RxFD.status word. */
348enum RxFD_bits {
349	RxComplete=0x8000, RxOK=0x2000,
350	RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
351	RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
352	TxUnderrun=0x1000,  StatusComplete=0x8000,
353};
354
355#define CONFIG_DATA_SIZE 22
356struct TxFD {					/* Transmit frame descriptor set. */
357	s32 status;
358	u32 link;					/* void * */
359	u32 tx_desc_addr;			/* Always points to the tx_buf_addr element. */
360	s32 count;					/* # of TBD (=1), Tx start thresh., etc. */
361	/* This constitutes two "TBD" entries -- we only use one. */
362#define TX_DESCR_BUF_OFFSET 16
363	u32 tx_buf_addr0;			/* void *, frame to be transmitted.  */
364	s32 tx_buf_size0;			/* Length of Tx frame. */
365	u32 tx_buf_addr1;			/* void *, frame to be transmitted.  */
366	s32 tx_buf_size1;			/* Length of Tx frame. */
367	/* the structure must have space for at least CONFIG_DATA_SIZE starting
368	 * from tx_desc_addr field */
369};
370
371/* Multicast filter setting block.  --SAW */
372struct speedo_mc_block {
373	struct speedo_mc_block *next;
374	unsigned int tx;
375	dma_addr_t frame_dma;
376	unsigned int len;
377	struct descriptor frame __attribute__ ((__aligned__(16)));
378};
379
380/* Elements of the dump_statistics block. This block must be lword aligned. */
381struct speedo_stats {
382	u32 tx_good_frames;
383	u32 tx_coll16_errs;
384	u32 tx_late_colls;
385	u32 tx_underruns;
386	u32 tx_lost_carrier;
387	u32 tx_deferred;
388	u32 tx_one_colls;
389	u32 tx_multi_colls;
390	u32 tx_total_colls;
391	u32 rx_good_frames;
392	u32 rx_crc_errs;
393	u32 rx_align_errs;
394	u32 rx_resource_errs;
395	u32 rx_overrun_errs;
396	u32 rx_colls_errs;
397	u32 rx_runt_errs;
398	u32 done_marker;
399};
400
401enum Rx_ring_state_bits {
402	RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
403};
404
405/* Do not change the position (alignment) of the first few elements!
406   The later elements are grouped for cache locality.
407
   Unfortunately, all the positions have been shifted since then.
409   A new re-alignment is required.  2000/03/06  SAW */
410struct speedo_private {
411    void __iomem *regs;
412	struct TxFD	*tx_ring;		/* Commands (usually CmdTxPacket). */
413	struct RxFD *rx_ringp[RX_RING_SIZE];	/* Rx descriptor, used as ring. */
	/* The addresses of Tx/Rx-in-place packets/buffers. */
415	struct sk_buff *tx_skbuff[TX_RING_SIZE];
416	struct sk_buff *rx_skbuff[RX_RING_SIZE];
417	/* Mapped addresses of the rings. */
418	dma_addr_t tx_ring_dma;
419#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
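	/* Note: speedo_found1() places the lstats block immediately after the Tx
	   ring, so TX_RING_ELEM_DMA(sp, TX_RING_SIZE) is also lstats_dma. */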
420	dma_addr_t rx_ring_dma[RX_RING_SIZE];
421	struct descriptor *last_cmd;		/* Last command sent. */
422	unsigned int cur_tx, dirty_tx;		/* The ring entries to be free()ed. */
423	spinlock_t lock;			/* Group with Tx control cache line. */
424	u32 tx_threshold;			/* The value for txdesc.count. */
425	struct RxFD *last_rxf;			/* Last filled RX buffer. */
426	dma_addr_t last_rxf_dma;
427	unsigned int cur_rx, dirty_rx;		/* The next free ring entry */
428	long last_rx_time;			/* Last Rx, in jiffies, to handle Rx hang. */
429	struct net_device_stats stats;
430	struct speedo_stats *lstats;
431	dma_addr_t lstats_dma;
432	int chip_id;
433	struct pci_dev *pdev;
434	struct timer_list timer;		/* Media selection timer. */
435	struct speedo_mc_block *mc_setup_head;	/* Multicast setup frame list head. */
436	struct speedo_mc_block *mc_setup_tail;	/* Multicast setup frame list tail. */
437	long in_interrupt;			/* Word-aligned dev->interrupt */
438	unsigned char acpi_pwr;
439	signed char rx_mode;			/* Current PROMISC/ALLMULTI setting. */
440	unsigned int tx_full:1;			/* The Tx queue is full. */
441	unsigned int flow_ctrl:1;		/* Use 802.3x flow control. */
442	unsigned int rx_bug:1;
443	unsigned char default_port:8;		/* Last dev->if_port value. */
444	unsigned char rx_ring_state;		/* RX ring status flags. */
445	unsigned short phy[2];			/* PHY media interfaces available. */
446	unsigned short partner;			/* Link partner caps. */
447	struct mii_if_info mii_if;		/* MII API hooks, info */
448	u32 msg_enable;				/* debug message level */
449};
450
451/* The parameters for a CmdConfigure operation.
452   There are so many options that it would be difficult to document each bit.
453   We mostly use the default or recommended settings. */
454static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
455	22, 0x08, 0, 0,  0, 0, 0x32, 0x03,  1, /* 1=Use MII  0=Use AUI */
456	0, 0x2E, 0,  0x60, 0,
457	0xf2, 0x48,   0, 0x40, 0xf2, 0x80, 		/* 0x40=Force full-duplex */
458	0x3f, 0x05, };
459static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
460	22, 0x08, 0, 1,  0, 0, 0x22, 0x03,  1, /* 1=Use MII  0=Use AUI */
461	0, 0x2E, 0,  0x60, 0x08, 0x88,
462	0x68, 0, 0x40, 0xf2, 0x84,		/* Disable FC */
463	0x31, 0x05, };
464
465/* PHY media interface chips. */
466static const char * const phys[] = {
467	"None", "i82553-A/B", "i82553-C", "i82503",
468	"DP83840", "80c240", "80c24", "i82555",
469	"unknown-8", "unknown-9", "DP83840A", "unknown-11",
470	"unknown-12", "unknown-13", "unknown-14", "unknown-15", };
471enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
472					 S80C24, I82555, DP83840A=10, };
473static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
474#define EE_READ_CMD		(6)
475
476static int eepro100_init_one(struct pci_dev *pdev,
477		const struct pci_device_id *ent);
478
479static int do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len);
480static int mdio_read(struct net_device *dev, int phy_id, int location);
481static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
482static int speedo_open(struct net_device *dev);
483static void speedo_resume(struct net_device *dev);
484static void speedo_timer(unsigned long data);
485static void speedo_init_rx_ring(struct net_device *dev);
486static void speedo_tx_timeout(struct net_device *dev);
487static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
488static void speedo_refill_rx_buffers(struct net_device *dev, int force);
489static int speedo_rx(struct net_device *dev);
490static void speedo_tx_buffer_gc(struct net_device *dev);
491static irqreturn_t speedo_interrupt(int irq, void *dev_instance);
492static int speedo_close(struct net_device *dev);
493static struct net_device_stats *speedo_get_stats(struct net_device *dev);
494static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
495static void set_rx_mode(struct net_device *dev);
496static void speedo_show_state(struct net_device *dev);
497static const struct ethtool_ops ethtool_ops;
498
499
500
501#ifdef honor_default_port
502/* Optional driver feature to allow forcing the transceiver setting.
503   Not recommended. */
504static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
505						   0x2000, 0x2100, 0x0400, 0x3100};
506#endif
507
508/* How to wait for the command unit to accept a command.
509   Typically this takes 0 ticks. */
510static inline unsigned char wait_for_cmd_done(struct net_device *dev,
511											  	struct speedo_private *sp)
512{
513	int wait = 1000;
514	void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
515	unsigned char r;
516
517	do  {
518		udelay(1);
519		r = ioread8(cmd_ioaddr);
520	} while(r && --wait >= 0);
521
522	if (wait < 0)
523		printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
524	return r;
525}
526
527static int __devinit eepro100_init_one (struct pci_dev *pdev,
528		const struct pci_device_id *ent)
529{
530	void __iomem *ioaddr;
531	int irq, pci_bar;
532	int acpi_idle_state = 0, pm;
533	static int cards_found /* = 0 */;
534	unsigned long pci_base;
535
536#ifndef MODULE
537	/* when built-in, we only print version if device is found */
538	static int did_version;
539	if (did_version++ == 0)
540		printk(version);
541#endif
542
543	/* save power state before pci_enable_device overwrites it */
544	pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
545	if (pm) {
546		u16 pwr_command;
547		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
548		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
549	}
550
551	if (pci_enable_device(pdev))
		goto err_out_none;
553
554	pci_set_master(pdev);
555
556	if (!request_region(pci_resource_start(pdev, 1),
557			pci_resource_len(pdev, 1), "eepro100")) {
558		dev_err(&pdev->dev, "eepro100: cannot reserve I/O ports\n");
559		goto err_out_none;
560	}
561	if (!request_mem_region(pci_resource_start(pdev, 0),
562			pci_resource_len(pdev, 0), "eepro100")) {
563		dev_err(&pdev->dev, "eepro100: cannot reserve MMIO region\n");
564		goto err_out_free_pio_region;
565	}
566
567	irq = pdev->irq;
568	pci_bar = use_io ? 1 : 0;
569	pci_base = pci_resource_start(pdev, pci_bar);
570	if (DEBUG & NETIF_MSG_PROBE)
571		printk("Found Intel i82557 PCI Speedo at %#lx, IRQ %d.\n",
572		       pci_base, irq);
573
574	ioaddr = pci_iomap(pdev, pci_bar, 0);
575	if (!ioaddr) {
576		dev_err(&pdev->dev, "eepro100: cannot remap IO\n");
577		goto err_out_free_mmio_region;
578	}
579
580	if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
581		cards_found++;
582	else
583		goto err_out_iounmap;
584
585	return 0;
586
587err_out_iounmap: ;
588	pci_iounmap(pdev, ioaddr);
589err_out_free_mmio_region:
590	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
591err_out_free_pio_region:
592	release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
593err_out_none:
594	return -ENODEV;
595}
596
597#ifdef CONFIG_NET_POLL_CONTROLLER
598/*
599 * Polling 'interrupt' - used by things like netconsole to send skbs
600 * without having to re-enable interrupts. It's not called while
601 * the interrupt routine is executing.
602 */
603
604static void poll_speedo (struct net_device *dev)
605{
606	/* disable_irq is not very nice, but with the funny lockless design
607	   we have no other choice. */
608	disable_irq(dev->irq);
609	speedo_interrupt (dev->irq, dev);
610	enable_irq(dev->irq);
611}
612#endif
613
614static int __devinit speedo_found1(struct pci_dev *pdev,
615		void __iomem *ioaddr, int card_idx, int acpi_idle_state)
616{
617	struct net_device *dev;
618	struct speedo_private *sp;
619	const char *product;
620	int i, option;
621	u16 eeprom[0x100];
622	int size;
623	void *tx_ring_space;
624	dma_addr_t tx_ring_dma;
625
626	size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
627	tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
628	if (tx_ring_space == NULL)
629		return -1;
630
631	dev = alloc_etherdev(sizeof(struct speedo_private));
632	if (dev == NULL) {
633		printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
634		pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
635		return -1;
636	}
637
638	SET_MODULE_OWNER(dev);
639	SET_NETDEV_DEV(dev, &pdev->dev);
640
641	if (dev->mem_start > 0)
642		option = dev->mem_start;
643	else if (card_idx >= 0  &&  options[card_idx] >= 0)
644		option = options[card_idx];
645	else
646		option = 0;
647
648	rtnl_lock();
649	if (dev_alloc_name(dev, dev->name) < 0)
650		goto err_free_unlock;
651
652	/* Read the station address EEPROM before doing the reset.
	   Nominally this should even be done before accepting the device, but
654	   then we wouldn't have a device name with which to report the error.
655	   The size test is for 6 bit vs. 8 bit address serial EEPROMs.
656	*/
657	{
658		void __iomem *iobase;
659		int read_cmd, ee_size;
660		u16 sum;
661		int j;
662
663		/* Use IO only to avoid postponed writes and satisfy EEPROM timing
664		   requirements. */
665		iobase = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
666		if (!iobase)
667			goto err_free_unlock;
668		if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
669			== 0xffe0000) {
670			ee_size = 0x100;
671			read_cmd = EE_READ_CMD << 24;
672		} else {
673			ee_size = 0x40;
674			read_cmd = EE_READ_CMD << 22;
675		}
676
677		for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
678			u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
679			eeprom[i] = value;
680			sum += value;
681			if (i < 3) {
682				dev->dev_addr[j++] = value;
683				dev->dev_addr[j++] = value >> 8;
684			}
685		}
686		if (sum != 0xBABA)
687			printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
688				   "check settings before activating this device!\n",
689				   dev->name, sum);
690		/* Don't  unregister_netdev(dev);  as the EEPro may actually be
691		   usable, especially if the MAC address is set later.
692		   On the other hand, it may be unusable if MDI data is corrupted. */
693
694		pci_iounmap(pdev, iobase);
695	}
696
697	/* Reset the chip: stop Tx and Rx processes and clear counters.
698	   This takes less than 10usec and will easily finish before the next
699	   action. */
700	iowrite32(PortReset, ioaddr + SCBPort);
701	ioread32(ioaddr + SCBPort);
702	udelay(10);
703
704	if (eeprom[3] & 0x0100)
705		product = "OEM i82557/i82558 10/100 Ethernet";
706	else
707		product = pci_name(pdev);
708
709	printk(KERN_INFO "%s: %s, ", dev->name, product);
710
711	for (i = 0; i < 5; i++)
712		printk("%2.2X:", dev->dev_addr[i]);
713	printk("%2.2X, ", dev->dev_addr[i]);
714	printk("IRQ %d.\n", pdev->irq);
715
716	sp = netdev_priv(dev);
717
718	/* we must initialize this early, for mdio_{read,write} */
719	sp->regs = ioaddr;
720
721	/* OK, this is pure kernel bloat.  I don't like it when other drivers
722	   waste non-pageable kernel space to emit similar messages, but I need
723	   them for bug reports. */
724	{
725		const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
726		/* The self-test results must be paragraph aligned. */
727		volatile s32 *self_test_results;
		int boguscnt = 16000;	/* Timeout for self-test. */
729		if ((eeprom[3] & 0x03) != 0x03)
730			printk(KERN_INFO "  Receiver lock-up bug exists -- enabling"
731				   " work-around.\n");
732		printk(KERN_INFO "  Board assembly %4.4x%2.2x-%3.3d, Physical"
733			   " connectors present:",
734			   eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
735		for (i = 0; i < 4; i++)
736			if (eeprom[5] & (1<<i))
737				printk(connectors[i]);
738		printk("\n"KERN_INFO"  Primary interface chip %s PHY #%d.\n",
739			   phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
740		if (eeprom[7] & 0x0700)
741			printk(KERN_INFO "    Secondary interface chip %s.\n",
742				   phys[(eeprom[7]>>8)&7]);
743		if (((eeprom[6]>>8) & 0x3f) == DP83840
744			||  ((eeprom[6]>>8) & 0x3f) == DP83840A) {
745			int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
746			if (congenb)
747			  mdi_reg23 |= 0x0100;
748			printk(KERN_INFO"  DP83840 specific setup, setting register 23 to %4.4x.\n",
749				   mdi_reg23);
750			mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
751		}
752		if ((option >= 0) && (option & 0x70)) {
			printk(KERN_INFO "  Forcing %dMbps %s-duplex operation.\n",
754				   (option & 0x20 ? 100 : 10),
755				   (option & 0x10 ? "full" : "half"));
756			mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
757					   ((option & 0x20) ? 0x2000 : 0) | 	/* 100mbps? */
758					   ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
759		}
760
761		/* Perform a system self-test. */
762		self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
763		self_test_results[0] = 0;
764		self_test_results[1] = -1;
765		iowrite32(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
766		do {
767			udelay(10);
768		} while (self_test_results[1] == -1  &&  --boguscnt >= 0);
769
770		if (boguscnt < 0) {		/* Test optimized out. */
771			printk(KERN_ERR "Self test failed, status %8.8x:\n"
772				   KERN_ERR " Failure to initialize the i82557.\n"
				   KERN_ERR " Verify that the card is in a bus-master"
774				   " capable slot.\n",
775				   self_test_results[1]);
776		} else
777			printk(KERN_INFO "  General self-test: %s.\n"
778				   KERN_INFO "  Serial sub-system self-test: %s.\n"
779				   KERN_INFO "  Internal registers self-test: %s.\n"
780				   KERN_INFO "  ROM checksum self-test: %s (%#8.8x).\n",
781				   self_test_results[1] & 0x1000 ? "failed" : "passed",
782				   self_test_results[1] & 0x0020 ? "failed" : "passed",
783				   self_test_results[1] & 0x0008 ? "failed" : "passed",
784				   self_test_results[1] & 0x0004 ? "failed" : "passed",
785				   self_test_results[0]);
786	}
787
788	iowrite32(PortReset, ioaddr + SCBPort);
789	ioread32(ioaddr + SCBPort);
790	udelay(10);
791
792	/* Return the chip to its original power state. */
793	pci_set_power_state(pdev, acpi_idle_state);
794
795	pci_set_drvdata (pdev, dev);
796	SET_NETDEV_DEV(dev, &pdev->dev);
797
798	dev->irq = pdev->irq;
799
800	sp->pdev = pdev;
801	sp->msg_enable = DEBUG;
802	sp->acpi_pwr = acpi_idle_state;
803	sp->tx_ring = tx_ring_space;
804	sp->tx_ring_dma = tx_ring_dma;
805	sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
806	sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
807	init_timer(&sp->timer); /* used in ioctl() */
808	spin_lock_init(&sp->lock);
809
810	sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
811	if (card_idx >= 0) {
812		if (full_duplex[card_idx] >= 0)
813			sp->mii_if.full_duplex = full_duplex[card_idx];
814	}
815	sp->default_port = option >= 0 ? (option & 0x0f) : 0;
816
817	sp->phy[0] = eeprom[6];
818	sp->phy[1] = eeprom[7];
819
820	sp->mii_if.phy_id = eeprom[6] & 0x1f;
821	sp->mii_if.phy_id_mask = 0x1f;
822	sp->mii_if.reg_num_mask = 0x1f;
823	sp->mii_if.dev = dev;
824	sp->mii_if.mdio_read = mdio_read;
825	sp->mii_if.mdio_write = mdio_write;
826
827	sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
	if ((pdev->device > 0x1030 && pdev->device < 0x103F)
	    || pdev->device == 0x2449 || pdev->device == 0x2459
	    || pdev->device == 0x245D) {
		sp->chip_id = 1;
	}
833
834	if (sp->rx_bug)
835		printk(KERN_INFO "  Receiver lock-up workaround activated.\n");
836
837	/* The Speedo-specific entries in the device structure. */
838	dev->open = &speedo_open;
839	dev->hard_start_xmit = &speedo_start_xmit;
840	netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
841	dev->stop = &speedo_close;
842	dev->get_stats = &speedo_get_stats;
843	dev->set_multicast_list = &set_rx_mode;
844	dev->do_ioctl = &speedo_ioctl;
845	SET_ETHTOOL_OPS(dev, &ethtool_ops);
846#ifdef CONFIG_NET_POLL_CONTROLLER
847	dev->poll_controller = &poll_speedo;
848#endif
849
850	if (register_netdevice(dev))
851		goto err_free_unlock;
852	rtnl_unlock();
853
854	return 0;
855
856 err_free_unlock:
857	rtnl_unlock();
858	free_netdev(dev);
859	return -1;
860}
861
862static void do_slow_command(struct net_device *dev, struct speedo_private *sp, int cmd)
863{
864	void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
865	int wait = 0;
866	do
867		if (ioread8(cmd_ioaddr) == 0) break;
868	while(++wait <= 200);
869	if (wait > 100)
870		printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
871		       ioread8(cmd_ioaddr), wait);
872
873	iowrite8(cmd, cmd_ioaddr);
874
875	for (wait = 0; wait <= 100; wait++)
876		if (ioread8(cmd_ioaddr) == 0) return;
877	for (; wait <= 20000; wait++)
878		if (ioread8(cmd_ioaddr) == 0) return;
879		else udelay(1);
880	printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
881	       "  Current status %8.8x.\n",
882	       cmd, wait, ioread32(sp->regs + SCBStatus));
883}
884
885/* Serial EEPROM section.
886   A "bit" grungy, but we work our way through bit-by-bit :->. */
887/*  EEPROM_Ctrl bits. */
888#define EE_SHIFT_CLK	0x01	/* EEPROM shift clock. */
889#define EE_CS			0x02	/* EEPROM chip select. */
890#define EE_DATA_WRITE	0x04	/* EEPROM chip data in. */
891#define EE_DATA_READ	0x08	/* EEPROM chip data out. */
892#define EE_ENB			(0x4800 | EE_CS)
893#define EE_WRITE_0		0x4802
894#define EE_WRITE_1		0x4806
895#define EE_OFFSET		SCBeeprom
896
897/* The fixes for the code were kindly provided by Dragan Stancevic
898   <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
899   access timing.
900   The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
   interval for serial EEPROM.  However, it looks like there is an
   additional requirement dictating larger udelays in the code below.
903   2000/05/24  SAW */
904static int __devinit do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len)
905{
906	unsigned retval = 0;
907	void __iomem *ee_addr = ioaddr + SCBeeprom;
908
909	iowrite16(EE_ENB, ee_addr); udelay(2);
910	iowrite16(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
911
912	/* Shift the command bits out. */
913	do {
914		short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
915		iowrite16(dataval, ee_addr); udelay(2);
916		iowrite16(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
917		retval = (retval << 1) | ((ioread16(ee_addr) & EE_DATA_READ) ? 1 : 0);
918	} while (--cmd_len >= 0);
919	iowrite16(EE_ENB, ee_addr); udelay(2);
920
921	/* Terminate the EEPROM access. */
922	iowrite16(EE_ENB & ~EE_CS, ee_addr);
923	return retval;
924}
925
926static int mdio_read(struct net_device *dev, int phy_id, int location)
927{
928	struct speedo_private *sp = netdev_priv(dev);
929	void __iomem *ioaddr = sp->regs;
930	int val, boguscnt = 64*10;		/* <64 usec. to complete, typ 27 ticks */
931	iowrite32(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
932	do {
933		val = ioread32(ioaddr + SCBCtrlMDI);
934		if (--boguscnt < 0) {
935			printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
936			break;
937		}
938	} while (! (val & 0x10000000));
939	return val & 0xffff;
940}
941
942static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
943{
944	struct speedo_private *sp = netdev_priv(dev);
945	void __iomem *ioaddr = sp->regs;
946	int val, boguscnt = 64*10;		/* <64 usec. to complete, typ 27 ticks */
947	iowrite32(0x04000000 | (location<<16) | (phy_id<<21) | value,
948		 ioaddr + SCBCtrlMDI);
949	do {
950		val = ioread32(ioaddr + SCBCtrlMDI);
951		if (--boguscnt < 0) {
952			printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
953			break;
954		}
955	} while (! (val & 0x10000000));
956}
957
958static int
959speedo_open(struct net_device *dev)
960{
961	struct speedo_private *sp = netdev_priv(dev);
962	void __iomem *ioaddr = sp->regs;
963	int retval;
964
965	if (netif_msg_ifup(sp))
966		printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
967
968	pci_set_power_state(sp->pdev, PCI_D0);
969
970	/* Set up the Tx queue early.. */
971	sp->cur_tx = 0;
972	sp->dirty_tx = 0;
973	sp->last_cmd = NULL;
974	sp->tx_full = 0;
975	sp->in_interrupt = 0;
976
977	/* .. we can safely take handler calls during init. */
978	retval = request_irq(dev->irq, &speedo_interrupt, IRQF_SHARED, dev->name, dev);
979	if (retval) {
980		return retval;
981	}
982
983	dev->if_port = sp->default_port;
984
985#ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
986	/* Retrigger negotiation to reset previous errors. */
987	if ((sp->phy[0] & 0x8000) == 0) {
988		int phy_addr = sp->phy[0] & 0x1f ;
989		/* Use 0x3300 for restarting NWay, other values to force xcvr:
990		   0x0000 10-HD
991		   0x0100 10-FD
992		   0x2000 100-HD
993		   0x2100 100-FD
994		*/
995#ifdef honor_default_port
996		mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
997#else
998		mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
999#endif
1000	}
1001#endif
1002
1003	speedo_init_rx_ring(dev);
1004
1005	/* Fire up the hardware. */
1006	iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1007	speedo_resume(dev);
1008
1009	netdevice_start(dev);
1010	netif_start_queue(dev);
1011
1012	/* Setup the chip and configure the multicast list. */
1013	sp->mc_setup_head = NULL;
1014	sp->mc_setup_tail = NULL;
1015	sp->flow_ctrl = sp->partner = 0;
1016	sp->rx_mode = -1;			/* Invalid -> always reset the mode. */
1017	set_rx_mode(dev);
1018	if ((sp->phy[0] & 0x8000) == 0)
1019		sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);
1020
1021	mii_check_link(&sp->mii_if);
1022
1023	if (netif_msg_ifup(sp)) {
1024		printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
1025			   dev->name, ioread16(ioaddr + SCBStatus));
1026	}
1027
1028	/* Set the timer.  The timer serves a dual purpose:
1029	   1) to monitor the media interface (e.g. link beat) and perhaps switch
1030	   to an alternate media type
1031	   2) to monitor Rx activity, and restart the Rx process if the receiver
1032	   hangs. */
1033	sp->timer.expires = RUN_AT((24*HZ)/10); 			/* 2.4 sec. */
1034	sp->timer.data = (unsigned long)dev;
1035	sp->timer.function = &speedo_timer;					/* timer handler */
1036	add_timer(&sp->timer);
1037
1038	/* No need to wait for the command unit to accept here. */
1039	if ((sp->phy[0] & 0x8000) == 0)
1040		mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);
1041
1042	return 0;
1043}
1044
1045/* Start the chip hardware after a full reset. */
1046static void speedo_resume(struct net_device *dev)
1047{
1048	struct speedo_private *sp = netdev_priv(dev);
1049	void __iomem *ioaddr = sp->regs;
1050
1051	/* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
1052	sp->tx_threshold = 0x01208000;
1053
1054	/* Set the segment registers to '0'. */
1055	if (wait_for_cmd_done(dev, sp) != 0) {
1056		iowrite32(PortPartialReset, ioaddr + SCBPort);
1057		udelay(10);
1058	}
1059
	iowrite32(0, ioaddr + SCBPointer);
	ioread32(ioaddr + SCBPointer);			/* Flush to PCI. */
	udelay(10);			/* Bogus, but it avoids the bug. */

	/* Note: these next two operations can take a while. */
	do_slow_command(dev, sp, RxAddrLoad);
	do_slow_command(dev, sp, CUCmdBase);
1067
1068	/* Load the statistics block and rx ring addresses. */
1069	iowrite32(sp->lstats_dma, ioaddr + SCBPointer);
1070	ioread32(ioaddr + SCBPointer);			/* Flush to PCI */
1071
1072	iowrite8(CUStatsAddr, ioaddr + SCBCmd);
1073	sp->lstats->done_marker = 0;
1074	wait_for_cmd_done(dev, sp);
1075
1076	if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
1077		if (netif_msg_rx_err(sp))
1078			printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
1079					dev->name);
1080	} else {
1081		iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1082			 ioaddr + SCBPointer);
1083		ioread32(ioaddr + SCBPointer);		/* Flush to PCI */
1084	}
1085
1086	/* Note: RxStart should complete instantly. */
1087	do_slow_command(dev, sp, RxStart);
1088	do_slow_command(dev, sp, CUDumpStats);
1089
1090	/* Fill the first command with our physical address. */
1091	{
1092		struct descriptor *ias_cmd;
1093
1094		ias_cmd =
1095			(struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
1096		/* Avoid a bug(?!) here by marking the command already completed. */
1097		ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
1098		ias_cmd->link =
1099			cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1100		memcpy(ias_cmd->params, dev->dev_addr, 6);
1101		if (sp->last_cmd)
1102			clear_suspend(sp->last_cmd);
1103		sp->last_cmd = ias_cmd;
1104	}
1105
1106	/* Start the chip's Tx process and unmask interrupts. */
1107	iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
1108		 ioaddr + SCBPointer);
1109	/* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
1110	   remain masked --Dragan */
1111	iowrite16(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
1112}
1113
1114/*
1115 * Sometimes the receiver stops making progress.  This routine knows how to
1116 * get it going again, without losing packets or being otherwise nasty like
1117 * a chip reset would be.  Previously the driver had a whole sequence
1118 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
1119 * do another, etc.  But those things don't really matter.  Separate logic
1120 * in the ISR provides for allocating buffers--the other half of operation
1121 * is just making sure the receiver is active.  speedo_rx_soft_reset does that.
 * The problem with the old, more involved algorithm shows up under
1123 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
1124 */
1125static void
1126speedo_rx_soft_reset(struct net_device *dev)
1127{
1128	struct speedo_private *sp = netdev_priv(dev);
1129	struct RxFD *rfd;
1130	void __iomem *ioaddr;
1131
1132	ioaddr = sp->regs;
1133	if (wait_for_cmd_done(dev, sp) != 0) {
1134		printk("%s: previous command stalled\n", dev->name);
1135		return;
1136	}
1137	/*
1138	* Put the hardware into a known state.
1139	*/
1140	iowrite8(RxAbort, ioaddr + SCBCmd);
1141
1142	rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
1143
1144	rfd->rx_buf_addr = 0xffffffff;
1145
1146	if (wait_for_cmd_done(dev, sp) != 0) {
1147		printk("%s: RxAbort command stalled\n", dev->name);
1148		return;
1149	}
1150	iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1151		ioaddr + SCBPointer);
1152	iowrite8(RxStart, ioaddr + SCBCmd);
1153}
1154
1155
1156/* Media monitoring and control. */
1157static void speedo_timer(unsigned long data)
1158{
1159	struct net_device *dev = (struct net_device *)data;
1160	struct speedo_private *sp = netdev_priv(dev);
1161	void __iomem *ioaddr = sp->regs;
1162	int phy_num = sp->phy[0] & 0x1f;
1163
1164	/* We have MII and lost link beat. */
1165	if ((sp->phy[0] & 0x8000) == 0) {
1166		int partner = mdio_read(dev, phy_num, MII_LPA);
1167		if (partner != sp->partner) {
1168			int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
1169			if (netif_msg_link(sp)) {
1170				printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
1171				printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
1172					   dev->name, sp->partner, partner, sp->mii_if.advertising);
1173			}
1174			sp->partner = partner;
1175			if (flow_ctrl != sp->flow_ctrl) {
1176				sp->flow_ctrl = flow_ctrl;
1177				sp->rx_mode = -1;	/* Trigger a reload. */
1178			}
1179		}
1180	}
1181	mii_check_link(&sp->mii_if);
1182	if (netif_msg_timer(sp)) {
1183		printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
1184			   dev->name, ioread16(ioaddr + SCBStatus));
1185	}
1186	if (sp->rx_mode < 0  ||
1187		(sp->rx_bug  && jiffies - sp->last_rx_time > 2*HZ)) {
1188		/* We haven't received a packet in a Long Time.  We might have been
1189		   bitten by the receiver hang bug.  This can be cleared by sending
1190		   a set multicast list command. */
1191		if (netif_msg_timer(sp))
1192			printk(KERN_DEBUG "%s: Sending a multicast list set command"
1193				   " from a timer routine,"
1194				   " m=%d, j=%ld, l=%ld.\n",
1195				   dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
1196		set_rx_mode(dev);
1197	}
1198	/* We must continue to monitor the media. */
1199	sp->timer.expires = RUN_AT(2*HZ); 			/* 2.0 sec. */
1200	add_timer(&sp->timer);
1201}
1202
1203static void speedo_show_state(struct net_device *dev)
1204{
1205	struct speedo_private *sp = netdev_priv(dev);
1206	int i;
1207
1208	if (netif_msg_pktdata(sp)) {
1209		printk(KERN_DEBUG "%s: Tx ring dump,  Tx queue %u / %u:\n",
1210		    dev->name, sp->cur_tx, sp->dirty_tx);
1211		for (i = 0; i < TX_RING_SIZE; i++)
1212			printk(KERN_DEBUG "%s:  %c%c%2d %8.8x.\n", dev->name,
1213			    i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
1214			    i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
1215			    i, sp->tx_ring[i].status);
1216
1217		printk(KERN_DEBUG "%s: Printing Rx ring"
1218		    " (next to receive into %u, dirty index %u).\n",
1219		    dev->name, sp->cur_rx, sp->dirty_rx);
1220		for (i = 0; i < RX_RING_SIZE; i++)
1221			printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
1222			    sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
1223			    i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
1224			    i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
1225			    i, (sp->rx_ringp[i] != NULL) ?
1226			    (unsigned)sp->rx_ringp[i]->status : 0);
1227	}
1228
1229
1230}
1231
1232/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1233static void
1234speedo_init_rx_ring(struct net_device *dev)
1235{
1236	struct speedo_private *sp = netdev_priv(dev);
1237	struct RxFD *rxf, *last_rxf = NULL;
1238	dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
1239	int i;
1240
1241	sp->cur_rx = 0;
1242
1243	for (i = 0; i < RX_RING_SIZE; i++) {
1244		struct sk_buff *skb;
1245		skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1246		if (skb)
1247			rx_align(skb);        /* Align IP on 16 byte boundary */
1248		sp->rx_skbuff[i] = skb;
1249		if (skb == NULL)
1250			break;			/* OK.  Just initially short of Rx bufs. */
1251		skb->dev = dev;			/* Mark as being used by this device. */
1252		rxf = (struct RxFD *)skb->data;
1253		sp->rx_ringp[i] = rxf;
1254		sp->rx_ring_dma[i] =
1255			pci_map_single(sp->pdev, rxf,
1256					PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
1257		skb_reserve(skb, sizeof(struct RxFD));
1258		if (last_rxf) {
1259			last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
1260			pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
1261										   sizeof(struct RxFD), PCI_DMA_TODEVICE);
1262		}
1263		last_rxf = rxf;
1264		last_rxf_dma = sp->rx_ring_dma[i];
1265		rxf->status = cpu_to_le32(0x00000001);	/* '1' is flag value only. */
1266		rxf->link = 0;						/* None yet. */
1267		/* This field unused by i82557. */
1268		rxf->rx_buf_addr = 0xffffffff;
1269		rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1270		pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
1271									   sizeof(struct RxFD), PCI_DMA_TODEVICE);
1272	}
1273	sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1274	/* Mark the last entry as end-of-list. */
1275	last_rxf->status = cpu_to_le32(0xC0000002);	/* '2' is flag value only. */
1276	pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
1277								   sizeof(struct RxFD), PCI_DMA_TODEVICE);
1278	sp->last_rxf = last_rxf;
1279	sp->last_rxf_dma = last_rxf_dma;
1280}
1281
1282static void speedo_purge_tx(struct net_device *dev)
1283{
1284	struct speedo_private *sp = netdev_priv(dev);
1285	int entry;
1286
1287	while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
1288		entry = sp->dirty_tx % TX_RING_SIZE;
1289		if (sp->tx_skbuff[entry]) {
1290			sp->stats.tx_errors++;
1291			pci_unmap_single(sp->pdev,
1292					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1293					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1294			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1295			sp->tx_skbuff[entry] = NULL;
1296		}
1297		sp->dirty_tx++;
1298	}
1299	while (sp->mc_setup_head != NULL) {
1300		struct speedo_mc_block *t;
1301		if (netif_msg_tx_err(sp))
1302			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1303		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1304				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1305		t = sp->mc_setup_head->next;
1306		kfree(sp->mc_setup_head);
1307		sp->mc_setup_head = t;
1308	}
1309	sp->mc_setup_tail = NULL;
1310	sp->tx_full = 0;
1311	netif_wake_queue(dev);
1312}
1313
1314static void reset_mii(struct net_device *dev)
1315{
1316	struct speedo_private *sp = netdev_priv(dev);
1317
1318	/* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
1319	if ((sp->phy[0] & 0x8000) == 0) {
1320		int phy_addr = sp->phy[0] & 0x1f;
1321		int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
1322		int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
1323		mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
1324		mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
1325		mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
1326		mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
1327#ifdef honor_default_port
1328		mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1329#else
1330		mdio_read(dev, phy_addr, MII_BMCR);
1331		mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
1332		mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
1333#endif
1334	}
1335}
1336
1337static void speedo_tx_timeout(struct net_device *dev)
1338{
1339	struct speedo_private *sp = netdev_priv(dev);
1340	void __iomem *ioaddr = sp->regs;
1341	int status = ioread16(ioaddr + SCBStatus);
1342	unsigned long flags;
1343
1344	if (netif_msg_tx_err(sp)) {
1345		printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
1346		   " %4.4x at %d/%d command %8.8x.\n",
1347		   dev->name, status, ioread16(ioaddr + SCBCmd),
1348		   sp->dirty_tx, sp->cur_tx,
1349		   sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
1350
1351	}
1352	speedo_show_state(dev);
1353	{
1354		del_timer_sync(&sp->timer);
1355		/* Reset the Tx and Rx units. */
1356		iowrite32(PortReset, ioaddr + SCBPort);
		/* We may get spurious interrupts here.  But I don't think they
		   can do much harm.  1999/12/09 SAW */
1359		udelay(10);
1360		/* Disable interrupts. */
1361		iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1362		synchronize_irq(dev->irq);
1363		speedo_tx_buffer_gc(dev);
		/* Free as much as possible.
		   It helps to recover from a hang caused by running out of memory.
		   It also simplifies speedo_resume() in case the TX ring is full or
		   nearly full. */
1368		speedo_purge_tx(dev);
1369		speedo_refill_rx_buffers(dev, 1);
1370		spin_lock_irqsave(&sp->lock, flags);
1371		speedo_resume(dev);
1372		sp->rx_mode = -1;
1373		dev->trans_start = jiffies;
1374		spin_unlock_irqrestore(&sp->lock, flags);
1375		set_rx_mode(dev); /* it takes the spinlock itself --SAW */
1376		/* Reset MII transceiver.  Do it before starting the timer to serialize
		   mdio_xxx operations.  Yes, it's paranoia :-)  2000/05/09 SAW */
1378		reset_mii(dev);
1379		sp->timer.expires = RUN_AT(2*HZ);
1380		add_timer(&sp->timer);
1381	}
1382	return;
1383}
1384
1385static int
1386speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
1387{
1388	struct speedo_private *sp = netdev_priv(dev);
1389	void __iomem *ioaddr = sp->regs;
1390	int entry;
1391
1392	/* Prevent interrupts from changing the Tx ring from underneath us. */
1393	unsigned long flags;
1394
1395	spin_lock_irqsave(&sp->lock, flags);
1396
	/* Check if there is enough space. */
1398	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1399		printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
1400		netif_stop_queue(dev);
1401		sp->tx_full = 1;
1402		spin_unlock_irqrestore(&sp->lock, flags);
1403		return 1;
1404	}
1405
1406	/* Calculate the Tx descriptor entry. */
1407	entry = sp->cur_tx++ % TX_RING_SIZE;
1408
1409	sp->tx_skbuff[entry] = skb;
1410	sp->tx_ring[entry].status =
1411		cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
1412	if (!(entry & ((TX_RING_SIZE>>2)-1)))
1413		sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
1414	sp->tx_ring[entry].link =
1415		cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1416	sp->tx_ring[entry].tx_desc_addr =
1417		cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
1418	/* The data region is always in one buffer descriptor. */
1419	sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
1420	sp->tx_ring[entry].tx_buf_addr0 =
1421		cpu_to_le32(pci_map_single(sp->pdev, skb->data,
1422					   skb->len, PCI_DMA_TODEVICE));
1423	sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
1424
1425
1426	if ((sp->partner == 0) && (sp->chip_id == 1)) {
1427		wait_for_cmd_done(dev, sp);
1428		iowrite8(0 , ioaddr + SCBCmd);
1429		udelay(1);
1430	}
1431
1432	/* Trigger the command unit resume. */
1433	wait_for_cmd_done(dev, sp);
1434	clear_suspend(sp->last_cmd);
1435	/* We want the time window between clearing suspend flag on the previous
1436	   command and resuming CU to be as small as possible.
1437	   Interrupts in between are very undesired.  --SAW */
1438	iowrite8(CUResume, ioaddr + SCBCmd);
1439	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
1440
	/* Leave room for set_rx_mode(). If there is no more space than is reserved
	   for the multicast filter, mark the ring as full. */
1443	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1444		netif_stop_queue(dev);
1445		sp->tx_full = 1;
1446	}
1447
1448	spin_unlock_irqrestore(&sp->lock, flags);
1449
1450	dev->trans_start = jiffies;
1451
1452	return 0;
1453}
1454
1455static void speedo_tx_buffer_gc(struct net_device *dev)
1456{
1457	unsigned int dirty_tx;
1458	struct speedo_private *sp = netdev_priv(dev);
1459
1460	dirty_tx = sp->dirty_tx;
1461	while ((int)(sp->cur_tx - dirty_tx) > 0) {
1462		int entry = dirty_tx % TX_RING_SIZE;
1463		int status = le32_to_cpu(sp->tx_ring[entry].status);
1464
1465		if (netif_msg_tx_done(sp))
1466			printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
1467				   entry, status);
1468		if ((status & StatusComplete) == 0)
1469			break;			/* It still hasn't been processed. */
1470		if (status & TxUnderrun)
1471			if (sp->tx_threshold < 0x01e08000) {
1472				if (netif_msg_tx_err(sp))
1473					printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
1474						   dev->name);
1475				sp->tx_threshold += 0x00040000;
1476			}
1477		/* Free the original skb. */
1478		if (sp->tx_skbuff[entry]) {
1479			sp->stats.tx_packets++;	/* Count only user packets. */
1480			sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
1481			pci_unmap_single(sp->pdev,
1482					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1483					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1484			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1485			sp->tx_skbuff[entry] = NULL;
1486		}
1487		dirty_tx++;
1488	}
1489
1490	if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
1491		printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
1492			   " full=%d.\n",
1493			   dirty_tx, sp->cur_tx, sp->tx_full);
1494		dirty_tx += TX_RING_SIZE;
1495	}
1496
1497	while (sp->mc_setup_head != NULL
1498		   && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
1499		struct speedo_mc_block *t;
1500		if (netif_msg_tx_err(sp))
1501			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1502		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1503				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1504		t = sp->mc_setup_head->next;
1505		kfree(sp->mc_setup_head);
1506		sp->mc_setup_head = t;
1507	}
1508	if (sp->mc_setup_head == NULL)
1509		sp->mc_setup_tail = NULL;
1510
1511	sp->dirty_tx = dirty_tx;
1512}
1513
1514/* The interrupt handler does all of the Rx thread work and cleans up
1515   after the Tx thread. */
1516static irqreturn_t speedo_interrupt(int irq, void *dev_instance)
1517{
1518	struct net_device *dev = (struct net_device *)dev_instance;
1519	struct speedo_private *sp;
1520	void __iomem *ioaddr;
1521	long boguscnt = max_interrupt_work;
1522	unsigned short status;
1523	unsigned int handled = 0;
1524
1525	sp = netdev_priv(dev);
1526	ioaddr = sp->regs;
1527
1528#ifndef final_version
1529	/* A lock to prevent simultaneous entry on SMP machines. */
1530	if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
		printk(KERN_ERR "%s: SMP simultaneous entry of an interrupt handler.\n",
1532			   dev->name);
1533		sp->in_interrupt = 0;	/* Avoid halting machine. */
1534		return IRQ_NONE;
1535	}
1536#endif
1537
1538	do {
1539		status = ioread16(ioaddr + SCBStatus);
1540		/* Acknowledge all of the current interrupt sources ASAP. */
1541		/* Will change from 0xfc00 to 0xff00 when we start handling
1542		   FCP and ER interrupts --Dragan */
1543		iowrite16(status & 0xfc00, ioaddr + SCBStatus);
1544
1545		if (netif_msg_intr(sp))
1546			printk(KERN_DEBUG "%s: interrupt  status=%#4.4x.\n",
1547				   dev->name, status);
1548
1549		if ((status & 0xfc00) == 0)
1550			break;
1551		handled = 1;
1552
1554		if ((status & 0x5000) ||	/* Packet received, or Rx error. */
1555			(sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
1556									/* Need to gather the postponed packet. */
1557			speedo_rx(dev);
1558
1559		/* Always check if all rx buffers are allocated.  --SAW */
1560		speedo_refill_rx_buffers(dev, 0);
1561
1562		spin_lock(&sp->lock);
1563		/*
1564		 * The chip may have suspended reception for various reasons.
1565		 * Check for that, and re-prime it should this be the case.
1566		 */
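		/* Bits 2..5 of the SCB status word hold the receive unit (RU) state. */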
1567		switch ((status >> 2) & 0xf) {
1568		case 0: /* Idle */
1569			break;
1570		case 1:	/* Suspended */
1571		case 2:	/* No resources (RxFDs) */
1572		case 9:	/* Suspended with no more RBDs */
1573		case 10: /* No resources due to no RBDs */
1574		case 12: /* Ready with no RBDs */
1575			speedo_rx_soft_reset(dev);
1576			break;
1577		case 3:  case 5:  case 6:  case 7:  case 8:
1578		case 11:  case 13:  case 14:  case 15:
1579			/* these are all reserved values */
1580			break;
1581		}
1582
1584		/* User interrupt, Command/Tx unit interrupt or CU not active. */
1585		if (status & 0xA400) {
1586			speedo_tx_buffer_gc(dev);
1587			if (sp->tx_full
1588				&& (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
1589				/* The ring is no longer full. */
1590				sp->tx_full = 0;
1591				netif_wake_queue(dev); /* Attention: under a spinlock.  --SAW */
1592			}
1593		}
1594
1595		spin_unlock(&sp->lock);
1596
1597		if (--boguscnt < 0) {
1598			printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1599				   dev->name, status);
1600			/* Clear all interrupt sources. */
1601			/* Will change from 0xfc00 to 0xff00 when we start handling
1602			   FCP and ER interrupts --Dragan */
1603			iowrite16(0xfc00, ioaddr + SCBStatus);
1604			break;
1605		}
1606	} while (1);
1607
1608	if (netif_msg_intr(sp))
1609		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1610			   dev->name, ioread16(ioaddr + SCBStatus));
1611
1612	clear_bit(0, (void*)&sp->in_interrupt);
1613	return IRQ_RETVAL(handled);
1614}
1615
1616static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1617{
1618	struct speedo_private *sp = netdev_priv(dev);
1619	struct RxFD *rxf;
1620	struct sk_buff *skb;
1621	/* Get a fresh skbuff to replace the consumed one. */
1622	skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1623	if (skb)
1624		rx_align(skb);		/* Align IP on 16 byte boundary */
1625	sp->rx_skbuff[entry] = skb;
1626	if (skb == NULL) {
1627		sp->rx_ringp[entry] = NULL;
1628		return NULL;
1629	}
1630	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
1631	sp->rx_ring_dma[entry] =
1632		pci_map_single(sp->pdev, rxf,
1633					   PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1634	skb->dev = dev;
1635	skb_reserve(skb, sizeof(struct RxFD));
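	/* An all-ones RBD pointer tells the chip that no separate receive buffer
	   descriptor follows; in this simplified layout the packet data is placed
	   directly after the RxFD header. */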
1636	rxf->rx_buf_addr = 0xffffffff;
1637	pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1638								   sizeof(struct RxFD), PCI_DMA_TODEVICE);
1639	return rxf;
1640}
1641
1642static inline void speedo_rx_link(struct net_device *dev, int entry,
1643								  struct RxFD *rxf, dma_addr_t rxf_dma)
1644{
1645	struct speedo_private *sp = netdev_priv(dev);
1646	rxf->status = cpu_to_le32(0xC0000001); 	/* '1' for driver use only. */
1647	rxf->link = 0;			/* None yet. */
1648	rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1649	sp->last_rxf->link = cpu_to_le32(rxf_dma);
1650	sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
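	/* Clearing the EL and S bits (0xC0000000) on the previously last RxFD
	   allows the receive unit to advance into the newly linked descriptor. */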
1651	pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
1652								   sizeof(struct RxFD), PCI_DMA_TODEVICE);
1653	sp->last_rxf = rxf;
1654	sp->last_rxf_dma = rxf_dma;
1655}
1656
1657static int speedo_refill_rx_buf(struct net_device *dev, int force)
1658{
1659	struct speedo_private *sp = netdev_priv(dev);
1660	int entry;
1661	struct RxFD *rxf;
1662
1663	entry = sp->dirty_rx % RX_RING_SIZE;
1664	if (sp->rx_skbuff[entry] == NULL) {
1665		rxf = speedo_rx_alloc(dev, entry);
1666		if (rxf == NULL) {
1667			unsigned int forw;
1668			int forw_entry;
1669			if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
1670				printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
1671						dev->name, force);
1672				sp->rx_ring_state |= RrOOMReported;
1673			}
1674			speedo_show_state(dev);
1675			if (!force)
1676				return -1;	/* Better luck next time!  */
1677			/* Borrow an skb from one of next entries. */
1678			for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
1679				if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
1680					break;
1681			if (forw == sp->cur_rx)
1682				return -1;
1683			forw_entry = forw % RX_RING_SIZE;
1684			sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
1685			sp->rx_skbuff[forw_entry] = NULL;
1686			rxf = sp->rx_ringp[forw_entry];
1687			sp->rx_ringp[forw_entry] = NULL;
1688			sp->rx_ringp[entry] = rxf;
1689		}
1690	} else {
1691		rxf = sp->rx_ringp[entry];
1692	}
1693	speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
1694	sp->dirty_rx++;
1695	sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
1696	return 0;
1697}
1698
1699static void speedo_refill_rx_buffers(struct net_device *dev, int force)
1700{
1701	struct speedo_private *sp = netdev_priv(dev);
1702
1703	/* Refill the RX ring. */
	while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
			speedo_refill_rx_buf(dev, force) != -1)
		;
1706}
1707
1708static int
1709speedo_rx(struct net_device *dev)
1710{
1711	struct speedo_private *sp = netdev_priv(dev);
1712	int entry = sp->cur_rx % RX_RING_SIZE;
1713	int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
1714	int alloc_ok = 1;
1715	int npkts = 0;
1716
1717	if (netif_msg_intr(sp))
1718		printk(KERN_DEBUG " In speedo_rx().\n");
1719	/* If we own the next entry, it's a new packet. Send it up. */
1720	while (sp->rx_ringp[entry] != NULL) {
1721		int status;
1722		int pkt_len;
1723
1724		pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1725									sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1726		status = le32_to_cpu(sp->rx_ringp[entry]->status);
1727		pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
1728
1729		if (!(status & RxComplete))
1730			break;
1731
1732		if (--rx_work_limit < 0)
1733			break;
1734
1735		/* Check for a rare out-of-memory case: the current buffer is
1736		   the last buffer allocated in the RX ring.  --SAW */
1737		if (sp->last_rxf == sp->rx_ringp[entry]) {
1738			/* Postpone the packet.  It'll be reaped at an interrupt when this
1739			   packet is no longer the last packet in the ring. */
1740			if (netif_msg_rx_err(sp))
1741				printk(KERN_DEBUG "%s: RX packet postponed!\n",
1742					   dev->name);
1743			sp->rx_ring_state |= RrPostponed;
1744			break;
1745		}
1746
1747		if (netif_msg_rx_status(sp))
1748			printk(KERN_DEBUG "  speedo_rx() status %8.8x len %d.\n", status,
1749				   pkt_len);
1750		if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
1751			if (status & RxErrTooBig)
1752				printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
1753					   "status %8.8x!\n", dev->name, status);
1754			else if (! (status & RxOK)) {
1755				/* There was a fatal error.  This *should* be impossible. */
1756				sp->stats.rx_errors++;
1757				printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
1758					   "status %8.8x.\n",
1759					   dev->name, status);
1760			}
1761		} else {
1762			struct sk_buff *skb;
1763
1764			/* Check if the packet is long enough to just accept without
1765			   copying to a properly sized skbuff. */
1766			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1768				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1769				/* 'skb_put()' points to the start of sk_buff data area. */
1770				pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1771											sizeof(struct RxFD) + pkt_len,
1772											PCI_DMA_FROMDEVICE);
1773
1774				/* Packet is in one chunk -- we can copy + cksum. */
1775				eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
1776				skb_put(skb, pkt_len);
1777				pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1778											   sizeof(struct RxFD) + pkt_len,
1779											   PCI_DMA_FROMDEVICE);
1780				npkts++;
1781			} else {
1782				/* Pass up the already-filled skbuff. */
1783				skb = sp->rx_skbuff[entry];
1784				if (skb == NULL) {
1785					printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1786						   dev->name);
1787					break;
1788				}
1789				sp->rx_skbuff[entry] = NULL;
1790				skb_put(skb, pkt_len);
1791				npkts++;
1792				sp->rx_ringp[entry] = NULL;
1793				pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
1794								 PKT_BUF_SZ + sizeof(struct RxFD),
1795								 PCI_DMA_FROMDEVICE);
1796			}
1797			skb->protocol = eth_type_trans(skb, dev);
1798			netif_rx(skb);
1799			dev->last_rx = jiffies;
1800			sp->stats.rx_packets++;
1801			sp->stats.rx_bytes += pkt_len;
1802		}
1803		entry = (++sp->cur_rx) % RX_RING_SIZE;
1804		sp->rx_ring_state &= ~RrPostponed;
1805		/* Refill the recently taken buffers.
1806		   Do it one-by-one to handle traffic bursts better. */
1807		if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
1808			alloc_ok = 0;
1809	}
1810
1811	/* Try hard to refill the recently taken buffers. */
1812	speedo_refill_rx_buffers(dev, 1);
1813
1814	if (npkts)
1815		sp->last_rx_time = jiffies;
1816
1817	return 0;
1818}
1819
1820static int
1821speedo_close(struct net_device *dev)
1822{
1823	struct speedo_private *sp = netdev_priv(dev);
1824	void __iomem *ioaddr = sp->regs;
1825	int i;
1826
1827	netdevice_stop(dev);
1828	netif_stop_queue(dev);
1829
1830	if (netif_msg_ifdown(sp))
1831		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1832			   dev->name, ioread16(ioaddr + SCBStatus));
1833
1834	/* Shut off the media monitoring timer. */
1835	del_timer_sync(&sp->timer);
1836
1837	iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1838
1839	/* Shutting down the chip nicely fails to disable flow control. So.. */
1840	iowrite32(PortPartialReset, ioaddr + SCBPort);
1841	ioread32(ioaddr + SCBPort); /* flush posted write */
1842	/*
1843	 * The chip requires a 10 microsecond quiet period.  Wait here!
1844	 */
1845	udelay(10);
1846
1847	free_irq(dev->irq, dev);
1848	speedo_show_state(dev);
1849
	/* Free all the skbuffs in the Rx and Tx queues. */
1851	for (i = 0; i < RX_RING_SIZE; i++) {
1852		struct sk_buff *skb = sp->rx_skbuff[i];
1853		sp->rx_skbuff[i] = NULL;
1854		/* Clear the Rx descriptors. */
1855		if (skb) {
1856			pci_unmap_single(sp->pdev,
1857					 sp->rx_ring_dma[i],
1858					 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1859			dev_kfree_skb(skb);
1860		}
1861	}
1862
1863	for (i = 0; i < TX_RING_SIZE; i++) {
1864		struct sk_buff *skb = sp->tx_skbuff[i];
1865		sp->tx_skbuff[i] = NULL;
1866		/* Clear the Tx descriptors. */
1867		if (skb) {
1868			pci_unmap_single(sp->pdev,
1869					 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
1870					 skb->len, PCI_DMA_TODEVICE);
1871			dev_kfree_skb(skb);
1872		}
1873	}
1874
1875	/* Free multicast setting blocks. */
1876	for (i = 0; sp->mc_setup_head != NULL; i++) {
1877		struct speedo_mc_block *t;
1878		t = sp->mc_setup_head->next;
1879		kfree(sp->mc_setup_head);
1880		sp->mc_setup_head = t;
1881	}
1882	sp->mc_setup_tail = NULL;
1883	if (netif_msg_ifdown(sp))
1884		printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
1885
1886	pci_set_power_state(sp->pdev, PCI_D2);
1887
1888	return 0;
1889}
1890
/* The Speedo-3 has an especially awkward and unwieldy method of getting
   statistics out of the chip.  It takes an unpredictable length of time
   for the dump-stats command to complete.  To avoid a busy-wait loop we
1894   update the stats with the previous dump results, and then trigger a
1895   new dump.
1896
1897   Oh, and incoming frames are dropped while executing dump-stats!
1898   */
1899static struct net_device_stats *
1900speedo_get_stats(struct net_device *dev)
1901{
1902	struct speedo_private *sp = netdev_priv(dev);
1903	void __iomem *ioaddr = sp->regs;
1904
1905	/* Update only if the previous dump finished. */
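	/* The chip is expected to write the completion code 0xA007 here once a
	   dump-and-reset statistics command has finished. */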
	if (sp->lstats->done_marker == cpu_to_le32(0xA007)) {
1907		sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
1908		sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
1909		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
1910		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
1911		/*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
1912		sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
1913		sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
1914		sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
1915		sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
1916		sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
1917		sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
1918		sp->lstats->done_marker = 0x0000;
1919		if (netif_running(dev)) {
1920			unsigned long flags;
1921			/* Take a spinlock to make wait_for_cmd_done and sending the
1922			   command atomic.  --SAW */
1923			spin_lock_irqsave(&sp->lock, flags);
1924			wait_for_cmd_done(dev, sp);
1925			iowrite8(CUDumpStats, ioaddr + SCBCmd);
1926			spin_unlock_irqrestore(&sp->lock, flags);
1927		}
1928	}
1929	return &sp->stats;
1930}
1931
1932static void speedo_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1933{
1934	struct speedo_private *sp = netdev_priv(dev);
1935	strncpy(info->driver, "eepro100", sizeof(info->driver)-1);
1936	strncpy(info->version, version, sizeof(info->version)-1);
1937	if (sp->pdev)
1938		strcpy(info->bus_info, pci_name(sp->pdev));
1939}
1940
1941static int speedo_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1942{
1943	struct speedo_private *sp = netdev_priv(dev);
1944	spin_lock_irq(&sp->lock);
1945	mii_ethtool_gset(&sp->mii_if, ecmd);
1946	spin_unlock_irq(&sp->lock);
1947	return 0;
1948}
1949
1950static int speedo_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1951{
1952	struct speedo_private *sp = netdev_priv(dev);
1953	int res;
1954	spin_lock_irq(&sp->lock);
1955	res = mii_ethtool_sset(&sp->mii_if, ecmd);
1956	spin_unlock_irq(&sp->lock);
1957	return res;
1958}
1959
1960static int speedo_nway_reset(struct net_device *dev)
1961{
1962	struct speedo_private *sp = netdev_priv(dev);
1963	return mii_nway_restart(&sp->mii_if);
1964}
1965
1966static u32 speedo_get_link(struct net_device *dev)
1967{
1968	struct speedo_private *sp = netdev_priv(dev);
1969	return mii_link_ok(&sp->mii_if);
1970}
1971
1972static u32 speedo_get_msglevel(struct net_device *dev)
1973{
1974	struct speedo_private *sp = netdev_priv(dev);
1975	return sp->msg_enable;
1976}
1977
1978static void speedo_set_msglevel(struct net_device *dev, u32 v)
1979{
1980	struct speedo_private *sp = netdev_priv(dev);
1981	sp->msg_enable = v;
1982}
1983
1984static const struct ethtool_ops ethtool_ops = {
1985	.get_drvinfo = speedo_get_drvinfo,
1986	.get_settings = speedo_get_settings,
1987	.set_settings = speedo_set_settings,
1988	.nway_reset = speedo_nway_reset,
1989	.get_link = speedo_get_link,
1990	.get_msglevel = speedo_get_msglevel,
1991	.set_msglevel = speedo_set_msglevel,
1992};
1993
1994static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1995{
1996	struct speedo_private *sp = netdev_priv(dev);
1997	struct mii_ioctl_data *data = if_mii(rq);
1998	int phy = sp->phy[0] & 0x1f;
1999	int saved_acpi;
2000	int t;
2001
	switch (cmd) {
2003	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
2004		data->phy_id = phy;
2005
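		/* Fall through and read the selected register as well. */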
2006	case SIOCGMIIREG:		/* Read MII PHY register. */
2007		saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
2008		t = del_timer_sync(&sp->timer);
2009		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2010		if (t)
2011			add_timer(&sp->timer); /* may be set to the past  --SAW */
2012		pci_set_power_state(sp->pdev, saved_acpi);
2013		return 0;
2014
2015	case SIOCSMIIREG:		/* Write MII PHY register. */
2016		if (!capable(CAP_NET_ADMIN))
2017			return -EPERM;
2018		saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
2019		t = del_timer_sync(&sp->timer);
2020		mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
2021		if (t)
2022			add_timer(&sp->timer); /* may be set to the past  --SAW */
2023		pci_set_power_state(sp->pdev, saved_acpi);
2024		return 0;
2025	default:
2026		return -EOPNOTSUPP;
2027	}
2028}
2029
2030/* Set or clear the multicast filter for this adaptor.
2031   This is very ugly with Intel chips -- we usually have to execute an
2032   entire configuration command, plus process a multicast command.
2033   This is complicated.  We must put a large configuration command and
2034   an arbitrarily-sized multicast command in the transmit list.
2035   To minimize the disruption -- the previous command might have already
2036   loaded the link -- we convert the current command block, normally a Tx
2037   command, into a no-op and link it to the new command.
2038*/
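/* Roughly, the command chain is rewritten as follows.  For a configuration
   change the new CmdConfigure occupies the next Tx ring slot:

       ... -> last_cmd (suspend cleared) -> CmdConfigure -> next ring slot

   For a large multicast list the ring slot is turned into a no-op that points
   at an out-of-ring setup frame, which then links back into the ring:

       ... -> last_cmd -> CmdNOp -> CmdMulticastList frame -> next ring slot
*/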
2039static void set_rx_mode(struct net_device *dev)
2040{
2041	struct speedo_private *sp = netdev_priv(dev);
2042	void __iomem *ioaddr = sp->regs;
2043	struct descriptor *last_cmd;
2044	char new_rx_mode;
2045	unsigned long flags;
2046	int entry, i;
2047
2048	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
2049		new_rx_mode = 3;
2050	} else if ((dev->flags & IFF_ALLMULTI)  ||
2051			   dev->mc_count > multicast_filter_limit) {
2052		new_rx_mode = 1;
2053	} else
2054		new_rx_mode = 0;
2055
2056	if (netif_msg_rx_status(sp))
2057		printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
2058				sp->rx_mode, new_rx_mode);
2059
2060	if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
		/* The Tx ring is full -- don't add anything!  Hope the mode will be
		   set again later. */
2063		sp->rx_mode = -1;
2064		return;
2065	}
2066
2067	if (new_rx_mode != sp->rx_mode) {
2068		u8 *config_cmd_data;
2069
2070		spin_lock_irqsave(&sp->lock, flags);
2071		entry = sp->cur_tx++ % TX_RING_SIZE;
2072		last_cmd = sp->last_cmd;
2073		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2074
2075		sp->tx_skbuff[entry] = NULL;			/* Redundant. */
2076		sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
2077		sp->tx_ring[entry].link =
2078			cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2079		config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
2080		/* Construct a full CmdConfig frame. */
2081		memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
2082		config_cmd_data[1] = (txfifo << 4) | rxfifo;
2083		config_cmd_data[4] = rxdmacount;
2084		config_cmd_data[5] = txdmacount + 0x80;
2085		config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
		/* 0x80 doesn't disable flow control; 0x84 does.
		   Disable flow control since we are not ACKing any FC interrupts
		   for now. --Dragan */
2089		config_cmd_data[19] = 0x84;
2090		config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
2091		config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
2092		if (sp->phy[0] & 0x8000) {			/* Use the AUI port instead. */
2093			config_cmd_data[15] |= 0x80;
2094			config_cmd_data[8] = 0;
2095		}
2096		/* Trigger the command unit resume. */
2097		wait_for_cmd_done(dev, sp);
2098		clear_suspend(last_cmd);
2099		iowrite8(CUResume, ioaddr + SCBCmd);
2100		if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2101			netif_stop_queue(dev);
2102			sp->tx_full = 1;
2103		}
2104		spin_unlock_irqrestore(&sp->lock, flags);
2105	}
2106
2107	if (new_rx_mode == 0  &&  dev->mc_count < 4) {
2108		/* The simple case of 0-3 multicast list entries occurs often, and
2109		   fits within one tx_ring[] entry. */
2110		struct dev_mc_list *mclist;
2111		u16 *setup_params, *eaddrs;
2112
2113		spin_lock_irqsave(&sp->lock, flags);
2114		entry = sp->cur_tx++ % TX_RING_SIZE;
2115		last_cmd = sp->last_cmd;
2116		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2117
2118		sp->tx_skbuff[entry] = NULL;
2119		sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
2120		sp->tx_ring[entry].link =
2121			cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2122		sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
2123		setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
2124		*setup_params++ = cpu_to_le16(dev->mc_count*6);
2125		/* Fill in the multicast addresses. */
2126		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2127			 i++, mclist = mclist->next) {
2128			eaddrs = (u16 *)mclist->dmi_addr;
2129			*setup_params++ = *eaddrs++;
2130			*setup_params++ = *eaddrs++;
2131			*setup_params++ = *eaddrs++;
2132		}
2133
2134		wait_for_cmd_done(dev, sp);
2135		clear_suspend(last_cmd);
2136		/* Immediately trigger the command unit resume. */
2137		iowrite8(CUResume, ioaddr + SCBCmd);
2138
2139		if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2140			netif_stop_queue(dev);
2141			sp->tx_full = 1;
2142		}
2143		spin_unlock_irqrestore(&sp->lock, flags);
2144	} else if (new_rx_mode == 0) {
2145		struct dev_mc_list *mclist;
2146		u16 *setup_params, *eaddrs;
2147		struct speedo_mc_block *mc_blk;
2148		struct descriptor *mc_setup_frm;
2149		int i;
2150
2151		mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
2152						 GFP_ATOMIC);
2153		if (mc_blk == NULL) {
2154			printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
2155				   dev->name);
2156			sp->rx_mode = -1; /* We failed, try again. */
2157			return;
2158		}
2159		mc_blk->next = NULL;
2160		mc_blk->len = 2 + multicast_filter_limit*6;
2161		mc_blk->frame_dma =
2162			pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
2163					PCI_DMA_TODEVICE);
2164		mc_setup_frm = &mc_blk->frame;
2165
2166		/* Fill the setup frame. */
2167		if (netif_msg_ifup(sp))
2168			printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
2169				   dev->name, mc_setup_frm);
2170		mc_setup_frm->cmd_status =
2171			cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2172		/* Link set below. */
2173		setup_params = (u16 *)&mc_setup_frm->params;
2174		*setup_params++ = cpu_to_le16(dev->mc_count*6);
2175		/* Fill in the multicast addresses. */
2176		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2177			 i++, mclist = mclist->next) {
2178			eaddrs = (u16 *)mclist->dmi_addr;
2179			*setup_params++ = *eaddrs++;
2180			*setup_params++ = *eaddrs++;
2181			*setup_params++ = *eaddrs++;
2182		}
2183
2184		/* Disable interrupts while playing with the Tx Cmd list. */
2185		spin_lock_irqsave(&sp->lock, flags);
2186
2187		if (sp->mc_setup_tail)
2188			sp->mc_setup_tail->next = mc_blk;
2189		else
2190			sp->mc_setup_head = mc_blk;
2191		sp->mc_setup_tail = mc_blk;
2192		mc_blk->tx = sp->cur_tx;
2193
2194		entry = sp->cur_tx++ % TX_RING_SIZE;
2195		last_cmd = sp->last_cmd;
2196		sp->last_cmd = mc_setup_frm;
2197
2198		/* Change the command to a NoOp, pointing to the CmdMulti command. */
2199		sp->tx_skbuff[entry] = NULL;
2200		sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
2201		sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
2202
2203		/* Set the link in the setup frame. */
2204		mc_setup_frm->link =
2205			cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2206
2207		pci_dma_sync_single_for_device(sp->pdev, mc_blk->frame_dma,
2208									   mc_blk->len, PCI_DMA_TODEVICE);
2209
2210		wait_for_cmd_done(dev, sp);
2211		clear_suspend(last_cmd);
2212		/* Immediately trigger the command unit resume. */
2213		iowrite8(CUResume, ioaddr + SCBCmd);
2214
2215		if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2216			netif_stop_queue(dev);
2217			sp->tx_full = 1;
2218		}
2219		spin_unlock_irqrestore(&sp->lock, flags);
2220
2221		if (netif_msg_rx_status(sp))
			printk(KERN_DEBUG "%s: CmdMCSetup with %d multicast addresses in entry %d.\n",
				   dev->name, dev->mc_count, entry);
2224	}
2225
2226	sp->rx_mode = new_rx_mode;
2227}
2228
2229#ifdef CONFIG_PM
2230static int eepro100_suspend(struct pci_dev *pdev, pm_message_t state)
2231{
	struct net_device *dev = pci_get_drvdata(pdev);
2233	struct speedo_private *sp = netdev_priv(dev);
2234	void __iomem *ioaddr = sp->regs;
2235
2236	pci_save_state(pdev);
2237
2238	if (!netif_running(dev))
2239		return 0;
2240
2241	del_timer_sync(&sp->timer);
2242
2243	netif_device_detach(dev);
2244	iowrite32(PortPartialReset, ioaddr + SCBPort);
2245
2246	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
2248	return 0;
2249}
2250
2251static int eepro100_resume(struct pci_dev *pdev)
2252{
	struct net_device *dev = pci_get_drvdata(pdev);
2254	struct speedo_private *sp = netdev_priv(dev);
2255	void __iomem *ioaddr = sp->regs;
2256
2257	pci_set_power_state(pdev, PCI_D0);
2258	pci_restore_state(pdev);
2259	pci_enable_device(pdev);
2260	pci_set_master(pdev);
2261
2262	if (!netif_running(dev))
2263		return 0;
2264
	/* I'm not at all certain that this part of the code works.
	   The problems are:
	    - correct hardware reinitialization;
	    - correct driver behavior between different steps of the
	      reinitialization;
	    - serialization with other driver calls.
	   2000/03/08  SAW */
2272	iowrite16(SCBMaskAll, ioaddr + SCBCmd);
2273	speedo_resume(dev);
2274	netif_device_attach(dev);
2275	sp->rx_mode = -1;
2276	sp->flow_ctrl = sp->partner = 0;
2277	set_rx_mode(dev);
2278	sp->timer.expires = RUN_AT(2*HZ);
2279	add_timer(&sp->timer);
2280	return 0;
2281}
2282#endif /* CONFIG_PM */
2283
static void __devexit eepro100_remove_one(struct pci_dev *pdev)
2285{
	struct net_device *dev = pci_get_drvdata(pdev);
2287	struct speedo_private *sp = netdev_priv(dev);
2288
2289	unregister_netdev(dev);
2290
2291	release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
2292	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2293
2294	pci_iounmap(pdev, sp->regs);
2295	pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
2296								+ sizeof(struct speedo_stats),
2297						sp->tx_ring, sp->tx_ring_dma);
2298	pci_disable_device(pdev);
2299	free_netdev(dev);
2300}
2301
2302static struct pci_device_id eepro100_pci_tbl[] = {
2303	{ PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
2304	{ PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
2305	{ PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
2306	{ PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
2307	{ PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
2308	{ PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
2309	{ PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
2310	{ PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
2311	{ PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
2312	{ PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
2313	{ PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
2314	{ PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
2315	{ PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
2316	{ PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
2317	{ PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
2318	{ PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
2319	{ PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
2320	{ PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
2321	{ PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
2322	{ PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
2323	{ PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
2324	{ PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
2325	{ PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
2326	{ PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
2327	{ PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
2328	{ PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
2329	{ 0,}
2330};
2331MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
2332
2333static struct pci_driver eepro100_driver = {
2334	.name		= "eepro100",
2335	.id_table	= eepro100_pci_tbl,
2336	.probe		= eepro100_init_one,
2337	.remove		= __devexit_p(eepro100_remove_one),
2338#ifdef CONFIG_PM
2339	.suspend	= eepro100_suspend,
2340	.resume		= eepro100_resume,
2341#endif /* CONFIG_PM */
2342};
2343
2344static int __init eepro100_init_module(void)
2345{
2346#ifdef MODULE
2347	printk(version);
2348#endif
2349	return pci_register_driver(&eepro100_driver);
2350}
2351
2352static void __exit eepro100_cleanup_module(void)
2353{
2354	pci_unregister_driver(&eepro100_driver);
2355}
2356
2357module_init(eepro100_init_module);
2358module_exit(eepro100_cleanup_module);
2359
2360/*
2361 * Local variables:
2362 *  compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
2363 *  c-indent-level: 4
2364 *  c-basic-offset: 4
2365 *  tab-width: 4
2366 * End:
2367 */
2368