• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/net/
1/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
2/*
3	Written/copyright 1997-2001 by Donald Becker.
4
5	This software may be used and distributed according to the terms of
6	the GNU General Public License (GPL), incorporated herein by reference.
7	Drivers based on or derived from this code fall under the GPL and must
8	retain the authorship, copyright and license notice.  This file is not
9	a complete program and may only be used when the entire operating
10	system is licensed under the GPL.
11
12	This driver is for the SMC83c170/175 "EPIC" series, as used on the
13	SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
14
15	The author may be reached as becker@scyld.com, or C/O
16	Scyld Computing Corporation
17	410 Severn Ave., Suite 210
18	Annapolis MD 21403
19
20	Information and updates available at
21	http://www.scyld.com/network/epic100.html
22	[this link no longer provides anything useful -jgarzik]
23
24	---------------------------------------------------------------------
25
26*/
27
28#define DRV_NAME        "epic100"
29#define DRV_VERSION     "2.1"
30#define DRV_RELDATE     "Sept 11, 2006"
31
32/* The user-configurable values.
33   These may be modified when a driver module is loaded.*/
34
35static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
36
37/* Used to pass the full-duplex flag, etc. */
38#define MAX_UNITS 8		/* More are supported, limit only on options */
39static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
40static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
41
42/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
43   Setting to > 1518 effectively disables this feature. */
44static int rx_copybreak;
45
46/* Operational parameters that are set at compile time. */
47
48/* Keep the ring sizes a power of two for operational efficiency.
49   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
50   Making the Tx ring too large decreases the effectiveness of channel
51   bonding and packet priority.
52   There are no ill effects from too-large receive rings. */
53#define TX_RING_SIZE	256
54#define TX_QUEUE_LEN	240		/* Limit ring entries actually used.  */
55#define RX_RING_SIZE	256
56#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct epic_tx_desc)
57#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct epic_rx_desc)
58
59/* Operational parameters that usually are not changed. */
60/* Time in jiffies before concluding the transmitter is hung. */
61#define TX_TIMEOUT  (2*HZ)
62
63#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
64
65/* Bytes transferred to chip before transmission starts. */
66/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
67#define TX_FIFO_THRESH 256
68#define RX_FIFO_THRESH 1		/* 0-3, 0==32, 64,96, or 3==128 bytes  */
69
70#include <linux/module.h>
71#include <linux/kernel.h>
72#include <linux/string.h>
73#include <linux/timer.h>
74#include <linux/errno.h>
75#include <linux/ioport.h>
76#include <linux/interrupt.h>
77#include <linux/pci.h>
78#include <linux/delay.h>
79#include <linux/netdevice.h>
80#include <linux/etherdevice.h>
81#include <linux/skbuff.h>
82#include <linux/init.h>
83#include <linux/spinlock.h>
84#include <linux/ethtool.h>
85#include <linux/mii.h>
86#include <linux/crc32.h>
87#include <linux/bitops.h>
88#include <asm/io.h>
89#include <asm/uaccess.h>
90#include <asm/byteorder.h>
91
92/* These identify the driver base version and may not be removed. */
93static char version[] __devinitdata =
94DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
95static char version2[] __devinitdata =
96"  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
97
98MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
99MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
100MODULE_LICENSE("GPL");
101
102module_param(debug, int, 0);
103module_param(rx_copybreak, int, 0);
104module_param_array(options, int, NULL, 0);
105module_param_array(full_duplex, int, NULL, 0);
106MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
107MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
108MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
109MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
110
111/*
112				Theory of Operation
113
114I. Board Compatibility
115
116This device driver is designed for the SMC "EPIC/100", the SMC
117single-chip Ethernet controllers for PCI.  This chip is used on
118the SMC EtherPower II boards.
119
120II. Board-specific settings
121
122PCI bus devices are configured by the system at boot time, so no jumpers
123need to be set on the board.  The system BIOS will assign the
124PCI INTA signal to a (preferably otherwise unused) system IRQ line.
125Note: Kernel versions earlier than 1.3.73 do not support shared PCI
126interrupt lines.
127
128III. Driver operation
129
130IIIa. Ring buffers
131
132IVb. References
133
134http://www.smsc.com/main/tools/discontinued/83c171.pdf
135http://www.smsc.com/main/tools/discontinued/83c175.pdf
136http://scyld.com/expert/NWay.html
137http://www.national.com/pf/DP/DP83840A.html
138
139IVc. Errata
140
141*/
142
143
144enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
145
146#define EPIC_TOTAL_SIZE 0x100
147#define USE_IO_OPS 1
148
149typedef enum {
150	SMSC_83C170_0,
151	SMSC_83C170,
152	SMSC_83C175,
153} chip_t;
154
155
156struct epic_chip_info {
157	const char *name;
158        int drv_flags;                          /* Driver use, intended as capability flags. */
159};
160
161
162/* indexed by chip_t */
163static const struct epic_chip_info pci_id_tbl[] = {
164	{ "SMSC EPIC/100 83c170",	TYPE2_INTR | NO_MII | MII_PWRDWN },
165	{ "SMSC EPIC/100 83c170",	TYPE2_INTR },
166	{ "SMSC EPIC/C 83c175",		TYPE2_INTR | MII_PWRDWN },
167};
168
169
170static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
171	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
172	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
173	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
174	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
175	{ 0,}
176};
177MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
178
179
180#ifndef USE_IO_OPS
181#undef inb
182#undef inw
183#undef inl
184#undef outb
185#undef outw
186#undef outl
187#define inb readb
188#define inw readw
189#define inl readl
190#define outb writeb
191#define outw writew
192#define outl writel
193#endif
194
195/* Offsets to registers, using the (ugh) SMC names. */
196enum epic_registers {
197  COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
198  PCIBurstCnt=0x18,
199  TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,	/* Rx error counters. */
200  MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
201  LAN0=64,						/* MAC address. */
202  MC0=80,						/* Multicast filter table. */
203  RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
204  PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
205};
206
207/* Interrupt register bits, using my own meaningful names. */
208enum IntrStatus {
209	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
210	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
211	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
212	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
213	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
214};
215enum CommandBits {
216	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
217	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
218};
219
220#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */
221
222#define EpicNapiEvent	(TxEmpty | TxDone | \
223			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
224#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)
225
226static const u16 media2miictl[16] = {
227	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
228	0, 0, 0, 0,  0, 0, 0, 0 };
229
230/*
231 * The EPIC100 Rx and Tx buffer descriptors.  Note that these
232 * really ARE host-endian; it's not a misannotation.  We tell
233 * the card to byteswap them internally on big-endian hosts -
234 * look for #ifdef __BIG_ENDIAN in epic_open().
235 */
236
237struct epic_tx_desc {
238	u32 txstatus;
239	u32 bufaddr;
240	u32 buflength;
241	u32 next;
242};
243
244struct epic_rx_desc {
245	u32 rxstatus;
246	u32 bufaddr;
247	u32 buflength;
248	u32 next;
249};
250
251enum desc_status_bits {
252	DescOwn=0x8000,
253};
254
255#define PRIV_ALIGN	15 	/* Required alignment mask */
/* Per-adapter driver state, stored in netdev_priv() of the net_device.
   The two descriptor rings live in coherent DMA memory allocated at
   probe time (see epic_init_one); *_ring_dma are their bus addresses. */
256struct epic_private {
257	struct epic_rx_desc *rx_ring;
258	struct epic_tx_desc *tx_ring;
259	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
260	struct sk_buff* tx_skbuff[TX_RING_SIZE];
261	/* The addresses of receive-in-place skbuffs. */
262	struct sk_buff* rx_skbuff[RX_RING_SIZE];
263
264	dma_addr_t tx_ring_dma;
265	dma_addr_t rx_ring_dma;
266
267	/* Ring pointers. */
268	spinlock_t lock;				/* Group with Tx control cache line. */
269	spinlock_t napi_lock;
270	struct napi_struct napi;
271	unsigned int reschedule_in_poll;
	/* Free-running counters; ring index is taken modulo the ring size. */
272	unsigned int cur_tx, dirty_tx;
273
274	unsigned int cur_rx, dirty_rx;
	/* Interrupt sources we unmask; built in epic_init_one from chip_flags. */
275	u32 irq_mask;
276	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
277
278	struct pci_dev *pci_dev;			/* PCI bus location. */
279	int chip_id, chip_flags;
280
281	struct timer_list timer;			/* Media selection timer. */
282	int tx_threshold;
283	unsigned char mc_filter[8];
284	signed char phys[4];				/* MII device addresses. */
285	u16 advertising;					/* NWay media advertisement */
286	int mii_phy_cnt;
287	struct mii_if_info mii;
288	unsigned int tx_full:1;				/* The Tx queue is full. */
289	unsigned int default_port:4;		/* Last dev->if_port value. */
290};
291
292static int epic_open(struct net_device *dev);
293static int read_eeprom(long ioaddr, int location);
294static int mdio_read(struct net_device *dev, int phy_id, int location);
295static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
296static void epic_restart(struct net_device *dev);
297static void epic_timer(unsigned long data);
298static void epic_tx_timeout(struct net_device *dev);
299static void epic_init_ring(struct net_device *dev);
300static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
301				   struct net_device *dev);
302static int epic_rx(struct net_device *dev, int budget);
303static int epic_poll(struct napi_struct *napi, int budget);
304static irqreturn_t epic_interrupt(int irq, void *dev_instance);
305static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
306static const struct ethtool_ops netdev_ethtool_ops;
307static int epic_close(struct net_device *dev);
308static struct net_device_stats *epic_get_stats(struct net_device *dev);
309static void set_rx_mode(struct net_device *dev);
310
/* net_device operations; hooked up to the device in epic_init_one(). */
311static const struct net_device_ops epic_netdev_ops = {
312	.ndo_open		= epic_open,
313	.ndo_stop		= epic_close,
314	.ndo_start_xmit		= epic_start_xmit,
315	.ndo_tx_timeout 	= epic_tx_timeout,
316	.ndo_get_stats		= epic_get_stats,
317	.ndo_set_multicast_list = set_rx_mode,
318	.ndo_do_ioctl 		= netdev_ioctl,
319	.ndo_change_mtu		= eth_change_mtu,
320	.ndo_set_mac_address 	= eth_mac_addr,
321	.ndo_validate_addr	= eth_validate_addr,
322};
323
/*
 * PCI probe callback: bring up one EPIC adapter.
 *
 * Enables the PCI device, maps its register window, allocates the
 * coherent Tx/Rx descriptor rings, reads the station address from the
 * chip, scans the MII bus for transceivers, then registers the
 * net_device.  On any failure, resources acquired so far are released
 * via the goto-unwind chain at the bottom.  Returns 0 or -errno.
 */
324static int __devinit epic_init_one (struct pci_dev *pdev,
325				    const struct pci_device_id *ent)
326{
	/* Per-probe index used to pick entries from the options[]/full_duplex[]
	   module parameters; increments once per card found. */
327	static int card_idx = -1;
328	long ioaddr;
329	int chip_idx = (int) ent->driver_data;
330	int irq;
331	struct net_device *dev;
332	struct epic_private *ep;
333	int i, ret, option = 0, duplex = 0;
334	void *ring_space;
335	dma_addr_t ring_dma;
336
337/* when built into the kernel, we only print version if device is found */
338#ifndef MODULE
339	static int printed_version;
340	if (!printed_version++)
341		printk(KERN_INFO "%s%s", version, version2);
342#endif
343
344	card_idx++;
345
346	ret = pci_enable_device(pdev);
347	if (ret)
348		goto out;
349	irq = pdev->irq;
350
351	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
352		dev_err(&pdev->dev, "no PCI region space\n");
353		ret = -ENODEV;
354		goto err_out_disable;
355	}
356
357	pci_set_master(pdev);
358
359	ret = pci_request_regions(pdev, DRV_NAME);
360	if (ret < 0)
361		goto err_out_disable;
362
363	ret = -ENOMEM;
364
365	dev = alloc_etherdev(sizeof (*ep));
366	if (!dev) {
367		dev_err(&pdev->dev, "no memory for eth device\n");
368		goto err_out_free_res;
369	}
370	SET_NETDEV_DEV(dev, &pdev->dev);
371
	/* BAR 0 is port I/O, BAR 1 is the memory-mapped register window. */
372#ifdef USE_IO_OPS
373	ioaddr = pci_resource_start (pdev, 0);
374#else
375	ioaddr = pci_resource_start (pdev, 1);
376	ioaddr = (long) pci_ioremap_bar(pdev, 1);
377	if (!ioaddr) {
378		dev_err(&pdev->dev, "ioremap failed\n");
379		goto err_out_free_netdev;
380	}
381#endif
382
383	pci_set_drvdata(pdev, dev);
384	ep = netdev_priv(dev);
385	ep->mii.dev = dev;
386	ep->mii.mdio_read = mdio_read;
387	ep->mii.mdio_write = mdio_write;
388	ep->mii.phy_id_mask = 0x1f;
389	ep->mii.reg_num_mask = 0x1f;
390
	/* Coherent DMA memory for the Tx then Rx descriptor rings. */
391	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
392	if (!ring_space)
393		goto err_out_iounmap;
394	ep->tx_ring = (struct epic_tx_desc *)ring_space;
395	ep->tx_ring_dma = ring_dma;
396
397	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
398	if (!ring_space)
399		goto err_out_unmap_tx;
400	ep->rx_ring = (struct epic_rx_desc *)ring_space;
401	ep->rx_ring_dma = ring_dma;
402
	/* Media option: dev->mem_start (set by the boot-time config) wins
	   over the options[]/full_duplex[] module parameters. */
403	if (dev->mem_start) {
404		option = dev->mem_start;
405		duplex = (dev->mem_start & 16) ? 1 : 0;
406	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
407		if (options[card_idx] >= 0)
408			option = options[card_idx];
409		if (full_duplex[card_idx] >= 0)
410			duplex = full_duplex[card_idx];
411	}
412
413	dev->base_addr = ioaddr;
414	dev->irq = irq;
415
416	spin_lock_init(&ep->lock);
417	spin_lock_init(&ep->napi_lock);
418	ep->reschedule_in_poll = 0;
419
420	/* Bring the chip out of low-power mode. */
421	outl(0x4200, ioaddr + GENCTL);
422	/* Magic?!  If we don't set this bit the MII interface won't work. */
423	/* This magic is documented in SMSC app note 7.15 */
424	for (i = 16; i > 0; i--)
425		outl(0x0008, ioaddr + TEST1);
426
427	/* Turn on the MII transceiver. */
428	outl(0x12, ioaddr + MIICfg);
429	if (chip_idx == 1)
430		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
431	outl(0x0200, ioaddr + GENCTL);
432
	/* Station address is read from the chip's LAN0 registers, one
	   16-bit word per 4-byte register stride. */
433	/* Note: the '175 does not have a serial EEPROM. */
434	for (i = 0; i < 3; i++)
435		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4));
436
437	if (debug > 2) {
438		dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
439		for (i = 0; i < 64; i++)
440			printk(" %4.4x%s", read_eeprom(ioaddr, i),
441				   i % 16 == 15 ? "\n" : "");
442	}
443
444	ep->pci_dev = pdev;
445	ep->chip_id = chip_idx;
446	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
447	ep->irq_mask =
448		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
449		 | CntFull | TxUnderrun | EpicNapiEvent;
450
451	/* Find the connected MII xcvrs.
452	   Doing this in open() would allow detecting external xcvrs later, but
453	   takes much time and no cards have external MII. */
454	{
455		int phy, phy_idx = 0;
456		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
457			int mii_status = mdio_read(dev, phy, MII_BMSR);
			/* All-ones or all-zeros BMSR means no PHY at this address. */
458			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
459				ep->phys[phy_idx++] = phy;
460				dev_info(&pdev->dev,
461					"MII transceiver #%d control "
462					"%4.4x status %4.4x.\n",
463					phy, mdio_read(dev, phy, 0), mii_status);
464			}
465		}
466		ep->mii_phy_cnt = phy_idx;
467		if (phy_idx != 0) {
468			phy = ep->phys[0];
469			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
470			dev_info(&pdev->dev,
471				"Autonegotiation advertising %4.4x link "
472				   "partner %4.4x.\n",
473				   ep->mii.advertising, mdio_read(dev, phy, 5));
474		} else if ( ! (ep->chip_flags & NO_MII)) {
475			dev_warn(&pdev->dev,
476				"***WARNING***: No MII transceiver found!\n");
477			/* Use the known PHY address of the EPII. */
478			ep->phys[0] = 3;
479		}
480		ep->mii.phy_id = ep->phys[0];
481	}
482
483	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
484	if (ep->chip_flags & MII_PWRDWN)
485		outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
486	outl(0x0008, ioaddr + GENCTL);
487
488	/* The lower four bits are the media type. */
489	if (duplex) {
490		ep->mii.force_media = ep->mii.full_duplex = 1;
491		dev_info(&pdev->dev, "Forced full duplex requested.\n");
492	}
493	dev->if_port = ep->default_port = option;
494
495	/* The Epic-specific entries in the device structure. */
496	dev->netdev_ops = &epic_netdev_ops;
497	dev->ethtool_ops = &netdev_ethtool_ops;
498	dev->watchdog_timeo = TX_TIMEOUT;
499	netif_napi_add(dev, &ep->napi, epic_poll, 64);
500
501	ret = register_netdev(dev);
502	if (ret < 0)
503		goto err_out_unmap_rx;
504
505	printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n",
506	       dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq,
507	       dev->dev_addr);
508
509out:
510	return ret;
511
	/* Error unwind: release resources in reverse order of acquisition. */
512err_out_unmap_rx:
513	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
514err_out_unmap_tx:
515	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
516err_out_iounmap:
517#ifndef USE_IO_OPS
518	iounmap(ioaddr);
519err_out_free_netdev:
520#endif
521	free_netdev(dev);
522err_out_free_res:
523	pci_release_regions(pdev);
524err_out_disable:
525	pci_disable_device(pdev);
526	goto out;
527}
528
529/* Serial EEPROM section. */
530
531/*  EEPROM_Ctrl bits. */
532#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
533#define EE_CS			0x02	/* EEPROM chip select. */
534#define EE_DATA_WRITE	0x08	/* EEPROM chip data in. */
535#define EE_WRITE_0		0x01
536#define EE_WRITE_1		0x09
537#define EE_DATA_READ	0x10	/* EEPROM chip data out. */
538#define EE_ENB			(0x0001 | EE_CS)
539
540/* Delay between EEPROM clock transitions.
541   This serves to flush the operation to the PCI bus.
542 */
543
544#define eeprom_delay()	inl(ee_addr)
545
546/* The EEPROM commands include the alway-set leading bit. */
547#define EE_WRITE_CMD	(5 << 6)
548#define EE_READ64_CMD	(6 << 6)
549#define EE_READ256_CMD	(6 << 8)
550#define EE_ERASE_CMD	(7 << 6)
551
552static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
553{
554	long ioaddr = dev->base_addr;
555
556	outl(0x00000000, ioaddr + INTMASK);
557}
558
559static inline void __epic_pci_commit(long ioaddr)
560{
561#ifndef USE_IO_OPS
562	inl(ioaddr + INTMASK);
563#endif
564}
565
566static inline void epic_napi_irq_off(struct net_device *dev,
567				     struct epic_private *ep)
568{
569	long ioaddr = dev->base_addr;
570
571	outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
572	__epic_pci_commit(ioaddr);
573}
574
575static inline void epic_napi_irq_on(struct net_device *dev,
576				    struct epic_private *ep)
577{
578	long ioaddr = dev->base_addr;
579
580	/* No need to commit possible posted write */
581	outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
582}
583
/*
 * Bit-bang one 16-bit word out of the serial EEPROM at @location.
 * Each outl/eeprom_delay pair toggles the EEPROM clock/data lines via
 * the EECTL register; eeprom_delay() is a flushing read of that
 * register.  Returns the 16-bit word read.
 */
584static int __devinit read_eeprom(long ioaddr, int location)
585{
586	int i;
587	int retval = 0;
588	long ee_addr = ioaddr + EECTL;
	/* Bit 0x40 of EECTL selects the command width: 64-word parts take
	   EE_READ64_CMD, larger parts EE_READ256_CMD (presumably an EEPROM
	   size indication — confirm against the 83c170 datasheet). */
589	int read_cmd = location |
590		(inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
591
	/* Pulse chip select to start a new transaction. */
592	outl(EE_ENB & ~EE_CS, ee_addr);
593	outl(EE_ENB, ee_addr);
594
595	/* Shift the read command bits out. */
596	for (i = 12; i >= 0; i--) {
597		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
598		outl(EE_ENB | dataval, ee_addr);
599		eeprom_delay();
600		outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
601		eeprom_delay();
602	}
603	outl(EE_ENB, ee_addr);
604
	/* Clock in the 16 data bits, MSB first. */
605	for (i = 16; i > 0; i--) {
606		outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
607		eeprom_delay();
608		retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
609		outl(EE_ENB, ee_addr);
610		eeprom_delay();
611	}
612
613	/* Terminate the EEPROM access. */
614	outl(EE_ENB & ~EE_CS, ee_addr);
615	return retval;
616}
617
618#define MII_READOP		1
619#define MII_WRITEOP		2
/*
 * Read MII register @location of PHY @phy_id through the chip's MII
 * management interface.  Issues the command via MIICtrl, then polls
 * (bounded to 400 iterations) for the MII_READOP bit to clear before
 * fetching the result from MIIData.  Returns 0xffff on timeout.
 */
620static int mdio_read(struct net_device *dev, int phy_id, int location)
621{
622	long ioaddr = dev->base_addr;
623	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
624	int i;
625
626	outl(read_cmd, ioaddr + MIICtrl);
627	/* Typical operation takes 25 loops. */
628	for (i = 400; i > 0; i--) {
629		barrier();
630		if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
			/* Workaround: a 0xffff result from PHY 1, low registers,
			   is treated as bogus and the read is reissued (the loop
			   budget still bounds the retries).  Reason not recorded
			   here — presumably a chip/PHY quirk; confirm before
			   changing. */
631			if (phy_id == 1 && location < 6 &&
632			    inw(ioaddr + MIIData) == 0xffff) {
633				outl(read_cmd, ioaddr + MIICtrl);
634				continue;
635			}
636			return inw(ioaddr + MIIData);
637		}
638	}
639	return 0xffff;
640}
641
642static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
643{
644	long ioaddr = dev->base_addr;
645	int i;
646
647	outw(value, ioaddr + MIIData);
648	outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
649	for (i = 10000; i > 0; i--) {
650		barrier();
651		if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
652			break;
653	}
654}
655
656
/*
 * ndo_open: reset and configure the chip, allocate the IRQ, fill the
 * rings, program the MAC address and media settings, start Rx, unmask
 * interrupts and arm the media-monitor timer.  Returns 0 or the
 * request_irq() error.  The exact register write order below is
 * deliberate — see the inline comments.
 */
657static int epic_open(struct net_device *dev)
658{
659	struct epic_private *ep = netdev_priv(dev);
660	long ioaddr = dev->base_addr;
661	int i;
662	int retval;
663
664	/* Soft reset the chip. */
665	outl(0x4001, ioaddr + GENCTL);
666
	/* NAPI must be enabled before the IRQ is live; undone on failure. */
667	napi_enable(&ep->napi);
668	if ((retval = request_irq(dev->irq, epic_interrupt, IRQF_SHARED, dev->name, dev))) {
669		napi_disable(&ep->napi);
670		return retval;
671	}
672
673	epic_init_ring(dev);
674
675	outl(0x4000, ioaddr + GENCTL);
676	/* This magic is documented in SMSC app note 7.15 */
677	for (i = 16; i > 0; i--)
678		outl(0x0008, ioaddr + TEST1);
679
680	/* Pull the chip out of low-power mode, enable interrupts, and set for
681	   PCI read multiple.  The MIIcfg setting and strange write order are
682	   required by the details of which bits are reset and the transceiver
683	   wiring on the Ositech CardBus card.
684	*/
685	if (ep->chip_flags & MII_PWRDWN)
686		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
687
688	/* Tell the chip to byteswap descriptors on big-endian hosts */
689#ifdef __BIG_ENDIAN
690	outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
691	inl(ioaddr + GENCTL);
692	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
693#else
694	outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
695	inl(ioaddr + GENCTL);
696	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
697#endif
698
699	udelay(20);
700
	/* Program the station address into LAN0 (one word per register). */
701	for (i = 0; i < 3; i++)
702		outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
703
704	ep->tx_threshold = TX_FIFO_THRESH;
705	outl(ep->tx_threshold, ioaddr + TxThresh);
706
	/* Forced media type: program the BMCR directly from the table. */
707	if (media2miictl[dev->if_port & 15]) {
708		if (ep->mii_phy_cnt)
709			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
710		if (dev->if_port == 1) {
711			if (debug > 1)
712				printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
713					   "status %4.4x.\n",
714					   dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
715		}
716	} else {
		/* Autonegotiated media: derive duplex from the link partner
		   ability, or restart autonegotiation if no partner replied. */
717		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
718		if (mii_lpa != 0xffff) {
719			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
720				ep->mii.full_duplex = 1;
721			else if (! (mii_lpa & LPA_LPACK))
722				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
723			if (debug > 1)
724				printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
725					   " register read of %4.4x.\n", dev->name,
726					   ep->mii.full_duplex ? "full" : "half",
727					   ep->phys[0], mii_lpa);
728		}
729	}
730
731	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	/* Hand the descriptor ring base addresses to the chip. */
732	outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
733	outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
734
735	/* Start the chip's Rx process. */
736	set_rx_mode(dev);
737	outl(StartRx | RxQueued, ioaddr + COMMAND);
738
739	netif_start_queue(dev);
740
741	/* Enable interrupts by setting the interrupt mask. */
742	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
743		 | CntFull | TxUnderrun
744		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
745
746	if (debug > 1)
747		printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
748			   "%s-duplex.\n",
749			   dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
750			   ep->mii.full_duplex ? "full" : "half");
751
752	/* Set the timer to switch to check for link beat and perhaps switch
753	   to an alternate media type. */
754	init_timer(&ep->timer);
755	ep->timer.expires = jiffies + 3*HZ;
756	ep->timer.data = (unsigned long)dev;
757	ep->timer.function = &epic_timer;				/* timer handler */
758	add_timer(&ep->timer);
759
760	return 0;
761}
762
763/* Reset the chip to recover from a PCI transaction error.
764   This may occur at interrupt time. */
/*
 * Quiesce the chip: stop the queue, mask interrupts, halt both DMA
 * engines, fold the hardware Rx error counters into dev->stats, and
 * drain any packets still sitting in the Rx ring.
 */
765static void epic_pause(struct net_device *dev)
766{
767	long ioaddr = dev->base_addr;
768
769	netif_stop_queue (dev);
770
771	/* Disable interrupts by clearing the interrupt mask. */
772	outl(0x00000000, ioaddr + INTMASK);
773	/* Stop the chip's Tx and Rx DMA processes. */
774	outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
775
	/* 0xffff from COMMAND means the chip is gone (see EpicRemoved);
	   skip the counter reads in that case. */
776	/* Update the error counts. */
777	if (inw(ioaddr + COMMAND) != 0xffff) {
778		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
779		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
780		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
781	}
782
783	/* Remove the packets on the Rx queue. */
784	epic_rx(dev, RX_RING_SIZE);
785}
786
/*
 * Soft-reset and fully reprogram the chip without touching the rings'
 * contents, resuming Rx/Tx where the driver's cur/dirty indices point.
 * Used to recover from PCI bus errors and Tx hangs; may run at
 * interrupt time (see comment above epic_pause).
 */
787static void epic_restart(struct net_device *dev)
788{
789	long ioaddr = dev->base_addr;
790	struct epic_private *ep = netdev_priv(dev);
791	int i;
792
793	/* Soft reset the chip. */
794	outl(0x4001, ioaddr + GENCTL);
795
796	printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
797		   dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
798	udelay(1);
799
800	/* This magic is documented in SMSC app note 7.15 */
801	for (i = 16; i > 0; i--)
802		outl(0x0008, ioaddr + TEST1);
803
	/* Same GENCTL descriptor-byteswap setting as epic_open(). */
804#ifdef __BIG_ENDIAN
805	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
806#else
807	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
808#endif
809	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
810	if (ep->chip_flags & MII_PWRDWN)
811		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
812
	/* Reprogram the station address (lost across the soft reset). */
813	for (i = 0; i < 3; i++)
814		outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
815
816	ep->tx_threshold = TX_FIFO_THRESH;
817	outl(ep->tx_threshold, ioaddr + TxThresh);
818	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	/* Point the chip at the current (not first) descriptors so it
	   resumes exactly where processing left off. */
819	outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
820		sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
821	outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
822		 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);
823
824	/* Start the chip's Rx process. */
825	set_rx_mode(dev);
826	outl(StartRx | RxQueued, ioaddr + COMMAND);
827
828	/* Enable interrupts by setting the interrupt mask. */
829	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
830		 | CntFull | TxUnderrun
831		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
832
833	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
834		   " interrupt %4.4x.\n",
835		   dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
836		   (int)inl(ioaddr + INTSTAT));
837}
838
839static void check_media(struct net_device *dev)
840{
841	struct epic_private *ep = netdev_priv(dev);
842	long ioaddr = dev->base_addr;
843	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
844	int negotiated = mii_lpa & ep->mii.advertising;
845	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
846
847	if (ep->mii.force_media)
848		return;
849	if (mii_lpa == 0xffff)		/* Bogus read */
850		return;
851	if (ep->mii.full_duplex != duplex) {
852		ep->mii.full_duplex = duplex;
853		printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
854			   " partner capability of %4.4x.\n", dev->name,
855			   ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
856		outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
857	}
858}
859
860static void epic_timer(unsigned long data)
861{
862	struct net_device *dev = (struct net_device *)data;
863	struct epic_private *ep = netdev_priv(dev);
864	long ioaddr = dev->base_addr;
865	int next_tick = 5*HZ;
866
867	if (debug > 3) {
868		printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
869			   dev->name, (int)inl(ioaddr + TxSTAT));
870		printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
871			   "IntStatus %4.4x RxStatus %4.4x.\n",
872			   dev->name, (int)inl(ioaddr + INTMASK),
873			   (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
874	}
875
876	check_media(dev);
877
878	ep->timer.expires = jiffies + next_tick;
879	add_timer(&ep->timer);
880}
881
/*
 * ndo_tx_timeout: the watchdog declared the transmitter hung.  A Tx
 * FIFO underflow just needs a RestartTx command; anything else gets a
 * full chip restart followed by a TxQueued kick.  Always counts a Tx
 * error and wakes the queue if the ring has room.
 */
882static void epic_tx_timeout(struct net_device *dev)
883{
884	struct epic_private *ep = netdev_priv(dev);
885	long ioaddr = dev->base_addr;
886
887	if (debug > 0) {
888		printk(KERN_WARNING "%s: Transmit timeout using MII device, "
889			   "Tx status %4.4x.\n",
890			   dev->name, (int)inw(ioaddr + TxSTAT));
891		if (debug > 1) {
892			printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
893				   dev->name, ep->dirty_tx, ep->cur_tx);
894		}
895	}
896	if (inw(ioaddr + TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
897		dev->stats.tx_fifo_errors++;
898		outl(RestartTx, ioaddr + COMMAND);
899	} else {
900		epic_restart(dev);
901		outl(TxQueued, dev->base_addr + COMMAND);
902	}
903
904	dev->trans_start = jiffies; /* prevent tx timeout */
905	dev->stats.tx_errors++;
906	if (!ep->tx_full)
907		netif_wake_queue(dev);
908}
909
910/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Initialize the Rx and Tx rings, along with various 'dev' bits.
   Resets all ring indices, links every descriptor's 'next' field into a
   circular chain, allocates and DMA-maps one skb per Rx slot (handing
   each to the chip via DescOwn), and clears Tx descriptor ownership. */
911static void epic_init_ring(struct net_device *dev)
912{
913	struct epic_private *ep = netdev_priv(dev);
914	int i;
915
916	ep->tx_full = 0;
917	ep->dirty_tx = ep->cur_tx = 0;
918	ep->cur_rx = ep->dirty_rx = 0;
	/* One buffer holds a full frame; oversize MTUs get MTU + slack. */
919	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
920
921	/* Initialize all Rx descriptors. */
922	for (i = 0; i < RX_RING_SIZE; i++) {
923		ep->rx_ring[i].rxstatus = 0;
924		ep->rx_ring[i].buflength = ep->rx_buf_sz;
925		ep->rx_ring[i].next = ep->rx_ring_dma +
926				      (i+1)*sizeof(struct epic_rx_desc);
927		ep->rx_skbuff[i] = NULL;
928	}
929	/* Mark the last entry as wrapping the ring. */
930	ep->rx_ring[i-1].next = ep->rx_ring_dma;
931
932	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
933	for (i = 0; i < RX_RING_SIZE; i++) {
934		struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
935		ep->rx_skbuff[i] = skb;
936		if (skb == NULL)
937			break;
938		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
939		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
940			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
941		ep->rx_ring[i].rxstatus = DescOwn;
942	}
	/* dirty_rx trails cur_rx by a full ring; if some allocations
	   failed above, the (negative, wrapped-unsigned) offset records
	   how many slots still need refilling later. */
943	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
944
945	/* The Tx buffer descriptor is filled in as needed, but we
946	   do need to clear the ownership bit. */
947	for (i = 0; i < TX_RING_SIZE; i++) {
948		ep->tx_skbuff[i] = NULL;
949		ep->tx_ring[i].txstatus = 0x0000;
950		ep->tx_ring[i].next = ep->tx_ring_dma +
951			(i+1)*sizeof(struct epic_tx_desc);
952	}
953	ep->tx_ring[i-1].next = ep->tx_ring_dma;
954}
955
/*
 * ndo_start_xmit: place one skb on the Tx ring under ep->lock, hand the
 * descriptor to the chip (DescOwn written last), and kick the Tx DMA.
 * Tx-done interrupts are only requested periodically to cut interrupt
 * load; the queue is stopped when the ring approaches TX_QUEUE_LEN.
 */
956static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
957{
958	struct epic_private *ep = netdev_priv(dev);
959	int entry, free_count;
960	u32 ctrl_word;
961	unsigned long flags;
962
	/* Hardware wants at least minimum-size Ethernet frames. */
963	if (skb_padto(skb, ETH_ZLEN))
964		return NETDEV_TX_OK;
965
966	/* Caution: the write order is important here, set the field with the
967	   "ownership" bit last. */
968
969	/* Calculate the next Tx descriptor entry. */
970	spin_lock_irqsave(&ep->lock, flags);
971	free_count = ep->cur_tx - ep->dirty_tx;
972	entry = ep->cur_tx % TX_RING_SIZE;
973
974	ep->tx_skbuff[entry] = skb;
975	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
976		 			            skb->len, PCI_DMA_TODEVICE);
	/* Request a Tx-done interrupt only at the half-way mark and when
	   the ring is nearly full; otherwise transmit silently. */
977	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
978		ctrl_word = 0x100000; /* No interrupt */
979	} else if (free_count == TX_QUEUE_LEN/2) {
980		ctrl_word = 0x140000; /* Tx-done intr. */
981	} else if (free_count < TX_QUEUE_LEN - 1) {
982		ctrl_word = 0x100000; /* No Tx-done intr. */
983	} else {
984		/* Leave room for an additional entry. */
985		ctrl_word = 0x140000; /* Tx-done intr. */
986		ep->tx_full = 1;
987	}
988	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
	/* This write transfers ownership to the chip — must come last. */
989	ep->tx_ring[entry].txstatus =
990		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
991			    | DescOwn;
992
993	ep->cur_tx++;
994	if (ep->tx_full)
995		netif_stop_queue(dev);
996
997	spin_unlock_irqrestore(&ep->lock, flags);
998	/* Trigger an immediate transmit demand. */
999	outl(TxQueued, dev->base_addr + COMMAND);
1000
1001	if (debug > 4)
1002		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
1003			   "flag %2.2x Tx status %8.8x.\n",
1004			   dev->name, (int)skb->len, entry, ctrl_word,
1005			   (int)inl(dev->base_addr + TxSTAT));
1006
1007	return NETDEV_TX_OK;
1008}
1009
1010static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
1011			  int status)
1012{
1013	struct net_device_stats *stats = &dev->stats;
1014
1015#ifndef final_version
1016	/* There was an major error, log it. */
1017	if (debug > 1)
1018		printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1019		       dev->name, status);
1020#endif
1021	stats->tx_errors++;
1022	if (status & 0x1050)
1023		stats->tx_aborted_errors++;
1024	if (status & 0x0008)
1025		stats->tx_carrier_errors++;
1026	if (status & 0x0040)
1027		stats->tx_window_errors++;
1028	if (status & 0x0010)
1029		stats->tx_fifo_errors++;
1030}
1031
/* Reclaim completed Tx descriptors: free their skbs, update the
   statistics, and re-wake the queue once enough slots are free.
   Called from the NAPI poll routine. */
static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = ep->tx_ring[entry].txstatus;

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		/* Bit 0 set indicates a successful transmission. */
		if (likely(txstatus & 0x0001)) {
			dev->stats.collisions += (txstatus >> 8) & 15;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	/* Sanity check: dirty_tx must never trail cur_tx by more than a
	   full ring's worth of entries. */
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		printk(KERN_WARNING
		       "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
		       dev->name, dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	/* Restart the queue with some hysteresis (4 slots) so we don't
	   bounce between stopped and running on every reclaimed packet. */
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}
1079
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.  Rx/Tx work itself is deferred to the NAPI poll
   routine; only error events are handled directly here. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	unsigned int handled = 0;
	int status;

	status = inl(ioaddr + INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	outl(status & EpicNormalEvent, ioaddr + INTSTAT);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
				   "intstat=%#8.8x.\n", dev->name, status,
				   (int)inl(ioaddr + INTSTAT));
	}

	/* Not our interrupt (the line may be shared). */
	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	/* Hand Rx/Tx events to NAPI.  If polling is already in progress,
	   bump reschedule_in_poll so the poll loop runs once more instead
	   of scheduling here. */
	if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
		spin_lock(&ep->napi_lock);
		if (napi_schedule_prep(&ep->napi)) {
			epic_napi_irq_off(dev, ep);
			__napi_schedule(&ep->napi);
		} else
			ep->reschedule_in_poll++;
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		/* All-ones status: the card has been removed. */
		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			dev->stats.tx_fifo_errors++;
			/* Raise the FIFO threshold, then resume transmission. */
			outl(ep->tx_threshold += 128, ioaddr + TxThresh);
			/* Restart the transmit process. */
			outl(RestartTx, ioaddr + COMMAND);
		}
		if (status & PCIBusErr170) {
			printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
					 dev->name, status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		outl(status & 0x7f18, ioaddr + INTSTAT);
	}

out:
	if (debug > 3) {
		printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
				   dev->name, status);
	}

	return IRQ_RETVAL(handled);
}
1150
/* Receive up to 'budget' frames from the Rx ring and hand them to the
   stack, then refill emptied ring slots with fresh buffers.  Returns
   the amount of work done (frames delivered plus buffers refilled)
   for NAPI accounting. */
static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry = ep->cur_rx % RX_RING_SIZE;
	/* Never process more slots than have been refilled. */
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
		int status = ep->rx_ring[entry].rxstatus;

		if (debug > 4)
			printk(KERN_DEBUG "  epic_rx() status was %8.8x.\n", status);
		if (--rx_work_limit < 0)
			break;
		/* 0x2000: frame spanned multiple buffers; 0x0006: Rx errors. */
		if (status & 0x2006) {
			if (debug > 2)
				printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
					   dev->name, status);
			if (status & 0x2000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					   "multiple buffers, status %4.4x!\n", dev->name, status);
				dev->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				dev->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
					   "%d bytes.\n",
					   dev->name, status, pkt_len);
				/* Clamp to the maximum Ethernet frame length. */
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				/* Small frame: copy into a fresh small skb and
				   keep the ring buffer mapped for reuse. */
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(ep->pci_dev,
							    ep->rx_ring[entry].bufaddr,
							    ep->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
							       ep->rx_ring[entry].bufaddr,
							       ep->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Large frame: hand the ring buffer itself to
				   the stack; the slot is refilled below. */
				pci_unmap_single(ep->pci_dev,
					ep->rx_ring[entry].bufaddr,
					ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
			if (skb == NULL)
				break;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			work_done++;
		}
		/* AV: shouldn't we add a barrier here? */
		ep->rx_ring[entry].rxstatus = DescOwn;
	}
	return work_done;
}
1245
1246static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1247{
1248	long ioaddr = dev->base_addr;
1249	int status;
1250
1251	status = inl(ioaddr + INTSTAT);
1252
1253	if (status == EpicRemoved)
1254		return;
1255	if (status & RxOverflow) 	/* Missed a Rx frame. */
1256		dev->stats.rx_errors++;
1257	if (status & (RxOverflow | RxFull))
1258		outw(RxQueued, ioaddr + COMMAND);
1259}
1260
/* NAPI poll handler: reap finished Tx descriptors, receive up to
   'budget' frames, check Rx error conditions, and - when budget
   remains - complete polling and re-enable NAPI interrupts.  If the
   interrupt handler bumped reschedule_in_poll in the meantime, loop
   again instead of completing. */
static int epic_poll(struct napi_struct *napi, int budget)
{
	struct epic_private *ep = container_of(napi, struct epic_private, napi);
	struct net_device *dev = ep->mii.dev;
	int work_done = 0;
	long ioaddr = dev->base_addr;

rx_action:

	epic_tx(dev, ep);

	work_done += epic_rx(dev, budget);

	epic_rx_err(dev, ep);

	if (work_done < budget) {
		unsigned long flags;
		int more;

		/* A bit baroque but it avoids a (space hungry) spin_unlock */

		spin_lock_irqsave(&ep->napi_lock, flags);

		more = ep->reschedule_in_poll;
		if (!more) {
			/* Ack any pending NAPI events and unmask them. */
			__napi_complete(napi);
			outl(EpicNapiEvent, ioaddr + INTSTAT);
			epic_napi_irq_on(dev, ep);
		} else
			ep->reschedule_in_poll--;

		spin_unlock_irqrestore(&ep->napi_lock, flags);

		if (more)
			goto rx_action;
	}

	return work_done;
}
1300
/* Bring the interface down: stop the queue and NAPI, kill the timer,
   mask and free the interrupt, pause the chip, release all Rx/Tx
   buffers, and finally drop the chip into low-power mode. */
static int epic_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);
	napi_disable(&ep->napi);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
			   dev->name, (int)inl(ioaddr + INTSTAT));

	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(dev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
				 	 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	/* Free any skbs still pending in the Tx ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	outl(0x0008, ioaddr + GENCTL);

	return 0;
}
1351
1352static struct net_device_stats *epic_get_stats(struct net_device *dev)
1353{
1354	long ioaddr = dev->base_addr;
1355
1356	if (netif_running(dev)) {
1357		/* Update the error counts. */
1358		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1359		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1360		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1361	}
1362
1363	return &dev->stats;
1364}
1365
1366/* Set or clear the multicast filter for this adaptor.
1367   Note that we only use exclusion around actually queueing the
1368   new frame, not around filling ep->setup_frame.  This is non-deterministic
1369   when re-entered but still correct. */
1370
1371static void set_rx_mode(struct net_device *dev)
1372{
1373	long ioaddr = dev->base_addr;
1374	struct epic_private *ep = netdev_priv(dev);
1375	unsigned char mc_filter[8];		 /* Multicast hash filter */
1376	int i;
1377
1378	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1379		outl(0x002C, ioaddr + RxCtrl);
1380		/* Unconditionally log net taps. */
1381		memset(mc_filter, 0xff, sizeof(mc_filter));
1382	} else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
1383		/* There is apparently a chip bug, so the multicast filter
1384		   is never enabled. */
1385		/* Too many to filter perfectly -- accept all multicasts. */
1386		memset(mc_filter, 0xff, sizeof(mc_filter));
1387		outl(0x000C, ioaddr + RxCtrl);
1388	} else if (netdev_mc_empty(dev)) {
1389		outl(0x0004, ioaddr + RxCtrl);
1390		return;
1391	} else {					/* Never executed, for now. */
1392		struct netdev_hw_addr *ha;
1393
1394		memset(mc_filter, 0, sizeof(mc_filter));
1395		netdev_for_each_mc_addr(ha, dev) {
1396			unsigned int bit_nr =
1397				ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
1398			mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1399		}
1400	}
1401	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
1402	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1403		for (i = 0; i < 4; i++)
1404			outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
1405		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1406	}
1407}
1408
1409static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1410{
1411	struct epic_private *np = netdev_priv(dev);
1412
1413	strcpy (info->driver, DRV_NAME);
1414	strcpy (info->version, DRV_VERSION);
1415	strcpy (info->bus_info, pci_name(np->pci_dev));
1416}
1417
1418static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1419{
1420	struct epic_private *np = netdev_priv(dev);
1421	int rc;
1422
1423	spin_lock_irq(&np->lock);
1424	rc = mii_ethtool_gset(&np->mii, cmd);
1425	spin_unlock_irq(&np->lock);
1426
1427	return rc;
1428}
1429
1430static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1431{
1432	struct epic_private *np = netdev_priv(dev);
1433	int rc;
1434
1435	spin_lock_irq(&np->lock);
1436	rc = mii_ethtool_sset(&np->mii, cmd);
1437	spin_unlock_irq(&np->lock);
1438
1439	return rc;
1440}
1441
1442static int netdev_nway_reset(struct net_device *dev)
1443{
1444	struct epic_private *np = netdev_priv(dev);
1445	return mii_nway_restart(&np->mii);
1446}
1447
1448static u32 netdev_get_link(struct net_device *dev)
1449{
1450	struct epic_private *np = netdev_priv(dev);
1451	return mii_link_ok(&np->mii);
1452}
1453
1454static u32 netdev_get_msglevel(struct net_device *dev)
1455{
1456	return debug;
1457}
1458
1459static void netdev_set_msglevel(struct net_device *dev, u32 value)
1460{
1461	debug = value;
1462}
1463
1464static int ethtool_begin(struct net_device *dev)
1465{
1466	unsigned long ioaddr = dev->base_addr;
1467	/* power-up, if interface is down */
1468	if (! netif_running(dev)) {
1469		outl(0x0200, ioaddr + GENCTL);
1470		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
1471	}
1472	return 0;
1473}
1474
1475static void ethtool_complete(struct net_device *dev)
1476{
1477	unsigned long ioaddr = dev->base_addr;
1478	/* power-down, if interface is down */
1479	if (! netif_running(dev)) {
1480		outl(0x0008, ioaddr + GENCTL);
1481		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
1482	}
1483}
1484
1485static const struct ethtool_ops netdev_ethtool_ops = {
1486	.get_drvinfo		= netdev_get_drvinfo,
1487	.get_settings		= netdev_get_settings,
1488	.set_settings		= netdev_set_settings,
1489	.nway_reset		= netdev_nway_reset,
1490	.get_link		= netdev_get_link,
1491	.get_msglevel		= netdev_get_msglevel,
1492	.set_msglevel		= netdev_set_msglevel,
1493	.begin			= ethtool_begin,
1494	.complete		= ethtool_complete
1495};
1496
1497static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1498{
1499	struct epic_private *np = netdev_priv(dev);
1500	long ioaddr = dev->base_addr;
1501	struct mii_ioctl_data *data = if_mii(rq);
1502	int rc;
1503
1504	/* power-up, if interface is down */
1505	if (! netif_running(dev)) {
1506		outl(0x0200, ioaddr + GENCTL);
1507		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
1508	}
1509
1510	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
1511	spin_lock_irq(&np->lock);
1512	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
1513	spin_unlock_irq(&np->lock);
1514
1515	/* power-down, if interface is down */
1516	if (! netif_running(dev)) {
1517		outl(0x0008, ioaddr + GENCTL);
1518		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
1519	}
1520	return rc;
1521}
1522
1523
1524static void __devexit epic_remove_one (struct pci_dev *pdev)
1525{
1526	struct net_device *dev = pci_get_drvdata(pdev);
1527	struct epic_private *ep = netdev_priv(dev);
1528
1529	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1530	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1531	unregister_netdev(dev);
1532#ifndef USE_IO_OPS
1533	iounmap((void*) dev->base_addr);
1534#endif
1535	pci_release_regions(pdev);
1536	free_netdev(dev);
1537	pci_disable_device(pdev);
1538	pci_set_drvdata(pdev, NULL);
1539	/* pci_power_off(pdev, -1); */
1540}
1541
1542
1543#ifdef CONFIG_PM
1544
1545static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
1546{
1547	struct net_device *dev = pci_get_drvdata(pdev);
1548	long ioaddr = dev->base_addr;
1549
1550	if (!netif_running(dev))
1551		return 0;
1552	epic_pause(dev);
1553	/* Put the chip into low-power mode. */
1554	outl(0x0008, ioaddr + GENCTL);
1555	/* pci_power_off(pdev, -1); */
1556	return 0;
1557}
1558
1559
/* PM resume: re-initialize the chip if the interface was running. */
static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* A down interface needs no hardware restart. */
	if (!netif_running(dev))
		return 0;

	epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}
1570
1571#endif /* CONFIG_PM */
1572
1573
/* PCI driver glue: probe/remove entry points plus the optional
   power-management hooks. */
static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= __devexit_p(epic_remove_one),
#ifdef CONFIG_PM
	.suspend	= epic_suspend,
	.resume		= epic_resume,
#endif /* CONFIG_PM */
};
1584
1585
1586static int __init epic_init (void)
1587{
1588/* when a module, this is printed whether or not devices are found in probe */
1589#ifdef MODULE
1590	printk (KERN_INFO "%s%s",
1591		version, version2);
1592#endif
1593
1594	return pci_register_driver(&epic_driver);
1595}
1596
1597
1598static void __exit epic_cleanup (void)
1599{
1600	pci_unregister_driver (&epic_driver);
1601}
1602
1603
/* Register the module's load/unload entry points with the kernel. */
module_init(epic_init);
module_exit(epic_cleanup);
1606