/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
/*
	Written/copyright 1997-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is for the SMC83c170/175 "EPIC" series, as used on the
	SMC EtherPower II 9432 PCI adapter, and several CardBus cards.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Information and updates available at
	http://www.scyld.com/network/epic100.html
	[this link no longer provides anything useful -jgarzik]

	---------------------------------------------------------------------

*/

#define DRV_NAME        "epic100"
#define DRV_VERSION     "2.1"
#define DRV_RELDATE     "Sept 11, 2006"
31
32/* The user-configurable values.
33   These may be modified when a driver module is loaded.*/
34
35static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
36
37/* Used to pass the full-duplex flag, etc. */
38#define MAX_UNITS 8		/* More are supported, limit only on options */
39static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
40static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
41
42/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
43   Setting to > 1518 effectively disables this feature. */
44static int rx_copybreak;
45
46/* Operational parameters that are set at compile time. */
47
48/* Keep the ring sizes a power of two for operational efficiency.
49   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
50   Making the Tx ring too large decreases the effectiveness of channel
51   bonding and packet priority.
52   There are no ill effects from too-large receive rings. */
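/* Note (illustrative): because the ring sizes below are powers of two,
   index arithmetic such as
	entry = ep->cur_rx % RX_RING_SIZE;
   compiles to a mask operation, with RX_RING_SIZE == 256 effectively
	entry = ep->cur_rx & 0xff;
   which is the efficiency referred to above. */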
#define TX_RING_SIZE	256
#define TX_QUEUE_LEN	240		/* Limit ring entries actually used.  */
#define RX_RING_SIZE	256
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct epic_tx_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct epic_rx_desc))

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/

/* Bytes transferred to chip before transmission starts. */
/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
#define TX_FIFO_THRESH 256
#define RX_FIFO_THRESH 1		/* 0-3: 0==32, 1==64, 2==96, 3==128 bytes. */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
static char version2[] __devinitdata =
"  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-7)");
MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
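/* Example (illustrative only; the values are arbitrary):
 *	modprobe epic100 debug=3 options=0x10,0x10 rx_copybreak=200
 * raises the message level, forces full duplex on the first two cards,
 * and copies received frames shorter than 200 bytes into freshly
 * allocated skbuffs. */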

/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the SMC "EPIC/100", the SMC
single-chip Ethernet controllers for PCI.  This chip is used on
the SMC EtherPower II boards.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS will assign the
PCI INTA signal to a (preferably otherwise unused) system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

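The driver keeps two circular arrays of descriptors, ep->rx_ring and
ep->tx_ring, in PCI-consistent memory.  Each descriptor holds a buffer
address, a length, the DescOwn handshake bit and a link to the next
descriptor, with the last entry linking back to the first.  The chip
walks each chain while the driver tracks its position with the
cur_rx/dirty_rx and cur_tx/dirty_tx index pairs.
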
IIIb. References

http://www.smsc.com/main/datasheets/83c171.pdf
http://www.smsc.com/main/datasheets/83c175.pdf
http://scyld.com/expert/NWay.html
http://www.national.com/pf/DP/DP83840A.html

IIIc. Errata

*/


enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };

#define EPIC_TOTAL_SIZE 0x100
#define USE_IO_OPS 1

typedef enum {
	SMSC_83C170_0,
	SMSC_83C170,
	SMSC_83C175,
} chip_t;


struct epic_chip_info {
	const char *name;
	int drv_flags;				/* Driver use, intended as capability flags. */
};


/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR | NO_MII | MII_PWRDWN },
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR },
	{ "SMSC EPIC/C 83c175",		TYPE2_INTR | MII_PWRDWN },
};


static struct pci_device_id epic_pci_tbl[] = {
	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
	{ 0,}
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);


#ifndef USE_IO_OPS
#undef inb
#undef inw
#undef inl
#undef outb
#undef outw
#undef outl
#define inb readb
#define inw readw
#define inl readl
#define outb writeb
#define outw writew
#define outl writel
#endif

/* Offsets to registers, using the (ugh) SMC names. */
enum epic_registers {
  COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
  PCIBurstCnt=0x18,
  TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,	/* Rx error counters. */
  MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
  LAN0=64,						/* MAC address. */
  MC0=80,						/* Multicast filter table. */
  RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
  PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
};

/* Interrupt register bits, using my own meaningful names. */
enum IntrStatus {
	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
};
enum CommandBits {
	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};

#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */

#define EpicNapiEvent	(TxEmpty | TxDone | \
			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)

static const u16 media2miictl[16] = {
	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
	0, 0, 0, 0,  0, 0, 0, 0 };

/* The EPIC100 Rx and Tx buffer descriptors. */

struct epic_tx_desc {
	u32 txstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

struct epic_rx_desc {
	u32 rxstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

enum desc_status_bits {
	DescOwn=0x8000,
};
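
/* Ownership handshake, as used by epic_init_ring() and epic_rx() below
   (sketch for illustration): the driver hands a descriptor to the chip
   by setting DescOwn, and the chip clears the bit when it is done:

	desc->buflength = cpu_to_le32(ep->rx_buf_sz);
	desc->rxstatus = cpu_to_le32(DescOwn);	// chip may fill it now
	...
	if (!(desc->rxstatus & cpu_to_le32(DescOwn)))
		;				// complete: status/length valid
 */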

#define PRIV_ALIGN	15	/* Required alignment mask */
struct epic_private {
	struct epic_rx_desc *rx_ring;
	struct epic_tx_desc *tx_ring;
	/* The saved address of a sent-in-place packet/buffer, for dev_kfree_skb(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;

	/* Ring pointers. */
	spinlock_t lock;				/* Group with Tx control cache line. */
	spinlock_t napi_lock;
	unsigned int reschedule_in_poll;
	unsigned int cur_tx, dirty_tx;

	unsigned int cur_rx, dirty_rx;
	u32 irq_mask;
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */

	struct pci_dev *pci_dev;			/* PCI bus location. */
	int chip_id, chip_flags;

	struct net_device_stats stats;
	struct timer_list timer;			/* Media selection timer. */
	int tx_threshold;
	unsigned char mc_filter[8];
	signed char phys[4];				/* MII device addresses. */
	u16 advertising;				/* NWay media advertisement */
	int mii_phy_cnt;
	struct mii_if_info mii;
	unsigned int tx_full:1;				/* The Tx queue is full. */
	unsigned int default_port:4;			/* Last dev->if_port value. */
};

static int epic_open(struct net_device *dev);
static int read_eeprom(long ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(unsigned long data);
static void epic_tx_timeout(struct net_device *dev);
static void epic_init_ring(struct net_device *dev);
static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int epic_rx(struct net_device *dev, int budget);
static int epic_poll(struct net_device *dev, int *budget);
static irqreturn_t epic_interrupt(int irq, void *dev_instance);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);



static int __devinit epic_init_one (struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int card_idx = -1;
	long ioaddr;
	int chip_idx = (int) ent->driver_data;
	int irq;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk (KERN_INFO "%s" KERN_INFO "%s",
			version, version2);
#endif

	card_idx++;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out;
	irq = pdev->irq;

	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof (*ep));
	if (!dev) {
		dev_err(&pdev->dev, "no memory for eth device\n");
		goto err_out_free_res;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#ifdef USE_IO_OPS
	ioaddr = pci_resource_start (pdev, 0);
#else
	ioaddr = pci_resource_start (pdev, 1);
	ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_out_free_netdev;
	}
#endif

	pci_set_drvdata(pdev, dev);
	ep = dev->priv;
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = (struct epic_tx_desc *)ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = (struct epic_rx_desc *)ring_space;
	ep->rx_ring_dma = ring_dma;

	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	dev->base_addr = ioaddr;
	dev->irq = irq;

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);
	ep->reschedule_in_poll = 0;

	/* Bring the chip out of low-power mode. */
	outl(0x4200, ioaddr + GENCTL);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

	/* Turn on the MII transceiver. */
	outl(0x12, ioaddr + MIICfg);
	if (chip_idx == 1)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	outl(0x0200, ioaddr + GENCTL);

	/* Note: the '175 does not have a serial EEPROM. */
	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));

	if (debug > 2) {
		dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
		for (i = 0; i < 64; i++)
			printk(" %4.4x%s", read_eeprom(ioaddr, i),
				   i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII transceiver #%d control "
					"%4.4x status %4.4x.\n",
					phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			dev_info(&pdev->dev,
				"Autonegotiation advertising %4.4x link "
				   "partner %4.4x.\n",
				   ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if (!(ep->chip_flags & NO_MII)) {
			dev_warn(&pdev->dev,
				"***WARNING***: No MII transceiver found!\n");
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
	outl(0x0008, ioaddr + GENCTL);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		dev_info(&pdev->dev, "Forced full duplex requested.\n");
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->open = &epic_open;
	dev->hard_start_xmit = &epic_start_xmit;
	dev->stop = &epic_close;
	dev->get_stats = &epic_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->tx_timeout = &epic_tx_timeout;
	dev->poll = epic_poll;
	dev->weight = 64;

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
		   dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x.\n", dev->dev_addr[i]);

out:
	return ret;

err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
err_out_iounmap:
#ifndef USE_IO_OPS
	iounmap(ioaddr);
err_out_free_netdev:
#endif
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}

/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS			0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x08	/* EEPROM chip data in. */
#define EE_WRITE_0		0x01
#define EE_WRITE_1		0x09
#define EE_DATA_READ	0x10	/* EEPROM chip data out. */
#define EE_ENB			(0x0001 | EE_CS)

/* Delay between EEPROM clock transitions.
   This serves to flush the operation to the PCI bus.
 */

#define eeprom_delay()	inl(ee_addr)

/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD	(5 << 6)
#define EE_READ64_CMD	(6 << 6)
#define EE_READ256_CMD	(6 << 8)
#define EE_ERASE_CMD	(7 << 6)
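
/* Worked example (illustrative): with a 64-word EEPROM,
   read_eeprom(ioaddr, 5) below shifts out
	read_cmd = EE_READ64_CMD | 5 = (6 << 6) | 5 = 0x185
   MSB first (13 bits, i = 12..0), then clocks 16 data bits back in.
   The leading '1' of the 3-bit read opcode (110) is the always-set
   bit mentioned above. */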

static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
{
	long ioaddr = dev->base_addr;

	outl(0x00000000, ioaddr + INTMASK);
}

static inline void __epic_pci_commit(long ioaddr)
{
#ifndef USE_IO_OPS
	inl(ioaddr + INTMASK);
#endif
}

static inline void epic_napi_irq_off(struct net_device *dev,
				     struct epic_private *ep)
{
	long ioaddr = dev->base_addr;

	outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
	__epic_pci_commit(ioaddr);
}

static inline void epic_napi_irq_on(struct net_device *dev,
				    struct epic_private *ep)
{
	long ioaddr = dev->base_addr;

	/* No need to commit possible posted write */
	outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
}

static int __devinit read_eeprom(long ioaddr, int location)
{
	int i;
	int retval = 0;
	long ee_addr = ioaddr + EECTL;
	int read_cmd = location |
		(inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

	outl(EE_ENB & ~EE_CS, ee_addr);
	outl(EE_ENB, ee_addr);

	/* Shift the read command bits out. */
	for (i = 12; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
		outl(EE_ENB | dataval, ee_addr);
		eeprom_delay();
		outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay();
	}
	outl(EE_ENB, ee_addr);

	for (i = 16; i > 0; i--) {
		outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay();
		retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
		outl(EE_ENB, ee_addr);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	outl(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}

#define MII_READOP		1
#define MII_WRITEOP		2
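
/* The MIICtrl command word, inferred from the code below, is
	(phy_id << 9) | (register << 4) | opcode
   For example, reading the BMSR (register 1) of PHY 3 writes
	(3 << 9) | (1 << 4) | MII_READOP = 0x611
   and the opcode bit clears when the transaction completes. */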
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long ioaddr = dev->base_addr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	outl(read_cmd, ioaddr + MIICtrl);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--) {
		barrier();
		if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug. */
			if (phy_id == 1 && location < 6
				&& inw(ioaddr + MIIData) == 0xffff) {
				outl(read_cmd, ioaddr + MIICtrl);
				continue;
			}
			return inw(ioaddr + MIIData);
		}
	}
	return 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
	long ioaddr = dev->base_addr;
	int i;

	outw(value, ioaddr + MIIData);
	outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
	for (i = 10000; i > 0; i--) {
		barrier();
		if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
			break;
	}
	return;
}


static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	int i;
	int retval;

	/* Soft reset the chip. */
	outl(0x4001, ioaddr + GENCTL);

	if ((retval = request_irq(dev->irq, &epic_interrupt, IRQF_SHARED, dev->name, dev)))
		return retval;

	epic_init_ring(dev);

	outl(0x4000, ioaddr + GENCTL);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
	if (ep->chip_flags & MII_PWRDWN)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

#if defined(__powerpc__) || defined(__sparc__)		    /* Big endian */
	outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
	inl(ioaddr + GENCTL);
	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
	outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
	inl(ioaddr + GENCTL);
	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif

	udelay(20);

	for (i = 0; i < 3; i++)
		outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

	ep->tx_threshold = TX_FIFO_THRESH;
	outl(ep->tx_threshold, ioaddr + TxThresh);

	if (media2miictl[dev->if_port & 15]) {
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
					   "status %4.4x.\n",
					   dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
					   " register read of %4.4x.\n", dev->name,
					   ep->mii.full_duplex ? "full" : "half",
					   ep->phys[0], mii_lpa);
		}
	}

	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
	outl(ep->tx_ring_dma, ioaddr + PTxCDAR);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	outl(StartRx | RxQueued, ioaddr + COMMAND);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun
		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);

	if (debug > 1)
		printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
			   "%s-duplex.\n",
			   dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
			   ep->mii.full_duplex ? "full" : "half");

	/* Set the timer to check for link beat and perhaps switch to
	   an alternate media type. */
	init_timer(&ep->timer);
	ep->timer.expires = jiffies + 3*HZ;
	ep->timer.data = (unsigned long)dev;
	ep->timer.function = &epic_timer;				/* timer handler */
	add_timer(&ep->timer);

	return 0;
}

/* Reset the chip to recover from a PCI transaction error.
   This may occur at interrupt time. */
static void epic_pause(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = dev->priv;

	netif_stop_queue (dev);

	/* Disable interrupts by clearing the interrupt mask. */
	outl(0x00000000, ioaddr + INTMASK);
	/* Stop the chip's Tx and Rx DMA processes. */
	outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);

	/* Update the error counts. */
	if (inw(ioaddr + COMMAND) != 0xffff) {
		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}

static void epic_restart(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = dev->priv;
	int i;

	/* Soft reset the chip. */
	outl(0x4001, ioaddr + GENCTL);

	printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
		   dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

#if defined(__powerpc__) || defined(__sparc__)		    /* Big endian */
	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif
	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
	if (ep->chip_flags & MII_PWRDWN)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

	for (i = 0; i < 3; i++)
		outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

	ep->tx_threshold = TX_FIFO_THRESH;
	outl(ep->tx_threshold, ioaddr + TxThresh);
	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
		sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
	outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
		 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	outl(StartRx | RxQueued, ioaddr + COMMAND);

	/* Enable interrupts by setting the interrupt mask. */
	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun
		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);

	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
		   " interrupt %4.4x.\n",
		   dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
		   (int)inl(ioaddr + INTSTAT));
	return;
}

static void check_media(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
	int negotiated = mii_lpa & ep->mii.advertising;
	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;

	if (ep->mii.force_media)
		return;
	if (mii_lpa == 0xffff)		/* Bogus read */
		return;
	if (ep->mii.full_duplex != duplex) {
		ep->mii.full_duplex = duplex;
		printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
			   " partner capability of %4.4x.\n", dev->name,
			   ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
		outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	}
}

static void epic_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 5*HZ;

	if (debug > 3) {
		printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
			   dev->name, (int)inl(ioaddr + TxSTAT));
		printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
			   "IntStatus %4.4x RxStatus %4.4x.\n",
			   dev->name, (int)inl(ioaddr + INTMASK),
			   (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
	}

	check_media(dev);

	ep->timer.expires = jiffies + next_tick;
	add_timer(&ep->timer);
}

static void epic_tx_timeout(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;

	if (debug > 0) {
		printk(KERN_WARNING "%s: Transmit timeout using MII device, "
			   "Tx status %4.4x.\n",
			   dev->name, (int)inw(ioaddr + TxSTAT));
		if (debug > 1) {
			printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
				   dev->name, ep->dirty_tx, ep->cur_tx);
		}
	}
	if (inw(ioaddr + TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
		ep->stats.tx_fifo_errors++;
		outl(RestartTx, ioaddr + COMMAND);
	} else {
		epic_restart(dev);
		outl(TxQueued, dev->base_addr + COMMAND);
	}

	dev->trans_start = jiffies;
	ep->stats.tx_errors++;
	if (!ep->tx_full)
		netif_wake_queue(dev);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void epic_init_ring(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	int i;

	ep->tx_full = 0;
	ep->dirty_tx = ep->cur_tx = 0;
	ep->cur_rx = ep->dirty_rx = 0;
	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		ep->rx_ring[i].rxstatus = 0;
		ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
		ep->rx_ring[i].next = ep->rx_ring_dma +
				      (i+1)*sizeof(struct epic_rx_desc);
		ep->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	ep->rx_ring[i-1].next = ep->rx_ring_dma;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
		ep->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
		ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
	}
	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		ep->tx_skbuff[i] = NULL;
		ep->tx_ring[i].txstatus = 0x0000;
		ep->tx_ring[i].next = ep->tx_ring_dma +
			(i+1)*sizeof(struct epic_tx_desc);
	}
	ep->tx_ring[i-1].next = ep->tx_ring_dma;
	return;
}

static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
						    skb->len, PCI_DMA_TODEVICE);
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
		| cpu_to_le32(DescOwn);

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	outl(TxQueued, dev->base_addr + COMMAND);

	dev->trans_start = jiffies;
	if (debug > 4)
		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
			   "flag %2.2x Tx status %8.8x.\n",
			   dev->name, (int)skb->len, entry, ctrl_word,
			   (int)inl(dev->base_addr + TxSTAT));

	return 0;
}

static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
			  int status)
{
	struct net_device_stats *stats = &ep->stats;

#ifndef final_version
	/* There was a major error, log it. */
	if (debug > 1)
		printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
		       dev->name, status);
#endif
	stats->tx_errors++;
	if (status & 0x1050)
		stats->tx_aborted_errors++;
	if (status & 0x0008)
		stats->tx_carrier_errors++;
	if (status & 0x0040)
		stats->tx_window_errors++;
	if (status & 0x0010)
		stats->tx_fifo_errors++;
}

static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		if (likely(txstatus & 0x0001)) {
			ep->stats.collisions += (txstatus >> 8) & 15;
			ep->stats.tx_packets++;
			ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		printk(KERN_WARNING
		       "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
		       dev->name, dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	unsigned int handled = 0;
	int status;

	status = inl(ioaddr + INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	outl(status & EpicNormalEvent, ioaddr + INTSTAT);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
				   "intstat=%#8.8x.\n", dev->name, status,
				   (int)inl(ioaddr + INTSTAT));
	}

	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
		spin_lock(&ep->napi_lock);
		if (netif_rx_schedule_prep(dev)) {
			epic_napi_irq_off(dev, ep);
			__netif_rx_schedule(dev);
		} else
			ep->reschedule_in_poll++;
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			ep->stats.tx_fifo_errors++;
			outl(ep->tx_threshold += 128, ioaddr + TxThresh);
			/* Restart the transmit process. */
			outl(RestartTx, ioaddr + COMMAND);
		}
		if (status & PCIBusErr170) {
			printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
					 dev->name, status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		outl(status & 0x7f18, ioaddr + INTSTAT);
	}

out:
	if (debug > 3) {
		printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
				   dev->name, status);
	}

	return IRQ_RETVAL(handled);
}

static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = dev->priv;
	int entry = ep->cur_rx % RX_RING_SIZE;
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
		int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);

		if (debug > 4)
			printk(KERN_DEBUG "  epic_rx() status was %8.8x.\n", status);
		if (--rx_work_limit < 0)
			break;
		if (status & 0x2006) {
			if (debug > 2)
				printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
					   dev->name, status);
			if (status & 0x2000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					   "multiple buffers, status %4.4x!\n", dev->name, status);
				ep->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				ep->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
					   "%d bytes.\n",
					   dev->name, status, pkt_len);
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(ep->pci_dev,
							    ep->rx_ring[entry].bufaddr,
							    ep->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
							       ep->rx_ring[entry].bufaddr,
							       ep->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(ep->pci_dev,
					ep->rx_ring[entry].bufaddr,
					ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->last_rx = jiffies;
			ep->stats.rx_packets++;
			ep->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
			if (skb == NULL)
				break;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			work_done++;
		}
		ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
	}
	return work_done;
}

static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
{
	long ioaddr = dev->base_addr;
	int status;

	status = inl(ioaddr + INTSTAT);

	if (status == EpicRemoved)
		return;
	if (status & RxOverflow)	/* Missed a Rx frame. */
		ep->stats.rx_errors++;
	if (status & (RxOverflow | RxFull))
		outw(RxQueued, ioaddr + COMMAND);
}

static int epic_poll(struct net_device *dev, int *budget)
{
	struct epic_private *ep = dev->priv;
	int work_done = 0, orig_budget;
	long ioaddr = dev->base_addr;

	orig_budget = (*budget > dev->quota) ? dev->quota : *budget;

rx_action:

	epic_tx(dev, ep);

	work_done += epic_rx(dev, *budget);

	epic_rx_err(dev, ep);

	*budget -= work_done;
	dev->quota -= work_done;

	if (netif_running(dev) && (work_done < orig_budget)) {
		unsigned long flags;
		int more;

		/* A bit baroque but it avoids a (space hungry) spin_unlock */

		spin_lock_irqsave(&ep->napi_lock, flags);

		more = ep->reschedule_in_poll;
		if (!more) {
			__netif_rx_complete(dev);
			outl(EpicNapiEvent, ioaddr + INTSTAT);
			epic_napi_irq_on(dev, ep);
		} else
			ep->reschedule_in_poll--;

		spin_unlock_irqrestore(&ep->napi_lock, flags);

		if (more)
			goto rx_action;
	}

	return (work_done >= orig_budget);
}

static int epic_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = dev->priv;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
			   dev->name, (int)inl(ioaddr + INTSTAT));

	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(dev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
					 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	outl(0x0008, ioaddr + GENCTL);

	return 0;
}

static struct net_device_stats *epic_get_stats(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;

	if (netif_running(dev)) {
		/* Update the error counts. */
		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
	}

	return &ep->stats;
}

/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling the mc_filter table.  This is
   non-deterministic when re-entered but still correct. */

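/* Hash mapping used below (for illustration): bit_nr is the low six
   bits of the little-endian CRC-32 of the station address, selecting
   one of the 64 filter bits; byte bit_nr >> 3 of mc_filter[], bit
   bit_nr & 7 within that byte, must be set for the chip to accept
   the multicast address. */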
static void set_rx_mode(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = dev->priv;
	unsigned char mc_filter[8];		 /* Multicast hash filter */
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		outl(0x002C, ioaddr + RxCtrl);
		/* Unconditionally log net taps. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if ((dev->mc_count > 0)  ||  (dev->flags & IFF_ALLMULTI)) {
		/* There is apparently a chip bug, so the multicast filter
		   is never enabled. */
		/* Too many to filter perfectly -- accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		outl(0x000C, ioaddr + RxCtrl);
	} else if (dev->mc_count == 0) {
		outl(0x0004, ioaddr + RxCtrl);
		return;
	} else {					/* Never executed, for now. */
		struct dev_mc_list *mclist;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			unsigned int bit_nr =
				ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit_nr >> 3] |= (1 << (bit_nr & 7));
		}
	}
	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
		for (i = 0; i < 4; i++)
			outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
	}
	return;
}

static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct epic_private *np = dev->priv;

	strcpy (info->driver, DRV_NAME);
	strcpy (info->version, DRV_VERSION);
	strcpy (info->bus_info, pci_name(np->pci_dev));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct epic_private *np = dev->priv;
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct epic_private *np = dev->priv;
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct epic_private *np = dev->priv;
	return mii_nway_restart(&np->mii);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct epic_private *np = dev->priv;
	return mii_link_ok(&np->mii);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static int ethtool_begin(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0200, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	}
	return 0;
}

static void ethtool_complete(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0008, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
	}
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_sg			= ethtool_op_get_sg,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.begin			= ethtool_begin,
	.complete		= ethtool_complete
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct epic_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0200, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	}

	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0008, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
	}
	return rc;
}


static void __devexit epic_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct epic_private *ep = dev->priv;

	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
	unregister_netdev(dev);
#ifndef USE_IO_OPS
	iounmap((void*) dev->base_addr);
#endif
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	/* pci_power_off(pdev, -1); */
}


#ifdef CONFIG_PM

static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	long ioaddr = dev->base_addr;

	if (!netif_running(dev))
		return 0;
	epic_pause(dev);
	/* Put the chip into low-power mode. */
	outl(0x0008, ioaddr + GENCTL);
	/* pci_power_off(pdev, -1); */
	return 0;
}


static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		return 0;
	epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}

#endif /* CONFIG_PM */


static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= __devexit_p(epic_remove_one),
#ifdef CONFIG_PM
	.suspend	= epic_suspend,
	.resume		= epic_resume,
#endif /* CONFIG_PM */
};


static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk (KERN_INFO "%s" KERN_INFO "%s",
		version, version2);
#endif

	return pci_register_driver(&epic_driver);
}


static void __exit epic_cleanup (void)
{
	pci_unregister_driver (&epic_driver);
}


module_init(epic_init);
module_exit(epic_cleanup);