1/* xircom_tulip_cb.c: A Xircom CBE-100 ethernet driver for Linux. */
2/*
3	Written/copyright 1994-1999 by Donald Becker.
4
5	This software may be used and distributed according to the terms
6	of the GNU General Public License, incorporated herein by reference.
7
8	The author may be reached as becker@scyld.com, or C/O
9	Scyld Computing Corporation
10	410 Severn Ave., Suite 210
11	Annapolis MD 21403
12
13*/
14
15#define DRV_NAME	"xircom_tulip_cb"
16#define DRV_VERSION	"0.92"
17#define DRV_RELDATE	"June 27, 2006"
18
19/* A few user-configurable values. */
20
21#define xircom_debug debug
22#ifdef XIRCOM_DEBUG
23static int xircom_debug = XIRCOM_DEBUG;
24#else
25static int xircom_debug = 1;
26#endif
27
28/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
29static int max_interrupt_work = 25;
30
31#define MAX_UNITS 4
32/* Used to pass the full-duplex flag, etc. */
33static int full_duplex[MAX_UNITS];
34static int options[MAX_UNITS];
35static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */
36
37/* Keep the ring sizes a power of two for efficiency.
38   Making the Tx ring too large decreases the effectiveness of channel
39   bonding and packet priority.
40   There are no ill effects from too-large receive rings. */
41#define TX_RING_SIZE	16
42#define RX_RING_SIZE	32
43
44/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
45#ifdef __alpha__
46static int rx_copybreak = 1518;
47#else
48static int rx_copybreak = 100;
49#endif
50
51/*
52  Set the bus performance register.
53	Typical: Set 16 longword cache alignment, no burst limit.
54	Cache alignment bits 15:14	     Burst length 13:8
55		0000	No alignment  0x00000000 unlimited		0800 8 longwords
56		4000	8  longwords		0100 1 longword		1000 16 longwords
57		8000	16 longwords		0200 2 longwords	2000 32 longwords
58		C000	32  longwords		0400 4 longwords
59	Warning: many older 486 systems are broken and require setting 0x00A04800
60	   8 longword cache alignment, 8 longword burst.
61	ToDo: Non-Intel setting could be better.
62*/
63
64#if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
65static int csr0 = 0x01A00000 | 0xE000;
66#elif defined(__powerpc__)
67static int csr0 = 0x01B00000 | 0x8000;
68#elif defined(CONFIG_SPARC)
69static int csr0 = 0x01B00080 | 0x8000;
70#elif defined(__i386__)
71static int csr0 = 0x01A00000 | 0x8000;
72#else
73#warning Processor architecture undefined!
74static int csr0 = 0x00A00000 | 0x4800;
75#endif
76
77/* Operational parameters that usually are not changed. */
78/* Time in jiffies before concluding the transmitter is hung. */
79#define TX_TIMEOUT		(4 * HZ)
80#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
81#define PKT_SETUP_SZ		192			/* Size of the setup frame */
82
83/* PCI registers */
84#define PCI_POWERMGMT 	0x40
85
86#include <linux/module.h>
87#include <linux/moduleparam.h>
88#include <linux/kernel.h>
89#include <linux/pci.h>
90#include <linux/netdevice.h>
91#include <linux/etherdevice.h>
92#include <linux/delay.h>
93#include <linux/init.h>
94#include <linux/mii.h>
95#include <linux/ethtool.h>
96#include <linux/crc32.h>
97
98#include <asm/io.h>
99#include <asm/processor.h>	/* Processor type for cache alignment. */
100#include <asm/uaccess.h>
101
102
103/* These identify the driver base version and may not be removed. */
104static char version[] __devinitdata =
105KERN_INFO DRV_NAME ".c derived from tulip.c:v0.91 4/14/99 becker@scyld.com\n"
106KERN_INFO " unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE "\n";
107
108MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
109MODULE_DESCRIPTION("Xircom CBE-100 ethernet driver");
110MODULE_LICENSE("GPL v2");
111MODULE_VERSION(DRV_VERSION);
112
113module_param(debug, int, 0);
114module_param(max_interrupt_work, int, 0);
115module_param(rx_copybreak, int, 0);
116module_param(csr0, int, 0);
117
118module_param_array(options, int, NULL, 0);
119module_param_array(full_duplex, int, NULL, 0);
120
121#define RUN_AT(x) (jiffies + (x))
122
123/*
124				Theory of Operation
125
126I. Board Compatibility
127
128This device driver was forked from the driver for the DECchip "Tulip",
129Digital's single-chip ethernet controllers for PCI.  It supports Xircom's
130almost-Tulip-compatible CBE-100 CardBus adapters.
131
132II. Board-specific settings
133
134PCI bus devices are configured by the system at boot time, so no jumpers
135need to be set on the board.  The system BIOS preferably should assign the
136PCI INTA signal to an otherwise unused system IRQ line.
137
138III. Driver operation
139
140IIIa. Ring buffers
141
142The Xircom can use either ring buffers or lists of Tx and Rx descriptors.
143This driver uses statically allocated rings of Rx and Tx descriptors, set at
144compile time by RX/TX_RING_SIZE.  This version of the driver allocates skbuffs
145for the Rx ring buffers at open() time and passes the skb->data field to the
146Xircom as receive data buffers.  When an incoming frame is less than
147RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is
148copied to the new skbuff.  When the incoming frame is larger, the skbuff is
149passed directly up the protocol stack and replaced by a newly allocated
150skbuff.
151
152The RX_COPYBREAK value is chosen to trade-off the memory wasted by
153using a full-sized skbuff for small frames vs. the copying costs of larger
154frames.  For small frames the copying cost is negligible (esp. considering
155that we are pre-loading the cache with immediately useful header
156information).  For large frames the copying cost is non-trivial, and the
157larger copy might flush the cache of useful data.  A subtle aspect of this
158choice is that the Xircom only receives into longword aligned buffers, thus
159the IP header at offset 14 isn't longword aligned for further processing.
160Copied frames are put into the new skbuff at an offset of "+2", thus copying
161has the beneficial effect of aligning the IP header and preloading the
162cache.
163
164IIIC. Synchronization
165The driver runs as two independent, single-threaded flows of control.  One
166is the send-packet routine, which enforces single-threaded use by the
167dev->tbusy flag.  The other thread is the interrupt handler, which is single
168threaded by the hardware and other software.
169
170The send packet thread has partial control over the Tx ring and 'dev->tbusy'
171flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
172queue slot is empty, it clears the tbusy flag when finished otherwise it sets
173the 'tp->tx_full' flag.
174
175The interrupt handler has exclusive control over the Rx ring and records stats
176from the Tx ring.  (The Tx-done interrupt can't be selectively turned off, so
177we can't avoid the interrupt overhead by having the Tx routine reap the Tx
178stats.)	 After reaping the stats, it marks the queue entry as empty by setting
179the 'base' to zero.	 Iff the 'tp->tx_full' flag is set, it clears both the
180tx_full and tbusy flags.
181
182IV. Notes
183
184IVb. References
185
186http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
187http://www.digital.com  (search for current 21*4* datasheets and "21X4 SROM")
188http://www.national.com/pf/DP/DP83840A.html
189
190IVc. Errata
191
192*/
193
/* A full-duplex map for media types. */
enum MediaIs {
	MediaIsFD = 1, MediaAlwaysFD=2, MediaIsMII=4, MediaIsFx=8,
	MediaIs100=16};
/* Capability flags (an OR of the MediaIs* bits above) indexed by media
   type, i.e. by the dev->if_port / tp->default_port value. */
static const char media_cap[] =
{0,0,0,16,  3,19,16,24,  27,4,7,5, 0,20,23,20 };

/* Offsets to the Command and Status Registers, "CSRs".  All accesses
   must be longword instructions and quadword aligned. */
enum xircom_offsets {
	CSR0=0,    CSR1=0x08, CSR2=0x10, CSR3=0x18, CSR4=0x20, CSR5=0x28,
	CSR6=0x30, CSR7=0x38, CSR8=0x40, CSR9=0x48, CSR10=0x50, CSR11=0x58,
	CSR12=0x60, CSR13=0x68, CSR14=0x70, CSR15=0x78, CSR16=0x04, };

/* The bits in the CSR5 status registers, mostly interrupt sources. */
enum status_bits {
	LinkChange=0x08000000,
	NormalIntr=0x10000, NormalIntrMask=0x00014045,
	AbnormalIntr=0x8000, AbnormalIntrMask=0x0a00a5a2,
	ReservedIntrMask=0xe0001a18,
	EarlyRxIntr=0x4000, BusErrorIntr=0x2000,
	EarlyTxIntr=0x400, RxDied=0x100, RxNoBuf=0x80, RxIntr=0x40,
	TxFIFOUnderflow=0x20, TxNoBuf=0x04, TxDied=0x02, TxIntr=0x01,
};

/* Bits in CSR0, the bus mode register. */
enum csr0_control_bits {
	EnableMWI=0x01000000, EnableMRL=0x00800000,
	EnableMRM=0x00200000, EqualBusPrio=0x02,
	SoftwareReset=0x01,
};

/* Bits in CSR6, the operation mode register.  The Reserved*Mask values
   are forced to 0/1 respectively by outl_CSR6() before every write. */
enum csr6_control_bits {
	ReceiveAllBit=0x40000000, AllMultiBit=0x80, PromiscBit=0x40,
	HashFilterBit=0x01, FullDuplexBit=0x0200,
	TxThresh10=0x400000, TxStoreForw=0x200000,
	TxThreshMask=0xc000, TxThreshShift=14,
	EnableTx=0x2000, EnableRx=0x02,
	ReservedZeroMask=0x8d930134, ReservedOneMask=0x320c0000,
	EnableTxRx=(EnableTx | EnableRx),
};
234
235
/* Per-chip feature flags used in xircom_chip_table.flags. */
enum tbl_flag {
	HAS_MII=1, HAS_ACPI=2,
};
/* Static description of each supported chip, indexed by enum chips
   (and by the PCI id table's driver_data in xircom_init_one()). */
static struct xircom_chip_table {
	char *chip_name;
	int valid_intrs;			/* CSR7 interrupt enable settings */
	int flags;
} xircom_tbl[] = {
  { "Xircom Cardbus Adapter",
	LinkChange | NormalIntr | AbnormalIntr | BusErrorIntr |
	RxDied | RxNoBuf | RxIntr | TxFIFOUnderflow | TxNoBuf | TxDied | TxIntr,
	HAS_MII | HAS_ACPI, },
  { NULL, },
};
/* This matches the table above. */
enum chips {
	X3201_3,
};
254
255
/* The Xircom Rx and Tx buffer descriptors.  The layout is dictated by
   the hardware: a status word, a length/control word, and two buffer
   pointers.  The chip fetches these via DMA, so buffer1/buffer2 hold
   bus addresses (virt_to_bus() values elsewhere in this driver). */
struct xircom_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};

struct xircom_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;				/* We use only buffer 1.  */
};

/* Bits in the Tx descriptor status word (desc 0). */
enum tx_desc0_status_bits {
	Tx0DescOwned=0x80000000, Tx0DescError=0x8000, Tx0NoCarrier=0x0800,
	Tx0LateColl=0x0200, Tx0ManyColl=0x0100, Tx0Underflow=0x02,
};
/* Bits in the Tx descriptor length/control word (desc 1). */
enum tx_desc1_status_bits {
	Tx1ComplIntr=0x80000000, Tx1LastSeg=0x40000000, Tx1FirstSeg=0x20000000,
	Tx1SetupPkt=0x08000000, Tx1DisableCRC=0x04000000, Tx1RingWrap=0x02000000,
	Tx1ChainDesc=0x01000000, Tx1NoPad=0x800000, Tx1HashSetup=0x400000,
	Tx1WholePkt=(Tx1FirstSeg | Tx1LastSeg),
};
/* Bits in the Rx descriptor status word (desc 0). */
enum rx_desc0_status_bits {
	Rx0DescOwned=0x80000000, Rx0DescError=0x8000, Rx0NoSpace=0x4000,
	Rx0Runt=0x0800, Rx0McastPkt=0x0400, Rx0FirstSeg=0x0200, Rx0LastSeg=0x0100,
	Rx0HugeFrame=0x80, Rx0CRCError=0x02,
	Rx0WholePkt=(Rx0FirstSeg | Rx0LastSeg),
};
/* Bits in the Rx descriptor length/control word (desc 1). */
enum rx_desc1_status_bits {
	Rx1RingWrap=0x02000000, Rx1ChainDesc=0x01000000,
};
288
/* Per-interface driver state, stored as the netdev private area. */
struct xircom_private {
	struct xircom_rx_desc rx_ring[RX_RING_SIZE];	/* descriptor rings handed to the chip */
	struct xircom_tx_desc tx_ring[TX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];

	/* The X3201-3 requires 4-byte aligned tx bufs */
	struct sk_buff* tx_aligned_skbuff[TX_RING_SIZE];

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)];	/* Pseudo-Tx frame to init address table. */
	int chip_id;						/* Index into xircom_tbl[]. */
	struct net_device_stats stats;
	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
	unsigned int tx_full:1;				/* The Tx queue is full. */
	unsigned int speed100:1;			/* Running at 100 Mbit (else 10). */
	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
	unsigned int autoneg:1;				/* Autonegotiation enabled. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
	unsigned int open:1;				/* Set in xircom_open(). */
	unsigned int csr0;					/* CSR0 setting. */
	unsigned int csr6;					/* Current CSR6 control settings. */
	u16 to_advertise;					/* NWay capabilities advertised.  */
	u16 advertising[4];					/* Per-PHY MII advertise register values. */
	signed char phys[4], mii_cnt;		/* MII device addresses. */
	int saved_if_port;					/* dev->if_port as saved by xircom_up(). */
	struct pci_dev *pdev;
	spinlock_t lock;
};
320
321static int mdio_read(struct net_device *dev, int phy_id, int location);
322static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
323static void xircom_up(struct net_device *dev);
324static void xircom_down(struct net_device *dev);
325static int xircom_open(struct net_device *dev);
326static void xircom_tx_timeout(struct net_device *dev);
327static void xircom_init_ring(struct net_device *dev);
328static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev);
329static int xircom_rx(struct net_device *dev);
330static void xircom_media_change(struct net_device *dev);
331static irqreturn_t xircom_interrupt(int irq, void *dev_instance);
332static int xircom_close(struct net_device *dev);
333static struct net_device_stats *xircom_get_stats(struct net_device *dev);
334static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
335static void set_rx_mode(struct net_device *dev);
336static void check_duplex(struct net_device *dev);
337static const struct ethtool_ops ops;
338
339
340/* The Xircom cards are picky about when certain bits in CSR6 can be
341   manipulated.  Keith Owens <kaos@ocs.com.au>. */
342static void outl_CSR6(u32 newcsr6, long ioaddr)
343{
344	const int strict_bits =
345		TxThresh10 | TxStoreForw | TxThreshMask | EnableTxRx | FullDuplexBit;
346    int csr5, csr5_22_20, csr5_19_17, currcsr6, attempts = 200;
347    unsigned long flags;
348    save_flags(flags);
349    cli();
350	/* mask out the reserved bits that always read 0 on the Xircom cards */
351	newcsr6 &= ~ReservedZeroMask;
352	/* or in the reserved bits that always read 1 */
353	newcsr6 |= ReservedOneMask;
354    currcsr6 = inl(ioaddr + CSR6);
355    if (((newcsr6 & strict_bits) == (currcsr6 & strict_bits)) ||
356	((currcsr6 & ~EnableTxRx) == 0)) {
357		outl(newcsr6, ioaddr + CSR6);	/* safe */
358		restore_flags(flags);
359		return;
360    }
361    /* make sure the transmitter and receiver are stopped first */
362    currcsr6 &= ~EnableTxRx;
363    while (1) {
364		csr5 = inl(ioaddr + CSR5);
365		if (csr5 == 0xffffffff)
366			break;  /* cannot read csr5, card removed? */
367		csr5_22_20 = csr5 & 0x700000;
368		csr5_19_17 = csr5 & 0x0e0000;
369		if ((csr5_22_20 == 0 || csr5_22_20 == 0x600000) &&
370			(csr5_19_17 == 0 || csr5_19_17 == 0x80000 || csr5_19_17 == 0xc0000))
371			break;  /* both are stopped or suspended */
372		if (!--attempts) {
373			printk(KERN_INFO DRV_NAME ": outl_CSR6 too many attempts,"
374				   "csr5=0x%08x\n", csr5);
375			outl(newcsr6, ioaddr + CSR6);  /* unsafe but do it anyway */
376			restore_flags(flags);
377			return;
378		}
379		outl(currcsr6, ioaddr + CSR6);
380		udelay(1);
381    }
382    /* now it is safe to change csr6 */
383    outl(newcsr6, ioaddr + CSR6);
384    restore_flags(flags);
385}
386
387
/* Read the station MAC address out of the card's CIS (Card Information
   Structure) via the boot-ROM access registers CSR9/CSR10, storing it in
   dev->dev_addr.  Walks the tuple chain looking for the lan_node_id
   tuple (code 0x22, data id 0x04, 6 data bytes). */
static void __devinit read_mac_address(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	int i, j;
	unsigned char tuple, link, data_id, data_count;

	/* Xircom has its address stored in the CIS;
	 * we access it through the boot rom interface for now
	 * this might not work, as the CIS is not parsed but I
	 * (danilo) use the offset I found on my card's CIS !!!
	 *
	 * Doug Ledford: I changed this routine around so that it
	 * walks the CIS memory space, parsing the config items, and
	 * finds the proper lan_node_id tuple and uses the data
	 * stored there.
	 */
	outl(1 << 12, ioaddr + CSR9); /* enable boot rom access */
	/* Each iteration reads one tuple header: code, link (length),
	 * then the first two data bytes; advance by link+2 to the next
	 * tuple.  Addresses are written to CSR10, data read from CSR9. */
	for (i = 0x100; i < 0x1f7; i += link+2) {
		outl(i, ioaddr + CSR10);
		tuple = inl(ioaddr + CSR9) & 0xff;
		outl(i + 1, ioaddr + CSR10);
		link = inl(ioaddr + CSR9) & 0xff;
		outl(i + 2, ioaddr + CSR10);
		data_id = inl(ioaddr + CSR9) & 0xff;
		outl(i + 3, ioaddr + CSR10);
		data_count = inl(ioaddr + CSR9) & 0xff;
		if ( (tuple == 0x22) &&
			 (data_id == 0x04) && (data_count == 0x06) ) {
			/*
			 * This is it.  We have the data we want.
			 */
			for (j = 0; j < 6; j++) {
				outl(i + j + 4, ioaddr + CSR10);
				dev->dev_addr[j] = inl(ioaddr + CSR9) & 0xff;
			}
			break;
		} else if (link == 0) {
			/* Zero link byte: end of the tuple chain. */
			break;
		}
	}
}
429
430
431/*
432 * locate the MII interfaces and initialize them.
433 * we disable full-duplex modes here,
434 * because we don't know how to handle them.
435 */
436static void find_mii_transceivers(struct net_device *dev)
437{
438	struct xircom_private *tp = netdev_priv(dev);
439	int phy, phy_idx;
440
441	if (media_cap[tp->default_port] & MediaIsMII) {
442		u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
443		tp->to_advertise = media2advert[tp->default_port - 9];
444	} else
445		tp->to_advertise =
446			/*ADVERTISE_100BASE4 | ADVERTISE_100FULL |*/ ADVERTISE_100HALF |
447			/*ADVERTISE_10FULL |*/ ADVERTISE_10HALF | ADVERTISE_CSMA;
448
449	/* Find the connected MII xcvrs.
450	   Doing this in open() would allow detecting external xcvrs later,
451	   but takes much time. */
452	for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
453		int mii_status = mdio_read(dev, phy, MII_BMSR);
454		if ((mii_status & (BMSR_100BASE4 | BMSR_100HALF | BMSR_10HALF)) == BMSR_100BASE4 ||
455			((mii_status & BMSR_100BASE4) == 0 &&
456			 (mii_status & (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | BMSR_10HALF)) != 0)) {
457			int mii_reg0 = mdio_read(dev, phy, MII_BMCR);
458			int mii_advert = mdio_read(dev, phy, MII_ADVERTISE);
459			int reg4 = ((mii_status >> 6) & tp->to_advertise) | ADVERTISE_CSMA;
460			tp->phys[phy_idx] = phy;
461			tp->advertising[phy_idx++] = reg4;
462			printk(KERN_INFO "%s:  MII transceiver #%d "
463				   "config %4.4x status %4.4x advertising %4.4x.\n",
464				   dev->name, phy, mii_reg0, mii_status, mii_advert);
465		}
466	}
467	tp->mii_cnt = phy_idx;
468	if (phy_idx == 0) {
469		printk(KERN_INFO "%s: ***WARNING***: No MII transceiver found!\n",
470			   dev->name);
471		tp->phys[0] = 0;
472	}
473}
474
475
476/*
477 * To quote Arjan van de Ven:
478 *   transceiver_voodoo() enables the external UTP plug thingy.
479 *   it's called voodoo as I stole this code and cannot cross-reference
480 *   it with the specification.
481 * Actually it seems to go like this:
482 * - GPIO2 enables the MII itself so we can talk to it. The MII gets reset
483 *   so any prior MII settings are lost.
484 * - GPIO0 enables the TP port so the MII can talk to the network.
485 * - a software reset will reset both GPIO pins.
486 * I also moved the software reset here, because doing it in xircom_up()
487 * required enabling the GPIO pins each time, which reset the MII each time.
488 * Thus we couldn't control the MII -- which sucks because we don't know
489 * how to handle full-duplex modes so we *must* disable them.
490 */
static void transceiver_voodoo(struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	long ioaddr = dev->base_addr;

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	outl(SoftwareReset, ioaddr + CSR0);
	udelay(2);

	/* Deassert reset. */
	outl(tp->csr0, ioaddr + CSR0);

	/* Reset the xcvr interface and turn on heartbeat. */
	outl(0x0008, ioaddr + CSR15);
	udelay(5);  /* The delays are Xircom-recommended to give the
				 * chipset time to reset the actual hardware
				 * on the PCMCIA card
				 */
	/* Magic GPIO values (see block comment above): presumably GPIO2
	 * enables the MII and GPIO0 the TP port -- taken from reference
	 * code, not cross-checked against a datasheet. */
	outl(0xa8050000, ioaddr + CSR15);
	udelay(5);
	outl(0xa00f0000, ioaddr + CSR15);
	udelay(5);

	/* Leave Tx/Rx disabled; xircom_up() enables them later. */
	outl_CSR6(0, ioaddr);
	//outl_CSR6(FullDuplexBit, ioaddr);
}
517
518
/* PCI probe routine: enable the device, claim its resources, allocate
   and initialize the net_device, reset the chip, read the MAC address
   from the CIS and register the interface.  Returns 0 on success or a
   negative errno. */
static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct xircom_private *tp;
	static int board_idx = -1;		/* counts probed boards across calls */
	int chip_idx = id->driver_data;		/* index into xircom_tbl[] */
	long ioaddr;
	int i;
	u8 chip_rev;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	//printk(KERN_INFO "xircom_init_one(%s)\n", pci_name(pdev));

	board_idx++;

	if (pci_enable_device(pdev))
		return -ENODEV;

	pci_set_master(pdev);

	ioaddr = pci_resource_start(pdev, 0);
	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk (KERN_ERR DRV_NAME "%d: cannot alloc etherdev, aborting\n", board_idx);
		return -ENOMEM;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->base_addr = ioaddr;
	dev->irq = pdev->irq;

	if (pci_request_regions(pdev, dev->name)) {
		printk (KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", board_idx);
		goto err_out_free_netdev;
	}

	/* Bring the chip out of sleep mode.
	   Caution: Snooze mode does not work with some boards! */
	if (xircom_tbl[chip_idx].flags & HAS_ACPI)
		pci_write_config_dword(pdev, PCI_POWERMGMT, 0);

	/* Stop the chip's Tx and Rx processes. */
	outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
	/* Clear the missed-packet counter. */
	(volatile int)inl(ioaddr + CSR8);

	tp = netdev_priv(dev);

	spin_lock_init(&tp->lock);
	tp->pdev = pdev;
	tp->chip_id = chip_idx;
	/* BugFixes: The 21143-TD hangs with PCI Write-and-Invalidate cycles. */
	tp->csr0 = csr0 & ~EnableMWI;

	pci_set_drvdata(pdev, dev);

	/* The lower four bits are the media type. */
	if (board_idx >= 0 && board_idx < MAX_UNITS) {
		tp->default_port = options[board_idx] & 15;
		if ((options[board_idx] & 0x90) || full_duplex[board_idx] > 0)
			tp->full_duplex = 1;
		if (mtu[board_idx] > 0)
			dev->mtu = mtu[board_idx];
	}
	/* dev->mem_start (from kernel boot parameters) overrides options[]. */
	if (dev->mem_start)
		tp->default_port = dev->mem_start;
	if (tp->default_port) {
		if (media_cap[tp->default_port] & MediaAlwaysFD)
			tp->full_duplex = 1;
	}
	/* A forced duplex setting disables autonegotiation. */
	if (tp->full_duplex)
		tp->autoneg = 0;
	else
		tp->autoneg = 1;
	tp->speed100 = 1;

	/* The Xircom-specific entries in the device structure. */
	dev->open = &xircom_open;
	dev->hard_start_xmit = &xircom_start_xmit;
	dev->stop = &xircom_close;
	dev->get_stats = &xircom_get_stats;
	dev->do_ioctl = &xircom_ioctl;
#ifdef HAVE_MULTICAST
	dev->set_multicast_list = &set_rx_mode;
#endif
	dev->tx_timeout = xircom_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	SET_ETHTOOL_OPS(dev, &ops);

	/* Reset the chip and enable the external transceiver. */
	transceiver_voodoo(dev);

	read_mac_address(dev);

	if (register_netdev(dev))
		goto err_out_cleardev;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
	printk(KERN_INFO "%s: %s rev %d at %#3lx,",
	       dev->name, xircom_tbl[chip_idx].chip_name, chip_rev, ioaddr);
	for (i = 0; i < 6; i++)
		printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
	printk(", IRQ %d.\n", dev->irq);

	if (xircom_tbl[chip_idx].flags & HAS_MII) {
		find_mii_transceivers(dev);
		check_duplex(dev);
	}

	return 0;

err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
	return -ENODEV;
}
643
644
645/* MII transceiver control section.
646   Read and write the MII registers using software-generated serial
647   MDIO protocol.  See the MII specifications or DP83840A data sheet
648   for details. */
649
650/* The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
651   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
652   "overclocking" issues or future 66Mhz PCI. */
653#define mdio_delay() inl(mdio_addr)
654
655/* Read and write the MII registers using software-generated serial
656   MDIO protocol.  It is just different enough from the EEPROM protocol
657   to not share code.  The maxium data clock rate is 2.5 Mhz. */
658#define MDIO_SHIFT_CLK	0x10000
659#define MDIO_DATA_WRITE0 0x00000
660#define MDIO_DATA_WRITE1 0x20000
661#define MDIO_ENB		0x00000		/* Ignore the 0x02000 databook setting. */
662#define MDIO_ENB_IN		0x40000
663#define MDIO_DATA_READ	0x80000
664
665static int mdio_read(struct net_device *dev, int phy_id, int location)
666{
667	int i;
668	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
669	int retval = 0;
670	long ioaddr = dev->base_addr;
671	long mdio_addr = ioaddr + CSR9;
672
673	/* Establish sync by sending at least 32 logic ones. */
674	for (i = 32; i >= 0; i--) {
675		outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
676		mdio_delay();
677		outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
678		mdio_delay();
679	}
680	/* Shift the read command bits out. */
681	for (i = 15; i >= 0; i--) {
682		int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
683
684		outl(MDIO_ENB | dataval, mdio_addr);
685		mdio_delay();
686		outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
687		mdio_delay();
688	}
689	/* Read the two transition, 16 data, and wire-idle bits. */
690	for (i = 19; i > 0; i--) {
691		outl(MDIO_ENB_IN, mdio_addr);
692		mdio_delay();
693		retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
694		outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
695		mdio_delay();
696	}
697	return (retval>>1) & 0xffff;
698}
699
700
701static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
702{
703	int i;
704	int cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
705	long ioaddr = dev->base_addr;
706	long mdio_addr = ioaddr + CSR9;
707
708	/* Establish sync by sending 32 logic ones. */
709	for (i = 32; i >= 0; i--) {
710		outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
711		mdio_delay();
712		outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
713		mdio_delay();
714	}
715	/* Shift the command bits out. */
716	for (i = 31; i >= 0; i--) {
717		int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
718		outl(MDIO_ENB | dataval, mdio_addr);
719		mdio_delay();
720		outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
721		mdio_delay();
722	}
723	/* Clear out extra bits. */
724	for (i = 2; i > 0; i--) {
725		outl(MDIO_ENB_IN, mdio_addr);
726		mdio_delay();
727		outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
728		mdio_delay();
729	}
730	return;
731}
732
733
/* Bring the interface up: (re)build the descriptor rings, program their
   base addresses into CSR3/CSR4, load the Rx filter, enable Tx then Rx,
   unmask interrupts and start the queue.  Called from xircom_open(). */
static void
xircom_up(struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int i;

	xircom_init_ring(dev);
	/* Clear the tx ring */
	for (i = 0; i < TX_RING_SIZE; i++) {
		tp->tx_skbuff[i] = NULL;
		tp->tx_ring[i].status = 0;
	}

	if (xircom_debug > 1)
		printk(KERN_DEBUG "%s: xircom_up() irq %d.\n", dev->name, dev->irq);

	/* Hand the chip the (bus) addresses of the Rx and Tx rings. */
	outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
	outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);

	tp->saved_if_port = dev->if_port;
	if (dev->if_port == 0)
		dev->if_port = tp->default_port;

	tp->csr6 = TxThresh10 /*| FullDuplexBit*/;

	/* Queues the setup frame that programs the address filter. */
	set_rx_mode(dev);

	/* Start the chip's Tx to process setup frame. */
	outl_CSR6(tp->csr6, ioaddr);
	outl_CSR6(tp->csr6 | EnableTx, ioaddr);

	/* Acknowledge all outstanding interrupts sources */
	outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
	/* Enable interrupts by setting the interrupt mask. */
	outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
	/* Enable Rx */
	outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
	/* Rx poll demand */
	outl(0, ioaddr + CSR2);

	/* Tell the net layer we're ready */
	netif_start_queue (dev);

	/* Check current media state */
	xircom_media_change(dev);

	if (xircom_debug > 2) {
		printk(KERN_DEBUG "%s: Done xircom_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
			   dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
			   inl(ioaddr + CSR6));
	}
}
787
788
789static int
790xircom_open(struct net_device *dev)
791{
792	struct xircom_private *tp = netdev_priv(dev);
793
794	if (request_irq(dev->irq, &xircom_interrupt, IRQF_SHARED, dev->name, dev))
795		return -EAGAIN;
796
797	xircom_up(dev);
798	tp->open = 1;
799
800	return 0;
801}
802
803
/* Watchdog callback invoked when no Tx completion has been seen for
   TX_TIMEOUT jiffies: kick the chip's Tx/Rx engines, poke the Tx poll
   demand register, wake the queue and count the error. */
static void xircom_tx_timeout(struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	long ioaddr = dev->base_addr;

	if (media_cap[dev->if_port] & MediaIsMII) {
		/* Do nothing -- the media monitor should handle this. */
		if (xircom_debug > 1)
			printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
				   dev->name);
	}

#if defined(way_too_many_messages)
	if (xircom_debug > 3) {
		int i;
		for (i = 0; i < RX_RING_SIZE; i++) {
			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
			int j;
			printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x  "
				   "%2.2x %2.2x %2.2x.\n",
				   i, (unsigned int)tp->rx_ring[i].status,
				   (unsigned int)tp->rx_ring[i].length,
				   (unsigned int)tp->rx_ring[i].buffer1,
				   (unsigned int)tp->rx_ring[i].buffer2,
				   buf[0], buf[1], buf[2]);
			for (j = 0; buf[j] != 0xee && j < 1600; j++)
				if (j < 100) printk(" %2.2x", buf[j]);
			printk(" j=%d.\n", j);
		}
		printk(KERN_DEBUG "  Rx ring %8.8x: ", (int)tp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
		printk("\n" KERN_DEBUG "  Tx ring %8.8x: ", (int)tp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
		printk("\n");
	}
#endif

	/* Stop and restart the chip's Tx/Rx processes . */
	/* NOTE(review): the first write sets EnableRx only and the second
	 * sets both, so Tx is toggled off/on while Rx is never dropped --
	 * confirm this matches the intended restart procedure. */
	outl_CSR6(tp->csr6 | EnableRx, ioaddr);
	outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
	/* Trigger an immediate transmit demand. */
	outl(0, ioaddr + CSR1);

	dev->trans_start = jiffies;
	netif_wake_queue (dev);
	tp->stats.tx_errors++;
}
853
854
/* Initialize the Rx and Tx rings, along with various 'dev' bits.
   Chains descriptors via buffer2, wraps both rings, allocates Rx
   skbuffs (and, on the X3201-3, 4-byte-aligned Tx bounce skbuffs)
   and hands the Rx buffers to the chip. */
static void xircom_init_ring(struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	int i;

	tp->tx_full = 0;
	tp->cur_rx = tp->cur_tx = 0;
	tp->dirty_rx = tp->dirty_tx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		tp->rx_ring[i].status = 0;
		tp->rx_ring[i].length = PKT_BUF_SZ;
		tp->rx_ring[i].buffer2 = virt_to_bus(&tp->rx_ring[i+1]);
		tp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	tp->rx_ring[i-1].length = PKT_BUF_SZ | Rx1RingWrap;
	tp->rx_ring[i-1].buffer2 = virt_to_bus(&tp->rx_ring[0]);

	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Note the receive buffer must be longword aligned.
		   dev_alloc_skb() provides 16 byte alignment.  But do *not*
		   use skb_reserve() to align the IP header! */
		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
		tp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;			/* Mark as being used by this device. */
		tp->rx_ring[i].status = Rx0DescOwned;	/* Owned by Xircom chip */
		tp->rx_ring[i].buffer1 = virt_to_bus(skb->data);
	}
	/* Record how many Rx buffers are missing so they can be refilled. */
	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		tp->tx_skbuff[i] = NULL;
		tp->tx_ring[i].status = 0;
		tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
		if (tp->chip_id == X3201_3)
			/* NOTE(review): this allocation is not checked; a NULL
			 * here would be dereferenced later in
			 * xircom_start_xmit() -- worth verifying/handling. */
			tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ);
	}
	tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]);
}
900
901
/* hard_start_xmit hook: place @skb in the next free Tx descriptor
   (bouncing through an aligned skbuff on the X3201-3), pass ownership
   to the chip and issue a Tx poll demand.  Always returns 0. */
static int
xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	int entry;
	u32 flag;

	/* Caution: the write order is important here, set the base address
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = tp->cur_tx % TX_RING_SIZE;

	tp->tx_skbuff[entry] = skb;
	if (tp->chip_id == X3201_3) {
		/* The X3201-3 needs 4-byte aligned Tx buffers, so copy the
		   packet into the preallocated aligned skbuff. */
		skb_copy_from_linear_data(skb,
					  tp->tx_aligned_skbuff[entry]->data,
					  skb->len);
		tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
	} else
		tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);

	/* Decide whether to request a Tx-done interrupt based on how full
	   the ring is; mark the ring full near the end. */
	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
		flag = Tx1WholePkt; /* No interrupt */
	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
		flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
		flag = Tx1WholePkt; /* No Tx-done intr. */
	} else {
		/* Leave room for set_rx_mode() to fill entries. */
		flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
		tp->tx_full = 1;
	}
	if (entry == TX_RING_SIZE - 1)
		flag |= Tx1WholePkt | Tx1ComplIntr | Tx1RingWrap;

	tp->tx_ring[entry].length = skb->len | flag;
	tp->tx_ring[entry].status = Tx0DescOwned;	/* Pass ownership to the chip. */
	tp->cur_tx++;
	if (tp->tx_full)
		netif_stop_queue (dev);
	else
		netif_wake_queue (dev);

	/* Trigger an immediate transmit demand. */
	outl(0, dev->base_addr + CSR1);

	dev->trans_start = jiffies;

	return 0;
}
953
954
/*
 * Handle a PHY link-status change.  Called from the interrupt handler
 * on a LinkChange event (tp->lock held).  Re-reads the MII registers,
 * derives the new speed/duplex, updates the carrier state, and rewrites
 * CSR6's full-duplex bit if it changed.
 */
static void xircom_media_change(struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	u16 reg0, reg1, reg4, reg5;
	u32 csr6 = inl(ioaddr + CSR6), newcsr6;

	/* reset status first */
	mdio_read(dev, tp->phys[0], MII_BMCR);
	mdio_read(dev, tp->phys[0], MII_BMSR);

	/* Second read returns the current (un-latched) status. */
	reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
	reg1 = mdio_read(dev, tp->phys[0], MII_BMSR);

	if (reg1 & BMSR_LSTATUS) {
		/* link is up */
		if (reg0 & BMCR_ANENABLE) {
			/* autonegotiation is enabled */
			reg4 = mdio_read(dev, tp->phys[0], MII_ADVERTISE);
			reg5 = mdio_read(dev, tp->phys[0], MII_LPA);
			/* Pick the best mode both ends advertise, in the
			   standard priority order 100F > 100H > 10F > 10H. */
			if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
				tp->speed100 = 1;
				tp->full_duplex = 1;
			} else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
				tp->speed100 = 1;
				tp->full_duplex = 0;
			} else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
				tp->speed100 = 0;
				tp->full_duplex = 1;
			} else {
				tp->speed100 = 0;
				tp->full_duplex = 0;
			}
		} else {
			/* autonegotiation is disabled: take the forced
			   settings straight from BMCR. */
			if (reg0 & BMCR_SPEED100)
				tp->speed100 = 1;
			else
				tp->speed100 = 0;
			if (reg0 & BMCR_FULLDPLX)
				tp->full_duplex = 1;
			else
				tp->full_duplex = 0;
		}
		printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
		       dev->name,
		       tp->speed100 ? "100" : "10",
		       tp->full_duplex ? "full" : "half");
		netif_carrier_on(dev);
		/* Only touch CSR6 when the duplex bit actually changes. */
		newcsr6 = csr6 & ~FullDuplexBit;
		if (tp->full_duplex)
			newcsr6 |= FullDuplexBit;
		if (newcsr6 != csr6)
			outl_CSR6(newcsr6, ioaddr + CSR6);
	} else {
		printk(KERN_DEBUG "%s: Link is down\n", dev->name);
		netif_carrier_off(dev);
	}
}
1014
1015
/*
 * Reset the PHY and reprogram it from the cached settings:
 * re-advertise tp->advertising[0], then either restart autonegotiation
 * (tp->autoneg) or force the speed/duplex in tp->speed100 /
 * tp->full_duplex.  Busy-waits for the PHY reset bit to clear after an
 * initial 500us delay.
 */
static void check_duplex(struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	u16 reg0;

	mdio_write(dev, tp->phys[0], MII_BMCR, BMCR_RESET);
	udelay(500);
	/* Spin until the PHY reports the reset has completed.
	   NOTE(review): no timeout — a dead PHY would hang here. */
	while (mdio_read(dev, tp->phys[0], MII_BMCR) & BMCR_RESET);

	reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
	mdio_write(dev, tp->phys[0], MII_ADVERTISE, tp->advertising[0]);

	if (tp->autoneg) {
		/* Let autonegotiation pick speed/duplex. */
		reg0 &= ~(BMCR_SPEED100 | BMCR_FULLDPLX);
		reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
	} else {
		/* Force the cached speed/duplex settings. */
		reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
		if (tp->speed100)
			reg0 |= BMCR_SPEED100;
		if (tp->full_duplex)
			reg0 |= BMCR_FULLDPLX;
		printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
		       dev->name,
		       tp->speed100 ? "100" : "10",
		       tp->full_duplex ? "full" : "half");
	}
	mdio_write(dev, tp->phys[0], MII_BMCR, reg0);
}
1044
1045
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.  Guarded by tp->lock (plain spin_lock: we are
   already running in hard-IRQ context here).  Loops until no more
   interrupt causes are pending or the work budget is exhausted. */
static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct xircom_private *tp = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int csr5, work_budget = max_interrupt_work;
	int handled = 0;

	spin_lock (&tp->lock);

	do {
		csr5 = inl(ioaddr + CSR5);
		/* Acknowledge all of the current interrupt sources ASAP. */
		outl(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (xircom_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
				   dev->name, csr5, inl(dev->base_addr + CSR5));

		if (csr5 == 0xffffffff)
			break;	/* all bits set, assume PCMCIA card removed */

		if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
			break;

		handled = 1;

		if (csr5 & (RxIntr | RxNoBuf))
			work_budget -= xircom_rx(dev);

		if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
			unsigned int dirty_tx;

			/* Reap completed Tx descriptors: walk from dirty_tx
			   until we hit one the chip still owns. */
			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = tp->tx_ring[entry].status;

				if (status < 0)
					break;			/* It still hasn't been Txed */
				/* Check for Rx filter setup frames. */
				if (tp->tx_skbuff[entry] == NULL)
				  continue;

				if (status & Tx0DescError) {
					/* There was an major error, log it. */
#ifndef final_version
					if (xircom_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
							   dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & Tx0ManyColl) {
						tp->stats.tx_aborted_errors++;
					}
					if (status & Tx0NoCarrier) tp->stats.tx_carrier_errors++;
					if (status & Tx0LateColl) tp->stats.tx_window_errors++;
					if (status & Tx0Underflow) tp->stats.tx_fifo_errors++;
				} else {
					tp->stats.tx_bytes += tp->tx_ring[entry].length & 0x7ff;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_skbuff[entry]);
				tp->tx_skbuff[entry] = NULL;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
					   dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->tx_full &&
			    tp->cur_tx - dirty_tx  < TX_RING_SIZE - 2)
				/* The ring is no longer full */
				tp->tx_full = 0;

			if (tp->tx_full)
				netif_stop_queue (dev);
			else
				netif_wake_queue (dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (xircom_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
						   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
						   dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
				/* Restart: Rx-only first, then Tx+Rx. */
				outl_CSR6(tp->csr6 | EnableRx, ioaddr);
				outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
			}
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 & LinkChange)
				xircom_media_change(dev);
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & TxThreshMask) != TxThreshMask)
					tp->csr6 += (1 << TxThreshShift);	/* Bump up the Tx threshold */
				else
					tp->csr6 |= TxStoreForw;  /* Store-n-forward. */
				/* Restart the transmit process. */
				outl_CSR6(tp->csr6 | EnableRx, ioaddr);
				outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
				tp->stats.rx_errors++;
				tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
				outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
			}
			/* Clear all error sources, included undocumented ones! */
			outl(0x0800f7ba, ioaddr + CSR5);
		}
		if (--work_budget < 0) {
			if (xircom_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
					   "csr5=0x%8.8x.\n", dev->name, csr5);
			/* Acknowledge all interrupt sources. */
			outl(0x8001ffff, ioaddr + CSR5);
			break;
		}
	} while (1);

	if (xircom_debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
			   dev->name, inl(ioaddr + CSR5));

	spin_unlock (&tp->lock);
	return IRQ_RETVAL(handled);
}
1184
1185
/*
 * Receive-path worker, called from the interrupt handler.  Drains
 * completed Rx descriptors, delivers packets via netif_rx(), and refills
 * the ring.  Returns a count of "work done" which the caller subtracts
 * from its interrupt work budget.
 */
static int
xircom_rx(struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int work_done = 0;

	if (xircom_debug > 4)
		printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it's a new packet. Send it up.
	   (A non-negative status means the chip-ownership bit is clear.) */
	while (tp->rx_ring[entry].status >= 0) {
		s32 status = tp->rx_ring[entry].status;

		if (xircom_debug > 5)
			printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
				   tp->rx_ring[entry].status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			/* Not a normal whole-packet descriptor: classify. */
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (xircom_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & Rx0DescError) {
				/* There was a fatal error. */
				if (xircom_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & (Rx0Runt | Rx0HugeFrame)) tp->stats.rx_length_errors++;
				if (status & Rx0CRCError) tp->stats.rx_crc_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
#if !defined(__alpha__)
				eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
								 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
					   bus_to_virt(tp->rx_ring[entry].buffer1), pkt_len);
#endif
				work_done++;
			} else { 	/* Pass up the skb already on the Rx ring. */
				/* The ring slot's skb is handed to the stack; the
				   refill loop below will allocate a replacement. */
				skb_put(skb = tp->rx_skbuff[entry], pkt_len);
				tp->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = tp->rx_skbuff[entry] = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;
			skb->dev = dev;			/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = virt_to_bus(skb->data);
			work_done++;
		}
		tp->rx_ring[entry].status = Rx0DescOwned;
	}

	return work_done;
}
1282
1283
1284static void
1285xircom_down(struct net_device *dev)
1286{
1287	long ioaddr = dev->base_addr;
1288	struct xircom_private *tp = netdev_priv(dev);
1289
1290	/* Disable interrupts by clearing the interrupt mask. */
1291	outl(0, ioaddr + CSR7);
1292	/* Stop the chip's Tx and Rx processes. */
1293	outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
1294
1295	if (inl(ioaddr + CSR6) != 0xffffffff)
1296		tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
1297
1298	dev->if_port = tp->saved_if_port;
1299}
1300
1301
1302static int
1303xircom_close(struct net_device *dev)
1304{
1305	long ioaddr = dev->base_addr;
1306	struct xircom_private *tp = netdev_priv(dev);
1307	int i;
1308
1309	if (xircom_debug > 1)
1310		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1311			   dev->name, inl(ioaddr + CSR5));
1312
1313	netif_stop_queue(dev);
1314
1315	if (netif_device_present(dev))
1316		xircom_down(dev);
1317
1318	free_irq(dev->irq, dev);
1319
1320	/* Free all the skbuffs in the Rx queue. */
1321	for (i = 0; i < RX_RING_SIZE; i++) {
1322		struct sk_buff *skb = tp->rx_skbuff[i];
1323		tp->rx_skbuff[i] = NULL;
1324		tp->rx_ring[i].status = 0;		/* Not owned by Xircom chip. */
1325		tp->rx_ring[i].length = 0;
1326		tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
1327		if (skb) {
1328			dev_kfree_skb(skb);
1329		}
1330	}
1331	for (i = 0; i < TX_RING_SIZE; i++) {
1332		if (tp->tx_skbuff[i])
1333			dev_kfree_skb(tp->tx_skbuff[i]);
1334		tp->tx_skbuff[i] = NULL;
1335	}
1336
1337	tp->open = 0;
1338	return 0;
1339}
1340
1341
1342static struct net_device_stats *xircom_get_stats(struct net_device *dev)
1343{
1344	struct xircom_private *tp = netdev_priv(dev);
1345	long ioaddr = dev->base_addr;
1346
1347	if (netif_device_present(dev))
1348		tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
1349
1350	return &tp->stats;
1351}
1352
1353static int xircom_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1354{
1355	struct xircom_private *tp = netdev_priv(dev);
1356	ecmd->supported =
1357			SUPPORTED_10baseT_Half |
1358			SUPPORTED_10baseT_Full |
1359			SUPPORTED_100baseT_Half |
1360			SUPPORTED_100baseT_Full |
1361			SUPPORTED_Autoneg |
1362			SUPPORTED_MII;
1363
1364	ecmd->advertising = ADVERTISED_MII;
1365	if (tp->advertising[0] & ADVERTISE_10HALF)
1366		ecmd->advertising |= ADVERTISED_10baseT_Half;
1367	if (tp->advertising[0] & ADVERTISE_10FULL)
1368		ecmd->advertising |= ADVERTISED_10baseT_Full;
1369	if (tp->advertising[0] & ADVERTISE_100HALF)
1370		ecmd->advertising |= ADVERTISED_100baseT_Half;
1371	if (tp->advertising[0] & ADVERTISE_100FULL)
1372		ecmd->advertising |= ADVERTISED_100baseT_Full;
1373	if (tp->autoneg) {
1374		ecmd->advertising |= ADVERTISED_Autoneg;
1375		ecmd->autoneg = AUTONEG_ENABLE;
1376	} else
1377		ecmd->autoneg = AUTONEG_DISABLE;
1378
1379	ecmd->port = PORT_MII;
1380	ecmd->transceiver = XCVR_INTERNAL;
1381	ecmd->phy_address = tp->phys[0];
1382	ecmd->speed = tp->speed100 ? SPEED_100 : SPEED_10;
1383	ecmd->duplex = tp->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1384	ecmd->maxtxpkt = TX_RING_SIZE / 2;
1385	ecmd->maxrxpkt = 0;
1386	return 0;
1387}
1388
1389static int xircom_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1390{
1391	struct xircom_private *tp = netdev_priv(dev);
1392	u16 autoneg, speed100, full_duplex;
1393
1394	autoneg = (ecmd->autoneg == AUTONEG_ENABLE);
1395	speed100 = (ecmd->speed == SPEED_100);
1396	full_duplex = (ecmd->duplex == DUPLEX_FULL);
1397
1398	tp->autoneg = autoneg;
1399	if (speed100 != tp->speed100 ||
1400	    full_duplex != tp->full_duplex) {
1401		tp->speed100 = speed100;
1402		tp->full_duplex = full_duplex;
1403		/* change advertising bits */
1404		tp->advertising[0] &= ~(ADVERTISE_10HALF |
1405				     ADVERTISE_10FULL |
1406				     ADVERTISE_100HALF |
1407				     ADVERTISE_100FULL |
1408				     ADVERTISE_100BASE4);
1409		if (speed100) {
1410			if (full_duplex)
1411				tp->advertising[0] |= ADVERTISE_100FULL;
1412			else
1413				tp->advertising[0] |= ADVERTISE_100HALF;
1414		} else {
1415			if (full_duplex)
1416				tp->advertising[0] |= ADVERTISE_10FULL;
1417			else
1418				tp->advertising[0] |= ADVERTISE_10HALF;
1419		}
1420	}
1421	check_duplex(dev);
1422	return 0;
1423}
1424
1425static void xircom_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1426{
1427	struct xircom_private *tp = netdev_priv(dev);
1428	strcpy(info->driver, DRV_NAME);
1429	strcpy(info->version, DRV_VERSION);
1430	strcpy(info->bus_info, pci_name(tp->pdev));
1431}
1432
/* ethtool operations implemented by this driver: link settings
   get/set and driver identification only. */
static const struct ethtool_ops ops = {
	.get_settings = xircom_get_settings,
	.set_settings = xircom_set_settings,
	.get_drvinfo = xircom_get_drvinfo,
};
1438
1439/* Provide ioctl() calls to examine the MII xcvr state. */
1440static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1441{
1442	struct xircom_private *tp = netdev_priv(dev);
1443	u16 *data = (u16 *)&rq->ifr_ifru;
1444	int phy = tp->phys[0] & 0x1f;
1445	unsigned long flags;
1446
1447	switch(cmd) {
1448	/* Legacy mii-diag interface */
1449	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
1450		if (tp->mii_cnt)
1451			data[0] = phy;
1452		else
1453			return -ENODEV;
1454		return 0;
1455	case SIOCGMIIREG:		/* Read MII PHY register. */
1456		save_flags(flags);
1457		cli();
1458		data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
1459		restore_flags(flags);
1460		return 0;
1461	case SIOCSMIIREG:		/* Write MII PHY register. */
1462		if (!capable(CAP_NET_ADMIN))
1463			return -EPERM;
1464		save_flags(flags);
1465		cli();
1466		if (data[0] == tp->phys[0]) {
1467			u16 value = data[2];
1468			switch (data[1]) {
1469			case 0:
1470				if (value & (BMCR_RESET | BMCR_ANENABLE))
1471					/* Autonegotiation. */
1472					tp->autoneg = 1;
1473				else {
1474					tp->full_duplex = (value & BMCR_FULLDPLX) ? 1 : 0;
1475					tp->autoneg = 0;
1476				}
1477				break;
1478			case 4:
1479				tp->advertising[0] = value;
1480				break;
1481			}
1482			check_duplex(dev);
1483		}
1484		mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
1485		restore_flags(flags);
1486		return 0;
1487	default:
1488		return -EOPNOTSUPP;
1489	}
1490
1491	return -EOPNOTSUPP;
1492}
1493
1494/* Set or clear the multicast filter for this adaptor.
1495   Note that we only use exclusion around actually queueing the
1496   new frame, not around filling tp->setup_frame.  This is non-deterministic
1497   when re-entered but still correct. */
1498static void set_rx_mode(struct net_device *dev)
1499{
1500	struct xircom_private *tp = netdev_priv(dev);
1501	struct dev_mc_list *mclist;
1502	long ioaddr = dev->base_addr;
1503	int csr6 = inl(ioaddr + CSR6);
1504	u16 *eaddrs, *setup_frm;
1505	u32 tx_flags;
1506	int i;
1507
1508	tp->csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
1509	csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
1510	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1511		tp->csr6 |= PromiscBit;
1512		csr6 |= PromiscBit;
1513		goto out;
1514	}
1515
1516	if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
1517		/* Too many to filter well -- accept all multicasts. */
1518		tp->csr6 |= AllMultiBit;
1519		csr6 |= AllMultiBit;
1520		goto out;
1521	}
1522
1523	tx_flags = Tx1WholePkt | Tx1SetupPkt | PKT_SETUP_SZ;
1524
1525	/* Note that only the low-address shortword of setup_frame is valid! */
1526	setup_frm = tp->setup_frame;
1527	mclist = dev->mc_list;
1528
1529	/* Fill the first entry with our physical address. */
1530	eaddrs = (u16 *)dev->dev_addr;
1531	*setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
1532	*setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
1533	*setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
1534
1535	if (dev->mc_count > 14) { /* Must use a multicast hash table. */
1536		u32 *hash_table = (u32 *)(tp->setup_frame + 4 * 12);
1537		u32 hash, hash2;
1538
1539		tx_flags |= Tx1HashSetup;
1540		tp->csr6 |= HashFilterBit;
1541		csr6 |= HashFilterBit;
1542
1543		/* Fill the unused 3 entries with the broadcast address.
1544		   At least one entry *must* contain the broadcast address!!!*/
1545		for (i = 0; i < 3; i++) {
1546			*setup_frm = 0xffff; setup_frm += 2;
1547			*setup_frm = 0xffff; setup_frm += 2;
1548			*setup_frm = 0xffff; setup_frm += 2;
1549		}
1550
1551		/* Truly brain-damaged hash filter layout */
1552		for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
1553			u32 *hptr;
1554			hash = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
1555			if (hash < 384) {
1556				hash2 = hash + ((hash >> 4) << 4) +
1557					((hash >> 5) << 5);
1558			} else {
1559				hash -= 384;
1560				hash2 = 64 + hash + (hash >> 4) * 80;
1561			}
1562			hptr = &hash_table[hash2 & ~0x1f];
1563			*hptr |= cpu_to_le32(1 << (hash2 & 0x1f));
1564		}
1565	} else {
1566		/* We have <= 14 mcast addresses so we can use Xircom's
1567		   wonderful 16-address perfect filter. */
1568		for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
1569			eaddrs = (u16 *)mclist->dmi_addr;
1570			*setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
1571			*setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
1572			*setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
1573		}
1574		/* Fill the unused entries with the broadcast address.
1575		   At least one entry *must* contain the broadcast address!!!*/
1576		for (; i < 15; i++) {
1577			*setup_frm = 0xffff; setup_frm += 2;
1578			*setup_frm = 0xffff; setup_frm += 2;
1579			*setup_frm = 0xffff; setup_frm += 2;
1580		}
1581	}
1582
1583	/* Now add this frame to the Tx list. */
1584	if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1585		/* Same setup recently queued, we need not add it. */
1586	} else {
1587		unsigned long flags;
1588		unsigned int entry;
1589		int dummy = -1;
1590
1591		save_flags(flags); cli();
1592		entry = tp->cur_tx++ % TX_RING_SIZE;
1593
1594		if (entry != 0) {
1595			/* Avoid a chip errata by prefixing a dummy entry. */
1596			tp->tx_skbuff[entry] = NULL;
1597			tp->tx_ring[entry].length =
1598				(entry == TX_RING_SIZE - 1) ? Tx1RingWrap : 0;
1599			tp->tx_ring[entry].buffer1 = 0;
1600			/* race with chip, set Tx0DescOwned later */
1601			dummy = entry;
1602			entry = tp->cur_tx++ % TX_RING_SIZE;
1603		}
1604
1605		tp->tx_skbuff[entry] = NULL;
1606		/* Put the setup frame on the Tx list. */
1607		if (entry == TX_RING_SIZE - 1)
1608			tx_flags |= Tx1RingWrap;		/* Wrap ring. */
1609		tp->tx_ring[entry].length = tx_flags;
1610		tp->tx_ring[entry].buffer1 = virt_to_bus(tp->setup_frame);
1611		tp->tx_ring[entry].status = Tx0DescOwned;
1612		if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) {
1613			tp->tx_full = 1;
1614			netif_stop_queue (dev);
1615		}
1616		if (dummy >= 0)
1617			tp->tx_ring[dummy].status = Tx0DescOwned;
1618		restore_flags(flags);
1619		/* Trigger an immediate transmit demand. */
1620		outl(0, ioaddr + CSR1);
1621	}
1622
1623out:
1624	outl_CSR6(csr6, ioaddr);
1625}
1626
1627
/* PCI IDs this driver binds to: Xircom (vendor 0x115D) CBE-100
   (device 0x0003); driver_data selects the X3201_3 chip variant. */
static struct pci_device_id xircom_pci_table[] = {
  { 0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, X3201_3 },
  {0},
};
MODULE_DEVICE_TABLE(pci, xircom_pci_table);
1633
1634
1635#ifdef CONFIG_PM
/*
 * PCI suspend hook: quiesce the chip if the interface is open, then
 * save PCI config state, disable the device and power it down.
 */
static int xircom_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct xircom_private *tp = netdev_priv(dev);
	printk(KERN_INFO "xircom_suspend(%s)\n", dev->name);
	if (tp->open)
		xircom_down(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	/* NOTE(review): raw power state 3 == D3hot; the PCI_D3hot
	   constant (or pci_choose_state()) would be clearer — confirm. */
	pci_set_power_state(pdev, 3);

	return 0;
}
1650
1651
/*
 * PCI resume hook: restore power and PCI state, take the chip out of
 * its power-management sleep, re-run the transceiver setup, and bring
 * the interface back up if it was open at suspend time.
 */
static int xircom_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct xircom_private *tp = netdev_priv(dev);
	printk(KERN_INFO "xircom_resume(%s)\n", dev->name);

	pci_set_power_state(pdev,0);	/* back to D0 */
	pci_enable_device(pdev);
	pci_restore_state(pdev);

	/* Bring the chip out of sleep mode.
	   Caution: Snooze mode does not work with some boards! */
	if (xircom_tbl[tp->chip_id].flags & HAS_ACPI)
		pci_write_config_dword(tp->pdev, PCI_POWERMGMT, 0);

	transceiver_voodoo(dev);
	if (xircom_tbl[tp->chip_id].flags & HAS_MII)
		check_duplex(dev);

	if (tp->open)
		xircom_up(dev);
	return 0;
}
1675#endif /* CONFIG_PM */
1676
1677
/*
 * PCI removal hook: unregister the network device, release the I/O
 * regions claimed at probe time, free the netdev and clear drvdata.
 */
static void __devexit xircom_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	printk(KERN_INFO "xircom_remove_one(%s)\n", dev->name);
	unregister_netdev(dev);
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_set_drvdata(pdev, NULL);
}
1688
1689
/* PCI driver glue.  suspend/resume are only wired up when the kernel
   is built with power management (CONFIG_PM). */
static struct pci_driver xircom_driver = {
	.name		= DRV_NAME,
	.id_table	= xircom_pci_table,
	.probe		= xircom_init_one,
	.remove		= __devexit_p(xircom_remove_one),
#ifdef CONFIG_PM
	.suspend	= xircom_suspend,
	.resume		= xircom_resume
#endif /* CONFIG_PM */
};
1700
1701
1702static int __init xircom_init(void)
1703{
1704/* when a module, this is printed whether or not devices are found in probe */
1705#ifdef MODULE
1706	printk(version);
1707#endif
1708	return pci_register_driver(&xircom_driver);
1709}
1710
1711
/* Module exit: unregister the PCI driver. */
static void __exit xircom_exit(void)
{
	pci_unregister_driver(&xircom_driver);
}
1716
/* Standard module entry/exit registration. */
module_init(xircom_init)
module_exit(xircom_exit)
1719
1720/*
1721 * Local variables:
1722 *  c-indent-level: 4
1723 *  c-basic-offset: 4
1724 *  tab-width: 4
1725 * End:
1726 */
1727