1/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
2/*
3	Written 1997-2001 by Donald Becker.
4
5	This software may be used and distributed according to the terms of
6	the GNU General Public License (GPL), incorporated herein by reference.
7	Drivers based on or derived from this code fall under the GPL and must
8	retain the authorship, copyright and license notice.  This file is not
9	a complete program and may only be used when the entire operating
10	system is licensed under the GPL.
11
12	This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
13	It also supports the Symbios Logic version of the same chip core.
14
15	The author may be reached as becker@scyld.com, or C/O
16	Scyld Computing Corporation
17	410 Severn Ave., Suite 210
18	Annapolis MD 21403
19
20	Support and updates available at
21	http://www.scyld.com/network/yellowfin.html
22	[link no longer provides useful info -jgarzik]
23
24*/
25
26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
28#define DRV_NAME	"yellowfin"
29#define DRV_VERSION	"2.1"
30#define DRV_RELDATE	"Sep 11, 2006"
31
32/* The user-configurable values.
33   These may be modified when a driver module is loaded.*/
34
35static int debug = 1;			/* Debug level: 0 quiet, 1 normal messages, up to 7 verbose. */
36/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
37static int max_interrupt_work = 20;
38static int mtu;
39#ifdef YF_PROTOTYPE			    /* Support for prototype hardware errata. */
40/* System-wide count of bogus-rx frames. */
41static int bogus_rx;
42static int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
43static int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
44#elif defined(YF_NEW)					  /* A future perfect board :->.  */
45static int dma_ctrl = 0x00CAC277;			/* Override when loading module! */
46static int fifo_cfg = 0x0028;
47#else
48static const int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
49static const int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
50#endif
51
52/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
53   Setting to > 1514 effectively disables this feature. */
54static int rx_copybreak;
55
56/* Used to pass the media type, etc.
57   No media types are currently defined.  These exist for driver
58   interoperability.
59*/
60#define MAX_UNITS 8				/* More are supported, limit only on options */
61static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
62static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
63
64static int gx_fix;
65
66/* Operational parameters that are set at compile time. */
67
68/* Keep the ring sizes a power of two for efficiency.
69   Making the Tx ring too long decreases the effectiveness of channel
70   bonding and packet priority.
71   There are no ill effects from too-large receive rings. */
72#define TX_RING_SIZE	16
73#define TX_QUEUE_SIZE	12		/* Must be > 4 && <= TX_RING_SIZE */
74#define RX_RING_SIZE	64
75#define STATUS_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct tx_status_words)
76#define TX_TOTAL_SIZE		2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
77#define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct yellowfin_desc)
78
79/* Operational parameters that usually are not changed. */
80/* Time in jiffies before concluding the transmitter is hung. */
81#define TX_TIMEOUT  (2*HZ)
82#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
83
84#define yellowfin_debug debug
85
86#include <linux/module.h>
87#include <linux/kernel.h>
88#include <linux/string.h>
89#include <linux/timer.h>
90#include <linux/errno.h>
91#include <linux/ioport.h>
92#include <linux/interrupt.h>
93#include <linux/pci.h>
94#include <linux/init.h>
95#include <linux/mii.h>
96#include <linux/netdevice.h>
97#include <linux/etherdevice.h>
98#include <linux/skbuff.h>
99#include <linux/ethtool.h>
100#include <linux/crc32.h>
101#include <linux/bitops.h>
102#include <asm/uaccess.h>
103#include <asm/processor.h>		/* Processor type for cache alignment. */
104#include <asm/unaligned.h>
105#include <asm/io.h>
106
107/* These identify the driver base version and may not be removed. */
108static const char version[] __devinitconst =
109  KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
110  "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
111
112MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
113MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
114MODULE_LICENSE("GPL");
115
116module_param(max_interrupt_work, int, 0);
117module_param(mtu, int, 0);
118module_param(debug, int, 0);
119module_param(rx_copybreak, int, 0);
120module_param_array(options, int, NULL, 0);
121module_param_array(full_duplex, int, NULL, 0);
122module_param(gx_fix, int, 0);
123MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
124MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
125MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
126MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
127MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
128MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
129MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
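/* Illustrative module load (parameter values are examples only):
 *   modprobe yellowfin debug=2 rx_copybreak=200 mtu=1500
 */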
130
131/*
132				Theory of Operation
133
134I. Board Compatibility
135
136This device driver is designed for the Packet Engines "Yellowfin" Gigabit
137Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
138Symbios 53C885E dual function chip.
139
140II. Board-specific settings
141
142PCI bus devices are configured by the system at boot time, so no jumpers
143need to be set on the board.  The system BIOS preferably should assign the
144PCI INTA signal to an otherwise unused system IRQ line.
145Note: Kernel versions earlier than 1.3.73 do not support shared PCI
146interrupt lines.
147
148III. Driver operation
149
150IIIa. Ring buffers
151
152The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
153This is a descriptor list scheme similar to that used by the EEPro100 and
154Tulip.  This driver uses two statically allocated fixed-size descriptor lists
155formed into rings by a branch from the final descriptor to the beginning of
156the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
157
158The driver allocates full frame size skbuffs for the Rx ring buffers at
159open() time and passes the skb->data field to the Yellowfin as receive data
160buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
161a fresh skbuff is allocated and the frame is copied to the new skbuff.
162When the incoming frame is larger, the skbuff is passed directly up the
163protocol stack and replaced by a newly allocated skbuff.
164
165The RX_COPYBREAK value is chosen to trade-off the memory wasted by
166using a full-sized skbuff for small frames vs. the copying costs of larger
167frames.  For small frames the copying cost is negligible (esp. considering
168that we are pre-loading the cache with immediately useful header
169information).  For large frames the copying cost is non-trivial, and the
170larger copy might flush the cache of useful data.
171
172IIIb. Synchronization
173
174The driver runs as two independent, single-threaded flows of control.  One
175is the send-packet routine, which enforces single-threaded use by the
176dev->tbusy flag.  The other thread is the interrupt handler, which is single
177threaded by the hardware and other software.
178
179The send packet thread has partial control over the Tx ring and 'dev->tbusy'
180flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
181queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
182the 'yp->tx_full' flag.
183
184The interrupt handler has exclusive control over the Rx ring and records stats
185from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
186empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
187clears both the tx_full and tbusy flags.
188
189IV. Notes
190
191Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
192Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
193	and an AlphaStation to verify the Alpha port!
194
195IVb. References
196
197Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
198Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
199   Data Manual v3.0
200http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
201http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
202
203IVc. Errata
204
205See Packet Engines confidential appendix (prototype chips only).
206*/
207
208
209
210enum capability_flags {
211	HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
212	HasMACAddrBug=32, /* Only on early revs.  */
213	DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
214};
215
216/* The PCI I/O space extent. */
217enum {
218	YELLOWFIN_SIZE	= 0x100,
219};
220
221struct pci_id_info {
222        const char *name;
223        struct match_info {
224                int     pci, pci_mask, subsystem, subsystem_mask;
225                int revision, revision_mask;                            /* Only 8 bits. */
226        } id;
227        int drv_flags;                          /* Driver use, intended as capability flags. */
228};
229
230static const struct pci_id_info pci_id_tbl[] = {
231	{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
232	 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
233	{"Symbios SYM53C885", { 0x07011000, 0xffffffff},
234	  HasMII | DontUseEeprom },
235	{ }
236};
237
238static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = {
239	{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
240	{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
241	{ }
242};
243MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
244
245
246/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
247enum yellowfin_offsets {
248	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
249	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
250	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
251	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
252	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
253	ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
254	Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
255	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
256	MII_Status=0xAE,
257	RxDepth=0xB8, FlowCtrl=0xBC,
258	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
259	EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
260	EEFeature=0xF5,
261};
262
263/* The Yellowfin Rx and Tx buffer descriptors.
264   Elements are written as 32 bit for endian portability. */
265struct yellowfin_desc {
266	__le32 dbdma_cmd;
267	__le32 addr;
268	__le32 branch_addr;
269	__le32 result_status;
270};
271
272struct tx_status_words {
273#ifdef __BIG_ENDIAN
274	u16 tx_errs;
275	u16 tx_cnt;
276	u16 paused;
277	u16 total_tx_cnt;
278#else  /* Little endian chips. */
279	u16 tx_cnt;
280	u16 tx_errs;
281	u16 total_tx_cnt;
282	u16 paused;
283#endif /* __BIG_ENDIAN */
284};
285
286/* Bits in yellowfin_desc.cmd */
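/* Layout: the opcode occupies the top nibble; bits 16-21 select the branch,
   interrupt and wait conditions; the low 16 bits carry the buffer or request
   byte count. */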
287enum desc_cmd_bits {
288	CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
289	CMD_NOP=0x60000000, CMD_STOP=0x70000000,
290	BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
291	BRANCH_IFTRUE=0x040000,
292};
293
294/* Bits in yellowfin_desc.status */
295enum desc_status_bits { RX_EOP=0x0040, };
296
297/* Bits in the interrupt status/mask registers. */
298enum intr_status_bits {
299	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
300	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
301	IntrEarlyRx=0x100, IntrWakeup=0x200, };
302
303#define PRIV_ALIGN	31 	/* Required alignment mask */
304#define MII_CNT		4
305struct yellowfin_private {
306	/* Descriptor rings first for alignment.
307	   Tx requires a second descriptor for status. */
308	struct yellowfin_desc *rx_ring;
309	struct yellowfin_desc *tx_ring;
310	struct sk_buff* rx_skbuff[RX_RING_SIZE];
311	struct sk_buff* tx_skbuff[TX_RING_SIZE];
312	dma_addr_t rx_ring_dma;
313	dma_addr_t tx_ring_dma;
314
315	struct tx_status_words *tx_status;
316	dma_addr_t tx_status_dma;
317
318	struct timer_list timer;	/* Media selection timer. */
319	/* Frequently used and paired value: keep adjacent for cache effect. */
320	int chip_id, drv_flags;
321	struct pci_dev *pci_dev;
322	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
323	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
324	struct tx_status_words *tx_tail_desc;
325	unsigned int cur_tx, dirty_tx;
326	int tx_threshold;
327	unsigned int tx_full:1;				/* The Tx queue is full. */
328	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
329	unsigned int duplex_lock:1;
330	unsigned int medialock:1;			/* Do not sense media. */
331	unsigned int default_port:4;		/* Last dev->if_port value. */
332	/* MII transceiver section. */
333	int mii_cnt;						/* MII device addresses. */
334	u16 advertising;					/* NWay media advertisement */
335	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used */
336	spinlock_t lock;
337	void __iomem *base;
338};
339
340static int read_eeprom(void __iomem *ioaddr, int location);
341static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
342static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
343static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
344static int yellowfin_open(struct net_device *dev);
345static void yellowfin_timer(unsigned long data);
346static void yellowfin_tx_timeout(struct net_device *dev);
347static int yellowfin_init_ring(struct net_device *dev);
348static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
349					struct net_device *dev);
350static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
351static int yellowfin_rx(struct net_device *dev);
352static void yellowfin_error(struct net_device *dev, int intr_status);
353static int yellowfin_close(struct net_device *dev);
354static void set_rx_mode(struct net_device *dev);
355static const struct ethtool_ops ethtool_ops;
356
357static const struct net_device_ops netdev_ops = {
358	.ndo_open 		= yellowfin_open,
359	.ndo_stop 		= yellowfin_close,
360	.ndo_start_xmit 	= yellowfin_start_xmit,
361	.ndo_set_multicast_list = set_rx_mode,
362	.ndo_change_mtu		= eth_change_mtu,
363	.ndo_validate_addr	= eth_validate_addr,
364	.ndo_set_mac_address 	= eth_mac_addr,
365	.ndo_do_ioctl 		= netdev_ioctl,
366	.ndo_tx_timeout 	= yellowfin_tx_timeout,
367};
368
369static int __devinit yellowfin_init_one(struct pci_dev *pdev,
370					const struct pci_device_id *ent)
371{
372	struct net_device *dev;
373	struct yellowfin_private *np;
374	int irq;
375	int chip_idx = ent->driver_data;
376	static int find_cnt;
377	void __iomem *ioaddr;
378	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
379	int drv_flags = pci_id_tbl[chip_idx].drv_flags;
380        void *ring_space;
381        dma_addr_t ring_dma;
382#ifdef USE_IO_OPS
383	int bar = 0;
384#else
385	int bar = 1;
386#endif
387
388/* when built into the kernel, we only print version if device is found */
389#ifndef MODULE
390	static int printed_version;
391	if (!printed_version++)
392		printk(version);
393#endif
394
395	i = pci_enable_device(pdev);
396	if (i) return i;
397
398	dev = alloc_etherdev(sizeof(*np));
399	if (!dev) {
400		pr_err("cannot allocate ethernet device\n");
401		return -ENOMEM;
402	}
403	SET_NETDEV_DEV(dev, &pdev->dev);
404
405	np = netdev_priv(dev);
406
407	if (pci_request_regions(pdev, DRV_NAME))
408		goto err_out_free_netdev;
409
410	pci_set_master (pdev);
411
412	ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
413	if (!ioaddr)
414		goto err_out_free_res;
415
416	irq = pdev->irq;
417
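	/* Station address: chips flagged DontUseEeprom already expose it in the
	   StnAddr registers; otherwise read it from the serial EEPROM, switching
	   to the 0x100 offset bank when byte 6 reads back as 0xff. */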
418	if (drv_flags & DontUseEeprom)
419		for (i = 0; i < 6; i++)
420			dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
421	else {
422		int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
423		for (i = 0; i < 6; i++)
424			dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
425	}
426
427	/* Reset the chip. */
428	iowrite32(0x80000000, ioaddr + DMACtrl);
429
430	dev->base_addr = (unsigned long)ioaddr;
431	dev->irq = irq;
432
433	pci_set_drvdata(pdev, dev);
434	spin_lock_init(&np->lock);
435
436	np->pci_dev = pdev;
437	np->chip_id = chip_idx;
438	np->drv_flags = drv_flags;
439	np->base = ioaddr;
440
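	/* Allocate coherent DMA memory for the Tx descriptor ring (two descriptors
	   per entry), the Rx descriptor ring and the per-entry Tx status words. */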
441	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
442	if (!ring_space)
443		goto err_out_cleardev;
444	np->tx_ring = (struct yellowfin_desc *)ring_space;
445	np->tx_ring_dma = ring_dma;
446
447	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
448	if (!ring_space)
449		goto err_out_unmap_tx;
450	np->rx_ring = (struct yellowfin_desc *)ring_space;
451	np->rx_ring_dma = ring_dma;
452
453	ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
454	if (!ring_space)
455		goto err_out_unmap_rx;
456	np->tx_status = (struct tx_status_words *)ring_space;
457	np->tx_status_dma = ring_dma;
458
459	if (dev->mem_start)
460		option = dev->mem_start;
461
462	/* The lower four bits are the media type. */
463	if (option > 0) {
464		if (option & 0x200)
465			np->full_duplex = 1;
466		np->default_port = option & 15;
467		if (np->default_port)
468			np->medialock = 1;
469	}
470	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
471		np->full_duplex = 1;
472
473	if (np->full_duplex)
474		np->duplex_lock = 1;
475
476	/* The Yellowfin-specific entries in the device structure. */
477	dev->netdev_ops = &netdev_ops;
478	SET_ETHTOOL_OPS(dev, &ethtool_ops);
479	dev->watchdog_timeo = TX_TIMEOUT;
480
481	if (mtu)
482		dev->mtu = mtu;
483
484	i = register_netdev(dev);
485	if (i)
486		goto err_out_unmap_status;
487
488	netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
489		    pci_id_tbl[chip_idx].name,
490		    ioread32(ioaddr + ChipRev), ioaddr,
491		    dev->dev_addr, irq);
492
493	if (np->drv_flags & HasMII) {
494		int phy, phy_idx = 0;
495		for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
496			int mii_status = mdio_read(ioaddr, phy, 1);
497			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
498				np->phys[phy_idx++] = phy;
499				np->advertising = mdio_read(ioaddr, phy, 4);
500				netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
501					    phy, mii_status, np->advertising);
502			}
503		}
504		np->mii_cnt = phy_idx;
505	}
506
507	find_cnt++;
508
509	return 0;
510
511err_out_unmap_status:
512        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
513		np->tx_status_dma);
514err_out_unmap_rx:
515        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
516err_out_unmap_tx:
517        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
518err_out_cleardev:
519	pci_set_drvdata(pdev, NULL);
520	pci_iounmap(pdev, ioaddr);
521err_out_free_res:
522	pci_release_regions(pdev);
523err_out_free_netdev:
524	free_netdev (dev);
525	return -ENODEV;
526}
527
528static int __devinit read_eeprom(void __iomem *ioaddr, int location)
529{
530	int bogus_cnt = 10000;		/* Typical 33MHz: 1050 ticks */
531
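	/* Byte-wide read: EEAddr takes the low 8 address bits, EECtrl the read
	   command plus the high address bits; poll EEStatus until the busy bit
	   (0x80) clears or the counter expires. */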
532	iowrite8(location, ioaddr + EEAddr);
533	iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
534	while ((ioread8(ioaddr + EEStatus) & 0x80)  &&  --bogus_cnt > 0)
535		;
536	return ioread8(ioaddr + EERead);
537}
538
539/* MII Management Data I/O accesses.
540   These routines assume the MDIO controller is idle, and do not exit until
541   the command is finished. */
542
543static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
544{
545	int i;
546
547	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
548	iowrite16(1, ioaddr + MII_Cmd);
549	for (i = 10000; i >= 0; i--)
550		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
551			break;
552	return ioread16(ioaddr + MII_Rd_Data);
553}
554
555static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
556{
557	int i;
558
559	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
560	iowrite16(value, ioaddr + MII_Wr_Data);
561
562	/* Wait for the command to finish. */
563	for (i = 10000; i >= 0; i--)
564		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
565			break;
566}
567
568
569static int yellowfin_open(struct net_device *dev)
570{
571	struct yellowfin_private *yp = netdev_priv(dev);
572	void __iomem *ioaddr = yp->base;
573	int i, ret;
574
575	/* Reset the chip. */
576	iowrite32(0x80000000, ioaddr + DMACtrl);
577
578	ret = request_irq(dev->irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
579	if (ret)
580		return ret;
581
582	if (yellowfin_debug > 1)
583		netdev_printk(KERN_DEBUG, dev, "%s() irq %d\n",
584			      __func__, dev->irq);
585
586	ret = yellowfin_init_ring(dev);
587	if (ret) {
588		free_irq(dev->irq, dev);
589		return ret;
590	}
591
592	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
593	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
594
595	for (i = 0; i < 6; i++)
596		iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
597
598	/* Set up various condition 'select' registers.
599	   There are no options here. */
600	iowrite32(0x00800080, ioaddr + TxIntrSel); 	/* Interrupt on Tx abort */
601	iowrite32(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
602	iowrite32(0x00400040, ioaddr + TxWaitSel); 	/* Wait on Tx status */
603	iowrite32(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
604	iowrite32(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
605	iowrite32(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */
606
607	/* Initialize other registers: there are so many that this will
608	   eventually be converted to an offset/value list. */
609	iowrite32(dma_ctrl, ioaddr + DMACtrl);
610	iowrite16(fifo_cfg, ioaddr + FIFOcfg);
611	/* Enable automatic generation of flow control frames, period 0xffff. */
612	iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
613
614	yp->tx_threshold = 32;
615	iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
616
617	if (dev->if_port == 0)
618		dev->if_port = yp->default_port;
619
620	netif_start_queue(dev);
621
622	/* Setting the Rx mode will start the Rx process. */
623	if (yp->drv_flags & IsGigabit) {
624		/* We are always in full-duplex mode with gigabit! */
625		yp->full_duplex = 1;
626		iowrite16(0x01CF, ioaddr + Cnfg);
627	} else {
628		iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
629		iowrite16(0x1018, ioaddr + FrameGap1);
630		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
631	}
632	set_rx_mode(dev);
633
634	/* Enable interrupts by setting the interrupt mask. */
635	iowrite16(0x81ff, ioaddr + IntrEnb);			/* See enum intr_status_bits */
636	iowrite16(0x0000, ioaddr + EventStatus);		/* Clear non-interrupting events */
637	iowrite32(0x80008000, ioaddr + RxCtrl);		/* Start Rx and Tx channels. */
638	iowrite32(0x80008000, ioaddr + TxCtrl);
639
640	if (yellowfin_debug > 2) {
641		netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
642	}
643
644	/* Set the timer to check for link beat. */
645	init_timer(&yp->timer);
646	yp->timer.expires = jiffies + 3*HZ;
647	yp->timer.data = (unsigned long)dev;
648	yp->timer.function = &yellowfin_timer;				/* timer handler */
649	add_timer(&yp->timer);
650
651	return 0;
652}
653
654static void yellowfin_timer(unsigned long data)
655{
656	struct net_device *dev = (struct net_device *)data;
657	struct yellowfin_private *yp = netdev_priv(dev);
658	void __iomem *ioaddr = yp->base;
659	int next_tick = 60*HZ;
660
661	if (yellowfin_debug > 3) {
662		netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
663			      ioread16(ioaddr + IntrStatus));
664	}
665
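	/* Re-check the negotiated duplex from the PHY and reprogram Cnfg; poll
	   every 3 seconds while the link is down, every 60 seconds once it is up. */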
666	if (yp->mii_cnt) {
667		int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
668		int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
669		int negotiated = lpa & yp->advertising;
670		if (yellowfin_debug > 1)
671			netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
672				      yp->phys[0], bmsr, lpa);
673
674		yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
675
676		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
677
678		if (bmsr & BMSR_LSTATUS)
679			next_tick = 60*HZ;
680		else
681			next_tick = 3*HZ;
682	}
683
684	yp->timer.expires = jiffies + next_tick;
685	add_timer(&yp->timer);
686}
687
688static void yellowfin_tx_timeout(struct net_device *dev)
689{
690	struct yellowfin_private *yp = netdev_priv(dev);
691	void __iomem *ioaddr = yp->base;
692
693	netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
694		    yp->cur_tx, yp->dirty_tx,
695		    ioread32(ioaddr + TxStatus),
696		    ioread32(ioaddr + RxStatus));
697
698	/* Note: these should be KERN_DEBUG. */
699	if (yellowfin_debug) {
700		int i;
701		pr_warning("  Rx ring %p: ", yp->rx_ring);
702		for (i = 0; i < RX_RING_SIZE; i++)
703			pr_cont(" %08x", yp->rx_ring[i].result_status);
704		pr_cont("\n");
705		pr_warning("  Tx ring %p: ", yp->tx_ring);
706		for (i = 0; i < TX_RING_SIZE; i++)
707			pr_cont(" %04x /%08x",
708			       yp->tx_status[i].tx_errs,
709			       yp->tx_ring[i].result_status);
710		pr_cont("\n");
711	}
712
713	/* If the hardware is found to hang regularly, we will update the code
714	   to reinitialize the chip here. */
715	dev->if_port = 0;
716
717	/* Wake the potentially-idle transmit channel. */
718	iowrite32(0x10001000, yp->base + TxCtrl);
719	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
720		netif_wake_queue (dev);		/* Typical path */
721
722	dev->trans_start = jiffies; /* prevent tx timeout */
723	dev->stats.tx_errors++;
724}
725
726/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
727static int yellowfin_init_ring(struct net_device *dev)
728{
729	struct yellowfin_private *yp = netdev_priv(dev);
730	int i, j;
731
732	yp->tx_full = 0;
733	yp->cur_rx = yp->cur_tx = 0;
734	yp->dirty_tx = 0;
735
736	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
737
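	/* Chain the Rx descriptors into a ring: each branch_addr points at the
	   next descriptor, with the last one wrapping back to the first. */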
738	for (i = 0; i < RX_RING_SIZE; i++) {
739		yp->rx_ring[i].dbdma_cmd =
740			cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
741		yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
742			((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
743	}
744
745	for (i = 0; i < RX_RING_SIZE; i++) {
746		struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
747		yp->rx_skbuff[i] = skb;
748		if (skb == NULL)
749			break;
750		skb->dev = dev;		/* Mark as being used by this device. */
751		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
752		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
753			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
754	}
755	if (i != RX_RING_SIZE) {
756		for (j = 0; j < i; j++)
757			dev_kfree_skb(yp->rx_skbuff[j]);
758		return -ENOMEM;
759	}
760	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
761	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
762
763#define NO_TXSTATS
764#ifdef NO_TXSTATS
765	/* In this mode the Tx ring needs only a single descriptor. */
766	for (i = 0; i < TX_RING_SIZE; i++) {
767		yp->tx_skbuff[i] = NULL;
768		yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
769		yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
770			((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
771	}
772	/* Wrap ring */
773	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
774#else
775{
776	/* Tx ring needs a pair of descriptors, the second for the status. */
777	for (i = 0; i < TX_RING_SIZE; i++) {
778		j = 2*i;
779		yp->tx_skbuff[i] = 0;
780		/* Branch on Tx error. */
781		yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
782		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
783			(j+1)*sizeof(struct yellowfin_desc));
784		j++;
785		if (yp->drv_flags & FullTxStatus) {
786			yp->tx_ring[j].dbdma_cmd =
787				cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
788			yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
789			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
790				i*sizeof(struct tx_status_words));
791		} else {
792			/* Symbios chips write only tx_errs word. */
793			yp->tx_ring[j].dbdma_cmd =
794				cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
795			yp->tx_ring[j].request_cnt = 2;
796			/* Om pade ummmmm... */
797			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
798				i*sizeof(struct tx_status_words) +
799				&(yp->tx_status[0].tx_errs) -
800				&(yp->tx_status[0]));
801		}
802		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
803			((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
804	}
805	/* Wrap ring */
806	yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
807}
808#endif
809	yp->tx_tail_desc = &yp->tx_status[0];
810	return 0;
811}
812
813static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
814					struct net_device *dev)
815{
816	struct yellowfin_private *yp = netdev_priv(dev);
817	unsigned entry;
818	int len = skb->len;
819
820	netif_stop_queue (dev);
821
822	/* Note: Ordering is important here, set the field with the
823	   "ownership" bit last, and only then increment cur_tx. */
824
825	/* Calculate the next Tx descriptor entry. */
826	entry = yp->cur_tx % TX_RING_SIZE;
827
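	/* GX server chipset workaround: pad the frame when it would end too close
	   to a 32-byte cache-line boundary. */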
828	if (gx_fix) {	/* Note: only works for paddable protocols e.g.  IP. */
829		int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
830		/* Fix GX chipset errata. */
831		if (cacheline_end > 24  || cacheline_end == 0) {
832			len = skb->len + 32 - cacheline_end + 1;
833			if (skb_padto(skb, len)) {
834				yp->tx_skbuff[entry] = NULL;
835				netif_wake_queue(dev);
836				return NETDEV_TX_OK;
837			}
838		}
839	}
840	yp->tx_skbuff[entry] = skb;
841
842#ifdef NO_TXSTATS
843	yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
844		skb->data, len, PCI_DMA_TODEVICE));
845	yp->tx_ring[entry].result_status = 0;
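	/* Write the new stop command into the following slot before arming this
	   one, so the chip always halts on a valid descriptor. */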
846	if (entry >= TX_RING_SIZE-1) {
847		/* New stop command. */
848		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
849		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
850			cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
851	} else {
852		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
853		yp->tx_ring[entry].dbdma_cmd =
854			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
855	}
856	yp->cur_tx++;
857#else
858	yp->tx_ring[entry<<1].request_cnt = len;
859	yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
860		skb->data, len, PCI_DMA_TODEVICE));
861	/* The input_last (status-write) command is constant, but we must
862	   rewrite the subsequent 'stop' command. */
863
864	yp->cur_tx++;
865	{
866		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
867		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
868	}
869	/* Final step -- overwrite the old 'stop' command. */
870
871	yp->tx_ring[entry<<1].dbdma_cmd =
872		cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
873					  CMD_TX_PKT | BRANCH_IFTRUE) | len);
874#endif
875
876	/* Non-x86 Todo: explicitly flush cache lines here. */
877
878	/* Wake the potentially-idle transmit channel. */
879	iowrite32(0x10001000, yp->base + TxCtrl);
880
881	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
882		netif_start_queue (dev);		/* Typical path */
883	else
884		yp->tx_full = 1;
885
886	if (yellowfin_debug > 4) {
887		netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
888			      yp->cur_tx, entry);
889	}
890	return NETDEV_TX_OK;
891}
892
893/* The interrupt handler does all of the Rx thread work and cleans up
894   after the Tx thread. */
895static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
896{
897	struct net_device *dev = dev_instance;
898	struct yellowfin_private *yp;
899	void __iomem *ioaddr;
900	int boguscnt = max_interrupt_work;
901	unsigned int handled = 0;
902
903	yp = netdev_priv(dev);
904	ioaddr = yp->base;
905
906	spin_lock (&yp->lock);
907
908	do {
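		/* Reading IntrClear fetches (and, per its name, acknowledges) the
		   pending interrupt sources; loop until none remain or the work
		   budget is exhausted. */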
909		u16 intr_status = ioread16(ioaddr + IntrClear);
910
911		if (yellowfin_debug > 4)
912			netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
913				      intr_status);
914
915		if (intr_status == 0)
916			break;
917		handled = 1;
918
919		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
920			yellowfin_rx(dev);
921			iowrite32(0x10001000, ioaddr + RxCtrl);		/* Wake Rx engine. */
922		}
923
924#ifdef NO_TXSTATS
925		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
926			int entry = yp->dirty_tx % TX_RING_SIZE;
927			struct sk_buff *skb;
928
929			if (yp->tx_ring[entry].result_status == 0)
930				break;
931			skb = yp->tx_skbuff[entry];
932			dev->stats.tx_packets++;
933			dev->stats.tx_bytes += skb->len;
934			/* Free the original skb. */
935			pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
936				skb->len, PCI_DMA_TODEVICE);
937			dev_kfree_skb_irq(skb);
938			yp->tx_skbuff[entry] = NULL;
939		}
940		if (yp->tx_full &&
941		    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
942			/* The ring is no longer full, clear tbusy. */
943			yp->tx_full = 0;
944			netif_wake_queue(dev);
945		}
946#else
947		if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
948			unsigned dirty_tx = yp->dirty_tx;
949
950			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
951				 dirty_tx++) {
952				/* Todo: optimize this. */
953				int entry = dirty_tx % TX_RING_SIZE;
954				u16 tx_errs = yp->tx_status[entry].tx_errs;
955				struct sk_buff *skb;
956
957#ifndef final_version
958				if (yellowfin_debug > 5)
959					netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
960						      entry,
961						      yp->tx_status[entry].tx_cnt,
962						      yp->tx_status[entry].tx_errs,
963						      yp->tx_status[entry].total_tx_cnt,
964						      yp->tx_status[entry].paused);
965#endif
966				if (tx_errs == 0)
967					break;	/* It still hasn't been Txed */
968				skb = yp->tx_skbuff[entry];
969				if (tx_errs & 0xF810) {
970					/* There was a major error, log it. */
971#ifndef final_version
972					if (yellowfin_debug > 1)
973						netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
974							      tx_errs);
975#endif
976					dev->stats.tx_errors++;
977					if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
978					if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
979					if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
980					if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
981				} else {
982#ifndef final_version
983					if (yellowfin_debug > 4)
984						netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
985							      tx_errs);
986#endif
987					dev->stats.tx_bytes += skb->len;
988					dev->stats.collisions += tx_errs & 15;
989					dev->stats.tx_packets++;
990				}
991				/* Free the original skb. */
992				pci_unmap_single(yp->pci_dev,
993					yp->tx_ring[entry<<1].addr, skb->len,
994					PCI_DMA_TODEVICE);
995				dev_kfree_skb_irq(skb);
996				yp->tx_skbuff[entry] = 0;
997				/* Mark status as empty. */
998				yp->tx_status[entry].tx_errs = 0;
999			}
1000
1001#ifndef final_version
1002			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
1003				netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
1004					   dirty_tx, yp->cur_tx, yp->tx_full);
1005				dirty_tx += TX_RING_SIZE;
1006			}
1007#endif
1008
1009			if (yp->tx_full &&
1010			    yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1011				/* The ring is no longer full, clear tbusy. */
1012				yp->tx_full = 0;
1013				netif_wake_queue(dev);
1014			}
1015
1016			yp->dirty_tx = dirty_tx;
1017			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1018		}
1019#endif
1020
1021		/* Log errors and other uncommon events. */
1022		if (intr_status & 0x2ee)	/* Abnormal error summary. */
1023			yellowfin_error(dev, intr_status);
1024
1025		if (--boguscnt < 0) {
1026			netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
1027				    intr_status);
1028			break;
1029		}
1030	} while (1);
1031
1032	if (yellowfin_debug > 3)
1033		netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
1034			      ioread16(ioaddr + IntrStatus));
1035
1036	spin_unlock (&yp->lock);
1037	return IRQ_RETVAL(handled);
1038}
1039
1040/* This routine is logically part of the interrupt handler, but separated
1041   for clarity and better register allocation. */
1042static int yellowfin_rx(struct net_device *dev)
1043{
1044	struct yellowfin_private *yp = netdev_priv(dev);
1045	int entry = yp->cur_rx % RX_RING_SIZE;
1046	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1047
1048	if (yellowfin_debug > 4) {
1049		printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
1050			   entry, yp->rx_ring[entry].result_status);
1051		printk(KERN_DEBUG "   #%d desc. %08x %08x %08x\n",
1052			   entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1053			   yp->rx_ring[entry].result_status);
1054	}
1055
1056	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1057	while (1) {
1058		struct yellowfin_desc *desc = &yp->rx_ring[entry];
1059		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1060		s16 frame_status;
1061		u16 desc_status;
1062		int data_size;
1063		u8 *buf_addr;
1064
1065		if(!desc->result_status)
1066			break;
1067		pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
1068			yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1069		desc_status = le32_to_cpu(desc->result_status) >> 16;
1070		buf_addr = rx_skb->data;
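		/* The chip appends a 16-bit frame status word to the received data;
		   data_size is the number of bytes it actually wrote into the buffer. */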
1071		data_size = (le32_to_cpu(desc->dbdma_cmd) -
1072			le32_to_cpu(desc->result_status)) & 0xffff;
1073		frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
1074		if (yellowfin_debug > 4)
1075			printk(KERN_DEBUG "  %s() status was %04x\n",
1076			       __func__, frame_status);
1077		if (--boguscnt < 0)
1078			break;
1079		if ( ! (desc_status & RX_EOP)) {
1080			if (data_size != 0)
1081				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
1082					    desc_status, data_size);
1083			dev->stats.rx_length_errors++;
1084		} else if ((yp->drv_flags & IsGigabit)  &&  (frame_status & 0x0038)) {
1085			/* There was an error. */
1086			if (yellowfin_debug > 3)
1087				printk(KERN_DEBUG "  %s() Rx error was %04x\n",
1088				       __func__, frame_status);
1089			dev->stats.rx_errors++;
1090			if (frame_status & 0x0060) dev->stats.rx_length_errors++;
1091			if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
1092			if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
1093			if (frame_status < 0) dev->stats.rx_dropped++;
1094		} else if ( !(yp->drv_flags & IsGigabit)  &&
1095				   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1096			u8 status1 = buf_addr[data_size-2];
1097			u8 status2 = buf_addr[data_size-1];
1098			dev->stats.rx_errors++;
1099			if (status1 & 0xC0) dev->stats.rx_length_errors++;
1100			if (status2 & 0x03) dev->stats.rx_frame_errors++;
1101			if (status2 & 0x04) dev->stats.rx_crc_errors++;
1102			if (status2 & 0x80) dev->stats.rx_dropped++;
1103#ifdef YF_PROTOTYPE		    /* Support for prototype hardware errata. */
1104		} else if ((yp->drv_flags & HasMACAddrBug)  &&
1105			memcmp(le32_to_cpu(yp->rx_ring_dma +
1106				entry*sizeof(struct yellowfin_desc)),
1107				dev->dev_addr, 6) != 0 &&
1108			memcmp(le32_to_cpu(yp->rx_ring_dma +
1109				entry*sizeof(struct yellowfin_desc)),
1110				"\377\377\377\377\377\377", 6) != 0) {
1111			if (bogus_rx++ == 0)
1112				netdev_warn(dev, "Bad frame to %pM\n",
1113					    buf_addr);
1114#endif
1115		} else {
1116			struct sk_buff *skb;
1117			int pkt_len = data_size -
1118				(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1119			/* To verify: Yellowfin Length should omit the CRC! */
1120
1121#ifndef final_version
1122			if (yellowfin_debug > 4)
1123				printk(KERN_DEBUG "  %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
1124				       __func__, pkt_len, data_size, boguscnt);
1125#endif
1126			/* Check if the packet is long enough to just pass up the skbuff
1127			   without copying to a properly sized skbuff. */
1128			if (pkt_len > rx_copybreak) {
1129				skb_put(skb = rx_skb, pkt_len);
1130				pci_unmap_single(yp->pci_dev,
1131					le32_to_cpu(yp->rx_ring[entry].addr),
1132					yp->rx_buf_sz,
1133					PCI_DMA_FROMDEVICE);
1134				yp->rx_skbuff[entry] = NULL;
1135			} else {
1136				skb = dev_alloc_skb(pkt_len + 2);
1137				if (skb == NULL)
1138					break;
1139				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1140				skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
1141				skb_put(skb, pkt_len);
1142				pci_dma_sync_single_for_device(yp->pci_dev,
1143								le32_to_cpu(desc->addr),
1144								yp->rx_buf_sz,
1145								PCI_DMA_FROMDEVICE);
1146			}
1147			skb->protocol = eth_type_trans(skb, dev);
1148			netif_rx(skb);
1149			dev->stats.rx_packets++;
1150			dev->stats.rx_bytes += pkt_len;
1151		}
1152		entry = (++yp->cur_rx) % RX_RING_SIZE;
1153	}
1154
1155	/* Refill the Rx ring buffers. */
1156	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1157		entry = yp->dirty_rx % RX_RING_SIZE;
1158		if (yp->rx_skbuff[entry] == NULL) {
1159			struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
1160			if (skb == NULL)
1161				break;				/* Better luck next round. */
1162			yp->rx_skbuff[entry] = skb;
1163			skb->dev = dev;	/* Mark as being used by this device. */
1164			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1165			yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1166				skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1167		}
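		/* Mark this slot as the new end of the ring (CMD_STOP) before
		   re-arming the previous slot, so the chip never overtakes the
		   refill pointer. */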
1168		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1169		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
1170		if (entry != 0)
1171			yp->rx_ring[entry - 1].dbdma_cmd =
1172				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1173		else
1174			yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1175				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1176							| yp->rx_buf_sz);
1177	}
1178
1179	return 0;
1180}
1181
1182static void yellowfin_error(struct net_device *dev, int intr_status)
1183{
1184	netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
1185	/* Hmmmmm, it's not clear what to do here. */
1186	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1187		dev->stats.tx_errors++;
1188	if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1189		dev->stats.rx_errors++;
1190}
1191
1192static int yellowfin_close(struct net_device *dev)
1193{
1194	struct yellowfin_private *yp = netdev_priv(dev);
1195	void __iomem *ioaddr = yp->base;
1196	int i;
1197
1198	netif_stop_queue (dev);
1199
1200	if (yellowfin_debug > 1) {
1201		netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
1202			      ioread16(ioaddr + TxStatus),
1203			      ioread16(ioaddr + RxStatus),
1204			      ioread16(ioaddr + IntrStatus));
1205		netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
1206			      yp->cur_tx, yp->dirty_tx,
1207			      yp->cur_rx, yp->dirty_rx);
1208	}
1209
1210	/* Disable interrupts by clearing the interrupt mask. */
1211	iowrite16(0x0000, ioaddr + IntrEnb);
1212
1213	/* Stop the chip's Tx and Rx processes. */
1214	iowrite32(0x80000000, ioaddr + RxCtrl);
1215	iowrite32(0x80000000, ioaddr + TxCtrl);
1216
1217	del_timer(&yp->timer);
1218
1219#if defined(__i386__)
1220	if (yellowfin_debug > 2) {
1221		printk(KERN_DEBUG "  Tx ring at %08llx:\n",
1222				(unsigned long long)yp->tx_ring_dma);
1223		for (i = 0; i < TX_RING_SIZE*2; i++)
1224			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
1225				   ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1226				   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1227				   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1228		printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
1229		for (i = 0; i < TX_RING_SIZE; i++)
1230			printk(KERN_DEBUG "   #%d status %04x %04x %04x %04x\n",
1231				   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1232				   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1233
1234		printk(KERN_DEBUG "  Rx ring %08llx:\n",
1235				(unsigned long long)yp->rx_ring_dma);
1236		for (i = 0; i < RX_RING_SIZE; i++) {
1237			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
1238				   ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1239				   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1240				   yp->rx_ring[i].result_status);
1241			if (yellowfin_debug > 6) {
1242				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1243					int j;
1244
1245					printk(KERN_DEBUG);
1246					for (j = 0; j < 0x50; j++)
1247						pr_cont(" %04x",
1248							get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1249					pr_cont("\n");
1250				}
1251			}
1252		}
1253	}
1254#endif /* __i386__ debugging only */
1255
1256	free_irq(dev->irq, dev);
1257
1258	/* Free all the skbuffs in the Rx queue. */
1259	for (i = 0; i < RX_RING_SIZE; i++) {
1260		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1261		yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1262		if (yp->rx_skbuff[i]) {
1263			dev_kfree_skb(yp->rx_skbuff[i]);
1264		}
1265		yp->rx_skbuff[i] = NULL;
1266	}
1267	for (i = 0; i < TX_RING_SIZE; i++) {
1268		if (yp->tx_skbuff[i])
1269			dev_kfree_skb(yp->tx_skbuff[i]);
1270		yp->tx_skbuff[i] = NULL;
1271	}
1272
1273#ifdef YF_PROTOTYPE			    /* Support for prototype hardware errata. */
1274	if (yellowfin_debug > 0) {
1275		netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
1276			      bogus_rx);
1277	}
1278#endif
1279
1280	return 0;
1281}
1282
1283/* Set or clear the multicast filter for this adaptor. */
1284
1285static void set_rx_mode(struct net_device *dev)
1286{
1287	struct yellowfin_private *yp = netdev_priv(dev);
1288	void __iomem *ioaddr = yp->base;
1289	u16 cfg_value = ioread16(ioaddr + Cnfg);
1290
1291	/* Stop the Rx process to change any value. */
1292	iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
1293	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1294		iowrite16(0x000F, ioaddr + AddrMode);
1295	} else if ((netdev_mc_count(dev) > 64) ||
1296		   (dev->flags & IFF_ALLMULTI)) {
1297		/* Too many to filter well, or accept all multicasts. */
1298		iowrite16(0x000B, ioaddr + AddrMode);
1299	} else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
1300		struct netdev_hw_addr *ha;
1301		u16 hash_table[4];
1302		int i;
1303
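		/* 64-bit hash filter: the bit index is taken from the low 6 bits of the
		   little-endian CRC (shifted right by 3); buggy early chips also need
		   the bits for the 3-, 4- and 5-byte prefixes of each address set. */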
1304		memset(hash_table, 0, sizeof(hash_table));
1305		netdev_for_each_mc_addr(ha, dev) {
1306			unsigned int bit;
1307
1308			/* Due to a bug in the early chip versions, multiple filter
1309			   slots must be set for each address. */
1310			if (yp->drv_flags & HasMulticastBug) {
1311				bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
1312				hash_table[bit >> 4] |= (1 << bit);
1313				bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
1314				hash_table[bit >> 4] |= (1 << bit);
1315				bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
1316				hash_table[bit >> 4] |= (1 << bit);
1317			}
1318			bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
1319			hash_table[bit >> 4] |= (1 << bit);
1320		}
1321		/* Copy the hash table to the chip. */
1322		for (i = 0; i < 4; i++)
1323			iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
1324		iowrite16(0x0003, ioaddr + AddrMode);
1325	} else {					/* Normal, unicast/broadcast-only mode. */
1326		iowrite16(0x0001, ioaddr + AddrMode);
1327	}
1328	/* Restart the Rx process. */
1329	iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
1330}
1331
1332static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1333{
1334	struct yellowfin_private *np = netdev_priv(dev);
1335	strcpy(info->driver, DRV_NAME);
1336	strcpy(info->version, DRV_VERSION);
1337	strcpy(info->bus_info, pci_name(np->pci_dev));
1338}
1339
1340static const struct ethtool_ops ethtool_ops = {
1341	.get_drvinfo = yellowfin_get_drvinfo
1342};
1343
1344static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1345{
1346	struct yellowfin_private *np = netdev_priv(dev);
1347	void __iomem *ioaddr = np->base;
1348	struct mii_ioctl_data *data = if_mii(rq);
1349
1350	switch(cmd) {
1351	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
1352		data->phy_id = np->phys[0] & 0x1f;
1353		/* Fall Through */
1354
1355	case SIOCGMIIREG:		/* Read MII PHY register. */
1356		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1357		return 0;
1358
1359	case SIOCSMIIREG:		/* Write MII PHY register. */
1360		if (data->phy_id == np->phys[0]) {
1361			u16 value = data->val_in;
1362			switch (data->reg_num) {
1363			case 0:
1364				/* Check for autonegotiation on or reset. */
1365				np->medialock = (value & 0x9000) ? 0 : 1;
1366				if (np->medialock)
1367					np->full_duplex = (value & 0x0100) ? 1 : 0;
1368				break;
1369			case 4: np->advertising = value; break;
1370			}
1371			/* Perhaps check_duplex(dev), depending on chip semantics. */
1372		}
1373		mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1374		return 0;
1375	default:
1376		return -EOPNOTSUPP;
1377	}
1378}
1379
1380
1381static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
1382{
1383	struct net_device *dev = pci_get_drvdata(pdev);
1384	struct yellowfin_private *np;
1385
1386	BUG_ON(!dev);
1387	np = netdev_priv(dev);
1388
1389        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
1390		np->tx_status_dma);
1391	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
1392	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
1393	unregister_netdev (dev);
1394
1395	pci_iounmap(pdev, np->base);
1396
1397	pci_release_regions (pdev);
1398
1399	free_netdev (dev);
1400	pci_set_drvdata(pdev, NULL);
1401}
1402
1403
1404static struct pci_driver yellowfin_driver = {
1405	.name		= DRV_NAME,
1406	.id_table	= yellowfin_pci_tbl,
1407	.probe		= yellowfin_init_one,
1408	.remove		= __devexit_p(yellowfin_remove_one),
1409};
1410
1411
1412static int __init yellowfin_init (void)
1413{
1414/* when a module, this is printed whether or not devices are found in probe */
1415#ifdef MODULE
1416	printk(version);
1417#endif
1418	return pci_register_driver(&yellowfin_driver);
1419}
1420
1421
1422static void __exit yellowfin_cleanup (void)
1423{
1424	pci_unregister_driver (&yellowfin_driver);
1425}
1426
1427
1428module_init(yellowfin_init);
1429module_exit(yellowfin_cleanup);
1430