/*
	Written 1998-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support information and updates available at
	http://www.scyld.com/network/pci-skeleton.html

	Linux kernel updates:

	Version 2.51, Nov 17, 2001 (jgarzik):
	- Add ethtool support
	- Replace some MII-related magic numbers with constants

*/

#define DRV_NAME	"fealnx"

static int debug;		/* 1-> print debug message */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
/* Setting to > 1518 effectively disables this feature.          */
static int rx_copybreak;

/* Used to pass the media type, etc.                            */
/* Both 'options[]' and 'full_duplex[]' should exist for driver */
/* interoperability.                                            */
/* The media type is usually passed in 'options[]'.             */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
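
/* Illustrative (hypothetical values): per fealnx_init_one() below, the low
 * four bits of an options[] entry select the media type and 0x200 forces
 * full duplex, so a load such as
 *
 *	modprobe fealnx options=0x204 full_duplex=1
 *
 * would force media type 4 plus full duplex on the first board.  This is a
 * sketch of the parameter encoding, not a recommended setting. */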

/* Operational parameters that are set at compile time.                 */
/* Keep the ring sizes a power of two for compile efficiency.           */
/* The compiler will convert <unsigned>'%'<2^N> into a bit mask.        */
/* Making the Tx ring too large decreases the effectiveness of channel  */
/* bonding and packet priority.                                         */
/* There are no ill effects from too-large receive rings.               */
// 88-12-9 modify,
// #define TX_RING_SIZE    16
// #define RX_RING_SIZE    32
#define TX_RING_SIZE    6
#define RX_RING_SIZE    12
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct fealnx_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct fealnx_desc))

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2*HZ)

#define PKT_BUF_SZ      1536	/* Size of each temporary Rx buffer. */


/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>

/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses. */
#ifndef __alpha__
#define USE_IO_OPS
#endif

/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
/* This is only in the support-all-kernels source code. */

#define RUN_AT(x) (jiffies + (x))

MODULE_AUTHOR("Myson or whoever");
MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");

enum {
	MIN_REGION_SIZE		= 136,
};

/* A chip capabilities table, matching the entries in pci_tbl[] above. */
enum chip_capability_flags {
	HAS_MII_XCVR,
	HAS_CHIP_XCVR,
};

/* 89/6/13 add, */
/* for different PHY */
enum phy_type_flags {
	MysonPHY = 1,
	AhdocPHY = 2,
	SeeqPHY = 3,
	MarvellPHY = 4,
	Myson981 = 5,
	LevelOnePHY = 6,
	OtherPHY = 10,
};

struct chip_info {
	char *chip_name;
	int flags;
};

static const struct chip_info skel_netdrv_tbl[] = {
	{ "100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
	{ "100/10M Ethernet PCI Adapter",	HAS_CHIP_XCVR },
	{ "1000/100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
};

/* Offsets to the Command and Status Registers. */
enum fealnx_offsets {
	PAR0 = 0x0,		/* physical address 0-3 */
	PAR1 = 0x04,		/* physical address 4-5 */
	MAR0 = 0x08,		/* multicast address 0-3 */
	MAR1 = 0x0C,		/* multicast address 4-7 */
	FAR0 = 0x10,		/* flow-control address 0-3 */
	FAR1 = 0x14,		/* flow-control address 4-5 */
	TCRRCR = 0x18,		/* receive & transmit configuration */
	BCR = 0x1C,		/* bus command */
	TXPDR = 0x20,		/* transmit polling demand */
	RXPDR = 0x24,		/* receive polling demand */
	RXCWP = 0x28,		/* receive current word pointer */
	TXLBA = 0x2C,		/* transmit list base address */
	RXLBA = 0x30,		/* receive list base address */
	ISR = 0x34,		/* interrupt status */
	IMR = 0x38,		/* interrupt mask */
	FTH = 0x3C,		/* flow control high/low threshold */
	MANAGEMENT = 0x40,	/* bootrom/eeprom and mii management */
	TALLY = 0x44,		/* tally counters for crc and mpa */
	TSR = 0x48,		/* tally counter for transmit status */
	BMCRSR = 0x4c,		/* basic mode control and status */
	PHYIDENTIFIER = 0x50,	/* phy identifier */
	ANARANLPAR = 0x54,	/* auto-negotiation advertisement and link
				   partner ability */
	ANEROCR = 0x58,		/* auto-negotiation expansion and pci conf. */
	BPREMRPSR = 0x5c,	/* bypass & receive error mask and phy status */
};
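
/* All of these registers are accessed relative to the mapped BAR, e.g.
 * (illustrative):
 *
 *	u32 cr = ioread32(ioaddr + TCRRCR);
 *	iowrite32(cr | CR_W_RXEN, ioaddr + TCRRCR);
 *
 * ioread32()/iowrite32() work on both I/O- and memory-mapped regions,
 * which is why USE_IO_OPS above only selects which BAR gets mapped. */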

/* Bits in the interrupt status/enable registers. */
/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
enum intr_status_bits {
	RFCON = 0x00020000,	/* receive flow control xon packet */
	RFCOFF = 0x00010000,	/* receive flow control xoff packet */
	LSCStatus = 0x00008000,	/* link status change */
	ANCStatus = 0x00004000,	/* autonegotiation completed */
	FBE = 0x00002000,	/* fatal bus error */
	FBEMask = 0x00001800,	/* mask bit12-11 */
	ParityErr = 0x00000000,	/* parity error */
	TargetErr = 0x00001000,	/* target abort */
	MasterErr = 0x00000800,	/* master error */
	TUNF = 0x00000400,	/* transmit underflow */
	ROVF = 0x00000200,	/* receive overflow */
	ETI = 0x00000100,	/* transmit early int */
	ERI = 0x00000080,	/* receive early int */
	CNTOVF = 0x00000040,	/* counter overflow */
	RBU = 0x00000020,	/* receive buffer unavailable */
	TBU = 0x00000010,	/* transmit buffer unavailable */
	TI = 0x00000008,	/* transmit interrupt */
	RI = 0x00000004,	/* receive interrupt */
	RxErr = 0x00000002,	/* receive error */
};

/* Bits in the NetworkConfig register, W for writing, R for reading */
/* FIXME: some names are invented by me. Marked with (name?) */
/* If you have docs and know bit names, please fix 'em */
enum rx_mode_bits {
	CR_W_ENH	= 0x02000000,	/* enhanced mode (name?) */
	CR_W_FD		= 0x00100000,	/* full duplex */
	CR_W_PS10	= 0x00080000,	/* 10 mbit */
	CR_W_TXEN	= 0x00040000,	/* tx enable (name?) */
	CR_W_PS1000	= 0x00010000,	/* 1000 mbit */
     /* CR_W_RXBURSTMASK= 0x00000e00, I'm unsure about this */
	CR_W_RXMODEMASK	= 0x000000e0,
	CR_W_PROM	= 0x00000080,	/* promiscuous mode */
	CR_W_AB		= 0x00000040,	/* accept broadcast */
	CR_W_AM		= 0x00000020,	/* accept multicast */
	CR_W_ARP	= 0x00000008,	/* receive runt pkt */
	CR_W_ALP	= 0x00000004,	/* receive long pkt */
	CR_W_SEP	= 0x00000002,	/* receive error pkt */
	CR_W_RXEN	= 0x00000001,	/* rx enable (unicast?) (name?) */

	CR_R_TXSTOP	= 0x04000000,	/* tx stopped (name?) */
	CR_R_FD		= 0x00100000,	/* full duplex detected */
	CR_R_PS10	= 0x00080000,	/* 10 mbit detected */
	CR_R_RXSTOP	= 0x00008000,	/* rx stopped (name?) */
};
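
/* A minimal sketch of how these bits compose a receive mode, in the style
 * of __set_rx_mode() (illustrative, not the exact function body):
 *
 *	u32 rx_mode = CR_W_AB | CR_W_AM;	// broadcast + filtered multicast
 *	np->crvalue = (np->crvalue & ~CR_W_RXMODEMASK) | rx_mode;
 *	iowrite32(np->crvalue, ioaddr + TCRRCR);
 */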

/* The Tulip Rx and Tx buffer descriptors. */
struct fealnx_desc {
	s32 status;
	s32 control;
	u32 buffer;
	u32 next_desc;
	struct fealnx_desc *next_desc_logical;
	struct sk_buff *skbuff;
	u32 reserved1;
	u32 reserved2;
};

/* Bits in network_desc.status */
enum rx_desc_status_bits {
	RXOWN = 0x80000000,	/* own bit */
	FLNGMASK = 0x0fff0000,	/* frame length */
	FLNGShift = 16,
	MARSTATUS = 0x00004000,	/* multicast address received */
	BARSTATUS = 0x00002000,	/* broadcast address received */
	PHYSTATUS = 0x00001000,	/* physical address received */
	RXFSD = 0x00000800,	/* first descriptor */
	RXLSD = 0x00000400,	/* last descriptor */
	ErrorSummary = 0x80,	/* error summary */
	RUNTPKT = 0x40,		/* runt packet received */
	LONGPKT = 0x20,		/* long packet received */
	FAE = 0x10,		/* frame align error */
	CRC = 0x08,		/* crc error */
	RXER = 0x04,		/* receive error */
};
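
/* The frame length occupies bits 27-16 of the status word and includes the
 * 4-byte CRC, so netdev_rx() below recovers the payload length with:
 *
 *	pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;
 */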

enum rx_desc_control_bits {
	RXIC = 0x00800000,	/* interrupt control */
	RBSShift = 0,
};

enum tx_desc_status_bits {
	TXOWN = 0x80000000,	/* own bit */
	JABTO = 0x00004000,	/* jabber timeout */
	CSL = 0x00002000,	/* carrier sense lost */
	LC = 0x00001000,	/* late collision */
	EC = 0x00000800,	/* excessive collision */
	UDF = 0x00000400,	/* fifo underflow */
	DFR = 0x00000200,	/* deferred */
	HF = 0x00000100,	/* heartbeat fail */
	NCRMask = 0x000000ff,	/* collision retry count */
	NCRShift = 0,
};

enum tx_desc_control_bits {
	TXIC = 0x80000000,	/* interrupt control */
	ETIControl = 0x40000000,	/* early transmit interrupt */
	TXLD = 0x20000000,	/* last descriptor */
	TXFD = 0x10000000,	/* first descriptor */
	CRCEnable = 0x08000000,	/* crc control */
	PADEnable = 0x04000000,	/* padding control */
	RetryTxLC = 0x02000000,	/* retry late collision */
	PKTSMask = 0x3ff800,	/* packet size bit21-11 */
	PKTSShift = 11,
	TBSMask = 0x000007ff,	/* transmit buffer bit 10-0 */
	TBSShift = 0,
};
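
/* Both the whole-packet size and this descriptor's buffer size are packed
 * into one control word.  For a 1514-byte frame sent from a single buffer
 * (illustrative, mirroring the one_buffer path of start_tx() below):
 *
 *	control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
 *	control |= 1514 << PKTSShift;	// bits 21-11: total packet size
 *	control |= 1514 << TBSShift;	// bits 10-0:  this buffer's size
 */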

/* BootROM/EEPROM/MII Management Register */
#define MASK_MIIR_MII_READ       0x00000000
#define MASK_MIIR_MII_WRITE      0x00000008
#define MASK_MIIR_MII_MDO        0x00000004
#define MASK_MIIR_MII_MDI        0x00000002
#define MASK_MIIR_MII_MDC        0x00000001

/* ST+OP+PHYAD+REGAD+TA */
#define OP_READ             0x6000	/* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
#define OP_WRITE            0x5002	/* ST:01+OP:01+PHYAD+REGAD+TA:10 */
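
/* The 16 command bits shifted out by m80x_send_cmd_to_phy() below are built
 * as opcode | (phyad << 7) | (regad << 2).  Illustrative: a read of
 * register 1 on PHY address 2 sends 0x6000 | (2 << 7) | (1 << 2) = 0x6104,
 * followed by 16 clocked-in data bits. */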

/* ------------------------------------------------------------------------- */
/*      Constants for Myson PHY                                              */
/* ------------------------------------------------------------------------- */
#define MysonPHYID      0xd0000302
/* 89-7-27 add, (begin) */
#define MysonPHYID0     0x0302
#define StatusRegister  18
#define SPEED100        0x0400	// bit10
#define FULLMODE        0x0800	// bit11
/* 89-7-27 add, (end) */

/* ------------------------------------------------------------------------- */
/*      Constants for Seeq 80225 PHY                                         */
/* ------------------------------------------------------------------------- */
#define SeeqPHYID0      0x0016

#define MIIRegister18   18
#define SPD_DET_100     0x80
#define DPLX_DET_FULL   0x40

/* ------------------------------------------------------------------------- */
/*      Constants for Ahdoc 101 PHY                                          */
/* ------------------------------------------------------------------------- */
#define AhdocPHYID0     0x0022

#define DiagnosticReg   18
#define DPLX_FULL       0x0800
#define Speed_100       0x0400

/* 89/6/13 add, */
/* -------------------------------------------------------------------------- */
/*      Constants                                                             */
/* -------------------------------------------------------------------------- */
#define MarvellPHYID0           0x0141
#define LevelOnePHYID0		0x0013

#define MII1000BaseTControlReg  9
#define MII1000BaseTStatusReg   10
#define SpecificReg		17

/* for 1000BaseT Control Register */
#define PHYAbletoPerform1000FullDuplex  0x0200
#define PHYAbletoPerform1000HalfDuplex  0x0100
#define PHY1000AbilityMask              0x300

// for phy specific status register, marvell phy.
#define SpeedMask       0x0c000
#define Speed_1000M     0x08000
#define Speed_100M      0x4000
#define Speed_10M       0
#define Full_Duplex     0x2000

// 89/12/29 add, for phy specific status register, levelone phy, (begin)
#define LXT1000_100M    0x08000
#define LXT1000_1000M   0x0c000
#define LXT1000_Full    0x200
// 89/12/29 add, for phy specific status register, levelone phy, (end)

/* for 3-in-1 case, BMCRSR register */
#define LinkIsUp2	0x00040000

/* for PHY */
#define LinkIsUp        0x0004


struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct fealnx_desc *rx_ring;
	struct fealnx_desc *tx_ring;

	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	spinlock_t lock;

	/* Media monitoring timer. */
	struct timer_list timer;

	/* Reset timer */
	struct timer_list reset_timer;
	int reset_timer_armed;
	unsigned long crvalue_sv;
	unsigned long imrvalue_sv;

	/* Frequently used values: keep some adjacent for cache effect. */
	int flags;
	struct pci_dev *pci_dev;
	unsigned long crvalue;
	unsigned long bcrvalue;
	unsigned long imrvalue;
	struct fealnx_desc *cur_rx;
	struct fealnx_desc *lack_rxbuf;
	int really_rx_count;
	struct fealnx_desc *cur_tx;
	struct fealnx_desc *cur_tx_copy;
	int really_tx_count;
	int free_tx_count;
	unsigned int rx_buf_sz;	/* Based on MTU+slack. */

	/* These values keep track of the transceiver/media in use. */
	unsigned int linkok;
	unsigned int line_speed;
	unsigned int duplexmode;
	unsigned int default_port:4;	/* Last dev->if_port value. */
	unsigned int PHYType;

	/* MII transceiver section. */
	int mii_cnt;		/* number of MII PHYs found. */
	unsigned char phys[2];	/* MII device addresses. */
	struct mii_if_info mii;
	void __iomem *mem;
};


static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void reset_timer(struct timer_list *t);
static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static int netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
static void reset_rx_descriptors(struct net_device *dev);
static void reset_tx_descriptors(struct net_device *dev);

static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;
	iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
			break;
	}
}


static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;
	iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
					    == (CR_R_RXSTOP+CR_R_TXSTOP) )
			break;
	}
}

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats 		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_eth_ioctl		= mii_ioctl,
	.ndo_tx_timeout		= fealnx_tx_timeout,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int fealnx_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct netdev_private *np;
	int i, option, err, irq;
	static int card_idx = -1;
	char boardname[12];
	void __iomem *ioaddr;
	unsigned long len;
	unsigned int chip_id = ent->driver_data;
	struct net_device *dev;
	void *ring_space;
	dma_addr_t ring_dma;
	u8 addr[ETH_ALEN];
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif

	card_idx++;
	sprintf(boardname, "fealnx%d", card_idx);

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;

	i = pci_enable_device(pdev);
	if (i) return i;
	pci_set_master(pdev);

	len = pci_resource_len(pdev, bar);
	if (len < MIN_REGION_SIZE) {
		dev_err(&pdev->dev,
			   "region size %ld too small, aborting\n", len);
		return -ENODEV;
	}

	i = pci_request_regions(pdev, boardname);
	if (i)
		return i;

	irq = pdev->irq;

	ioaddr = pci_iomap(pdev, bar, len);
	if (!ioaddr) {
		err = -ENOMEM;
		goto err_out_res;
	}

	dev = alloc_etherdev(sizeof(struct netdev_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_unmap;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* read ethernet id */
	for (i = 0; i < 6; ++i)
		addr[i] = ioread8(ioaddr + PAR0 + i);
	eth_hw_addr_set(dev, addr);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	/* Make certain the descriptor lists are aligned. */
	np = netdev_priv(dev);
	np->mem = ioaddr;
	spin_lock_init(&np->lock);
	np->pci_dev = pdev;
	np->flags = skel_netdrv_tbl[chip_id].flags;
	pci_set_drvdata(pdev, dev);
	np->mii.dev = dev;
	np->mii.mdio_read = mdio_read;
	np->mii.mdio_write = mdio_write;
	np->mii.phy_id_mask = 0x1f;
	np->mii.reg_num_mask = 0x1f;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_rx;
	}
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	/* find the connected MII xcvrs */
	if (np->flags == HAS_MII_XCVR) {
		int phy, phy_idx = 0;

		for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
			       phy++) {
			int mii_status = mdio_read(dev, phy, 1);

			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
				       "MII PHY found at address %d, status "
				       "0x%4.4x.\n", phy, mii_status);
				/* get phy type */
				{
					unsigned int data;

					data = mdio_read(dev, np->phys[0], 2);
					if (data == SeeqPHYID0)
						np->PHYType = SeeqPHY;
					else if (data == AhdocPHYID0)
						np->PHYType = AhdocPHY;
					else if (data == MarvellPHYID0)
						np->PHYType = MarvellPHY;
					else if (data == MysonPHYID0)
						np->PHYType = Myson981;
					else if (data == LevelOnePHYID0)
						np->PHYType = LevelOnePHY;
					else
						np->PHYType = OtherPHY;
				}
			}
		}

		np->mii_cnt = phy_idx;
		if (phy_idx == 0)
			dev_warn(&pdev->dev,
				"MII PHY not found -- this device may "
			       "not operate correctly.\n");
	} else {
		np->phys[0] = 32;
/* 89/6/23 add, (begin) */
		/* get phy type */
		if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
			np->PHYType = MysonPHY;
		else
			np->PHYType = OtherPHY;
	}
	np->mii.phy_id = np->phys[0];

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii.full_duplex = 1;
		np->default_port = option & 15;
	}

	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		np->mii.full_duplex = full_duplex[card_idx];

	if (np->mii.full_duplex) {
		dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
/* 89/6/13 add, (begin) */
//      if (np->PHYType==MarvellPHY)
		if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], 9);
			data = (data & 0xfcff) | 0x0200;
			mdio_write(dev, np->phys[0], 9, data);
		}
/* 89/6/13 add, (end) */
		if (np->flags == HAS_MII_XCVR)
			mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
		else
			iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
		np->mii.force_media = 1;
	}

	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	err = register_netdev(dev);
	if (err)
		goto err_out_free_tx;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
	       dev->dev_addr, irq);

	return 0;

err_out_free_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
			  np->tx_ring_dma);
err_out_free_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
			  np->rx_ring_dma);
err_out_free_dev:
	free_netdev(dev);
err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
	return err;
}


static void fealnx_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
				  np->tx_ring_dma);
		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
				  np->rx_ring_dma);
		unregister_netdev(dev);
		pci_iounmap(pdev, np->mem);
		free_netdev(dev);
		pci_release_regions(pdev);
	} else
		printk(KERN_ERR "fealnx: remove for unknown device\n");
}


static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
{
	ulong miir;
	int i;
	unsigned int mask, data;

	/* enable MII output */
	miir = (ulong) ioread32(miiport);
	miir &= 0xfffffff0;

	miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;

	/* send 32 1's preamble */
	for (i = 0; i < 32; i++) {
		/* low MDC; MDO is already high (miir) */
		miir &= ~MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
	}

	/* calculate ST+OP+PHYAD+REGAD+TA */
	data = opcode | (phyad << 7) | (regad << 2);

	/* send it out */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MASK_MIIR_MII_MDO;

		iowrite32(miir, miiport);
		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
		udelay(30);

		/* next */
		mask >>= 1;
		if (mask == 0x2 && opcode == OP_READ)
			miir &= ~MASK_MIIR_MII_WRITE;
	}
	return miir;
}


static int mdio_read(struct net_device *dev, int phyad, int regad)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *miiport = np->mem + MANAGEMENT;
	ulong miir;
	unsigned int mask, data;

	miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);

	/* read data */
	mask = 0x8000;
	data = 0;
	while (mask) {
		/* low MDC */
		miir &= ~MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* read MDI */
		miir = ioread32(miiport);
		if (miir & MASK_MIIR_MII_MDI)
			data |= mask;

		/* high MDC, and wait */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
		udelay(30);

		/* next */
		mask >>= 1;
	}

	/* low MDC */
	miir &= ~MASK_MIIR_MII_MDC;
	iowrite32(miir, miiport);

	return data & 0xffff;
}
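
/* Typical use, as in getlinkstatus() below (illustrative):
 *
 *	if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS)
 *		np->linkok = 1;
 */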


static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *miiport = np->mem + MANAGEMENT;
	ulong miir;
	unsigned int mask;

	miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);

	/* write data */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MASK_MIIR_MII_MDO;
		iowrite32(miir, miiport);

		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* next */
		mask >>= 1;
	}

	/* low MDC */
	miir &= ~MASK_MIIR_MII_MDC;
	iowrite32(miir, miiport);
}


static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	const int irq = np->pci_dev->irq;
	int rc, i;

	iowrite32(0x00000001, ioaddr + BCR);	/* Reset */

	rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (rc)
		return -EAGAIN;

	for (i = 0; i < 3; i++)
		iowrite16(((const unsigned short *)dev->dev_addr)[i],
				ioaddr + PAR0 + i*2);

	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
	iowrite32(np->tx_ring_dma, ioaddr + TXLBA);

	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword burst.
	   586: no burst limit.
	   Burst length 5:3
	   0 0 0   1
	   0 0 1   4
	   0 1 0   8
	   0 1 1   16
	   1 0 0   32
	   1 0 1   64
	   1 1 0   128
	   1 1 1   256
	   Wait the specified 50 PCI cycles after a reset by initializing
	   Tx and Rx queues and the address filter list.
	   FIXME (Ueimor): optimistic for alpha + posted writes ? */

	np->bcrvalue = 0x10;	/* little-endian, 8 burst length */
#ifdef __BIG_ENDIAN
	np->bcrvalue |= 0x04;	/* big-endian */
#endif

#if defined(__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
	if (boot_cpu_data.x86 <= 4)
		np->crvalue = 0xa00;
	else
#endif
		np->crvalue = 0xe00;	/* rx 128 burst length */
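	/* Illustrative: the two values above differ only inside the presumed
	   rx burst length field (CR_W_RXBURSTMASK, 0xe00); old 486-class
	   CPUs get the shorter 0xa00 burst setting, everything else 0xe00. */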


// 89/12/29 add,
// 90/1/16 modify,
//   np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
	np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
	if (np->pci_dev->device == 0x891) {
		np->bcrvalue |= 0x200;	/* set PROG bit */
		np->crvalue |= CR_W_ENH;	/* set enhanced bit */
		np->imrvalue |= ETI;
	}
	iowrite32(np->bcrvalue, ioaddr + BCR);

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	iowrite32(0, ioaddr + RXPDR);
// 89/9/1 modify,
//   np->crvalue = 0x00e40001;    /* tx store and forward, tx/rx enable */
	np->crvalue |= 0x00e40001;	/* tx store and forward, tx/rx enable */
	np->mii.full_duplex = np->mii.force_media;
	getlinkstatus(dev);
	if (np->linkok)
		getlinktype(dev);
	__set_rx_mode(dev);

	netif_start_queue(dev);

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
	iowrite32(np->imrvalue, ioaddr + IMR);

	if (debug)
		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

	/* Set the timer to check for link beat. */
	timer_setup(&np->timer, netdev_timer, 0);
	np->timer.expires = RUN_AT(3 * HZ);

	/* timer handler */
	add_timer(&np->timer);

	timer_setup(&np->reset_timer, reset_timer, 0);
	np->reset_timer_armed = 0;
	return rc;
}


static void getlinkstatus(struct net_device *dev)
/* function: Routine will read MII Status Register to get link status.       */
/* input   : dev... pointer to the adapter block.                            */
/* output  : none.                                                           */
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int i, DelayTime = 0x1000;

	np->linkok = 0;

	if (np->PHYType == MysonPHY) {
		for (i = 0; i < DelayTime; ++i) {
			if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
				np->linkok = 1;
				return;
			}
			udelay(100);
		}
	} else {
		for (i = 0; i < DelayTime; ++i) {
			if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
				np->linkok = 1;
				return;
			}
			udelay(100);
		}
	}
}


static void getlinktype(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	if (np->PHYType == MysonPHY) {	/* 3-in-1 case */
		if (ioread32(np->mem + TCRRCR) & CR_R_FD)
			np->duplexmode = 2;	/* full duplex */
		else
			np->duplexmode = 1;	/* half duplex */
		if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
			np->line_speed = 1;	/* 10M */
		else
			np->line_speed = 2;	/* 100M */
	} else {
		if (np->PHYType == SeeqPHY) {	/* this PHY is SEEQ 80225 */
			unsigned int data;

			data = mdio_read(dev, np->phys[0], MIIRegister18);
			if (data & SPD_DET_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_DET_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		} else if (np->PHYType == AhdocPHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], DiagnosticReg);
			if (data & Speed_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		}
/* 89/6/13 add, (begin) */
		else if (np->PHYType == MarvellPHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & Full_Duplex)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == Speed_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == Speed_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}
/* 89/6/13 add, (end) */
/* 89/7/27 add, (begin) */
		else if (np->PHYType == Myson981) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], StatusRegister);

			if (data & SPEED100)
				np->line_speed = 2;
			else
				np->line_speed = 1;

			if (data & FULLMODE)
				np->duplexmode = 2;
			else
				np->duplexmode = 1;
		}
/* 89/7/27 add, (end) */
/* 89/12/29 add */
		else if (np->PHYType == LevelOnePHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & LXT1000_Full)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == LXT1000_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == LXT1000_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}
		np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
		if (np->line_speed == 1)
			np->crvalue |= CR_W_PS10;
		else if (np->line_speed == 3)
			np->crvalue |= CR_W_PS1000;
		if (np->duplexmode == 2)
			np->crvalue |= CR_W_FD;
	}
}
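
/* Summary of the ad-hoc encoding used above and throughout the driver:
 * line_speed 1/2/3 = 10M/100M/1000M and duplexmode 1/2 = half/full;
 * getlinktype() then folds the result into np->crvalue via CR_W_PS10,
 * CR_W_PS1000 and CR_W_FD. */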


/* Take lock before calling this */
static void allocate_rx_buffers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/*  allocate skb for rx buffers */
	while (np->really_rx_count != RX_RING_SIZE) {
		struct sk_buff *skb;

		skb = netdev_alloc_skb(dev, np->rx_buf_sz);
		if (skb == NULL)
			break;	/* Better luck next round. */

		while (np->lack_rxbuf->skbuff)
			np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;

		np->lack_rxbuf->skbuff = skb;
		np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev,
							skb->data,
							np->rx_buf_sz,
							DMA_FROM_DEVICE);
		np->lack_rxbuf->status = RXOWN;
		++np->really_rx_count;
	}
}


static void netdev_timer(struct timer_list *t)
{
	struct netdev_private *np = from_timer(np, t, timer);
	struct net_device *dev = np->mii.dev;
	void __iomem *ioaddr = np->mem;
	int old_crvalue = np->crvalue;
	unsigned int old_linkok = np->linkok;
	unsigned long flags;

	if (debug)
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
		       "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
		       ioread32(ioaddr + TCRRCR));

	spin_lock_irqsave(&np->lock, flags);

	if (np->flags == HAS_MII_XCVR) {
		getlinkstatus(dev);
		if ((old_linkok == 0) && (np->linkok == 1)) {	/* we need to detect the media type again */
			getlinktype(dev);
			if (np->crvalue != old_crvalue) {
				stop_nic_rxtx(ioaddr, np->crvalue);
				iowrite32(np->crvalue, ioaddr + TCRRCR);
			}
		}
	}

	allocate_rx_buffers(dev);

	spin_unlock_irqrestore(&np->lock, flags);

	np->timer.expires = RUN_AT(10 * HZ);
	add_timer(&np->timer);
}


/* Take lock before calling */
/* Reset chip and disable rx, tx and interrupts */
static void reset_and_disable_rxtx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int delay=51;

	/* Reset the chip's Tx and Rx processes. */
	stop_nic_rxtx(ioaddr, 0);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0, ioaddr + IMR);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	/* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
	   We surely wait too long (address+data phase). Who cares? */
	while (--delay) {
		ioread32(ioaddr + BCR);
		rmb();
	}
}


/* Take lock before calling */
/* Restore chip after reset */
static void enable_rxtx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	reset_rx_descriptors(dev);

	iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
		ioaddr + TXLBA);
	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
		ioaddr + RXLBA);

	iowrite32(np->bcrvalue, ioaddr + BCR);

	iowrite32(0, ioaddr + RXPDR);
	__set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
	iowrite32(np->imrvalue, ioaddr + IMR);

	iowrite32(0, ioaddr + TXPDR);
}


static void reset_timer(struct timer_list *t)
{
	struct netdev_private *np = from_timer(np, t, reset_timer);
	struct net_device *dev = np->mii.dev;
	unsigned long flags;

	printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);

	spin_lock_irqsave(&np->lock, flags);
	np->crvalue = np->crvalue_sv;
	np->imrvalue = np->imrvalue_sv;

	reset_and_disable_rxtx(dev);
	/* works for me without this:
	reset_tx_descriptors(dev); */
	enable_rxtx(dev);
	netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */

	np->reset_timer_armed = 0;

	spin_unlock_irqrestore(&np->lock, flags);
}


static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	unsigned long flags;
	int i;

	printk(KERN_WARNING
	       "%s: Transmit timed out, status %8.8x, resetting...\n",
	       dev->name, ioread32(ioaddr + ISR));

	{
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_CONT " %8.8x",
			       (unsigned int) np->rx_ring[i].status);
		printk(KERN_CONT "\n");
		printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
		printk(KERN_CONT "\n");
	}

	spin_lock_irqsave(&np->lock, flags);

	reset_and_disable_rxtx(dev);
	reset_tx_descriptors(dev);
	enable_rxtx(dev);

	spin_unlock_irqrestore(&np->lock, flags);

	netif_trans_update(dev); /* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev); /* or .._start_.. ?? */
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* initialize rx variables */
	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	np->cur_rx = &np->rx_ring[0];
	np->lack_rxbuf = np->rx_ring;
	np->really_rx_count = 0;

	/* initialize rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
		np->rx_ring[i].next_desc = np->rx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
		np->rx_ring[i].skbuff = NULL;
	}

	/* for the last rx descriptor */
	np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
	np->rx_ring[i - 1].next_desc_logical = np->rx_ring;

	/* allocate skb for rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);

		if (skb == NULL) {
			np->lack_rxbuf = &np->rx_ring[i];
			break;
		}

		++np->really_rx_count;
		np->rx_ring[i].skbuff = skb;
		np->rx_ring[i].buffer = dma_map_single(&np->pci_dev->dev,
						       skb->data,
						       np->rx_buf_sz,
						       DMA_FROM_DEVICE);
		np->rx_ring[i].status = RXOWN;
		np->rx_ring[i].control |= RXIC;
	}

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].status = 0;
		/* do we need np->tx_ring[i].control = XXX; ?? */
		np->tx_ring[i].next_desc = np->tx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
		np->tx_ring[i].skbuff = NULL;
	}

	/* for the last tx descriptor */
	np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
}
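
/* After init_ring() each ring is a circular singly-linked list; for the tx
 * side (illustrative):
 *
 *	tx_ring[0] -> tx_ring[1] -> ... -> tx_ring[TX_RING_SIZE-1] -> tx_ring[0]
 *
 * next_desc holds the bus address the chip follows; next_desc_logical holds
 * the kernel virtual pointer the driver follows. */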


static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);

	np->cur_tx_copy->skbuff = skb;

#define one_buffer
#define BPT 1022
#if defined(one_buffer)
	np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, skb->data,
						 skb->len, DMA_TO_DEVICE);
	np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
	np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
	np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
// 89/12/29 add,
	if (np->pci_dev->device == 0x891)
		np->cur_tx_copy->control |= ETIControl | RetryTxLC;
	np->cur_tx_copy->status = TXOWN;
	np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
	--np->free_tx_count;
#elif defined(two_buffer)
	if (skb->len > BPT) {
		struct fealnx_desc *next;

		/* for the first descriptor */
		np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
							 skb->data, BPT,
							 DMA_TO_DEVICE);
		np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (BPT << TBSShift);	/* buffer size */

		/* for the last descriptor */
		next = np->cur_tx_copy->next_desc_logical;
		next->skbuff = skb;
		next->control = TXIC | TXLD | CRCEnable | PADEnable;
		next->control |= (skb->len << PKTSShift);	/* pkt size */
		next->control |= ((skb->len - BPT) << TBSShift);	/* buf size */
// 89/12/29 add,
		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		next->buffer = dma_map_single(&np->pci_dev->dev,
					      skb->data + BPT, skb->len - BPT,
					      DMA_TO_DEVICE);

		next->status = TXOWN;
		np->cur_tx_copy->status = TXOWN;

		np->cur_tx_copy = next->next_desc_logical;
		np->free_tx_count -= 2;
	} else {
		np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
							 skb->data, skb->len,
							 DMA_TO_DEVICE);
		np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
// 89/12/29 add,
		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		np->cur_tx_copy->status = TXOWN;
		np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
		--np->free_tx_count;
	}
#endif

	if (np->free_tx_count < 2)
		netif_stop_queue(dev);
	++np->really_tx_count;
	iowrite32(0, np->mem + TXPDR);

	spin_unlock_irqrestore(&np->lock, flags);
	return NETDEV_TX_OK;
}


/* Take lock before calling */
/* Chip probably hosed tx ring. Clean up. */
static void reset_tx_descriptors(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct fealnx_desc *cur;
	int i;

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		cur = &np->tx_ring[i];
		if (cur->skbuff) {
			dma_unmap_single(&np->pci_dev->dev, cur->buffer,
					 cur->skbuff->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(cur->skbuff);
			cur->skbuff = NULL;
		}
		cur->status = 0;
		cur->control = 0;	/* needed? */
		/* probably not needed. We do it for purely paranoid reasons */
		cur->next_desc = np->tx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		cur->next_desc_logical = &np->tx_ring[i + 1];
	}
	/* for the last tx descriptor */
	np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
}


/* Take lock and stop rx before calling this */
static void reset_rx_descriptors(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct fealnx_desc *cur = np->cur_rx;
	int i;

	allocate_rx_buffers(dev);

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (cur->skbuff)
			cur->status = RXOWN;
		cur = cur->next_desc_logical;
	}

	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
		np->mem + RXLBA);
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	long boguscnt = max_interrupt_work;
	unsigned int num_tx = 0;
	int handled = 0;

	spin_lock(&np->lock);

	iowrite32(0, ioaddr + IMR);

	do {
		u32 intr_status = ioread32(ioaddr + ISR);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status, ioaddr + ISR);

		if (debug)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
			       intr_status);

		if (!(intr_status & np->imrvalue))
			break;

		handled = 1;

// 90/1/16 delete,
//
//      if (intr_status & FBE)
//      {   /* fatal error */
//          stop_nic_tx(ioaddr, 0);
//          stop_nic_rx(ioaddr, 0);
//          break;
//      };

		if (intr_status & TUNF)
			iowrite32(0, ioaddr + TXPDR);

		if (intr_status & CNTOVF) {
			/* missed pkts */
			dev->stats.rx_missed_errors +=
				ioread32(ioaddr + TALLY) & 0x7fff;

			/* crc error */
			dev->stats.rx_crc_errors +=
			    (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
		}

		if (intr_status & (RI | RBU)) {
			if (intr_status & RI)
				netdev_rx(dev);
			else {
				stop_nic_rx(ioaddr, np->crvalue);
				reset_rx_descriptors(dev);
				iowrite32(np->crvalue, ioaddr + TCRRCR);
			}
		}

		while (np->really_tx_count) {
			long tx_status = np->cur_tx->status;
			long tx_control = np->cur_tx->control;

			if (!(tx_control & TXLD)) {	/* this pkt spans two tx descriptors */
1492				struct fealnx_desc *next;
1493
1494				next = np->cur_tx->next_desc_logical;
1495				tx_status = next->status;
1496				tx_control = next->control;
1497			}
1498
1499			if (tx_status & TXOWN)
1500				break;
1501
1502			if (!(np->crvalue & CR_W_ENH)) {
1503				if (tx_status & (CSL | LC | EC | UDF | HF)) {
1504					dev->stats.tx_errors++;
1505					if (tx_status & EC)
1506						dev->stats.tx_aborted_errors++;
1507					if (tx_status & CSL)
1508						dev->stats.tx_carrier_errors++;
1509					if (tx_status & LC)
1510						dev->stats.tx_window_errors++;
1511					if (tx_status & UDF)
1512						dev->stats.tx_fifo_errors++;
1513					if ((tx_status & HF) && np->mii.full_duplex == 0)
1514						dev->stats.tx_heartbeat_errors++;
1515
1516				} else {
1517					dev->stats.tx_bytes +=
1518					    ((tx_control & PKTSMask) >> PKTSShift);
1519
1520					dev->stats.collisions +=
1521					    ((tx_status & NCRMask) >> NCRShift);
1522					dev->stats.tx_packets++;
1523				}
1524			} else {
1525				dev->stats.tx_bytes +=
1526				    ((tx_control & PKTSMask) >> PKTSShift);
1527				dev->stats.tx_packets++;
1528			}
1529
1530			/* Free the original skb. */
1531			dma_unmap_single(&np->pci_dev->dev,
1532					 np->cur_tx->buffer,
1533					 np->cur_tx->skbuff->len,
1534					 DMA_TO_DEVICE);
1535			dev_consume_skb_irq(np->cur_tx->skbuff);
1536			np->cur_tx->skbuff = NULL;
1537			--np->really_tx_count;
1538			if (np->cur_tx->control & TXLD) {
1539				np->cur_tx = np->cur_tx->next_desc_logical;
1540				++np->free_tx_count;
1541			} else {
1542				np->cur_tx = np->cur_tx->next_desc_logical;
1543				np->cur_tx = np->cur_tx->next_desc_logical;
1544				np->free_tx_count += 2;
1545			}
1546			num_tx++;
1547		}		/* end of for loop */
1548
1549		if (num_tx && np->free_tx_count >= 2)
1550			netif_wake_queue(dev);
1551
1552		/* read transmit status for enhanced mode only */
1553		if (np->crvalue & CR_W_ENH) {
1554			long data;
1555
1556			data = ioread32(ioaddr + TSR);
1557			dev->stats.tx_errors += (data & 0xff000000) >> 24;
1558			dev->stats.tx_aborted_errors +=
1559				(data & 0xff000000) >> 24;
1560			dev->stats.tx_window_errors +=
1561				(data & 0x00ff0000) >> 16;
1562			dev->stats.collisions += (data & 0x0000ffff);
1563		}
1564
1565		if (--boguscnt < 0) {
1566			printk(KERN_WARNING "%s: Too much work at interrupt, "
1567			       "status=0x%4.4x.\n", dev->name, intr_status);
1568			if (!np->reset_timer_armed) {
1569				np->reset_timer_armed = 1;
1570				np->reset_timer.expires = RUN_AT(HZ/2);
1571				add_timer(&np->reset_timer);
1572				stop_nic_rxtx(ioaddr, 0);
1573				netif_stop_queue(dev);
1574				/* or netif_tx_disable(dev); ?? */
1575				/* Prevent other paths from enabling tx,rx,intrs */
1576				np->crvalue_sv = np->crvalue;
1577				np->imrvalue_sv = np->imrvalue;
1578				np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */
1579				np->imrvalue = 0;
1580			}
1581
1582			break;
1583		}
1584	} while (1);
1585
/* Read the tally counters. */
/* missed packets */
dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;

/* crc errors */
dev->stats.rx_crc_errors +=
	(ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;

if (debug)
	printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
	       dev->name, ioread32(ioaddr + ISR));

iowrite32(np->imrvalue, ioaddr + IMR);

spin_unlock(&np->lock);

return IRQ_RETVAL(handled);
}


/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
		s32 rx_status = np->cur_rx->status;

		if (np->really_rx_count == 0)
			break;

		if (debug)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n", rx_status);

		if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
		    (rx_status & ErrorSummary)) {
			if (rx_status & ErrorSummary) {	/* there was a fatal error */
				if (debug)
					printk(KERN_DEBUG
					       "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, rx_status);

				dev->stats.rx_errors++;	/* end of a packet. */
				if (rx_status & (LONGPKT | RUNTPKT))
					dev->stats.rx_length_errors++;
				if (rx_status & RXER)
					dev->stats.rx_frame_errors++;
				if (rx_status & CRC)
					dev->stats.rx_crc_errors++;
			} else {
				int need_to_reset = 0;
				int desno = 0;

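				/* A frame larger than one buffer arrives as a
				   chain of descriptors: RXFSD marks the first
				   fragment and RXLSD the last.  Walk the ring
				   looking for the closing fragment before
				   deciding how to recover. */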
				if (rx_status & RXFSD) {	/* frame spans more than one rx buffer */
					struct fealnx_desc *cur;

					/* Check whether this frame was received completely. */
					cur = np->cur_rx;
					while (desno <= np->really_rx_count) {
						++desno;
						if ((!(cur->status & RXOWN)) &&
						    (cur->status & RXLSD))
							break;
						/* advance to the next rx descriptor */
						cur = cur->next_desc_logical;
					}
					if (desno > np->really_rx_count)
						need_to_reset = 1;
				} else	/* no RXLSD found; descriptor state is inconsistent */
					need_to_reset = 1;

				if (need_to_reset == 0) {
					int i;

					dev->stats.rx_length_errors++;

					/* free all rx descriptors related to this long packet */
					for (i = 0; i < desno; ++i) {
						if (!np->cur_rx->skbuff) {
							printk(KERN_DEBUG
								"%s: rx descriptor without an skbuff while dropping a long frame\n",
								dev->name);
							break;
						}
						np->cur_rx->status = RXOWN;
						np->cur_rx = np->cur_rx->next_desc_logical;
					}
					continue;
				} else {        /* rx error, need to reset this chip */
					stop_nic_rx(ioaddr, np->crvalue);
					reset_rx_descriptors(dev);
					iowrite32(np->crvalue, ioaddr + TCRRCR);
				}
				break;	/* exit the while loop */
			}
		} else {	/* this received packet is OK */

			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;

#ifndef final_version
			if (debug)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
				       " status %x.\n", pkt_len, rx_status);
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&np->pci_dev->dev,
							np->cur_rx->buffer,
							np->rx_buf_sz,
							DMA_FROM_DEVICE);
				/* Copy the frame data into the new skb. */

#ifndef __alpha__
				skb_copy_to_linear_data(skb,
					np->cur_rx->skbuff->data, pkt_len);
				skb_put(skb, pkt_len);
#else
				skb_put_data(skb, np->cur_rx->skbuff->data,
					     pkt_len);
#endif
				dma_sync_single_for_device(&np->pci_dev->dev,
							   np->cur_rx->buffer,
							   np->rx_buf_sz,
							   DMA_FROM_DEVICE);
			} else {
				dma_unmap_single(&np->pci_dev->dev,
						 np->cur_rx->buffer,
						 np->rx_buf_sz,
						 DMA_FROM_DEVICE);
				skb = np->cur_rx->skbuff;
				skb_put(skb, pkt_len);
				np->cur_rx->skbuff = NULL;
				--np->really_rx_count;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}

		np->cur_rx = np->cur_rx->next_desc_logical;
	}			/* end of while loop */

	/* Refill the rx ring with fresh skbs for any slots just consumed. */
	allocate_rx_buffers(dev);

	return 0;
}


static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* The chip only needs to report frames that were silently dropped. */
	if (netif_running(dev)) {
		dev->stats.rx_missed_errors +=
			ioread32(ioaddr + TALLY) & 0x7fff;
		dev->stats.rx_crc_errors +=
			(ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
	}

	return &dev->stats;
}


/* for dev->ndo_set_rx_mode */
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);
	__set_rx_mode(dev);
	spin_unlock_irqrestore(&np->lock, flags);
}


/* Take np->lock before calling. */
static void __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	u32 mc_filter[2];	/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = CR_W_AB | CR_W_AM;
	} else {
		struct netdev_hw_addr *ha;

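		/* Hash the multicast list: the top six bits of the Ethernet
		   CRC select one of the 64 filter bits in MAR0/MAR1 (scheme
		   inferred from the code; typical of tulip-class chips). */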
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			unsigned int bit;
			bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
			mc_filter[bit >> 5] |= (1U << (bit & 31));
		}
		rx_mode = CR_W_AB | CR_W_AM;
	}

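	/* Quiesce the NIC while the filter registers and rx-mode bits are
	   rewritten, then restore the control register. */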
	stop_nic_rxtx(ioaddr, np->crvalue);

	iowrite32(mc_filter[0], ioaddr + MAR0);
	iowrite32(mc_filter[1], ioaddr + MAR1);
	np->crvalue &= ~CR_W_RXMODEMASK;
	np->crvalue |= rx_mode;
	iowrite32(np->crvalue, ioaddr + TCRRCR);
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return 0;
}

static int netdev_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

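/* ethtool glue.  Typical userspace mappings (assumed standard ethtool-core
 * dispatch, not verified against this driver specifically):
 *   ethtool -i ethX   -> netdev_get_drvinfo
 *   ethtool -r ethX   -> netdev_nway_reset
 *   ethtool ethX      -> netdev_get_link_ksettings
 */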
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};

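/* SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG are handled by the generic MII
 * layer; the driver only supplies the locking around PHY access. */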
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}


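/* Orderly shutdown: mask interrupts, stop the DMA engines, kill the
 * timers, release the IRQ, then unmap and free every ring buffer. */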
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int i;

	netif_stop_queue(dev);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0x0000, ioaddr + IMR);

	/* Stop the chip's Tx and Rx processes. */
	stop_nic_rxtx(ioaddr, 0);

	del_timer_sync(&np->timer);
	del_timer_sync(&np->reset_timer);

	free_irq(np->pci_dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = np->rx_ring[i].skbuff;

		np->rx_ring[i].status = 0;
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
					 np->rx_ring[i].buffer, np->rx_buf_sz,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			np->rx_ring[i].skbuff = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_ring[i].skbuff;

		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
					 np->tx_ring[i].buffer, skb->len,
					 DMA_TO_DEVICE);
			dev_kfree_skb(skb);
			np->tx_ring[i].skbuff = NULL;
		}
	}

	return 0;
}

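/* PCI vendor ID 0x1516 is Myson Technology; the last field (driver_data,
 * 0..2) appears to index the chip variant at probe time (assumption based
 * on the three device IDs). */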
static const struct pci_device_id fealnx_pci_tbl[] = {
	{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{} /* terminate list */
};
MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);


static struct pci_driver fealnx_driver = {
	.name		= "fealnx",
	.id_table	= fealnx_pci_tbl,
	.probe		= fealnx_init_one,
	.remove		= fealnx_remove_one,
};

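/* module_pci_driver() expands to the module_init()/module_exit() pair
 * that registers and unregisters fealnx_driver with the PCI core. */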
module_pci_driver(fealnx_driver);