/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
	Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>

	Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
	Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
	Copyright 2001 Manfred Spraul				    [natsemi.c]
	Copyright 1999-2001 by Donald Becker.			    [natsemi.c]
	Written 1997-2001 by Donald Becker.			    [8139too.c]
	Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	See the file COPYING in this distribution for more information.

	Contributors:

		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

	TODO:
	* Test Tx checksumming thoroughly
	* Implement dev->tx_timeout

	Low priority TODO:
	* Complete reset on PciErr
	* Consider Rx interrupt mitigation using TimerIntr
	* Investigate using skb->priority with h/w VLAN priority
	* Investigate using High Priority Tx Queue with skb->priority
	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
	* Implement Tx software interrupt mitigation via
	  Tx descriptor bit
	* The real minimum of CP_MIN_MTU is 4 bytes.  However,
	  for this to be supported, one must(?) turn on packet padding.
	* Support external MII transceivers (patch available)

	NOTES:
	* TX checksumming is considered experimental.  It is off by
	  default, use ethtool to turn it on.
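	  For example, assuming the interface is named eth0:
	      ethtool -K eth0 tx on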

 */

#define DRV_NAME		"8139cp"
#define DRV_VERSION		"1.3"
#define DRV_RELDATE		"Mar 22, 2004"


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/cache.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* VLAN tagging feature enable/disable */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define CP_VLAN_TAG_USED 1
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
#else
#define CP_VLAN_TAG_USED 0
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = 0; } while (0)
#endif

/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
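/* Example, assuming the driver is built as a module (values illustrative):
 *     modprobe 8139cp debug=7 multicast_filter_limit=16
 * debug=7 enables the NETIF_MSG_DRV, NETIF_MSG_PROBE and NETIF_MSG_LINK
 * classes listed in CP_DEF_MSG_ENABLE below.
 */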

#define PFX			DRV_NAME ": "

#ifndef TRUE
#define FALSE 0
#define TRUE (!FALSE)
#endif

#define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
				 NETIF_MSG_PROBE	| \
				 NETIF_MSG_LINK)
#define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
#define CP_REGS_SIZE		(0xff + 1)
#define CP_REGS_VER		1		/* version 1 */
#define CP_RX_RING_SIZE		64
#define CP_TX_RING_SIZE		64
#define CP_RING_BYTES		\
		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
		 CP_STATS_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)					\
	(((CP)->tx_tail <= (CP)->tx_head) ?			\
	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :	\
	  (CP)->tx_tail - (CP)->tx_head - 1)
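/* Note: one Tx descriptor is always left unused, so the ring is "full" at
 * CP_TX_RING_SIZE - 1 entries; head == tail then unambiguously means empty.
 */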

#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
#define RX_OFFSET		2
#define CP_INTERNAL_PHY		32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer.  */
#define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
#define CP_MAX_MTU		4096
enum {
	/* NIC register offsets */
	MAC0		= 0x00,	/* Ethernet hardware address. */
	MAR0		= 0x08,	/* Multicast filter. */
	StatsAddr	= 0x10,	/* 64-bit start addr of 64-byte DMA stats blk */
	TxRingAddr	= 0x20, /* 64-bit start addr of Tx ring */
	HiTxRingAddr	= 0x28, /* 64-bit start addr of high priority Tx ring */
	Cmd		= 0x37, /* Command register */
	IntrMask	= 0x3C, /* Interrupt mask */
	IntrStatus	= 0x3E, /* Interrupt status */
	TxConfig	= 0x40, /* Tx configuration */
	ChipVersion	= 0x43, /* 8-bit chip version, inside TxConfig */
	RxConfig	= 0x44, /* Rx configuration */
	RxMissed	= 0x4C,	/* 24 bits valid, write clears */
	Cfg9346		= 0x50, /* EEPROM select/control; Cfg reg [un]lock */
	Config1		= 0x52, /* Config1 */
	Config3		= 0x59, /* Config3 */
	Config4		= 0x5A, /* Config4 */
	MultiIntr	= 0x5C, /* Multiple interrupt select */
	BasicModeCtrl	= 0x62,	/* MII BMCR */
	BasicModeStatus	= 0x64, /* MII BMSR */
	NWayAdvert	= 0x66, /* MII ADVERTISE */
	NWayLPAR	= 0x68, /* MII LPA */
	NWayExpansion	= 0x6A, /* MII Expansion */
	Config5		= 0xD8,	/* Config5 */
	TxPoll		= 0xD9,	/* Tell chip to check Tx descriptors for work */
	RxMaxSize	= 0xDA, /* Max size of an Rx packet (8169 only) */
	CpCmd		= 0xE0, /* C+ Command register (C+ mode only) */
	IntrMitigate	= 0xE2,	/* rx/tx interrupt mitigation control */
	RxRingAddr	= 0xE4, /* 64-bit start addr of Rx ring */
	TxThresh	= 0xEC, /* Early Tx threshold */
	OldRxBufAddr	= 0x30, /* DMA address of Rx ring buffer (C mode) */
	OldTSD0		= 0x10, /* DMA address of first Tx desc (C mode) */

	/* Tx and Rx status descriptors */
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */
	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
	MSSShift	= 16,	     /* MSS value position */
	MSSMask		= 0xfff,     /* MSS value: 11 bits */
	TxError		= (1 << 23), /* Tx error summary */
	RxError		= (1 << 20), /* Rx error summary */
	IPCS		= (1 << 18), /* Calculate IP checksum */
	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
	TxVlanTag	= (1 << 17), /* Add VLAN tag */
	RxVlanTagged	= (1 << 16), /* Rx VLAN tag available */
	IPFail		= (1 << 15), /* IP checksum failed */
	UDPFail		= (1 << 14), /* UDP/IP checksum failed */
	TCPFail		= (1 << 13), /* TCP/IP checksum failed */
	NormalTxPoll	= (1 << 6),  /* One or more normal Tx packets to send */
	PID1		= (1 << 17), /* 2 protocol id bits:  0==non-IP, */
	PID0		= (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
	RxProtoTCP	= 1,
	RxProtoUDP	= 2,
	RxProtoIP	= 3,
	TxFIFOUnder	= (1 << 25), /* Tx FIFO underrun */
	TxOWC		= (1 << 22), /* Tx Out-of-window collision */
	TxLinkFail	= (1 << 21), /* Link failed during Tx of packet */
	TxMaxCol	= (1 << 20), /* Tx aborted due to excessive collisions */
	TxColCntShift	= 16,	     /* Shift, to get 4-bit Tx collision cnt */
	TxColCntMask	= 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
	RxErrFrame	= (1 << 27), /* Rx frame alignment error */
	RxMcast		= (1 << 26), /* Rx multicast packet rcv'd */
	RxErrCRC	= (1 << 18), /* Rx CRC error */
	RxErrRunt	= (1 << 19), /* Rx error, packet < 64 bytes */
	RxErrLong	= (1 << 21), /* Rx error, packet > 4096 bytes */
	RxErrFIFO	= (1 << 22), /* Rx error, FIFO overflowed, pkt bad */

	/* StatsAddr register */
	DumpStats	= (1 << 3),  /* Begin stats dump */

	/* RxConfig register */
	RxCfgFIFOShift	= 13,	     /* Shift, to get Rx FIFO thresh value */
	RxCfgDMAShift	= 8,	     /* Shift, to get Rx Max DMA value */
	AcceptErr	= 0x20,	     /* Accept packets with CRC errors */
	AcceptRunt	= 0x10,	     /* Accept runt (<64 bytes) packets */
	AcceptBroadcast	= 0x08,	     /* Accept broadcast packets */
	AcceptMulticast	= 0x04,	     /* Accept multicast packets */
	AcceptMyPhys	= 0x02,	     /* Accept pkts with our MAC as dest */
	AcceptAllPhys	= 0x01,	     /* Accept all pkts w/ physical dest */

	/* IntrMask / IntrStatus registers */
	PciErr		= (1 << 15), /* System error on the PCI bus */
	TimerIntr	= (1 << 14), /* Asserted when TCTR reaches TimerInt value */
	LenChg		= (1 << 13), /* Cable length change */
	SWInt		= (1 << 8),  /* Software-requested interrupt */
	TxEmpty		= (1 << 7),  /* No Tx descriptors available */
	RxFIFOOvr	= (1 << 6),  /* Rx FIFO Overflow */
	LinkChg		= (1 << 5),  /* Packet underrun, or link change */
	RxEmpty		= (1 << 4),  /* No Rx descriptors available */
	TxErr		= (1 << 3),  /* Tx error */
	TxOK		= (1 << 2),  /* Tx packet sent */
	RxErr		= (1 << 1),  /* Rx error */
	RxOK		= (1 << 0),  /* Rx packet received */
	IntrResvd	= (1 << 10), /* reserved, according to RealTek engineers,
					but hardware likes to raise it */

	IntrAll		= PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
			  RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
			  RxErr | RxOK | IntrResvd,

	/* C mode command register */
	CmdReset	= (1 << 4),  /* Enable to reset; self-clearing */
	RxOn		= (1 << 3),  /* Rx mode enable */
	TxOn		= (1 << 2),  /* Tx mode enable */

	/* C+ mode command register */
	RxVlanOn	= (1 << 6),  /* Rx VLAN de-tagging enable */
	RxChkSum	= (1 << 5),  /* Rx checksum offload enable */
	PCIDAC		= (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
	PCIMulRW	= (1 << 3),  /* Enable PCI read/write multiple */
	CpRxOn		= (1 << 1),  /* Rx mode enable */
	CpTxOn		= (1 << 0),  /* Tx mode enable */

	/* Cfg9346 EEPROM control register */
	Cfg9346_Lock	= 0x00,	     /* Lock ConfigX/MII register access */
	Cfg9346_Unlock	= 0xC0,	     /* Unlock ConfigX/MII register access */

	/* TxConfig register */
	IFG		= (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
	TxDMAShift	= 8,	     /* DMA burst value (0-7) is shifted this many bits */

	/* Early Tx Threshold register */
	TxThreshMask	= 0x3f,	     /* Mask bits 5-0 */
	TxThreshMax	= 2048,	     /* Max early Tx threshold */

	/* Config1 register */
	DriverLoaded	= (1 << 5),  /* Software marker, driver is loaded */
	LWACT           = (1 << 4),  /* LWAKE active mode */
	PMEnable	= (1 << 0),  /* Enable various PM features of chip */

	/* Config3 register */
	PARMEnable	= (1 << 6),  /* Enable auto-loading of PHY parms */
	MagicPacket     = (1 << 5),  /* Wake up when receives a Magic Packet */
	LinkUp          = (1 << 4),  /* Wake up when the cable connection is re-established */

	/* Config4 register */
	LWPTN           = (1 << 1),  /* LWAKE Pattern */
	LWPME           = (1 << 4),  /* LANWAKE vs PMEB */

	/* Config5 register */
	BWF             = (1 << 6),  /* Accept Broadcast wakeup frame */
	MWF             = (1 << 5),  /* Accept Multicast wakeup frame */
	UWF             = (1 << 4),  /* Accept Unicast wakeup frame */
	LANWake         = (1 << 1),  /* Enable LANWake signal */
	PMEStatus	= (1 << 0),  /* PME status can be reset by PCI RST# */

	cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
	cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
	cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};

static const unsigned int cp_rx_config =
	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
	  (RX_DMA_BURST << RxCfgDMAShift);

struct cp_desc {
	u32		opts1;
	u32		opts2;
	u64		addr;
};

struct cp_dma_stats {
	u64			tx_ok;
	u64			rx_ok;
	u64			tx_err;
	u32			rx_err;
	u16			rx_fifo;
	u16			frame_align;
	u32			tx_ok_1col;
	u32			tx_ok_mcol;
	u64			rx_ok_phys;
	u64			rx_ok_bcast;
	u32			rx_ok_mcast;
	u16			tx_abort;
	u16			tx_underrun;
} __attribute__((packed));

struct cp_extra_stats {
	unsigned long		rx_frags;
};

struct cp_private {
	void			__iomem *regs;
	struct net_device	*dev;
	spinlock_t		lock;
	u32			msg_enable;

	struct pci_dev		*pdev;
	u32			rx_config;
	u16			cpcmd;

	struct net_device_stats net_stats;
	struct cp_extra_stats	cp_stats;

	unsigned		rx_head		____cacheline_aligned;
	unsigned		rx_tail;
	struct cp_desc		*rx_ring;
	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];

	unsigned		tx_head		____cacheline_aligned;
	unsigned		tx_tail;
	struct cp_desc		*tx_ring;
	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];

	unsigned		rx_buf_sz;
	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */

#if CP_VLAN_TAG_USED
	struct vlan_group	*vlgrp;
#endif
	dma_addr_t		ring_dma;

	struct mii_if_info	mii_if;
};

#define cpr8(reg)	readb(cp->regs + (reg))
#define cpr16(reg)	readw(cp->regs + (reg))
#define cpr32(reg)	readl(cp->regs + (reg))
#define cpw8(reg,val)	writeb((val), cp->regs + (reg))
#define cpw16(reg,val)	writew((val), cp->regs + (reg))
#define cpw32(reg,val)	writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {			\
	writeb((val), cp->regs + (reg));	\
	readb(cp->regs + (reg));		\
	} while (0)
#define cpw16_f(reg,val) do {			\
	writew((val), cp->regs + (reg));	\
	readw(cp->regs + (reg));		\
	} while (0)
#define cpw32_f(reg,val) do {			\
	writel((val), cp->regs + (reg));	\
	readl(cp->regs + (reg));		\
	} while (0)
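/* The _f ("flush") variants read the register back after writing it, which
 * forces any posted PCI write out to the chip before execution continues.
 */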


static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);

static struct pci_device_id cp_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	PCI_DEVICE_ID_REALTEK_8139), },
	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH,	PCI_DEVICE_ID_TTTECH_MC322), },
	{ },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);

static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "tx_ok" },
	{ "rx_ok" },
	{ "tx_err" },
	{ "rx_err" },
	{ "rx_fifo" },
	{ "frame_align" },
	{ "tx_ok_1col" },
	{ "tx_ok_mcol" },
	{ "rx_ok_phys" },
	{ "rx_ok_bcast" },
	{ "rx_ok_mcast" },
	{ "tx_abort" },
	{ "tx_underrun" },
	{ "rx_frags" },
};


#if CP_VLAN_TAG_USED
static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cp->vlgrp = grp;
	if (grp)
		cp->cpcmd |= RxVlanOn;
	else
		cp->cpcmd &= ~RxVlanOn;

	cpw16(CpCmd, cp->cpcmd);
	spin_unlock_irqrestore(&cp->lock, flags);
}
#endif /* CP_VLAN_TAG_USED */

static inline void cp_set_rxbufsize (struct cp_private *cp)
{
	unsigned int mtu = cp->dev->mtu;

	if (mtu > ETH_DATA_LEN)
		/* MTU + ethernet header + FCS + optional VLAN tag */
		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
	else
		cp->rx_buf_sz = PKT_BUF_SZ;
}

static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
			      struct cp_desc *desc)
{
	skb->protocol = eth_type_trans (skb, cp->dev);

	cp->net_stats.rx_packets++;
	cp->net_stats.rx_bytes += skb->len;
	cp->dev->last_rx = jiffies;

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
		vlan_hwaccel_receive_skb(skb, cp->vlgrp,
					 be16_to_cpu(desc->opts2 & 0xffff));
	} else
#endif
		netif_receive_skb(skb);
}

static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
			    u32 status, u32 len)
{
	if (netif_msg_rx_err (cp))
		printk (KERN_DEBUG
			"%s: rx err, slot %d status 0x%x len %d\n",
			cp->dev->name, rx_tail, status, len);
	cp->net_stats.rx_errors++;
	if (status & RxErrFrame)
		cp->net_stats.rx_frame_errors++;
	if (status & RxErrCRC)
		cp->net_stats.rx_crc_errors++;
	if ((status & RxErrRunt) || (status & RxErrLong))
		cp->net_stats.rx_length_errors++;
	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
		cp->net_stats.rx_length_errors++;
	if (status & RxErrFIFO)
		cp->net_stats.rx_fifo_errors++;
}

static inline unsigned int cp_rx_csum_ok (u32 status)
{
	unsigned int protocol = (status >> 16) & 0x3;

	if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
		return 1;
	else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
		return 1;
	else if ((protocol == RxProtoIP) && (!(status & IPFail)))
		return 1;
	return 0;
}

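/* Old-style NAPI poll: handle up to dev->quota received packets, decrement
 * *budget by the number processed, and return 0 when the ring is drained
 * (interrupts re-enabled) or 1 if more polling is needed.
 */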
static int cp_rx_poll (struct net_device *dev, int *budget)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned rx_tail = cp->rx_tail;
	unsigned rx_work = dev->quota;
	unsigned rx;

rx_status_loop:
	rx = 0;
	cpw16(IntrStatus, cp_rx_intr_mask);

	while (1) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *new_skb;
		struct cp_desc *desc;
		unsigned buflen;

		skb = cp->rx_skb[rx_tail];
		BUG_ON(!skb);

		desc = &cp->rx_ring[rx_tail];
		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)
			break;

		len = (status & 0x1fff) - 4;
		mapping = le64_to_cpu(desc->addr);

		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
			/* we don't support incoming fragmented frames.
			 * instead, we attempt to ensure that the
			 * pre-allocated RX skbs are properly sized such
			 * that RX fragments are never encountered
			 */
			cp_rx_err_acct(cp, rx_tail, status, len);
			cp->net_stats.rx_dropped++;
			cp->cp_stats.rx_frags++;
			goto rx_next;
		}

		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			goto rx_next;
		}

		if (netif_msg_rx_status(cp))
			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
			       dev->name, rx_tail, status, len);

		buflen = cp->rx_buf_sz + RX_OFFSET;
		new_skb = dev_alloc_skb (buflen);
		if (!new_skb) {
			cp->net_stats.rx_dropped++;
			goto rx_next;
		}

		skb_reserve(new_skb, RX_OFFSET);

		pci_unmap_single(cp->pdev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

		/* Handle checksum offloading for incoming packets. */
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb_put(skb, len);

		mapping = pci_map_single(cp->pdev, new_skb->data, buflen,
					 PCI_DMA_FROMDEVICE);
		cp->rx_skb[rx_tail] = new_skb;

		cp_rx_skb(cp, skb, desc);
		rx++;

rx_next:
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
						  cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
		rx_tail = NEXT_RX(rx_tail);

		if (!rx_work--)
			break;
	}

	cp->rx_tail = rx_tail;

	dev->quota -= rx;
	*budget -= rx;

	/* if we did not reach work limit, then we're done with
	 * this round of polling
	 */
	if (rx_work) {
		unsigned long flags;

		if (cpr16(IntrStatus) & cp_rx_intr_mask)
			goto rx_status_loop;

		local_irq_save(flags);
		cpw16_f(IntrMask, cp_intr_mask);
		__netif_rx_complete(dev);
		local_irq_restore(flags);

		return 0;	/* done */
	}

	return 1;		/* not done */
}

static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct cp_private *cp;
	u16 status;

	if (unlikely(dev == NULL))
		return IRQ_NONE;
	cp = netdev_priv(dev);

	status = cpr16(IntrStatus);
	if (!status || (status == 0xFFFF))
		return IRQ_NONE;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
		        dev->name, status, cpr8(Cmd), cpr16(CpCmd));

	cpw16(IntrStatus, status & ~cp_rx_intr_mask);

	spin_lock(&cp->lock);

	/* close possible races with dev_close */
	if (unlikely(!netif_running(dev))) {
		cpw16(IntrMask, 0);
		spin_unlock(&cp->lock);
		return IRQ_HANDLED;
	}

	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
		if (netif_rx_schedule_prep(dev)) {
			cpw16_f(IntrMask, cp_norx_intr_mask);
			__netif_rx_schedule(dev);
		}

	if (status & (TxOK | TxErr | TxEmpty | SWInt))
		cp_tx(cp);
	if (status & LinkChg)
		mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);

	spin_unlock(&cp->lock);

	if (status & PciErr) {
		u16 pci_status;

		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
		printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
		       dev->name, status, pci_status);

		/* TODO: reset hardware */
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void cp_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	cp_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void cp_tx (struct cp_private *cp)
{
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;

	while (tx_tail != tx_head) {
		struct cp_desc *txd = cp->tx_ring + tx_tail;
		struct sk_buff *skb;
		u32 status;

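		/* make sure we see the chip's latest write to this
		 * descriptor before testing the DescOwn bit */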
		rmb();
		status = le32_to_cpu(txd->opts1);
		if (status & DescOwn)
			break;

		skb = cp->tx_skb[tx_tail];
		BUG_ON(!skb);

		pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
				 le32_to_cpu(txd->opts1) & 0xffff,
				 PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
				if (netif_msg_tx_err(cp))
					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
					       cp->dev->name, status);
				cp->net_stats.tx_errors++;
				if (status & TxOWC)
					cp->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					cp->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					cp->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					cp->net_stats.tx_fifo_errors++;
			} else {
				cp->net_stats.collisions +=
					((status >> TxColCntShift) & TxColCntMask);
				cp->net_stats.tx_packets++;
				cp->net_stats.tx_bytes += skb->len;
				if (netif_msg_tx_done(cp))
					printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

		cp->tx_skb[tx_tail] = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	cp->tx_tail = tx_tail;

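	/* wake the queue only once a maximally fragmented skb would fit:
	 * MAX_SKB_FRAGS fragments plus the linear head */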
	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(cp->dev);
}

static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned entry;
	u32 eor, flags;
	unsigned long intr_flags;
#if CP_VLAN_TAG_USED
	u32 vlan_tag = 0;
#endif
	int mss = 0;

	spin_lock_irqsave(&cp->lock, intr_flags);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&cp->lock, intr_flags);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
#endif

	entry = cp->tx_head;
	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
	if (dev->features & NETIF_F_TSO)
		mss = skb_shinfo(skb)->gso_size;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct cp_desc *txd = &cp->tx_ring[entry];
		u32 len;
		dma_addr_t mapping;

		len = skb->len;
		mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(mapping);
		wmb();

		flags = eor | len | DescOwn | FirstFrag | LastFrag;

		if (mss)
			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
			const struct iphdr *ip = ip_hdr(skb);
			if (ip->protocol == IPPROTO_TCP)
				flags |= IPCS | TCPCS;
			else if (ip->protocol == IPPROTO_UDP)
				flags |= IPCS | UDPCS;
			else
				WARN_ON(1);	/* we need a WARN() */
		}

		txd->opts1 = cpu_to_le32(flags);
		wmb();

		cp->tx_skb[entry] = skb;
		entry = NEXT_TX(entry);
	} else {
		struct cp_desc *txd;
		u32 first_len, first_eor;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;
		const struct iphdr *ip = ip_hdr(skb);

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_eor = eor;
		first_len = skb_headlen(skb);
		first_mapping = pci_map_single(cp->pdev, skb->data,
					       first_len, PCI_DMA_TODEVICE);
		cp->tx_skb[entry] = skb;
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			u32 ctrl;
			dma_addr_t mapping;

			len = this_frag->size;
			mapping = pci_map_single(cp->pdev,
						 ((void *) page_address(this_frag->page) +
						  this_frag->page_offset),
						 len, PCI_DMA_TODEVICE);
			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

			ctrl = eor | len | DescOwn;

			if (mss)
				ctrl |= LargeSend |
					((mss & MSSMask) << MSSShift);
			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (ip->protocol == IPPROTO_TCP)
					ctrl |= IPCS | TCPCS;
				else if (ip->protocol == IPPROTO_UDP)
					ctrl |= IPCS | UDPCS;
				else
					BUG();
			}

			if (frag == skb_shinfo(skb)->nr_frags - 1)
				ctrl |= LastFrag;

			txd = &cp->tx_ring[entry];
			CP_VLAN_TX_TAG(txd, vlan_tag);
			txd->addr = cpu_to_le64(mapping);
			wmb();

			txd->opts1 = cpu_to_le32(ctrl);
			wmb();

			cp->tx_skb[entry] = skb;
			entry = NEXT_TX(entry);
		}

		txd = &cp->tx_ring[first_entry];
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(first_mapping);
		wmb();

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | UDPCS);
			else
				BUG();
		} else
			txd->opts1 = cpu_to_le32(first_eor | first_len |
						 FirstFrag | DescOwn);
		wmb();
	}
	cp->tx_head = entry;
	if (netif_msg_tx_queued(cp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&cp->lock, intr_flags);

	cpw8(TxPoll, NormalTxPoll);
	dev->trans_start = jiffies;

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp;

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	/* We can safely update without stopping the chip. */
	tmp = cp_rx_config | rx_mode;
	if (cp->rx_config != tmp) {
		cpw32_f (RxConfig, tmp);
		cp->rx_config = tmp;
	}
	cpw32_f (MAR0 + 0, mc_filter[0]);
	cpw32_f (MAR0 + 4, mc_filter[1]);
}

static void cp_set_rx_mode (struct net_device *dev)
{
	unsigned long flags;
	struct cp_private *cp = netdev_priv(dev);

	spin_lock_irqsave (&cp->lock, flags);
	__cp_set_rx_mode(dev);
	spin_unlock_irqrestore (&cp->lock, flags);
}

static void __cp_get_stats(struct cp_private *cp)
{
	/* only lower 24 bits valid; write any value to clear */
	cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
	cpw32 (RxMissed, 0);
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	/* Only the frames the chip silently dropped need to be fetched here. */
	spin_lock_irqsave(&cp->lock, flags);
	if (netif_running(dev) && netif_device_present(dev))
		__cp_get_stats(cp);
	spin_unlock_irqrestore(&cp->lock, flags);

	return &cp->net_stats;
}

static void cp_stop_hw (struct cp_private *cp)
{
	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
	cpw16_f(IntrMask, 0);
	cpw8(Cmd, 0);
	cpw16_f(CpCmd, 0);
	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}

static void cp_reset_hw (struct cp_private *cp)
{
	unsigned work = 1000;

	cpw8(Cmd, CmdReset);

	while (work--) {
		if (!(cpr8(Cmd) & CmdReset))
			return;

		schedule_timeout_uninterruptible(10);
	}

	printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
}

static inline void cp_start_hw (struct cp_private *cp)
{
	cpw16(CpCmd, cp->cpcmd);
	cpw8(Cmd, RxOn | TxOn);
}

static void cp_init_hw (struct cp_private *cp)
{
	struct net_device *dev = cp->dev;
	dma_addr_t ring_dma;

	cp_reset_hw(cp);

	cpw8_f (Cfg9346, Cfg9346_Unlock);

	/* Restore our idea of the MAC address. */
	cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
	cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));

	cp_start_hw(cp);
	cpw8(TxThresh, 0x06);

	__cp_set_rx_mode(dev);
	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
	cpw8(Config3, PARMEnable);
	cp->wol_enabled = 0;

	cpw8(Config5, cpr8(Config5) & PMEStatus);

	cpw32_f(HiTxRingAddr, 0);
	cpw32_f(HiTxRingAddr + 4, 0);

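	/* Each 64-bit ring address is written as two 32-bit halves; the
	 * double 16-bit shift avoids an out-of-range shift when dma_addr_t
	 * is only 32 bits wide. */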
	ring_dma = cp->ring_dma;
	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);

	cpw16(MultiIntr, 0);

	cpw16_f(IntrMask, cp_intr_mask);

	cpw8_f(Cfg9346, Cfg9346_Lock);
}

static int cp_refill_rx (struct cp_private *cp)
{
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
		if (!skb)
			goto err_out;

		skb_reserve(skb, RX_OFFSET);

		mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
					 PCI_DMA_FROMDEVICE);
		cp->rx_skb[i] = skb;

		cp->rx_ring[i].opts2 = 0;
		cp->rx_ring[i].addr = cpu_to_le64(mapping);
		if (i == (CP_RX_RING_SIZE - 1))
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
		else
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | cp->rx_buf_sz);
	}

	return 0;

err_out:
	cp_clean_rings(cp);
	return -ENOMEM;
}

static void cp_init_rings_index (struct cp_private *cp)
{
	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}

static int cp_init_rings (struct cp_private *cp)
{
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);

	cp_init_rings_index(cp);

	return cp_refill_rx (cp);
}

static int cp_alloc_rings (struct cp_private *cp)
{
	void *mem;

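	/* One coherent allocation holds the Rx ring, then the Tx ring,
	 * plus room reserved for the 64-byte stats block (CP_RING_BYTES). */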
	mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
	if (!mem)
		return -ENOMEM;

	cp->rx_ring = mem;
	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

	return cp_init_rings(cp);
}

static void cp_clean_rings (struct cp_private *cp)
{
	struct cp_desc *desc;
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		if (cp->rx_skb[i]) {
			desc = cp->rx_ring + i;
			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(cp->rx_skb[i]);
		}
	}

	for (i = 0; i < CP_TX_RING_SIZE; i++) {
		if (cp->tx_skb[i]) {
			struct sk_buff *skb = cp->tx_skb[i];

			desc = cp->tx_ring + i;
			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
					 le32_to_cpu(desc->opts1) & 0xffff,
					 PCI_DMA_TODEVICE);
			if (le32_to_cpu(desc->opts1) & LastFrag)
				dev_kfree_skb(skb);
			cp->net_stats.tx_dropped++;
		}
	}

	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
}

static void cp_free_rings (struct cp_private *cp)
{
	cp_clean_rings(cp);
	pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
	cp->rx_ring = NULL;
	cp->tx_ring = NULL;
}

static int cp_open (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;

	if (netif_msg_ifup(cp))
		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);

	rc = cp_alloc_rings(cp);
	if (rc)
		return rc;

	cp_init_hw(cp);

	rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		goto err_out_hw;

	netif_carrier_off(dev);
	mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
	netif_start_queue(dev);

	return 0;

err_out_hw:
	cp_stop_hw(cp);
	cp_free_rings(cp);
	return rc;
}

static int cp_close (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (netif_msg_ifdown(cp))
		printk(KERN_DEBUG "%s: disabling interface\n", dev->name);

	spin_lock_irqsave(&cp->lock, flags);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	cp_stop_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	cp_free_rings(cp);
	return 0;
}

#ifdef BROKEN
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	/* check for invalid MTU, according to hardware limits */
	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
		return -EINVAL;

	/* if network interface not up, no need for complexity */
	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		cp_set_rxbufsize(cp);	/* set new rx buf size */
		return 0;
	}

	spin_lock_irqsave(&cp->lock, flags);

	cp_stop_hw(cp);			/* stop h/w and free rings */
	cp_clean_rings(cp);

	dev->mtu = new_mtu;
	cp_set_rxbufsize(cp);		/* set new rx buf size */

	rc = cp_init_rings(cp);		/* realloc and restart h/w */
	cp_start_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}
#endif /* BROKEN */

static const char mii_2_8139_map[8] = {
	BasicModeCtrl,
	BasicModeStatus,
	0,
	0,
	NWayAdvert,
	NWayLPAR,
	NWayExpansion,
	0
};

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct cp_private *cp = netdev_priv(dev);

	return location < 8 && mii_2_8139_map[location] ?
	       readw(cp->regs + mii_2_8139_map[location]) : 0;
}


static void mdio_write(struct net_device *dev, int phy_id, int location,
		       int value)
{
	struct cp_private *cp = netdev_priv(dev);

	if (location == 0) {
		cpw8(Cfg9346, Cfg9346_Unlock);
		cpw16(BasicModeCtrl, value);
		cpw8(Cfg9346, Cfg9346_Lock);
	} else if (location < 8 && mii_2_8139_map[location])
		cpw16(mii_2_8139_map[location], value);
}

/* Set the ethtool Wake-on-LAN settings */
static int netdev_set_wol (struct cp_private *cp,
			   const struct ethtool_wolinfo *wol)
{
	u8 options;

	options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_PHY)	options |= LinkUp;
		if (wol->wolopts & WAKE_MAGIC)	options |= MagicPacket;
	}

	cpw8 (Cfg9346, Cfg9346_Unlock);
	cpw8 (Config3, options);
	cpw8 (Cfg9346, Cfg9346_Lock);

	options = 0; /* Paranoia setting */
	options = cpr8 (Config5) & ~(UWF | MWF | BWF);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_UCAST)  options |= UWF;
		if (wol->wolopts & WAKE_BCAST)	options |= BWF;
		if (wol->wolopts & WAKE_MCAST)	options |= MWF;
	}

	cpw8 (Config5, options);

	cp->wol_enabled = (wol->wolopts) ? 1 : 0;

	return 0;
}

/* Get the ethtool Wake-on-LAN settings */
static void netdev_get_wol (struct cp_private *cp,
	             struct ethtool_wolinfo *wol)
{
	u8 options;

	wol->wolopts   = 0; /* Start from scratch */
	wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
		         WAKE_MCAST | WAKE_UCAST;
	/* We don't need to go on if WOL is disabled */
	if (!cp->wol_enabled) return;

	options        = cpr8 (Config3);
	if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
	if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;

	options        = 0; /* Paranoia setting */
	options        = cpr8 (Config5);
	if (options & UWF)           wol->wolopts |= WAKE_UCAST;
	if (options & BWF)           wol->wolopts |= WAKE_BCAST;
	if (options & MWF)           wol->wolopts |= WAKE_MCAST;
}

static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct cp_private *cp = netdev_priv(dev);

	strcpy (info->driver, DRV_NAME);
	strcpy (info->version, DRV_VERSION);
	strcpy (info->bus_info, pci_name(cp->pdev));
}

static int cp_get_regs_len(struct net_device *dev)
{
	return CP_REGS_SIZE;
}

static int cp_get_stats_count (struct net_device *dev)
{
	return CP_NUM_STATS;
}

static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	rc = mii_ethtool_gset(&cp->mii_if, cmd);
	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}

static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	rc = mii_ethtool_sset(&cp->mii_if, cmd);
	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}

static int cp_nway_reset(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return mii_nway_restart(&cp->mii_if);
}

static u32 cp_get_msglevel(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return cp->msg_enable;
}

static void cp_set_msglevel(struct net_device *dev, u32 value)
{
	struct cp_private *cp = netdev_priv(dev);
	cp->msg_enable = value;
}

static u32 cp_get_rx_csum(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
}

static int cp_set_rx_csum(struct net_device *dev, u32 data)
{
	struct cp_private *cp = netdev_priv(dev);
	u16 cmd = cp->cpcmd, newcmd;

	newcmd = cmd;

	if (data)
		newcmd |= RxChkSum;
	else
		newcmd &= ~RxChkSum;

	if (newcmd != cmd) {
		unsigned long flags;

		spin_lock_irqsave(&cp->lock, flags);
		cp->cpcmd = newcmd;
		cpw16_f(CpCmd, newcmd);
		spin_unlock_irqrestore(&cp->lock, flags);
	}

	return 0;
}

static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
		        void *p)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len < CP_REGS_SIZE)
		return /* -EINVAL */;

	regs->version = CP_REGS_VER;

	spin_lock_irqsave(&cp->lock, flags);
	memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
	spin_unlock_irqrestore(&cp->lock, flags);
}

static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave (&cp->lock, flags);
	netdev_get_wol (cp, wol);
	spin_unlock_irqrestore (&cp->lock, flags);
}

static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;
	int rc;

	spin_lock_irqsave (&cp->lock, flags);
	rc = netdev_set_wol (cp, wol);
	spin_unlock_irqrestore (&cp->lock, flags);

	return rc;
}

static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	default:
		BUG();
		break;
	}
}

static void cp_get_ethtool_stats (struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct cp_private *cp = netdev_priv(dev);
	struct cp_dma_stats *nic_stats;
	dma_addr_t dma;
	int i;

	nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
	if (!nic_stats)
		return;

	/* begin NIC statistics dump */
	cpw32(StatsAddr + 4, (u64)dma >> 32);
	cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);
	cpr32(StatsAddr);

	for (i = 0; i < 1000; i++) {
		if ((cpr32(StatsAddr) & DumpStats) == 0)
			break;
		udelay(10);
	}
	cpw32(StatsAddr, 0);
	cpw32(StatsAddr + 4, 0);
	cpr32(StatsAddr);

	i = 0;
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
	tmp_stats[i++] = cp->cp_stats.rx_frags;
	BUG_ON(i != CP_NUM_STATS);

	pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
}

static const struct ethtool_ops cp_ethtool_ops = {
	.get_drvinfo		= cp_get_drvinfo,
	.get_regs_len		= cp_get_regs_len,
	.get_stats_count	= cp_get_stats_count,
	.get_settings		= cp_get_settings,
	.set_settings		= cp_set_settings,
	.nway_reset		= cp_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= cp_get_msglevel,
	.set_msglevel		= cp_set_msglevel,
	.get_rx_csum		= cp_get_rx_csum,
	.set_rx_csum		= cp_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum, /* local! */
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
	.get_regs		= cp_get_regs,
	.get_wol		= cp_get_wol,
	.set_wol		= cp_set_wol,
	.get_strings		= cp_get_strings,
	.get_ethtool_stats	= cp_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
	.get_eeprom_len		= cp_get_eeprom_len,
	.get_eeprom		= cp_get_eeprom,
	.set_eeprom		= cp_set_eeprom,
};

static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irqsave(&cp->lock, flags);
	rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irqrestore(&cp->lock, flags);
	return rc;
}

/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS			0x08	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
#define EE_WRITE_0		0x00
#define EE_WRITE_1		0x02
#define EE_DATA_READ	0x01	/* EEPROM chip data out. */
#define EE_ENB			(0x80 | EE_CS)

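/* The serial EEPROM (93C46-style) is bit-banged through the Cfg9346
 * register: command and address bits are shifted in MSB-first via
 * EE_DATA_WRITE, clocked by EE_SHIFT_CLK, and output data is sampled
 * on EE_DATA_READ.
 */
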
/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33MHz PCI, but 66MHz may change this.
 */

#define eeprom_delay()	readl(ee_addr)

/* The EEPROM commands include the always-set leading bit. */
#define EE_EXTEND_CMD	(4)
#define EE_WRITE_CMD	(5)
#define EE_READ_CMD		(6)
#define EE_ERASE_CMD	(7)

#define EE_EWDS_ADDR	(0)
#define EE_WRAL_ADDR	(1)
#define EE_ERAL_ADDR	(2)
#define EE_EWEN_ADDR	(3)

#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139

static void eeprom_cmd_start(void __iomem *ee_addr)
{
	writeb (EE_ENB & ~EE_CS, ee_addr);
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();
}

static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
{
	int i;

	/* Shift the command bits out. */
	for (i = cmd_len - 1; i >= 0; i--) {
		int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writeb (EE_ENB | dataval, ee_addr);
		eeprom_delay ();
		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
	}
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();
}

static void eeprom_cmd_end(void __iomem *ee_addr)
{
	writeb (~EE_CS, ee_addr);
	eeprom_delay ();
}

static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
			      int addr_len)
{
	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
	eeprom_cmd_end(ee_addr);
}

static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
{
	int i;
	u16 retval = 0;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);

	for (i = 16; i > 0; i--) {
		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
		retval =
		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
				     0);
		writeb (EE_ENB, ee_addr);
		eeprom_delay ();
	}

	eeprom_cmd_end(ee_addr);

	return retval;
}

static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
			 int addr_len)
{
	int i;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int write_cmd = location | (EE_WRITE_CMD << addr_len);

	eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
	eeprom_cmd(ee_addr, val, 16);
	eeprom_cmd_end(ee_addr);

	eeprom_cmd_start(ee_addr);
	for (i = 0; i < 20000; i++)
		if (readb(ee_addr) & EE_DATA_READ)
			break;
	eeprom_cmd_end(ee_addr);

	eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
}

static int cp_get_eeprom_len(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	int size;

	spin_lock_irq(&cp->lock);
	size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
	spin_unlock_irq(&cp->lock);

	return size;
}

static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned int addr_len;
	u16 val;
	u32 offset = eeprom->offset >> 1;
	u32 len = eeprom->len;
	u32 i = 0;

	eeprom->magic = CP_EEPROM_MAGIC;

	spin_lock_irq(&cp->lock);

	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	while (i < len - 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)val;
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	if (i < len) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i] = (u8)val;
	}

	spin_unlock_irq(&cp->lock);
	return 0;
}

static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned int addr_len;
	u16 val;
	u32 offset = eeprom->offset >> 1;
	u32 len = eeprom->len;
	u32 i = 0;

	if (eeprom->magic != CP_EEPROM_MAGIC)
		return -EINVAL;

	spin_lock_irq(&cp->lock);

	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	while (i < len - 1) {
		val = (u16)data[i++];
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	if (i < len) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
		val |= (u16)data[i];
		write_eeprom(cp->regs, offset, val, addr_len);
	}

	spin_unlock_irq(&cp->lock);
	return 0;
}
/* Put the board into a low-power state (D3hot, PME# enabled) and wait for a wake-up signal */
static void cp_set_d3_state (struct cp_private *cp)
{
	pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
	pci_set_power_state (cp->pdev, PCI_D3hot);
}

static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct cp_private *cp;
	int rc;
	void __iomem *regs;
	resource_size_t pciaddr;
	unsigned int addr_len, i, pci_using_dac;
	u8 pci_rev;

#ifndef MODULE
	static int version_printed;
	if (version_printed++ == 0)
		printk("%s", version);
#endif

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
		dev_err(&pdev->dev,
			   "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
		           pdev->vendor, pdev->device, pci_rev);
		dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct cp_private));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	cp = netdev_priv(dev);
	cp->pdev = pdev;
	cp->dev = dev;
	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
	spin_lock_init (&cp->lock);
	cp->mii_if.dev = dev;
	cp->mii_if.mdio_read = mdio_read;
	cp->mii_if.mdio_write = mdio_write;
	cp->mii_if.phy_id = CP_INTERNAL_PHY;
	cp->mii_if.phy_id_mask = 0x1f;
	cp->mii_if.reg_num_mask = 0x1f;
	cp_set_rxbufsize(cp);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	rc = pci_set_mwi(pdev);
	if (rc)
		goto err_out_disable;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_mwi;

	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		dev_err(&pdev->dev, "no MMIO resource\n");
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
		rc = -EIO;
		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
		       (unsigned long long)pci_resource_len(pdev, 1));
		goto err_out_res;
	}

	/* Configure DMA attributes. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
	} else {
		pci_using_dac = 0;

		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_err(&pdev->dev,
				   "No usable DMA configuration, aborting.\n");
			goto err_out_res;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_err(&pdev->dev,
				   "No usable consistent DMA configuration, "
			           "aborting.\n");
			goto err_out_res;
		}
	}

	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
		    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;

	regs = ioremap(pciaddr, CP_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
		       (unsigned long long)pci_resource_len(pdev, 1),
		       (unsigned long long)pciaddr);
		goto err_out_res;
	}
	dev->base_addr = (unsigned long) regs;
	cp->regs = regs;

	cp_stop_hw(cp);

	/* read MAC address from EEPROM */
	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
	for (i = 0; i < 3; i++)
		((u16 *) (dev->dev_addr))[i] =
		    le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->open = cp_open;
	dev->stop = cp_close;
	dev->set_multicast_list = cp_set_rx_mode;
	dev->hard_start_xmit = cp_start_xmit;
	dev->get_stats = cp_get_stats;
	dev->do_ioctl = cp_ioctl;
	dev->poll = cp_rx_poll;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = cp_poll_controller;
#endif
	dev->weight = 16;	/* arbitrary? from NAPI_HOWTO.txt. */
#ifdef BROKEN
	dev->change_mtu = cp_change_mtu;
#endif
	dev->ethtool_ops = &cp_ethtool_ops;

#if CP_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = cp_vlan_rx_register;
#endif

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;


	dev->irq = pdev->irq;

	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
		"%02x:%02x:%02x:%02x:%02x:%02x, "
		"IRQ %d\n",
		dev->name,
		dev->base_addr,
		dev->dev_addr[0], dev->dev_addr[1],
		dev->dev_addr[2], dev->dev_addr[3],
		dev->dev_addr[4], dev->dev_addr[5],
		dev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable busmastering and memory-write-invalidate */
	pci_set_master(pdev);

	if (cp->wol_enabled)
		cp_set_d3_state (cp);

	return 0;

err_out_iomap:
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_mwi:
	pci_clear_mwi(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);
	return rc;
}

static void cp_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cp_private *cp = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(cp->regs);
	if (cp->wol_enabled)
		pci_set_power_state (pdev, PCI_D0);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}

#ifdef CONFIG_PM
static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_detach (dev);
	netif_stop_queue (dev);

	spin_lock_irqsave (&cp->lock, flags);

	/* Disable Rx and Tx */
	cpw16 (IntrMask, 0);
	cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));

	spin_unlock_irqrestore (&cp->lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int cp_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_attach (dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	cp_init_rings_index (cp);
	cp_init_hw (cp);
	netif_start_queue (dev);

	spin_lock_irqsave (&cp->lock, flags);

	mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);

	spin_unlock_irqrestore (&cp->lock, flags);

	return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver cp_driver = {
	.name         = DRV_NAME,
	.id_table     = cp_pci_tbl,
	.probe        =	cp_init_one,
	.remove       = cp_remove_one,
#ifdef CONFIG_PM
	.resume       = cp_resume,
	.suspend      = cp_suspend,
#endif
};

static int __init cp_init (void)
{
#ifdef MODULE
	printk("%s", version);
#endif
	return pci_register_driver(&cp_driver);
}

static void __exit cp_exit (void)
{
	pci_unregister_driver (&cp_driver);
}

module_init(cp_init);
module_exit(cp_exit);
