/*
 * Imported from the ASUSWRT RT-N18U GPL source drop:
 * asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/net/
 */
1/*
2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
3 *
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
7 *
8 * See MAINTAINERS file for support contact information.
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/pci.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/delay.h>
17#include <linux/ethtool.h>
18#include <linux/mii.h>
19#include <linux/if_vlan.h>
20#include <linux/crc32.h>
21#include <linux/in.h>
22#include <linux/ip.h>
23#include <linux/tcp.h>
24#include <linux/init.h>
25#include <linux/dma-mapping.h>
26#include <linux/pm_runtime.h>
27
28#include <asm/system.h>
29#include <asm/io.h>
30#include <asm/irq.h>
31
/* Driver identification strings and the prefix used for log messages. */
#define RTL8169_VERSION "2.3LK-NAPI"
#define MODULENAME "r8169"
#define PFX MODULENAME ": "	/* printk message prefix */
35
#ifdef RTL8169_DEBUG
/*
 * assert(expr): log an error when 'expr' is false.
 * Wrapped in do { } while (0) so it is safe in un-braced if/else
 * bodies: the original bare "if (!(expr)) { ... }" form would
 * swallow a following 'else' (dangling-else hazard).  A KERN_ERR
 * level is also supplied so the message is not logged at the
 * default level implicitly.
 */
#define assert(expr) \
do {							\
	if (!(expr)) {					\
		printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n",	\
		       #expr, __FILE__, __func__, __LINE__);		\
	}						\
} while (0)
/* Debug-only printk, prefixed with the module name. */
#define dprintk(fmt, args...) \
	do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
#else
#define assert(expr) do {} while (0)
#define dprintk(fmt, args...)	do {} while (0)
#endif /* RTL8169_DEBUG */
48
/* Default netif_msg_* bitmap: driver, probe and if up/down events. */
#define R8169_MSG_DEFAULT \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)

/* Free slots in the Tx ring; one slot is always kept unused. */
#define TX_BUFFS_AVAIL(tp) \
	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
54
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

/* MAC address length */
#define MAC_ADDR_LEN	6

/* log2 of the largest PCIe read request size the driver programs. */
#define MAX_READ_REQUEST_SHIFT	12
#define RX_FIFO_THRESH	7	/* 7 means NO threshold, Rx buffer level before first PCI xfer. */
#define RX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
#define TX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
#define EarlyTxThld	0x3F	/* 0x3F means NO early transmit */
#define SafeMtu		0x1c20	/* ... actually life sucks beyond ~7k */
#define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */

#define R8169_REGS_SIZE		256
#define R8169_NAPI_WEIGHT	64
#define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
#define NUM_RX_DESC	256	/* Number of Rx descriptor registers */
#define RX_BUF_SIZE	1536	/* Rx Buffer size */
#define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))

#define RTL8169_TX_TIMEOUT	(6*HZ)
#define RTL8169_PHY_TIMEOUT	(10*HZ)

/* EEPROM signature used to decide whether the EEPROM content is valid. */
#define RTL_EEPROM_SIG		cpu_to_le32(0x8129)
#define RTL_EEPROM_SIG_MASK	cpu_to_le32(0xffff)
#define RTL_EEPROM_SIG_ADDR	0x0000

/* write/read MMIO register; an 'ioaddr' local must be in scope at the
 * call site. */
#define RTL_W8(reg, val8)	writeb ((val8), ioaddr + (reg))
#define RTL_W16(reg, val16)	writew ((val16), ioaddr + (reg))
#define RTL_W32(reg, val32)	writel ((val32), ioaddr + (reg))
#define RTL_R8(reg)		readb (ioaddr + (reg))
#define RTL_R16(reg)		readw (ioaddr + (reg))
#define RTL_R32(reg)		readl (ioaddr + (reg))
92
/*
 * Internal chip revision identifiers.
 * NOTE: the numeric values of VER_16 (0x11) and VER_17 (0x10) are
 * deliberately out of sequence with their names.
 */
enum mac_version {
	RTL_GIGA_MAC_NONE   = 0x00,
	RTL_GIGA_MAC_VER_01 = 0x01, // 8169
	RTL_GIGA_MAC_VER_02 = 0x02, // 8169S
	RTL_GIGA_MAC_VER_03 = 0x03, // 8110S
	RTL_GIGA_MAC_VER_04 = 0x04, // 8169SB
	RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd
	RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe
	RTL_GIGA_MAC_VER_07 = 0x07, // 8102e
	RTL_GIGA_MAC_VER_08 = 0x08, // 8102e
	RTL_GIGA_MAC_VER_09 = 0x09, // 8102e
	RTL_GIGA_MAC_VER_10 = 0x0a, // 8101e
	RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb
	RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be
	RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb
	RTL_GIGA_MAC_VER_14 = 0x0e, // 8101 ?
	RTL_GIGA_MAC_VER_15 = 0x0f, // 8101 ?
	RTL_GIGA_MAC_VER_16 = 0x11, // 8101Ec
	RTL_GIGA_MAC_VER_17 = 0x10, // 8168Bf
	RTL_GIGA_MAC_VER_18 = 0x12, // 8168CP
	RTL_GIGA_MAC_VER_19 = 0x13, // 8168C
	RTL_GIGA_MAC_VER_20 = 0x14, // 8168C
	RTL_GIGA_MAC_VER_21 = 0x15, // 8168C
	RTL_GIGA_MAC_VER_22 = 0x16, // 8168C
	RTL_GIGA_MAC_VER_23 = 0x17, // 8168CP
	RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP
	RTL_GIGA_MAC_VER_25 = 0x19, // 8168D
	RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D
	RTL_GIGA_MAC_VER_27 = 0x1b  // 8168DP
};
123
#define _R(NAME,MAC,MASK) \
	{ .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }

/* Marketing name / mac_version / RxConfig mask per supported chip.
 * Indexed by tp->chipset once the chip has been identified. */
static const struct {
	const char *name;
	u8 mac_version;
	u32 RxConfigMask;	/* Clears the bits supported by this chip */
} rtl_chip_info[] = {
	_R("RTL8169",		RTL_GIGA_MAC_VER_01, 0xff7e1880), // 8169
	_R("RTL8169s",		RTL_GIGA_MAC_VER_02, 0xff7e1880), // 8169S
	_R("RTL8110s",		RTL_GIGA_MAC_VER_03, 0xff7e1880), // 8110S
	_R("RTL8169sb/8110sb",	RTL_GIGA_MAC_VER_04, 0xff7e1880), // 8169SB
	_R("RTL8169sc/8110sc",	RTL_GIGA_MAC_VER_05, 0xff7e1880), // 8110SCd
	_R("RTL8169sc/8110sc",	RTL_GIGA_MAC_VER_06, 0xff7e1880), // 8110SCe
	_R("RTL8102e",		RTL_GIGA_MAC_VER_07, 0xff7e1880), // PCI-E
	_R("RTL8102e",		RTL_GIGA_MAC_VER_08, 0xff7e1880), // PCI-E
	_R("RTL8102e",		RTL_GIGA_MAC_VER_09, 0xff7e1880), // PCI-E
	_R("RTL8101e",		RTL_GIGA_MAC_VER_10, 0xff7e1880), // PCI-E
	_R("RTL8168b/8111b",	RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
	_R("RTL8168b/8111b",	RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
	_R("RTL8101e",		RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
	_R("RTL8100e",		RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
	_R("RTL8100e",		RTL_GIGA_MAC_VER_15, 0xff7e1880), // PCI-E 8139
	_R("RTL8168b/8111b",	RTL_GIGA_MAC_VER_17, 0xff7e1880), // PCI-E
	_R("RTL8101e",		RTL_GIGA_MAC_VER_16, 0xff7e1880), // PCI-E
	_R("RTL8168cp/8111cp",	RTL_GIGA_MAC_VER_18, 0xff7e1880), // PCI-E
	_R("RTL8168c/8111c",	RTL_GIGA_MAC_VER_19, 0xff7e1880), // PCI-E
	_R("RTL8168c/8111c",	RTL_GIGA_MAC_VER_20, 0xff7e1880), // PCI-E
	_R("RTL8168c/8111c",	RTL_GIGA_MAC_VER_21, 0xff7e1880), // PCI-E
	_R("RTL8168c/8111c",	RTL_GIGA_MAC_VER_22, 0xff7e1880), // PCI-E
	_R("RTL8168cp/8111cp",	RTL_GIGA_MAC_VER_23, 0xff7e1880), // PCI-E
	_R("RTL8168cp/8111cp",	RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E
	_R("RTL8168d/8111d",	RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E
	_R("RTL8168d/8111d",	RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E
	_R("RTL8168dp/8111dp",	RTL_GIGA_MAC_VER_27, 0xff7e1880)  // PCI-E
};
#undef _R
161
/* Per-PCI-ID configuration selector (stored in driver_data below). */
enum cfg_version {
	RTL_CFG_0 = 0x00,
	RTL_CFG_1,
	RTL_CFG_2
};

static void rtl_hw_start_8169(struct net_device *);
static void rtl_hw_start_8168(struct net_device *);
static void rtl_hw_start_8101(struct net_device *);

/* PCI IDs handled by this driver; the last field picks a cfg_version. */
static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8129), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, RTL_CFG_2 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_1 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4300), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_AT,		0xc107), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(0x16ec,			0x0116), 0, 0, RTL_CFG_0 },
	{ PCI_VENDOR_ID_LINKSYS,		0x1032,
		PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
	{ 0x0001,				0x8168,
		PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
	{0,},
};

MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
189
/*
 * we set our copybreak very high so that we don't have
 * to allocate 16k frames all the time (see note in
 * rtl8169_open()
 */
static int rx_copybreak = 16383;
static int use_dac;	/* non-zero: allow 64-bit PCI DAC addressing */
static struct {
	u32 msg_enable;
} debug = { -1 };	/* -1: every message class enabled by default */
200
/* MMIO register offsets common to the whole 8169/8168/8101 family. */
enum rtl_registers {
	MAC0		= 0,	/* Ethernet hardware address. */
	MAC4		= 4,
	MAR0		= 8,	/* Multicast filter. */
	CounterAddrLow		= 0x10,
	CounterAddrHigh		= 0x14,
	TxDescStartAddrLow	= 0x20,
	TxDescStartAddrHigh	= 0x24,
	TxHDescStartAddrLow	= 0x28,
	TxHDescStartAddrHigh	= 0x2c,
	FLASH		= 0x30,
	ERSR		= 0x36,
	ChipCmd		= 0x37,
	TxPoll		= 0x38,
	IntrMask	= 0x3c,
	IntrStatus	= 0x3e,
	TxConfig	= 0x40,
	RxConfig	= 0x44,
	RxMissed	= 0x4c,
	Cfg9346		= 0x50,	/* config register write-protect latch */
	Config0		= 0x51,
	Config1		= 0x52,
	Config2		= 0x53,
	Config3		= 0x54,
	Config4		= 0x55,
	Config5		= 0x56,
	MultiIntr	= 0x5c,
	PHYAR		= 0x60,	/* MII management (MDIO) access register */
	PHYstatus	= 0x6c,
	RxMaxSize	= 0xda,
	CPlusCmd	= 0xe0,
	IntrMitigate	= 0xe2,
	RxDescAddrLow	= 0xe4,
	RxDescAddrHigh	= 0xe8,
	EarlyTxThres	= 0xec,
	FuncEvent	= 0xf0,
	FuncEventMask	= 0xf4,
	FuncPresetState	= 0xf8,
	FuncForceEvent	= 0xfc,
};
241
/* Registers only present on the 8110 (TBI-capable) parts. */
enum rtl8110_registers {
	TBICSR			= 0x64,
	TBI_ANAR		= 0x68,
	TBI_LPAR		= 0x6a,
};

/* Registers only present on the PCIe 8168/8101 parts. */
enum rtl8168_8101_registers {
	CSIDR			= 0x64,
	CSIAR			= 0x68,
#define	CSIAR_FLAG			0x80000000
#define	CSIAR_WRITE_CMD			0x80000000
#define	CSIAR_BYTE_ENABLE		0x0f
#define	CSIAR_BYTE_ENABLE_SHIFT		12
#define	CSIAR_ADDR_MASK			0x0fff

	EPHYAR			= 0x80,
#define	EPHYAR_FLAG			0x80000000
#define	EPHYAR_WRITE_CMD		0x80000000
#define	EPHYAR_REG_MASK			0x1f
#define	EPHYAR_REG_SHIFT		16
#define	EPHYAR_DATA_MASK		0xffff
	DBG_REG			= 0xd1,
#define	FIX_NAK_1			(1 << 4)
#define	FIX_NAK_2			(1 << 3)
	EFUSEAR			= 0xdc,
#define	EFUSEAR_FLAG			0x80000000
#define	EFUSEAR_WRITE_CMD		0x80000000
#define	EFUSEAR_READ_CMD		0x00000000
#define	EFUSEAR_REG_MASK		0x03ff
#define	EFUSEAR_REG_SHIFT		8
#define	EFUSEAR_DATA_MASK		0xff
};
274
/* Bit definitions for the registers declared above ("p.NN" comments
 * refer to pages of the Realtek datasheet). */
enum rtl_register_content {
	/* InterruptStatusBits */
	SYSErr		= 0x8000,
	PCSTimeout	= 0x4000,
	SWInt		= 0x0100,
	TxDescUnavail	= 0x0080,
	RxFIFOOver	= 0x0040,
	LinkChg		= 0x0020,
	RxOverflow	= 0x0010,
	TxErr		= 0x0008,
	TxOK		= 0x0004,
	RxErr		= 0x0002,
	RxOK		= 0x0001,

	/* RxStatusDesc */
	RxFOVF	= (1 << 23),
	RxRWT	= (1 << 22),
	RxRES	= (1 << 21),
	RxRUNT	= (1 << 20),
	RxCRC	= (1 << 19),

	/* ChipCmdBits */
	CmdReset	= 0x10,
	CmdRxEnb	= 0x08,
	CmdTxEnb	= 0x04,
	RxBufEmpty	= 0x01,

	/* TXPoll register p.5 */
	HPQ		= 0x80,		/* Poll cmd on the high prio queue */
	NPQ		= 0x40,		/* Poll cmd on the low prio queue */
	FSWInt		= 0x01,		/* Forced software interrupt */

	/* Cfg9346Bits */
	Cfg9346_Lock	= 0x00,
	Cfg9346_Unlock	= 0xc0,

	/* rx_mode_bits */
	AcceptErr	= 0x20,
	AcceptRunt	= 0x10,
	AcceptBroadcast	= 0x08,
	AcceptMulticast	= 0x04,
	AcceptMyPhys	= 0x02,
	AcceptAllPhys	= 0x01,

	/* RxConfigBits */
	RxCfgFIFOShift	= 13,
	RxCfgDMAShift	=  8,

	/* TxConfigBits */
	TxInterFrameGapShift = 24,
	TxDMAShift = 8,	/* DMA burst value (0-7) is shift this many bits */

	/* Config1 register p.24 */
	LEDS1		= (1 << 7),
	LEDS0		= (1 << 6),
	MSIEnable	= (1 << 5),	/* Enable Message Signaled Interrupt */
	Speed_down	= (1 << 4),
	MEMMAP		= (1 << 3),
	IOMAP		= (1 << 2),
	VPD		= (1 << 1),
	PMEnable	= (1 << 0),	/* Power Management Enable */

	/* Config2 register p. 25 */
	PCI_Clock_66MHz = 0x01,
	PCI_Clock_33MHz = 0x00,

	/* Config3 register p.25 */
	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */

	/* Config5 register p.27 */
	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
	LanWake		= (1 << 1),	/* LanWake enable/disable */
	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */

	/* TBICSR p.28 */
	TBIReset	= 0x80000000,
	TBILoopback	= 0x40000000,
	TBINwEnable	= 0x20000000,
	TBINwRestart	= 0x10000000,
	TBILinkOk	= 0x02000000,
	TBINwComplete	= 0x01000000,

	/* CPlusCmd p.31 */
	EnableBist	= (1 << 15),	// 8168 8101
	Mac_dbgo_oe	= (1 << 14),	// 8168 8101
	Normal_mode	= (1 << 13),	// unused
	Force_half_dup	= (1 << 12),	// 8168 8101
	Force_rxflow_en	= (1 << 11),	// 8168 8101
	Force_txflow_en	= (1 << 10),	// 8168 8101
	Cxpl_dbg_sel	= (1 << 9),	// 8168 8101
	ASF		= (1 << 8),	// 8168 8101
	PktCntrDisable	= (1 << 7),	// 8168 8101
	Mac_dbgo_sel	= 0x001c,	// 8168
	RxVlan		= (1 << 6),
	RxChkSum	= (1 << 5),
	PCIDAC		= (1 << 4),
	PCIMulRW	= (1 << 3),
	INTT_0		= 0x0000,	// 8168
	INTT_1		= 0x0001,	// 8168
	INTT_2		= 0x0002,	// 8168
	INTT_3		= 0x0003,	// 8168

	/* rtl8169_PHYstatus */
	TBI_Enable	= 0x80,
	TxFlowCtrl	= 0x40,
	RxFlowCtrl	= 0x20,
	_1000bpsF	= 0x10,
	_100bps		= 0x08,
	_10bps		= 0x04,
	LinkStatus	= 0x02,
	FullDup		= 0x01,

	/* _TBICSRBit */
	TBILinkOK	= 0x02000000,

	/* DumpCounterCommand */
	CounterDump	= 0x8,
};
397
/*
 * Descriptor opts1/opts2 bits.  NOTE: several values overlap on
 * purpose (e.g. TxVlanTag vs. UDPCS, RxVlanTag vs. TCPCS): the Tx
 * and Rx interpretations apply to different descriptor directions.
 */
enum desc_status_bit {
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */

	/* Tx private */
	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
	MSSShift	= 16,        /* MSS value position */
	MSSMask		= 0xfff,     /* MSS value + LargeSend bit: 12 bits */
	IPCS		= (1 << 18), /* Calculate IP checksum */
	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
	TxVlanTag	= (1 << 17), /* Add VLAN tag */

	/* Rx private */
	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
	PID0		= (1 << 17), /* Protocol ID bit 2/2 */

#define RxProtoUDP	(PID1)
#define RxProtoTCP	(PID0)
#define RxProtoIP	(PID1 | PID0)
#define RxProtoMask	RxProtoIP

	IPFail		= (1 << 16), /* IP checksum failed */
	UDPFail		= (1 << 15), /* UDP/IP checksum failed */
	TCPFail	= (1 << 14), /* TCP/IP checksum failed */
	RxVlanTag	= (1 << 16), /* VLAN tag available */
};

/* Reserved descriptor status bits. */
#define RsvdMask	0x3fffc000
429
/* Hardware Tx descriptor: two little-endian option words + DMA address. */
struct TxDesc {
	__le32 opts1;
	__le32 opts2;
	__le64 addr;
};

/* Hardware Rx descriptor; same layout as TxDesc. */
struct RxDesc {
	__le32 opts1;
	__le32 opts2;
	__le64 addr;
};

/* Per-Tx-slot bookkeeping: the skb in flight and its mapped length. */
struct ring_info {
	struct sk_buff	*skb;
	u32		len;
	u8		__pad[sizeof(void *) - sizeof(u32)];	/* pad to pointer size */
};
447
/* Optional driver feature flags (tp->features). */
enum features {
	RTL_FEATURE_WOL		= (1 << 0),
	RTL_FEATURE_MSI		= (1 << 1),
	RTL_FEATURE_GMII	= (1 << 2),
};

/* Hardware statistics block, DMA'd by the chip in little-endian layout.
 * Field order must match the hardware dump format exactly. */
struct rtl8169_counters {
	__le64	tx_packets;
	__le64	rx_packets;
	__le64	tx_errors;
	__le32	rx_errors;
	__le16	rx_missed;
	__le16	align_errors;
	__le32	tx_one_collision;
	__le32	tx_multi_collision;
	__le64	rx_unicast;
	__le64	rx_broadcast;
	__le32	rx_multicast;
	__le16	tx_aborted;
	__le16	tx_underun;	/* sic: Tx underrun counter */
};
469
/* Per-device driver state, stored in netdev_priv(dev). */
struct rtl8169_private {
	void __iomem *mmio_addr;	/* memory map physical address */
	struct pci_dev *pci_dev;	/* Index of PCI device */
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;		/* spin lock flag */
	u32 msg_enable;
	int chipset;			/* index into rtl_chip_info[] */
	int mac_version;		/* RTL_GIGA_MAC_VER_* */
	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
	u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
	u32 dirty_rx;
	u32 dirty_tx;
	struct TxDesc *TxDescArray;	/* 256-aligned Tx descriptor ring */
	struct RxDesc *RxDescArray;	/* 256-aligned Rx descriptor ring */
	dma_addr_t TxPhyAddr;
	dma_addr_t RxPhyAddr;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];	/* Rx data buffers */
	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
	unsigned align;
	unsigned rx_buf_sz;
	struct timer_list timer;
	u16 cp_cmd;			/* shadow of the CPlusCmd register */
	u16 intr_event;
	u16 napi_event;
	u16 intr_mask;
	int phy_1000_ctrl_reg;
#ifdef CONFIG_R8169_VLAN
	struct vlan_group *vlgrp;
#endif
	/* Chip-variant operations, installed at probe time. */
	int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
	int (*get_settings)(struct net_device *, struct ethtool_cmd *);
	void (*phy_reset_enable)(void __iomem *);
	void (*hw_start)(struct net_device *);
	unsigned int (*phy_reset_pending)(void __iomem *);
	unsigned int (*link_ok)(void __iomem *);
	int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
	int pcie_cap;
	struct delayed_work task;
	unsigned features;		/* RTL_FEATURE_* bits */

	struct mii_if_info mii;
	struct rtl8169_counters counters;
	u32 saved_wolopts;
};
515
/* Module metadata and run-time parameters. */
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param(use_dac, int, 0);
MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_LICENSE("GPL");
MODULE_VERSION(RTL8169_VERSION);
526
/* Forward declarations for routines defined later in this file. */
static int rtl8169_open(struct net_device *dev);
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev);
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
static int rtl8169_init_ring(struct net_device *dev);
static void rtl_hw_start(struct net_device *dev);
static int rtl8169_close(struct net_device *dev);
static void rtl_set_rx_mode(struct net_device *dev);
static void rtl8169_tx_timeout(struct net_device *dev);
static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
				void __iomem *, u32 budget);
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
static void rtl8169_down(struct net_device *dev);
static void rtl8169_rx_clear(struct rtl8169_private *tp);
static int rtl8169_poll(struct napi_struct *napi, int budget);

/* Baseline RxConfig value: FIFO threshold + DMA burst size. */
static const unsigned int rtl8169_rx_config =
	(RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
546
547static void mdio_write(void __iomem *ioaddr, int reg_addr, int value)
548{
549	int i;
550
551	RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff));
552
553	for (i = 20; i > 0; i--) {
554		/*
555		 * Check if the RTL8169 has completed writing to the specified
556		 * MII register.
557		 */
558		if (!(RTL_R32(PHYAR) & 0x80000000))
559			break;
560		udelay(25);
561	}
562	/*
563	 * According to hardware specs a 20us delay is required after write
564	 * complete indication, but before sending next command.
565	 */
566	udelay(20);
567}
568
569static int mdio_read(void __iomem *ioaddr, int reg_addr)
570{
571	int i, value = -1;
572
573	RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16);
574
575	for (i = 20; i > 0; i--) {
576		/*
577		 * Check if the RTL8169 has completed retrieving data from
578		 * the specified MII register.
579		 */
580		if (RTL_R32(PHYAR) & 0x80000000) {
581			value = RTL_R32(PHYAR) & 0xffff;
582			break;
583		}
584		udelay(25);
585	}
586	/*
587	 * According to hardware specs a 20us delay is required after read
588	 * complete indication, but before sending next command.
589	 */
590	udelay(20);
591
592	return value;
593}
594
595static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value)
596{
597	mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value);
598}
599
600static void mdio_plus_minus(void __iomem *ioaddr, int reg_addr, int p, int m)
601{
602	int val;
603
604	val = mdio_read(ioaddr, reg_addr);
605	mdio_write(ioaddr, reg_addr, (val | p) & ~m);
606}
607
608static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
609			   int val)
610{
611	struct rtl8169_private *tp = netdev_priv(dev);
612	void __iomem *ioaddr = tp->mmio_addr;
613
614	mdio_write(ioaddr, location, val);
615}
616
617static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
618{
619	struct rtl8169_private *tp = netdev_priv(dev);
620	void __iomem *ioaddr = tp->mmio_addr;
621
622	return mdio_read(ioaddr, location);
623}
624
625static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
626{
627	unsigned int i;
628
629	RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
630		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
631
632	for (i = 0; i < 100; i++) {
633		if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
634			break;
635		udelay(10);
636	}
637}
638
639static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
640{
641	u16 value = 0xffff;
642	unsigned int i;
643
644	RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
645
646	for (i = 0; i < 100; i++) {
647		if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
648			value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
649			break;
650		}
651		udelay(10);
652	}
653
654	return value;
655}
656
657static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
658{
659	unsigned int i;
660
661	RTL_W32(CSIDR, value);
662	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
663		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
664
665	for (i = 0; i < 100; i++) {
666		if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
667			break;
668		udelay(10);
669	}
670}
671
672static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
673{
674	u32 value = ~0x00;
675	unsigned int i;
676
677	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
678		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
679
680	for (i = 0; i < 100; i++) {
681		if (RTL_R32(CSIAR) & CSIAR_FLAG) {
682			value = RTL_R32(CSIDR);
683			break;
684		}
685		udelay(10);
686	}
687
688	return value;
689}
690
691static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
692{
693	u8 value = 0xff;
694	unsigned int i;
695
696	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
697
698	for (i = 0; i < 300; i++) {
699		if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
700			value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
701			break;
702		}
703		udelay(100);
704	}
705
706	return value;
707}
708
709static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
710{
711	RTL_W16(IntrMask, 0x0000);
712
713	RTL_W16(IntrStatus, 0xffff);
714}
715
716static void rtl8169_asic_down(void __iomem *ioaddr)
717{
718	RTL_W8(ChipCmd, 0x00);
719	rtl8169_irq_mask_and_ack(ioaddr);
720	RTL_R16(CPlusCmd);
721}
722
723static unsigned int rtl8169_tbi_reset_pending(void __iomem *ioaddr)
724{
725	return RTL_R32(TBICSR) & TBIReset;
726}
727
728static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr)
729{
730	return mdio_read(ioaddr, MII_BMCR) & BMCR_RESET;
731}
732
733static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
734{
735	return RTL_R32(TBICSR) & TBILinkOk;
736}
737
738static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
739{
740	return RTL_R8(PHYstatus) & LinkStatus;
741}
742
743static void rtl8169_tbi_reset_enable(void __iomem *ioaddr)
744{
745	RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
746}
747
748static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
749{
750	unsigned int val;
751
752	val = mdio_read(ioaddr, MII_BMCR) | BMCR_RESET;
753	mdio_write(ioaddr, MII_BMCR, val & 0xffff);
754}
755
/*
 * Synchronize the net_device carrier state with the hardware link
 * state and drive runtime PM accordingly: resume on link-up, schedule
 * a delayed suspend on link-down.  Called under no lock; takes
 * tp->lock itself.
 */
static void rtl8169_check_link_status(struct net_device *dev,
				      struct rtl8169_private *tp,
				      void __iomem *ioaddr)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);
	if (tp->link_ok(ioaddr)) {
		/* This is to cancel a scheduled suspend if there's one. */
		pm_request_resume(&tp->pci_dev->dev);
		netif_carrier_on(dev);
		netif_info(tp, ifup, dev, "link up\n");
	} else {
		netif_carrier_off(dev);
		netif_info(tp, ifdown, dev, "link down\n");
		/* Let runtime PM suspend the idle device after 100ms. */
		pm_schedule_suspend(&tp->pci_dev->dev, 100);
	}
	spin_unlock_irqrestore(&tp->lock, flags);
}
775
776#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
777
778static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
779{
780	void __iomem *ioaddr = tp->mmio_addr;
781	u8 options;
782	u32 wolopts = 0;
783
784	options = RTL_R8(Config1);
785	if (!(options & PMEnable))
786		return 0;
787
788	options = RTL_R8(Config3);
789	if (options & LinkUp)
790		wolopts |= WAKE_PHY;
791	if (options & MagicPacket)
792		wolopts |= WAKE_MAGIC;
793
794	options = RTL_R8(Config5);
795	if (options & UWF)
796		wolopts |= WAKE_UCAST;
797	if (options & BWF)
798		wolopts |= WAKE_BCAST;
799	if (options & MWF)
800		wolopts |= WAKE_MCAST;
801
802	return wolopts;
803}
804
805static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
806{
807	struct rtl8169_private *tp = netdev_priv(dev);
808
809	spin_lock_irq(&tp->lock);
810
811	wol->supported = WAKE_ANY;
812	wol->wolopts = __rtl8169_get_wol(tp);
813
814	spin_unlock_irq(&tp->lock);
815}
816
817static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
818{
819	void __iomem *ioaddr = tp->mmio_addr;
820	unsigned int i;
821	static const struct {
822		u32 opt;
823		u16 reg;
824		u8  mask;
825	} cfg[] = {
826		{ WAKE_ANY,   Config1, PMEnable },
827		{ WAKE_PHY,   Config3, LinkUp },
828		{ WAKE_MAGIC, Config3, MagicPacket },
829		{ WAKE_UCAST, Config5, UWF },
830		{ WAKE_BCAST, Config5, BWF },
831		{ WAKE_MCAST, Config5, MWF },
832		{ WAKE_ANY,   Config5, LanWake }
833	};
834
835	RTL_W8(Cfg9346, Cfg9346_Unlock);
836
837	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
838		u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
839		if (wolopts & cfg[i].opt)
840			options |= cfg[i].mask;
841		RTL_W8(cfg[i].reg, options);
842	}
843
844	RTL_W8(Cfg9346, Cfg9346_Lock);
845}
846
847static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
848{
849	struct rtl8169_private *tp = netdev_priv(dev);
850
851	spin_lock_irq(&tp->lock);
852
853	if (wol->wolopts)
854		tp->features |= RTL_FEATURE_WOL;
855	else
856		tp->features &= ~RTL_FEATURE_WOL;
857	__rtl8169_set_wol(tp, wol->wolopts);
858	spin_unlock_irq(&tp->lock);
859
860	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
861
862	return 0;
863}
864
865static void rtl8169_get_drvinfo(struct net_device *dev,
866				struct ethtool_drvinfo *info)
867{
868	struct rtl8169_private *tp = netdev_priv(dev);
869
870	strcpy(info->driver, MODULENAME);
871	strcpy(info->version, RTL8169_VERSION);
872	strcpy(info->bus_info, pci_name(tp->pci_dev));
873}
874
875static int rtl8169_get_regs_len(struct net_device *dev)
876{
877	return R8169_REGS_SIZE;
878}
879
880static int rtl8169_set_speed_tbi(struct net_device *dev,
881				 u8 autoneg, u16 speed, u8 duplex)
882{
883	struct rtl8169_private *tp = netdev_priv(dev);
884	void __iomem *ioaddr = tp->mmio_addr;
885	int ret = 0;
886	u32 reg;
887
888	reg = RTL_R32(TBICSR);
889	if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
890	    (duplex == DUPLEX_FULL)) {
891		RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
892	} else if (autoneg == AUTONEG_ENABLE)
893		RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
894	else {
895		netif_warn(tp, link, dev,
896			   "incorrect speed setting refused in TBI mode\n");
897		ret = -EOPNOTSUPP;
898	}
899
900	return ret;
901}
902
/*
 * Speed setting for copper (MII/GMII) links.
 * With autoneg enabled, advertise 10/100 (plus 1000 on gigabit-capable
 * chips) and restart negotiation; otherwise force the requested
 * speed/duplex.  Returns 0 on success, -EINVAL for an unsupported
 * forced speed.  The exact MDIO write ordering below is significant.
 */
static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int giga_ctrl, bmcr;

	if (autoneg == AUTONEG_ENABLE) {
		int auto_nego;

		/* Advertise all 10/100 modes plus symmetric/asym pause. */
		auto_nego = mdio_read(ioaddr, MII_ADVERTISE);
		auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
			      ADVERTISE_100HALF | ADVERTISE_100FULL);
		auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		giga_ctrl = mdio_read(ioaddr, MII_CTRL1000);
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

		/* The 8100e/8101e/8102e do Fast Ethernet only. */
		if ((tp->mac_version != RTL_GIGA_MAC_VER_07) &&
		    (tp->mac_version != RTL_GIGA_MAC_VER_08) &&
		    (tp->mac_version != RTL_GIGA_MAC_VER_09) &&
		    (tp->mac_version != RTL_GIGA_MAC_VER_10) &&
		    (tp->mac_version != RTL_GIGA_MAC_VER_13) &&
		    (tp->mac_version != RTL_GIGA_MAC_VER_14) &&
		    (tp->mac_version != RTL_GIGA_MAC_VER_15) &&
		    (tp->mac_version != RTL_GIGA_MAC_VER_16)) {
			giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
		} else {
			netif_info(tp, link, dev,
				   "PHY does not support 1000Mbps\n");
		}

		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

		if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
		    (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
		    (tp->mac_version >= RTL_GIGA_MAC_VER_17)) {
			/*
			 * Wake up the PHY.
			 * Vendor specific (0x1f) and reserved (0x0e) MII
			 * registers.
			 */
			mdio_write(ioaddr, 0x1f, 0x0000);
			mdio_write(ioaddr, 0x0e, 0x0000);
		}

		mdio_write(ioaddr, MII_ADVERTISE, auto_nego);
		mdio_write(ioaddr, MII_CTRL1000, giga_ctrl);
	} else {
		giga_ctrl = 0;

		if (speed == SPEED_10)
			bmcr = 0;
		else if (speed == SPEED_100)
			bmcr = BMCR_SPEED100;
		else
			return -EINVAL;	/* forced 1000 is not supported */

		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		/* Select PHY page 0 before touching BMCR. */
		mdio_write(ioaddr, 0x1f, 0x0000);
	}

	/* Remembered so rtl8169_set_speed() can re-arm the PHY timer. */
	tp->phy_1000_ctrl_reg = giga_ctrl;

	mdio_write(ioaddr, MII_BMCR, bmcr);

	if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
		/* Vendor-specific tweak for forced 100Mbps on 8169s/8110s. */
		if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
			mdio_write(ioaddr, 0x17, 0x2138);
			mdio_write(ioaddr, 0x0e, 0x0260);
		} else {
			mdio_write(ioaddr, 0x17, 0x2108);
			mdio_write(ioaddr, 0x0e, 0x0000);
		}
	}

	return 0;
}
985
986static int rtl8169_set_speed(struct net_device *dev,
987			     u8 autoneg, u16 speed, u8 duplex)
988{
989	struct rtl8169_private *tp = netdev_priv(dev);
990	int ret;
991
992	ret = tp->set_speed(dev, autoneg, speed, duplex);
993
994	if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
995		mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
996
997	return ret;
998}
999
1000static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1001{
1002	struct rtl8169_private *tp = netdev_priv(dev);
1003	unsigned long flags;
1004	int ret;
1005
1006	spin_lock_irqsave(&tp->lock, flags);
1007	ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
1008	spin_unlock_irqrestore(&tp->lock, flags);
1009
1010	return ret;
1011}
1012
1013static u32 rtl8169_get_rx_csum(struct net_device *dev)
1014{
1015	struct rtl8169_private *tp = netdev_priv(dev);
1016
1017	return tp->cp_cmd & RxChkSum;
1018}
1019
1020static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
1021{
1022	struct rtl8169_private *tp = netdev_priv(dev);
1023	void __iomem *ioaddr = tp->mmio_addr;
1024	unsigned long flags;
1025
1026	spin_lock_irqsave(&tp->lock, flags);
1027
1028	if (data)
1029		tp->cp_cmd |= RxChkSum;
1030	else
1031		tp->cp_cmd &= ~RxChkSum;
1032
1033	RTL_W16(CPlusCmd, tp->cp_cmd);
1034	RTL_R16(CPlusCmd);
1035
1036	spin_unlock_irqrestore(&tp->lock, flags);
1037
1038	return 0;
1039}
1040
1041#ifdef CONFIG_R8169_VLAN
1042
1043static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1044				      struct sk_buff *skb)
1045{
1046	return (tp->vlgrp && vlan_tx_tag_present(skb)) ?
1047		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1048}
1049
/*
 * net_device .vlan_rx_register hook: record the VLAN group and turn
 * hardware VLAN tag stripping on/off accordingly.  Performed under
 * tp->lock because cp_cmd is a shared read-modify-write shadow.
 */
static void rtl8169_vlan_rx_register(struct net_device *dev,
				     struct vlan_group *grp)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);
	tp->vlgrp = grp;
	/*
	 * Do not disable RxVlan on 8110SCd.
	 */
	if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05))
		tp->cp_cmd |= RxVlan;
	else
		tp->cp_cmd &= ~RxVlan;
	RTL_W16(CPlusCmd, tp->cp_cmd);
	RTL_R16(CPlusCmd);	/* flush the write */
	spin_unlock_irqrestore(&tp->lock, flags);
}
1070
1071static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
1072			       struct sk_buff *skb, int polling)
1073{
1074	u32 opts2 = le32_to_cpu(desc->opts2);
1075	struct vlan_group *vlgrp = tp->vlgrp;
1076	int ret;
1077
1078	if (vlgrp && (opts2 & RxVlanTag)) {
1079		__vlan_hwaccel_rx(skb, vlgrp, swab16(opts2 & 0xffff), polling);
1080		ret = 0;
1081	} else
1082		ret = -1;
1083	desc->opts2 = 0;
1084	return ret;
1085}
1086
1087#else /* !CONFIG_R8169_VLAN */
1088
/* VLAN support compiled out: never request hardware Tx tag insertion. */
static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
				      struct sk_buff *skb)
{
	return 0;
}
1094
/* VLAN support compiled out: no frame is ever consumed here (-1). */
static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
			       struct sk_buff *skb, int polling)
{
	return -1;
}
1100
1101#endif
1102
1103static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1104{
1105	struct rtl8169_private *tp = netdev_priv(dev);
1106	void __iomem *ioaddr = tp->mmio_addr;
1107	u32 status;
1108
1109	cmd->supported =
1110		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
1111	cmd->port = PORT_FIBRE;
1112	cmd->transceiver = XCVR_INTERNAL;
1113
1114	status = RTL_R32(TBICSR);
1115	cmd->advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
1116	cmd->autoneg = !!(status & TBINwEnable);
1117
1118	cmd->speed = SPEED_1000;
1119	cmd->duplex = DUPLEX_FULL; /* Always set */
1120
1121	return 0;
1122}
1123
1124static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1125{
1126	struct rtl8169_private *tp = netdev_priv(dev);
1127
1128	return mii_ethtool_gset(&tp->mii, cmd);
1129}
1130
1131static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1132{
1133	struct rtl8169_private *tp = netdev_priv(dev);
1134	unsigned long flags;
1135	int rc;
1136
1137	spin_lock_irqsave(&tp->lock, flags);
1138
1139	rc = tp->get_settings(dev, cmd);
1140
1141	spin_unlock_irqrestore(&tp->lock, flags);
1142	return rc;
1143}
1144
/*
 * ethtool get_regs: snapshot the MMIO register window into @p,
 * clamped to R8169_REGS_SIZE and taken under the driver lock.
 */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned long flags;

	/* Never copy past the mapped register window. */
	if (regs->len > R8169_REGS_SIZE)
		regs->len = R8169_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}
1158
1159static u32 rtl8169_get_msglevel(struct net_device *dev)
1160{
1161	struct rtl8169_private *tp = netdev_priv(dev);
1162
1163	return tp->msg_enable;
1164}
1165
/* ethtool set_msglevel: @value is a NETIF_MSG_* bitmask. */
static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}
1172
/*
 * ethtool statistics names. The order here defines the data[] index
 * layout filled in by rtl8169_get_ethtool_stats() — keep them in sync.
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1188
1189static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1190{
1191	switch (sset) {
1192	case ETH_SS_STATS:
1193		return ARRAY_SIZE(rtl8169_gstrings);
1194	default:
1195		return -EOPNOTSUPP;
1196	}
1197}
1198
/*
 * Ask the chip to DMA its hardware tally counters into a temporary
 * coherent buffer and cache the result in tp->counters.
 * Best effort: silently returns on allocation failure or if the
 * receiver is off (some chips cannot dump counters then).
 */
static void rtl8169_update_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;
	int wait = 1000;

	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled.
	 */
	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
		return;

	counters = dma_alloc_coherent(&tp->pci_dev->dev, sizeof(*counters),
				      &paddr, GFP_KERNEL);
	if (!counters)
		return;

	/* Program the 64-bit DMA address, then kick off the dump. */
	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_BIT_MASK(32);
	RTL_W32(CounterAddrLow, cmd);
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	/* The CounterDump bit self-clears when the dump completes. */
	while (wait--) {
		if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
			/* copy updated counters */
			memcpy(&tp->counters, counters, sizeof(*counters));
			break;
		}
		udelay(10);
	}

	/* Clear the counter address registers before freeing the buffer. */
	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	dma_free_coherent(&tp->pci_dev->dev, sizeof(*counters), counters,
			  paddr);
}
1240
/*
 * ethtool get_ethtool_stats: refresh the cached hardware counters and
 * expand them into data[]. Index order must match rtl8169_gstrings.
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	ASSERT_RTNL();

	rtl8169_update_counters(dev);

	data[0] = le64_to_cpu(tp->counters.tx_packets);
	data[1] = le64_to_cpu(tp->counters.rx_packets);
	data[2] = le64_to_cpu(tp->counters.tx_errors);
	data[3] = le32_to_cpu(tp->counters.rx_errors);
	data[4] = le16_to_cpu(tp->counters.rx_missed);
	data[5] = le16_to_cpu(tp->counters.align_errors);
	data[6] = le32_to_cpu(tp->counters.tx_one_collision);
	data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
	data[8] = le64_to_cpu(tp->counters.rx_unicast);
	data[9] = le64_to_cpu(tp->counters.rx_broadcast);
	data[10] = le32_to_cpu(tp->counters.rx_multicast);
	data[11] = le16_to_cpu(tp->counters.tx_aborted);
	/* "tx_underun" (sic) is the field's spelling in the counters struct. */
	data[12] = le16_to_cpu(tp->counters.tx_underun);
}
1264
1265static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1266{
1267	switch(stringset) {
1268	case ETH_SS_STATS:
1269		memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1270		break;
1271	}
1272}
1273
/* ethtool entry points exported by this driver. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_rx_csum		= rtl8169_get_rx_csum,
	.set_rx_csum		= rtl8169_set_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= ethtool_op_set_tso,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_sset_count		= rtl8169_get_sset_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
};
1294
/*
 * Identify the MAC revision from the TxConfig register.
 * The table is scanned in order, so entries with more specific masks
 * must precede the broader family fallbacks; the zero-mask catch-all
 * entry guarantees the scan loop terminates (RTL_GIGA_MAC_NONE).
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
				    void __iomem *ioaddr)
{
	/*
	 * The driver currently handles the 8168Bf and the 8168Be identically
	 * but they can be identified more specifically through the test below
	 * if needed:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
	 *
	 * Same thing for the 8101Eb and the 8101Ec:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
	 */
	static const struct {
		u32 mask;
		u32 val;
		int mac_version;
	} mac_info[] = {
		/* 8168D family. */
		{ 0x7cf00000, 0x28300000,	RTL_GIGA_MAC_VER_26 },
		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
		{ 0x7c800000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },

		/* 8168C family. */
		{ 0x7cf00000, 0x3cb00000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c900000,	RTL_GIGA_MAC_VER_23 },
		{ 0x7cf00000, 0x3c800000,	RTL_GIGA_MAC_VER_18 },
		{ 0x7c800000, 0x3c800000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c000000,	RTL_GIGA_MAC_VER_19 },
		{ 0x7cf00000, 0x3c200000,	RTL_GIGA_MAC_VER_20 },
		{ 0x7cf00000, 0x3c300000,	RTL_GIGA_MAC_VER_21 },
		{ 0x7cf00000, 0x3c400000,	RTL_GIGA_MAC_VER_22 },
		{ 0x7c800000, 0x3c000000,	RTL_GIGA_MAC_VER_22 },

		/* 8168B family. */
		{ 0x7cf00000, 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x7cf00000, 0x38500000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x38000000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },

		/* 8101 family. */
		{ 0x7cf00000, 0x34a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x24a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0xfc800000, 0x30800000,	RTL_GIGA_MAC_VER_14 },

		/* 8110 family. */
		{ 0xfc800000, 0x98000000,	RTL_GIGA_MAC_VER_06 },
		{ 0xfc800000, 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0xfc800000, 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0xfc800000, 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0xfc800000, 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0xfc800000, 0x00000000,	RTL_GIGA_MAC_VER_01 },

		/* Catch-all */
		{ 0x00000000, 0x00000000,	RTL_GIGA_MAC_NONE   }
	}, *p = mac_info;
	u32 reg;

	reg = RTL_R32(TxConfig);
	/* First matching entry wins; the catch-all bounds the walk. */
	while ((reg & p->mask) != p->val)
		p++;
	tp->mac_version = p->mac_version;
}
1371
/* Debug helper: log the detected MAC revision (no-op unless RTL8169_DEBUG). */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
1376
/* One scripted PHY write: register address and value, via MDIO. */
struct phy_reg {
	u16 reg;
	u16 val;
};
1381
1382static void rtl_phy_write(void __iomem *ioaddr, const struct phy_reg *regs, int len)
1383{
1384	while (len-- > 0) {
1385		mdio_write(ioaddr, regs->reg, regs->val);
1386		regs++;
1387	}
1388}
1389
/*
 * PHY initialization for the 8169S. The register/value pairs are
 * vendor-supplied magic; page selects via reg 0x1f bracket each group.
 * Do not reorder or alter individual entries.
 */
static void rtl8169s_hw_phy_config(void __iomem *ioaddr)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x006e },
		{ 0x08, 0x0708 },
		{ 0x15, 0x4000 },
		{ 0x18, 0x65c7 },

		{ 0x1f, 0x0001 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x0000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf60 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x0077 },
		{ 0x04, 0x7800 },
		{ 0x04, 0x7000 },

		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf0f9 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xa000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x00bb },
		{ 0x04, 0xb800 },
		{ 0x04, 0xb000 },

		{ 0x03, 0xdf41 },
		{ 0x02, 0xdc60 },
		{ 0x01, 0x6340 },
		{ 0x00, 0x007d },
		{ 0x04, 0xd800 },
		{ 0x04, 0xd000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x100a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },

		{ 0x1f, 0x0000 },
		{ 0x0b, 0x0000 },
		{ 0x00, 0x9200 }
	};

	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
1456
/* PHY initialization for the 8169SB: single vendor-magic write on page 2. */
static void rtl8169sb_hw_phy_config(void __iomem *ioaddr)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x01, 0x90d0 },
		{ 0x1f, 0x0000 }
	};

	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
1467
1468static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp,
1469					   void __iomem *ioaddr)
1470{
1471	struct pci_dev *pdev = tp->pci_dev;
1472	u16 vendor_id, device_id;
1473
1474	pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
1475	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &device_id);
1476
1477	if ((vendor_id != PCI_VENDOR_ID_GIGABYTE) || (device_id != 0xe000))
1478		return;
1479
1480	mdio_write(ioaddr, 0x1f, 0x0001);
1481	mdio_write(ioaddr, 0x10, 0xf01b);
1482	mdio_write(ioaddr, 0x1f, 0x0000);
1483}
1484
/*
 * PHY initialization for the 8110SCd (vendor-magic write script),
 * followed by the Gigabyte board quirk.
 */
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp,
				     void __iomem *ioaddr)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x14, 0xfb54 },
		{ 0x18, 0xf5c7 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8169scd_hw_phy_config_quirk(tp, ioaddr);
}
1532
/* PHY initialization for the 8110SCe (vendor-magic write script). */
static void rtl8169sce_hw_phy_config(void __iomem *ioaddr)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0x8480 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x18, 0x67c7 },
		{ 0x04, 0x2000 },
		{ 0x03, 0x002f },
		{ 0x02, 0x4360 },
		{ 0x01, 0x0109 },
		{ 0x00, 0x3022 },
		{ 0x04, 0x2800 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
1585
/* PHY initialization for the 8168Bb: set bit 0 of reg 0x16 on page 1. */
static void rtl8168bb_hw_phy_config(void __iomem *ioaddr)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	mdio_write(ioaddr, 0x1f, 0x0001);
	mdio_patch(ioaddr, 0x16, 1 << 0);

	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
1598
/* PHY initialization for the 8168Be/Bf (vendor-magic write script). */
static void rtl8168bef_hw_phy_config(void __iomem *ioaddr)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
1609
/* PHY initialization for the first 8168CP variant. */
static void rtl8168cp_1_hw_phy_config(void __iomem *ioaddr)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0000 },
		{ 0x1d, 0x0f00 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x1ec8 },
		{ 0x1f, 0x0000 }
	};

	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
1622
/* PHY initialization for the second 8168CP variant. */
static void rtl8168cp_2_hw_phy_config(void __iomem *ioaddr)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0000 }
	};

	/* OR-in single bits on page 0 before the scripted writes. */
	mdio_write(ioaddr, 0x1f, 0x0000);
	mdio_patch(ioaddr, 0x14, 1 << 5);
	mdio_patch(ioaddr, 0x0d, 1 << 5);

	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
1637
/* PHY initialization for the first 8168C variant (vendor-magic script). */
static void rtl8168c_1_hw_phy_config(void __iomem *ioaddr)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1f, 0x0002 },
		{ 0x00, 0x88d4 },
		{ 0x01, 0x82b1 },
		{ 0x03, 0x7002 },
		{ 0x08, 0x9e30 },
		{ 0x09, 0x01f0 },
		{ 0x0a, 0x5500 },
		{ 0x0c, 0x00c8 },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xc096 },
		{ 0x16, 0x000a },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x09, 0x2000 },
		{ 0x09, 0x0000 }
	};

	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* OR-in single bits after the script, then return to page 0. */
	mdio_patch(ioaddr, 0x14, 1 << 5);
	mdio_patch(ioaddr, 0x0d, 1 << 5);
	mdio_write(ioaddr, 0x1f, 0x0000);
}
1666
/* PHY initialization for the second 8168C variant (vendor-magic script). */
static void rtl8168c_2_hw_phy_config(void __iomem *ioaddr)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x0761 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* OR-in single bits after the script, then return to page 0. */
	mdio_patch(ioaddr, 0x16, 1 << 0);
	mdio_patch(ioaddr, 0x14, 1 << 5);
	mdio_patch(ioaddr, 0x0d, 1 << 5);
	mdio_write(ioaddr, 0x1f, 0x0000);
}
1694
/* PHY initialization for the third 8168C variant (vendor-magic script). */
static void rtl8168c_3_hw_phy_config(void __iomem *ioaddr)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x5461 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* OR-in single bits after the script, then return to page 0. */
	mdio_patch(ioaddr, 0x16, 1 << 0);
	mdio_patch(ioaddr, 0x14, 1 << 5);
	mdio_patch(ioaddr, 0x0d, 1 << 5);
	mdio_write(ioaddr, 0x1f, 0x0000);
}
1716
/* The fourth 8168C variant uses the same PHY setup as the third. */
static void rtl8168c_4_hw_phy_config(void __iomem *ioaddr)
{
	rtl8168c_3_hw_phy_config(ioaddr);
}
1721
/*
 * PHY initialization for the first 8168D variant. All register/value
 * pairs are vendor-supplied magic (including a long page-5 reg-0x06
 * byte stream that programs the PHY's embedded processor); the script
 * branches on an efuse byte to pick per-revision tuning. Do not alter
 * or reorder individual entries.
 */
static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
{
	static const struct phy_reg phy_reg_init_0[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 }
	};
	static const struct phy_reg phy_reg_init_1[] = {
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 }
	};
	/* Byte stream loaded through page-5 reg 0x06 (PHY firmware patch). */
	static const struct phy_reg phy_reg_init_2[] = {
		{ 0x1f, 0x0005 },
		{ 0x05, 0xffc2 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8000 },
		{ 0x06, 0xf8f9 },
		{ 0x06, 0xfaef },
		{ 0x06, 0x59ee },
		{ 0x06, 0xf8ea },
		{ 0x06, 0x00ee },
		{ 0x06, 0xf8eb },
		{ 0x06, 0x00e0 },
		{ 0x06, 0xf87c },
		{ 0x06, 0xe1f8 },
		{ 0x06, 0x7d59 },
		{ 0x06, 0x0fef },
		{ 0x06, 0x0139 },
		{ 0x06, 0x029e },
		{ 0x06, 0x06ef },
		{ 0x06, 0x1039 },
		{ 0x06, 0x089f },
		{ 0x06, 0x2aee },
		{ 0x06, 0xf8ea },
		{ 0x06, 0x00ee },
		{ 0x06, 0xf8eb },
		{ 0x06, 0x01e0 },
		{ 0x06, 0xf87c },
		{ 0x06, 0xe1f8 },
		{ 0x06, 0x7d58 },
		{ 0x06, 0x409e },
		{ 0x06, 0x0f39 },
		{ 0x06, 0x46aa },
		{ 0x06, 0x0bbf },
		{ 0x06, 0x8290 },
		{ 0x06, 0xd682 },
		{ 0x06, 0x9802 },
		{ 0x06, 0x014f },
		{ 0x06, 0xae09 },
		{ 0x06, 0xbf82 },
		{ 0x06, 0x98d6 },
		{ 0x06, 0x82a0 },
		{ 0x06, 0x0201 },
		{ 0x06, 0x4fef },
		{ 0x06, 0x95fe },
		{ 0x06, 0xfdfc },
		{ 0x06, 0x05f8 },
		{ 0x06, 0xf9fa },
		{ 0x06, 0xeef8 },
		{ 0x06, 0xea00 },
		{ 0x06, 0xeef8 },
		{ 0x06, 0xeb00 },
		{ 0x06, 0xe2f8 },
		{ 0x06, 0x7ce3 },
		{ 0x06, 0xf87d },
		{ 0x06, 0xa511 },
		{ 0x06, 0x1112 },
		{ 0x06, 0xd240 },
		{ 0x06, 0xd644 },
		{ 0x06, 0x4402 },
		{ 0x06, 0x8217 },
		{ 0x06, 0xd2a0 },
		{ 0x06, 0xd6aa },
		{ 0x06, 0xaa02 },
		{ 0x06, 0x8217 },
		{ 0x06, 0xae0f },
		{ 0x06, 0xa544 },
		{ 0x06, 0x4402 },
		{ 0x06, 0xae4d },
		{ 0x06, 0xa5aa },
		{ 0x06, 0xaa02 },
		{ 0x06, 0xae47 },
		{ 0x06, 0xaf82 },
		{ 0x06, 0x13ee },
		{ 0x06, 0x834e },
		{ 0x06, 0x00ee },
		{ 0x06, 0x834d },
		{ 0x06, 0x0fee },
		{ 0x06, 0x834c },
		{ 0x06, 0x0fee },
		{ 0x06, 0x834f },
		{ 0x06, 0x00ee },
		{ 0x06, 0x8351 },
		{ 0x06, 0x00ee },
		{ 0x06, 0x834a },
		{ 0x06, 0xffee },
		{ 0x06, 0x834b },
		{ 0x06, 0xffe0 },
		{ 0x06, 0x8330 },
		{ 0x06, 0xe183 },
		{ 0x06, 0x3158 },
		{ 0x06, 0xfee4 },
		{ 0x06, 0xf88a },
		{ 0x06, 0xe5f8 },
		{ 0x06, 0x8be0 },
		{ 0x06, 0x8332 },
		{ 0x06, 0xe183 },
		{ 0x06, 0x3359 },
		{ 0x06, 0x0fe2 },
		{ 0x06, 0x834d },
		{ 0x06, 0x0c24 },
		{ 0x06, 0x5af0 },
		{ 0x06, 0x1e12 },
		{ 0x06, 0xe4f8 },
		{ 0x06, 0x8ce5 },
		{ 0x06, 0xf88d },
		{ 0x06, 0xaf82 },
		{ 0x06, 0x13e0 },
		{ 0x06, 0x834f },
		{ 0x06, 0x10e4 },
		{ 0x06, 0x834f },
		{ 0x06, 0xe083 },
		{ 0x06, 0x4e78 },
		{ 0x06, 0x009f },
		{ 0x06, 0x0ae0 },
		{ 0x06, 0x834f },
		{ 0x06, 0xa010 },
		{ 0x06, 0xa5ee },
		{ 0x06, 0x834e },
		{ 0x06, 0x01e0 },
		{ 0x06, 0x834e },
		{ 0x06, 0x7805 },
		{ 0x06, 0x9e9a },
		{ 0x06, 0xe083 },
		{ 0x06, 0x4e78 },
		{ 0x06, 0x049e },
		{ 0x06, 0x10e0 },
		{ 0x06, 0x834e },
		{ 0x06, 0x7803 },
		{ 0x06, 0x9e0f },
		{ 0x06, 0xe083 },
		{ 0x06, 0x4e78 },
		{ 0x06, 0x019e },
		{ 0x06, 0x05ae },
		{ 0x06, 0x0caf },
		{ 0x06, 0x81f8 },
		{ 0x06, 0xaf81 },
		{ 0x06, 0xa3af },
		{ 0x06, 0x81dc },
		{ 0x06, 0xaf82 },
		{ 0x06, 0x13ee },
		{ 0x06, 0x8348 },
		{ 0x06, 0x00ee },
		{ 0x06, 0x8349 },
		{ 0x06, 0x00e0 },
		{ 0x06, 0x8351 },
		{ 0x06, 0x10e4 },
		{ 0x06, 0x8351 },
		{ 0x06, 0x5801 },
		{ 0x06, 0x9fea },
		{ 0x06, 0xd000 },
		{ 0x06, 0xd180 },
		{ 0x06, 0x1f66 },
		{ 0x06, 0xe2f8 },
		{ 0x06, 0xeae3 },
		{ 0x06, 0xf8eb },
		{ 0x06, 0x5af8 },
		{ 0x06, 0x1e20 },
		{ 0x06, 0xe6f8 },
		{ 0x06, 0xeae5 },
		{ 0x06, 0xf8eb },
		{ 0x06, 0xd302 },
		{ 0x06, 0xb3fe },
		{ 0x06, 0xe2f8 },
		{ 0x06, 0x7cef },
		{ 0x06, 0x325b },
		{ 0x06, 0x80e3 },
		{ 0x06, 0xf87d },
		{ 0x06, 0x9e03 },
		{ 0x06, 0x7dff },
		{ 0x06, 0xff0d },
		{ 0x06, 0x581c },
		{ 0x06, 0x551a },
		{ 0x06, 0x6511 },
		{ 0x06, 0xa190 },
		{ 0x06, 0xd3e2 },
		{ 0x06, 0x8348 },
		{ 0x06, 0xe383 },
		{ 0x06, 0x491b },
		{ 0x06, 0x56ab },
		{ 0x06, 0x08ef },
		{ 0x06, 0x56e6 },
		{ 0x06, 0x8348 },
		{ 0x06, 0xe783 },
		{ 0x06, 0x4910 },
		{ 0x06, 0xd180 },
		{ 0x06, 0x1f66 },
		{ 0x06, 0xa004 },
		{ 0x06, 0xb9e2 },
		{ 0x06, 0x8348 },
		{ 0x06, 0xe383 },
		{ 0x06, 0x49ef },
		{ 0x06, 0x65e2 },
		{ 0x06, 0x834a },
		{ 0x06, 0xe383 },
		{ 0x06, 0x4b1b },
		{ 0x06, 0x56aa },
		{ 0x06, 0x0eef },
		{ 0x06, 0x56e6 },
		{ 0x06, 0x834a },
		{ 0x06, 0xe783 },
		{ 0x06, 0x4be2 },
		{ 0x06, 0x834d },
		{ 0x06, 0xe683 },
		{ 0x06, 0x4ce0 },
		{ 0x06, 0x834d },
		{ 0x06, 0xa000 },
		{ 0x06, 0x0caf },
		{ 0x06, 0x81dc },
		{ 0x06, 0xe083 },
		{ 0x06, 0x4d10 },
		{ 0x06, 0xe483 },
		{ 0x06, 0x4dae },
		{ 0x06, 0x0480 },
		{ 0x06, 0xe483 },
		{ 0x06, 0x4de0 },
		{ 0x06, 0x834e },
		{ 0x06, 0x7803 },
		{ 0x06, 0x9e0b },
		{ 0x06, 0xe083 },
		{ 0x06, 0x4e78 },
		{ 0x06, 0x049e },
		{ 0x06, 0x04ee },
		{ 0x06, 0x834e },
		{ 0x06, 0x02e0 },
		{ 0x06, 0x8332 },
		{ 0x06, 0xe183 },
		{ 0x06, 0x3359 },
		{ 0x06, 0x0fe2 },
		{ 0x06, 0x834d },
		{ 0x06, 0x0c24 },
		{ 0x06, 0x5af0 },
		{ 0x06, 0x1e12 },
		{ 0x06, 0xe4f8 },
		{ 0x06, 0x8ce5 },
		{ 0x06, 0xf88d },
		{ 0x06, 0xe083 },
		{ 0x06, 0x30e1 },
		{ 0x06, 0x8331 },
		{ 0x06, 0x6801 },
		{ 0x06, 0xe4f8 },
		{ 0x06, 0x8ae5 },
		{ 0x06, 0xf88b },
		{ 0x06, 0xae37 },
		{ 0x06, 0xee83 },
		{ 0x06, 0x4e03 },
		{ 0x06, 0xe083 },
		{ 0x06, 0x4ce1 },
		{ 0x06, 0x834d },
		{ 0x06, 0x1b01 },
		{ 0x06, 0x9e04 },
		{ 0x06, 0xaaa1 },
		{ 0x06, 0xaea8 },
		{ 0x06, 0xee83 },
		{ 0x06, 0x4e04 },
		{ 0x06, 0xee83 },
		{ 0x06, 0x4f00 },
		{ 0x06, 0xaeab },
		{ 0x06, 0xe083 },
		{ 0x06, 0x4f78 },
		{ 0x06, 0x039f },
		{ 0x06, 0x14ee },
		{ 0x06, 0x834e },
		{ 0x06, 0x05d2 },
		{ 0x06, 0x40d6 },
		{ 0x06, 0x5554 },
		{ 0x06, 0x0282 },
		{ 0x06, 0x17d2 },
		{ 0x06, 0xa0d6 },
		{ 0x06, 0xba00 },
		{ 0x06, 0x0282 },
		{ 0x06, 0x17fe },
		{ 0x06, 0xfdfc },
		{ 0x06, 0x05f8 },
		{ 0x06, 0xe0f8 },
		{ 0x06, 0x60e1 },
		{ 0x06, 0xf861 },
		{ 0x06, 0x6802 },
		{ 0x06, 0xe4f8 },
		{ 0x06, 0x60e5 },
		{ 0x06, 0xf861 },
		{ 0x06, 0xe0f8 },
		{ 0x06, 0x48e1 },
		{ 0x06, 0xf849 },
		{ 0x06, 0x580f },
		{ 0x06, 0x1e02 },
		{ 0x06, 0xe4f8 },
		{ 0x06, 0x48e5 },
		{ 0x06, 0xf849 },
		{ 0x06, 0xd000 },
		{ 0x06, 0x0282 },
		{ 0x06, 0x5bbf },
		{ 0x06, 0x8350 },
		{ 0x06, 0xef46 },
		{ 0x06, 0xdc19 },
		{ 0x06, 0xddd0 },
		{ 0x06, 0x0102 },
		{ 0x06, 0x825b },
		{ 0x06, 0x0282 },
		{ 0x06, 0x77e0 },
		{ 0x06, 0xf860 },
		{ 0x06, 0xe1f8 },
		{ 0x06, 0x6158 },
		{ 0x06, 0xfde4 },
		{ 0x06, 0xf860 },
		{ 0x06, 0xe5f8 },
		{ 0x06, 0x61fc },
		{ 0x06, 0x04f9 },
		{ 0x06, 0xfafb },
		{ 0x06, 0xc6bf },
		{ 0x06, 0xf840 },
		{ 0x06, 0xbe83 },
		{ 0x06, 0x50a0 },
		{ 0x06, 0x0101 },
		{ 0x06, 0x071b },
		{ 0x06, 0x89cf },
		{ 0x06, 0xd208 },
		{ 0x06, 0xebdb },
		{ 0x06, 0x19b2 },
		{ 0x06, 0xfbff },
		{ 0x06, 0xfefd },
		{ 0x06, 0x04f8 },
		{ 0x06, 0xe0f8 },
		{ 0x06, 0x48e1 },
		{ 0x06, 0xf849 },
		{ 0x06, 0x6808 },
		{ 0x06, 0xe4f8 },
		{ 0x06, 0x48e5 },
		{ 0x06, 0xf849 },
		{ 0x06, 0x58f7 },
		{ 0x06, 0xe4f8 },
		{ 0x06, 0x48e5 },
		{ 0x06, 0xf849 },
		{ 0x06, 0xfc04 },
		{ 0x06, 0x4d20 },
		{ 0x06, 0x0002 },
		{ 0x06, 0x4e22 },
		{ 0x06, 0x0002 },
		{ 0x06, 0x4ddf },
		{ 0x06, 0xff01 },
		{ 0x06, 0x4edd },
		{ 0x06, 0xff01 },
		{ 0x05, 0x83d4 },
		{ 0x06, 0x8000 },
		{ 0x05, 0x83d8 },
		{ 0x06, 0x8051 },
		{ 0x02, 0x6010 },
		{ 0x03, 0xdc00 },
		{ 0x05, 0xfff6 },
		{ 0x06, 0x00fc },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 },
		{ 0x1f, 0x0000 }
	};

	rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/* Adjust regs 0x0b/0x0c on page 2 by masked add/clear. */
	mdio_write(ioaddr, 0x1f, 0x0002);
	mdio_plus_minus(ioaddr, 0x0b, 0x0010, 0x00ef);
	mdio_plus_minus(ioaddr, 0x0c, 0xa200, 0x5d00);

	rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1));

	/* Efuse byte 0x01 selects between two per-revision tunings. */
	if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },
			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = mdio_read(ioaddr, 0x0d);

		/* Walk reg 0x0d's low byte up to 0x6c in fixed steps. */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			mdio_write(ioaddr, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				mdio_write(ioaddr, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x6662 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x6662 }
		};

		rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	mdio_write(ioaddr, 0x1f, 0x0002);
	mdio_patch(ioaddr, 0x0d, 0x0300);
	mdio_patch(ioaddr, 0x0f, 0x0010);

	mdio_write(ioaddr, 0x1f, 0x0002);
	mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600);
	mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000);

	rtl_phy_write(ioaddr, phy_reg_init_2, ARRAY_SIZE(phy_reg_init_2));
}
2164
2165static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
2166{
2167	static const struct phy_reg phy_reg_init_0[] = {
2168		{ 0x1f, 0x0001 },
2169		{ 0x06, 0x4064 },
2170		{ 0x07, 0x2863 },
2171		{ 0x08, 0x059c },
2172		{ 0x09, 0x26b4 },
2173		{ 0x0a, 0x6a19 },
2174		{ 0x0b, 0xdcc8 },
2175		{ 0x10, 0xf06d },
2176		{ 0x14, 0x7f68 },
2177		{ 0x18, 0x7fd9 },
2178		{ 0x1c, 0xf0ff },
2179		{ 0x1d, 0x3d9c },
2180		{ 0x1f, 0x0003 },
2181		{ 0x12, 0xf49f },
2182		{ 0x13, 0x070b },
2183		{ 0x1a, 0x05ad },
2184		{ 0x14, 0x94c0 },
2185
2186		{ 0x1f, 0x0002 },
2187		{ 0x06, 0x5561 },
2188		{ 0x1f, 0x0005 },
2189		{ 0x05, 0x8332 },
2190		{ 0x06, 0x5561 }
2191	};
2192	static const struct phy_reg phy_reg_init_1[] = {
2193		{ 0x1f, 0x0005 },
2194		{ 0x05, 0xffc2 },
2195		{ 0x1f, 0x0005 },
2196		{ 0x05, 0x8000 },
2197		{ 0x06, 0xf8f9 },
2198		{ 0x06, 0xfaee },
2199		{ 0x06, 0xf8ea },
2200		{ 0x06, 0x00ee },
2201		{ 0x06, 0xf8eb },
2202		{ 0x06, 0x00e2 },
2203		{ 0x06, 0xf87c },
2204		{ 0x06, 0xe3f8 },
2205		{ 0x06, 0x7da5 },
2206		{ 0x06, 0x1111 },
2207		{ 0x06, 0x12d2 },
2208		{ 0x06, 0x40d6 },
2209		{ 0x06, 0x4444 },
2210		{ 0x06, 0x0281 },
2211		{ 0x06, 0xc6d2 },
2212		{ 0x06, 0xa0d6 },
2213		{ 0x06, 0xaaaa },
2214		{ 0x06, 0x0281 },
2215		{ 0x06, 0xc6ae },
2216		{ 0x06, 0x0fa5 },
2217		{ 0x06, 0x4444 },
2218		{ 0x06, 0x02ae },
2219		{ 0x06, 0x4da5 },
2220		{ 0x06, 0xaaaa },
2221		{ 0x06, 0x02ae },
2222		{ 0x06, 0x47af },
2223		{ 0x06, 0x81c2 },
2224		{ 0x06, 0xee83 },
2225		{ 0x06, 0x4e00 },
2226		{ 0x06, 0xee83 },
2227		{ 0x06, 0x4d0f },
2228		{ 0x06, 0xee83 },
2229		{ 0x06, 0x4c0f },
2230		{ 0x06, 0xee83 },
2231		{ 0x06, 0x4f00 },
2232		{ 0x06, 0xee83 },
2233		{ 0x06, 0x5100 },
2234		{ 0x06, 0xee83 },
2235		{ 0x06, 0x4aff },
2236		{ 0x06, 0xee83 },
2237		{ 0x06, 0x4bff },
2238		{ 0x06, 0xe083 },
2239		{ 0x06, 0x30e1 },
2240		{ 0x06, 0x8331 },
2241		{ 0x06, 0x58fe },
2242		{ 0x06, 0xe4f8 },
2243		{ 0x06, 0x8ae5 },
2244		{ 0x06, 0xf88b },
2245		{ 0x06, 0xe083 },
2246		{ 0x06, 0x32e1 },
2247		{ 0x06, 0x8333 },
2248		{ 0x06, 0x590f },
2249		{ 0x06, 0xe283 },
2250		{ 0x06, 0x4d0c },
2251		{ 0x06, 0x245a },
2252		{ 0x06, 0xf01e },
2253		{ 0x06, 0x12e4 },
2254		{ 0x06, 0xf88c },
2255		{ 0x06, 0xe5f8 },
2256		{ 0x06, 0x8daf },
2257		{ 0x06, 0x81c2 },
2258		{ 0x06, 0xe083 },
2259		{ 0x06, 0x4f10 },
2260		{ 0x06, 0xe483 },
2261		{ 0x06, 0x4fe0 },
2262		{ 0x06, 0x834e },
2263		{ 0x06, 0x7800 },
2264		{ 0x06, 0x9f0a },
2265		{ 0x06, 0xe083 },
2266		{ 0x06, 0x4fa0 },
2267		{ 0x06, 0x10a5 },
2268		{ 0x06, 0xee83 },
2269		{ 0x06, 0x4e01 },
2270		{ 0x06, 0xe083 },
2271		{ 0x06, 0x4e78 },
2272		{ 0x06, 0x059e },
2273		{ 0x06, 0x9ae0 },
2274		{ 0x06, 0x834e },
2275		{ 0x06, 0x7804 },
2276		{ 0x06, 0x9e10 },
2277		{ 0x06, 0xe083 },
2278		{ 0x06, 0x4e78 },
2279		{ 0x06, 0x039e },
2280		{ 0x06, 0x0fe0 },
2281		{ 0x06, 0x834e },
2282		{ 0x06, 0x7801 },
2283		{ 0x06, 0x9e05 },
2284		{ 0x06, 0xae0c },
2285		{ 0x06, 0xaf81 },
2286		{ 0x06, 0xa7af },
2287		{ 0x06, 0x8152 },
2288		{ 0x06, 0xaf81 },
2289		{ 0x06, 0x8baf },
2290		{ 0x06, 0x81c2 },
2291		{ 0x06, 0xee83 },
2292		{ 0x06, 0x4800 },
2293		{ 0x06, 0xee83 },
2294		{ 0x06, 0x4900 },
2295		{ 0x06, 0xe083 },
2296		{ 0x06, 0x5110 },
2297		{ 0x06, 0xe483 },
2298		{ 0x06, 0x5158 },
2299		{ 0x06, 0x019f },
2300		{ 0x06, 0xead0 },
2301		{ 0x06, 0x00d1 },
2302		{ 0x06, 0x801f },
2303		{ 0x06, 0x66e2 },
2304		{ 0x06, 0xf8ea },
2305		{ 0x06, 0xe3f8 },
2306		{ 0x06, 0xeb5a },
2307		{ 0x06, 0xf81e },
2308		{ 0x06, 0x20e6 },
2309		{ 0x06, 0xf8ea },
2310		{ 0x06, 0xe5f8 },
2311		{ 0x06, 0xebd3 },
2312		{ 0x06, 0x02b3 },
2313		{ 0x06, 0xfee2 },
2314		{ 0x06, 0xf87c },
2315		{ 0x06, 0xef32 },
2316		{ 0x06, 0x5b80 },
2317		{ 0x06, 0xe3f8 },
2318		{ 0x06, 0x7d9e },
2319		{ 0x06, 0x037d },
2320		{ 0x06, 0xffff },
2321		{ 0x06, 0x0d58 },
2322		{ 0x06, 0x1c55 },
2323		{ 0x06, 0x1a65 },
2324		{ 0x06, 0x11a1 },
2325		{ 0x06, 0x90d3 },
2326		{ 0x06, 0xe283 },
2327		{ 0x06, 0x48e3 },
2328		{ 0x06, 0x8349 },
2329		{ 0x06, 0x1b56 },
2330		{ 0x06, 0xab08 },
2331		{ 0x06, 0xef56 },
2332		{ 0x06, 0xe683 },
2333		{ 0x06, 0x48e7 },
2334		{ 0x06, 0x8349 },
2335		{ 0x06, 0x10d1 },
2336		{ 0x06, 0x801f },
2337		{ 0x06, 0x66a0 },
2338		{ 0x06, 0x04b9 },
2339		{ 0x06, 0xe283 },
2340		{ 0x06, 0x48e3 },
2341		{ 0x06, 0x8349 },
2342		{ 0x06, 0xef65 },
2343		{ 0x06, 0xe283 },
2344		{ 0x06, 0x4ae3 },
2345		{ 0x06, 0x834b },
2346		{ 0x06, 0x1b56 },
2347		{ 0x06, 0xaa0e },
2348		{ 0x06, 0xef56 },
2349		{ 0x06, 0xe683 },
2350		{ 0x06, 0x4ae7 },
2351		{ 0x06, 0x834b },
2352		{ 0x06, 0xe283 },
2353		{ 0x06, 0x4de6 },
2354		{ 0x06, 0x834c },
2355		{ 0x06, 0xe083 },
2356		{ 0x06, 0x4da0 },
2357		{ 0x06, 0x000c },
2358		{ 0x06, 0xaf81 },
2359		{ 0x06, 0x8be0 },
2360		{ 0x06, 0x834d },
2361		{ 0x06, 0x10e4 },
2362		{ 0x06, 0x834d },
2363		{ 0x06, 0xae04 },
2364		{ 0x06, 0x80e4 },
2365		{ 0x06, 0x834d },
2366		{ 0x06, 0xe083 },
2367		{ 0x06, 0x4e78 },
2368		{ 0x06, 0x039e },
2369		{ 0x06, 0x0be0 },
2370		{ 0x06, 0x834e },
2371		{ 0x06, 0x7804 },
2372		{ 0x06, 0x9e04 },
2373		{ 0x06, 0xee83 },
2374		{ 0x06, 0x4e02 },
2375		{ 0x06, 0xe083 },
2376		{ 0x06, 0x32e1 },
2377		{ 0x06, 0x8333 },
2378		{ 0x06, 0x590f },
2379		{ 0x06, 0xe283 },
2380		{ 0x06, 0x4d0c },
2381		{ 0x06, 0x245a },
2382		{ 0x06, 0xf01e },
2383		{ 0x06, 0x12e4 },
2384		{ 0x06, 0xf88c },
2385		{ 0x06, 0xe5f8 },
2386		{ 0x06, 0x8de0 },
2387		{ 0x06, 0x8330 },
2388		{ 0x06, 0xe183 },
2389		{ 0x06, 0x3168 },
2390		{ 0x06, 0x01e4 },
2391		{ 0x06, 0xf88a },
2392		{ 0x06, 0xe5f8 },
2393		{ 0x06, 0x8bae },
2394		{ 0x06, 0x37ee },
2395		{ 0x06, 0x834e },
2396		{ 0x06, 0x03e0 },
2397		{ 0x06, 0x834c },
2398		{ 0x06, 0xe183 },
2399		{ 0x06, 0x4d1b },
2400		{ 0x06, 0x019e },
2401		{ 0x06, 0x04aa },
2402		{ 0x06, 0xa1ae },
2403		{ 0x06, 0xa8ee },
2404		{ 0x06, 0x834e },
2405		{ 0x06, 0x04ee },
2406		{ 0x06, 0x834f },
2407		{ 0x06, 0x00ae },
2408		{ 0x06, 0xabe0 },
2409		{ 0x06, 0x834f },
2410		{ 0x06, 0x7803 },
2411		{ 0x06, 0x9f14 },
2412		{ 0x06, 0xee83 },
2413		{ 0x06, 0x4e05 },
2414		{ 0x06, 0xd240 },
2415		{ 0x06, 0xd655 },
2416		{ 0x06, 0x5402 },
2417		{ 0x06, 0x81c6 },
2418		{ 0x06, 0xd2a0 },
2419		{ 0x06, 0xd6ba },
2420		{ 0x06, 0x0002 },
2421		{ 0x06, 0x81c6 },
2422		{ 0x06, 0xfefd },
2423		{ 0x06, 0xfc05 },
2424		{ 0x06, 0xf8e0 },
2425		{ 0x06, 0xf860 },
2426		{ 0x06, 0xe1f8 },
2427		{ 0x06, 0x6168 },
2428		{ 0x06, 0x02e4 },
2429		{ 0x06, 0xf860 },
2430		{ 0x06, 0xe5f8 },
2431		{ 0x06, 0x61e0 },
2432		{ 0x06, 0xf848 },
2433		{ 0x06, 0xe1f8 },
2434		{ 0x06, 0x4958 },
2435		{ 0x06, 0x0f1e },
2436		{ 0x06, 0x02e4 },
2437		{ 0x06, 0xf848 },
2438		{ 0x06, 0xe5f8 },
2439		{ 0x06, 0x49d0 },
2440		{ 0x06, 0x0002 },
2441		{ 0x06, 0x820a },
2442		{ 0x06, 0xbf83 },
2443		{ 0x06, 0x50ef },
2444		{ 0x06, 0x46dc },
2445		{ 0x06, 0x19dd },
2446		{ 0x06, 0xd001 },
2447		{ 0x06, 0x0282 },
2448		{ 0x06, 0x0a02 },
2449		{ 0x06, 0x8226 },
2450		{ 0x06, 0xe0f8 },
2451		{ 0x06, 0x60e1 },
2452		{ 0x06, 0xf861 },
2453		{ 0x06, 0x58fd },
2454		{ 0x06, 0xe4f8 },
2455		{ 0x06, 0x60e5 },
2456		{ 0x06, 0xf861 },
2457		{ 0x06, 0xfc04 },
2458		{ 0x06, 0xf9fa },
2459		{ 0x06, 0xfbc6 },
2460		{ 0x06, 0xbff8 },
2461		{ 0x06, 0x40be },
2462		{ 0x06, 0x8350 },
2463		{ 0x06, 0xa001 },
2464		{ 0x06, 0x0107 },
2465		{ 0x06, 0x1b89 },
2466		{ 0x06, 0xcfd2 },
2467		{ 0x06, 0x08eb },
2468		{ 0x06, 0xdb19 },
2469		{ 0x06, 0xb2fb },
2470		{ 0x06, 0xfffe },
2471		{ 0x06, 0xfd04 },
2472		{ 0x06, 0xf8e0 },
2473		{ 0x06, 0xf848 },
2474		{ 0x06, 0xe1f8 },
2475		{ 0x06, 0x4968 },
2476		{ 0x06, 0x08e4 },
2477		{ 0x06, 0xf848 },
2478		{ 0x06, 0xe5f8 },
2479		{ 0x06, 0x4958 },
2480		{ 0x06, 0xf7e4 },
2481		{ 0x06, 0xf848 },
2482		{ 0x06, 0xe5f8 },
2483		{ 0x06, 0x49fc },
2484		{ 0x06, 0x044d },
2485		{ 0x06, 0x2000 },
2486		{ 0x06, 0x024e },
2487		{ 0x06, 0x2200 },
2488		{ 0x06, 0x024d },
2489		{ 0x06, 0xdfff },
2490		{ 0x06, 0x014e },
2491		{ 0x06, 0xddff },
2492		{ 0x06, 0x0100 },
2493		{ 0x05, 0x83d8 },
2494		{ 0x06, 0x8000 },
2495		{ 0x03, 0xdc00 },
2496		{ 0x05, 0xfff6 },
2497		{ 0x06, 0x00fc },
2498		{ 0x1f, 0x0000 },
2499
2500		{ 0x1f, 0x0000 },
2501		{ 0x0d, 0xf880 },
2502		{ 0x1f, 0x0000 }
2503	};
2504
2505	rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2506
2507	if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
2508		static const struct phy_reg phy_reg_init[] = {
2509			{ 0x1f, 0x0002 },
2510			{ 0x05, 0x669a },
2511			{ 0x1f, 0x0005 },
2512			{ 0x05, 0x8330 },
2513			{ 0x06, 0x669a },
2514
2515			{ 0x1f, 0x0002 }
2516		};
2517		int val;
2518
2519		rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2520
2521		val = mdio_read(ioaddr, 0x0d);
2522		if ((val & 0x00ff) != 0x006c) {
2523			u32 set[] = {
2524				0x0065, 0x0066, 0x0067, 0x0068,
2525				0x0069, 0x006a, 0x006b, 0x006c
2526			};
2527			int i;
2528
2529			mdio_write(ioaddr, 0x1f, 0x0002);
2530
2531			val &= 0xff00;
2532			for (i = 0; i < ARRAY_SIZE(set); i++)
2533				mdio_write(ioaddr, 0x0d, val | set[i]);
2534		}
2535	} else {
2536		static const struct phy_reg phy_reg_init[] = {
2537			{ 0x1f, 0x0002 },
2538			{ 0x05, 0x2642 },
2539			{ 0x1f, 0x0005 },
2540			{ 0x05, 0x8330 },
2541			{ 0x06, 0x2642 }
2542		};
2543
2544		rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2545	}
2546
2547	mdio_write(ioaddr, 0x1f, 0x0002);
2548	mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600);
2549	mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000);
2550
2551	mdio_write(ioaddr, 0x1f, 0x0001);
2552	mdio_write(ioaddr, 0x17, 0x0cc0);
2553
2554	mdio_write(ioaddr, 0x1f, 0x0002);
2555	mdio_patch(ioaddr, 0x0f, 0x0017);
2556
2557	rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1));
2558}
2559
/*
 * PHY fixups for the RTL8168D variant handled as RTL_GIGA_MAC_VER_27.
 *
 * The table is an order-sensitive list of (register, value) MDIO writes;
 * writes to register 0x1f select the PHY page for the entries that follow.
 * Do not reorder entries.
 */
static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x10, 0x0008 },
		{ 0x0d, 0x006c },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0xa4d8 },
		{ 0x09, 0x281c },
		{ 0x07, 0x2883 },
		{ 0x0a, 0x6b35 },
		{ 0x1d, 0x3da4 },
		{ 0x1c, 0xeffd },
		{ 0x14, 0x7f52 },
		{ 0x18, 0x7fc6 },
		{ 0x08, 0x0601 },
		{ 0x06, 0x4063 },
		{ 0x10, 0xf074 },
		{ 0x1f, 0x0003 },
		{ 0x13, 0x0789 },
		{ 0x12, 0xf4bd },
		{ 0x1a, 0x04fd },
		{ 0x14, 0x84b0 },
		{ 0x1f, 0x0000 },
		{ 0x00, 0x9200 },

		{ 0x1f, 0x0005 },
		{ 0x01, 0x0340 },
		{ 0x1f, 0x0001 },
		{ 0x04, 0x4000 },
		{ 0x03, 0x1d21 },
		{ 0x02, 0x0c32 },
		{ 0x01, 0x0200 },
		{ 0x00, 0x5554 },
		{ 0x04, 0x4800 },
		{ 0x04, 0x4000 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0023 },
		{ 0x16, 0x0000 },
		{ 0x1f, 0x0000 }
	};

	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2620
/*
 * PHY fixups for the RTL8102E family (RTL_GIGA_MAC_VER_07..09).
 *
 * First sets individual bits in PHY registers 0x11/0x19/0x10 on page 0
 * via read-modify-write (mdio_patch), then applies the order-sensitive
 * register/value table below.
 */
static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0003 },
		{ 0x08, 0x441d },
		{ 0x01, 0x9100 },
		{ 0x1f, 0x0000 }
	};

	mdio_write(ioaddr, 0x1f, 0x0000);	/* select page 0 */
	mdio_patch(ioaddr, 0x11, 1 << 12);	/* set bit 12 of reg 0x11 */
	mdio_patch(ioaddr, 0x19, 1 << 13);	/* set bit 13 of reg 0x19 */
	mdio_patch(ioaddr, 0x10, 1 << 15);	/* set bit 15 of reg 0x10 */

	rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2637
2638static void rtl_hw_phy_config(struct net_device *dev)
2639{
2640	struct rtl8169_private *tp = netdev_priv(dev);
2641	void __iomem *ioaddr = tp->mmio_addr;
2642
2643	rtl8169_print_mac_version(tp);
2644
2645	switch (tp->mac_version) {
2646	case RTL_GIGA_MAC_VER_01:
2647		break;
2648	case RTL_GIGA_MAC_VER_02:
2649	case RTL_GIGA_MAC_VER_03:
2650		rtl8169s_hw_phy_config(ioaddr);
2651		break;
2652	case RTL_GIGA_MAC_VER_04:
2653		rtl8169sb_hw_phy_config(ioaddr);
2654		break;
2655	case RTL_GIGA_MAC_VER_05:
2656		rtl8169scd_hw_phy_config(tp, ioaddr);
2657		break;
2658	case RTL_GIGA_MAC_VER_06:
2659		rtl8169sce_hw_phy_config(ioaddr);
2660		break;
2661	case RTL_GIGA_MAC_VER_07:
2662	case RTL_GIGA_MAC_VER_08:
2663	case RTL_GIGA_MAC_VER_09:
2664		rtl8102e_hw_phy_config(ioaddr);
2665		break;
2666	case RTL_GIGA_MAC_VER_11:
2667		rtl8168bb_hw_phy_config(ioaddr);
2668		break;
2669	case RTL_GIGA_MAC_VER_12:
2670		rtl8168bef_hw_phy_config(ioaddr);
2671		break;
2672	case RTL_GIGA_MAC_VER_17:
2673		rtl8168bef_hw_phy_config(ioaddr);
2674		break;
2675	case RTL_GIGA_MAC_VER_18:
2676		rtl8168cp_1_hw_phy_config(ioaddr);
2677		break;
2678	case RTL_GIGA_MAC_VER_19:
2679		rtl8168c_1_hw_phy_config(ioaddr);
2680		break;
2681	case RTL_GIGA_MAC_VER_20:
2682		rtl8168c_2_hw_phy_config(ioaddr);
2683		break;
2684	case RTL_GIGA_MAC_VER_21:
2685		rtl8168c_3_hw_phy_config(ioaddr);
2686		break;
2687	case RTL_GIGA_MAC_VER_22:
2688		rtl8168c_4_hw_phy_config(ioaddr);
2689		break;
2690	case RTL_GIGA_MAC_VER_23:
2691	case RTL_GIGA_MAC_VER_24:
2692		rtl8168cp_2_hw_phy_config(ioaddr);
2693		break;
2694	case RTL_GIGA_MAC_VER_25:
2695		rtl8168d_1_hw_phy_config(ioaddr);
2696		break;
2697	case RTL_GIGA_MAC_VER_26:
2698		rtl8168d_2_hw_phy_config(ioaddr);
2699		break;
2700	case RTL_GIGA_MAC_VER_27:
2701		rtl8168d_3_hw_phy_config(ioaddr);
2702		break;
2703
2704	default:
2705		break;
2706	}
2707}
2708
/*
 * Periodic PHY watchdog (timer callback, runs with IRQs enabled).
 *
 * While 1000FULL is advertised and the link is down, keep resetting the
 * PHY and re-arm the timer; once the link is up the timer is not re-armed
 * (rtl8169_request_timer restarts it elsewhere).
 */
static void rtl8169_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct rtl8169_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);

	/* Only relevant when gigabit (1000FULL) is being advertised. */
	if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
		return;

	spin_lock_irq(&tp->lock);

	if (tp->phy_reset_pending(ioaddr)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	/* Link came up: stop resetting, do not re-arm the timer. */
	if (tp->link_ok(ioaddr))
		goto out_unlock;

	netif_warn(tp, link, dev, "PHY reset until link up\n");

	tp->phy_reset_enable(ioaddr);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
out_unlock:
	spin_unlock_irq(&tp->lock);
}
2745
2746static inline void rtl8169_delete_timer(struct net_device *dev)
2747{
2748	struct rtl8169_private *tp = netdev_priv(dev);
2749	struct timer_list *timer = &tp->timer;
2750
2751	if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
2752		return;
2753
2754	del_timer_sync(timer);
2755}
2756
2757static inline void rtl8169_request_timer(struct net_device *dev)
2758{
2759	struct rtl8169_private *tp = netdev_priv(dev);
2760	struct timer_list *timer = &tp->timer;
2761
2762	if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
2763		return;
2764
2765	mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT);
2766}
2767
2768#ifdef CONFIG_NET_POLL_CONTROLLER
2769/*
2770 * Polling 'interrupt' - used by things like netconsole to send skbs
2771 * without having to re-enable interrupts. It's not called while
2772 * the interrupt routine is executing.
2773 */
2774static void rtl8169_netpoll(struct net_device *dev)
2775{
2776	struct rtl8169_private *tp = netdev_priv(dev);
2777	struct pci_dev *pdev = tp->pci_dev;
2778
2779	disable_irq(pdev->irq);
2780	rtl8169_interrupt(pdev->irq, dev);
2781	enable_irq(pdev->irq);
2782}
2783#endif
2784
/*
 * Undo the probe-time board setup: unmap registers, release PCI regions,
 * clear MWI, disable the PCI device and free the net_device.  Called in
 * the reverse order of acquisition.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
2794
2795static void rtl8169_phy_reset(struct net_device *dev,
2796			      struct rtl8169_private *tp)
2797{
2798	void __iomem *ioaddr = tp->mmio_addr;
2799	unsigned int i;
2800
2801	tp->phy_reset_enable(ioaddr);
2802	for (i = 0; i < 100; i++) {
2803		if (!tp->phy_reset_pending(ioaddr))
2804			return;
2805		msleep(1);
2806	}
2807	netif_err(tp, link, dev, "PHY reset failed\n");
2808}
2809
/*
 * Full PHY bring-up for one device: apply the per-chip PHY fixups,
 * a few MAC/PCI config tweaks for the older (<= VER_06) chips, reset
 * the PHY and restart autonegotiation advertising up to 1000/Full.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_phy_config(dev);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
	}

	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		mdio_write(ioaddr, 0x0b, 0x0000); /* clear PHY reg 0x0b (w 0x0b 15 0 0) */
	}

	rtl8169_phy_reset(dev, tp);

	/*
	 * rtl8169_set_speed_xmii takes good care of the Fast Ethernet
	 * only 8101. Don't panic.
	 */
	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL);

	if (RTL_R8(PHYstatus) & TBI_Enable)
		netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
2844
/*
 * Program the unicast MAC filter (MAC0..MAC5) with @addr.
 *
 * The registers are protected by the Cfg9346 lock; the reads after each
 * write flush the posted MMIO write before the next step.  MAC4 (high
 * half) is written before MAC0 (low half) - keep this order.
 */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 high;
	u32 low;

	/* addr[0] is the lowest byte of MAC0, addr[5] the highest of MAC4. */
	low  = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
	high = addr[4] | (addr[5] << 8);

	spin_lock_irq(&tp->lock);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W32(MAC4, high);
	RTL_R32(MAC4);

	RTL_W32(MAC0, low);
	RTL_R32(MAC0);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	spin_unlock_irq(&tp->lock);
}
2868
2869static int rtl_set_mac_address(struct net_device *dev, void *p)
2870{
2871	struct rtl8169_private *tp = netdev_priv(dev);
2872	struct sockaddr *addr = p;
2873
2874	if (!is_valid_ether_addr(addr->sa_data))
2875		return -EADDRNOTAVAIL;
2876
2877	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2878
2879	rtl_rar_set(tp, dev->dev_addr);
2880
2881	return 0;
2882}
2883
2884static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2885{
2886	struct rtl8169_private *tp = netdev_priv(dev);
2887	struct mii_ioctl_data *data = if_mii(ifr);
2888
2889	return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
2890}
2891
2892static int rtl_xmii_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
2893{
2894	switch (cmd) {
2895	case SIOCGMIIPHY:
2896		data->phy_id = 32; /* Internal PHY */
2897		return 0;
2898
2899	case SIOCGMIIREG:
2900		data->val_out = mdio_read(tp->mmio_addr, data->reg_num & 0x1f);
2901		return 0;
2902
2903	case SIOCSMIIREG:
2904		mdio_write(tp->mmio_addr, data->reg_num & 0x1f, data->val_in);
2905		return 0;
2906	}
2907	return -EOPNOTSUPP;
2908}
2909
/* TBI mode stub: MII ioctls are not supported without an MDIO PHY. */
static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
{
	return -EOPNOTSUPP;
}
2914
/*
 * Per-family configuration table, indexed by the RTL_CFG_* value carried
 * in the PCI device table's driver_data.  Each entry names the family's
 * hw_start routine, the PCI BAR holding the registers, rx alignment,
 * interrupt masks, feature flags and a fallback MAC version used when
 * the chip cannot be identified.
 */
static const struct rtl_cfg_info {
	void (*hw_start)(struct net_device *);
	unsigned int region;
	unsigned int align;
	u16 intr_event;
	u16 napi_event;
	unsigned features;
	u8 default_ver;
} rtl_cfg_infos [] = {
	[RTL_CFG_0] = {	/* 8169 family */
		.hw_start	= rtl_hw_start_8169,
		.region		= 1,
		.align		= 0,
		.intr_event	= SYSErr | LinkChg | RxOverflow |
				  RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
		.napi_event	= RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
		.features	= RTL_FEATURE_GMII,
		.default_ver	= RTL_GIGA_MAC_VER_01,
	},
	[RTL_CFG_1] = {	/* 8168 family */
		.hw_start	= rtl_hw_start_8168,
		.region		= 2,
		.align		= 8,
		.intr_event	= SYSErr | LinkChg | RxOverflow |
				  TxErr | TxOK | RxOK | RxErr,
		.napi_event	= TxErr | TxOK | RxOK | RxOverflow,
		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_11,
	},
	[RTL_CFG_2] = {	/* 8101 family */
		.hw_start	= rtl_hw_start_8101,
		.region		= 2,
		.align		= 8,
		.intr_event	= SYSErr | LinkChg | RxOverflow | PCSTimeout |
				  RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
		.napi_event	= RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
		.features	= RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_13,
	}
};
2955
2956/* Cfg9346_Unlock assumed. */
2957static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
2958			    const struct rtl_cfg_info *cfg)
2959{
2960	unsigned msi = 0;
2961	u8 cfg2;
2962
2963	cfg2 = RTL_R8(Config2) & ~MSIEnable;
2964	if (cfg->features & RTL_FEATURE_MSI) {
2965		if (pci_enable_msi(pdev)) {
2966			dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
2967		} else {
2968			cfg2 |= MSIEnable;
2969			msi = RTL_FEATURE_MSI;
2970		}
2971	}
2972	RTL_W8(Config2, cfg2);
2973	return msi;
2974}
2975
2976static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
2977{
2978	if (tp->features & RTL_FEATURE_MSI) {
2979		pci_disable_msi(pdev);
2980		tp->features &= ~RTL_FEATURE_MSI;
2981	}
2982}
2983
/* net_device callbacks shared by all supported chip variants. */
static const struct net_device_ops rtl8169_netdev_ops = {
	.ndo_open		= rtl8169_open,
	.ndo_stop		= rtl8169_close,
	.ndo_get_stats		= rtl8169_get_stats,
	.ndo_start_xmit		= rtl8169_start_xmit,
	.ndo_tx_timeout		= rtl8169_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= rtl8169_change_mtu,
	.ndo_set_mac_address	= rtl_set_mac_address,
	.ndo_do_ioctl		= rtl8169_ioctl,
	.ndo_set_multicast_list	= rtl_set_rx_mode,
#ifdef CONFIG_R8169_VLAN
	.ndo_vlan_rx_register	= rtl8169_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8169_netpoll,
#endif

};
3003
3004static int __devinit
3005rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3006{
3007	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
3008	const unsigned int region = cfg->region;
3009	struct rtl8169_private *tp;
3010	struct mii_if_info *mii;
3011	struct net_device *dev;
3012	void __iomem *ioaddr;
3013	unsigned int i;
3014	int rc;
3015
3016	if (netif_msg_drv(&debug)) {
3017		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
3018		       MODULENAME, RTL8169_VERSION);
3019	}
3020
3021	dev = alloc_etherdev(sizeof (*tp));
3022	if (!dev) {
3023		if (netif_msg_drv(&debug))
3024			dev_err(&pdev->dev, "unable to alloc new ethernet\n");
3025		rc = -ENOMEM;
3026		goto out;
3027	}
3028
3029	SET_NETDEV_DEV(dev, &pdev->dev);
3030	dev->netdev_ops = &rtl8169_netdev_ops;
3031	tp = netdev_priv(dev);
3032	tp->dev = dev;
3033	tp->pci_dev = pdev;
3034	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
3035
3036	mii = &tp->mii;
3037	mii->dev = dev;
3038	mii->mdio_read = rtl_mdio_read;
3039	mii->mdio_write = rtl_mdio_write;
3040	mii->phy_id_mask = 0x1f;
3041	mii->reg_num_mask = 0x1f;
3042	mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
3043
3044	/* enable device (incl. PCI PM wakeup and hotplug setup) */
3045	rc = pci_enable_device(pdev);
3046	if (rc < 0) {
3047		netif_err(tp, probe, dev, "enable failure\n");
3048		goto err_out_free_dev_1;
3049	}
3050
3051	if (pci_set_mwi(pdev) < 0)
3052		netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
3053
3054	/* make sure PCI base addr 1 is MMIO */
3055	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
3056		netif_err(tp, probe, dev,
3057			  "region #%d not an MMIO resource, aborting\n",
3058			  region);
3059		rc = -ENODEV;
3060		goto err_out_mwi_2;
3061	}
3062
3063	/* check for weird/broken PCI region reporting */
3064	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
3065		netif_err(tp, probe, dev,
3066			  "Invalid PCI region size(s), aborting\n");
3067		rc = -ENODEV;
3068		goto err_out_mwi_2;
3069	}
3070
3071	rc = pci_request_regions(pdev, MODULENAME);
3072	if (rc < 0) {
3073		netif_err(tp, probe, dev, "could not request regions\n");
3074		goto err_out_mwi_2;
3075	}
3076
3077	tp->cp_cmd = PCIMulRW | RxChkSum;
3078
3079	if ((sizeof(dma_addr_t) > 4) &&
3080	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
3081		tp->cp_cmd |= PCIDAC;
3082		dev->features |= NETIF_F_HIGHDMA;
3083	} else {
3084		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3085		if (rc < 0) {
3086			netif_err(tp, probe, dev, "DMA configuration failed\n");
3087			goto err_out_free_res_3;
3088		}
3089	}
3090
3091	/* ioremap MMIO region */
3092	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
3093	if (!ioaddr) {
3094		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
3095		rc = -EIO;
3096		goto err_out_free_res_3;
3097	}
3098
3099	tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3100	if (!tp->pcie_cap)
3101		netif_info(tp, probe, dev, "no PCI Express capability\n");
3102
3103	RTL_W16(IntrMask, 0x0000);
3104
3105	/* Soft reset the chip. */
3106	RTL_W8(ChipCmd, CmdReset);
3107
3108	/* Check that the chip has finished the reset. */
3109	for (i = 0; i < 100; i++) {
3110		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
3111			break;
3112		msleep_interruptible(1);
3113	}
3114
3115	RTL_W16(IntrStatus, 0xffff);
3116
3117	pci_set_master(pdev);
3118
3119	/* Identify chip attached to board */
3120	rtl8169_get_mac_version(tp, ioaddr);
3121
3122	/* Use appropriate default if unknown */
3123	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
3124		netif_notice(tp, probe, dev,
3125			     "unknown MAC, using family default\n");
3126		tp->mac_version = cfg->default_ver;
3127	}
3128
3129	rtl8169_print_mac_version(tp);
3130
3131	for (i = 0; i < ARRAY_SIZE(rtl_chip_info); i++) {
3132		if (tp->mac_version == rtl_chip_info[i].mac_version)
3133			break;
3134	}
3135	if (i == ARRAY_SIZE(rtl_chip_info)) {
3136		dev_err(&pdev->dev,
3137			"driver bug, MAC version not found in rtl_chip_info\n");
3138		goto err_out_msi_4;
3139	}
3140	tp->chipset = i;
3141
3142	RTL_W8(Cfg9346, Cfg9346_Unlock);
3143	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
3144	RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
3145	if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
3146		tp->features |= RTL_FEATURE_WOL;
3147	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
3148		tp->features |= RTL_FEATURE_WOL;
3149	tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
3150	RTL_W8(Cfg9346, Cfg9346_Lock);
3151
3152	if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) &&
3153	    (RTL_R8(PHYstatus) & TBI_Enable)) {
3154		tp->set_speed = rtl8169_set_speed_tbi;
3155		tp->get_settings = rtl8169_gset_tbi;
3156		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
3157		tp->phy_reset_pending = rtl8169_tbi_reset_pending;
3158		tp->link_ok = rtl8169_tbi_link_ok;
3159		tp->do_ioctl = rtl_tbi_ioctl;
3160
3161		tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */
3162	} else {
3163		tp->set_speed = rtl8169_set_speed_xmii;
3164		tp->get_settings = rtl8169_gset_xmii;
3165		tp->phy_reset_enable = rtl8169_xmii_reset_enable;
3166		tp->phy_reset_pending = rtl8169_xmii_reset_pending;
3167		tp->link_ok = rtl8169_xmii_link_ok;
3168		tp->do_ioctl = rtl_xmii_ioctl;
3169	}
3170
3171	spin_lock_init(&tp->lock);
3172
3173	tp->mmio_addr = ioaddr;
3174
3175	/* Get MAC address */
3176	for (i = 0; i < MAC_ADDR_LEN; i++)
3177		dev->dev_addr[i] = RTL_R8(MAC0 + i);
3178	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
3179
3180	SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
3181	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
3182	dev->irq = pdev->irq;
3183	dev->base_addr = (unsigned long) ioaddr;
3184
3185	netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
3186
3187#ifdef CONFIG_R8169_VLAN
3188	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3189#endif
3190
3191	tp->intr_mask = 0xffff;
3192	tp->align = cfg->align;
3193	tp->hw_start = cfg->hw_start;
3194	tp->intr_event = cfg->intr_event;
3195	tp->napi_event = cfg->napi_event;
3196
3197	init_timer(&tp->timer);
3198	tp->timer.data = (unsigned long) dev;
3199	tp->timer.function = rtl8169_phy_timer;
3200
3201	rc = register_netdev(dev);
3202	if (rc < 0)
3203		goto err_out_msi_4;
3204
3205	pci_set_drvdata(pdev, dev);
3206
3207	netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
3208		   rtl_chip_info[tp->chipset].name,
3209		   dev->base_addr, dev->dev_addr,
3210		   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
3211
3212	rtl8169_init_phy(dev, tp);
3213
3214	/*
3215	 * Pretend we are using VLANs; This bypasses a nasty bug where
3216	 * Interrupts stop flowing on high load on 8110SCd controllers.
3217	 */
3218	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3219		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
3220
3221	device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
3222
3223	if (pci_dev_run_wake(pdev))
3224		pm_runtime_put_noidle(&pdev->dev);
3225
3226out:
3227	return rc;
3228
3229err_out_msi_4:
3230	rtl_disable_msi(pdev, tp);
3231	iounmap(ioaddr);
3232err_out_free_res_3:
3233	pci_release_regions(pdev);
3234err_out_mwi_2:
3235	pci_clear_mwi(pdev);
3236	pci_disable_device(pdev);
3237err_out_free_dev_1:
3238	free_netdev(dev);
3239	goto out;
3240}
3241
/*
 * PCI remove: unregister the net_device, restore the permanent MAC
 * address in hardware, then release MSI and the board resources.
 * The order (flush work, unregister, teardown) must be preserved.
 */
static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	/* Balance the probe-time pm_runtime_put_noidle(). */
	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl_disable_msi(pdev, tp);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);
}
3261
3262static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
3263				  unsigned int mtu)
3264{
3265	unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
3266
3267	if (max_frame != 16383)
3268		printk(KERN_WARNING PFX "WARNING! Changing of MTU on this "
3269			"NIC may lead to frame reception errors!\n");
3270
3271	tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
3272}
3273
/*
 * ndo_open: allocate the Tx/Rx descriptor rings, populate the Rx ring,
 * request the IRQ, start the hardware and arm the PHY watchdog.
 * Failure paths unwind through the goto ladder at the bottom.
 */
static int rtl8169_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Note that we use a magic value here, it's weird I know
	 * it's done because, some subset of rtl8169 hardware suffers from
	 * a problem in which frames received that are longer than
	 * the size set in RxMaxSize register return garbage sizes
	 * when received.  To avoid this we need to turn off filtering,
	 * which is done by setting a value of 16383 in the RxMaxSize register
	 * and allocating 16k frames to handle the largest possible rx value
	 * thats what the magic math below does.
	 */
	rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * dma_alloc_coherent provides more.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_DELAYED_WORK(&tp->task, NULL);

	smp_mb();

	/* MSI interrupts are exclusive; legacy IRQ may be shared. */
	retval = request_irq(dev->irq, rtl8169_interrupt,
			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_ring_2;

	napi_enable(&tp->napi);

	rtl_hw_start(dev);

	rtl8169_request_timer(dev);

	tp->saved_wolopts = 0;
	pm_runtime_put_noidle(&pdev->dev);

	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
out:
	return retval;

err_release_ring_2:
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
3349
/*
 * Mask/ack all interrupts and issue a chip soft reset.  The final read
 * flushes the posted write; completion of the reset is not awaited here.
 */
static void rtl8169_hw_reset(void __iomem *ioaddr)
{
	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(ioaddr);

	/* Reset the chipset */
	RTL_W8(ChipCmd, CmdReset);

	/* PCI commit */
	RTL_R8(ChipCmd);
}
3361
/*
 * Program RxConfig and TxConfig: merge the driver's rx configuration
 * with the chipset-specific bits already in RxConfig (masked per
 * rtl_chip_info), then set the Tx DMA burst size and interframe gap.
 */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 cfg = rtl8169_rx_config;

	/* Preserve the chip-specific RxConfig bits outside our mask. */
	cfg |= (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
	RTL_W32(RxConfig, cfg);

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}
3374
3375static void rtl_hw_start(struct net_device *dev)
3376{
3377	struct rtl8169_private *tp = netdev_priv(dev);
3378	void __iomem *ioaddr = tp->mmio_addr;
3379	unsigned int i;
3380
3381	/* Soft reset the chip. */
3382	RTL_W8(ChipCmd, CmdReset);
3383
3384	/* Check that the chip has finished the reset. */
3385	for (i = 0; i < 100; i++) {
3386		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
3387			break;
3388		msleep_interruptible(1);
3389	}
3390
3391	tp->hw_start(dev);
3392
3393	netif_start_queue(dev);
3394}
3395
3396
/*
 * Load the Tx/Rx descriptor ring base addresses into the chip,
 * splitting each 64-bit DMA address into high/low 32-bit registers.
 */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
3410
/*
 * Read CPlusCmd and write the same value back, returning what was read.
 * The write-back of the just-read value is intentional (hardware quirk
 * handling); callers OR additional bits into the returned value.
 */
static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
{
	u16 cmd;

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);
	return cmd;
}
3419
/*
 * Program RxMaxSize one byte above the buffer size; see the comment in
 * rtl8169_open about why size filtering is effectively disabled.
 */
static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, rx_buf_sz + 1);
}
3425
3426static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
3427{
3428	static const struct {
3429		u32 mac_version;
3430		u32 clk;
3431		u32 val;
3432	} cfg2_info [] = {
3433		{ RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
3434		{ RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
3435		{ RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
3436		{ RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
3437	}, *p = cfg2_info;
3438	unsigned int i;
3439	u32 clk;
3440
3441	clk = RTL_R8(Config2) & PCI_Clock_66MHz;
3442	for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
3443		if ((p->mac_version == mac_version) && (p->clk == clk)) {
3444			RTL_W32(0x7c, p->val);
3445			break;
3446		}
3447	}
3448}
3449
/*
 * Family-specific hardware init for the original 8169/8110 chips.
 * The register write sequence below is order-sensitive — several steps
 * depend on the MAC version, and early versions (VER_01..04) must enable
 * Tx/Rx before the Rx/Tx config registers are programmed.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* 8110SCd quirk: force PCI multiple read/write and a fixed
	 * cache line size. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_04))
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(EarlyTxThres, EarlyTxThld);

	rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);

	/* Early chips: program Rx/Tx config while Tx/Rx are already on. */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_04))
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
		dprintk("Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(ioaddr, tp->mac_version);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	/* Later chips: enable Tx/Rx and program the config registers
	 * only after the descriptor rings are in place. */
	if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);

	/* Enable all known interrupts by setting the interrupt mask. */
	RTL_W16(IntrMask, tp->intr_event);
}
3522
3523static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
3524{
3525	struct net_device *dev = pci_get_drvdata(pdev);
3526	struct rtl8169_private *tp = netdev_priv(dev);
3527	int cap = tp->pcie_cap;
3528
3529	if (cap) {
3530		u16 ctl;
3531
3532		pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
3533		ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
3534		pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
3535	}
3536}
3537
/*
 * Enable CSI (config space indirect) access: read CSI register 0x070c,
 * keep the low 24 bits and set the magic 0x27 enable byte on top.
 */
static void rtl_csi_access_enable(void __iomem *ioaddr)
{
	u32 csi;

	csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
	rtl_csi_write(ioaddr, 0x070c, csi | 0x27000000);
}
3545
/* One EPHY patch entry: at register 'offset', clear the 'mask' bits and
 * then set the 'bits' bits (applied by rtl_ephy_init()). */
struct ephy_info {
	unsigned int offset;
	u16 mask;
	u16 bits;
};
3551
3552static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
3553{
3554	u16 w;
3555
3556	while (len-- > 0) {
3557		w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
3558		rtl_ephy_write(ioaddr, e->offset, w);
3559		e++;
3560	}
3561}
3562
3563static void rtl_disable_clock_request(struct pci_dev *pdev)
3564{
3565	struct net_device *dev = pci_get_drvdata(pdev);
3566	struct rtl8169_private *tp = netdev_priv(dev);
3567	int cap = tp->pcie_cap;
3568
3569	if (cap) {
3570		u16 ctl;
3571
3572		pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
3573		ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
3574		pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
3575	}
3576}
3577
/* CPlusCmd debug/test bits that must be cleared on 8168 chips for
 * normal operation. */
#define R8168_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
3588
/* Chip-specific init for the 8168B (rev. b): disable the beacon, clear
 * the CPlusCmd quirk bits and set a 4KB PCIe read request + no-snoop. */
static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
{
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);

	rtl_tx_performance_tweak(pdev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
3598
/* Chip-specific init for the 8168B (rev. e/f): 8168bb setup plus the
 * early-Tx threshold and clearing bit 0 of Config4. */
static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_hw_start_8168bb(ioaddr, pdev);

	RTL_W8(EarlyTxThres, EarlyTxThld);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
3607
/* Common tail of the 8168C/CP init paths: power saving on link-down,
 * no beacon, PCIe read-request tweak, CLKREQ off, quirk bits cleared. */
static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
{
	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_disable_clock_request(pdev);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
3620
/* 8168CP rev. 1: enable CSI access, apply the EPHY patch table, then
 * run the common 8168C/CP tail. */
static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
	/* Undocumented EPHY fixups for this revision. */
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0042 },
		{ 0x06, 0x0080,	0x0000 },
		{ 0x07, 0,	0x2000 }
	};

	rtl_csi_access_enable(ioaddr);

	rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));

	__rtl_hw_start_8168cp(ioaddr, pdev);
}
3637
/* 8168CP rev. 2: like the common tail but without Speed_down and
 * without disabling CLKREQ. */
static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable(ioaddr);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
3648
/* 8168CP rev. 3: CSI access on, a magic debug-register write, early-Tx
 * threshold and the usual PCIe/quirk housekeeping. */
static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable(ioaddr);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Magic. */
	RTL_W8(DBG_REG, 0x20);

	RTL_W8(EarlyTxThres, EarlyTxThld);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
3664
/* 8168C rev. 1: CSI access on, debug-register NAK fixups, EPHY patch
 * table, then the common 8168C/CP tail. */
static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
	/* Undocumented EPHY fixups for this revision. */
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0002 },
		{ 0x06, 0x0080,	0x0000 }
	};

	rtl_csi_access_enable(ioaddr);

	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));

	__rtl_hw_start_8168cp(ioaddr, pdev);
}
3681
/* 8168C rev. 2: CSI access on, EPHY patch table, then the common
 * 8168C/CP tail. */
static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
	/* Undocumented EPHY fixups for this revision. */
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x03, 0x0400,	0x0220 }
	};

	rtl_csi_access_enable(ioaddr);

	rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));

	__rtl_hw_start_8168cp(ioaddr, pdev);
}
3695
/* 8168C rev. 3: identical init to rev. 2. */
static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_hw_start_8168c_2(ioaddr, pdev);
}
3700
/* 8168C rev. 4: no EPHY fixups needed — CSI access plus the common
 * 8168C/CP tail. */
static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable(ioaddr);

	__rtl_hw_start_8168cp(ioaddr, pdev);
}
3707
/* 8168D: CSI access on, CLKREQ off, early-Tx threshold, PCIe
 * read-request tweak and quirk bits cleared. */
static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable(ioaddr);

	rtl_disable_clock_request(pdev);

	RTL_W8(EarlyTxThres, EarlyTxThld);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
3720
/*
 * Family-specific hardware init for the 8168 line.  Performs the common
 * setup (thresholds, rings, interrupt mitigation), dispatches to the
 * per-revision init routine, then enables Tx/Rx and the interrupt mask.
 * The write order is order-sensitive hardware bring-up — do not reorder.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(EarlyTxThres, EarlyTxThld);

	rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);

	/* Keep the packet counter disabled; INTT_1 selects the interrupt
	 * mitigation timer granularity. */
	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* 8168B rev.b quirk: RxFIFOOver must be watched, RxOverflow not. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->intr_event |= RxFIFOOver | PCSTimeout;
		tp->intr_event &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_mode(dev);

	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));

	/* PCI commit */
	RTL_R8(IntrMask);

	/* Per-revision init. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(ioaddr, pdev);
	break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(ioaddr, pdev);
	break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(ioaddr, pdev);
	break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(ioaddr, pdev);
	break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(ioaddr, pdev);
	break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(ioaddr, pdev);
	break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(ioaddr, pdev);
	break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(ioaddr, pdev);
	break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(ioaddr, pdev);
	break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(ioaddr, pdev);
	break;

	default:
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
			dev->name, tp->mac_version);
	break;
	}

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);

	RTL_W16(IntrMask, tp->intr_event);
}
3811
/* CPlusCmd debug/test bits that must be cleared on 810x chips for
 * normal operation (also clears PCIDAC and PCIMulRW). */
#define R810X_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	PCIDAC | \
	PCIMulRW)
3823
/* 8102E rev. 1: CSI on, NAK fixup, PCIe tweak, LED/power config, quirk
 * bits cleared and the per-revision EPHY patch table applied last. */
static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
	/* Undocumented EPHY fixups for this revision (note 0x01 is
	 * deliberately written three times). */
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01,	0, 0x6e65 },
		{ 0x02,	0, 0x091f },
		{ 0x03,	0, 0xc2f9 },
		{ 0x06,	0, 0xafb5 },
		{ 0x07,	0, 0x0e00 },
		{ 0x19,	0, 0xec80 },
		{ 0x01,	0, 0x2e65 },
		{ 0x01,	0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable(ioaddr);

	RTL_W8(DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* If both LED bits stuck, drop LEDS0 again. */
	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);

	rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
3856
/* 8102E rev. 2: CSI on, PCIe tweak, basic Config1 power/map setup,
 * beacon off and quirk bits cleared — no EPHY fixups. */
static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_csi_access_enable(ioaddr);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
}
3868
/* 8102E rev. 3: rev. 2 setup plus one extra EPHY write. */
static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
{
	rtl_hw_start_8102e_2(ioaddr, pdev);

	rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
}
3875
/*
 * Family-specific hardware init for the 8101/8102 line: a PCIe no-snoop
 * quirk for two revisions, a per-revision init dispatch, then the common
 * threshold/ring/interrupt bring-up.  Order-sensitive — do not reorder.
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* These revisions require no-snoop; note the Device Control
	 * register is overwritten, not read-modify-written. */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
		int cap = tp->pcie_cap;

		if (cap) {
			pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
					      PCI_EXP_DEVCTL_NOSNOOP_EN);
		}
	}

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(ioaddr, pdev);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(ioaddr, pdev);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(EarlyTxThres, EarlyTxThld);

	rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* PCI commit */
	RTL_R8(IntrMask);

	rtl_set_rx_mode(dev);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);

	RTL_W16(IntrMask, tp->intr_event);
}
3935
/*
 * ndo_change_mtu handler.  Validates the new MTU, then — if the
 * interface is up — tears the device down, resizes the Rx buffers,
 * rebuilds the rings and restarts the hardware.
 * Returns 0 on success or a negative errno.
 */
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret = 0;

	if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
		return -EINVAL;

	dev->mtu = new_mtu;

	/* Interface down: the new MTU takes effect on next open. */
	if (!netif_running(dev))
		goto out;

	rtl8169_down(dev);

	rtl8169_set_rxbufsize(tp, dev->mtu);

	ret = rtl8169_init_ring(dev);
	/* NOTE(review): on ring-init failure the device stays down with
	 * the MTU already changed — confirm callers tolerate this. */
	if (ret < 0)
		goto out;

	napi_enable(&tp->napi);

	rtl_hw_start(dev);

	rtl8169_request_timer(dev);

out:
	return ret;
}
3966
/* Poison an Rx descriptor so the NIC will never DMA into it: plant a
 * recognizable bogus address and clear DescOwn (and reserved bits). */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
3972
/*
 * Release one Rx buffer: unmap its DMA mapping, free the skb, NULL the
 * ring slot and poison the descriptor so the NIC cannot reuse it.
 */
static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
				struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
3984
3985static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
3986{
3987	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
3988
3989	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
3990}
3991
/*
 * Install a DMA mapping into an Rx descriptor and give it to the NIC.
 * The wmb() guarantees the address is visible before DescOwn is set —
 * do not reorder.
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
3999
4000static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
4001					    struct net_device *dev,
4002					    struct RxDesc *desc, int rx_buf_sz,
4003					    unsigned int align, gfp_t gfp)
4004{
4005	struct sk_buff *skb;
4006	dma_addr_t mapping;
4007	unsigned int pad;
4008
4009	pad = align ? align : NET_IP_ALIGN;
4010
4011	skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp);
4012	if (!skb)
4013		goto err_out;
4014
4015	skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad);
4016
4017	mapping = dma_map_single(&pdev->dev, skb->data, rx_buf_sz,
4018				 PCI_DMA_FROMDEVICE);
4019
4020	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
4021out:
4022	return skb;
4023
4024err_out:
4025	rtl8169_make_unusable_by_asic(desc);
4026	goto out;
4027}
4028
4029static void rtl8169_rx_clear(struct rtl8169_private *tp)
4030{
4031	unsigned int i;
4032
4033	for (i = 0; i < NUM_RX_DESC; i++) {
4034		if (tp->Rx_skbuff[i]) {
4035			rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i,
4036					    tp->RxDescArray + i);
4037		}
4038	}
4039}
4040
/*
 * Refill empty Rx ring slots in [start, end) with freshly allocated
 * buffers.  Slots that already hold an skb are skipped; allocation
 * failure stops the fill early.  Returns the number of slots visited
 * (cur - start), which callers add to dirty_rx.
 */
static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
			   u32 start, u32 end, gfp_t gfp)
{
	u32 cur;

	/* 'end - cur != 0' (rather than cur < end) is wrap-around safe
	 * for the free-running u32 ring counters. */
	for (cur = start; end - cur != 0; cur++) {
		struct sk_buff *skb;
		unsigned int i = cur % NUM_RX_DESC;

		WARN_ON((s32)(end - cur) < 0);

		if (tp->Rx_skbuff[i])
			continue;

		skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
					   tp->RxDescArray + i,
					   tp->rx_buf_sz, tp->align, gfp);
		if (!skb)
			break;

		tp->Rx_skbuff[i] = skb;
	}
	return cur - start;
}
4065
/* Flag a descriptor as the physical end of the Rx ring so the NIC
 * wraps back to the first descriptor after it. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
4070
4071static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4072{
4073	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
4074}
4075
/*
 * Initialize both descriptor rings: zero the bookkeeping arrays, fully
 * populate the Rx ring with buffers and mark its last descriptor.
 * Returns 0 on success, -ENOMEM (after freeing any partial fill) when
 * the Rx ring could not be fully populated.
 */
static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC)
		goto err_out;

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);

	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
4096
/*
 * Tear down one Tx descriptor: unmap its DMA buffer and zero both the
 * descriptor fields and the bookkeeping length.  The skb itself (if
 * any) is the caller's responsibility.
 */
static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len,
			 PCI_DMA_TODEVICE);
	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
4109
/*
 * Drop every pending Tx packet: walk the un-reclaimed window of the Tx
 * ring, unmap and free each in-flight buffer (counting it as dropped),
 * then reset the Tx ring counters.
 */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = tp->dirty_tx; i < tp->dirty_tx + NUM_TX_DESC; i++) {
		unsigned int entry = i % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		/* len != 0 marks a descriptor that holds a mapped buffer. */
		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb,
					     tp->TxDescArray + entry);
			/* Only the last fragment of a packet owns the skb. */
			if (skb) {
				dev_kfree_skb(skb);
				tx_skb->skb = NULL;
			}
			tp->dev->stats.tx_dropped++;
		}
	}
	tp->cur_tx = tp->dirty_tx = 0;
}
4133
/* Re-point the device's delayed work at 'task' and schedule it to run
 * after 4 jiffies. */
static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	PREPARE_DELAYED_WORK(&tp->task, task);
	schedule_delayed_work(&tp->task, 4);
}
4141
/*
 * Quiesce the device for a reset/reinit task: drain in-flight IRQ
 * handlers and NAPI polling, ack and mask chip interrupts, then restore
 * the interrupt mask and re-enable NAPI.
 */
static void rtl8169_wait_for_quiescence(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	synchronize_irq(dev->irq);

	/* Wait for any pending NAPI task to complete */
	napi_disable(&tp->napi);

	rtl8169_irq_mask_and_ack(ioaddr);

	tp->intr_mask = 0xffff;
	RTL_W16(IntrMask, tp->intr_event);
	napi_enable(&tp->napi);
}
4158
/*
 * Deferred full reinit (workqueue context): close and reopen the
 * interface under rtnl_lock.  If reopening fails the task reschedules
 * itself (rate-limited logging).
 */
static void rtl8169_reinit_task(struct work_struct *work)
{
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, task.work);
	struct net_device *dev = tp->dev;
	int ret;

	rtnl_lock();

	/* The interface may have been taken down since scheduling. */
	if (!netif_running(dev))
		goto out_unlock;

	rtl8169_wait_for_quiescence(dev);
	rtl8169_close(dev);

	ret = rtl8169_open(dev);
	if (unlikely(ret < 0)) {
		if (net_ratelimit())
			netif_err(tp, drv, dev,
				  "reinit failure (status = %d). Rescheduling\n",
				  ret);
		rtl8169_schedule_work(dev, rtl8169_reinit_task);
	}

out_unlock:
	rtnl_unlock();
}
4186
/*
 * Deferred reset (workqueue context): drain pending Rx, drop all
 * pending Tx, and restart the hardware once the Rx ring is fully
 * reclaimed.  If Rx buffers are still outstanding, reschedules itself.
 */
static void rtl8169_reset_task(struct work_struct *work)
{
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, task.work);
	struct net_device *dev = tp->dev;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	rtl8169_wait_for_quiescence(dev);

	/* ~(u32)0 budget = process everything, from process context. */
	rtl8169_rx_interrupt(dev, tp, tp->mmio_addr, ~(u32)0);
	rtl8169_tx_clear(tp);

	if (tp->dirty_rx == tp->cur_rx) {
		rtl8169_init_ring_indexes(tp);
		rtl_hw_start(dev);
		netif_wake_queue(dev);
		rtl8169_check_link_status(dev, tp, tp->mmio_addr);
	} else {
		if (net_ratelimit())
			netif_emerg(tp, intr, dev, "Rx buffers shortage\n");
		rtl8169_schedule_work(dev, rtl8169_reset_task);
	}

out_unlock:
	rtnl_unlock();
}
4217
/*
 * ndo_tx_timeout handler: reset the chip immediately, then defer the
 * full recovery to the workqueue so any in-flight IRQ can land first.
 */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_hw_reset(tp->mmio_addr);

	/* Let's wait a bit while any (async) irq lands on */
	rtl8169_schedule_work(dev, rtl8169_reset_task);
}
4227
/*
 * Map an skb's paged fragments into consecutive Tx descriptors
 * (starting after tp->cur_tx).  The last fragment's descriptor gets
 * LastFrag and owns the skb pointer for later freeing.
 * Returns the number of fragment descriptors used (0 for linear skbs).
 *
 * NOTE(review): dma_map_single() failures are not checked here —
 * confirm against the DMA-API debugging requirements.
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 opts1)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc * uninitialized_var(txd);

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = frag->size;
		addr = ((void *) page_address(frag->page)) + frag->page_offset;
		mapping = dma_map_single(&tp->pci_dev->dev, addr, len,
					 PCI_DMA_TODEVICE);

		/* anti gcc 2.95.3 bugware (sic) */
		status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;
}
4266
/*
 * Compute the Tx descriptor offload bits for an skb: large-send (TSO)
 * with the MSS encoded, or hardware IP+TCP/UDP checksum, or 0 when no
 * offload applies.
 */
static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
{
	if (dev->features & NETIF_F_TSO) {
		u32 mss = skb_shinfo(skb)->gso_size;

		if (mss)
			return LargeSend | ((mss & MSSMask) << MSSShift);
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return IPCS | TCPCS;
		else if (ip->protocol == IPPROTO_UDP)
			return IPCS | UDPCS;
		WARN_ON(1);	/* we need a WARN() */
	}
	return 0;
}
4286
/*
 * ndo_start_xmit handler.  Maps the skb (fragments first, then the
 * linear head) into the Tx ring and kicks the NIC via TxPoll.
 * The memory barriers and the order "fill descriptor fields, wmb(),
 * then set DescOwn in opts1" are essential — do not reorder.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring is full.
 */
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts1;

	/* Should not happen: the queue is stopped before the ring fills. */
	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop;
	}

	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop;

	opts1 = DescOwn | rtl8169_tso_csum(skb, dev);

	frags = rtl8169_xmit_frags(tp, skb, opts1);
	if (frags) {
		len = skb_headlen(skb);
		opts1 |= FirstFrag;
	} else {
		/* Single-descriptor packet: this entry owns the skb. */
		len = skb->len;
		opts1 |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
				 PCI_DMA_TODEVICE);

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);
	txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));

	/* Descriptor fields must be visible before DescOwn is set. */
	wmb();

	/* anti gcc 2.95.3 bugware (sic) */
	status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	tp->cur_tx += frags + 1;

	wmb();

	RTL_W8(TxPoll, NPQ);	/* set polling bit */

	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		/* Re-check after the stop: the completion path may have
		 * freed descriptors in between. */
		smp_rmb();
		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

err_stop:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
4351
/*
 * Handle a SYSErr (PCI error) interrupt: log the PCI command/status
 * registers, clear the sticky error bits, optionally disable 64-bit DAC
 * addressing if the error hit at boot, then reset and schedule a full
 * reinit.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* Write-one-to-clear the sticky error status bits. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		netif_info(tp, intr, dev, "disabling PCI DAC\n");
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(ioaddr);

	rtl8169_schedule_work(dev, rtl8169_reinit_task);
}
4397
/*
 * Reclaim completed Tx descriptors: walk from dirty_tx toward cur_tx,
 * stopping at the first descriptor the NIC still owns.  Updates the
 * byte/packet counters, frees skbs on their LastFrag descriptor, and
 * wakes the queue when space opens up.  The smp barriers pair with
 * rtl8169_start_xmit() — do not reorder.
 */
static void rtl8169_tx_interrupt(struct net_device *dev,
				 struct rtl8169_private *tp,
				 void __iomem *ioaddr)
{
	unsigned int dirty_tx, tx_left;

	dirty_tx = tp->dirty_tx;
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 len = tx_skb->len;
		u32 status;

		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		dev->stats.tx_bytes += len;
		dev->stats.tx_packets++;

		rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);

		/* Only the packet's last descriptor owns the skb. */
		if (status & LastFrag) {
			dev_kfree_skb(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (netif_queue_stopped(dev) &&
		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		smp_rmb();
		if (tp->cur_tx != dirty_tx)
			RTL_W8(TxPoll, NPQ);
	}
}
4450
4451static inline int rtl8169_fragmented_frame(u32 status)
4452{
4453	return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
4454}
4455
4456static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
4457{
4458	u32 status = opts1 & RxProtoMask;
4459
4460	if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
4461	    ((status == RxProtoUDP) && !(opts1 & UDPFail)))
4462		skb->ip_summed = CHECKSUM_UNNECESSARY;
4463	else
4464		skb->ip_summed = CHECKSUM_NONE;
4465}
4466
/*
 * Copy-break optimization: for packets smaller than rx_copybreak, copy
 * the data into a fresh small skb so the large ring buffer can stay
 * mapped and be reused.  On success *sk_buff points at the new skb and
 * true is returned; otherwise the original skb is untouched and false
 * is returned (caller then detaches the ring buffer instead).
 */
static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
				       struct rtl8169_private *tp, int pkt_size,
				       dma_addr_t addr)
{
	struct sk_buff *skb;
	bool done = false;

	if (pkt_size >= rx_copybreak)
		goto out;

	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (!skb)
		goto out;

	/* Hand the buffer to the CPU for the copy; the caller syncs it
	 * back to the device afterwards. */
	dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size,
				PCI_DMA_FROMDEVICE);
	skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
	*sk_buff = skb;
	done = true;
out:
	return done;
}
4489
4490/*
4491 * Warning : rtl8169_rx_interrupt() might be called :
4492 * 1) from NAPI (softirq) context
4493 *	(polling = 1 : we should call netif_receive_skb())
4494 * 2) from process context (rtl8169_reset_task())
4495 *	(polling = 0 : we must call netif_rx() instead)
4496 */
4497static int rtl8169_rx_interrupt(struct net_device *dev,
4498				struct rtl8169_private *tp,
4499				void __iomem *ioaddr, u32 budget)
4500{
4501	unsigned int cur_rx, rx_left;
4502	unsigned int delta, count;
4503	int polling = (budget != ~(u32)0) ? 1 : 0;
4504
4505	cur_rx = tp->cur_rx;
4506	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
4507	rx_left = min(rx_left, budget);
4508
4509	for (; rx_left > 0; rx_left--, cur_rx++) {
4510		unsigned int entry = cur_rx % NUM_RX_DESC;
4511		struct RxDesc *desc = tp->RxDescArray + entry;
4512		u32 status;
4513
4514		rmb();
4515		status = le32_to_cpu(desc->opts1);
4516
4517		if (status & DescOwn)
4518			break;
4519		if (unlikely(status & RxRES)) {
4520			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
4521				   status);
4522			dev->stats.rx_errors++;
4523			if (status & (RxRWT | RxRUNT))
4524				dev->stats.rx_length_errors++;
4525			if (status & RxCRC)
4526				dev->stats.rx_crc_errors++;
4527			if (status & RxFOVF) {
4528				rtl8169_schedule_work(dev, rtl8169_reset_task);
4529				dev->stats.rx_fifo_errors++;
4530			}
4531			rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
4532		} else {
4533			struct sk_buff *skb = tp->Rx_skbuff[entry];
4534			dma_addr_t addr = le64_to_cpu(desc->addr);
4535			int pkt_size = (status & 0x00001FFF) - 4;
4536			struct pci_dev *pdev = tp->pci_dev;
4537
4538			/*
4539			 * The driver does not support incoming fragmented
4540			 * frames. They are seen as a symptom of over-mtu
4541			 * sized frames.
4542			 */
4543			if (unlikely(rtl8169_fragmented_frame(status))) {
4544				dev->stats.rx_dropped++;
4545				dev->stats.rx_length_errors++;
4546				rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
4547				continue;
4548			}
4549
4550			if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
4551				dma_sync_single_for_device(&pdev->dev, addr,
4552					pkt_size, PCI_DMA_FROMDEVICE);
4553				rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
4554			} else {
4555				dma_unmap_single(&pdev->dev, addr, tp->rx_buf_sz,
4556						 PCI_DMA_FROMDEVICE);
4557				tp->Rx_skbuff[entry] = NULL;
4558			}
4559
4560			rtl8169_rx_csum(skb, status);
4561			skb_put(skb, pkt_size);
4562			skb->protocol = eth_type_trans(skb, dev);
4563
4564			if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) {
4565				if (likely(polling))
4566					netif_receive_skb(skb);
4567				else
4568					netif_rx(skb);
4569			}
4570
4571			dev->stats.rx_bytes += pkt_size;
4572			dev->stats.rx_packets++;
4573		}
4574
4575		if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
4576		    (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
4577			desc->opts2 = 0;
4578			cur_rx++;
4579		}
4580	}
4581
4582	count = cur_rx - tp->cur_rx;
4583	tp->cur_rx = cur_rx;
4584
4585	delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC);
4586	if (!delta && count)
4587		netif_info(tp, intr, dev, "no Rx buffer allocated\n");
4588	tp->dirty_rx += delta;
4589
4590	if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx)
4591		netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
4592
4593	return count;
4594}
4595
/*
 * Hard interrupt handler (shared for INTx and MSI).  Handles error events
 * inline and defers Rx/Tx processing to NAPI via rtl8169_poll().
 * Returns IRQ_HANDLED if at least one chip event was serviced.
 */
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int handled = 0;
	int status;

	/* loop handling interrupts until we have no new ones or
	 * we hit an invalid/hotplug case (reads of 0xffff mean the
	 * device has been removed).
	 */
	status = RTL_R16(IntrStatus);
	while (status && status != 0xffff) {
		handled = 1;

		/* Handle all of the error cases first. These will reset
		 * the chip, so just exit the loop.
		 */
		if (unlikely(!netif_running(dev))) {
			/* Interface is going down: quiesce the chip. */
			rtl8169_asic_down(ioaddr);
			break;
		}

		/* Rx FIFO overflow on the 8168b (VER_11) needs a full chip
		 * reset; route it through the tx_timeout/reset path. */
		if (unlikely(status & RxFIFOOver) &&
		(tp->mac_version == RTL_GIGA_MAC_VER_11)) {
			netif_stop_queue(dev);
			rtl8169_tx_timeout(dev);
			break;
		}

		if (unlikely(status & SYSErr)) {
			/* PCI(e) system error: recover via dedicated path. */
			rtl8169_pcierr_interrupt(dev);
			break;
		}

		if (status & LinkChg)
			rtl8169_check_link_status(dev, tp, ioaddr);

		/* We need to see the latest version of tp->intr_mask to
		 * avoid ignoring an MSI interrupt and having to wait for
		 * another event which may never come.
		 * Pairs with the wmb() in rtl8169_poll().
		 */
		smp_rmb();
		if (status & tp->intr_mask & tp->napi_event) {
			/* Mask the NAPI-handled sources until the poll
			 * routine re-enables them, then hand off. */
			RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
			tp->intr_mask = ~tp->napi_event;

			if (likely(napi_schedule_prep(&tp->napi)))
				__napi_schedule(&tp->napi);
			else
				netif_info(tp, intr, dev,
					   "interrupt %04x in poll\n", status);
		}

		/* We only get a new MSI interrupt when all active irq
		 * sources on the chip have been acknowledged. So, ack
		 * everything we've seen and check if new sources have become
		 * active to avoid blocking all interrupts from the chip.
		 */
		RTL_W16(IntrStatus,
			(status & RxFIFOOver) ? (status | RxOverflow) : status);
		status = RTL_R16(IntrStatus);
	}

	return IRQ_RETVAL(handled);
}
4662
/*
 * NAPI poll callback: services up to @budget received frames, reaps
 * completed Tx descriptors, and re-enables chip interrupts once the
 * Rx work fits within the budget.  Returns the number of Rx packets
 * processed.
 */
static int rtl8169_poll(struct napi_struct *napi, int budget)
{
	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int work_done;

	work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget);
	rtl8169_tx_interrupt(dev, tp, ioaddr);

	if (work_done < budget) {
		napi_complete(napi);

		/* We need to force the visibility of tp->intr_mask
		 * for other CPUs, as we can lose an MSI interrupt
		 * and potentially wait for a retransmit timeout if we don't.
		 * The posted write to IntrMask is safe, as it will
		 * eventually make it to the chip and we won't lose anything
		 * until it does.
		 * (The wmb() pairs with the smp_rmb() in the irq handler.)
		 */
		tp->intr_mask = 0xffff;
		wmb();
		RTL_W16(IntrMask, tp->intr_event);
	}

	return work_done;
}
4690
4691static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
4692{
4693	struct rtl8169_private *tp = netdev_priv(dev);
4694
4695	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
4696		return;
4697
4698	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
4699	RTL_W32(RxMissed, 0);
4700}
4701
/*
 * Bring the interface fully down: stop the queue and NAPI, quiesce the
 * chip, wait out any in-flight IRQ/xmit activity, then release the Tx/Rx
 * ring buffers.  Called from dev->close and dev->change_mtu paths.
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int intrmask;

	rtl8169_delete_timer(dev);

	netif_stop_queue(dev);

	napi_disable(&tp->napi);

core_down:
	spin_lock_irq(&tp->lock);

	/* Disable interrupts and stop the Tx/Rx engines. */
	rtl8169_asic_down(ioaddr);

	/* Harvest the missed-packet counter before the chip is reset. */
	rtl8169_rx_missed(dev, ioaddr);

	spin_unlock_irq(&tp->lock);

	synchronize_irq(dev->irq);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();

	/*
	 * And now for the 50k$ question: are IRQ disabled or not ?
	 *
	 * Two paths lead here:
	 * 1) dev->close
	 *    -> netif_running() is available to sync the current code and the
	 *       IRQ handler. See rtl8169_interrupt for details.
	 * 2) dev->change_mtu
	 *    -> rtl8169_poll can not be issued again and re-enable the
	 *       interruptions. Let's simply issue the IRQ down sequence again.
	 *
	 * No loop if hotpluged or major error (0xffff).
	 */
	intrmask = RTL_R16(IntrMask);
	if (intrmask && (intrmask != 0xffff))
		goto core_down;

	/* The chip is quiet now; it is safe to free the ring buffers. */
	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);
}
4749
/*
 * ndo_stop hook: snapshot the hardware counters, shut the chip down,
 * release the IRQ and free the DMA descriptor rings.  The pm_runtime
 * get/put pair makes sure the device is powered while we touch it.
 * Always returns 0.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	pm_runtime_get_sync(&pdev->dev);

	/* update counters before going down */
	rtl8169_update_counters(dev);

	rtl8169_down(dev);

	/* rtl8169_down() quiesced the chip, so the IRQ is now idle. */
	free_irq(dev->irq, dev);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	/* NULL the ring pointers so the runtime-PM hooks see "closed". */
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
4775
4776static void rtl_set_rx_mode(struct net_device *dev)
4777{
4778	struct rtl8169_private *tp = netdev_priv(dev);
4779	void __iomem *ioaddr = tp->mmio_addr;
4780	unsigned long flags;
4781	u32 mc_filter[2];	/* Multicast hash filter */
4782	int rx_mode;
4783	u32 tmp = 0;
4784
4785	if (dev->flags & IFF_PROMISC) {
4786		/* Unconditionally log net taps. */
4787		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
4788		rx_mode =
4789		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
4790		    AcceptAllPhys;
4791		mc_filter[1] = mc_filter[0] = 0xffffffff;
4792	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
4793		   (dev->flags & IFF_ALLMULTI)) {
4794		/* Too many to filter perfectly -- accept all multicasts. */
4795		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4796		mc_filter[1] = mc_filter[0] = 0xffffffff;
4797	} else {
4798		struct netdev_hw_addr *ha;
4799
4800		rx_mode = AcceptBroadcast | AcceptMyPhys;
4801		mc_filter[1] = mc_filter[0] = 0;
4802		netdev_for_each_mc_addr(ha, dev) {
4803			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4804			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4805			rx_mode |= AcceptMulticast;
4806		}
4807	}
4808
4809	spin_lock_irqsave(&tp->lock, flags);
4810
4811	tmp = rtl8169_rx_config | rx_mode |
4812	      (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
4813
4814	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
4815		u32 data = mc_filter[0];
4816
4817		mc_filter[0] = swab32(mc_filter[1]);
4818		mc_filter[1] = swab32(data);
4819	}
4820
4821	RTL_W32(MAR0 + 4, mc_filter[1]);
4822	RTL_W32(MAR0 + 0, mc_filter[0]);
4823
4824	RTL_W32(RxConfig, tmp);
4825
4826	spin_unlock_irqrestore(&tp->lock, flags);
4827}
4828
4829/**
4830 *  rtl8169_get_stats - Get rtl8169 read/write statistics
4831 *  @dev: The Ethernet Device to get statistics for
4832 *
4833 *  Get TX/RX statistics for rtl8169
4834 */
4835static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
4836{
4837	struct rtl8169_private *tp = netdev_priv(dev);
4838	void __iomem *ioaddr = tp->mmio_addr;
4839	unsigned long flags;
4840
4841	if (netif_running(dev)) {
4842		spin_lock_irqsave(&tp->lock, flags);
4843		rtl8169_rx_missed(dev, ioaddr);
4844		spin_unlock_irqrestore(&tp->lock, flags);
4845	}
4846
4847	return &dev->stats;
4848}
4849
/*
 * Detach a running interface from the stack ahead of suspend/shutdown;
 * a no-op when the interface is already down.
 */
static void rtl8169_net_suspend(struct net_device *dev)
{
	if (netif_running(dev)) {
		netif_device_detach(dev);
		netif_stop_queue(dev);
	}
}
4858
4859#ifdef CONFIG_PM
4860
/* System-sleep suspend callback: detach the interface. Always 0. */
static int rtl8169_suspend(struct device *device)
{
	struct net_device *dev = pci_get_drvdata(to_pci_dev(device));

	rtl8169_net_suspend(dev);

	return 0;
}
4870
/*
 * Common resume tail: reattach the interface and schedule the reset
 * task to re-initialize the hardware from process context.
 */
static void __rtl8169_resume(struct net_device *dev)
{
	netif_device_attach(dev);
	rtl8169_schedule_work(dev, rtl8169_reset_task);
}
4876
/*
 * System-sleep resume callback: re-program the PHY and, if the
 * interface was up, reattach it and schedule a chip re-init.
 * Always returns 0.
 */
static int rtl8169_resume(struct device *device)
{
	struct net_device *dev = pci_get_drvdata(to_pci_dev(device));
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_phy(dev, tp);

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
4890
/*
 * Runtime-PM suspend: save the current Wake-on-LAN configuration,
 * arm the chip to wake on any event, and detach the interface.
 * A closed device (no Tx ring allocated) needs no work.
 */
static int rtl8169_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	/* TxDescArray is only non-NULL while the device is open. */
	if (!tp->TxDescArray)
		return 0;

	spin_lock_irq(&tp->lock);
	/* Remember the user's WoL setting; restored on runtime resume. */
	tp->saved_wolopts = __rtl8169_get_wol(tp);
	__rtl8169_set_wol(tp, WAKE_ANY);
	spin_unlock_irq(&tp->lock);

	rtl8169_net_suspend(dev);

	return 0;
}
4909
/*
 * Runtime-PM resume: restore the Wake-on-LAN configuration saved by
 * rtl8169_runtime_suspend(), re-program the PHY and reattach the
 * interface.  A closed device needs no work.
 */
static int rtl8169_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	/* TxDescArray is only non-NULL while the device is open. */
	if (!tp->TxDescArray)
		return 0;

	spin_lock_irq(&tp->lock);
	__rtl8169_set_wol(tp, tp->saved_wolopts);
	tp->saved_wolopts = 0;
	spin_unlock_irq(&tp->lock);

	rtl8169_init_phy(dev, tp);

	__rtl8169_resume(dev);

	return 0;
}
4930
4931static int rtl8169_runtime_idle(struct device *device)
4932{
4933	struct pci_dev *pdev = to_pci_dev(device);
4934	struct net_device *dev = pci_get_drvdata(pdev);
4935	struct rtl8169_private *tp = netdev_priv(dev);
4936
4937	if (!tp->TxDescArray)
4938		return 0;
4939
4940	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
4941	return -EBUSY;
4942}
4943
/* System-sleep (suspend/hibernate) and runtime power-management hooks.
 * The same pair of callbacks serves all system-sleep transitions. */
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend = rtl8169_suspend,
	.resume = rtl8169_resume,
	.freeze = rtl8169_suspend,
	.thaw = rtl8169_resume,
	.poweroff = rtl8169_suspend,
	.restore = rtl8169_resume,
	.runtime_suspend = rtl8169_runtime_suspend,
	.runtime_resume = rtl8169_runtime_resume,
	.runtime_idle = rtl8169_runtime_idle,
};

#define RTL8169_PM_OPS	(&rtl8169_pm_ops)
4957
4958#else /* !CONFIG_PM */
4959
4960#define RTL8169_PM_OPS	NULL
4961
4962#endif /* !CONFIG_PM */
4963
/*
 * PCI shutdown hook (reboot/poweroff): detach the interface, restore the
 * factory MAC address, quiesce the chip and, on power-off, leave the
 * receiver enabled for Wake-on-LAN before dropping to D3hot.
 */
static void rtl_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	rtl8169_net_suspend(dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	spin_lock_irq(&tp->lock);

	rtl8169_asic_down(ioaddr);

	spin_unlock_irq(&tp->lock);

	if (system_state == SYSTEM_POWER_OFF) {
		/* WoL fails with some 8168 when the receiver is disabled. */
		if (tp->features & RTL_FEATURE_WOL) {
			pci_clear_master(pdev);

			RTL_W8(ChipCmd, CmdRxEnb);
			/* PCI commit */
			RTL_R8(ChipCmd);
		}

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
4995
/* PCI driver glue: probe/remove/shutdown entry points and PM callbacks. */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,
	.probe		= rtl8169_init_one,
	.remove		= __devexit_p(rtl8169_remove_one),
	.shutdown	= rtl_shutdown,
	.driver.pm	= RTL8169_PM_OPS,
};
5004
/* Module entry point: register the PCI driver with the core. */
static int __init rtl8169_init_module(void)
{
	return pci_register_driver(&rtl8169_pci_driver);
}
5009
/* Module exit point: unregister the PCI driver. */
static void __exit rtl8169_cleanup_module(void)
{
	pci_unregister_driver(&rtl8169_pci_driver);
}
5014
5015module_init(rtl8169_init_module);
5016module_exit(rtl8169_cleanup_module);
5017