1/*
2   sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3
4   Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5   Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6   Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7
8   Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9   genuine driver.
10
11   This software may be used and distributed according to the terms of
12   the GNU General Public License (GPL), incorporated herein by reference.
13   Drivers based on or derived from this code fall under the GPL and must
14   retain the authorship, copyright and license notice.  This file is not
15   a complete program and may only be used when the entire operating
16   system is licensed under the GPL.
17
18   See the file COPYING in this distribution for more information.
19
20 */
21
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/netdevice.h>
25#include <linux/rtnetlink.h>
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/pci.h>
29#include <linux/mii.h>
30#include <linux/delay.h>
31#include <linux/crc32.h>
32#include <linux/dma-mapping.h>
33#include <asm/irq.h>
34
35#define net_drv(p, arg...)	if (netif_msg_drv(p)) \
36					printk(arg)
37#define net_probe(p, arg...)	if (netif_msg_probe(p)) \
38					printk(arg)
39#define net_link(p, arg...)	if (netif_msg_link(p)) \
40					printk(arg)
41#define net_intr(p, arg...)	if (netif_msg_intr(p)) \
42					printk(arg)
43#define net_tx_err(p, arg...)	if (netif_msg_tx_err(p)) \
44					printk(arg)
45
46#define PHY_MAX_ADDR		32
47#define PHY_ID_ANY		0x1f
48#define MII_REG_ANY		0x1f
49
50#ifdef CONFIG_SIS190_NAPI
51#define NAPI_SUFFIX	"-NAPI"
52#else
53#define NAPI_SUFFIX	""
54#endif
55
56#define DRV_VERSION		"1.2" NAPI_SUFFIX
57#define DRV_NAME		"sis190"
58#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
59#define PFX DRV_NAME ": "
60
61#ifdef CONFIG_SIS190_NAPI
62#define sis190_rx_skb			netif_receive_skb
63#define sis190_rx_quota(count, quota)	min(count, quota)
64#else
65#define sis190_rx_skb			netif_rx
66#define sis190_rx_quota(count, quota)	count
67#endif
68
69#define MAC_ADDR_LEN		6
70
71#define NUM_TX_DESC		64	/* [8..1024] */
72#define NUM_RX_DESC		64	/* [8..8192] */
73#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
74#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
75#define RX_BUF_SIZE		1536
76#define RX_BUF_MASK		0xfff8
77
78#define SIS190_REGS_SIZE	0x80
79#define SIS190_TX_TIMEOUT	(6*HZ)
80#define SIS190_PHY_TIMEOUT	(10*HZ)
81#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
82				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
83				 NETIF_MSG_IFDOWN)
84
85/* Enhanced PHY access register bit definitions */
86#define EhnMIIread		0x0000
87#define EhnMIIwrite		0x0020
88#define EhnMIIdataShift		16
89#define EhnMIIpmdShift		6	/* 7016 only */
90#define EhnMIIregShift		11
91#define EhnMIIreq		0x0010
92#define EhnMIInotDone		0x0010
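/*
 * A GMIIControl command word is assembled from the fields above: the
 * 16-bit data goes in bits 31:16, the register number in bits 15:11,
 * the PHY address in bits 10:6, EhnMIIwrite (bit 5) selects a write and
 * EhnMIIreq (bit 4) starts the access; the same bit reads back as
 * EhnMIInotDone while the access is still in flight (see __mdio_cmd).
 */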
93
94/* Write/read MMIO register */
95#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
96#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
97#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
98#define SIS_R8(reg)		readb (ioaddr + (reg))
99#define SIS_R16(reg)		readw (ioaddr + (reg))
100#define SIS_R32(reg)		readl (ioaddr + (reg))
101
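/*
 * SIS_PCI_COMMIT reads a register back so that the preceding posted
 * MMIO writes are flushed to the chip before execution continues.
 */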
102#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
103
104enum sis190_registers {
105	TxControl		= 0x00,
106	TxDescStartAddr		= 0x04,
107	rsv0			= 0x08,	// reserved
108	TxSts			= 0x0c,	// unused (Control/Status)
109	RxControl		= 0x10,
110	RxDescStartAddr		= 0x14,
111	rsv1			= 0x18,	// reserved
112	RxSts			= 0x1c,	// unused
113	IntrStatus		= 0x20,
114	IntrMask		= 0x24,
115	IntrControl		= 0x28,
	IntrTimer		= 0x2c,	// unused (Interrupt Timer)
117	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
118	rsv2			= 0x34,	// reserved
119	ROMControl		= 0x38,
120	ROMInterface		= 0x3c,
121	StationControl		= 0x40,
122	GMIIControl		= 0x44,
123	GIoCR			= 0x48, // unused (GMAC IO Compensation)
124	GIoCtrl			= 0x4c, // unused (GMAC IO Control)
125	TxMacControl		= 0x50,
126	TxLimit			= 0x54, // unused (Tx MAC Timer/TryLimit)
127	RGDelay			= 0x58, // unused (RGMII Tx Internal Delay)
128	rsv3			= 0x5c, // reserved
129	RxMacControl		= 0x60,
130	RxMacAddr		= 0x62,
131	RxHashTable		= 0x68,
132	// Undocumented		= 0x6c,
133	RxWolCtrl		= 0x70,
134	RxWolData		= 0x74, // unused (Rx WOL Data Access)
135	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
136	rsv4			= 0x7c, // reserved
137};
138
139enum sis190_register_content {
140	/* IntrStatus */
141	SoftInt			= 0x40000000,	// unused
142	Timeup			= 0x20000000,	// unused
143	PauseFrame		= 0x00080000,	// unused
144	MagicPacket		= 0x00040000,	// unused
145	WakeupFrame		= 0x00020000,	// unused
146	LinkChange		= 0x00010000,
147	RxQEmpty		= 0x00000080,
148	RxQInt			= 0x00000040,
149	TxQ1Empty		= 0x00000020,	// unused
150	TxQ1Int			= 0x00000010,
151	TxQ0Empty		= 0x00000008,	// unused
152	TxQ0Int			= 0x00000004,
153	RxHalt			= 0x00000002,
154	TxHalt			= 0x00000001,
155
156	/* {Rx/Tx}CmdBits */
157	CmdReset		= 0x10,
158	CmdRxEnb		= 0x08,		// unused
159	CmdTxEnb		= 0x01,
160	RxBufEmpty		= 0x01,		// unused
161
162	/* Cfg9346Bits */
163	Cfg9346_Lock		= 0x00,		// unused
164	Cfg9346_Unlock		= 0xc0,		// unused
165
166	/* RxMacControl */
167	AcceptErr		= 0x20,		// unused
168	AcceptRunt		= 0x10,		// unused
169	AcceptBroadcast		= 0x0800,
170	AcceptMulticast		= 0x0400,
171	AcceptMyPhys		= 0x0200,
172	AcceptAllPhys		= 0x0100,
173
174	/* RxConfigBits */
175	RxCfgFIFOShift		= 13,
176	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?
177
178	/* TxConfigBits */
179	TxInterFrameGapShift	= 24,
180	TxDMAShift		= 8, /* DMA burst value (0-7) is shift this many bits */
181
182	LinkStatus		= 0x02,		// unused
183	FullDup			= 0x01,		// unused
184
185	/* TBICSRBit */
186	TBILinkOK		= 0x02000000,	// unused
187};
188
189struct TxDesc {
190	__le32 PSize;
191	__le32 status;
192	__le32 addr;
193	__le32 size;
194};
195
196struct RxDesc {
197	__le32 PSize;
198	__le32 status;
199	__le32 addr;
200	__le32 size;
201};
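/*
 * Tx and Rx descriptors share the same 16-byte little-endian layout.
 * The chip owns a descriptor while OWNbit is set in ->status and clears
 * it on completion; INTbit requests an interrupt and RingEnd in ->size
 * marks the last descriptor of a ring (see _DescStatusBit below).
 */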
202
203enum _DescStatusBit {
204	/* _Desc.status */
205	OWNbit		= 0x80000000, // RXOWN/TXOWN
206	INTbit		= 0x40000000, // RXINT/TXINT
207	CRCbit		= 0x00020000, // CRCOFF/CRCEN
208	PADbit		= 0x00010000, // PREADD/PADEN
209	/* _Desc.size */
210	RingEnd		= 0x80000000,
211	/* TxDesc.status */
212	LSEN		= 0x08000000, // TSO ? -- FR
213	IPCS		= 0x04000000,
214	TCPCS		= 0x02000000,
215	UDPCS		= 0x01000000,
216	BSTEN		= 0x00800000,
217	EXTEN		= 0x00400000,
218	DEFEN		= 0x00200000,
219	BKFEN		= 0x00100000,
220	CRSEN		= 0x00080000,
221	COLEN		= 0x00040000,
222	THOL3		= 0x30000000,
223	THOL2		= 0x20000000,
224	THOL1		= 0x10000000,
225	THOL0		= 0x00000000,
226	/* RxDesc.status */
227	IPON		= 0x20000000,
228	TCPON		= 0x10000000,
229	UDPON		= 0x08000000,
230	Wakup		= 0x00400000,
231	Magic		= 0x00200000,
232	Pause		= 0x00100000,
233	DEFbit		= 0x00200000,
234	BCAST		= 0x000c0000,
235	MCAST		= 0x00080000,
236	UCAST		= 0x00040000,
237	/* RxDesc.PSize */
238	TAGON		= 0x80000000,
239	RxDescCountMask	= 0x7f000000, // multi-desc pkt when > 1 ? -- FR
240	ABORT		= 0x00800000,
241	SHORT		= 0x00400000,
242	LIMIT		= 0x00200000,
243	MIIER		= 0x00100000,
244	OVRUN		= 0x00080000,
245	NIBON		= 0x00040000,
246	COLON		= 0x00020000,
247	CRCOK		= 0x00010000,
248	RxSizeMask	= 0x0000ffff
	/*
	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
	 * provide two (unused by Linux) Tx queues. No publicly available
	 * documentation, alas.
	 */
254};
255
256enum sis190_eeprom_access_register_bits {
257	EECS	= 0x00000001,	// unused
258	EECLK	= 0x00000002,	// unused
259	EEDO	= 0x00000008,	// unused
260	EEDI	= 0x00000004,	// unused
261	EEREQ	= 0x00000080,
262	EEROP	= 0x00000200,
263	EEWOP	= 0x00000100	// unused
264};
265
266/* EEPROM Addresses */
267enum sis190_eeprom_address {
268	EEPROMSignature	= 0x00,
269	EEPROMCLK	= 0x01,	// unused
270	EEPROMInfo	= 0x02,
271	EEPROMMACAddr	= 0x03
272};
273
274enum sis190_feature {
275	F_HAS_RGMII	= 1,
276	F_PHY_88E1111	= 2,
277	F_PHY_BCM5461	= 4
278};
279
280struct sis190_private {
281	void __iomem *mmio_addr;
282	struct pci_dev *pci_dev;
283	struct net_device *dev;
284	struct net_device_stats stats;
285	spinlock_t lock;
286	u32 rx_buf_sz;
287	u32 cur_rx;
288	u32 cur_tx;
289	u32 dirty_rx;
290	u32 dirty_tx;
291	dma_addr_t rx_dma;
292	dma_addr_t tx_dma;
293	struct RxDesc *RxDescRing;
294	struct TxDesc *TxDescRing;
295	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
296	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
297	struct work_struct phy_task;
298	struct timer_list timer;
299	u32 msg_enable;
300	struct mii_if_info mii_if;
301	struct list_head first_phy;
302	u32 features;
303};
304
305struct sis190_phy {
306	struct list_head list;
307	int phy_id;
308	u16 id[2];
309	u16 status;
310	u8  type;
311};
312
313enum sis190_phy_type {
314	UNKNOWN	= 0x00,
315	HOME	= 0x01,
316	LAN	= 0x02,
317	MIX	= 0x03
318};
319
static struct mii_chip_info {
	const char *name;
	u16 id[2];
	unsigned int type;
	u32 feature;
} mii_chip_table[] = {
326	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
327	{ "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
328	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
329	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
330	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
331	{ NULL, }
332};
333
334static const struct {
335	const char *name;
336} sis_chip_info[] = {
337	{ "SiS 190 PCI Fast Ethernet adapter" },
338	{ "SiS 191 PCI Gigabit Ethernet adapter" },
339};
340
341static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
342	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
343	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
344	{ 0, },
345};
346
347MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
348
349static int rx_copybreak = 200;
350
351static struct {
352	u32 msg_enable;
353} debug = { -1 };
354
355MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
356module_param(rx_copybreak, int, 0);
357MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
358module_param_named(debug, debug.msg_enable, int, 0);
359MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
360MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
361MODULE_VERSION(DRV_VERSION);
362MODULE_LICENSE("GPL");
363
364static const u32 sis190_intr_mask =
365	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
366
367/*
368 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64-element hash table based on the Ethernet CRC.
370 */
371static const int multicast_filter_limit = 32;
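
/*
 * Each multicast address is hashed with ether_crc(): bit 5 of the CRC
 * selects one of the two 32-bit RxHashTable words and bits 4:0 select
 * the bit within it -- see sis190_set_rx_mode().
 */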
372
373static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
374{
375	unsigned int i;
376
377	SIS_W32(GMIIControl, ctl);
378
379	msleep(1);
380
381	for (i = 0; i < 100; i++) {
382		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
383			break;
384		msleep(1);
385	}
386
	if (i > 99)
		printk(KERN_ERR PFX "PHY command failed!\n");
389}
390
391static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
392{
393	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
394		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
395		(((u32) val) << EhnMIIdataShift));
396}
397
398static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
399{
400	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
401		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
402
403	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
404}
405
406static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
407{
408	struct sis190_private *tp = netdev_priv(dev);
409
410	mdio_write(tp->mmio_addr, phy_id, reg, val);
411}
412
413static int __mdio_read(struct net_device *dev, int phy_id, int reg)
414{
415	struct sis190_private *tp = netdev_priv(dev);
416
417	return mdio_read(tp->mmio_addr, phy_id, reg);
418}
419
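/*
 * Some MII status bits (e.g. the BMSR link status) are latched until
 * read. Reading twice returns the current state rather than the
 * latched one.
 */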
420static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
421{
422	mdio_read(ioaddr, phy_id, reg);
423	return mdio_read(ioaddr, phy_id, reg);
424}
425
426static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
427{
428	u16 data = 0xffff;
429	unsigned int i;
430
431	if (!(SIS_R32(ROMControl) & 0x0002))
432		return 0;
433
434	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
435
436	for (i = 0; i < 200; i++) {
437		if (!(SIS_R32(ROMInterface) & EEREQ)) {
438			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
439			break;
440		}
441		msleep(1);
442	}
443
444	return data;
445}
446
447static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
448{
449	SIS_W32(IntrMask, 0x00);
450	SIS_W32(IntrStatus, 0xffffffff);
451	SIS_PCI_COMMIT();
452}
453
454static void sis190_asic_down(void __iomem *ioaddr)
455{
456	/* Stop the chip's Tx and Rx DMA processes. */
457
458	SIS_W32(TxControl, 0x1a00);
459	SIS_W32(RxControl, 0x1a00);
460
461	sis190_irq_mask_and_ack(ioaddr);
462}
463
464static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
465{
466	desc->size |= cpu_to_le32(RingEnd);
467}
468
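/*
 * Hand a descriptor back to the chip. The wmb() ensures the size field
 * is visible before OWNbit transfers ownership to the hardware.
 */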
469static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
470{
471	u32 eor = le32_to_cpu(desc->size) & RingEnd;
472
473	desc->PSize = 0x0;
474	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
475	wmb();
476	desc->status = cpu_to_le32(OWNbit | INTbit);
477}
478
479static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
480				      u32 rx_buf_sz)
481{
482	desc->addr = cpu_to_le32(mapping);
483	sis190_give_to_asic(desc, rx_buf_sz);
484}
485
486static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
487{
488	desc->PSize = 0x0;
489	desc->addr = 0xdeadbeef;
490	desc->size &= cpu_to_le32(RingEnd);
491	wmb();
492	desc->status = 0x0;
493}
494
495static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
496			       struct RxDesc *desc, u32 rx_buf_sz)
497{
498	struct sk_buff *skb;
499	dma_addr_t mapping;
500	int ret = 0;
501
502	skb = dev_alloc_skb(rx_buf_sz);
503	if (!skb)
504		goto err_out;
505
506	*sk_buff = skb;
507
508	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
509				 PCI_DMA_FROMDEVICE);
510
511	sis190_map_to_asic(desc, mapping, rx_buf_sz);
512out:
513	return ret;
514
515err_out:
516	ret = -ENOMEM;
517	sis190_make_unusable_by_asic(desc);
518	goto out;
519}
520
521static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
522			  u32 start, u32 end)
523{
524	u32 cur;
525
526	for (cur = start; cur < end; cur++) {
527		int ret, i = cur % NUM_RX_DESC;
528
529		if (tp->Rx_skbuff[i])
530			continue;
531
532		ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
533					  tp->RxDescRing + i, tp->rx_buf_sz);
534		if (ret < 0)
535			break;
536	}
537	return cur - start;
538}
539
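/*
 * For packets shorter than rx_copybreak, copy the data into a freshly
 * allocated skb so the original receive buffer can be handed straight
 * back to the chip. Returns 0 when the copy was done, -1 otherwise.
 */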
540static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
541				     struct RxDesc *desc, int rx_buf_sz)
542{
543	int ret = -1;
544
545	if (pkt_size < rx_copybreak) {
546		struct sk_buff *skb;
547
548		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
549		if (skb) {
550			skb_reserve(skb, NET_IP_ALIGN);
551			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
552			*sk_buff = skb;
553			sis190_give_to_asic(desc, rx_buf_sz);
554			ret = 0;
555		}
556	}
557	return ret;
558}
559
560static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
561{
562#define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
563
564	if ((status & CRCOK) && !(status & ErrMask))
565		return 0;
566
567	if (!(status & CRCOK))
568		stats->rx_crc_errors++;
569	else if (status & OVRUN)
570		stats->rx_over_errors++;
571	else if (status & (SHORT | LIMIT))
572		stats->rx_length_errors++;
573	else if (status & (MIIER | NIBON | COLON))
574		stats->rx_frame_errors++;
575
576	stats->rx_errors++;
577	return -1;
578}
579
580static int sis190_rx_interrupt(struct net_device *dev,
581			       struct sis190_private *tp, void __iomem *ioaddr)
582{
583	struct net_device_stats *stats = &tp->stats;
584	u32 rx_left, cur_rx = tp->cur_rx;
585	u32 delta, count;
586
587	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
588	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
589
590	for (; rx_left > 0; rx_left--, cur_rx++) {
591		unsigned int entry = cur_rx % NUM_RX_DESC;
592		struct RxDesc *desc = tp->RxDescRing + entry;
593		u32 status;
594
595		if (desc->status & OWNbit)
596			break;
597
598		status = le32_to_cpu(desc->PSize);
599
600		// net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
601		//	 status);
602
603		if (sis190_rx_pkt_err(status, stats) < 0)
604			sis190_give_to_asic(desc, tp->rx_buf_sz);
605		else {
606			struct sk_buff *skb = tp->Rx_skbuff[entry];
607			int pkt_size = (status & RxSizeMask) - 4;
608			void (*pci_action)(struct pci_dev *, dma_addr_t,
609				size_t, int) = pci_dma_sync_single_for_device;
610
611			if (unlikely(pkt_size > tp->rx_buf_sz)) {
612				net_intr(tp, KERN_INFO
613					 "%s: (frag) status = %08x.\n",
614					 dev->name, status);
615				stats->rx_dropped++;
616				stats->rx_length_errors++;
617				sis190_give_to_asic(desc, tp->rx_buf_sz);
618				continue;
619			}
620
621			pci_dma_sync_single_for_cpu(tp->pci_dev,
622				le32_to_cpu(desc->addr), tp->rx_buf_sz,
623				PCI_DMA_FROMDEVICE);
624
625			if (sis190_try_rx_copy(&skb, pkt_size, desc,
626					       tp->rx_buf_sz)) {
627				pci_action = pci_unmap_single;
628				tp->Rx_skbuff[entry] = NULL;
629				sis190_make_unusable_by_asic(desc);
630			}
631
632			pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
633				   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
634
635			skb_put(skb, pkt_size);
636			skb->protocol = eth_type_trans(skb, dev);
637
638			sis190_rx_skb(skb);
639
640			dev->last_rx = jiffies;
641			stats->rx_packets++;
642			stats->rx_bytes += pkt_size;
643			if ((status & BCAST) == MCAST)
644				stats->multicast++;
645		}
646	}
647	count = cur_rx - tp->cur_rx;
648	tp->cur_rx = cur_rx;
649
650	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
651	if (!delta && count && netif_msg_intr(tp))
652		printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
653	tp->dirty_rx += delta;
654
655	if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
656		printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
657
658	return count;
659}
660
661static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
662				struct TxDesc *desc)
663{
664	unsigned int len;
665
666	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
667
668	pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
669
670	memset(desc, 0x00, sizeof(*desc));
671}
672
673static void sis190_tx_interrupt(struct net_device *dev,
674				struct sis190_private *tp, void __iomem *ioaddr)
675{
676	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * Snapshot whether the ring was full (queue stopped) before any
	 * descriptor is reclaimed, so that the queue is not woken up
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
681	unsigned int queue_stopped;
682
683	smp_rmb();
684	pending = tp->cur_tx - dirty_tx;
685	queue_stopped = (pending == NUM_TX_DESC);
686
687	for (; pending; pending--, dirty_tx++) {
688		unsigned int entry = dirty_tx % NUM_TX_DESC;
689		struct TxDesc *txd = tp->TxDescRing + entry;
690		struct sk_buff *skb;
691
692		if (le32_to_cpu(txd->status) & OWNbit)
693			break;
694
695		skb = tp->Tx_skbuff[entry];
696
697		tp->stats.tx_packets++;
698		tp->stats.tx_bytes += skb->len;
699
700		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
701		tp->Tx_skbuff[entry] = NULL;
702		dev_kfree_skb_irq(skb);
703	}
704
705	if (tp->dirty_tx != dirty_tx) {
706		tp->dirty_tx = dirty_tx;
707		smp_wmb();
708		if (queue_stopped)
709			netif_wake_queue(dev);
710	}
711}
712
713/*
714 * The interrupt handler does all of the Rx thread work and cleans up after
715 * the Tx thread.
716 */
717static irqreturn_t sis190_interrupt(int irq, void *__dev)
718{
719	struct net_device *dev = __dev;
720	struct sis190_private *tp = netdev_priv(dev);
721	void __iomem *ioaddr = tp->mmio_addr;
722	unsigned int handled = 0;
723	u32 status;
724
725	status = SIS_R32(IntrStatus);
726
727	if ((status == 0xffffffff) || !status)
728		goto out;
729
730	handled = 1;
731
732	if (unlikely(!netif_running(dev))) {
733		sis190_asic_down(ioaddr);
734		goto out;
735	}
736
737	SIS_W32(IntrStatus, status);
738
739	// net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
740
741	if (status & LinkChange) {
742		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
743		schedule_work(&tp->phy_task);
744	}
745
746	if (status & RxQInt)
747		sis190_rx_interrupt(dev, tp, ioaddr);
748
749	if (status & TxQ0Int)
750		sis190_tx_interrupt(dev, tp, ioaddr);
751out:
752	return IRQ_RETVAL(handled);
753}
754
755#ifdef CONFIG_NET_POLL_CONTROLLER
756static void sis190_netpoll(struct net_device *dev)
757{
758	struct sis190_private *tp = netdev_priv(dev);
759	struct pci_dev *pdev = tp->pci_dev;
760
761	disable_irq(pdev->irq);
762	sis190_interrupt(pdev->irq, dev);
763	enable_irq(pdev->irq);
764}
765#endif
766
767static void sis190_free_rx_skb(struct sis190_private *tp,
768			       struct sk_buff **sk_buff, struct RxDesc *desc)
769{
770	struct pci_dev *pdev = tp->pci_dev;
771
772	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
773			 PCI_DMA_FROMDEVICE);
774	dev_kfree_skb(*sk_buff);
775	*sk_buff = NULL;
776	sis190_make_unusable_by_asic(desc);
777}
778
779static void sis190_rx_clear(struct sis190_private *tp)
780{
781	unsigned int i;
782
783	for (i = 0; i < NUM_RX_DESC; i++) {
784		if (!tp->Rx_skbuff[i])
785			continue;
786		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
787	}
788}
789
790static void sis190_init_ring_indexes(struct sis190_private *tp)
791{
792	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
793}
794
795static int sis190_init_ring(struct net_device *dev)
796{
797	struct sis190_private *tp = netdev_priv(dev);
798
799	sis190_init_ring_indexes(tp);
800
801	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
802	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
803
804	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
805		goto err_rx_clear;
806
807	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
808
809	return 0;
810
811err_rx_clear:
812	sis190_rx_clear(tp);
813	return -ENOMEM;
814}
815
816static void sis190_set_rx_mode(struct net_device *dev)
817{
818	struct sis190_private *tp = netdev_priv(dev);
819	void __iomem *ioaddr = tp->mmio_addr;
820	unsigned long flags;
821	u32 mc_filter[2];	/* Multicast hash filter */
822	u16 rx_mode;
823
824	if (dev->flags & IFF_PROMISC) {
825		rx_mode =
826			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
827			AcceptAllPhys;
828		mc_filter[1] = mc_filter[0] = 0xffffffff;
829	} else if ((dev->mc_count > multicast_filter_limit) ||
830		   (dev->flags & IFF_ALLMULTI)) {
831		/* Too many to filter perfectly -- accept all multicasts. */
832		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
833		mc_filter[1] = mc_filter[0] = 0xffffffff;
834	} else {
835		struct dev_mc_list *mclist;
836		unsigned int i;
837
838		rx_mode = AcceptBroadcast | AcceptMyPhys;
839		mc_filter[1] = mc_filter[0] = 0;
840		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
841		     i++, mclist = mclist->next) {
842			int bit_nr =
843				ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
844			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
845			rx_mode |= AcceptMulticast;
846		}
847	}
848
849	spin_lock_irqsave(&tp->lock, flags);
850
851	SIS_W16(RxMacControl, rx_mode | 0x2);
852	SIS_W32(RxHashTable, mc_filter[0]);
853	SIS_W32(RxHashTable + 4, mc_filter[1]);
854
855	spin_unlock_irqrestore(&tp->lock, flags);
856}
857
858static void sis190_soft_reset(void __iomem *ioaddr)
859{
860	SIS_W32(IntrControl, 0x8000);
861	SIS_PCI_COMMIT();
862	msleep(1);
863	SIS_W32(IntrControl, 0x0);
864	sis190_asic_down(ioaddr);
865	msleep(1);
866}
867
868static void sis190_hw_start(struct net_device *dev)
869{
870	struct sis190_private *tp = netdev_priv(dev);
871	void __iomem *ioaddr = tp->mmio_addr;
872
873	sis190_soft_reset(ioaddr);
874
875	SIS_W32(TxDescStartAddr, tp->tx_dma);
876	SIS_W32(RxDescStartAddr, tp->rx_dma);
877
878	SIS_W32(IntrStatus, 0xffffffff);
879	SIS_W32(IntrMask, 0x0);
880	SIS_W32(GMIIControl, 0x0);
881	SIS_W32(TxMacControl, 0x60);
882	SIS_W16(RxMacControl, 0x02);
883	SIS_W32(RxHashTable, 0x0);
884	SIS_W32(0x6c, 0x0);
885	SIS_W32(RxWolCtrl, 0x0);
886	SIS_W32(RxWolData, 0x0);
887
888	SIS_PCI_COMMIT();
889
890	sis190_set_rx_mode(dev);
891
892	/* Enable all known interrupts by setting the interrupt mask. */
893	SIS_W32(IntrMask, sis190_intr_mask);
894
895	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
896	SIS_W32(RxControl, 0x1a1d);
897
898	netif_start_queue(dev);
899}
900
901static void sis190_phy_task(struct work_struct *work)
902{
903	struct sis190_private *tp =
904		container_of(work, struct sis190_private, phy_task);
905	struct net_device *dev = tp->dev;
906	void __iomem *ioaddr = tp->mmio_addr;
907	int phy_id = tp->mii_if.phy_id;
908	u16 val;
909
910	rtnl_lock();
911
912	if (!netif_running(dev))
913		goto out_unlock;
914
915	val = mdio_read(ioaddr, phy_id, MII_BMCR);
916	if (val & BMCR_RESET) {
917		mod_timer(&tp->timer, jiffies + HZ/10);
918	} else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
919		     BMSR_ANEGCOMPLETE)) {
920		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
921			 dev->name);
922		netif_carrier_off(dev);
923		mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
924		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
925	} else {
926		/* Rejoice ! */
927		struct {
928			int val;
929			u32 ctl;
930			const char *msg;
931		} reg31[] = {
932			{ LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
933				"1000 Mbps Full Duplex" },
934			{ LPA_1000XHALF | LPA_SLCT, 0x07000c00,
935				"1000 Mbps Half Duplex" },
936			{ LPA_100FULL, 0x04000800 | 0x00001000,
937				"100 Mbps Full Duplex" },
938			{ LPA_100HALF, 0x04000800,
939				"100 Mbps Half Duplex" },
940			{ LPA_10FULL, 0x04000400 | 0x00001000,
941				"10 Mbps Full Duplex" },
942			{ LPA_10HALF, 0x04000400,
943				"10 Mbps Half Duplex" },
944			{ 0, 0x04000400, "unknown" }
945 		}, *p;
946		u16 adv;
947
948		val = mdio_read(ioaddr, phy_id, 0x1f);
949		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
950
951		val = mdio_read(ioaddr, phy_id, MII_LPA);
952		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
953		net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
954			 dev->name, val, adv);
955
956		val &= adv;
957
958		for (p = reg31; p->val; p++) {
959			if ((val & p->val) == p->val)
960				break;
961		}
962
963		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
964
965		if ((tp->features & F_HAS_RGMII) &&
966		    (tp->features & F_PHY_BCM5461)) {
967			// Set Tx Delay in RGMII mode.
968			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
969			udelay(200);
970			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
971			p->ctl |= 0x03000000;
972		}
973
974		SIS_W32(StationControl, p->ctl);
975
976		if (tp->features & F_HAS_RGMII) {
977			SIS_W32(RGDelay, 0x0441);
978			SIS_W32(RGDelay, 0x0440);
979		}
980
981		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
982			 p->msg);
983		netif_carrier_on(dev);
984	}
985
986out_unlock:
987	rtnl_unlock();
988}
989
990static void sis190_phy_timer(unsigned long __opaque)
991{
992	struct net_device *dev = (struct net_device *)__opaque;
993	struct sis190_private *tp = netdev_priv(dev);
994
995	if (likely(netif_running(dev)))
996		schedule_work(&tp->phy_task);
997}
998
999static inline void sis190_delete_timer(struct net_device *dev)
1000{
1001	struct sis190_private *tp = netdev_priv(dev);
1002
1003	del_timer_sync(&tp->timer);
1004}
1005
1006static inline void sis190_request_timer(struct net_device *dev)
1007{
1008	struct sis190_private *tp = netdev_priv(dev);
1009	struct timer_list *timer = &tp->timer;
1010
1011	init_timer(timer);
1012	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1013	timer->data = (unsigned long)dev;
1014	timer->function = sis190_phy_timer;
1015	add_timer(timer);
1016}
1017
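/*
 * The chip masks off the low three bits of the descriptor size field
 * (RX_BUF_MASK), so round the Rx buffer size up to a multiple of 8 to
 * avoid it being silently truncated.
 */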
1018static void sis190_set_rxbufsize(struct sis190_private *tp,
1019				 struct net_device *dev)
1020{
1021	unsigned int mtu = dev->mtu;
1022
1023	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1024	/* RxDesc->size has a licence to kill the lower bits */
1025	if (tp->rx_buf_sz & 0x07) {
1026		tp->rx_buf_sz += 8;
1027		tp->rx_buf_sz &= RX_BUF_MASK;
1028	}
1029}
1030
1031static int sis190_open(struct net_device *dev)
1032{
1033	struct sis190_private *tp = netdev_priv(dev);
1034	struct pci_dev *pdev = tp->pci_dev;
1035	int rc = -ENOMEM;
1036
1037	sis190_set_rxbufsize(tp, dev);
1038
1039	/*
	 * Rx and Tx descriptors need 256-byte alignment.
1041	 * pci_alloc_consistent() guarantees a stronger alignment.
1042	 */
1043	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1044	if (!tp->TxDescRing)
1045		goto out;
1046
1047	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1048	if (!tp->RxDescRing)
1049		goto err_free_tx_0;
1050
1051	rc = sis190_init_ring(dev);
1052	if (rc < 0)
1053		goto err_free_rx_1;
1054
1055	INIT_WORK(&tp->phy_task, sis190_phy_task);
1056
1057	sis190_request_timer(dev);
1058
1059	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
1060	if (rc < 0)
1061		goto err_release_timer_2;
1062
1063	sis190_hw_start(dev);
1064out:
1065	return rc;
1066
1067err_release_timer_2:
1068	sis190_delete_timer(dev);
1069	sis190_rx_clear(tp);
1070err_free_rx_1:
1071	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1072		tp->rx_dma);
1073err_free_tx_0:
1074	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
1075		tp->tx_dma);
1076	goto out;
1077}
1078
1079static void sis190_tx_clear(struct sis190_private *tp)
1080{
1081	unsigned int i;
1082
1083	for (i = 0; i < NUM_TX_DESC; i++) {
1084		struct sk_buff *skb = tp->Tx_skbuff[i];
1085
1086		if (!skb)
1087			continue;
1088
1089		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1090		tp->Tx_skbuff[i] = NULL;
1091		dev_kfree_skb(skb);
1092
1093		tp->stats.tx_dropped++;
1094	}
1095	tp->cur_tx = tp->dirty_tx = 0;
1096}
1097
1098static void sis190_down(struct net_device *dev)
1099{
1100	struct sis190_private *tp = netdev_priv(dev);
1101	void __iomem *ioaddr = tp->mmio_addr;
1102	unsigned int poll_locked = 0;
1103
1104	sis190_delete_timer(dev);
1105
1106	netif_stop_queue(dev);
1107
1108	do {
1109		spin_lock_irq(&tp->lock);
1110
1111		sis190_asic_down(ioaddr);
1112
1113		spin_unlock_irq(&tp->lock);
1114
1115		synchronize_irq(dev->irq);
1116
1117		if (!poll_locked) {
1118			netif_poll_disable(dev);
1119			poll_locked++;
1120		}
1121
1122		synchronize_sched();
1123
1124	} while (SIS_R32(IntrMask));
1125
1126	sis190_tx_clear(tp);
1127	sis190_rx_clear(tp);
1128}
1129
1130static int sis190_close(struct net_device *dev)
1131{
1132	struct sis190_private *tp = netdev_priv(dev);
1133	struct pci_dev *pdev = tp->pci_dev;
1134
1135	sis190_down(dev);
1136
1137	free_irq(dev->irq, dev);
1138
1139	netif_poll_enable(dev);
1140
1141	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1142	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1143
1144	tp->TxDescRing = NULL;
1145	tp->RxDescRing = NULL;
1146
1147	return 0;
1148}
1149
1150static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1151{
1152	struct sis190_private *tp = netdev_priv(dev);
1153	void __iomem *ioaddr = tp->mmio_addr;
1154	u32 len, entry, dirty_tx;
1155	struct TxDesc *desc;
1156	dma_addr_t mapping;
1157
1158	if (unlikely(skb->len < ETH_ZLEN)) {
1159		if (skb_padto(skb, ETH_ZLEN)) {
1160			tp->stats.tx_dropped++;
1161			goto out;
1162		}
1163		len = ETH_ZLEN;
1164	} else {
1165		len = skb->len;
1166	}
1167
1168	entry = tp->cur_tx % NUM_TX_DESC;
1169	desc = tp->TxDescRing + entry;
1170
1171	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1172		netif_stop_queue(dev);
1173		net_tx_err(tp, KERN_ERR PFX
1174			   "%s: BUG! Tx Ring full when queue awake!\n",
1175			   dev->name);
1176		return NETDEV_TX_BUSY;
1177	}
1178
1179	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1180
1181	tp->Tx_skbuff[entry] = skb;
1182
1183	desc->PSize = cpu_to_le32(len);
1184	desc->addr = cpu_to_le32(mapping);
1185
1186	desc->size = cpu_to_le32(len);
1187	if (entry == (NUM_TX_DESC - 1))
1188		desc->size |= cpu_to_le32(RingEnd);
1189
1190	wmb();
1191
1192	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1193
1194	tp->cur_tx++;
1195
1196	smp_wmb();
1197
1198	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1199
1200	dev->trans_start = jiffies;
1201
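	/*
	 * If this packet filled the ring, stop the queue, then re-check
	 * dirty_tx after the smp_rmb(): the Tx completion interrupt may
	 * have freed descriptors in the meantime, in which case the queue
	 * is woken again right away.
	 */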
1202	dirty_tx = tp->dirty_tx;
1203	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1204		netif_stop_queue(dev);
1205		smp_rmb();
1206		if (dirty_tx != tp->dirty_tx)
1207			netif_wake_queue(dev);
1208	}
1209out:
1210	return NETDEV_TX_OK;
1211}
1212
1213static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1214{
1215	struct sis190_private *tp = netdev_priv(dev);
1216
1217	return &tp->stats;
1218}
1219
1220static void sis190_free_phy(struct list_head *first_phy)
1221{
1222	struct sis190_phy *cur, *next;
1223
1224	list_for_each_entry_safe(cur, next, first_phy, list) {
1225		kfree(cur);
1226	}
1227}
1228
1229/**
1230 *	sis190_default_phy - Select default PHY for sis190 mac.
1231 *	@dev: the net device to probe for
1232 *
 *	Select the first detected PHY with link up as default.
 *	If none has link up, select a PHY whose type is HOME as default.
 *	If there is no HOME PHY, select a LAN PHY.
1236 */
1237static u16 sis190_default_phy(struct net_device *dev)
1238{
1239	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1240	struct sis190_private *tp = netdev_priv(dev);
1241	struct mii_if_info *mii_if = &tp->mii_if;
1242	void __iomem *ioaddr = tp->mmio_addr;
1243	u16 status;
1244
1245	phy_home = phy_default = phy_lan = NULL;
1246
1247	list_for_each_entry(phy, &tp->first_phy, list) {
1248		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1249
		// Link up, no default PHY selected yet, and not a ghost PHY.
1251		if ((status & BMSR_LSTATUS) &&
1252		    !phy_default &&
1253		    (phy->type != UNKNOWN)) {
1254			phy_default = phy;
1255		} else {
1256			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1257			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1258				   status | BMCR_ANENABLE | BMCR_ISOLATE);
1259			if (phy->type == HOME)
1260				phy_home = phy;
1261			else if (phy->type == LAN)
1262				phy_lan = phy;
1263		}
1264	}
1265
1266	if (!phy_default) {
1267		if (phy_home)
1268			phy_default = phy_home;
1269		else if (phy_lan)
1270			phy_default = phy_lan;
1271		else
1272			phy_default = list_entry(&tp->first_phy,
1273						 struct sis190_phy, list);
1274	}
1275
1276	if (mii_if->phy_id != phy_default->phy_id) {
1277		mii_if->phy_id = phy_default->phy_id;
1278		net_probe(tp, KERN_INFO
1279		       "%s: Using transceiver at address %d as default.\n",
1280		       pci_name(tp->pci_dev), mii_if->phy_id);
1281	}
1282
1283	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1284	status &= (~BMCR_ISOLATE);
1285
1286	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1287	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1288
1289	return status;
1290}
1291
1292static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1293			    struct sis190_phy *phy, unsigned int phy_id,
1294			    u16 mii_status)
1295{
1296	void __iomem *ioaddr = tp->mmio_addr;
1297	struct mii_chip_info *p;
1298
1299	INIT_LIST_HEAD(&phy->list);
1300	phy->status = mii_status;
1301	phy->phy_id = phy_id;
1302
1303	phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1304	phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1305
1306	for (p = mii_chip_table; p->type; p++) {
1307		if ((p->id[0] == phy->id[0]) &&
1308		    (p->id[1] == (phy->id[1] & 0xfff0))) {
1309			break;
1310		}
1311	}
1312
1313	if (p->id[1]) {
1314		phy->type = (p->type == MIX) ?
1315			((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1316				LAN : HOME) : p->type;
1317		tp->features |= p->feature;
1318	} else
1319		phy->type = UNKNOWN;
1320
1321	net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1322		  pci_name(tp->pci_dev),
1323		  (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
1324}
1325
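/*
 * Marvell 88E1111 fixup: the register 0x1b/0x14 writes below presumably
 * select the PHY interface mode (RGMII vs GMII) to match what the board
 * reports through F_HAS_RGMII.
 */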
1326static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1327{
1328	if (tp->features & F_PHY_88E1111) {
1329		void __iomem *ioaddr = tp->mmio_addr;
1330		int phy_id = tp->mii_if.phy_id;
1331		u16 reg[2][2] = {
1332			{ 0x808b, 0x0ce1 },
1333			{ 0x808f, 0x0c60 }
1334		}, *p;
1335
1336		p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1337
1338		mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1339		udelay(200);
1340		mdio_write(ioaddr, phy_id, 0x14, p[1]);
1341		udelay(200);
1342	}
1343}
1344
1345/**
1346 *	sis190_mii_probe - Probe MII PHY for sis190
1347 *	@dev: the net device to probe for
1348 *
 *	Search all 32 possible MII PHY addresses.
 *	Identify and set the current PHY if one is found;
 *	return an error if none is found.
1352 */
1353static int __devinit sis190_mii_probe(struct net_device *dev)
1354{
1355	struct sis190_private *tp = netdev_priv(dev);
1356	struct mii_if_info *mii_if = &tp->mii_if;
1357	void __iomem *ioaddr = tp->mmio_addr;
1358	int phy_id;
1359	int rc = 0;
1360
1361	INIT_LIST_HEAD(&tp->first_phy);
1362
1363	for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1364		struct sis190_phy *phy;
1365		u16 status;
1366
1367		status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1368
1369		// Try next mii if the current one is not accessible.
1370		if (status == 0xffff || status == 0x0000)
1371			continue;
1372
1373		phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1374		if (!phy) {
1375			sis190_free_phy(&tp->first_phy);
1376			rc = -ENOMEM;
1377			goto out;
1378		}
1379
1380		sis190_init_phy(dev, tp, phy, phy_id, status);
1381
1382		list_add(&tp->first_phy, &phy->list);
1383	}
1384
1385	if (list_empty(&tp->first_phy)) {
1386		net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1387			  pci_name(tp->pci_dev));
1388		rc = -EIO;
1389		goto out;
1390	}
1391
1392	/* Select default PHY for mac */
1393	sis190_default_phy(dev);
1394
1395	sis190_mii_probe_88e1111_fixup(tp);
1396
1397	mii_if->dev = dev;
1398	mii_if->mdio_read = __mdio_read;
1399	mii_if->mdio_write = __mdio_write;
1400	mii_if->phy_id_mask = PHY_ID_ANY;
1401	mii_if->reg_num_mask = MII_REG_ANY;
1402out:
1403	return rc;
1404}
1405
1406static void __devexit sis190_mii_remove(struct net_device *dev)
1407{
1408	struct sis190_private *tp = netdev_priv(dev);
1409
1410	sis190_free_phy(&tp->first_phy);
1411}
1412
1413static void sis190_release_board(struct pci_dev *pdev)
1414{
1415	struct net_device *dev = pci_get_drvdata(pdev);
1416	struct sis190_private *tp = netdev_priv(dev);
1417
1418	iounmap(tp->mmio_addr);
1419	pci_release_regions(pdev);
1420	pci_disable_device(pdev);
1421	free_netdev(dev);
1422}
1423
1424static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1425{
1426	struct sis190_private *tp;
1427	struct net_device *dev;
1428	void __iomem *ioaddr;
1429	int rc;
1430
1431	dev = alloc_etherdev(sizeof(*tp));
1432	if (!dev) {
1433		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1434		rc = -ENOMEM;
1435		goto err_out_0;
1436	}
1437
1438	SET_MODULE_OWNER(dev);
1439	SET_NETDEV_DEV(dev, &pdev->dev);
1440
1441	tp = netdev_priv(dev);
1442	tp->dev = dev;
1443	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1444
1445	rc = pci_enable_device(pdev);
1446	if (rc < 0) {
1447		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1448		goto err_free_dev_1;
1449	}
1450
1451	rc = -ENODEV;
1452
1453	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		net_probe(tp, KERN_ERR "%s: region #0 is not an MMIO resource.\n",
1455			  pci_name(pdev));
1456		goto err_pci_disable_2;
1457	}
1458	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1459		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1460			  pci_name(pdev));
1461		goto err_pci_disable_2;
1462	}
1463
1464	rc = pci_request_regions(pdev, DRV_NAME);
1465	if (rc < 0) {
1466		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1467			  pci_name(pdev));
1468		goto err_pci_disable_2;
1469	}
1470
1471	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1472	if (rc < 0) {
1473		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1474			  pci_name(pdev));
1475		goto err_free_res_3;
1476	}
1477
1478	pci_set_master(pdev);
1479
1480	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1481	if (!ioaddr) {
1482		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1483			  pci_name(pdev));
1484		rc = -EIO;
1485		goto err_free_res_3;
1486	}
1487
1488	tp->pci_dev = pdev;
1489	tp->mmio_addr = ioaddr;
1490
1491	sis190_irq_mask_and_ack(ioaddr);
1492
1493	sis190_soft_reset(ioaddr);
1494out:
1495	return dev;
1496
1497err_free_res_3:
1498	pci_release_regions(pdev);
1499err_pci_disable_2:
1500	pci_disable_device(pdev);
1501err_free_dev_1:
1502	free_netdev(dev);
1503err_out_0:
1504	dev = ERR_PTR(rc);
1505	goto out;
1506}
1507
1508static void sis190_tx_timeout(struct net_device *dev)
1509{
1510	struct sis190_private *tp = netdev_priv(dev);
1511	void __iomem *ioaddr = tp->mmio_addr;
1512	u8 tmp8;
1513
1514	/* Disable Tx, if not already */
1515	tmp8 = SIS_R8(TxControl);
1516	if (tmp8 & CmdTxEnb)
1517		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1518
1519
1520	net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1521		   dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1522
1523	/* Disable interrupts by clearing the interrupt mask. */
1524	SIS_W32(IntrMask, 0x0000);
1525
1526	/* Stop a shared interrupt from scavenging while we are. */
1527	spin_lock_irq(&tp->lock);
1528	sis190_tx_clear(tp);
1529	spin_unlock_irq(&tp->lock);
1530
1531	/* ...and finally, reset everything. */
1532	sis190_hw_start(dev);
1533
1534	netif_wake_queue(dev);
1535}
1536
1537static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1538{
1539	tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1540}
1541
1542static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1543						     struct net_device *dev)
1544{
1545	struct sis190_private *tp = netdev_priv(dev);
1546	void __iomem *ioaddr = tp->mmio_addr;
1547	u16 sig;
1548	int i;
1549
1550	net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1551		  pci_name(pdev));
1552
1553	/* Check to see if there is a sane EEPROM */
1554	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1555
1556	if ((sig == 0xffff) || (sig == 0x0000)) {
		net_probe(tp, KERN_INFO "%s: invalid EEPROM signature %x.\n",
1558			  pci_name(pdev), sig);
1559		return -EIO;
1560	}
1561
1562	/* Get MAC address from EEPROM */
1563	for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1564		__le16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1565
1566		((u16 *)dev->dev_addr)[i] = le16_to_cpu(w);
1567	}
1568
1569	sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1570
1571	return 0;
1572}
1573
1574/**
1575 *	sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
1576 *	@pdev: PCI device
1577 *	@dev:  network device to get address for
1578 *
 *	The SiS965 model stores the MAC address in APC CMOS RAM, which is
 *	accessed through the ISA bridge.
 *	The MAC address is read into @dev->dev_addr.
1582 */
1583static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1584						  struct net_device *dev)
1585{
1586	struct sis190_private *tp = netdev_priv(dev);
1587	struct pci_dev *isa_bridge;
1588	u8 reg, tmp8;
1589	int i;
1590
1591	net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1592		  pci_name(pdev));
1593
1594	isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
1595	if (!isa_bridge) {
1596		net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1597			  pci_name(pdev));
1598		return -EIO;
1599	}
1600
1601	/* Enable port 78h & 79h to access APC Registers. */
1602	pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1603	reg = (tmp8 & ~0x02);
1604	pci_write_config_byte(isa_bridge, 0x48, reg);
1605	udelay(50);
1606	pci_read_config_byte(isa_bridge, 0x48, &reg);
1607
	for (i = 0; i < MAC_ADDR_LEN; i++) {
		outb(0x9 + i, 0x78);
		dev->dev_addr[i] = inb(0x79);
	}
1612
1613	outb(0x12, 0x78);
1614	reg = inb(0x79);
1615
1616	sis190_set_rgmii(tp, reg);
1617
1618	/* Restore the value to ISA Bridge */
1619	pci_write_config_byte(isa_bridge, 0x48, tmp8);
1620	pci_dev_put(isa_bridge);
1621
1622	return 0;
1623}
1624
1625/**
1626 *      sis190_init_rxfilter - Initialize the Rx filter
1627 *      @dev: network device to initialize
1628 *
1629 *      Set receive filter address to our MAC address
1630 *      and enable packet filtering.
1631 */
1632static inline void sis190_init_rxfilter(struct net_device *dev)
1633{
1634	struct sis190_private *tp = netdev_priv(dev);
1635	void __iomem *ioaddr = tp->mmio_addr;
1636	u16 ctl;
1637	int i;
1638
1639	ctl = SIS_R16(RxMacControl);
1640	/*
1641	 * Disable packet filtering before setting filter.
1642	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1643	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1644	 */
1645	SIS_W16(RxMacControl, ctl & ~0x0f00);
1646
1647	for (i = 0; i < MAC_ADDR_LEN; i++)
1648		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1649
1650	SIS_W16(RxMacControl, ctl);
1651	SIS_PCI_COMMIT();
1652}
1653
1654static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1655{
1656	u8 from;
1657
1658	pci_read_config_byte(pdev, 0x73, &from);
1659
1660	return (from & 0x00000001) ?
1661		sis190_get_mac_addr_from_apc(pdev, dev) :
1662		sis190_get_mac_addr_from_eeprom(pdev, dev);
1663}
1664
1665static void sis190_set_speed_auto(struct net_device *dev)
1666{
1667	struct sis190_private *tp = netdev_priv(dev);
1668	void __iomem *ioaddr = tp->mmio_addr;
1669	int phy_id = tp->mii_if.phy_id;
1670	int val;
1671
1672	net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1673
1674	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1675
	// Advertise 10/100 full/half duplex; leave MII_ADVERTISE bits 4:0
	// (selector field) unchanged.
1678	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1679		   ADVERTISE_100FULL | ADVERTISE_10FULL |
1680		   ADVERTISE_100HALF | ADVERTISE_10HALF);
1681
1682	// Enable 1000 Full Mode.
1683	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1684
1685	// Enable auto-negotiation and restart auto-negotiation.
1686	mdio_write(ioaddr, phy_id, MII_BMCR,
1687		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1688}
1689
1690static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1691{
1692	struct sis190_private *tp = netdev_priv(dev);
1693
1694	return mii_ethtool_gset(&tp->mii_if, cmd);
1695}
1696
1697static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1698{
1699	struct sis190_private *tp = netdev_priv(dev);
1700
1701	return mii_ethtool_sset(&tp->mii_if, cmd);
1702}
1703
1704static void sis190_get_drvinfo(struct net_device *dev,
1705			       struct ethtool_drvinfo *info)
1706{
1707	struct sis190_private *tp = netdev_priv(dev);
1708
1709	strcpy(info->driver, DRV_NAME);
1710	strcpy(info->version, DRV_VERSION);
1711	strcpy(info->bus_info, pci_name(tp->pci_dev));
1712}
1713
1714static int sis190_get_regs_len(struct net_device *dev)
1715{
1716	return SIS190_REGS_SIZE;
1717}
1718
1719static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1720			    void *p)
1721{
1722	struct sis190_private *tp = netdev_priv(dev);
1723	unsigned long flags;
1724
1725	if (regs->len > SIS190_REGS_SIZE)
1726		regs->len = SIS190_REGS_SIZE;
1727
1728	spin_lock_irqsave(&tp->lock, flags);
1729	memcpy_fromio(p, tp->mmio_addr, regs->len);
1730	spin_unlock_irqrestore(&tp->lock, flags);
1731}
1732
1733static int sis190_nway_reset(struct net_device *dev)
1734{
1735	struct sis190_private *tp = netdev_priv(dev);
1736
1737	return mii_nway_restart(&tp->mii_if);
1738}
1739
1740static u32 sis190_get_msglevel(struct net_device *dev)
1741{
1742	struct sis190_private *tp = netdev_priv(dev);
1743
1744	return tp->msg_enable;
1745}
1746
1747static void sis190_set_msglevel(struct net_device *dev, u32 value)
1748{
1749	struct sis190_private *tp = netdev_priv(dev);
1750
1751	tp->msg_enable = value;
1752}
1753
1754static const struct ethtool_ops sis190_ethtool_ops = {
1755	.get_settings	= sis190_get_settings,
1756	.set_settings	= sis190_set_settings,
1757	.get_drvinfo	= sis190_get_drvinfo,
1758	.get_regs_len	= sis190_get_regs_len,
1759	.get_regs	= sis190_get_regs,
1760	.get_link	= ethtool_op_get_link,
1761	.get_msglevel	= sis190_get_msglevel,
1762	.set_msglevel	= sis190_set_msglevel,
1763	.nway_reset	= sis190_nway_reset,
1764};
1765
1766static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1767{
1768	struct sis190_private *tp = netdev_priv(dev);
1769
1770	return !netif_running(dev) ? -EINVAL :
1771		generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1772}
1773
1774static int __devinit sis190_init_one(struct pci_dev *pdev,
1775				     const struct pci_device_id *ent)
1776{
1777	static int printed_version = 0;
1778	struct sis190_private *tp;
1779	struct net_device *dev;
1780	void __iomem *ioaddr;
1781	int rc;
1782
1783	if (!printed_version) {
1784		net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1785		printed_version = 1;
1786	}
1787
1788	dev = sis190_init_board(pdev);
1789	if (IS_ERR(dev)) {
1790		rc = PTR_ERR(dev);
1791		goto out;
1792	}
1793
1794	pci_set_drvdata(pdev, dev);
1795
1796	tp = netdev_priv(dev);
1797	ioaddr = tp->mmio_addr;
1798
1799	rc = sis190_get_mac_addr(pdev, dev);
1800	if (rc < 0)
1801		goto err_release_board;
1802
1803	sis190_init_rxfilter(dev);
1804
1805	INIT_WORK(&tp->phy_task, sis190_phy_task);
1806
1807	dev->open = sis190_open;
1808	dev->stop = sis190_close;
1809	dev->do_ioctl = sis190_ioctl;
1810	dev->get_stats = sis190_get_stats;
1811	dev->tx_timeout = sis190_tx_timeout;
1812	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1813	dev->hard_start_xmit = sis190_start_xmit;
1814#ifdef CONFIG_NET_POLL_CONTROLLER
1815	dev->poll_controller = sis190_netpoll;
1816#endif
1817	dev->set_multicast_list = sis190_set_rx_mode;
1818	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1819	dev->irq = pdev->irq;
1820	dev->base_addr = (unsigned long) 0xdead;
1821
1822	spin_lock_init(&tp->lock);
1823
1824	rc = sis190_mii_probe(dev);
1825	if (rc < 0)
1826		goto err_release_board;
1827
1828	rc = register_netdev(dev);
1829	if (rc < 0)
1830		goto err_remove_mii;
1831
1832	net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1833	       "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1834	       pci_name(pdev), sis_chip_info[ent->driver_data].name,
1835	       ioaddr, dev->irq,
1836	       dev->dev_addr[0], dev->dev_addr[1],
1837	       dev->dev_addr[2], dev->dev_addr[3],
1838	       dev->dev_addr[4], dev->dev_addr[5]);
1839
1840	net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
1841		  (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1842
1843	netif_carrier_off(dev);
1844
1845	sis190_set_speed_auto(dev);
1846out:
1847	return rc;
1848
1849err_remove_mii:
1850	sis190_mii_remove(dev);
1851err_release_board:
1852	sis190_release_board(pdev);
1853	goto out;
1854}
1855
1856static void __devexit sis190_remove_one(struct pci_dev *pdev)
1857{
1858	struct net_device *dev = pci_get_drvdata(pdev);
1859
1860	sis190_mii_remove(dev);
1861	flush_scheduled_work();
1862	unregister_netdev(dev);
1863	sis190_release_board(pdev);
1864	pci_set_drvdata(pdev, NULL);
1865}
1866
1867static struct pci_driver sis190_pci_driver = {
1868	.name		= DRV_NAME,
1869	.id_table	= sis190_pci_tbl,
1870	.probe		= sis190_init_one,
1871	.remove		= __devexit_p(sis190_remove_one),
1872};
1873
1874static int __init sis190_init_module(void)
1875{
1876	return pci_register_driver(&sis190_pci_driver);
1877}
1878
1879static void __exit sis190_cleanup_module(void)
1880{
1881	pci_unregister_driver(&sis190_pci_driver);
1882}
1883
1884module_init(sis190_init_module);
1885module_exit(sis190_cleanup_module);
1886