• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/tulip/
1/*
2    A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3    ethernet driver for Linux.
4    Copyright (C) 1997  Sten Wang
5
6    This program is free software; you can redistribute it and/or
7    modify it under the terms of the GNU General Public License
8    as published by the Free Software Foundation; either version 2
9    of the License, or (at your option) any later version.
10
11    This program is distributed in the hope that it will be useful,
12    but WITHOUT ANY WARRANTY; without even the implied warranty of
13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14    GNU General Public License for more details.
15
16    DAVICOM Web-Site: www.davicom.com.tw
17
18    Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19    Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
20
21    (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
22
23    Marcelo Tosatti <marcelo@conectiva.com.br> :
24    Made it compile in 2.3 (device to net_device)
25
26    Alan Cox <alan@lxorguk.ukuu.org.uk> :
27    Cleaned up for kernel merge.
28    Removed the back compatibility support
29    Reformatted, fixing spelling etc as I went
30    Removed IRQ 0-15 assumption
31
32    Jeff Garzik <jgarzik@pobox.com> :
33    Updated to use new PCI driver API.
34    Resource usage cleanups.
35    Report driver version to user.
36
37    Tobias Ringstrom <tori@unhappy.mine.nu> :
38    Cleaned up and added SMP safety.  Thanks go to Jeff Garzik,
39    Andrew Morton and Frank Davis for the SMP safety fixes.
40
41    Vojtech Pavlik <vojtech@suse.cz> :
42    Cleaned up pointer arithmetics.
43    Fixed a lot of 64bit issues.
44    Cleaned up printk()s a bit.
45    Fixed some obvious big endian problems.
46
47    Tobias Ringstrom <tori@unhappy.mine.nu> :
48    Use time_after for jiffies calculation.  Added ethtool
49    support.  Updated PCI resource allocation.  Do not
50    forget to unmap PCI mapped skbs.
51
52    Alan Cox <alan@lxorguk.ukuu.org.uk>
53    Added new PCI identifiers provided by Clear Zhang at ALi
54    for their 1563 ethernet device.
55
56    TODO
57
58    Check on 64 bit boxes.
59    Check and fix on big endian boxes.
60
61    Test and make sure PCI latency is now correct for all cases.
62*/
63
64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65
66#define DRV_NAME	"dmfe"
67#define DRV_VERSION	"1.36.4"
68#define DRV_RELDATE	"2002-01-17"
69
70#include <linux/module.h>
71#include <linux/kernel.h>
72#include <linux/string.h>
73#include <linux/timer.h>
74#include <linux/ptrace.h>
75#include <linux/errno.h>
76#include <linux/ioport.h>
77#include <linux/interrupt.h>
78#include <linux/pci.h>
79#include <linux/dma-mapping.h>
80#include <linux/init.h>
81#include <linux/netdevice.h>
82#include <linux/etherdevice.h>
83#include <linux/ethtool.h>
84#include <linux/skbuff.h>
85#include <linux/delay.h>
86#include <linux/spinlock.h>
87#include <linux/crc32.h>
88#include <linux/bitops.h>
89
90#include <asm/processor.h>
91#include <asm/io.h>
92#include <asm/dma.h>
93#include <asm/uaccess.h>
94#include <asm/irq.h>
95
96#ifdef CONFIG_TULIP_DM910X
97#include <linux/of.h>
98#endif
99
100
101/* Board/System/Debug information/definition ---------------- */
102#define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
103#define PCI_DM9102_ID   0x91021282      /* Davicom DM9102 ID */
104#define PCI_DM9100_ID   0x91001282      /* Davicom DM9100 ID */
105#define PCI_DM9009_ID   0x90091282      /* Davicom DM9009 ID */
106
107#define DM9102_IO_SIZE  0x80
108#define DM9102A_IO_SIZE 0x100
109#define TX_MAX_SEND_CNT 0x1             /* Maximum tx packet per time */
110#define TX_DESC_CNT     0x10            /* Allocated Tx descriptors */
111#define RX_DESC_CNT     0x20            /* Allocated Rx descriptors */
112#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)	/* Max TX packet count */
113#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)	/* TX wakeup count */
114#define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
115#define TX_BUF_ALLOC    0x600
116#define RX_ALLOC_SIZE   0x620
117#define DM910X_RESET    1
118#define CR0_DEFAULT     0x00E00000      /* TX & RX burst mode */
119#define CR6_DEFAULT     0x00080000      /* HD */
120#define CR7_DEFAULT     0x180c1
121#define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
122#define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
123#define MAX_PACKET_SIZE 1514
124#define DMFE_MAX_MULTICAST 14
125#define RX_COPY_SIZE	100
126#define MAX_CHECK_PACKET 0x8000
127#define DM9801_NOISE_FLOOR 8
128#define DM9802_NOISE_FLOOR 5
129
130#define DMFE_WOL_LINKCHANGE	0x20000000
131#define DMFE_WOL_SAMPLEPACKET	0x10000000
132#define DMFE_WOL_MAGICPACKET	0x08000000
133
134
135#define DMFE_10MHF      0
136#define DMFE_100MHF     1
137#define DMFE_10MFD      4
138#define DMFE_100MFD     5
139#define DMFE_AUTO       8
140#define DMFE_1M_HPNA    0x10
141
142#define DMFE_TXTH_72	0x400000	/* TX TH 72 byte */
143#define DMFE_TXTH_96	0x404000	/* TX TH 96 byte */
144#define DMFE_TXTH_128	0x0000		/* TX TH 128 byte */
145#define DMFE_TXTH_256	0x4000		/* TX TH 256 byte */
146#define DMFE_TXTH_512	0x8000		/* TX TH 512 byte */
147#define DMFE_TXTH_1K	0xC000		/* TX TH 1K  byte */
148
149#define DMFE_TIMER_WUT  (jiffies + HZ * 1)/* timer wakeup time : 1 second */
150#define DMFE_TX_TIMEOUT ((3*HZ)/2)	/* tx packet time-out time 1.5 s" */
151#define DMFE_TX_KICK 	(HZ/2)	/* tx packet Kick-out time 0.5 s" */
152
153#define DMFE_DBUG(dbug_now, msg, value)			\
154	do {						\
155		if (dmfe_debug || (dbug_now))		\
156			pr_err("%s %lx\n",		\
157			       (msg), (long) (value));	\
158	} while (0)
159
160#define SHOW_MEDIA_TYPE(mode)				\
161	pr_info("Change Speed to %sMhz %s duplex\n" ,	\
162		(mode & 1) ? "100":"10",		\
163		(mode & 4) ? "full":"half");
164
165
166/* CR9 definition: SROM/MII */
167#define CR9_SROM_READ   0x4800
168#define CR9_SRCS        0x1
169#define CR9_SRCLK       0x2
170#define CR9_CRDOUT      0x8
171#define SROM_DATA_0     0x0
172#define SROM_DATA_1     0x4
173#define PHY_DATA_1      0x20000
174#define PHY_DATA_0      0x00000
175#define MDCLKH          0x10000
176
177#define PHY_POWER_DOWN	0x800
178
179#define SROM_V41_CODE   0x14
180
/* Clock one bit of 'data' into the serial SROM interface: raise chip
 * select with the data bit, pulse SRCLK high, then drop it again, with
 * 5us settling time around each edge.
 *
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement inside un-braced if/else bodies (the old form expanded to
 * three statements and would silently detach from the condition).
 * Arguments are parenthesized against operator-precedence surprises.
 */
#define SROM_CLK_WRITE(data, ioaddr) \
	do { \
		outl((data)|CR9_SROM_READ|CR9_SRCS, (ioaddr)); \
		udelay(5); \
		outl((data)|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK, (ioaddr)); \
		udelay(5); \
		outl((data)|CR9_SROM_READ|CR9_SRCS, (ioaddr)); \
		udelay(5); \
	} while (0)
188
189#define __CHK_IO_SIZE(pci_id, dev_rev) \
190 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
191	DM9102A_IO_SIZE: DM9102_IO_SIZE)
192
193#define CHK_IO_SIZE(pci_dev) \
194	(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
195	(pci_dev)->revision))
196
197/* Sten Check */
198#define DEVICE net_device
199
200/* Structure/enum declaration ------------------------------- */
/* Transmit descriptor. The first four little-endian words are the
 * layout the DM910x DMA engine reads (tdes0 bit 31 is the owner bit,
 * see dmfe_start_xmit/dmfe_free_tx_pkt); the trailing fields are
 * driver-private ring bookkeeping the chip never sees.
 * aligned(32): keeps each descriptor on a 32-byte boundary —
 * presumably a chip/DMA alignment requirement; TODO confirm. */
struct tx_desc {
        __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
        char *tx_buf_ptr;               /* Data for us */
        struct tx_desc *next_tx_desc;   /* circular ring link */
} __attribute__(( aligned(32) ));
206
/* Receive descriptor, same split as tx_desc: four little-endian words
 * owned by the DMA engine (rdes0 bit 31 = owner, rdes2 = buffer DMA
 * address, see dmfe_rx_packet), followed by driver-private fields. */
struct rx_desc {
	__le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
	struct sk_buff *rx_skb_ptr;	/* Data for us */
	struct rx_desc *next_rx_desc;	/* circular ring link */
} __attribute__(( aligned(32) ));
212
/* Per-adapter private state, stored in the net_device private area
 * (netdev_priv). All run-time fields are protected by 'lock'. */
struct dmfe_board_info {
	u32 chip_id;			/* Chip vendor/Device ID */
	u8 chip_revision;		/* Chip revision */
	struct DEVICE *next_dev;	/* next device */
	struct pci_dev *pdev;		/* PCI device */
	spinlock_t lock;		/* guards ring state and CR shadows */

	long ioaddr;			/* I/O base address */
	u32 cr0_data;			/* shadow written to DCR0 (bus mode) */
	u32 cr5_data;			/* last DCR5 status read in the ISR */
	u32 cr6_data;			/* shadow of DCR6 (operation mode) */
	u32 cr7_data;			/* shadow of DCR7 (interrupt mask) */
	u32 cr15_data;			/* shadow of DCR15 (jabber/watchdog) */

	/* pointer for memory physical address */
	dma_addr_t buf_pool_dma_ptr;	/* Tx buffer pool memory */
	dma_addr_t buf_pool_dma_start;	/* Tx buffer pool align dword */
	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool memory */
	dma_addr_t first_tx_desc_dma;	/* DMA address of tx ring head */
	dma_addr_t first_rx_desc_dma;	/* DMA address of rx ring head */

	/* descriptor pointer */
	unsigned char *buf_pool_ptr;	/* Tx buffer pool memory */
	unsigned char *buf_pool_start;	/* Tx buffer pool align dword */
	unsigned char *desc_pool_ptr;	/* descriptor pool memory */
	struct tx_desc *first_tx_desc;	/* CPU address of tx ring head */
	struct tx_desc *tx_insert_ptr;	/* next free tx descriptor */
	struct tx_desc *tx_remove_ptr;	/* oldest in-flight tx descriptor */
	struct rx_desc *first_rx_desc;	/* CPU address of rx ring head */
	struct rx_desc *rx_insert_ptr;	/* next slot to refill with an skb */
	struct rx_desc *rx_ready_ptr;	/* packet come pointer */
	unsigned long tx_packet_cnt;	/* transmitted packet count */
	unsigned long tx_queue_cnt;	/* wait to send packet count */
	unsigned long rx_avail_cnt;	/* available rx descriptor count */
	unsigned long interval_rx_cnt;	/* rx packet count a callback time */

	u16 HPNA_command;		/* For HPNA register 16 */
	u16 HPNA_timer;			/* For HPNA remote device check */
	u16 dbug_cnt;			/* debug/ISR bookkeeping counter */
	u16 NIC_capability;		/* NIC media capability */
	u16 PHY_reg4;			/* Saved Phyxcer register 4 value */

	u8 HPNA_present;		/* 0:none, 1:DM9801, 2:DM9802 */
	u8 chip_type;			/* Keep DM9102A chip type */
	u8 media_mode;			/* user specify media mode */
	u8 op_mode;			/* real work media mode */
	u8 phy_addr;			/* MII PHY address (1 on these chips) */
	u8 wait_reset;			/* Hardware failed, need to reset */
	u8 dm910x_chk_mode;		/* Operating mode check */
	u8 first_in_callback;		/* Flag to record state */
	u8 wol_mode;			/* user WOL settings */
	struct timer_list timer;	/* periodic link/housekeeping timer */

	/* Driver defined statistic counter */
	unsigned long tx_fifo_underrun;
	unsigned long tx_loss_carrier;
	unsigned long tx_no_carrier;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_jabber_timeout;
	unsigned long reset_count;
	unsigned long reset_cr8;
	unsigned long reset_fatal;
	unsigned long reset_TXtimeout;

	/* NIC SROM data */
	unsigned char srom[128];
};
281
/* Control/status register offsets from the I/O base. Each CRn lives
 * 8 bytes apart (tulip-style register spacing). */
enum dmfe_offsets {
	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
	DCR15 = 0x78
};
288
/* Bit masks within CR6 (operation mode register): receive/transmit
 * start (RXSC/TXSC), promiscuous (PM), pass-all-multicast (PAM),
 * full duplex (FDM), store-and-forward (SFT), etc. */
enum dmfe_CR6_bits {
	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};
294
295/* Global variable declaration ----------------------------- */
296static int __devinitdata printed_version;
297static const char version[] __devinitconst =
298	KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version "
299	DRV_VERSION " (" DRV_RELDATE ")\n";
300
301static int dmfe_debug;
302static unsigned char dmfe_media_mode = DMFE_AUTO;
303static u32 dmfe_cr6_user_set;
304
305/* For module input parameter */
306static int debug;
307static u32 cr6set;
308static unsigned char mode = 8;
309static u8 chkmode = 1;
310static u8 HPNA_mode;		/* Default: Low Power/High Speed */
311static u8 HPNA_rx_cmd;		/* Default: Disable Rx remote command */
312static u8 HPNA_tx_cmd;		/* Default: Don't issue remote command */
313static u8 HPNA_NoiseFloor;	/* Default: HPNA NoiseFloor */
314static u8 SF_mode;		/* Special Function: 1:VLAN, 2:RX Flow Control
315				   4: TX pause packet */
316
317
318/* function declaration ------------------------------------- */
319static int dmfe_open(struct DEVICE *);
320static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
321static int dmfe_stop(struct DEVICE *);
322static void dmfe_set_filter_mode(struct DEVICE *);
323static const struct ethtool_ops netdev_ethtool_ops;
324static u16 read_srom_word(long ,int);
325static irqreturn_t dmfe_interrupt(int , void *);
326#ifdef CONFIG_NET_POLL_CONTROLLER
327static void poll_dmfe (struct net_device *dev);
328#endif
329static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
330static void allocate_rx_buffer(struct dmfe_board_info *);
331static void update_cr6(u32, unsigned long);
332static void send_filter_frame(struct DEVICE *);
333static void dm9132_id_table(struct DEVICE *);
334static u16 phy_read(unsigned long, u8, u8, u32);
335static void phy_write(unsigned long, u8, u8, u16, u32);
336static void phy_write_1bit(unsigned long, u32);
337static u16 phy_read_1bit(unsigned long);
338static u8 dmfe_sense_speed(struct dmfe_board_info *);
339static void dmfe_process_mode(struct dmfe_board_info *);
340static void dmfe_timer(unsigned long);
341static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
342static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
343static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
344static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
345static void dmfe_dynamic_reset(struct DEVICE *);
346static void dmfe_free_rxbuffer(struct dmfe_board_info *);
347static void dmfe_init_dm910x(struct DEVICE *);
348static void dmfe_parse_srom(struct dmfe_board_info *);
349static void dmfe_program_DM9801(struct dmfe_board_info *, int);
350static void dmfe_program_DM9802(struct dmfe_board_info *);
351static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
352static void dmfe_set_phyxcer(struct dmfe_board_info *);
353
354/* DM910X network board routine ---------------------------- */
355
/* Net device callbacks: driver-specific open/stop/xmit/multicast,
 * generic ethernet helpers for MTU/MAC-address handling. */
static const struct net_device_ops netdev_ops = {
	.ndo_open 		= dmfe_open,
	.ndo_stop		= dmfe_stop,
	.ndo_start_xmit		= dmfe_start_xmit,
	.ndo_set_multicast_list = dmfe_set_filter_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_dmfe,
#endif
};
368
369/*
 *	Search for a DM910X board, allocate space and register it
371 */
372
373static int __devinit dmfe_init_one (struct pci_dev *pdev,
374				    const struct pci_device_id *ent)
375{
376	struct dmfe_board_info *db;	/* board information structure */
377	struct net_device *dev;
378	u32 pci_pmr;
379	int i, err;
380
381	DMFE_DBUG(0, "dmfe_init_one()", 0);
382
383	if (!printed_version++)
384		printk(version);
385
386	/*
387	 *	SPARC on-board DM910x chips should be handled by the main
388	 *	tulip driver, except for early DM9100s.
389	 */
390#ifdef CONFIG_TULIP_DM910X
391	if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
392	    ent->driver_data == PCI_DM9102_ID) {
393		struct device_node *dp = pci_device_to_OF_node(pdev);
394
395		if (dp && of_get_property(dp, "local-mac-address", NULL)) {
396			pr_info("skipping on-board DM910x (use tulip)\n");
397			return -ENODEV;
398		}
399	}
400#endif
401
402	/* Init network device */
403	dev = alloc_etherdev(sizeof(*db));
404	if (dev == NULL)
405		return -ENOMEM;
406	SET_NETDEV_DEV(dev, &pdev->dev);
407
408	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
409		pr_warning("32-bit PCI DMA not available\n");
410		err = -ENODEV;
411		goto err_out_free;
412	}
413
414	/* Enable Master/IO access, Disable memory access */
415	err = pci_enable_device(pdev);
416	if (err)
417		goto err_out_free;
418
419	if (!pci_resource_start(pdev, 0)) {
420		pr_err("I/O base is zero\n");
421		err = -ENODEV;
422		goto err_out_disable;
423	}
424
425	if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
426		pr_err("Allocated I/O size too small\n");
427		err = -ENODEV;
428		goto err_out_disable;
429	}
430
431
432	if (pci_request_regions(pdev, DRV_NAME)) {
433		pr_err("Failed to request PCI regions\n");
434		err = -ENODEV;
435		goto err_out_disable;
436	}
437
438	/* Init system & device */
439	db = netdev_priv(dev);
440
441	/* Allocate Tx/Rx descriptor memory */
442	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
443			DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
444	if (!db->desc_pool_ptr)
445		goto err_out_res;
446
447	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
448			TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
449	if (!db->buf_pool_ptr)
450		goto err_out_free_desc;
451
452	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
453	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
454	db->buf_pool_start = db->buf_pool_ptr;
455	db->buf_pool_dma_start = db->buf_pool_dma_ptr;
456
457	db->chip_id = ent->driver_data;
458	db->ioaddr = pci_resource_start(pdev, 0);
459	db->chip_revision = pdev->revision;
460	db->wol_mode = 0;
461
462	db->pdev = pdev;
463
464	dev->base_addr = db->ioaddr;
465	dev->irq = pdev->irq;
466	pci_set_drvdata(pdev, dev);
467	dev->netdev_ops = &netdev_ops;
468	dev->ethtool_ops = &netdev_ethtool_ops;
469	netif_carrier_off(dev);
470	spin_lock_init(&db->lock);
471
472	pci_read_config_dword(pdev, 0x50, &pci_pmr);
473	pci_pmr &= 0x70000;
474	if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
475		db->chip_type = 1;	/* DM9102A E3 */
476	else
477		db->chip_type = 0;
478
479	/* read 64 word srom data */
480	for (i = 0; i < 64; i++)
481		((__le16 *) db->srom)[i] =
482			cpu_to_le16(read_srom_word(db->ioaddr, i));
483
484	/* Set Node address */
485	for (i = 0; i < 6; i++)
486		dev->dev_addr[i] = db->srom[20 + i];
487
488	err = register_netdev (dev);
489	if (err)
490		goto err_out_free_buf;
491
492	dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
493		 ent->driver_data >> 16,
494		 pci_name(pdev), dev->dev_addr, dev->irq);
495
496	pci_set_master(pdev);
497
498	return 0;
499
500err_out_free_buf:
501	pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
502			    db->buf_pool_ptr, db->buf_pool_dma_ptr);
503err_out_free_desc:
504	pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
505			    db->desc_pool_ptr, db->desc_pool_dma_ptr);
506err_out_res:
507	pci_release_regions(pdev);
508err_out_disable:
509	pci_disable_device(pdev);
510err_out_free:
511	pci_set_drvdata(pdev, NULL);
512	free_netdev(dev);
513
514	return err;
515}
516
517
518static void __devexit dmfe_remove_one (struct pci_dev *pdev)
519{
520	struct net_device *dev = pci_get_drvdata(pdev);
521	struct dmfe_board_info *db = netdev_priv(dev);
522
523	DMFE_DBUG(0, "dmfe_remove_one()", 0);
524
525 	if (dev) {
526
527		unregister_netdev(dev);
528
529		pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
530					DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
531 					db->desc_pool_dma_ptr);
532		pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
533					db->buf_pool_ptr, db->buf_pool_dma_ptr);
534		pci_release_regions(pdev);
535		free_netdev(dev);	/* free board information */
536
537		pci_set_drvdata(pdev, NULL);
538	}
539
540	DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
541}
542
543
544/*
545 *	Open the interface.
 *	The interface is opened whenever "ifconfig" activates it.
547 */
548
static int dmfe_open(struct DEVICE *dev)
{
	int ret;
	struct dmfe_board_info *db = netdev_priv(dev);

	DMFE_DBUG(0, "dmfe_open", 0);

	/* Claim the (shared) IRQ line before touching the hardware */
	ret = request_irq(dev->irq, dmfe_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (ret)
		return ret;

	/* system variable init */
	db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->wait_reset = 0;

	db->first_in_callback = 0;
	db->NIC_capability = 0xf;	/* All capability*/
	db->PHY_reg4 = 0x1e0;

	/* CR6 operation mode decision:
	 * DM9132 or DM9102A (revision >= 0x30) run in normal mode with a
	 * 256-byte TX threshold; older parts go through a store-and-forward
	 * "check mode" pass first (see dm910x_chk_mode handling in the ISR). */
	if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
		(db->chip_revision >= 0x30) ) {
    		db->cr6_data |= DMFE_TXTH_256;
		db->cr0_data = CR0_DEFAULT;
		db->dm910x_chk_mode=4;		/* Enter the normal mode */
 	} else {
		db->cr6_data |= CR6_SFT;	/* Store & Forward mode */
		db->cr0_data = 0;
		db->dm910x_chk_mode = 1;	/* Enter the check mode */
	}

	/* Initialize DM910X board: reset, SROM parse, ring setup, rx/tx on */
	dmfe_init_dm910x(dev);

	/* Active System Interface */
	netif_wake_queue(dev);

	/* set and active a timer process (first expiry delayed by 2s) */
	init_timer(&db->timer);
	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
	db->timer.data = (unsigned long)dev;
	db->timer.function = &dmfe_timer;
	add_timer(&db->timer);

	return 0;
}
599
600
601/*	Initialize DM910X board
602 *	Reset DM910X board
603 *	Initialize TX/Rx descriptor chain structure
604 *	Send the set-up frame
605 *	Enable Tx/Rx machine
606 */
607
static void dmfe_init_dm910x(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

	/* Reset DM910x MAC controller */
	outl(DM910X_RESET, ioaddr + DCR0);	/* RESET MAC */
	udelay(100);
	outl(db->cr0_data, ioaddr + DCR0);
	udelay(5);

	/* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
	db->phy_addr = 1;

	/* Parser SROM and media mode */
	dmfe_parse_srom(db);
	db->media_mode = dmfe_media_mode;

	/* RESET Phyxcer Chip by GPR port bit 7 */
	outl(0x180, ioaddr + DCR12);		/* Let bit 7 output port */
	if (db->chip_id == PCI_DM9009_ID) {
		outl(0x80, ioaddr + DCR12);	/* Issue RESET signal */
		mdelay(300);			/* Delay 300 ms */
	}
	outl(0x0, ioaddr + DCR12);	/* Clear RESET signal */

	/* Process Phyxcer Media Mode (skipped when 1M HomePNA is forced) */
	if ( !(db->media_mode & 0x10) )	/* Force 1M mode */
		dmfe_set_phyxcer(db);

	/* Media Mode Process: without AUTO, the user-set mode is final */
	if ( !(db->media_mode & DMFE_AUTO) )
		db->op_mode = db->media_mode; 	/* Force Mode */

	/* Initialize Transmit/Receive decriptor and CR3/4 */
	dmfe_descriptor_init(db, ioaddr);

	/* Init CR6 to program DM910x operation */
	update_cr6(db->cr6_data, ioaddr);

	/* Send setup frame (MAC/multicast filter) */
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev);	/* DM9132 */
	else
		send_filter_frame(dev);	/* DM9102/DM9102A */

	/* Init CR7, interrupt active bit */
	db->cr7_data = CR7_DEFAULT;
	outl(db->cr7_data, ioaddr + DCR7);

	/* Init CR15, Tx jabber and Rx watchdog timer */
	outl(db->cr15_data, ioaddr + DCR15);

	/* Enable DM910X Tx/Rx function — must be last, after rings,
	 * filter and interrupt mask are all in place */
	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
	update_cr6(db->cr6_data, ioaddr);
}
667
668
669/*
670 *	Hardware start transmission.
671 *	Send a packet to media from the upper layer.
672 */
673
static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
					 struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct tx_desc *txptr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_start_xmit", 0);

	/* Resource flag check: stop the queue first, re-enable below
	 * once we know descriptors remain */
	netif_stop_queue(dev);

	/* Too large packet check: drop oversized frames outright */
	if (skb->len > MAX_PACKET_SIZE) {
		pr_err("big packet = %d\n", (u16)skb->len);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&db->lock, flags);

	/* No Tx resource check; should never happen in normal operation */
	if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
		spin_unlock_irqrestore(&db->lock, flags);
		pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
		return NETDEV_TX_BUSY;
	}

	/* Disable NIC interrupt while we manipulate the ring */
	outl(0, dev->base_addr + DCR7);

	/* transmit this packet: copy into the descriptor's pre-allocated
	 * bounce buffer and record the length in tdes1 */
	txptr = db->tx_insert_ptr;
	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

	/* Point to next transmit free descriptor */
	db->tx_insert_ptr = txptr->next_tx_desc;

	/* Transmit Packet Process: hand to the chip now if under the
	 * in-flight limit, otherwise leave queued for dmfe_free_tx_pkt */
	if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	} else {
		db->tx_queue_cnt++;			/* queue TX packet */
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
	}

	/* Tx resource check */
	if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
		netif_wake_queue(dev);

	/* Restore CR7 to enable interrupt */
	spin_unlock_irqrestore(&db->lock, flags);
	outl(db->cr7_data, dev->base_addr + DCR7);

	/* free this SKB: contents were copied into the bounce buffer */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
737
738
739/*
740 *	Stop the interface.
 *	The interface is stopped when it is brought down.
742 */
743
static int dmfe_stop(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;

	DMFE_DBUG(0, "dmfe_stop", 0);

	/* disable system */
	netif_stop_queue(dev);

	/* deleted timer */
	del_timer_sync(&db->timer);

	/* Reset & stop DM910X board */
	outl(DM910X_RESET, ioaddr + DCR0);
	udelay(5);
	/* MII register 0, value 0x8000 — presumably the BMCR reset bit,
	 * cycling the PHY on the way down; TODO confirm against datasheet */
	phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);

	/* free interrupt */
	free_irq(dev->irq, dev);

	/* free allocated rx buffer */
	dmfe_free_rxbuffer(db);


	return 0;
}
771
772
773/*
 *	DM9102 interrupt handler
775 *	receive the packet to upper layer, free the transmitted packet
776 */
777
static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
	struct DEVICE *dev = dev_id;
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_interrupt()", 0);

	spin_lock_irqsave(&db->lock, flags);

	/* Got DM910X status; writing the value back acks the bits */
	db->cr5_data = inl(ioaddr + DCR5);
	outl(db->cr5_data, ioaddr + DCR5);
	if ( !(db->cr5_data & 0xc1) ) {
		/* none of the rx/tx/bus-error bits we service are set */
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Disable all interrupt in CR7 to solve the interrupt edge problem */
	outl(0, ioaddr + DCR7);

	/* Check system status */
	if (db->cr5_data & 0x2000) {
		/* system bus error happen */
		DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
		db->reset_fatal++;
		db->wait_reset = 1;	/* Need to RESET */
		spin_unlock_irqrestore(&db->lock, flags);
		/* note: returns with CR7 masked; the reset path restores it */
		return IRQ_HANDLED;
	}

	 /* Received the coming packet */
	if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
		dmfe_rx_packet(dev, db);

	/* reallocate rx descriptor buffer */
	if (db->rx_avail_cnt<RX_DESC_CNT)
		allocate_rx_buffer(db);

	/* Free the transmitted descriptor */
	if ( db->cr5_data & 0x01)
		dmfe_free_tx_pkt(dev, db);

	/* Mode Check: leave check mode (see dmfe_open) once triggered */
	if (db->dm910x_chk_mode & 0x2) {
		db->dm910x_chk_mode = 0x4;
		db->cr6_data |= 0x100;
		update_cr6(db->cr6_data, db->ioaddr);
	}

	/* Restore CR7 to enable interrupt mask */
	outl(db->cr7_data, ioaddr + DCR7);

	spin_unlock_irqrestore(&db->lock, flags);
	return IRQ_HANDLED;
}
835
836
837#ifdef CONFIG_NET_POLL_CONTROLLER
838/*
839 * Polling 'interrupt' - used by things like netconsole to send skbs
840 * without having to re-enable interrupts. It's not called while
841 * the interrupt routine is executing.
842 */
843
static void poll_dmfe (struct net_device *dev)
{
	/* disable_irq here is not very nice, but with the lockless
	   interrupt handler we have no other choice. Mask the line,
	   invoke the handler by hand, then unmask. */
	disable_irq(dev->irq);
	dmfe_interrupt (dev->irq, dev);
	enable_irq(dev->irq);
}
852#endif
853
854/*
855 *	Free TX resource after TX complete
856 */
857
static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct tx_desc *txptr;
	unsigned long ioaddr = dev->base_addr;
	u32 tdes0;

	/* Walk from the oldest in-flight descriptor, reclaiming every
	 * one the chip has handed back (owner bit 31 cleared) */
	txptr = db->tx_remove_ptr;
	while(db->tx_packet_cnt) {
		tdes0 = le32_to_cpu(txptr->tdes0);
		pr_debug("tdes0=%x\n", tdes0);
		if (tdes0 & 0x80000000)
			break;	/* still owned by the chip */

		/* A packet sent completed */
		db->tx_packet_cnt--;
		dev->stats.tx_packets++;

		/* Transmit statistic counter (0x7fffffff = no status) */
		if ( tdes0 != 0x7fffffff ) {
			pr_debug("tdes0=%x\n", tdes0);
			dev->stats.collisions += (tdes0 >> 3) & 0xf;
			dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
			if (tdes0 & TDES0_ERR_MASK) {
				dev->stats.tx_errors++;

				if (tdes0 & 0x0002) {	/* UnderRun */
					db->tx_fifo_underrun++;
					/* fall back to store-and-forward to
					 * stop further FIFO underruns */
					if ( !(db->cr6_data & CR6_SFT) ) {
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, db->ioaddr);
					}
				}
				if (tdes0 & 0x0100)
					db->tx_excessive_collision++;
				if (tdes0 & 0x0200)
					db->tx_late_collision++;
				if (tdes0 & 0x0400)
					db->tx_no_carrier++;
				if (tdes0 & 0x0800)
					db->tx_loss_carrier++;
				if (tdes0 & 0x4000)
					db->tx_jabber_timeout++;
			}
		}

    		txptr = txptr->next_tx_desc;
	}/* End of while */

	/* Update TX remove pointer to next */
	db->tx_remove_ptr = txptr;

	/* Send the Tx packet in queue: hand the next queued descriptor
	 * to the chip now that a slot has freed up */
	if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		db->tx_queue_cnt--;
		outl(0x1, ioaddr + DCR1);		/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	}

	/* Resource available check */
	if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
		netif_wake_queue(dev);	/* Active upper layer, send again */
}
922
923
924/*
 *	Calculate the CRC value of the Rx packet
926 *	flag = 	1 : return the reverse CRC (for the received packet CRC)
927 *		0 : return the normal CRC (for Hash Table index)
928 */
929
930static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
931{
932	u32 crc = crc32(~0, Data, Len);
933	if (flag) crc = ~crc;
934	return crc;
935}
936
937
938/*
 *	Receive incoming packets and pass them to the upper layer
940 */
941
static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb, *newskb;
	int rxlen;
	u32 rdes0;

	/* Walk the rx ring from the oldest filled descriptor */
	rxptr = db->rx_ready_ptr;

	while(db->rx_avail_cnt) {
		rdes0 = le32_to_cpu(rxptr->rdes0);
		if (rdes0 & 0x80000000)	/* packet owner check */
			break;

		db->rx_avail_cnt--;
		db->interval_rx_cnt++;

		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
				 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);

		if ( (rdes0 & 0x300) != 0x300) {
			/* A packet without First/Last flag — a fragment
			   of a multi-descriptor frame; reuse this SKB */
			DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
			dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
		} else {
			/* A packet with First/Last flag: frame length is
			   in bits 16-29, minus the 4-byte CRC */
			rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;

			/* error summary bit check */
			if (rdes0 & 0x8000) {
				/* This is a error packet */
				pr_debug("rdes0: %x\n", rdes0);
				dev->stats.rx_errors++;
				if (rdes0 & 1)
					dev->stats.rx_fifo_errors++;
				if (rdes0 & 2)
					dev->stats.rx_crc_errors++;
				if (rdes0 & 0x80)
					dev->stats.rx_length_errors++;
			}

			/* deliver good frames; in promiscuous mode even
			   errored frames longer than 6 bytes pass through */
			if ( !(rdes0 & 0x8000) ||
				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
				skb = rxptr->rx_skb_ptr;

				/* Received Packet CRC check need or not */
				if ( (db->dm910x_chk_mode & 1) &&
					(cal_CRC(skb->data, rxlen, 1) !=
					(*(u32 *) (skb->data+rxlen) ))) {
					/* Found a error received packet */
					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					db->dm910x_chk_mode = 3;
				} else {
					/* Good packet, send to upper layer */
					/* Short packets are copied into a
					   freshly allocated SKB so the big
					   rx buffer can be recycled */
					if ((rxlen < RX_COPY_SIZE) &&
						((newskb = dev_alloc_skb(rxlen + 2))
						!= NULL)) {

						skb = newskb;
						/* size less than COPY_SIZE, allocate a rxlen SKB */
						skb_reserve(skb, 2); /* 16byte align */
						skb_copy_from_linear_data(rxptr->rx_skb_ptr,
							  skb_put(skb, rxlen),
									  rxlen);
						dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					} else
						skb_put(skb, rxlen);

					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += rxlen;
				}
			} else {
				/* Reuse SKB buffer when the packet is error */
				DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
			}
		}

		rxptr = rxptr->next_rx_desc;
	}

	db->rx_ready_ptr = rxptr;
}
1029
1030/*
1031 * Set DM910X multicast address
1032 */
1033
1034static void dmfe_set_filter_mode(struct DEVICE * dev)
1035{
1036	struct dmfe_board_info *db = netdev_priv(dev);
1037	unsigned long flags;
1038	int mc_count = netdev_mc_count(dev);
1039
1040	DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1041	spin_lock_irqsave(&db->lock, flags);
1042
1043	if (dev->flags & IFF_PROMISC) {
1044		DMFE_DBUG(0, "Enable PROM Mode", 0);
1045		db->cr6_data |= CR6_PM | CR6_PBF;
1046		update_cr6(db->cr6_data, db->ioaddr);
1047		spin_unlock_irqrestore(&db->lock, flags);
1048		return;
1049	}
1050
1051	if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
1052		DMFE_DBUG(0, "Pass all multicast address", mc_count);
1053		db->cr6_data &= ~(CR6_PM | CR6_PBF);
1054		db->cr6_data |= CR6_PAM;
1055		spin_unlock_irqrestore(&db->lock, flags);
1056		return;
1057	}
1058
1059	DMFE_DBUG(0, "Set multicast address", mc_count);
1060	if (db->chip_id == PCI_DM9132_ID)
1061		dm9132_id_table(dev);	/* DM9132 */
1062	else
1063		send_filter_frame(dev);	/* DM9102/DM9102A */
1064	spin_unlock_irqrestore(&db->lock, flags);
1065}
1066
1067/*
1068 * 	Ethtool interace
1069 */
1070
1071static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1072			       struct ethtool_drvinfo *info)
1073{
1074	struct dmfe_board_info *np = netdev_priv(dev);
1075
1076	strcpy(info->driver, DRV_NAME);
1077	strcpy(info->version, DRV_VERSION);
1078	if (np->pdev)
1079		strcpy(info->bus_info, pci_name(np->pdev));
1080	else
1081		sprintf(info->bus_info, "EISA 0x%lx %d",
1082			dev->base_addr, dev->irq);
1083}
1084
1085static int dmfe_ethtool_set_wol(struct net_device *dev,
1086				struct ethtool_wolinfo *wolinfo)
1087{
1088	struct dmfe_board_info *db = netdev_priv(dev);
1089
1090	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1091		   		WAKE_ARP | WAKE_MAGICSECURE))
1092		   return -EOPNOTSUPP;
1093
1094	db->wol_mode = wolinfo->wolopts;
1095	return 0;
1096}
1097
1098static void dmfe_ethtool_get_wol(struct net_device *dev,
1099				 struct ethtool_wolinfo *wolinfo)
1100{
1101	struct dmfe_board_info *db = netdev_priv(dev);
1102
1103	wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
1104	wolinfo->wolopts = db->wol_mode;
1105}
1106
1107
/* ethtool entry points exported by this driver */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= dmfe_ethtool_get_drvinfo,
	.get_link               = ethtool_op_get_link,
	.set_wol		= dmfe_ethtool_set_wol,
	.get_wol		= dmfe_ethtool_get_wol,
};
1114
1115/*
1116 *	A periodic timer routine
1117 *	Dynamic media sense, allocate Rx buffer...
1118 */
1119
static void dmfe_timer(unsigned long data)
{
	u32 tmp_cr8;
	unsigned char tmp_cr12;
	struct DEVICE *dev = (struct DEVICE *) data;	/* timer payload is the netdev */
	struct dmfe_board_info *db = netdev_priv(dev);
 	unsigned long flags;

	int link_ok, link_ok_phy;

	DMFE_DBUG(0, "dmfe_timer()", 0);
	spin_lock_irqsave(&db->lock, flags);

	/* Media mode process when Link OK before enter this route */
	if (db->first_in_callback == 0) {
		/* First tick after open: the DM9102 (chip_type set)
		   gets a one-shot auto-negotiation restart, then the
		   timer re-fires 2 seconds later and we bail out */
		db->first_in_callback = 1;
		if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
			db->cr6_data &= ~0x40000;
			update_cr6(db->cr6_data, db->ioaddr);
			phy_write(db->ioaddr,
				  db->phy_addr, 0, 0x1000, db->chip_id);
			db->cr6_data |= 0x40000;
			update_cr6(db->cr6_data, db->ioaddr);
			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
			add_timer(&db->timer);
			spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
	}


	/* Operating Mode Check */
	if ( (db->dm910x_chk_mode & 0x1) &&
		(dev->stats.rx_packets > MAX_CHECK_PACKET) )
		db->dm910x_chk_mode = 0x4;

	/* Dynamic reset DM910X : system error or transmit time-out */
	tmp_cr8 = inl(db->ioaddr + DCR8);
	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
		/* CR8 nonzero with no RX during the last interval:
		   schedule a dynamic reset below */
		db->reset_cr8++;
		db->wait_reset = 1;
	}
	db->interval_rx_cnt = 0;

	/* TX polling kick monitor */
	if ( db->tx_packet_cnt &&
	     time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
		outl(0x1, dev->base_addr + DCR1);   /* Tx polling again */

		/* TX Timeout */
		if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
			db->reset_TXtimeout++;
			db->wait_reset = 1;
			dev_warn(&dev->dev, "Tx timeout - resetting\n");
		}
	}

	if (db->wait_reset) {
		/* Perform the deferred full reset, re-arm the timer
		   and return (link check restarts from scratch) */
		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
		db->reset_count++;
		dmfe_dynamic_reset(dev);
		db->first_in_callback = 0;
		db->timer.expires = DMFE_TIMER_WUT;
		add_timer(&db->timer);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* Link status check, Dynamic media type change */
	if (db->chip_id == PCI_DM9132_ID)
		tmp_cr12 = inb(db->ioaddr + DCR9 + 3);	/* DM9132 */
	else
		tmp_cr12 = inb(db->ioaddr + DCR12);	/* DM9102/DM9102A */

	if ( ((db->chip_id == PCI_DM9102_ID) &&
		(db->chip_revision == 0x30)) ||
		((db->chip_id == PCI_DM9132_ID) &&
		(db->chip_revision == 0x10)) ) {
		/* DM9102A Chip: bit1 set means link DOWN */
		if (tmp_cr12 & 2)
			link_ok = 0;
		else
			link_ok = 1;
	}
	else
		/*0x43 is used instead of 0x3 because bit 6 should represent
			link status of external PHY */
		link_ok = (tmp_cr12 & 0x43) ? 1 : 0;


	/* If chip reports that link is failed it could be because external
		PHY link status pin is not conected correctly to chip
		To be sure ask PHY too.
	*/

	/* need a dummy read because of PHY's register latch*/
	phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
	link_ok_phy = (phy_read (db->ioaddr,
		       db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;

	if (link_ok_phy != link_ok) {
		/* Disagreement: trust whichever side says "up" */
		DMFE_DBUG (0, "PHY and chip report different link status", 0);
		link_ok = link_ok | link_ok_phy;
 	}

	if ( !link_ok && netif_carrier_ok(dev)) {
		/* Link Failed */
		DMFE_DBUG(0, "Link Failed", tmp_cr12);
		netif_carrier_off(dev);

		/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
		/* AUTO or force 1M Homerun/Longrun don't need */
		if ( !(db->media_mode & 0x38) )
			phy_write(db->ioaddr, db->phy_addr,
				  0, 0x1000, db->chip_id);

		/* AUTO mode, if INT phyxcer link failed, select EXT device */
		if (db->media_mode & DMFE_AUTO) {
			/* 10/100M link failed, used 1M Home-Net */
			db->cr6_data|=0x00040000;	/* bit18=1, MII */
			db->cr6_data&=~0x00000200;	/* bit9=0, HD mode */
			update_cr6(db->cr6_data, db->ioaddr);
		}
	} else if (!netif_carrier_ok(dev)) {

		DMFE_DBUG(0, "Link link OK", tmp_cr12);

		/* Auto Sense Speed */
		if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
			netif_carrier_on(dev);
			SHOW_MEDIA_TYPE(db->op_mode);
		}

		dmfe_process_mode(db);
	}

	/* HPNA remote command check */
	if (db->HPNA_command & 0xf00) {
		db->HPNA_timer--;
		if (!db->HPNA_timer)
			dmfe_HPNA_remote_cmd_chk(db);
	}

	/* Timer active again */
	db->timer.expires = DMFE_TIMER_WUT;
	add_timer(&db->timer);
	spin_unlock_irqrestore(&db->lock, flags);
}
1268
1269
1270/*
1271 *	Dynamic reset the DM910X board
1272 *	Stop DM910X board
1273 *	Free Tx/Rx allocated memory
1274 *	Reset DM910X board
1275 *	Re-initialize DM910X board
1276 */
1277
static void dmfe_dynamic_reset(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

	/* Stop MAC controller */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
	update_cr6(db->cr6_data, dev->base_addr);
	outl(0, dev->base_addr + DCR7);		/* Disable Interrupt */
	/* write status back to itself -- presumably write-1-to-clear
	   of pending events; confirm against DM910x datasheet */
	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);

	/* Disable upper layer interface */
	netif_stop_queue(dev);

	/* Free Rx Allocate buffer */
	dmfe_free_rxbuffer(db);

	/* system variable init */
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	netif_carrier_off(dev);
	db->wait_reset = 0;

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Restart upper layer interface */
	netif_wake_queue(dev);
}
1309
1310
1311/*
1312 *	free all allocated rx buffer
1313 */
1314
1315static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1316{
1317	DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1318
1319	/* free allocated rx buffer */
1320	while (db->rx_avail_cnt) {
1321		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1322		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1323		db->rx_avail_cnt--;
1324	}
1325}
1326
1327
1328/*
1329 *	Reuse the SK buffer
1330 */
1331
static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
{
	struct rx_desc *rxptr = db->rx_insert_ptr;

	/* Put the skb back on the RX ring without freeing it.  Only
	   possible when the insert-point descriptor is not still
	   owned by the chip (OWN bit, rdes0 bit 31). */
	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
			    skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		/* DMA address must be visible before the OWN bit flips */
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		db->rx_avail_cnt++;
		db->rx_insert_ptr = rxptr->next_rx_desc;
	} else
		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}
1347
1348
1349/*
1350 *	Initialize transmit/Receive descriptor
1351 *	Using Chain structure, and allocate Tx/Rx buffer
1352 */
1353
static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
{
	struct tx_desc *tmp_tx;
	struct rx_desc *tmp_rx;
	unsigned char *tmp_buf;
	dma_addr_t tmp_tx_dma, tmp_rx_dma;
	dma_addr_t tmp_buf_dma;
	int i;

	DMFE_DBUG(0, "dmfe_descriptor_init()", 0);

	/* tx descriptor start pointer */
	db->tx_insert_ptr = db->first_tx_desc;
	db->tx_remove_ptr = db->first_tx_desc;
	outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */

	/* rx descriptor start pointer */
	/* The RX ring lives immediately after the TX ring in the same
	   allocation, on both the CPU and DMA side. */
	db->first_rx_desc = (void *)db->first_tx_desc +
			sizeof(struct tx_desc) * TX_DESC_CNT;

	db->first_rx_desc_dma =  db->first_tx_desc_dma +
			sizeof(struct tx_desc) * TX_DESC_CNT;
	db->rx_insert_ptr = db->first_rx_desc;
	db->rx_ready_ptr = db->first_rx_desc;
	outl(db->first_rx_desc_dma, ioaddr + DCR3);	/* RX DESC address */

	/* Init Transmit chain: each descriptor gets a fixed buffer
	   from the pre-allocated pool and chains (tdes3) to the next */
	tmp_buf = db->buf_pool_start;
	tmp_buf_dma = db->buf_pool_dma_start;
	tmp_tx_dma = db->first_tx_desc_dma;
	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
		tmp_tx->tx_buf_ptr = tmp_buf;
		tmp_tx->tdes0 = cpu_to_le32(0);
		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
		tmp_tx_dma += sizeof(struct tx_desc);
		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);	/* DMA addr of next desc */
		tmp_tx->next_tx_desc = tmp_tx + 1;
		tmp_buf = tmp_buf + TX_BUF_ALLOC;
		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
	}
	/* Close the TX ring: last descriptor points back to the first */
	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
	tmp_tx->next_tx_desc = db->first_tx_desc;

	 /* Init Receive descriptor chain */
	tmp_rx_dma=db->first_rx_desc_dma;
	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
		tmp_rx->rdes0 = cpu_to_le32(0);	/* not yet owned by chip */
		tmp_rx->rdes1 = cpu_to_le32(0x01000600);	/* chain mode + buffer size field */
		tmp_rx_dma += sizeof(struct rx_desc);
		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
		tmp_rx->next_rx_desc = tmp_rx + 1;
	}
	/* Close the RX ring as well */
	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
	tmp_rx->next_rx_desc = db->first_rx_desc;

	/* pre-allocate Rx buffer */
	allocate_rx_buffer(db);
}
1413
1414
1415/*
1416 *	Update CR6 value
1417 *	Firstly stop DM910X , then written value and start
1418 */
1419
1420static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1421{
1422	u32 cr6_tmp;
1423
1424	cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
1425	outl(cr6_tmp, ioaddr + DCR6);
1426	udelay(5);
1427	outl(cr6_data, ioaddr + DCR6);
1428	udelay(5);
1429}
1430
1431
1432/*
1433 *	Send a setup frame for DM9132
1434 *	This setup frame initialize DM910X address filter mode
1435*/
1436
1437static void dm9132_id_table(struct DEVICE *dev)
1438{
1439	struct netdev_hw_addr *ha;
1440	u16 * addrptr;
1441	unsigned long ioaddr = dev->base_addr+0xc0;		/* ID Table */
1442	u32 hash_val;
1443	u16 i, hash_table[4];
1444
1445	DMFE_DBUG(0, "dm9132_id_table()", 0);
1446
1447	/* Node address */
1448	addrptr = (u16 *) dev->dev_addr;
1449	outw(addrptr[0], ioaddr);
1450	ioaddr += 4;
1451	outw(addrptr[1], ioaddr);
1452	ioaddr += 4;
1453	outw(addrptr[2], ioaddr);
1454	ioaddr += 4;
1455
1456	/* Clear Hash Table */
1457	memset(hash_table, 0, sizeof(hash_table));
1458
1459	/* broadcast address */
1460	hash_table[3] = 0x8000;
1461
1462	/* the multicast address in Hash Table : 64 bits */
1463	netdev_for_each_mc_addr(ha, dev) {
1464		hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f;
1465		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1466	}
1467
1468	/* Write the hash table to MAC MD table */
1469	for (i = 0; i < 4; i++, ioaddr += 4)
1470		outw(hash_table[i], ioaddr);
1471}
1472
1473
1474/*
1475 *	Send a setup frame for DM9102/DM9102A
1476 *	This setup frame initialize DM910X address filter mode
1477 */
1478
static void send_filter_frame(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct tx_desc *txptr;
	u16 * addrptr;
	u32 * suptr;
	int i;

	DMFE_DBUG(0, "send_filter_frame()", 0);

	/* Build a setup frame in the next TX buffer: 16 perfect-filter
	   entries of three 16-bit words, each word in its own 32-bit
	   slot (16 * 12 = 192 bytes, the 0xc0 length in tdes1). */
	txptr = db->tx_insert_ptr;
	suptr = (u32 *) txptr->tx_buf_ptr;

	/* Node address */
	addrptr = (u16 *) dev->dev_addr;
	*suptr++ = addrptr[0];
	*suptr++ = addrptr[1];
	*suptr++ = addrptr[2];

	/* broadcast address */
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;

	/* fit the multicast address */
	netdev_for_each_mc_addr(ha, dev) {
		addrptr = (u16 *) ha->addr;
		*suptr++ = addrptr[0];
		*suptr++ = addrptr[1];
		*suptr++ = addrptr[2];
	}

	/* Pad the unused filter entries with the broadcast address */
	for (i = netdev_mc_count(dev); i < 14; i++) {
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
	}

	/* prepare the setup frame */
	db->tx_insert_ptr = txptr->next_tx_desc;
	txptr->tdes1 = cpu_to_le32(0x890000c0);	/* setup frame, len 0xc0 */

	/* Resource Check and Send the setup packet */
	if (!db->tx_packet_cnt) {
		/* TX idle: give the descriptor to the chip (OWN bit)
		   and kick transmit polling immediately */
		db->tx_packet_cnt++;
		txptr->tdes0 = cpu_to_le32(0x80000000);
		update_cr6(db->cr6_data | 0x2000, dev->base_addr);
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
		update_cr6(db->cr6_data, dev->base_addr);
		dev->trans_start = jiffies;
	} else
		db->tx_queue_cnt++;	/* Put in TX queue */
}
1534
1535
1536/*
1537 *	Allocate rx buffer,
1538 *	As possible as allocate maxiumn Rx buffer
1539 */
1540
1541static void allocate_rx_buffer(struct dmfe_board_info *db)
1542{
1543	struct rx_desc *rxptr;
1544	struct sk_buff *skb;
1545
1546	rxptr = db->rx_insert_ptr;
1547
1548	while(db->rx_avail_cnt < RX_DESC_CNT) {
1549		if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1550			break;
1551		rxptr->rx_skb_ptr = skb;
1552		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
1553				    RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1554		wmb();
1555		rxptr->rdes0 = cpu_to_le32(0x80000000);
1556		rxptr = rxptr->next_rx_desc;
1557		db->rx_avail_cnt++;
1558	}
1559
1560	db->rx_insert_ptr = rxptr;
1561}
1562
1563
1564/*
1565 *	Read one word data from the serial ROM
1566 */
1567
static u16 read_srom_word(long ioaddr, int offset)
{
	int i;
	u16 srom_data = 0;
	long cr9_ioaddr = ioaddr + DCR9;

	/* Bit-bang a serial-EEPROM read through CR9: select the chip,
	   clock out the read opcode and a 6-bit word offset, then
	   clock in 16 data bits MSB first. */
	outl(CR9_SROM_READ, cr9_ioaddr);
	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);	/* assert chip select */

	/* Send the Read Command 110b */
	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
	SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);

	/* Send the offset, MSB first (6 bits) */
	for (i = 5; i >= 0; i--) {
		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
		SROM_CLK_WRITE(srom_data, cr9_ioaddr);
	}

	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

	/* Shift in 16 data bits, one per manual clock pulse */
	for (i = 16; i > 0; i--) {
		outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
		udelay(5);
		srom_data = (srom_data << 1) |
				((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
		outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
		udelay(5);
	}

	outl(CR9_SROM_READ, cr9_ioaddr);	/* deselect the chip */
	return srom_data;
}
1602
1603
1604/*
1605 *	Auto sense the media mode
1606 */
1607
static u8 dmfe_sense_speed(struct dmfe_board_info * db)
{
	u8 ErrFlag = 0;	/* nonzero = link down / unknown mode */
	u16 phy_mode;

	/* CR6 bit18=0, select 10/100M */
	update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);

	/* Read the PHY status register twice: the first read returns
	   the latched value, the second the current state */
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);

	if ( (phy_mode & 0x24) == 0x24 ) {
		/* Status bits 2 and 5 set (link up, aneg complete --
		   standard MII BMSR layout): read the resolved
		   speed/duplex from the chip-specific register */
		if (db->chip_id == PCI_DM9132_ID)	/* DM9132 */
			phy_mode = phy_read(db->ioaddr,
				    db->phy_addr, 7, db->chip_id) & 0xf000;
		else 				/* DM9102/DM9102A */
			phy_mode = phy_read(db->ioaddr,
				    db->phy_addr, 17, db->chip_id) & 0xf000;
		pr_debug("Phy_mode %x\n", phy_mode);
		switch (phy_mode) {
		case 0x1000: db->op_mode = DMFE_10MHF; break;
		case 0x2000: db->op_mode = DMFE_10MFD; break;
		case 0x4000: db->op_mode = DMFE_100MHF; break;
		case 0x8000: db->op_mode = DMFE_100MFD; break;
		default: db->op_mode = DMFE_10MHF;	/* unknown: safe fallback */
			ErrFlag = 1;
			break;
		}
	} else {
		/* No link: default to 10M half duplex */
		db->op_mode = DMFE_10MHF;
		DMFE_DBUG(0, "Link Failed :", phy_mode);
		ErrFlag = 1;
	}

	return ErrFlag;
}
1644
1645
1646/*
1647 *	Set 10/100 phyxcer capability
1648 *	AUTO mode : phyxcer register4 is NIC capability
1649 *	Force mode: phyxcer register4 is the force media
1650 */
1651
static void dmfe_set_phyxcer(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Select 10/100M phyxcer */
	db->cr6_data &= ~0x40000;
	update_cr6(db->cr6_data, db->ioaddr);

	/* DM9009 Chip: Phyxcer reg18 bit12=0 */
	if (db->chip_id == PCI_DM9009_ID) {
		phy_reg = phy_read(db->ioaddr,
				   db->phy_addr, 18, db->chip_id) & ~0x1000;

		phy_write(db->ioaddr,
			  db->phy_addr, 18, phy_reg, db->chip_id);
	}

	/* Phyxcer capability setting: clear the advertisement bits
	   (0x01e0) of register 4, then rebuild them below */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;

	if (db->media_mode & DMFE_AUTO) {
		/* AUTO Mode: advertise the full capability set parsed
		   from the SROM (db->PHY_reg4) */
		phy_reg |= db->PHY_reg4;
	} else {
		/* Force Mode: advertise only the forced media type */
		switch(db->media_mode) {
		case DMFE_10MHF: phy_reg |= 0x20; break;
		case DMFE_10MFD: phy_reg |= 0x40; break;
		case DMFE_100MHF: phy_reg |= 0x80; break;
		case DMFE_100MFD: phy_reg |= 0x100; break;
		}
		if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;	/* DM9009: 10M only */
	}

  	/* Write new capability to Phyxcer Reg4 */
	/* Nothing advertised would leave the link dead: fall back to
	   AUTO with the full capability set */
	if ( !(phy_reg & 0x01e0)) {
		phy_reg|=db->PHY_reg4;
		db->media_mode|=DMFE_AUTO;
	}
	phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);

 	/* Restart Auto-Negotiation */
	if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
	if ( !db->chip_type )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
}
1699
1700
1701/*
1702 *	Process op-mode
1703 *	AUTO mode : PHY controller in Auto-negotiation Mode
1704 *	Force mode: PHY controller in force mode with HUB
1705 *			N-way force capability with SWITCH
1706 */
1707
static void dmfe_process_mode(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Full Duplex Mode Check */
	if (db->op_mode & 0x4)
		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
	else
		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */

	/* Transciver Selection */
	if (db->op_mode & 0x10)		/* 1M HomePNA */
		db->cr6_data |= 0x40000;/* External MII select */
	else
		db->cr6_data &= ~0x40000;/* Internal 10/100 transciver */

	update_cr6(db->cr6_data, db->ioaddr);

	/* 10/100M phyxcer force mode need */
	if ( !(db->media_mode & 0x18)) {
		/* Forece Mode */
		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
		if ( !(phy_reg & 0x1) ) {
			/* parter without N-Way capability: force the
			   PHY control register to the selected
			   speed/duplex instead of negotiating */
			phy_reg = 0x0;
			switch(db->op_mode) {
			case DMFE_10MHF: phy_reg = 0x0; break;
			case DMFE_10MFD: phy_reg = 0x100; break;
			case DMFE_100MHF: phy_reg = 0x2000; break;
			case DMFE_100MFD: phy_reg = 0x2100; break;
			}
			/* write twice; the DM9102 gets a 20ms pause
			   between the two writes */
			phy_write(db->ioaddr,
				  db->phy_addr, 0, phy_reg, db->chip_id);
       			if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
				mdelay(20);
			phy_write(db->ioaddr,
				  db->phy_addr, 0, phy_reg, db->chip_id);
		}
	}
}
1748
1749
1750/*
1751 *	Write a word to Phy register
1752 */
1753
static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
		      u16 phy_data, u32 chip_id)
{
	u16 i;
	unsigned long ioaddr;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132: PHY registers are directly mapped at 0x80 */
		ioaddr = iobase + 0x80 + offset * 4;
		outw(phy_data, ioaddr);
	} else {
		/* DM9102/DM9102A Chip: bit-bang an MII write frame */
		ioaddr = iobase + DCR9;

		/* Preamble: 35 synchronization clocks to the PHY
		   (note: loop count is 35, not 33 as the old comment
		   claimed; MII needs at least 32) */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send write command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send Phy address, MSB first (5 bits) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address, MSB first (5 bits) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* turnaround bits (10) before the data word */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Write a word data to PHY controller, MSB first */
		for ( i = 0x8000; i > 0; i >>= 1)
			phy_write_1bit(ioaddr,
				       phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
	}
}
1799
1800
1801/*
1802 *	Read a word data from phy register
1803 */
1804
static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
{
	int i;
	u16 phy_data;
	unsigned long ioaddr;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132 Chip: PHY registers are directly mapped at 0x80 */
		ioaddr = iobase + 0x80 + offset * 4;
		phy_data = inw(ioaddr);
	} else {
		/* DM9102/DM9102A Chip: bit-bang an MII read frame */
		ioaddr = iobase + DCR9;

		/* Preamble: 35 synchronization clocks to the PHY */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send read command(10) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Send Phy address, MSB first (5 bits) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address, MSB first (5 bits) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Skip transition (turnaround) state */
		phy_read_1bit(ioaddr);

		/* read 16bit data, MSB first */
		for (phy_data = 0, i = 0; i < 16; i++) {
			phy_data <<= 1;
			phy_data |= phy_read_1bit(ioaddr);
		}
	}

	return phy_data;
}
1853
1854
1855/*
1856 *	Write one bit data to Phy Controller
1857 */
1858
static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
{
	/* Clock one management-data bit out: present the data with
	   the clock low, raise the clock, drop it again; ~1us per
	   phase.  The exact three-write sequence is timing-critical. */
	outl(phy_data, ioaddr);			/* MII Clock Low */
	udelay(1);
	outl(phy_data | MDCLKH, ioaddr);	/* MII Clock High */
	udelay(1);
	outl(phy_data, ioaddr);			/* MII Clock Low */
	udelay(1);
}
1868
1869
1870/*
1871 *	Read one bit phy data from PHY controller
1872 */
1873
static u16 phy_read_1bit(unsigned long ioaddr)
{
	u16 phy_data;

	/* Pulse the management clock and sample the data-in bit
	   (CR9 bit 19).  NOTE(review): 0x50000/0x40000 presumably
	   encode clock-high/clock-low with the read direction bit --
	   confirm against the DM9102 datasheet. */
	outl(0x50000, ioaddr);
	udelay(1);
	phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
	outl(0x40000, ioaddr);
	udelay(1);

	return phy_data;
}
1886
1887
1888/*
1889 *	Parser SROM and media mode
1890 */
1891
static void dmfe_parse_srom(struct dmfe_board_info * db)
{
	char * srom = db->srom;
	int dmfe_mode, tmp_reg;

	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);

	/* Init CR15 */
	db->cr15_data = CR15_DEFAULT;

	/* Check SROM Version */
	if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
		/* SROM V4.01 */
		/* Get NIC support media mode */
		/* NOTE(review): these multi-byte casts into the srom
		   byte buffer may be unaligned on some architectures */
		db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
		db->PHY_reg4 = 0;
		/* Translate each capability bit into the matching
		   PHY register-4 advertisement bit */
		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
			switch( db->NIC_capability & tmp_reg ) {
			case 0x1: db->PHY_reg4 |= 0x0020; break;	/* 10MHF */
			case 0x2: db->PHY_reg4 |= 0x0040; break;	/* 10MFD */
			case 0x4: db->PHY_reg4 |= 0x0080; break;	/* 100MHF */
			case 0x8: db->PHY_reg4 |= 0x0100; break;	/* 100MFD */
			}
		}

		/* Media Mode Force or not check */
		dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
			     le32_to_cpup((__le32 *) (srom + 36)));
		switch(dmfe_mode) {
		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
		case 0x100:
		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
		}

		/* Special Function setting */
		/* VLAN function */
		if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
			db->cr15_data |= 0x40;

		/* Flow Control */
		if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
			db->cr15_data |= 0x400;

		/* TX pause packet */
		if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
			db->cr15_data |= 0x9800;
	}

	/* Parse HPNA parameter */
	db->HPNA_command = 1;

	/* Accept remote command or not */
	if (HPNA_rx_cmd == 0)
		db->HPNA_command |= 0x8000;

	 /* Issue remote command & operation mode */
	if (HPNA_tx_cmd == 1)
		switch(HPNA_mode) {	/* Issue Remote Command */
		case 0: db->HPNA_command |= 0x0904; break;
		case 1: db->HPNA_command |= 0x0a00; break;
		case 2: db->HPNA_command |= 0x0506; break;
		case 3: db->HPNA_command |= 0x0602; break;
		}
	else
		switch(HPNA_mode) {	/* Don't Issue */
		case 0: db->HPNA_command |= 0x0004; break;
		case 1: db->HPNA_command |= 0x0000; break;
		case 2: db->HPNA_command |= 0x0006; break;
		case 3: db->HPNA_command |= 0x0002; break;
		}

	/* Check DM9801 or DM9802 present or not */
	/* Select the external MII path, then probe the PHY ID
	   registers for a Davicom HomePNA companion chip */
	db->HPNA_present = 0;
	update_cr6(db->cr6_data|0x40000, db->ioaddr);
	tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
	if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
		/* DM9801 or DM9802 present */
		db->HPNA_timer = 8;
		if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
			/* DM9801 HomeRun */
			db->HPNA_present = 1;
			dmfe_program_DM9801(db, tmp_reg);
		} else {
			/* DM9802 LongRun */
			db->HPNA_present = 2;
			dmfe_program_DM9802(db);
		}
	}

}
1984
1985
1986/*
1987 *	Init HomeRun DM9801
1988 */
1989
static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
{
	uint reg17, reg25;

	/* Program the DM9801 HomeRun companion chip; the register
	   recipe depends on the silicon revision (PHY id reg 3).
	   If the module parameter was left at 0, use the default
	   noise floor. */
	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
	switch(HPNA_rev) {
	case 0xb900: /* DM9801 E3 */
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
		reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);	/* written back unmodified */
		break;
	case 0xb901: /* DM9801 E4 */
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
		break;
	case 0xb902: /* DM9801 E5 */
	case 0xb903: /* DM9801 E6 */
	default:
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
		break;
	}
	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
}
2022
2023
2024/*
2025 *	Init HomeRun DM9802
2026 */
2027
2028static void dmfe_program_DM9802(struct dmfe_board_info * db)
2029{
2030	uint phy_reg;
2031
2032	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
2033	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2034	phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2035	phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
2036	phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2037}
2038
2039
2040/*
2041 *	Check remote HPNA power and speed status. If not correct,
2042 *	issue command again.
2043*/
2044
2045static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2046{
2047	uint phy_reg;
2048
2049	/* Got remote device status */
2050	phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
2051	switch(phy_reg) {
2052	case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
2053	case 0x20: phy_reg = 0x0900;break; /* LP/HS */
2054	case 0x40: phy_reg = 0x0600;break; /* HP/LS */
2055	case 0x60: phy_reg = 0x0500;break; /* HP/HS */
2056	}
2057
2058	/* Check remote device status match our setting ot not */
2059	if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
2060		phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2061			  db->chip_id);
2062		db->HPNA_timer=8;
2063	} else
2064		db->HPNA_timer=600;	/* Match, every 10 minutes, check */
2065}
2066
2067
2068
/* Davicom (vendor 0x1282) PCI device IDs this driver binds to; the
   last field (driver_data) carries the internal chip identifier. */
static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = {
	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
2077
2078
2079#ifdef CONFIG_PM
static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct dmfe_board_info *db = netdev_priv(dev);
	u32 tmp;

	/* Disable upper layer interface */
	netif_device_detach(dev);

	/* Disable Tx/Rx */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
	update_cr6(db->cr6_data, dev->base_addr);

	/* Disable Interrupt */
	outl(0, dev->base_addr + DCR7);
	outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5);

	/* Free RX buffers */
	dmfe_free_rxbuffer(db);

	/* Enable WOL: program the link-change / magic-packet bits in
	   PCI config register 0x40 according to the armed wol_mode */
	pci_read_config_dword(pci_dev, 0x40, &tmp);
	tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET);

	if (db->wol_mode & WAKE_PHY)
		tmp |= DMFE_WOL_LINKCHANGE;
	if (db->wol_mode & WAKE_MAGIC)
		tmp |= DMFE_WOL_MAGICPACKET;

	pci_write_config_dword(pci_dev, 0x40, tmp);

	pci_enable_wake(pci_dev, PCI_D3hot, 1);
	pci_enable_wake(pci_dev, PCI_D3cold, 1);

	/* Power down device*/
	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state (pci_dev, state));

	return 0;
}
2120
2121static int dmfe_resume(struct pci_dev *pci_dev)
2122{
2123	struct net_device *dev = pci_get_drvdata(pci_dev);
2124	u32 tmp;
2125
2126	pci_set_power_state(pci_dev, PCI_D0);
2127	pci_restore_state(pci_dev);
2128
2129	/* Re-initialize DM910X board */
2130	dmfe_init_dm910x(dev);
2131
2132	/* Disable WOL */
2133	pci_read_config_dword(pci_dev, 0x40, &tmp);
2134
2135	tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
2136	pci_write_config_dword(pci_dev, 0x40, tmp);
2137
2138	pci_enable_wake(pci_dev, PCI_D3hot, 0);
2139	pci_enable_wake(pci_dev, PCI_D3cold, 0);
2140
2141	/* Restart upper layer interface */
2142	netif_device_attach(dev);
2143
2144	return 0;
2145}
#else
/* No CONFIG_PM: leave the PCI driver's suspend/resume hooks unset */
#define dmfe_suspend NULL
#define dmfe_resume NULL
#endif
2150
/* PCI driver glue: probe/remove plus the power-management hooks above
 * (which compile to NULL when CONFIG_PM is disabled). */
static struct pci_driver dmfe_driver = {
	.name		= "dmfe",
	.id_table	= dmfe_pci_tbl,
	.probe		= dmfe_init_one,
	.remove		= __devexit_p(dmfe_remove_one),
	.suspend        = dmfe_suspend,
	.resume         = dmfe_resume
};
2159
/* Module metadata reported by modinfo */
MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Module load-time options; values are validated/clamped in
 * dmfe_init_module() below */
module_param(debug, int, 0);
module_param(mode, byte, 0);
module_param(cr6set, int, 0);
module_param(chkmode, byte, 0);
module_param(HPNA_mode, byte, 0);
module_param(HPNA_rx_cmd, byte, 0);
module_param(HPNA_tx_cmd, byte, 0);
module_param(HPNA_NoiseFloor, byte, 0);
module_param(SF_mode, byte, 0);
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
		"Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");

MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
		"(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
2180
2181/*	Description:
2182 *	when user used insmod to add module, system invoked init_module()
2183 *	to initialize and register.
2184 */
2185
2186static int __init dmfe_init_module(void)
2187{
2188	int rc;
2189
2190	printk(version);
2191	printed_version = 1;
2192
2193	DMFE_DBUG(0, "init_module() ", debug);
2194
2195	if (debug)
2196		dmfe_debug = debug;	/* set debug flag */
2197	if (cr6set)
2198		dmfe_cr6_user_set = cr6set;
2199
2200 	switch(mode) {
2201   	case DMFE_10MHF:
2202	case DMFE_100MHF:
2203	case DMFE_10MFD:
2204	case DMFE_100MFD:
2205	case DMFE_1M_HPNA:
2206		dmfe_media_mode = mode;
2207		break;
2208	default:dmfe_media_mode = DMFE_AUTO;
2209		break;
2210	}
2211
2212	if (HPNA_mode > 4)
2213		HPNA_mode = 0;		/* Default: LP/HS */
2214	if (HPNA_rx_cmd > 1)
2215		HPNA_rx_cmd = 0;	/* Default: Ignored remote cmd */
2216	if (HPNA_tx_cmd > 1)
2217		HPNA_tx_cmd = 0;	/* Default: Don't issue remote cmd */
2218	if (HPNA_NoiseFloor > 15)
2219		HPNA_NoiseFloor = 0;
2220
2221	rc = pci_register_driver(&dmfe_driver);
2222	if (rc < 0)
2223		return rc;
2224
2225	return 0;
2226}
2227
2228
2229/*
2230 *	Description:
2231 *	when user used rmmod to delete module, system invoked clean_module()
2232 *	to un-register all registered services.
2233 */
2234
2235static void __exit dmfe_cleanup_module(void)
2236{
2237	DMFE_DBUG(0, "dmfe_clean_module() ", debug);
2238	pci_unregister_driver(&dmfe_driver);
2239}
2240
/* Register the module entry/exit points with the kernel */
module_init(dmfe_init_module);
module_exit(dmfe_cleanup_module);
2243