1/*
2    A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3    ethernet driver for Linux.
4    Copyright (C) 1997  Sten Wang
5
6    This program is free software; you can redistribute it and/or
7    modify it under the terms of the GNU General Public License
8    as published by the Free Software Foundation; either version 2
9    of the License, or (at your option) any later version.
10
11    This program is distributed in the hope that it will be useful,
12    but WITHOUT ANY WARRANTY; without even the implied warranty of
13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14    GNU General Public License for more details.
15
16    DAVICOM Web-Site: www.davicom.com.tw
17
18    Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19    Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
20
21    (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
22
23    Marcelo Tosatti <marcelo@conectiva.com.br> :
24    Made it compile in 2.3 (device to net_device)
25
26    Alan Cox <alan@redhat.com> :
27    Cleaned up for kernel merge.
28    Removed the back compatibility support
29    Reformatted, fixing spelling etc as I went
30    Removed IRQ 0-15 assumption
31
32    Jeff Garzik <jgarzik@pobox.com> :
33    Updated to use new PCI driver API.
34    Resource usage cleanups.
35    Report driver version to user.
36
37    Tobias Ringstrom <tori@unhappy.mine.nu> :
38    Cleaned up and added SMP safety.  Thanks go to Jeff Garzik,
39    Andrew Morton and Frank Davis for the SMP safety fixes.
40
41    Vojtech Pavlik <vojtech@suse.cz> :
42    Cleaned up pointer arithmetics.
43    Fixed a lot of 64bit issues.
44    Cleaned up printk()s a bit.
45    Fixed some obvious big endian problems.
46
47    Tobias Ringstrom <tori@unhappy.mine.nu> :
48    Use time_after for jiffies calculation.  Added ethtool
49    support.  Updated PCI resource allocation.  Do not
50    forget to unmap PCI mapped skbs.
51
52    Alan Cox <alan@redhat.com>
53    Added new PCI identifiers provided by Clear Zhang at ALi
54    for their 1563 ethernet device.
55
56    TODO
57
58    Check on 64 bit boxes.
59    Check and fix on big endian boxes.
60
61    Test and make sure PCI latency is now correct for all cases.
62*/
63
64#define DRV_NAME	"dmfe"
65#define DRV_VERSION	"1.36.4"
66#define DRV_RELDATE	"2002-01-17"
67
68#include <linux/module.h>
69#include <linux/kernel.h>
70#include <linux/string.h>
71#include <linux/timer.h>
72#include <linux/ptrace.h>
73#include <linux/errno.h>
74#include <linux/ioport.h>
75#include <linux/slab.h>
76#include <linux/interrupt.h>
77#include <linux/pci.h>
78#include <linux/dma-mapping.h>
79#include <linux/init.h>
80#include <linux/netdevice.h>
81#include <linux/etherdevice.h>
82#include <linux/ethtool.h>
83#include <linux/skbuff.h>
84#include <linux/delay.h>
85#include <linux/spinlock.h>
86#include <linux/crc32.h>
87#include <linux/bitops.h>
88
89#include <asm/processor.h>
90#include <asm/io.h>
91#include <asm/dma.h>
92#include <asm/uaccess.h>
93#include <asm/irq.h>
94
95
96/* Board/System/Debug information/definition ---------------- */
97#define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
98#define PCI_DM9102_ID   0x91021282      /* Davicom DM9102 ID */
99#define PCI_DM9100_ID   0x91001282      /* Davicom DM9100 ID */
100#define PCI_DM9009_ID   0x90091282      /* Davicom DM9009 ID */
101
102#define DM9102_IO_SIZE  0x80
103#define DM9102A_IO_SIZE 0x100
104#define TX_MAX_SEND_CNT 0x1             /* Maximum tx packet per time */
105#define TX_DESC_CNT     0x10            /* Allocated Tx descriptors */
106#define RX_DESC_CNT     0x20            /* Allocated Rx descriptors */
107#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)	/* Max TX packet count */
108#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)	/* TX wakeup count */
109#define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
110#define TX_BUF_ALLOC    0x600
111#define RX_ALLOC_SIZE   0x620
112#define DM910X_RESET    1
113#define CR0_DEFAULT     0x00E00000      /* TX & RX burst mode */
114#define CR6_DEFAULT     0x00080000      /* HD */
115#define CR7_DEFAULT     0x180c1
116#define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
117#define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
118#define MAX_PACKET_SIZE 1514
119#define DMFE_MAX_MULTICAST 14
120#define RX_COPY_SIZE	100
121#define MAX_CHECK_PACKET 0x8000
122#define DM9801_NOISE_FLOOR 8
123#define DM9802_NOISE_FLOOR 5
124
125#define DMFE_WOL_LINKCHANGE	0x20000000
126#define DMFE_WOL_SAMPLEPACKET	0x10000000
127#define DMFE_WOL_MAGICPACKET	0x08000000
128
129
130#define DMFE_10MHF      0
131#define DMFE_100MHF     1
132#define DMFE_10MFD      4
133#define DMFE_100MFD     5
134#define DMFE_AUTO       8
135#define DMFE_1M_HPNA    0x10
136
137#define DMFE_TXTH_72	0x400000	/* TX TH 72 byte */
138#define DMFE_TXTH_96	0x404000	/* TX TH 96 byte */
139#define DMFE_TXTH_128	0x0000		/* TX TH 128 byte */
140#define DMFE_TXTH_256	0x4000		/* TX TH 256 byte */
141#define DMFE_TXTH_512	0x8000		/* TX TH 512 byte */
142#define DMFE_TXTH_1K	0xC000		/* TX TH 1K  byte */
143
144#define DMFE_TIMER_WUT  (jiffies + HZ * 1)/* timer wakeup time : 1 second */
#define DMFE_TX_TIMEOUT ((3*HZ)/2)	/* tx packet time-out time 1.5 s */
#define DMFE_TX_KICK 	(HZ/2)	/* tx packet Kick-out time 0.5 s */
147
148#define DMFE_DBUG(dbug_now, msg, value) \
149	do { \
150 		if (dmfe_debug || (dbug_now)) \
151			printk(KERN_ERR DRV_NAME ": %s %lx\n",\
152 				(msg), (long) (value)); \
153	} while (0)
154
155#define SHOW_MEDIA_TYPE(mode) \
156	printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \
157		(mode & 1) ? "100":"10", (mode & 4) ? "full":"half");
158
159
160/* CR9 definition: SROM/MII */
161#define CR9_SROM_READ   0x4800
162#define CR9_SRCS        0x1
163#define CR9_SRCLK       0x2
164#define CR9_CRDOUT      0x8
165#define SROM_DATA_0     0x0
166#define SROM_DATA_1     0x4
167#define PHY_DATA_1      0x20000
168#define PHY_DATA_0      0x00000
169#define MDCLKH          0x10000
170
171#define PHY_POWER_DOWN	0x800
172
173#define SROM_V41_CODE   0x14
174
175#define SROM_CLK_WRITE(data, ioaddr) \
176	outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
177	udelay(5); \
178	outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
179	udelay(5); \
180	outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
181	udelay(5);
182
183#define __CHK_IO_SIZE(pci_id, dev_rev) \
184 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \
185	DM9102A_IO_SIZE: DM9102_IO_SIZE)
186
187#define CHK_IO_SIZE(pci_dev, dev_rev) \
188	(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev))
189
190/* Sten Check */
191#define DEVICE net_device
192
193/* Structure/enum declaration ------------------------------- */
/* Hardware Tx descriptor. The four little-endian words are read and
 * written by the chip over DMA; the trailing pointers are driver-only
 * bookkeeping (staging buffer and ring linkage). The aligned(32)
 * attribute makes each descriptor 32 bytes — presumably the alignment
 * the DM910x DMA engine expects; confirm against the datasheet. */
struct tx_desc {
        __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
        char *tx_buf_ptr;               /* Data for us */
        struct tx_desc *next_tx_desc;   /* next ring entry (driver-only) */
} __attribute__(( aligned(32) ));
199
/* Hardware Rx descriptor, mirror of tx_desc: four DMA-visible
 * little-endian words followed by driver-only fields (the skb that
 * backs rdes2's DMA buffer, and the ring linkage). */
struct rx_desc {
	__le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
	struct sk_buff *rx_skb_ptr;	/* Data for us */
	struct rx_desc *next_rx_desc;	/* next ring entry (driver-only) */
} __attribute__(( aligned(32) ));
205
/* Per-adapter private state, stored in the net_device private area
 * (netdev_priv). Holds chip identity, shadow copies of the CRx
 * registers, the Tx/Rx descriptor rings, and driver statistics. */
struct dmfe_board_info {
	u32 chip_id;			/* Chip vendor/Device ID */
	u32 chip_revision;		/* Chip revision */
	struct DEVICE *next_dev;	/* next device */
	struct pci_dev *pdev;		/* PCI device */
	spinlock_t lock;		/* serializes xmit/IRQ/filter paths */

	long ioaddr;			/* I/O base address */
	u32 cr0_data;			/* shadow of CR0 (bus/burst mode) */
	u32 cr5_data;			/* last CR5 status read in the IRQ */
	u32 cr6_data;			/* shadow of CR6 (operation mode) */
	u32 cr7_data;			/* shadow of CR7 (interrupt mask) */
	u32 cr15_data;			/* shadow of CR15 (jabber/watchdog) */

	/* pointer for memory physical address */
	dma_addr_t buf_pool_dma_ptr;	/* Tx buffer pool memory */
	dma_addr_t buf_pool_dma_start;	/* Tx buffer pool align dword */
	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool memory */
	dma_addr_t first_tx_desc_dma;
	dma_addr_t first_rx_desc_dma;

	/* descriptor pointer */
	unsigned char *buf_pool_ptr;	/* Tx buffer pool memory */
	unsigned char *buf_pool_start;	/* Tx buffer pool align dword */
	unsigned char *desc_pool_ptr;	/* descriptor pool memory */
	struct tx_desc *first_tx_desc;
	struct tx_desc *tx_insert_ptr;	/* where the next Tx frame is queued */
	struct tx_desc *tx_remove_ptr;	/* where completed Tx frames are reaped */
	struct rx_desc *first_rx_desc;
	struct rx_desc *rx_insert_ptr;	/* where fresh Rx skbs are attached */
	struct rx_desc *rx_ready_ptr;	/* packet come pointer */
	unsigned long tx_packet_cnt;	/* transmitted packet count */
	unsigned long tx_queue_cnt;	/* wait to send packet count */
	unsigned long rx_avail_cnt;	/* available rx descriptor count */
	unsigned long interval_rx_cnt;	/* rx packet count a callback time */

	u16 HPNA_command;		/* For HPNA register 16 */
	u16 HPNA_timer;			/* For HPNA remote device check */
	u16 dbug_cnt;
	u16 NIC_capability;		/* NIC media capability */
	u16 PHY_reg4;			/* Saved Phyxcer register 4 value */

	u8 HPNA_present;		/* 0:none, 1:DM9801, 2:DM9802 */
	u8 chip_type;			/* Keep DM9102A chip type */
	u8 media_mode;			/* user specify media mode */
	u8 op_mode;			/* real work media mode */
	u8 phy_addr;
	u8 wait_reset;			/* Hardware failed, need to reset */
	u8 dm910x_chk_mode;		/* Operating mode check */
	u8 first_in_callback;		/* Flag to record state */
	u8 wol_mode;			/* user WOL settings */
	struct timer_list timer;	/* 1 Hz housekeeping timer (dmfe_timer) */

	/* System defined statistic counter */
	struct net_device_stats stats;

	/* Driver defined statistic counter */
	unsigned long tx_fifo_underrun;
	unsigned long tx_loss_carrier;
	unsigned long tx_no_carrier;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_jabber_timeout;
	unsigned long reset_count;
	unsigned long reset_cr8;
	unsigned long reset_fatal;
	unsigned long reset_TXtimeout;

	/* NIC SROM data */
	unsigned char srom[128];
};
277
/* DM910x control/status register offsets in I/O space.
 * The registers are spaced 8 bytes apart (CR0 at 0x00 ... CR15 at 0x78). */
enum dmfe_offsets {
	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
	DCR15 = 0x78
};
284
/* Bit definitions for CR6, the operation-mode register
 * (receive/transmit start, promiscuous/pass-all-multicast, etc.). */
enum dmfe_CR6_bits {
	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};
290
/* Global variable declaration ----------------------------- */
/* Non-zero once the version banner has been printed */
static int __devinitdata printed_version;
static char version[] __devinitdata =
	KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version "
	DRV_VERSION " (" DRV_RELDATE ")\n";

/* Working settings; presumably seeded from the module parameters
   below at load time — confirm in module init (not in this chunk) */
static int dmfe_debug;
static unsigned char dmfe_media_mode = DMFE_AUTO;
static u32 dmfe_cr6_user_set;

/* For module input parameter */
static int debug;
static u32 cr6set;
static unsigned char mode = 8;	/* 8 == DMFE_AUTO media selection */
static u8 chkmode = 1;
static u8 HPNA_mode;		/* Default: Low Power/High Speed */
static u8 HPNA_rx_cmd;		/* Default: Disable Rx remote command */
static u8 HPNA_tx_cmd;		/* Default: Don't issue remote command */
static u8 HPNA_NoiseFloor;	/* Default: HPNA NoiseFloor */
static u8 SF_mode;		/* Special Function: 1:VLAN, 2:RX Flow Control
				   4: TX pause packet */
312
313
314/* function declaration ------------------------------------- */
315static int dmfe_open(struct DEVICE *);
316static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
317static int dmfe_stop(struct DEVICE *);
318static struct net_device_stats * dmfe_get_stats(struct DEVICE *);
319static void dmfe_set_filter_mode(struct DEVICE *);
320static const struct ethtool_ops netdev_ethtool_ops;
321static u16 read_srom_word(long ,int);
322static irqreturn_t dmfe_interrupt(int , void *);
323#ifdef CONFIG_NET_POLL_CONTROLLER
324static void poll_dmfe (struct net_device *dev);
325#endif
326static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
327static void allocate_rx_buffer(struct dmfe_board_info *);
328static void update_cr6(u32, unsigned long);
329static void send_filter_frame(struct DEVICE * ,int);
330static void dm9132_id_table(struct DEVICE * ,int);
331static u16 phy_read(unsigned long, u8, u8, u32);
332static void phy_write(unsigned long, u8, u8, u16, u32);
333static void phy_write_1bit(unsigned long, u32);
334static u16 phy_read_1bit(unsigned long);
335static u8 dmfe_sense_speed(struct dmfe_board_info *);
336static void dmfe_process_mode(struct dmfe_board_info *);
337static void dmfe_timer(unsigned long);
338static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
339static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
340static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
341static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
342static void dmfe_dynamic_reset(struct DEVICE *);
343static void dmfe_free_rxbuffer(struct dmfe_board_info *);
344static void dmfe_init_dm910x(struct DEVICE *);
345static void dmfe_parse_srom(struct dmfe_board_info *);
346static void dmfe_program_DM9801(struct dmfe_board_info *, int);
347static void dmfe_program_DM9802(struct dmfe_board_info *);
348static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
349static void dmfe_set_phyxcer(struct dmfe_board_info *);
350
351/* DM910X network board routine ---------------------------- */
352
353/*
354 *	Search DM910X board ,allocate space and register it
355 */
356
357static int __devinit dmfe_init_one (struct pci_dev *pdev,
358				    const struct pci_device_id *ent)
359{
360	struct dmfe_board_info *db;	/* board information structure */
361	struct net_device *dev;
362	u32 dev_rev, pci_pmr;
363	int i, err;
364
365	DMFE_DBUG(0, "dmfe_init_one()", 0);
366
367	if (!printed_version++)
368		printk(version);
369
370	/* Init network device */
371	dev = alloc_etherdev(sizeof(*db));
372	if (dev == NULL)
373		return -ENOMEM;
374	SET_MODULE_OWNER(dev);
375	SET_NETDEV_DEV(dev, &pdev->dev);
376
377	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
378		printk(KERN_WARNING DRV_NAME
379			": 32-bit PCI DMA not available.\n");
380		err = -ENODEV;
381		goto err_out_free;
382	}
383
384	/* Enable Master/IO access, Disable memory access */
385	err = pci_enable_device(pdev);
386	if (err)
387		goto err_out_free;
388
389	if (!pci_resource_start(pdev, 0)) {
390		printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
391		err = -ENODEV;
392		goto err_out_disable;
393	}
394
395	/* Read Chip revision */
396	pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
397
398	if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev, dev_rev)) ) {
399		printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
400		err = -ENODEV;
401		goto err_out_disable;
402	}
403
404
405	if (pci_request_regions(pdev, DRV_NAME)) {
406		printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
407		err = -ENODEV;
408		goto err_out_disable;
409	}
410
411	/* Init system & device */
412	db = netdev_priv(dev);
413
414	/* Allocate Tx/Rx descriptor memory */
415	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
416			DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
417
418	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
419			TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
420
421	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
422	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
423	db->buf_pool_start = db->buf_pool_ptr;
424	db->buf_pool_dma_start = db->buf_pool_dma_ptr;
425
426	db->chip_id = ent->driver_data;
427	db->ioaddr = pci_resource_start(pdev, 0);
428	db->chip_revision = dev_rev;
429	db->wol_mode = 0;
430
431	db->pdev = pdev;
432
433	dev->base_addr = db->ioaddr;
434	dev->irq = pdev->irq;
435	pci_set_drvdata(pdev, dev);
436	dev->open = &dmfe_open;
437	dev->hard_start_xmit = &dmfe_start_xmit;
438	dev->stop = &dmfe_stop;
439	dev->get_stats = &dmfe_get_stats;
440	dev->set_multicast_list = &dmfe_set_filter_mode;
441#ifdef CONFIG_NET_POLL_CONTROLLER
442	dev->poll_controller = &poll_dmfe;
443#endif
444	dev->ethtool_ops = &netdev_ethtool_ops;
445	netif_carrier_off(dev);
446	spin_lock_init(&db->lock);
447
448	pci_read_config_dword(pdev, 0x50, &pci_pmr);
449	pci_pmr &= 0x70000;
450	if ( (pci_pmr == 0x10000) && (dev_rev == 0x02000031) )
451		db->chip_type = 1;	/* DM9102A E3 */
452	else
453		db->chip_type = 0;
454
455	/* read 64 word srom data */
456	for (i = 0; i < 64; i++)
457		((__le16 *) db->srom)[i] =
458			cpu_to_le16(read_srom_word(db->ioaddr, i));
459
460	/* Set Node address */
461	for (i = 0; i < 6; i++)
462		dev->dev_addr[i] = db->srom[20 + i];
463
464	err = register_netdev (dev);
465	if (err)
466		goto err_out_res;
467
468	printk(KERN_INFO "%s: Davicom DM%04lx at pci%s,",
469		dev->name,
470		ent->driver_data >> 16,
471		pci_name(pdev));
472	for (i = 0; i < 6; i++)
473		printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
474	printk(", irq %d.\n", dev->irq);
475
476	pci_set_master(pdev);
477
478	return 0;
479
480err_out_res:
481	pci_release_regions(pdev);
482err_out_disable:
483	pci_disable_device(pdev);
484err_out_free:
485	pci_set_drvdata(pdev, NULL);
486	free_netdev(dev);
487
488	return err;
489}
490
491
492static void __devexit dmfe_remove_one (struct pci_dev *pdev)
493{
494	struct net_device *dev = pci_get_drvdata(pdev);
495	struct dmfe_board_info *db = netdev_priv(dev);
496
497	DMFE_DBUG(0, "dmfe_remove_one()", 0);
498
499 	if (dev) {
500
501		unregister_netdev(dev);
502
503		pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
504					DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
505 					db->desc_pool_dma_ptr);
506		pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
507					db->buf_pool_ptr, db->buf_pool_dma_ptr);
508		pci_release_regions(pdev);
509		free_netdev(dev);	/* free board information */
510
511		pci_set_drvdata(pdev, NULL);
512	}
513
514	DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
515}
516
517
518/*
519 *	Open the interface.
520 *	The interface is opened whenever "ifconfig" actives it.
521 */
522
static int dmfe_open(struct DEVICE *dev)
{
	int ret;
	struct dmfe_board_info *db = netdev_priv(dev);

	DMFE_DBUG(0, "dmfe_open", 0);

	/* The DM910x interrupt line may be shared with other devices */
	ret = request_irq(dev->irq, &dmfe_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (ret)
		return ret;

	/* system variable init */
	db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->wait_reset = 0;

	db->first_in_callback = 0;
	db->NIC_capability = 0xf;	/* All capability*/
	db->PHY_reg4 = 0x1e0;

	/* CR6 operation mode decision: DM9132 and DM9102A revisions
	   >= 0x02000030 run in normal mode; older silicon enters the
	   store-and-forward "check" work-around mode */
	if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
		(db->chip_revision >= 0x02000030) ) {
    		db->cr6_data |= DMFE_TXTH_256;
		db->cr0_data = CR0_DEFAULT;
		db->dm910x_chk_mode=4;		/* Enter the normal mode */
 	} else {
		db->cr6_data |= CR6_SFT;	/* Store & Forward mode */
		db->cr0_data = 0;
		db->dm910x_chk_mode = 1;	/* Enter the check mode */
	}

	/* Initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Active System Interface */
	netif_wake_queue(dev);

	/* set and active a timer process; first expiry is pushed out
	   an extra 2 seconds beyond the normal 1-second period */
	init_timer(&db->timer);
	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
	db->timer.data = (unsigned long)dev;
	db->timer.function = &dmfe_timer;
	add_timer(&db->timer);

	return 0;
}
573
574
/*	Initialize DM910X board
 *	Reset DM910X board
 *	Initialize TX/Rx descriptor chain structure
 *	Send the set-up frame
 *	Enable Tx/Rx machine
 */
581
static void dmfe_init_dm910x(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

	/* Reset DM910x MAC controller, then restore the saved CR0
	   bus-mode value; the delays give the chip time to settle */
	outl(DM910X_RESET, ioaddr + DCR0);	/* RESET MAC */
	udelay(100);
	outl(db->cr0_data, ioaddr + DCR0);
	udelay(5);

	/* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
	db->phy_addr = 1;

	/* Parse SROM contents and pick the media mode */
	dmfe_parse_srom(db);
	db->media_mode = dmfe_media_mode;

	/* RESET Phyxcer Chip by GPR port bit 7 */
	outl(0x180, ioaddr + DCR12);		/* Let bit 7 output port */
	if (db->chip_id == PCI_DM9009_ID) {
		outl(0x80, ioaddr + DCR12);	/* Issue RESET signal */
		mdelay(300);			/* Delay 300 ms */
	}
	outl(0x0, ioaddr + DCR12);	/* Clear RESET signal */

	/* Process Phyxcer Media Mode (skipped in forced HomePNA 1M mode) */
	if ( !(db->media_mode & 0x10) )	/* Force 1M mode */
		dmfe_set_phyxcer(db);

	/* Media Mode Process */
	if ( !(db->media_mode & DMFE_AUTO) )
		db->op_mode = db->media_mode; 	/* Force Mode */

	/* Initialize Transmit/Receive descriptor and CR3/4 */
	dmfe_descriptor_init(db, ioaddr);

	/* Init CR6 to program DM910x operation */
	update_cr6(db->cr6_data, ioaddr);

	/* Send setup frame to program the MAC/multicast filter */
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev, dev->mc_count);	/* DM9132 */
	else
		send_filter_frame(dev, dev->mc_count);	/* DM9102/DM9102A */

	/* Init CR7, interrupt active bit */
	db->cr7_data = CR7_DEFAULT;
	outl(db->cr7_data, ioaddr + DCR7);

	/* Init CR15, Tx jabber and Rx watchdog timer */
	outl(db->cr15_data, ioaddr + DCR15);

	/* Enable DM910X Tx/Rx function */
	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
	update_cr6(db->cr6_data, ioaddr);
}
641
642
643/*
644 *	Hardware start transmission.
645 *	Send a packet to media from the upper layer.
646 */
647
static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct tx_desc *txptr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_start_xmit", 0);

	/* Resource flag check: stop the queue up front; it is re-woken
	   below once we know descriptors remain */
	netif_stop_queue(dev);

	/* Too large packet check: oversized frames are dropped, not sent */
	if (skb->len > MAX_PACKET_SIZE) {
		printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
		dev_kfree_skb(skb);
		return 0;
	}

	spin_lock_irqsave(&db->lock, flags);

	/* No Tx resource check, it should never happen normally */
	if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
		spin_unlock_irqrestore(&db->lock, flags);
		printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
		       db->tx_queue_cnt);
		return 1;
	}

	/* Disable NIC interrupt while touching the ring */
	outl(0, dev->base_addr + DCR7);

	/* transmit this packet: copy it into the descriptor's
	   pre-allocated staging buffer and fill in the length */
	txptr = db->tx_insert_ptr;
	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

	/* Point to next transmit free descriptor */
	db->tx_insert_ptr = txptr->next_tx_desc;

	/* Transmit Packet Process: hand the descriptor to the chip now
	   if the in-flight budget allows, otherwise leave it queued for
	   dmfe_free_tx_pkt() to kick later */
	if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	} else {
		db->tx_queue_cnt++;			/* queue TX packet */
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
	}

	/* Tx resource check */
	if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
		netif_wake_queue(dev);

	/* Restore CR7 to enable interrupt */
	spin_unlock_irqrestore(&db->lock, flags);
	outl(db->cr7_data, dev->base_addr + DCR7);

	/* free this SKB — the data was copied to the staging buffer above */
	dev_kfree_skb(skb);

	return 0;
}
711
712
713/*
714 *	Stop the interface.
715 *	The interface is stopped when it is brought.
716 */
717
static int dmfe_stop(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;

	DMFE_DBUG(0, "dmfe_stop", 0);

	/* disable system */
	netif_stop_queue(dev);

	/* stop the housekeeping timer (waits for a running handler) */
	del_timer_sync(&db->timer);

	/* Reset & stop DM910X board; the 0x8000 write to PHY register 0
	   presumably resets/powers down the transceiver — confirm
	   against the DM910x datasheet */
	outl(DM910X_RESET, ioaddr + DCR0);
	udelay(5);
	phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);

	/* free interrupt */
	free_irq(dev->irq, dev);

	/* free allocated rx buffer */
	dmfe_free_rxbuffer(db);


	return 0;
}
745
746
747/*
748 *	DM9102 insterrupt handler
749 *	receive the packet to upper layer, free the transmitted packet
750 */
751
static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
	struct DEVICE *dev = dev_id;
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_interrupt()", 0);

	spin_lock_irqsave(&db->lock, flags);

	/* Got DM910X status: read CR5 and write the same value back,
	   which acknowledges (clears) the asserted status bits */
	db->cr5_data = inl(ioaddr + DCR5);
	outl(db->cr5_data, ioaddr + DCR5);
	if ( !(db->cr5_data & 0xc1) ) {
		/* NOTE(review): no serviced status bit set; the IRQ is
		   registered IRQF_SHARED, so returning IRQ_NONE here
		   would let the core detect spurious interrupts —
		   confirm before changing */
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Disable all interrupt in CR7 to solve the interrupt edge problem */
	outl(0, ioaddr + DCR7);

	/* Check system status: fatal bus error forces a deferred reset
	   (performed later, outside interrupt context) */
	if (db->cr5_data & 0x2000) {
		/* system bus error happen */
		DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
		db->reset_fatal++;
		db->wait_reset = 1;	/* Need to RESET */
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	 /* Received the coming packet */
	if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
		dmfe_rx_packet(dev, db);

	/* reallocate rx descriptor buffer */
	if (db->rx_avail_cnt<RX_DESC_CNT)
		allocate_rx_buffer(db);

	/* Free the transmitted descriptor */
	if ( db->cr5_data & 0x01)
		dmfe_free_tx_pkt(dev, db);

	/* Mode Check: leave check mode 2 for normal mode 4 */
	if (db->dm910x_chk_mode & 0x2) {
		db->dm910x_chk_mode = 0x4;
		db->cr6_data |= 0x100;
		update_cr6(db->cr6_data, db->ioaddr);
	}

	/* Restore CR7 to enable interrupt mask */
	outl(db->cr7_data, ioaddr + DCR7);

	spin_unlock_irqrestore(&db->lock, flags);
	return IRQ_HANDLED;
}
809
810
811#ifdef CONFIG_NET_POLL_CONTROLLER
812/*
813 * Polling 'interrupt' - used by things like netconsole to send skbs
814 * without having to re-enable interrupts. It's not called while
815 * the interrupt routine is executing.
816 */
817
static void poll_dmfe (struct net_device *dev)
{
	/* disable_irq here is not very nice, but with the lockless
	   interrupt handler we have no other choice. Mask the line,
	   run one handler pass by hand, then unmask. */
	disable_irq(dev->irq);
	dmfe_interrupt (dev->irq, dev);
	enable_irq(dev->irq);
}
826#endif
827
828/*
829 *	Free TX resource after TX complete
830 */
831
/* Reclaim completed Tx descriptors, fold their status into the
 * statistics, and kick the next queued packet. Called from the
 * interrupt handler with db->lock already held. */
static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct tx_desc *txptr;
	unsigned long ioaddr = dev->base_addr;
	u32 tdes0;

	txptr = db->tx_remove_ptr;
	while(db->tx_packet_cnt) {
		tdes0 = le32_to_cpu(txptr->tdes0);
		/* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
		if (tdes0 & 0x80000000)
			break;	/* still owned by the chip — stop reaping */

		/* A packet sent completed */
		db->tx_packet_cnt--;
		db->stats.tx_packets++;

		/* Transmit statistic counter; 0x7fffffff appears to mark
		   a setup/filter frame rather than real traffic — confirm
		   against send_filter_frame() */
		if ( tdes0 != 0x7fffffff ) {
			/* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
			db->stats.collisions += (tdes0 >> 3) & 0xf;
			db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
			if (tdes0 & TDES0_ERR_MASK) {
				db->stats.tx_errors++;

				if (tdes0 & 0x0002) {	/* UnderRun */
					db->tx_fifo_underrun++;
					/* fall back to store-and-forward
					   mode after a FIFO underrun */
					if ( !(db->cr6_data & CR6_SFT) ) {
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, db->ioaddr);
					}
				}
				if (tdes0 & 0x0100)
					db->tx_excessive_collision++;
				if (tdes0 & 0x0200)
					db->tx_late_collision++;
				if (tdes0 & 0x0400)
					db->tx_no_carrier++;
				if (tdes0 & 0x0800)
					db->tx_loss_carrier++;
				if (tdes0 & 0x4000)
					db->tx_jabber_timeout++;
			}
		}

    		txptr = txptr->next_tx_desc;
	}/* End of while */

	/* Update TX remove pointer to next */
	db->tx_remove_ptr = txptr;

	/* Send the Tx packet in queue */
	if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		db->tx_queue_cnt--;
		outl(0x1, ioaddr + DCR1);		/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	}

	/* Resource available check */
	if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
		netif_wake_queue(dev);	/* Active upper layer, send again */
}
896
897
898/*
899 *	Calculate the CRC valude of the Rx packet
900 *	flag = 	1 : return the reverse CRC (for the received packet CRC)
901 *		0 : return the normal CRC (for Hash Table index)
902 */
903
904static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
905{
906	u32 crc = crc32(~0, Data, Len);
907	if (flag) crc = ~crc;
908	return crc;
909}
910
911
912/*
913 *	Receive the come packet and pass to upper layer
914 */
915
/* Drain all chip-completed Rx descriptors: hand good frames to the
 * network stack (copying small ones into a fresh skb), recycle the
 * descriptor's skb on any error. Called from the interrupt handler
 * with db->lock held. */
static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb, *newskb;
	int rxlen;
	u32 rdes0;

	rxptr = db->rx_ready_ptr;

	while(db->rx_avail_cnt) {
		rdes0 = le32_to_cpu(rxptr->rdes0);
		if (rdes0 & 0x80000000)	/* packet owner check */
			break;	/* still owned by the chip */

		db->rx_avail_cnt--;
		db->interval_rx_cnt++;

		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
				 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);

		if ( (rdes0 & 0x300) != 0x300) {
			/* A packet without First/Last flag */
			/* reuse this SKB */
			DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
			dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
		} else {
			/* A packet with First/Last flag: frame length from
			   rdes0 bits 16-29, minus the 4-byte CRC */
			rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;

			/* error summary bit check */
			if (rdes0 & 0x8000) {
				/* This is a error packet */
				//printk(DRV_NAME ": rdes0: %lx\n", rdes0);
				db->stats.rx_errors++;
				if (rdes0 & 1)
					db->stats.rx_fifo_errors++;
				if (rdes0 & 2)
					db->stats.rx_crc_errors++;
				if (rdes0 & 0x80)
					db->stats.rx_length_errors++;
			}

			/* deliver if error-free, or in pass-mode (CR6_PM)
			   when the frame is at least plausible */
			if ( !(rdes0 & 0x8000) ||
				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
				skb = rxptr->rx_skb_ptr;

				/* Received Packet CRC check need or not */
				if ( (db->dm910x_chk_mode & 1) &&
					(cal_CRC(skb->data, rxlen, 1) !=
					(*(u32 *) (skb->data+rxlen) ))) {
					/* Found a error received packet */
					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					db->dm910x_chk_mode = 3;
				} else {
					/* Good packet, send to upper layer */
					/* Short packet: copy into a fresh small SKB
					   so the big Rx buffer can be recycled */
					if ((rxlen < RX_COPY_SIZE) &&
						((newskb = dev_alloc_skb(rxlen + 2))
						!= NULL)) {

						skb = newskb;
						/* size less than COPY_SIZE, allocate a rxlen SKB */
						skb_reserve(skb, 2); /* 16byte align */
						skb_copy_from_linear_data(rxptr->rx_skb_ptr,
							  skb_put(skb, rxlen),
									  rxlen);
						dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					} else
						skb_put(skb, rxlen);

					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->last_rx = jiffies;
					db->stats.rx_packets++;
					db->stats.rx_bytes += rxlen;
				}
			} else {
				/* Reuse SKB buffer when the packet is error */
				DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
			}
		}

		rxptr = rxptr->next_rx_desc;
	}

	db->rx_ready_ptr = rxptr;
}
1004
1005
1006/*
1007 *	Get statistics from driver.
1008 */
1009
1010static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
1011{
1012	struct dmfe_board_info *db = netdev_priv(dev);
1013
1014	DMFE_DBUG(0, "dmfe_get_stats", 0);
1015	return &db->stats;
1016}
1017
1018
1019/*
1020 * Set DM910X multicast address
1021 */
1022
static void dmfe_set_filter_mode(struct DEVICE * dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
	spin_lock_irqsave(&db->lock, flags);

	if (dev->flags & IFF_PROMISC) {
		DMFE_DBUG(0, "Enable PROM Mode", 0);
		db->cr6_data |= CR6_PM | CR6_PBF;
		update_cr6(db->cr6_data, db->ioaddr);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
		DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
		db->cr6_data &= ~(CR6_PM | CR6_PBF);
		db->cr6_data |= CR6_PAM;
		/* NOTE(review): unlike the promisc branch above, CR6 is
		   not flushed to the chip here — presumably picked up by
		   a later update_cr6() elsewhere; confirm */
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* Small multicast list: program the hardware filter directly */
	DMFE_DBUG(0, "Set multicast address", dev->mc_count);
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev, dev->mc_count);	/* DM9132 */
	else
		send_filter_frame(dev, dev->mc_count); 	/* DM9102/DM9102A */
	spin_unlock_irqrestore(&db->lock, flags);
}
1054
1055/*
1056 * 	Ethtool interace
1057 */
1058
1059static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1060			       struct ethtool_drvinfo *info)
1061{
1062	struct dmfe_board_info *np = netdev_priv(dev);
1063
1064	strcpy(info->driver, DRV_NAME);
1065	strcpy(info->version, DRV_VERSION);
1066	if (np->pdev)
1067		strcpy(info->bus_info, pci_name(np->pdev));
1068	else
1069		sprintf(info->bus_info, "EISA 0x%lx %d",
1070			dev->base_addr, dev->irq);
1071}
1072
1073static int dmfe_ethtool_set_wol(struct net_device *dev,
1074				struct ethtool_wolinfo *wolinfo)
1075{
1076	struct dmfe_board_info *db = netdev_priv(dev);
1077
1078	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1079		   		WAKE_ARP | WAKE_MAGICSECURE))
1080		   return -EOPNOTSUPP;
1081
1082	db->wol_mode = wolinfo->wolopts;
1083	return 0;
1084}
1085
1086static void dmfe_ethtool_get_wol(struct net_device *dev,
1087				 struct ethtool_wolinfo *wolinfo)
1088{
1089	struct dmfe_board_info *db = netdev_priv(dev);
1090
1091	wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
1092	wolinfo->wolopts = db->wol_mode;
1093	return;
1094}
1095
1096
/* ethtool operations implemented by this driver */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= dmfe_ethtool_get_drvinfo,
	.get_link               = ethtool_op_get_link,
	.set_wol		= dmfe_ethtool_set_wol,
	.get_wol		= dmfe_ethtool_get_wol,
};
1103
1104/*
1105 *	A periodic timer routine
1106 *	Dynamic media sense, allocate Rx buffer...
1107 */
1108
/*
 * Periodic housekeeping timer.  Runs with db->lock held and re-arms
 * itself on every path out of the function.  Duties visible below:
 * one-time DM9102 PHY kick, hang/error detection (CR8, Tx kick/timeout),
 * link sensing via chip status and PHY, and HPNA remote-command polling.
 */
static void dmfe_timer(unsigned long data)
{
	u32 tmp_cr8;
	unsigned char tmp_cr12;
	struct DEVICE *dev = (struct DEVICE *) data;
	struct dmfe_board_info *db = netdev_priv(dev);
 	unsigned long flags;

	int link_ok, link_ok_phy;

	DMFE_DBUG(0, "dmfe_timer()", 0);
	spin_lock_irqsave(&db->lock, flags);

	/* Media mode process when Link OK before enter this route */
	if (db->first_in_callback == 0) {
		db->first_in_callback = 1;
		if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
			/* First pass on DM9102: toggle CR6 bit18 (MII select)
			   around a PHY reg0 write of 0x1000, then re-run this
			   timer two seconds later instead of the normal wait */
			db->cr6_data &= ~0x40000;
			update_cr6(db->cr6_data, db->ioaddr);
			phy_write(db->ioaddr,
				  db->phy_addr, 0, 0x1000, db->chip_id);
			db->cr6_data |= 0x40000;
			update_cr6(db->cr6_data, db->ioaddr);
			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
			add_timer(&db->timer);
			spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
	}


	/* Operating Mode Check */
	if ( (db->dm910x_chk_mode & 0x1) &&
		(db->stats.rx_packets > MAX_CHECK_PACKET) )
		db->dm910x_chk_mode = 0x4;

	/* Dynamic reset DM910X : system error or transmit time-out */
	tmp_cr8 = inl(db->ioaddr + DCR8);
	/* NOTE(review): non-zero CR8 with no Rx activity since the last tick
	   is treated as an error condition and schedules a full reset */
	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
		db->reset_cr8++;
		db->wait_reset = 1;
	}
	db->interval_rx_cnt = 0;

	/* TX polling kick monitor */
	if ( db->tx_packet_cnt &&
	     time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
		outl(0x1, dev->base_addr + DCR1);   /* Tx polling again */

		/* TX Timeout */
		if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
			db->reset_TXtimeout++;
			db->wait_reset = 1;
			printk(KERN_WARNING "%s: Tx timeout - resetting\n",
			       dev->name);
		}
	}

	if (db->wait_reset) {
		/* Reset requested above or by another path: reinitialize the
		   chip, then restart this timer with the normal interval */
		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
		db->reset_count++;
		dmfe_dynamic_reset(dev);
		db->first_in_callback = 0;
		db->timer.expires = DMFE_TIMER_WUT;
		add_timer(&db->timer);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* Link status check, Dynamic media type change */
	if (db->chip_id == PCI_DM9132_ID)
		tmp_cr12 = inb(db->ioaddr + DCR9 + 3);	/* DM9132 */
	else
		tmp_cr12 = inb(db->ioaddr + DCR12);	/* DM9102/DM9102A */

	if ( ((db->chip_id == PCI_DM9102_ID) &&
		(db->chip_revision == 0x02000030)) ||
		((db->chip_id == PCI_DM9132_ID) &&
		(db->chip_revision == 0x02000010)) ) {
		/* DM9102A Chip */
		if (tmp_cr12 & 2)
			link_ok = 0;
		else
			link_ok = 1;
	}
	else
		/*0x43 is used instead of 0x3 because bit 6 should represent
			link status of external PHY */
		link_ok = (tmp_cr12 & 0x43) ? 1 : 0;


	/* If chip reports that link is failed it could be because external
		PHY link status pin is not conected correctly to chip
		To be sure ask PHY too.
	*/

	/* need a dummy read because of PHY's register latch*/
	phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
	link_ok_phy = (phy_read (db->ioaddr,
		       db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;

	/* Trust a link-up indication from either source */
	if (link_ok_phy != link_ok) {
		DMFE_DBUG (0, "PHY and chip report different link status", 0);
		link_ok = link_ok | link_ok_phy;
 	}

	if ( !link_ok && netif_carrier_ok(dev)) {
		/* Link Failed */
		DMFE_DBUG(0, "Link Failed", tmp_cr12);
		netif_carrier_off(dev);

		/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
		/* AUTO or force 1M Homerun/Longrun don't need */
		if ( !(db->media_mode & 0x38) )
			phy_write(db->ioaddr, db->phy_addr,
				  0, 0x1000, db->chip_id);

		/* AUTO mode, if INT phyxcer link failed, select EXT device */
		if (db->media_mode & DMFE_AUTO) {
			/* 10/100M link failed, used 1M Home-Net */
			db->cr6_data|=0x00040000;	/* bit18=1, MII */
			db->cr6_data&=~0x00000200;	/* bit9=0, HD mode */
			update_cr6(db->cr6_data, db->ioaddr);
		}
	} else if (!netif_carrier_ok(dev)) {

		DMFE_DBUG(0, "Link link OK", tmp_cr12);

		/* Auto Sense Speed */
		if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
			netif_carrier_on(dev);
			SHOW_MEDIA_TYPE(db->op_mode);
		}

		dmfe_process_mode(db);
	}

	/* HPNA remote command check */
	if (db->HPNA_command & 0xf00) {
		db->HPNA_timer--;
		if (!db->HPNA_timer)
			dmfe_HPNA_remote_cmd_chk(db);
	}

	/* Timer active again */
	db->timer.expires = DMFE_TIMER_WUT;
	add_timer(&db->timer);
	spin_unlock_irqrestore(&db->lock, flags);
}
1258
1259
1260/*
1261 *	Dynamic reset the DM910X board
1262 *	Stop DM910X board
1263 *	Free Tx/Rx allocated memory
1264 *	Reset DM910X board
1265 *	Re-initilize DM910X board
1266 */
1267
static void dmfe_dynamic_reset(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

	/* Stop the MAC controller */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
	update_cr6(db->cr6_data, dev->base_addr);
	outl(0, dev->base_addr + DCR7);		/* Disable Interrupt */
	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);

	/* Disable upper layer interface */
	netif_stop_queue(dev);

	/* Free Rx allocated buffers */
	dmfe_free_rxbuffer(db);

	/* system variable init */
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	netif_carrier_off(dev);
	db->wait_reset = 0;

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Restart upper layer interface */
	netif_wake_queue(dev);
}
1299
1300
1301/*
1302 *	free all allocated rx buffer
1303 */
1304
1305static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1306{
1307	DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1308
1309	/* free allocated rx buffer */
1310	while (db->rx_avail_cnt) {
1311		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1312		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1313		db->rx_avail_cnt--;
1314	}
1315}
1316
1317
1318/*
1319 *	Reuse the SK buffer
1320 */
1321
/* Re-arm one Rx descriptor with @skb so the chip can receive into it again. */
static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
{
	struct rx_desc *rxptr = db->rx_insert_ptr;

	/* Only re-arm if the chip does not already own this descriptor
	   (OWN bit 31 of rdes0 clear) */
	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
			    skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		/* barrier: buffer address must be visible before ownership
		   is handed back to the chip */
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		db->rx_avail_cnt++;
		db->rx_insert_ptr = rxptr->next_rx_desc;
	} else
		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}
1337
1338
1339/*
1340 *	Initialize transmit/Receive descriptor
1341 *	Using Chain structure, and allocate Tx/Rx buffer
1342 */
1343
static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
{
	struct tx_desc *tmp_tx;
	struct rx_desc *tmp_rx;
	unsigned char *tmp_buf;
	dma_addr_t tmp_tx_dma, tmp_rx_dma;
	dma_addr_t tmp_buf_dma;
	int i;

	DMFE_DBUG(0, "dmfe_descriptor_init()", 0);

	/* tx descriptor start pointer */
	db->tx_insert_ptr = db->first_tx_desc;
	db->tx_remove_ptr = db->first_tx_desc;
	outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */

	/* rx descriptor start pointer */
	/* The Rx ring lives immediately after the Tx ring inside the same
	   DMA-coherent block, both in CPU and bus address space */
	db->first_rx_desc = (void *)db->first_tx_desc +
			sizeof(struct tx_desc) * TX_DESC_CNT;

	db->first_rx_desc_dma =  db->first_tx_desc_dma +
			sizeof(struct tx_desc) * TX_DESC_CNT;
	db->rx_insert_ptr = db->first_rx_desc;
	db->rx_ready_ptr = db->first_rx_desc;
	outl(db->first_rx_desc_dma, ioaddr + DCR3);	/* RX DESC address */

	/* Init Transmit chain */
	/* Each Tx descriptor gets a fixed TX_BUF_ALLOC-sized slice of the
	   pre-allocated buffer pool and chains (tdes3) to its successor */
	tmp_buf = db->buf_pool_start;
	tmp_buf_dma = db->buf_pool_dma_start;
	tmp_tx_dma = db->first_tx_desc_dma;
	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
		tmp_tx->tx_buf_ptr = tmp_buf;
		tmp_tx->tdes0 = cpu_to_le32(0);
		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
		tmp_tx_dma += sizeof(struct tx_desc);
		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
		tmp_tx->next_tx_desc = tmp_tx + 1;
		tmp_buf = tmp_buf + TX_BUF_ALLOC;
		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
	}
	/* Close the ring: last descriptor chains back to the first */
	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
	tmp_tx->next_tx_desc = db->first_tx_desc;

	 /* Init Receive descriptor chain */
	tmp_rx_dma=db->first_rx_desc_dma;
	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
		tmp_rx->rdes0 = cpu_to_le32(0);
		tmp_rx->rdes1 = cpu_to_le32(0x01000600);	/* control: chain + buffer length */
		tmp_rx_dma += sizeof(struct rx_desc);
		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
		tmp_rx->next_rx_desc = tmp_rx + 1;
	}
	/* Close the Rx ring the same way */
	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
	tmp_rx->next_rx_desc = db->first_rx_desc;

	/* pre-allocate Rx buffer */
	allocate_rx_buffer(db);
}
1403
1404
1405/*
1406 *	Update CR6 value
1407 *	Firstly stop DM910X , then written value and start
1408 */
1409
1410static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1411{
1412	u32 cr6_tmp;
1413
1414	cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
1415	outl(cr6_tmp, ioaddr + DCR6);
1416	udelay(5);
1417	outl(cr6_data, ioaddr + DCR6);
1418	udelay(5);
1419}
1420
1421
1422/*
1423 *	Send a setup frame for DM9132
1424 *	This setup frame initilize DM910X address filter mode
1425*/
1426
static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
{
	struct dev_mc_list *mcptr;
	u16 * addrptr;
	unsigned long ioaddr = dev->base_addr+0xc0;		/* ID Table */
	u32 hash_val;
	u16 i, hash_table[4];

	DMFE_DBUG(0, "dm9132_id_table()", 0);

	/* Node address */
	/* The ID-table registers are 16-bit ports spaced 4 bytes apart */
	addrptr = (u16 *) dev->dev_addr;
	outw(addrptr[0], ioaddr);
	ioaddr += 4;
	outw(addrptr[1], ioaddr);
	ioaddr += 4;
	outw(addrptr[2], ioaddr);
	ioaddr += 4;

	/* Clear Hash Table */
	for (i = 0; i < 4; i++)
		hash_table[i] = 0x0;

	/* broadcast address */
	/* bit 63 of the 64-bit hash table is reserved for broadcast */
	hash_table[3] = 0x8000;

	/* the multicast address in Hash Table : 64 bits */
	/* low 6 bits of the CRC select one of the 64 hash-table bits */
	for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
		hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	/* Write the hash table to MAC MD table */
	for (i = 0; i < 4; i++, ioaddr += 4)
		outw(hash_table[i], ioaddr);
}
1463
1464
1465/*
1466 *	Send a setup frame for DM9102/DM9102A
1467 *	This setup frame initilize DM910X address filter mode
1468 */
1469
static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct dev_mc_list *mcptr;
	struct tx_desc *txptr;
	u16 * addrptr;
	u32 * suptr;
	int i;

	DMFE_DBUG(0, "send_filter_frame()", 0);

	/* Build the setup frame directly in the next Tx buffer.  The frame
	   holds 16 address slots of 3 u32 words each (12 bytes per slot,
	   192 bytes total): node address, broadcast, then multicast. */
	txptr = db->tx_insert_ptr;
	suptr = (u32 *) txptr->tx_buf_ptr;

	/* Node address */
	addrptr = (u16 *) dev->dev_addr;
	*suptr++ = addrptr[0];
	*suptr++ = addrptr[1];
	*suptr++ = addrptr[2];

	/* broadcast address */
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;

	/* fit the multicast address */
	for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
		addrptr = (u16 *) mcptr->dmi_addr;
		*suptr++ = addrptr[0];
		*suptr++ = addrptr[1];
		*suptr++ = addrptr[2];
	}

	/* Pad the remaining of the 16 slots with the broadcast address */
	for (; i<14; i++) {
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
	}

	/* prepare the setup frame */
	db->tx_insert_ptr = txptr->next_tx_desc;
	txptr->tdes1 = cpu_to_le32(0x890000c0);	/* setup frame, chained, 0xc0 = 192-byte length */

	/* Resource Check and Send the setup packet */
	if (!db->tx_packet_cnt) {
		/* Resource Empty */
		db->tx_packet_cnt++;
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* hand descriptor to the chip */
		update_cr6(db->cr6_data | 0x2000, dev->base_addr);
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
		update_cr6(db->cr6_data, dev->base_addr);
		dev->trans_start = jiffies;
	} else
		db->tx_queue_cnt++;	/* Put in TX queue */
}
1525
1526
1527/*
1528 *	Allocate rx buffer,
1529 *	As possible as allocate maxiumn Rx buffer
1530 */
1531
1532static void allocate_rx_buffer(struct dmfe_board_info *db)
1533{
1534	struct rx_desc *rxptr;
1535	struct sk_buff *skb;
1536
1537	rxptr = db->rx_insert_ptr;
1538
1539	while(db->rx_avail_cnt < RX_DESC_CNT) {
1540		if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1541			break;
1542		rxptr->rx_skb_ptr = skb;
1543		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
1544				    RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1545		wmb();
1546		rxptr->rdes0 = cpu_to_le32(0x80000000);
1547		rxptr = rxptr->next_rx_desc;
1548		db->rx_avail_cnt++;
1549	}
1550
1551	db->rx_insert_ptr = rxptr;
1552}
1553
1554
1555/*
1556 *	Read one word data from the serial ROM
1557 */
1558
/* Bit-banged read of one 16-bit word at @offset from the serial EEPROM
 * (3-wire interface via CR9; read opcode is 110b, 6-bit address). */
static u16 read_srom_word(long ioaddr, int offset)
{
	int i;
	u16 srom_data = 0;
	long cr9_ioaddr = ioaddr + DCR9;

	/* Assert chip select */
	outl(CR9_SROM_READ, cr9_ioaddr);
	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

	/* Send the Read Command 110b */
	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
	SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);

	/* Send the offset, MSB first */
	for (i = 5; i >= 0; i--) {
		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
		SROM_CLK_WRITE(srom_data, cr9_ioaddr);
	}

	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

	/* Clock in 16 data bits, MSB first */
	for (i = 16; i > 0; i--) {
		outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
		udelay(5);
		srom_data = (srom_data << 1) |
				((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
		outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
		udelay(5);
	}

	/* Deassert chip select */
	outl(CR9_SROM_READ, cr9_ioaddr);
	return srom_data;
}
1593
1594
1595/*
1596 *	Auto sense the media mode
1597 */
1598
/* Sense the negotiated media speed/duplex from the PHY.
 * Returns 0 on success (db->op_mode set), 1 on failure/link down. */
static u8 dmfe_sense_speed(struct dmfe_board_info * db)
{
	u8 ErrFlag = 0;
	u16 phy_mode;

	/* CR6 bit18=0, select 10/100M */
	update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);

	/* Read twice: the first read flushes the PHY's latched status */
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);

	/* 0x24 = auto-negotiation complete + link up */
	if ( (phy_mode & 0x24) == 0x24 ) {
		if (db->chip_id == PCI_DM9132_ID)	/* DM9132 */
			phy_mode = phy_read(db->ioaddr,
				    db->phy_addr, 7, db->chip_id) & 0xf000;
		else 				/* DM9102/DM9102A */
			phy_mode = phy_read(db->ioaddr,
				    db->phy_addr, 17, db->chip_id) & 0xf000;
		/* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
		switch (phy_mode) {
		case 0x1000: db->op_mode = DMFE_10MHF; break;
		case 0x2000: db->op_mode = DMFE_10MFD; break;
		case 0x4000: db->op_mode = DMFE_100MHF; break;
		case 0x8000: db->op_mode = DMFE_100MFD; break;
		default: db->op_mode = DMFE_10MHF;
			ErrFlag = 1;
			break;
		}
	} else {
		db->op_mode = DMFE_10MHF;
		DMFE_DBUG(0, "Link Failed :", phy_mode);
		ErrFlag = 1;
	}

	return ErrFlag;
}
1635
1636
1637/*
1638 *	Set 10/100 phyxcer capability
1639 *	AUTO mode : phyxcer register4 is NIC capability
1640 *	Force mode: phyxcer register4 is the force media
1641 */
1642
static void dmfe_set_phyxcer(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Select 10/100M phyxcer */
	db->cr6_data &= ~0x40000;
	update_cr6(db->cr6_data, db->ioaddr);

	/* DM9009 Chip: Phyxcer reg18 bit12=0 */
	if (db->chip_id == PCI_DM9009_ID) {
		phy_reg = phy_read(db->ioaddr,
				   db->phy_addr, 18, db->chip_id) & ~0x1000;

		phy_write(db->ioaddr,
			  db->phy_addr, 18, phy_reg, db->chip_id);
	}

	/* Phyxcer capability setting */
	/* Clear the advertisement bits (reg4 bits 5-8), then set them per mode */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;

	if (db->media_mode & DMFE_AUTO) {
		/* AUTO Mode: advertise everything the NIC supports */
		phy_reg |= db->PHY_reg4;
	} else {
		/* Force Mode: advertise only the selected ability */
		switch(db->media_mode) {
		case DMFE_10MHF: phy_reg |= 0x20; break;
		case DMFE_10MFD: phy_reg |= 0x40; break;
		case DMFE_100MHF: phy_reg |= 0x80; break;
		case DMFE_100MFD: phy_reg |= 0x100; break;
		}
		if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;	/* DM9009 is 10M only */
	}

  	/* Write new capability to Phyxcer Reg4 */
	/* No ability selected at all: fall back to full auto-negotiation */
	if ( !(phy_reg & 0x01e0)) {
		phy_reg|=db->PHY_reg4;
		db->media_mode|=DMFE_AUTO;
	}
	phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);

 	/* Restart Auto-Negotiation */
	if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
	if ( !db->chip_type )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
}
1690
1691
1692/*
1693 *	Process op-mode
1694 *	AUTO mode : PHY controller in Auto-negotiation Mode
1695 *	Force mode: PHY controller in force mode with HUB
1696 *			N-way force capability with SWITCH
1697 */
1698
static void dmfe_process_mode(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Full Duplex Mode Check */
	if (db->op_mode & 0x4)
		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
	else
		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */

	/* Transceiver Selection */
	if (db->op_mode & 0x10)		/* 1M HomePNA */
		db->cr6_data |= 0x40000;/* External MII select */
	else
		db->cr6_data &= ~0x40000;/* Internal 10/100 transceiver */

	update_cr6(db->cr6_data, db->ioaddr);

	/* 10/100M phyxcer force mode need */
	if ( !(db->media_mode & 0x18)) {
		/* Force Mode */
		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
		if ( !(phy_reg & 0x1) ) {
			/* partner without N-Way capability:
			   force speed/duplex directly via PHY reg0 */
			phy_reg = 0x0;
			switch(db->op_mode) {
			case DMFE_10MHF: phy_reg = 0x0; break;
			case DMFE_10MFD: phy_reg = 0x100; break;
			case DMFE_100MHF: phy_reg = 0x2000; break;
			case DMFE_100MFD: phy_reg = 0x2100; break;
			}
			/* Write twice; old DM9102 silicon needs a delay
			   between the two writes */
			phy_write(db->ioaddr,
				  db->phy_addr, 0, phy_reg, db->chip_id);
       			if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
				mdelay(20);
			phy_write(db->ioaddr,
				  db->phy_addr, 0, phy_reg, db->chip_id);
		}
	}
}
1739
1740
1741/*
1742 *	Write a word to Phy register
1743 */
1744
/* Write @phy_data to PHY register @offset.  DM9132 exposes the PHY
 * registers directly in I/O space; other chips use bit-banged MII
 * management frames through CR9. */
static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
		      u16 phy_data, u32 chip_id)
{
	u16 i;
	unsigned long ioaddr;

	if (chip_id == PCI_DM9132_ID) {
		/* PHY registers mapped at iobase+0x80, 4 bytes apart */
		ioaddr = iobase + 0x80 + offset * 4;
		outw(phy_data, ioaddr);
	} else {
		/* DM9102/DM9102A Chip */
		ioaddr = iobase + DCR9;

		/* Send preamble clocks to sync the PHY (loop runs 35 times;
		   MII requires at least 32 '1' bits) */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send write command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send Phy address, MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address, MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* write turnaround (10) */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Write a word data to PHY controller, MSB first */
		for ( i = 0x8000; i > 0; i >>= 1)
			phy_write_1bit(ioaddr,
				       phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
	}
}
1790
1791
1792/*
1793 *	Read a word data from phy register
1794 */
1795
/* Read PHY register @offset.  DM9132 exposes the PHY registers directly
 * in I/O space; other chips use bit-banged MII frames through CR9. */
static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
{
	int i;
	u16 phy_data;
	unsigned long ioaddr;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132 Chip: PHY registers at iobase+0x80, 4 bytes apart */
		ioaddr = iobase + 0x80 + offset * 4;
		phy_data = inw(ioaddr);
	} else {
		/* DM9102/DM9102A Chip */
		ioaddr = iobase + DCR9;

		/* Send preamble clocks to sync the PHY (loop runs 35 times;
		   MII requires at least 32 '1' bits) */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send read command(10) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Send Phy address, MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address, MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Skip turnaround bit */
		phy_read_1bit(ioaddr);

		/* read 16bit data, MSB first */
		for (phy_data = 0, i = 0; i < 16; i++) {
			phy_data <<= 1;
			phy_data |= phy_read_1bit(ioaddr);
		}
	}

	return phy_data;
}
1844
1845
1846/*
1847 *	Write one bit data to Phy Controller
1848 */
1849
/* Shift one bit out to the PHY: put the data bit on the management
 * interface and strobe the MII clock low-high-low. */
static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
{
	outl(phy_data, ioaddr);			/* MII Clock Low */
	udelay(1);
	outl(phy_data | MDCLKH, ioaddr);	/* MII Clock High */
	udelay(1);
	outl(phy_data, ioaddr);			/* MII Clock Low */
	udelay(1);
}
1859
1860
1861/*
1862 *	Read one bit phy data from PHY controller
1863 */
1864
/* Shift one bit in from the PHY: raise the MII clock, sample the data
 * line (bit 19 of CR9), then lower the clock again.  Returns 0 or 1. */
static u16 phy_read_1bit(unsigned long ioaddr)
{
	u16 phy_data;

	outl(0x50000, ioaddr);			/* clock high */
	udelay(1);
	phy_data = ( inl(ioaddr) >> 19 ) & 0x1;	/* sample MDIO */
	outl(0x40000, ioaddr);			/* clock low */
	udelay(1);

	return phy_data;
}
1877
1878
1879/*
1880 *	Parser SROM and media mode
1881 */
1882
static void dmfe_parse_srom(struct dmfe_board_info * db)
{
	char * srom = db->srom;
	int dmfe_mode, tmp_reg;

	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);

	/* Init CR15 */
	db->cr15_data = CR15_DEFAULT;

	/* Check SROM Version */
	if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
		/* SROM V4.01 */
		/* Get NIC support media mode */
		db->NIC_capability = le16_to_cpup((__le16 *)srom + 34/2);
		db->PHY_reg4 = 0;
		/* Translate capability bits into MII reg4 advertisement bits */
		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
			switch( db->NIC_capability & tmp_reg ) {
			case 0x1: db->PHY_reg4 |= 0x0020; break;
			case 0x2: db->PHY_reg4 |= 0x0040; break;
			case 0x4: db->PHY_reg4 |= 0x0080; break;
			case 0x8: db->PHY_reg4 |= 0x0100; break;
			}
		}

		/* Media Mode Force or not check */
		/* NOTE(review): (__le32 *)srom + 34/4 is element 8, i.e. byte
		   offset 32, not 34 (integer division) — verify this matches
		   the intended SROM field layout */
		dmfe_mode = le32_to_cpup((__le32 *)srom + 34/4) &
				le32_to_cpup((__le32 *)srom + 36/4);
		switch(dmfe_mode) {
		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
		case 0x100:
		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
		}

		/* Special Function setting */
		/* VLAN function */
		if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
			db->cr15_data |= 0x40;

		/* Flow Control */
		if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
			db->cr15_data |= 0x400;

		/* TX pause packet */
		if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
			db->cr15_data |= 0x9800;
	}

	/* Parse HPNA parameter */
	db->HPNA_command = 1;

	/* Accept remote command or not */
	if (HPNA_rx_cmd == 0)
		db->HPNA_command |= 0x8000;

	 /* Issue remote command & operation mode */
	if (HPNA_tx_cmd == 1)
		switch(HPNA_mode) {	/* Issue Remote Command */
		case 0: db->HPNA_command |= 0x0904; break;
		case 1: db->HPNA_command |= 0x0a00; break;
		case 2: db->HPNA_command |= 0x0506; break;
		case 3: db->HPNA_command |= 0x0602; break;
		}
	else
		switch(HPNA_mode) {	/* Don't Issue */
		case 0: db->HPNA_command |= 0x0004; break;
		case 1: db->HPNA_command |= 0x0000; break;
		case 2: db->HPNA_command |= 0x0006; break;
		case 3: db->HPNA_command |= 0x0002; break;
		}

	/* Check DM9801 or DM9802 present or not */
	db->HPNA_present = 0;
	update_cr6(db->cr6_data|0x40000, db->ioaddr);
	tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
	if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
		/* DM9801 or DM9802 present */
		db->HPNA_timer = 8;
		if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
			/* DM9801 HomeRun */
			db->HPNA_present = 1;
			dmfe_program_DM9801(db, tmp_reg);
		} else {
			/* DM9802 LongRun */
			db->HPNA_present = 2;
			dmfe_program_DM9802(db);
		}
	}

}
1975
1976
1977/*
1978 *	Init HomeRun DM9801
1979 */
1980
/* Program the DM9801 HomeRun companion chip.  @HPNA_rev selects the
 * silicon revision (PHY reg3 value); each revision needs a different
 * noise-floor adjustment in PHY regs 17/25. */
static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
{
	uint reg17, reg25;

	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
	switch(HPNA_rev) {
	case 0xb900: /* DM9801 E3 */
		db->HPNA_command |= 0x1000;
		/* NOTE(review): E3 reads reg24 into reg25's value and writes
		   reg17 back unmodified — confirm against DM9801 datasheet */
		reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
		reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		break;
	case 0xb901: /* DM9801 E4 */
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
		break;
	case 0xb902: /* DM9801 E5 */
	case 0xb903: /* DM9801 E6 */
	default:
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
		break;
	}
	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
}
2013
2014
2015/*
2016 *	Init HomeRun DM9802
2017 */
2018
2019static void dmfe_program_DM9802(struct dmfe_board_info * db)
2020{
2021	uint phy_reg;
2022
2023	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
2024	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2025	phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2026	phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
2027	phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2028}
2029
2030
2031/*
2032 *	Check remote HPNA power and speed status. If not correct,
2033 *	issue command again.
2034*/
2035
2036static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2037{
2038	uint phy_reg;
2039
2040	/* Got remote device status */
2041	phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
2042	switch(phy_reg) {
2043	case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
2044	case 0x20: phy_reg = 0x0900;break; /* LP/HS */
2045	case 0x40: phy_reg = 0x0600;break; /* HP/LS */
2046	case 0x60: phy_reg = 0x0500;break; /* HP/HS */
2047	}
2048
2049	/* Check remote device status match our setting ot not */
2050	if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
2051		phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2052			  db->chip_id);
2053		db->HPNA_timer=8;
2054	} else
2055		db->HPNA_timer=600;	/* Match, every 10 minutes, check */
2056}
2057
2058
2059
/* PCI IDs handled by this driver; driver_data carries the chip type
 * constant used throughout the code to select per-chip behaviour. */
static struct pci_device_id dmfe_pci_tbl[] = {
	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
2068
2069
2070#ifdef CONFIG_PM
/* PM suspend hook: quiesce the chip, arm the requested wake-on-LAN
 * sources via PCI config register 0x40, and power the device down. */
static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct dmfe_board_info *db = netdev_priv(dev);
	u32 tmp;

	/* Disable upper layer interface */
	netif_device_detach(dev);

	/* Disable Tx/Rx */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
	update_cr6(db->cr6_data, dev->base_addr);

	/* Disable Interrupt */
	outl(0, dev->base_addr + DCR7);
	outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5);

	/* Free Rx buffers */
	dmfe_free_rxbuffer(db);

	/* Enable WOL */
	/* WOL control bits live in PCI config space at offset 0x40 */
	pci_read_config_dword(pci_dev, 0x40, &tmp);
	tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET);

	if (db->wol_mode & WAKE_PHY)
		tmp |= DMFE_WOL_LINKCHANGE;
	if (db->wol_mode & WAKE_MAGIC)
		tmp |= DMFE_WOL_MAGICPACKET;

	pci_write_config_dword(pci_dev, 0x40, tmp);

	pci_enable_wake(pci_dev, PCI_D3hot, 1);
	pci_enable_wake(pci_dev, PCI_D3cold, 1);

	/* Power down device*/
	pci_set_power_state(pci_dev, pci_choose_state (pci_dev,state));
	pci_save_state(pci_dev);

	return 0;
}
2111
2112static int dmfe_resume(struct pci_dev *pci_dev)
2113{
2114	struct net_device *dev = pci_get_drvdata(pci_dev);
2115	u32 tmp;
2116
2117	pci_restore_state(pci_dev);
2118	pci_set_power_state(pci_dev, PCI_D0);
2119
2120	/* Re-initilize DM910X board */
2121	dmfe_init_dm910x(dev);
2122
2123	/* Disable WOL */
2124	pci_read_config_dword(pci_dev, 0x40, &tmp);
2125
2126	tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
2127	pci_write_config_dword(pci_dev, 0x40, tmp);
2128
2129	pci_enable_wake(pci_dev, PCI_D3hot, 0);
2130	pci_enable_wake(pci_dev, PCI_D3cold, 0);
2131
2132	/* Restart upper layer interface */
2133	netif_device_attach(dev);
2134
2135	return 0;
2136}
2137#else
2138#define dmfe_suspend NULL
2139#define dmfe_resume NULL
2140#endif
2141
2142static struct pci_driver dmfe_driver = {
2143	.name		= "dmfe",
2144	.id_table	= dmfe_pci_tbl,
2145	.probe		= dmfe_init_one,
2146	.remove		= __devexit_p(dmfe_remove_one),
2147	.suspend        = dmfe_suspend,
2148	.resume         = dmfe_resume
2149};
2150
2151MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
2152MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
2153MODULE_LICENSE("GPL");
2154MODULE_VERSION(DRV_VERSION);
2155
2156module_param(debug, int, 0);
2157module_param(mode, byte, 0);
2158module_param(cr6set, int, 0);
2159module_param(chkmode, byte, 0);
2160module_param(HPNA_mode, byte, 0);
2161module_param(HPNA_rx_cmd, byte, 0);
2162module_param(HPNA_tx_cmd, byte, 0);
2163module_param(HPNA_NoiseFloor, byte, 0);
2164module_param(SF_mode, byte, 0);
2165MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
2166MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
2167		"Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
2168
2169MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
2170		"(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
2171
/*	Description:
 *	Invoked by the kernel when the module is loaded (insmod) to
 *	initialize the driver and register it with the PCI subsystem.
 */
2176
2177static int __init dmfe_init_module(void)
2178{
2179	int rc;
2180
2181	printk(version);
2182	printed_version = 1;
2183
2184	DMFE_DBUG(0, "init_module() ", debug);
2185
2186	if (debug)
2187		dmfe_debug = debug;	/* set debug flag */
2188	if (cr6set)
2189		dmfe_cr6_user_set = cr6set;
2190
2191 	switch(mode) {
2192   	case DMFE_10MHF:
2193	case DMFE_100MHF:
2194	case DMFE_10MFD:
2195	case DMFE_100MFD:
2196	case DMFE_1M_HPNA:
2197		dmfe_media_mode = mode;
2198		break;
2199	default:dmfe_media_mode = DMFE_AUTO;
2200		break;
2201	}
2202
2203	if (HPNA_mode > 4)
2204		HPNA_mode = 0;		/* Default: LP/HS */
2205	if (HPNA_rx_cmd > 1)
2206		HPNA_rx_cmd = 0;	/* Default: Ignored remote cmd */
2207	if (HPNA_tx_cmd > 1)
2208		HPNA_tx_cmd = 0;	/* Default: Don't issue remote cmd */
2209	if (HPNA_NoiseFloor > 15)
2210		HPNA_NoiseFloor = 0;
2211
2212	rc = pci_register_driver(&dmfe_driver);
2213	if (rc < 0)
2214		return rc;
2215
2216	return 0;
2217}
2218
2219
2220/*
2221 *	Description:
2222 *	when user used rmmod to delete module, system invoked clean_module()
2223 *	to un-register all registered services.
2224 */
2225
/* Module exit point: unregister the PCI driver (the PCI core then
 * invokes the remove callback for every bound device). */
static void __exit dmfe_cleanup_module(void)
{
	DMFE_DBUG(0, "dmfe_clean_module() ", debug);
	pci_unregister_driver(&dmfe_driver);
}
2231
2232module_init(dmfe_init_module);
2233module_exit(dmfe_cleanup_module);
2234