1/*
2    A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3    ethernet driver for Linux.
4    Copyright (C) 1997  Sten Wang
5
6    This program is free software; you can redistribute it and/or
7    modify it under the terms of the GNU General Public License
8    as published by the Free Software Foundation; either version 2
9    of the License, or (at your option) any later version.
10
11    This program is distributed in the hope that it will be useful,
12    but WITHOUT ANY WARRANTY; without even the implied warranty of
13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14    GNU General Public License for more details.
15
16    DAVICOM Web-Site: www.davicom.com.tw
17
18    Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19    Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
20
21    (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
22
23    Marcelo Tosatti <marcelo@conectiva.com.br> :
24    Made it compile in 2.3 (device to net_device)
25
26    Alan Cox <alan@redhat.com> :
27    Cleaned up for kernel merge.
28    Removed the back compatibility support
29    Reformatted, fixing spelling etc as I went
30    Removed IRQ 0-15 assumption
31
32    Jeff Garzik <jgarzik@mandrakesoft.com> :
33    Updated to use new PCI driver API.
34    Resource usage cleanups.
35    Report driver version to user.
36
37    Tobias Ringstrom <tori@unhappy.mine.nu> :
38    Cleaned up and added SMP safety.  Thanks go to Jeff Garzik,
39    Andrew Morton and Frank Davis for the SMP safety fixes.
40
41    Vojtech Pavlik <vojtech@suse.cz> :
42    Cleaned up pointer arithmetics.
43    Fixed a lot of 64bit issues.
44    Cleaned up printk()s a bit.
45    Fixed some obvious big endian problems.
46
47    Tobias Ringstrom <tori@unhappy.mine.nu> :
48    Use time_after for jiffies calculation.  Added ethtool
49    support.  Updated PCI resource allocation.  Do not
50    forget to unmap PCI mapped skbs.
51
52    TODO
53
54    Implement pci_driver::suspend() and pci_driver::resume()
55    power management methods.
56
57    Check on 64 bit boxes.
58    Check and fix on big endian boxes.
59
60    Test and make sure PCI latency is now correct for all cases.
61*/
62
63#define DRV_NAME	"dmfe"
64#define DRV_VERSION	"1.36.4"
65#define DRV_RELDATE	"2002-01-17"
66
67#include <linux/module.h>
68
69#include <linux/kernel.h>
70#include <linux/sched.h>
71#include <linux/string.h>
72#include <linux/timer.h>
73#include <linux/ptrace.h>
74#include <linux/errno.h>
75#include <linux/ioport.h>
76#include <linux/slab.h>
77#include <linux/interrupt.h>
78#include <linux/pci.h>
79#include <linux/init.h>
80#include <linux/version.h>
81#include <linux/netdevice.h>
82#include <linux/etherdevice.h>
83#include <linux/ethtool.h>
84#include <linux/skbuff.h>
85#include <linux/delay.h>
86#include <linux/spinlock.h>
87#include <linux/crc32.h>
88
89#include <asm/processor.h>
90#include <asm/bitops.h>
91#include <asm/io.h>
92#include <asm/dma.h>
93#include <asm/uaccess.h>
94
95
96/* Board/System/Debug information/definition ---------------- */
97#define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
98#define PCI_DM9102_ID   0x91021282      /* Davicom DM9102 ID */
99#define PCI_DM9100_ID   0x91001282      /* Davicom DM9100 ID */
100#define PCI_DM9009_ID   0x90091282      /* Davicom DM9009 ID */
101
102#define DM9102_IO_SIZE  0x80
103#define DM9102A_IO_SIZE 0x100
104#define TX_MAX_SEND_CNT 0x1             /* Maximum tx packet per time */
105#define TX_DESC_CNT     0x10            /* Allocated Tx descriptors */
106#define RX_DESC_CNT     0x20            /* Allocated Rx descriptors */
107#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)	/* Max TX packet count */
108#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)	/* TX wakeup count */
109#define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
110#define TX_BUF_ALLOC    0x600
111#define RX_ALLOC_SIZE   0x620
112#define DM910X_RESET    1
113#define CR0_DEFAULT     0x00E00000      /* TX & RX burst mode */
114#define CR6_DEFAULT     0x00080000      /* HD */
115#define CR7_DEFAULT     0x180c1
116#define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
117#define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
118#define MAX_PACKET_SIZE 1514
119#define DMFE_MAX_MULTICAST 14
120#define RX_COPY_SIZE	100
121#define MAX_CHECK_PACKET 0x8000
122#define DM9801_NOISE_FLOOR 8
123#define DM9802_NOISE_FLOOR 5
124
125#define DMFE_10MHF      0
126#define DMFE_100MHF     1
127#define DMFE_10MFD      4
128#define DMFE_100MFD     5
129#define DMFE_AUTO       8
130#define DMFE_1M_HPNA    0x10
131
132#define DMFE_TXTH_72	0x400000	/* TX TH 72 byte */
133#define DMFE_TXTH_96	0x404000	/* TX TH 96 byte */
134#define DMFE_TXTH_128	0x0000		/* TX TH 128 byte */
135#define DMFE_TXTH_256	0x4000		/* TX TH 256 byte */
136#define DMFE_TXTH_512	0x8000		/* TX TH 512 byte */
137#define DMFE_TXTH_1K	0xC000		/* TX TH 1K  byte */
138
139#define DMFE_TIMER_WUT  (jiffies + HZ * 1)/* timer wakeup time : 1 second */
140#define DMFE_TX_TIMEOUT ((3*HZ)/2)	/* tx packet time-out time 1.5 s" */
141#define DMFE_TX_KICK 	(HZ/2)	/* tx packet Kick-out time 0.5 s" */
142
143#define DMFE_DBUG(dbug_now, msg, value) if (dmfe_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))
144
145#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
146
147
148/* CR9 definition: SROM/MII */
149#define CR9_SROM_READ   0x4800
150#define CR9_SRCS        0x1
151#define CR9_SRCLK       0x2
152#define CR9_CRDOUT      0x8
153#define SROM_DATA_0     0x0
154#define SROM_DATA_1     0x4
155#define PHY_DATA_1      0x20000
156#define PHY_DATA_0      0x00000
157#define MDCLKH          0x10000
158
159#define PHY_POWER_DOWN	0x800
160
161#define SROM_V41_CODE   0x14
162
163#define SROM_CLK_WRITE(data, ioaddr) outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);
164
165#define __CHK_IO_SIZE(pci_id, dev_rev) ( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? DM9102A_IO_SIZE: DM9102_IO_SIZE
166#define CHK_IO_SIZE(pci_dev, dev_rev) __CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev)
167
168/* Sten Check */
169#define DEVICE net_device
170
171/* Structure/enum declaration ------------------------------- */
/* Hardware Tx descriptor.  The first four dwords (tdes0..tdes3) are
 * shared with the chip and stored little-endian (see the cpu_to_le32
 * conversions at the use sites); the trailing pointers are driver-private.
 * NOTE(review): the 32-byte alignment presumably matches the DM910x DMA
 * engine's descriptor requirement -- confirm against the datasheet. */
struct tx_desc {
        u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
        char *tx_buf_ptr;               /* Data for us: Tx staging buffer */
        struct tx_desc *next_tx_desc;   /* circular chain link (driver only) */
} __attribute__(( aligned(32) ));
177
/* Hardware Rx descriptor.  rdes0..rdes3 are chip-owned, little-endian;
 * rx_skb_ptr holds the skb whose data area rdes2 points at (DMA-mapped
 * in allocate_rx_buffer/dmfe_reuse_skb). */
struct rx_desc {
	u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
	struct sk_buff *rx_skb_ptr;	/* Data for us: skb backing rdes2 */
	struct rx_desc *next_rx_desc;	/* circular chain link (driver only) */
} __attribute__(( aligned(32) ));
183
/* Per-adapter state, hung off net_device->priv.  All descriptor/queue
 * fields are protected by ->lock (taken in xmit, interrupt, timer and
 * filter paths). */
struct dmfe_board_info {
	u32 chip_id;			/* Chip vendor/Device ID */
	u32 chip_revision;		/* Chip revision */
	struct DEVICE *next_dev;	/* next device */
	struct pci_dev *pdev;		/* PCI device */
	spinlock_t lock;		/* guards descriptor rings and counters */

	long ioaddr;			/* I/O base address */
	u32 cr0_data;			/* shadow copies of chip CR registers */
	u32 cr5_data;
	u32 cr6_data;
	u32 cr7_data;
	u32 cr15_data;

	/* pointer for memory physical address (bus/DMA addresses) */
	dma_addr_t buf_pool_dma_ptr;	/* Tx buffer pool memory */
	dma_addr_t buf_pool_dma_start;	/* Tx buffer pool align dword */
	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool memory */
	dma_addr_t first_tx_desc_dma;
	dma_addr_t first_rx_desc_dma;

	/* descriptor pointer (kernel virtual addresses) */
	unsigned char *buf_pool_ptr;	/* Tx buffer pool memory */
	unsigned char *buf_pool_start;	/* Tx buffer pool align dword */
	unsigned char *desc_pool_ptr;	/* descriptor pool memory */
	struct tx_desc *first_tx_desc;
	struct tx_desc *tx_insert_ptr;	/* next free Tx slot */
	struct tx_desc *tx_remove_ptr;	/* oldest in-flight Tx slot */
	struct rx_desc *first_rx_desc;
	struct rx_desc *rx_insert_ptr;	/* next slot to refill with an skb */
	struct rx_desc *rx_ready_ptr;	/* packet come pointer */
	unsigned long tx_packet_cnt;	/* transmitted packet count */
	unsigned long tx_queue_cnt;	/* wait to send packet count */
	unsigned long rx_avail_cnt;	/* available rx descriptor count */
	unsigned long interval_rx_cnt;	/* rx packet count a callback time */

	u16 HPNA_command;		/* For HPNA register 16 */
	u16 HPNA_timer;			/* For HPNA remote device check */
	u16 dbug_cnt;
	u16 NIC_capability;		/* NIC media capability */
	u16 PHY_reg4;			/* Saved Phyxcer register 4 value */

	u8 HPNA_present;		/* 0:none, 1:DM9801, 2:DM9802 */
	u8 chip_type;			/* Keep DM9102A chip type */
	u8 media_mode;			/* user specify media mode */
	u8 op_mode;			/* real work media mode */
	u8 phy_addr;			/* MII PHY address (set to 1 at init) */
	u8 link_failed;			/* Ever link failed */
	u8 wait_reset;			/* Hardware failed, need to reset */
	u8 dm910x_chk_mode;		/* Operating mode check */
	u8 first_in_callback;		/* Flag to record state */
	struct timer_list timer;	/* 1 Hz media-sense/watchdog timer */

	/* System defined statistic counter */
	struct net_device_stats stats;

	/* Driver defined statistic counter */
	unsigned long tx_fifo_underrun;
	unsigned long tx_loss_carrier;
	unsigned long tx_no_carrier;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_jabber_timeout;
	unsigned long reset_count;
	unsigned long reset_cr8;
	unsigned long reset_fatal;
	unsigned long reset_TXtimeout;

	/* NIC SROM data (64 words, read at probe time) */
	unsigned char srom[128];
};
255
/* I/O port offsets of the chip's control registers (CR0..CR15),
 * each 8 bytes apart from the I/O base address. */
enum dmfe_offsets {
	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
	DCR15 = 0x78
};
262
/* Bit definitions for CR6, the chip's operation-mode register
 * (Rx/Tx start, promiscuous/pass-all-multicast, full duplex, ...). */
enum dmfe_CR6_bits {
	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};
268
/* Global variable declaration ----------------------------- */
static int __devinitdata printed_version;	/* print banner only once */
static char version[] __devinitdata =
	KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version "
	DRV_VERSION " (" DRV_RELDATE ")\n";

static int dmfe_debug;				/* enables DMFE_DBUG output */
static unsigned char dmfe_media_mode = DMFE_AUTO;	/* effective media mode */
static u32 dmfe_cr6_user_set;			/* user-supplied CR6 override */

/* For module input parameter */
static int debug;				/* copied into dmfe_debug */
static u32 cr6set;				/* copied into dmfe_cr6_user_set */
static unsigned char mode = 8;			/* 8 == DMFE_AUTO */
static u8 chkmode = 1;				/* enable operating-mode check */
static u8 HPNA_mode;		/* Default: Low Power/High Speed */
static u8 HPNA_rx_cmd;		/* Default: Disable Rx remote command */
static u8 HPNA_tx_cmd;		/* Default: Don't issue remote command */
static u8 HPNA_NoiseFloor;	/* Default: HPNA NoiseFloor */
static u8 SF_mode;		/* Special Function: 1:VLAN, 2:RX Flow Control
				   4: TX pause packet */
290
291
292/* function declaration ------------------------------------- */
293static int dmfe_open(struct DEVICE *);
294static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
295static int dmfe_stop(struct DEVICE *);
296static struct net_device_stats * dmfe_get_stats(struct DEVICE *);
297static void dmfe_set_filter_mode(struct DEVICE *);
298static int dmfe_do_ioctl(struct DEVICE *, struct ifreq *, int);
299static u16 read_srom_word(long ,int);
300static void dmfe_interrupt(int , void *, struct pt_regs *);
301static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
302static void allocate_rx_buffer(struct dmfe_board_info *);
303static void update_cr6(u32, unsigned long);
304static void send_filter_frame(struct DEVICE * ,int);
305static void dm9132_id_table(struct DEVICE * ,int);
306static u16 phy_read(unsigned long, u8, u8, u32);
307static void phy_write(unsigned long, u8, u8, u16, u32);
308static void phy_write_1bit(unsigned long, u32);
309static u16 phy_read_1bit(unsigned long);
310static u8 dmfe_sense_speed(struct dmfe_board_info *);
311static void dmfe_process_mode(struct dmfe_board_info *);
312static void dmfe_timer(unsigned long);
313static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
314static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
315static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
316static void dmfe_dynamic_reset(struct DEVICE *);
317static void dmfe_free_rxbuffer(struct dmfe_board_info *);
318static void dmfe_init_dm910x(struct DEVICE *);
319static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
320static void dmfe_parse_srom(struct dmfe_board_info *);
321static void dmfe_program_DM9801(struct dmfe_board_info *, int);
322static void dmfe_program_DM9802(struct dmfe_board_info *);
323static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
324static void dmfe_set_phyxcer(struct dmfe_board_info *);
325
/* DM910X network board routine ---------------------------- */
327
328/*
329 *	Search DM910X board ,allocate space and register it
330 */
331
332static int __devinit dmfe_init_one (struct pci_dev *pdev,
333				    const struct pci_device_id *ent)
334{
335	struct dmfe_board_info *db;	/* board information structure */
336	struct net_device *dev;
337	u32 dev_rev, pci_pmr;
338	int i, err;
339
340	DMFE_DBUG(0, "dmfe_init_one()", 0);
341
342	if (!printed_version++)
343		printk(version);
344
345	/* Init network device */
346	dev = alloc_etherdev(sizeof(*db));
347	if (dev == NULL)
348		return -ENOMEM;
349	SET_MODULE_OWNER(dev);
350
351	if (pci_set_dma_mask(pdev, 0xffffffff)) {
352		printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
353		err = -ENODEV;
354		goto err_out_free;
355	}
356
357	/* Enable Master/IO access, Disable memory access */
358	err = pci_enable_device(pdev);
359	if (err)
360		goto err_out_free;
361
362	if (!pci_resource_start(pdev, 0)) {
363		printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
364		err = -ENODEV;
365		goto err_out_disable;
366	}
367
368	/* Read Chip revision */
369	pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
370
371	if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev, dev_rev)) ) {
372		printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
373		err = -ENODEV;
374		goto err_out_disable;
375	}
376
377
378	if (pci_request_regions(pdev, DRV_NAME)) {
379		printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
380		err = -ENODEV;
381		goto err_out_disable;
382	}
383
384	/* Init system & device */
385	db = dev->priv;
386
387	/* Allocate Tx/Rx descriptor memory */
388	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
389	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
390
391	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
392	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
393	db->buf_pool_start = db->buf_pool_ptr;
394	db->buf_pool_dma_start = db->buf_pool_dma_ptr;
395
396	db->chip_id = ent->driver_data;
397	db->ioaddr = pci_resource_start(pdev, 0);
398	db->chip_revision = dev_rev;
399
400	db->pdev = pdev;
401
402	dev->base_addr = db->ioaddr;
403	dev->irq = pdev->irq;
404	pci_set_drvdata(pdev, dev);
405	dev->open = &dmfe_open;
406	dev->hard_start_xmit = &dmfe_start_xmit;
407	dev->stop = &dmfe_stop;
408	dev->get_stats = &dmfe_get_stats;
409	dev->set_multicast_list = &dmfe_set_filter_mode;
410	dev->do_ioctl = &dmfe_do_ioctl;
411	spin_lock_init(&db->lock);
412
413	pci_read_config_dword(pdev, 0x50, &pci_pmr);
414	pci_pmr &= 0x70000;
415	if ( (pci_pmr == 0x10000) && (dev_rev == 0x02000031) )
416		db->chip_type = 1;	/* DM9102A E3 */
417	else
418		db->chip_type = 0;
419
420	/* read 64 word srom data */
421	for (i = 0; i < 64; i++)
422		((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
423
424	/* Set Node address */
425	for (i = 0; i < 6; i++)
426		dev->dev_addr[i] = db->srom[20 + i];
427
428	err = register_netdev (dev);
429	if (err)
430		goto err_out_res;
431
432	printk(KERN_INFO "%s: Davicom DM%04lx at pci%s,",
433		dev->name,
434		ent->driver_data >> 16,
435		pdev->slot_name);
436	for (i = 0; i < 6; i++)
437		printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
438	printk(", irq %d.\n", dev->irq);
439
440	pci_set_master(pdev);
441
442	return 0;
443
444err_out_res:
445	pci_release_regions(pdev);
446err_out_disable:
447	pci_disable_device(pdev);
448err_out_free:
449	pci_set_drvdata(pdev, NULL);
450	kfree(dev);
451
452	return err;
453}
454
455
456static void __devexit dmfe_remove_one (struct pci_dev *pdev)
457{
458	struct net_device *dev = pci_get_drvdata(pdev);
459	struct dmfe_board_info *db = dev->priv;
460
461	DMFE_DBUG(0, "dmfe_remove_one()", 0);
462
463 	if (dev) {
464		pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
465					DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
466 					db->desc_pool_dma_ptr);
467		pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
468					db->buf_pool_ptr, db->buf_pool_dma_ptr);
469		unregister_netdev(dev);
470		pci_release_regions(pdev);
471		kfree(dev);	/* free board information */
472		pci_set_drvdata(pdev, NULL);
473	}
474
475	DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
476}
477
478
479/*
480 *	Open the interface.
481 *	The interface is opened whenever "ifconfig" actives it.
482 */
483
static int dmfe_open(struct DEVICE *dev)
{
	int ret;
	struct dmfe_board_info *db = dev->priv;

	DMFE_DBUG(0, "dmfe_open", 0);

	/* Shared IRQ line: register the handler before touching hardware */
	ret = request_irq(dev->irq, &dmfe_interrupt, SA_SHIRQ, dev->name, dev);
	if (ret)
		return ret;

	/* system variable init */
	db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->link_failed = 1;	/* assume no link until the timer senses one */
	db->wait_reset = 0;

	db->first_in_callback = 0;
	db->NIC_capability = 0xf;	/* All capability*/
	db->PHY_reg4 = 0x1e0;

	/* CR6 operation mode decision: DM9132 and DM9102A rev >= 0x30 run
	   in normal mode; older silicon enters the software check mode */
	if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
		(db->chip_revision >= 0x02000030) ) {
    		db->cr6_data |= DMFE_TXTH_256;
		db->cr0_data = CR0_DEFAULT;
		db->dm910x_chk_mode=4;		/* Enter the normal mode */
 	} else {
		db->cr6_data |= CR6_SFT;	/* Store & Forward mode */
		db->cr0_data = 0;
		db->dm910x_chk_mode = 1;	/* Enter the check mode */
	}

	/* Initialize DM910X board: reset chip, build rings, start Tx/Rx */
	dmfe_init_dm910x(dev);

	/* Active System Interface: let the stack queue packets to us */
	netif_wake_queue(dev);

	/* set and active a timer process (first fire ~3 s out, then 1 Hz) */
	init_timer(&db->timer);
	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
	db->timer.data = (unsigned long)dev;
	db->timer.function = &dmfe_timer;
	add_timer(&db->timer);

	return 0;
}
534
535
/*	Initialize DM910X board
 *	Reset DM910X board
 *	Initialize TX/Rx descriptor chain structure
 *	Send the set-up frame
 *	Enable Tx/Rx machine
 */
542
static void dmfe_init_dm910x(struct DEVICE *dev)
{
	struct dmfe_board_info *db = dev->priv;
	unsigned long ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

	/* Reset DM910x MAC controller, then restore the chosen CR0 mode */
	outl(DM910X_RESET, ioaddr + DCR0);	/* RESET MAC */
	udelay(100);
	outl(db->cr0_data, ioaddr + DCR0);
	udelay(5);

	/* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
	db->phy_addr = 1;

	/* Parse SROM and decide media mode */
	dmfe_parse_srom(db);
	db->media_mode = dmfe_media_mode;

	/* RESET Phyxcer Chip by GPR port bit 7 */
	outl(0x180, ioaddr + DCR12);		/* Let bit 7 output port */
	if (db->chip_id == PCI_DM9009_ID) {
		outl(0x80, ioaddr + DCR12);	/* Issue RESET signal */
		mdelay(300);			/* Delay 300 ms */
	}
	outl(0x0, ioaddr + DCR12);	/* Clear RESET signal */

	/* Process Phyxcer Media Mode (skipped when forced to 1M HPNA) */
	if ( !(db->media_mode & 0x10) )	/* Force 1M mode */
		dmfe_set_phyxcer(db);

	/* Media Mode Process: a forced mode bypasses auto-negotiation */
	if ( !(db->media_mode & DMFE_AUTO) )
		db->op_mode = db->media_mode; 	/* Force Mode */

	/* Initialize Transmit/Receive descriptor rings and CR3/4 */
	dmfe_descriptor_init(db, ioaddr);

	/* Init CR6 to program DM910x operation */
	update_cr6(db->cr6_data, ioaddr);

	/* Send setup frame to load the MAC/multicast filter */
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev, dev->mc_count);	/* DM9132 */
	else
		send_filter_frame(dev, dev->mc_count);	/* DM9102/DM9102A */

	/* Init CR7, interrupt active bit */
	db->cr7_data = CR7_DEFAULT;
	outl(db->cr7_data, ioaddr + DCR7);

	/* Init CR15, Tx jabber and Rx watchdog timer */
	outl(db->cr15_data, ioaddr + DCR15);

	/* Enable DM910X Tx/Rx function */
	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
	update_cr6(db->cr6_data, ioaddr);
}
602
603
604/*
605 *	Hardware start transmission.
606 *	Send a packet to media from the upper layer.
607 */
608
static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
{
	struct dmfe_board_info *db = dev->priv;
	struct tx_desc *txptr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_start_xmit", 0);

	/* Resource flag check: stop the queue first, re-enable below if
	   there is still room after this packet is queued */
	netif_stop_queue(dev);

	/* Too large packet check: silently drop (counted as success) */
	if (skb->len > MAX_PACKET_SIZE) {
		printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
		dev_kfree_skb(skb);
		return 0;
	}

	spin_lock_irqsave(&db->lock, flags);

	/* No Tx resource check, it never happens normally */
	if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
		spin_unlock_irqrestore(&db->lock, flags);
		printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_queue_cnt);
		return 1;	/* tell the stack to requeue */
	}

	/* Disable NIC interrupt while the ring is being updated */
	outl(0, dev->base_addr + DCR7);

	/* transmit this packet: copy into the pre-mapped bounce buffer */
	txptr = db->tx_insert_ptr;
	memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

	/* Point to next transmit free descriptor */
	db->tx_insert_ptr = txptr->next_tx_desc;

	/* Transmit Packet Process: hand to the chip now if under the
	   per-poll send limit, otherwise leave it queued for the Tx-done
	   path in dmfe_free_tx_pkt() to kick */
	if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	} else {
		db->tx_queue_cnt++;			/* queue TX packet */
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
	}

	/* Tx resource check */
	if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
		netif_wake_queue(dev);

	/* free this SKB: data already copied to the bounce buffer */
	dev_kfree_skb(skb);

	/* Restore CR7 to enable interrupt */
	spin_unlock_irqrestore(&db->lock, flags);
	outl(db->cr7_data, dev->base_addr + DCR7);

	return 0;
}
671
672
673/*
674 *	Stop the interface.
675 *	The interface is stopped when it is brought.
676 */
677
static int dmfe_stop(struct DEVICE *dev)
{
	struct dmfe_board_info *db = dev->priv;
	unsigned long ioaddr = dev->base_addr;

	DMFE_DBUG(0, "dmfe_stop", 0);

	/* disable system: no more packets from the stack */
	netif_stop_queue(dev);

	/* delete timer (sync: may be running on another CPU) */
	del_timer_sync(&db->timer);

	/* Reset & stop DM910X board, then put the PHY into reset
	   (0x8000 in MII register 0 -- NOTE(review): presumably the BMCR
	   reset/power bit, confirm against the PHY datasheet) */
	outl(DM910X_RESET, ioaddr + DCR0);
	udelay(5);
	phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);

	/* free interrupt */
	free_irq(dev->irq, dev);

	/* free allocated rx buffer (skbs still on the ring) */
	dmfe_free_rxbuffer(db);


	return 0;
}
705
706
707/*
708 *	DM9102 insterrupt handler
709 *	receive the packet to upper layer, free the transmitted packet
710 */
711
712static void dmfe_interrupt(int irq, void *dev_id, struct pt_regs *regs)
713{
714	struct DEVICE *dev = dev_id;
715	struct dmfe_board_info *db = (struct dmfe_board_info *) dev->priv;
716	unsigned long ioaddr = dev->base_addr;
717	unsigned long flags;
718
719	DMFE_DBUG(0, "dmfe_interrupt()", 0);
720
721	if (!dev) {
722		DMFE_DBUG(1, "dmfe_interrupt() without DEVICE arg", 0);
723		return;
724	}
725
726	spin_lock_irqsave(&db->lock, flags);
727
728	/* Got DM910X status */
729	db->cr5_data = inl(ioaddr + DCR5);
730	outl(db->cr5_data, ioaddr + DCR5);
731	if ( !(db->cr5_data & 0xc1) ) {
732		spin_unlock_irqrestore(&db->lock, flags);
733		return;
734	}
735
736	/* Disable all interrupt in CR7 to solve the interrupt edge problem */
737	outl(0, ioaddr + DCR7);
738
739	/* Check system status */
740	if (db->cr5_data & 0x2000) {
741		/* system bus error happen */
742		DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
743		db->reset_fatal++;
744		db->wait_reset = 1;	/* Need to RESET */
745		spin_unlock_irqrestore(&db->lock, flags);
746		return;
747	}
748
749	 /* Received the coming packet */
750	if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
751		dmfe_rx_packet(dev, db);
752
753	/* reallocate rx descriptor buffer */
754	if (db->rx_avail_cnt<RX_DESC_CNT)
755		allocate_rx_buffer(db);
756
757	/* Free the transmitted descriptor */
758	if ( db->cr5_data & 0x01)
759		dmfe_free_tx_pkt(dev, db);
760
761	/* Mode Check */
762	if (db->dm910x_chk_mode & 0x2) {
763		db->dm910x_chk_mode = 0x4;
764		db->cr6_data |= 0x100;
765		update_cr6(db->cr6_data, db->ioaddr);
766	}
767
768	/* Restore CR7 to enable interrupt mask */
769	outl(db->cr7_data, ioaddr + DCR7);
770
771	spin_unlock_irqrestore(&db->lock, flags);
772}
773
774
775/*
776 *	Free TX resource after TX complete
777 */
778
/* Reclaim Tx descriptors whose owner bit the chip has cleared, update
 * error statistics, and kick the next queued packet if any.
 * Caller holds db->lock (called from the interrupt handler). */
static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct tx_desc *txptr;
	unsigned long ioaddr = dev->base_addr;
	u32 tdes0;

	txptr = db->tx_remove_ptr;
	while(db->tx_packet_cnt) {
		tdes0 = le32_to_cpu(txptr->tdes0);
		/* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
		if (tdes0 & 0x80000000)
			break;	/* still owned by the chip: stop here */

		/* A packet sent completed */
		db->tx_packet_cnt--;
		db->stats.tx_packets++;

		/* Transmit statistic counter */
		if ( tdes0 != 0x7fffffff ) {
			/* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
			db->stats.collisions += (tdes0 >> 3) & 0xf;
			db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
			if (tdes0 & TDES0_ERR_MASK) {
				db->stats.tx_errors++;

				if (tdes0 & 0x0002) {	/* UnderRun */
					db->tx_fifo_underrun++;
					/* fall back to store-and-forward to
					   avoid further underruns */
					if ( !(db->cr6_data & CR6_SFT) ) {
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, db->ioaddr);
					}
				}
				if (tdes0 & 0x0100)
					db->tx_excessive_collision++;
				if (tdes0 & 0x0200)
					db->tx_late_collision++;
				if (tdes0 & 0x0400)
					db->tx_no_carrier++;
				if (tdes0 & 0x0800)
					db->tx_loss_carrier++;
				if (tdes0 & 0x4000)
					db->tx_jabber_timeout++;
			}
		}

    		txptr = txptr->next_tx_desc;
	}/* End of while */

	/* Update TX remove pointer to next */
	db->tx_remove_ptr = txptr;

	/* Send the Tx packet in queue: hand the next queued descriptor
	   to the chip now that a slot has freed up */
	if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		db->tx_queue_cnt--;
		outl(0x1, ioaddr + DCR1);		/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	}

	/* Resource available check */
	if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
		netif_wake_queue(dev);	/* Active upper layer, send again */
}
843
844
845/*
846 *	Receive the come packet and pass to upper layer
847 */
848
/* Walk the Rx ring from rx_ready_ptr, passing good frames to the stack
 * and recycling the skb of bad/partial ones back onto the ring.
 * Caller holds db->lock (called from the interrupt handler). */
static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb;
	int rxlen;
	u32 rdes0;

	rxptr = db->rx_ready_ptr;

	while(db->rx_avail_cnt) {
		rdes0 = le32_to_cpu(rxptr->rdes0);
		if (rdes0 & 0x80000000)	/* packet owner check */
			break;	/* still owned by the chip */

		db->rx_avail_cnt--;
		db->interval_rx_cnt++;

		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
		if ( (rdes0 & 0x300) != 0x300) {
			/* A packet without First/Last flag */
			/* reuse this SKB */
			DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
			dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
		} else {
			/* A packet with First/Last flag */
			/* frame length from rdes0 bits 16-29, minus 4-byte CRC */
			rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;

			/* error summary bit check */
			if (rdes0 & 0x8000) {
				/* This is a error packet */
				//printk(DRV_NAME ": rdes0: %lx\n", rdes0);
				db->stats.rx_errors++;
				if (rdes0 & 1)
					db->stats.rx_fifo_errors++;
				if (rdes0 & 2)
					db->stats.rx_crc_errors++;
				if (rdes0 & 0x80)
					db->stats.rx_length_errors++;
			}

			/* Deliver if error-free, or (in pass-multicast
			   promiscuous-check mode) even an errored frame */
			if ( !(rdes0 & 0x8000) ||
				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
				skb = rxptr->rx_skb_ptr;

				/* Received Packet CRC check need or not:
				   check mode 1 verifies the chip's CRC in
				   software to detect flaky hardware */
				if ( (db->dm910x_chk_mode & 1) &&
					(cal_CRC(skb->tail, rxlen, 1) !=
					(*(u32 *) (skb->tail+rxlen) ))) {
					/* Found a error received packet */
					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					db->dm910x_chk_mode = 3;
				} else {
					/* Good packet, send to upper layer */
					/* Short packets are copied into a
					   fresh small SKB so the big ring
					   buffer can be reused */
					if ( (rxlen < RX_COPY_SIZE) &&
						( (skb = dev_alloc_skb(rxlen + 2) )
						!= NULL) ) {
						/* size less than COPY_SIZE, allocate a rxlen SKB */
						skb->dev = dev;
						skb_reserve(skb, 2); /* 16byte align */
						memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
						dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					} else {
						skb->dev = dev;
						skb_put(skb, rxlen);
					}
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->last_rx = jiffies;
					db->stats.rx_packets++;
					db->stats.rx_bytes += rxlen;
				}
			} else {
				/* Reuse SKB buffer when the packet is error */
				DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
			}
		}

		rxptr = rxptr->next_rx_desc;
	}

	db->rx_ready_ptr = rxptr;
}
933
934
935/*
936 *	Get statistics from driver.
937 */
938
939static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
940{
941	struct dmfe_board_info *db = (struct dmfe_board_info *)dev->priv;
942
943	DMFE_DBUG(0, "dmfe_get_stats", 0);
944	return &db->stats;
945}
946
947
948/*
949 * Set DM910X multicast address
950 */
951
static void dmfe_set_filter_mode(struct DEVICE * dev)
{
	struct dmfe_board_info *db = dev->priv;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
	spin_lock_irqsave(&db->lock, flags);

	/* Promiscuous: pass everything, including bad frames */
	if (dev->flags & IFF_PROMISC) {
		DMFE_DBUG(0, "Enable PROM Mode", 0);
		db->cr6_data |= CR6_PM | CR6_PBF;
		update_cr6(db->cr6_data, db->ioaddr);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* All-multicast, or more groups than the 14-entry hardware filter
	   can hold: set pass-all-multicast instead.
	   NOTE(review): unlike the promiscuous branch above, cr6_data is
	   not written to the chip here via update_cr6() -- confirm whether
	   that is intentional (deferred to a later CR6 update) or a bug. */
	if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
		DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
		db->cr6_data &= ~(CR6_PM | CR6_PBF);
		db->cr6_data |= CR6_PAM;
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* Normal case: program the exact multicast list into the chip */
	DMFE_DBUG(0, "Set multicast address", dev->mc_count);
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev, dev->mc_count);	/* DM9132 */
	else
		send_filter_frame(dev, dev->mc_count); 	/* DM9102/DM9102A */
	spin_unlock_irqrestore(&db->lock, flags);
}
983
984
985/*
986 *	Process the ethtool ioctl command
987 */
988
989static int dmfe_ethtool_ioctl(struct net_device *dev, void *useraddr)
990{
991	struct dmfe_board_info *db = dev->priv;
992	struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
993	u32 ethcmd;
994
995	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
996		return -EFAULT;
997
998        switch (ethcmd) {
999        case ETHTOOL_GDRVINFO:
1000		strcpy(info.driver, DRV_NAME);
1001		strcpy(info.version, DRV_VERSION);
1002		if (db->pdev)
1003			strcpy(info.bus_info, db->pdev->slot_name);
1004		else
1005			sprintf(info.bus_info, "EISA 0x%lx %d",
1006				dev->base_addr, dev->irq);
1007		if (copy_to_user(useraddr, &info, sizeof(info)))
1008			return -EFAULT;
1009		return 0;
1010        }
1011
1012	return -EOPNOTSUPP;
1013}
1014
1015
1016/*
1017 *	Process the upper socket ioctl command
1018 */
1019
1020static int dmfe_do_ioctl(struct DEVICE *dev, struct ifreq *ifr, int cmd)
1021{
1022	int retval = -EOPNOTSUPP;
1023	DMFE_DBUG(0, "dmfe_do_ioctl()", 0);
1024
1025	switch(cmd) {
1026	case SIOCETHTOOL:
1027		return dmfe_ethtool_ioctl(dev, (void*)ifr->ifr_data);
1028	}
1029
1030	return retval;
1031}
1032
1033
/*
 *	A periodic timer routine
 *	Dynamic media sense, allocate Rx buffer...
 *	Runs with the board spinlock held and always re-arms itself
 *	before returning.
 */

static void dmfe_timer(unsigned long data)
{
	u32 tmp_cr8;
	unsigned char tmp_cr12;
	struct DEVICE *dev = (struct DEVICE *) data;
	struct dmfe_board_info *db = (struct dmfe_board_info *) dev->priv;
 	unsigned long flags;

	DMFE_DBUG(0, "dmfe_timer()", 0);
	spin_lock_irqsave(&db->lock, flags);

	/* Media mode process when Link OK before enter this route */
	if (db->first_in_callback == 0) {
		db->first_in_callback = 1;
		/* First tick on a DM9102 (chip_type set): restart PHY
		   auto-negotiation with the external transceiver briefly
		   deselected, then come back in 2 extra seconds. */
		if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
			db->cr6_data &= ~0x40000;
			update_cr6(db->cr6_data, db->ioaddr);
			phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
			db->cr6_data |= 0x40000;
			update_cr6(db->cr6_data, db->ioaddr);
			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
			add_timer(&db->timer);
			spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
	}


	/* Operating Mode Check */
	if ( (db->dm910x_chk_mode & 0x1) &&
		(db->stats.rx_packets > MAX_CHECK_PACKET) )
		db->dm910x_chk_mode = 0x4;

	/* Dynamic reset DM910X : system error or transmit time-out */
	/* A non-zero CR8 with no Rx activity since the last tick is
	   treated as a wedged chip and schedules a dynamic reset. */
	tmp_cr8 = inl(db->ioaddr + DCR8);
	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
		db->reset_cr8++;
		db->wait_reset = 1;
	}
	db->interval_rx_cnt = 0;

	/* TX polling kick monitor */
	if ( db->tx_packet_cnt &&
	     time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
		outl(0x1, dev->base_addr + DCR1);   /* Tx polling again */

		/* TX Timeout */
		if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
			db->reset_TXtimeout++;
			db->wait_reset = 1;
			printk(KERN_WARNING "%s: Tx timeout - resetting\n",
			       dev->name);
		}
	}

	if (db->wait_reset) {
		/* Perform the reset, re-arm the timer and leave early */
		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
		db->reset_count++;
		dmfe_dynamic_reset(dev);
		db->first_in_callback = 0;
		db->timer.expires = DMFE_TIMER_WUT;
		add_timer(&db->timer);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* Link status check, Dynamic media type change */
	if (db->chip_id == PCI_DM9132_ID)
		tmp_cr12 = inb(db->ioaddr + DCR9 + 3);	/* DM9132 */
	else
		tmp_cr12 = inb(db->ioaddr + DCR12);	/* DM9102/DM9102A */

	/* On these specific chip revisions bit1 set means the link is
	   down; normalize to the 0x3 = "link OK" convention used below. */
	if ( ((db->chip_id == PCI_DM9102_ID) &&
		(db->chip_revision == 0x02000030)) ||
		((db->chip_id == PCI_DM9132_ID) &&
		(db->chip_revision == 0x02000010)) ) {
		/* DM9102A Chip */
		if (tmp_cr12 & 2)
			tmp_cr12 = 0x0;		/* Link failed */
		else
			tmp_cr12 = 0x3;	/* Link OK */
	}

	if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
		/* Link Failed */
		DMFE_DBUG(0, "Link Failed", tmp_cr12);
		db->link_failed = 1;

		/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
		/* AUTO or force 1M Homerun/Longrun don't need */
		if ( !(db->media_mode & 0x38) )
			phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);

		/* AUTO mode, if INT phyxcer link failed, select EXT device */
		if (db->media_mode & DMFE_AUTO) {
			/* 10/100M link failed, used 1M Home-Net */
			db->cr6_data|=0x00040000;	/* bit18=1, MII */
			db->cr6_data&=~0x00000200;	/* bit9=0, HD mode */
			update_cr6(db->cr6_data, db->ioaddr);
		}
	} else
		if ((tmp_cr12 & 0x3) && db->link_failed) {
			/* Link came back up: re-sense speed and reprogram
			   the MAC to match the negotiated mode. */
			DMFE_DBUG(0, "Link link OK", tmp_cr12);
			db->link_failed = 0;

			/* Auto Sense Speed */
			if ( (db->media_mode & DMFE_AUTO) &&
				dmfe_sense_speed(db) )
				db->link_failed = 1;
			dmfe_process_mode(db);
			/* SHOW_MEDIA_TYPE(db->op_mode); */
		}

	/* HPNA remote command check */
	if (db->HPNA_command & 0xf00) {
		db->HPNA_timer--;
		if (!db->HPNA_timer)
			dmfe_HPNA_remote_cmd_chk(db);
	}

	/* Timer active again */
	db->timer.expires = DMFE_TIMER_WUT;
	add_timer(&db->timer);
	spin_unlock_irqrestore(&db->lock, flags);
}
1164
1165
/*
 *	Dynamic reset the DM910X board
 *	Stop DM910X board
 *	Free Tx/Rx allocated memory
 *	Reset DM910X board
 *	Re-initilize DM910X board
 */

static void dmfe_dynamic_reset(struct DEVICE *dev)
{
	struct dmfe_board_info *db = dev->priv;

	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

	/* Stop MAC controller ("Sopt" was a typo for Stop) */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
	update_cr6(db->cr6_data, dev->base_addr);
	outl(0, dev->base_addr + DCR7);		/* Disable Interrupt */
	/* Writing CR5 back to itself acknowledges all pending status
	   bits (write-one-to-clear, as on other tulip-family chips). */
	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);

	/* Disable upper layer interface */
	netif_stop_queue(dev);

	/* Free Rx Allocate buffer */
	dmfe_free_rxbuffer(db);

	/* system variable init: rings are rebuilt from scratch below */
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->link_failed = 1;
	db->wait_reset = 0;

	/* Re-initilize DM910X board */
	dmfe_init_dm910x(dev);

	/* Restart upper layer interface */
	netif_wake_queue(dev);
}
1205
1206
1207/*
1208 *	free all allocated rx buffer
1209 */
1210
1211static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1212{
1213	DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1214
1215	/* free allocated rx buffer */
1216	while (db->rx_avail_cnt) {
1217		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1218		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1219		db->rx_avail_cnt--;
1220	}
1221}
1222
1223
/*
 *	Reuse the SK buffer
 *	Re-attach an skb to the next insert descriptor and hand the
 *	descriptor back to the NIC.
 */

static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
{
	struct rx_desc *rxptr = db->rx_insert_ptr;

	/* Only proceed if the descriptor is ours (OWN bit clear) */
	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
		rxptr->rx_skb_ptr = skb;
		/* Map the data area for DMA and publish the bus address */
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		/* rdes2 must be globally visible before OWN is given back */
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		db->rx_avail_cnt++;
		db->rx_insert_ptr = rxptr->next_rx_desc;
	} else
		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}
1242
1243
/*
 *	Initialize transmit/Receive descriptor
 *	Using Chain structure, and allocate Tx/Rx buffer
 */

static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
{
	struct tx_desc *tmp_tx;
	struct rx_desc *tmp_rx;
	unsigned char *tmp_buf;
	dma_addr_t tmp_tx_dma, tmp_rx_dma;
	dma_addr_t tmp_buf_dma;
	int i;

	DMFE_DBUG(0, "dmfe_descriptor_init()", 0);

	/* tx descriptor start pointer */
	db->tx_insert_ptr = db->first_tx_desc;
	db->tx_remove_ptr = db->first_tx_desc;
	outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */

	/* rx descriptor start pointer: the Rx ring lives immediately
	   after the Tx ring inside the same DMA allocation, so both the
	   CPU and bus addresses are derived by the same offset. */
	db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
	db->first_rx_desc_dma =  db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
	db->rx_insert_ptr = db->first_rx_desc;
	db->rx_ready_ptr = db->first_rx_desc;
	outl(db->first_rx_desc_dma, ioaddr + DCR3);	/* RX DESC address */

	/* Init Transmit chain: each descriptor gets a fixed TX_BUF_ALLOC
	   slice of the buffer pool, and tdes3 carries the bus address of
	   the following descriptor (chained mode). */
	tmp_buf = db->buf_pool_start;
	tmp_buf_dma = db->buf_pool_dma_start;
	tmp_tx_dma = db->first_tx_desc_dma;
	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
		tmp_tx->tx_buf_ptr = tmp_buf;
		tmp_tx->tdes0 = cpu_to_le32(0);
		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
		tmp_tx_dma += sizeof(struct tx_desc);
		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
		tmp_tx->next_tx_desc = tmp_tx + 1;
		tmp_buf = tmp_buf + TX_BUF_ALLOC;
		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
	}
	/* Close the ring: last descriptor chains back to the first */
	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
	tmp_tx->next_tx_desc = db->first_tx_desc;

	 /* Init Receive descriptor chain (skbs are attached below) */
	tmp_rx_dma=db->first_rx_desc_dma;
	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
		tmp_rx->rdes0 = cpu_to_le32(0);
		tmp_rx->rdes1 = cpu_to_le32(0x01000600);
		tmp_rx_dma += sizeof(struct rx_desc);
		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
		tmp_rx->next_rx_desc = tmp_rx + 1;
	}
	/* Close the Rx ring as well */
	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
	tmp_rx->next_rx_desc = db->first_rx_desc;

	/* pre-allocate Rx buffer */
	allocate_rx_buffer(db);
}
1305
1306
1307/*
1308 *	Update CR6 value
1309 *	Firstly stop DM910X , then written value and start
1310 */
1311
1312static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1313{
1314	u32 cr6_tmp;
1315
1316	cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
1317	outl(cr6_tmp, ioaddr + DCR6);
1318	udelay(5);
1319	outl(cr6_data, ioaddr + DCR6);
1320	udelay(5);
1321}
1322
1323
1324/*
1325 *	Send a setup frame for DM9132
1326 *	This setup frame initilize DM910X addres filter mode
1327*/
1328
1329static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
1330{
1331	struct dev_mc_list *mcptr;
1332	u16 * addrptr;
1333	unsigned long ioaddr = dev->base_addr+0xc0;		/* ID Table */
1334	u32 hash_val;
1335	u16 i, hash_table[4];
1336
1337	DMFE_DBUG(0, "dm9132_id_table()", 0);
1338
1339	/* Node address */
1340	addrptr = (u16 *) dev->dev_addr;
1341	outw(addrptr[0], ioaddr);
1342	ioaddr += 4;
1343	outw(addrptr[1], ioaddr);
1344	ioaddr += 4;
1345	outw(addrptr[2], ioaddr);
1346	ioaddr += 4;
1347
1348	/* Clear Hash Table */
1349	for (i = 0; i < 4; i++)
1350		hash_table[i] = 0x0;
1351
1352	/* broadcast address */
1353	hash_table[3] = 0x8000;
1354
1355	/* the multicast address in Hash Table : 64 bits */
1356	for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1357		hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
1358		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1359	}
1360
1361	/* Write the hash table to MAC MD table */
1362	for (i = 0; i < 4; i++, ioaddr += 4)
1363		outw(hash_table[i], ioaddr);
1364}
1365
1366
/*
 *	Send a setup frame for DM9102/DM9102A
 *	This setup frame initilize DM910X addres filter mode
 *	The frame carries 16 perfect-filter slots (station address,
 *	broadcast, the multicast list, broadcast padding), each slot
 *	stored as three 32-bit words holding 16 address bits apiece.
 */

static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
{
	struct dmfe_board_info *db = dev->priv;
	struct dev_mc_list *mcptr;
	struct tx_desc *txptr;
	u16 * addrptr;
	u32 * suptr;
	int i;

	DMFE_DBUG(0, "send_filter_frame()", 0);

	/* Build the frame in the next Tx descriptor's buffer */
	txptr = db->tx_insert_ptr;
	suptr = (u32 *) txptr->tx_buf_ptr;

	/* Node address */
	addrptr = (u16 *) dev->dev_addr;
	*suptr++ = addrptr[0];
	*suptr++ = addrptr[1];
	*suptr++ = addrptr[2];

	/* broadcast address */
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;

	/* fit the multicast address */
	for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
		addrptr = (u16 *) mcptr->dmi_addr;
		*suptr++ = addrptr[0];
		*suptr++ = addrptr[1];
		*suptr++ = addrptr[2];
	}

	/* pad unused slots with broadcast (i counts multicast slots;
	   node + broadcast + 14 more = 16 slots total) */
	for (; i<14; i++) {
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
	}

	/* prepare the setup frame */
	db->tx_insert_ptr = txptr->next_tx_desc;
	txptr->tdes1 = cpu_to_le32(0x890000c0);	/* setup frame, chained, 0xc0=192 bytes */

	/* Resource Check and Send the setup packet */
	if (!db->tx_packet_cnt) {
		/* Resource Empty: hand the descriptor to the NIC (OWN)
		   and issue a transmit poll demand immediately. */
		db->tx_packet_cnt++;
		txptr->tdes0 = cpu_to_le32(0x80000000);
		update_cr6(db->cr6_data | 0x2000, dev->base_addr);
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
		update_cr6(db->cr6_data, dev->base_addr);
		dev->trans_start = jiffies;
	} else
		db->tx_queue_cnt++;	/* Put in TX queue */
}
1427
1428
/*
 *	Allocate rx buffer,
 *	As possible as allocate maxiumn Rx buffer
 */

static void allocate_rx_buffer(struct dmfe_board_info *db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb;

	rxptr = db->rx_insert_ptr;

	/* Attach fresh skbs until the ring is full or allocation fails */
	while(db->rx_avail_cnt < RX_DESC_CNT) {
		if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
			break;	/* out of memory: a later call will retry */
		rxptr->rx_skb_ptr = skb;
		/* Map the data area for DMA and publish the bus address */
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		/* rdes2 must be visible before OWN is handed to the NIC */
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		rxptr = rxptr->next_rx_desc;
		db->rx_avail_cnt++;
	}

	db->rx_insert_ptr = rxptr;
}
1454
1455
/*
 *	Read one word data from the serial ROM
 *	Bit-bangs a 93C46-style serial EEPROM through CR9: read opcode
 *	110b, 6 address bits, then 16 data bits clocked out MSB first.
 */

static u16 read_srom_word(long ioaddr, int offset)
{
	int i;
	u16 srom_data = 0;
	long cr9_ioaddr = ioaddr + DCR9;

	/* Enable SROM access and raise chip select */
	outl(CR9_SROM_READ, cr9_ioaddr);
	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

	/* Send the Read Command 110b */
	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
	SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);

	/* Send the offset (6 address bits, MSB first) */
	for (i = 5; i >= 0; i--) {
		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
		SROM_CLK_WRITE(srom_data, cr9_ioaddr);
	}

	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

	/* Clock out the 16 data bits, MSB first */
	for (i = 16; i > 0; i--) {
		outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
		udelay(5);
		srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
		outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
		udelay(5);
	}

	/* Drop chip select */
	outl(CR9_SROM_READ, cr9_ioaddr);
	return srom_data;
}
1493
1494
/*
 *	Auto sense the media mode
 *	Returns 0 on success with db->op_mode set, 1 if the link is down
 *	or the PHY reported an unexpected speed/duplex code.
 */

static u8 dmfe_sense_speed(struct dmfe_board_info * db)
{
	u8 ErrFlag = 0;
	u16 phy_mode;

	/* CR6 bit18=0, select 10/100M */
	update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);

	/* Read MII status twice: link bits are latched, so only the
	   second read reflects the current state. */
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);

	/* 0x24 = auto-negotiation complete + link up */
	if ( (phy_mode & 0x24) == 0x24 ) {
		/* Chip-specific PHY register reports the resolved mode
		   in its top nibble. */
		if (db->chip_id == PCI_DM9132_ID)	/* DM9132 */
			phy_mode = phy_read(db->ioaddr, db->phy_addr, 7, db->chip_id) & 0xf000;
		else 				/* DM9102/DM9102A */
			phy_mode = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0xf000;
		/* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
		switch (phy_mode) {
		case 0x1000: db->op_mode = DMFE_10MHF; break;
		case 0x2000: db->op_mode = DMFE_10MFD; break;
		case 0x4000: db->op_mode = DMFE_100MHF; break;
		case 0x8000: db->op_mode = DMFE_100MFD; break;
		default: db->op_mode = DMFE_10MHF;	/* unknown code: safe fallback */
			ErrFlag = 1;
			break;
		}
	} else {
		db->op_mode = DMFE_10MHF;
		DMFE_DBUG(0, "Link Failed :", phy_mode);
		ErrFlag = 1;
	}

	return ErrFlag;
}
1533
1534
/*
 *	Set 10/100 phyxcer capability
 *	AUTO mode : phyxcer register4 is NIC capability
 *	Force mode: phyxcer register4 is the force media
 */

static void dmfe_set_phyxcer(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Select 10/100M phyxcer (CR6 bit18=0) */
	db->cr6_data &= ~0x40000;
	update_cr6(db->cr6_data, db->ioaddr);

	/* DM9009 Chip: Phyxcer reg18 bit12=0 */
	if (db->chip_id == PCI_DM9009_ID) {
		phy_reg = phy_read(db->ioaddr, db->phy_addr, 18, db->chip_id) & ~0x1000;
		phy_write(db->ioaddr, db->phy_addr, 18, phy_reg, db->chip_id);
	}

	/* Phyxcer capability setting: clear the four media
	   advertisement bits (0x01e0) and rebuild them below */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;

	if (db->media_mode & DMFE_AUTO) {
		/* AUTO Mode: advertise everything the NIC supports */
		phy_reg |= db->PHY_reg4;
	} else {
		/* Force Mode: advertise a single media type */
		switch(db->media_mode) {
		case DMFE_10MHF: phy_reg |= 0x20; break;
		case DMFE_10MFD: phy_reg |= 0x40; break;
		case DMFE_100MHF: phy_reg |= 0x80; break;
		case DMFE_100MFD: phy_reg |= 0x100; break;
		}
		if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;	/* DM9009: 10M only */
	}

  	/* Write new capability to Phyxcer Reg4; if nothing was selected,
  	   fall back to the full capability set and AUTO mode. */
	if ( !(phy_reg & 0x01e0)) {
		phy_reg|=db->PHY_reg4;
		db->media_mode|=DMFE_AUTO;
	}
	phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);

 	/* Restart Auto-Negotiation (register 0: 0x1800/0x1200 set the
 	   restart-autoneg bit; variant depends on chip_type) */
	if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
	if ( !db->chip_type )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
}
1585
1586
/*
 *	Process op-mode
 *	AUTO mode : PHY controller in Auto-negotiation Mode
 *	Force mode: PHY controller in force mode with HUB
 *			N-way force capability with SWITCH
 */

static void dmfe_process_mode(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Full Duplex Mode Check (op_mode bit2 = full duplex) */
	if (db->op_mode & 0x4)
		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
	else
		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */

	/* Transciver Selection */
	if (db->op_mode & 0x10)		/* 1M HomePNA */
		db->cr6_data |= 0x40000;/* External MII select */
	else
		db->cr6_data &= ~0x40000;/* Internal 10/100 transciver */

	update_cr6(db->cr6_data, db->ioaddr);

	/* 10/100M phyxcer force mode need */
	if ( !(db->media_mode & 0x18)) {
		/* Forece Mode */
		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
		if ( !(phy_reg & 0x1) ) {
			/* parter without N-Way capability: write the forced
			   speed/duplex directly into PHY control reg 0 */
			phy_reg = 0x0;
			switch(db->op_mode) {
			case DMFE_10MHF: phy_reg = 0x0; break;
			case DMFE_10MFD: phy_reg = 0x100; break;
			case DMFE_100MHF: phy_reg = 0x2000; break;
			case DMFE_100MFD: phy_reg = 0x2100; break;
			}
			phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
			/* Some DM9102 revisions need a settle delay and a
			   second write for the value to stick */
       			if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
				mdelay(20);
			phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
		}
	}
}
1632
1633
/*
 *	Write a word to Phy register
 */

static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
{
	u16 i;
	unsigned long ioaddr;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132: PHY registers are directly mapped at iobase+0x80 */
		ioaddr = iobase + 0x80 + offset * 4;
		outw(phy_data, ioaddr);
	} else {
		/* DM9102/DM9102A Chip: bit-bang an MII management frame */
		ioaddr = iobase + DCR9;

		/* Send 33 synchronization clock to Phy controller
		   (loop actually emits 35 one-bits; MII needs >= 32) */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send write command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send Phy addres (5 bits, MSB first) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register addres (5 bits, MSB first) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* written trasnition (turnaround bits 10) */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Write a word data to PHY controller (16 bits, MSB first) */
		for ( i = 0x8000; i > 0; i >>= 1)
			phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
	}
}
1679
1680
/*
 *	Read a word data from phy register
 */

static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
{
	int i;
	u16 phy_data;
	unsigned long ioaddr;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132 Chip: PHY registers directly mapped at iobase+0x80 */
		ioaddr = iobase + 0x80 + offset * 4;
		phy_data = inw(ioaddr);
	} else {
		/* DM9102/DM9102A Chip: bit-bang an MII management frame */
		ioaddr = iobase + DCR9;

		/* Send 33 synchronization clock to Phy controller
		   (loop actually emits 35 one-bits; MII needs >= 32) */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send read command(10) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Send Phy addres (5 bits, MSB first) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register addres (5 bits, MSB first) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Skip transition state (turnaround bit) */
		phy_read_1bit(ioaddr);

		/* read 16bit data, MSB first */
		for (phy_data = 0, i = 0; i < 16; i++) {
			phy_data <<= 1;
			phy_data |= phy_read_1bit(ioaddr);
		}
	}

	return phy_data;
}
1731
1732
/*
 *	Write one bit data to Phy Controller
 *	Drives MDIO with the given data level while pulsing the MII
 *	management clock low-high-low (1us per phase).
 */

static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
{
	outl(phy_data, ioaddr);			/* MII Clock Low */
	udelay(1);
	outl(phy_data | MDCLKH, ioaddr);	/* MII Clock High */
	udelay(1);
	outl(phy_data, ioaddr);			/* MII Clock Low */
	udelay(1);
}
1746
1747
/*
 *	Read one bit phy data from PHY controller
 *	Raises the management clock, samples MDIO (bit 19 of CR9), then
 *	lowers the clock again.
 */

static u16 phy_read_1bit(unsigned long ioaddr)
{
	u16 phy_data;

	outl(0x50000, ioaddr);			/* clock high */
	udelay(1);
	phy_data = ( inl(ioaddr) >> 19 ) & 0x1;	/* sample MDIO */
	outl(0x40000, ioaddr);			/* clock low */
	udelay(1);

	return phy_data;
}
1764
1765
1766/*
1767 *	Calculate the CRC valude of the Rx packet
1768 *	flag = 	1 : return the reverse CRC (for the received packet CRC)
1769 *		0 : return the normal CRC (for Hash Table index)
1770 */
1771
1772static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
1773{
1774	u32 crc = ether_crc_le(Len, Data);
1775	if (flag) crc = ~crc;
1776	return crc;
1777}
1778
1779
/*
 *	Parser SROM and media mode
 *	Extracts NIC capability / forced media / special-function bits
 *	from a V4.01 SROM, then builds the HPNA command word and probes
 *	for an attached DM9801 (HomeRun) or DM9802 (LongRun) companion.
 */

static void dmfe_parse_srom(struct dmfe_board_info * db)
{
	char * srom = db->srom;
	int dmfe_mode, tmp_reg;

	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);

	/* Init CR15 */
	db->cr15_data = CR15_DEFAULT;

	/* Check SROM Version */
	if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
		/* SROM V4.01 */
		/* Get NIC support media mode */
		/* NOTE(review): le16/le32_to_cpup on a char* offset may be
		   an unaligned access on some architectures -- confirm. */
		db->NIC_capability = le16_to_cpup(srom + 34);
		db->PHY_reg4 = 0;
		/* Translate capability bits into PHY reg4 advertisement bits */
		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
			switch( db->NIC_capability & tmp_reg ) {
			case 0x1: db->PHY_reg4 |= 0x0020; break;	/* 10MHF */
			case 0x2: db->PHY_reg4 |= 0x0040; break;	/* 10MFD */
			case 0x4: db->PHY_reg4 |= 0x0080; break;	/* 100MHF */
			case 0x8: db->PHY_reg4 |= 0x0100; break;	/* 100MFD */
			}
		}

		/* Media Mode Force or not check */
		dmfe_mode = le32_to_cpup(srom + 34) & le32_to_cpup(srom + 36);
		switch(dmfe_mode) {
		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
		case 0x100:
		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
		}

		/* Special Function setting */
		/* VLAN function */
		if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
			db->cr15_data |= 0x40;

		/* Flow Control */
		if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
			db->cr15_data |= 0x400;

		/* TX pause packet */
		if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
			db->cr15_data |= 0x9800;
	}

	/* Parse HPNA parameter */
	db->HPNA_command = 1;

	/* Accept remote command or not */
	if (HPNA_rx_cmd == 0)
		db->HPNA_command |= 0x8000;

	 /* Issue remote command & operation mode */
	if (HPNA_tx_cmd == 1)
		switch(HPNA_mode) {	/* Issue Remote Command */
		case 0: db->HPNA_command |= 0x0904; break;
		case 1: db->HPNA_command |= 0x0a00; break;
		case 2: db->HPNA_command |= 0x0506; break;
		case 3: db->HPNA_command |= 0x0602; break;
		}
	else
		switch(HPNA_mode) {	/* Don't Issue */
		case 0: db->HPNA_command |= 0x0004; break;
		case 1: db->HPNA_command |= 0x0000; break;
		case 2: db->HPNA_command |= 0x0006; break;
		case 3: db->HPNA_command |= 0x0002; break;
		}

	/* Check DM9801 or DM9802 present or not: select the external
	   transceiver and probe PHY register 3 (PHY ID2). */
	db->HPNA_present = 0;
	update_cr6(db->cr6_data|0x40000, db->ioaddr);
	tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
	if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
		/* DM9801 or DM9802 present */
		db->HPNA_timer = 8;
		if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
			/* DM9801 HomeRun */
			db->HPNA_present = 1;
			dmfe_program_DM9801(db, tmp_reg);
		} else {
			/* DM9802 LongRun */
			db->HPNA_present = 2;
			dmfe_program_DM9802(db);
		}
	}

}
1875
1876
/*
 *	Init HomeRun DM9801
 *	Programs the per-revision noise-floor calibration into PHY
 *	registers 17 and 25 (HPNA_rev comes from PHY ID register 3).
 */

static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
{
	uint reg17, reg25;

	/* Fall back to the chip default if no noise floor was given */
	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
	switch(HPNA_rev) {
	case 0xb900: /* DM9801 E3 */
		db->HPNA_command |= 0x1000;
		/* NOTE(review): E3 derives reg25 from PHY reg 24 and writes
		   reg17 back unmodified -- confirm this asymmetry is intended. */
		reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
		reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		break;
	case 0xb901: /* DM9801 E4 */
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
		break;
	case 0xb902: /* DM9801 E5 */
	case 0xb903: /* DM9801 E6 */
	default:
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
		break;
	}
	/* Commit command word and calibration values */
	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
}
1913
1914
1915/*
1916 *	Init HomeRun DM9802
1917 */
1918
1919static void dmfe_program_DM9802(struct dmfe_board_info * db)
1920{
1921	uint phy_reg;
1922
1923	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
1924	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
1925	phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
1926	phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
1927	phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
1928}
1929
1930
/*
 *	Check remote HPNA power and speed status. If not correct,
 *	issue command again.
*/

static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
{
	uint phy_reg;

	/* Got remote device status (power/speed bits 5-6 of PHY reg 17) */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
	switch(phy_reg) {
	case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
	case 0x20: phy_reg = 0x0900;break; /* LP/HS */
	case 0x40: phy_reg = 0x0600;break; /* HP/LS */
	case 0x60: phy_reg = 0x0500;break; /* HP/HS */
	}

	/* Check remote device status match our setting or not */
	if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
		/* Mismatch: re-issue the command and re-check in 8 ticks */
		phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
		db->HPNA_timer=8;
	} else
		db->HPNA_timer=600;	/* Match, every 10 minutes, check */
}
1956
1957
1958
/* PCI vendor/device IDs served by this driver; the last field
   (driver_data) carries the chip-type constant used throughout. */
static struct pci_device_id dmfe_pci_tbl[] __devinitdata = {
	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);


/* PCI driver glue (old GNU-style designated initializers) */
static struct pci_driver dmfe_driver = {
	name:		"dmfe",
	id_table:	dmfe_pci_tbl,
	probe:		dmfe_init_one,
	remove:		__devexit_p(dmfe_remove_one),
};
1975
MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");

/* Integer module parameters; ranges are validated/clamped in
   dmfe_init_module(). */
MODULE_PARM(debug, "i");
MODULE_PARM(mode, "i");
MODULE_PARM(cr6set, "i");
MODULE_PARM(chkmode, "i");
MODULE_PARM(HPNA_mode, "i");
MODULE_PARM(HPNA_rx_cmd, "i");
MODULE_PARM(HPNA_tx_cmd, "i");
MODULE_PARM(HPNA_NoiseFloor, "i");
MODULE_PARM(SF_mode, "i");
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
MODULE_PARM_DESC(mode, "Davicom DM9xxx: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function (bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
1992
1993/*	Description:
1994 *	when user used insmod to add module, system invoked init_module()
1995 *	to initilize and register.
1996 */
1997
1998static int __init dmfe_init_module(void)
1999{
2000	int rc;
2001
2002	printk(version);
2003	printed_version = 1;
2004
2005	DMFE_DBUG(0, "init_module() ", debug);
2006
2007	if (debug)
2008		dmfe_debug = debug;	/* set debug flag */
2009	if (cr6set)
2010		dmfe_cr6_user_set = cr6set;
2011
2012 	switch(mode) {
2013   	case DMFE_10MHF:
2014	case DMFE_100MHF:
2015	case DMFE_10MFD:
2016	case DMFE_100MFD:
2017	case DMFE_1M_HPNA:
2018		dmfe_media_mode = mode;
2019		break;
2020	default:dmfe_media_mode = DMFE_AUTO;
2021		break;
2022	}
2023
2024	if (HPNA_mode > 4)
2025		HPNA_mode = 0;		/* Default: LP/HS */
2026	if (HPNA_rx_cmd > 1)
2027		HPNA_rx_cmd = 0;	/* Default: Ignored remote cmd */
2028	if (HPNA_tx_cmd > 1)
2029		HPNA_tx_cmd = 0;	/* Default: Don't issue remote cmd */
2030	if (HPNA_NoiseFloor > 15)
2031		HPNA_NoiseFloor = 0;
2032
2033	rc = pci_module_init(&dmfe_driver);
2034	if (rc < 0)
2035		return rc;
2036
2037	return 0;
2038}
2039
2040
/*
 *	Description:
 *	when user used rmmod to delete module, system invoked clean_module()
 *	to un-register all registered services.
 */

static void __exit dmfe_cleanup_module(void)
{
	DMFE_DBUG(0, "dmfe_clean_module() ", debug);
	/* Per-device teardown happens via the driver's remove callback */
	pci_unregister_driver(&dmfe_driver);
}

module_init(dmfe_init_module);
module_exit(dmfe_cleanup_module);
2055