/*  D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
/*
    Copyright (c) 2001, 2002 by D-Link Corporation
    Written by Edward Peng.<edward_peng@dlink.com.tw>
    Created 03-May-2001, based on Linux' sundance.c.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
*/
/*
    Rev		Date		Description
    ==========================================================================
    0.01	2001/05/03	Created DL2000-based linux driver
    0.02	2001/05/21	Added VLAN and hardware checksum support.
    1.00	2001/06/26	Added jumbo frame support.
    1.01	2001/08/21	Added two parameters, rx_coalesce and rx_timeout.
    1.02	2001/10/08	Supported fiber media.
    				Added flow control parameters.
    1.03	2001/10/12	Changed the default media to 1000mbps_fd for
    				the fiber devices.
    1.04	2001/11/08	Fixed Tx stopped when tx very busy.
    1.05	2001/11/22	Fixed Tx stopped when unidirectional tx busy.
    1.06	2001/12/13	Fixed disconnect bug at 10Mbps mode.
    				Fixed tx_full flag incorrect.
				Added tx_coalesce parameter.
    1.07	2002/01/03	Fixed miscount of RX frame error.
    1.08	2002/01/17	Fixed the multicast bug.
    1.09	2002/03/07	Moved rx-poll-now to re-fill loop.
    				Added rio_timer() to watch rx buffers.
    1.10	2002/04/16	Fixed miscount of carrier error.
    1.11	2002/05/23	Added ISR schedule scheme.
    				Fixed miscount of rx frame error for DGE-550SX.
    				Fixed VLAN bug.
    1.12	2002/06/13	Lock tx_coalesce=1 on 10/100Mbps mode.
 */

#include "dl2k.h"

static char version[] __devinitdata =
    KERN_INFO "D-Link DL2000-based linux driver v1.12 2002/06/13\n";

#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
static int tx_flow=-1;
static int rx_flow=-1;
static int copy_thresh;
static int rx_coalesce=10;	/* Rx frame count each interrupt */
static int rx_timeout=200;	/* Rx DMA wait time in 640ns increments */
static int tx_coalesce=16;	/* HW xmit count each TxDMAComplete */
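/* A worked example of the defaults above (illustrative, not a tuning
   recommendation): with rx_coalesce=10 and rx_timeout=200, RxDMAComplete
   is raised after 10 received frames, or after 200 * 640ns = 128us of
   Rx DMA inactivity, presumably whichever comes first. */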


MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
MODULE_LICENSE("GPL");
MODULE_PARM (mtu, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (media, "1-" __MODULE_STRING (MAX_UNITS) "s");
MODULE_PARM (vlan, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (jumbo, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (tx_flow, "i");
MODULE_PARM (rx_flow, "i");
MODULE_PARM (copy_thresh, "i");
MODULE_PARM (rx_coalesce, "i");	/* Rx frame count each interrupt */
MODULE_PARM (rx_timeout, "i");	/* Rx DMA wait time in 640ns increments */
MODULE_PARM (tx_coalesce, "i"); /* HW xmit count each TxDMAComplete */
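/* Example module load (illustrative values only; each per-unit parameter
   takes one comma-separated entry per adapter, see
   Documentation/networking/dl2k.txt):

	insmod dl2k.o media=100mbps_fd,auto jumbo=0,1 rx_coalesce=16

   This would force the first adapter to 100Mbps full duplex, let the
   second autosense with jumbo frames enabled, and coalesce Rx interrupts
   to 16 frames for both. */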


/* Enable the default interrupts */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
       UpdateStats | LinkEvent)
#define EnableInt() \
writew(DEFAULT_INTR, ioaddr + IntEnable)
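/* Note: EnableInt() expands textually and uses whatever local `ioaddr'
   is in scope at the call site (e.g. in rio_open()), so it can only be
   invoked where such a variable exists. */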

static int max_intrloop = 50;
static int multicast_filter_limit = 0x40;

static int rio_open (struct net_device *dev);
static void rio_timer (unsigned long data);
static void rio_tx_timeout (struct net_device *dev);
static void alloc_list (struct net_device *dev);
static int start_xmit (struct sk_buff *skb, struct net_device *dev);
static void rio_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
static int change_mtu (struct net_device *dev, int new_mtu);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int rio_close (struct net_device *dev);
static int find_miiphy (struct net_device *dev);
static int parse_eeprom (struct net_device *dev);
static int read_eeprom (long ioaddr, int eep_addr);
static int mii_wait_link (struct net_device *dev, int wait);
static int mii_set_media (struct net_device *dev);
static int mii_get_media (struct net_device *dev);
static int mii_set_media_pcs (struct net_device *dev);
static int mii_get_media_pcs (struct net_device *dev);
static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
		      u16 data);

static int __devinit
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int err, irq;
	long ioaddr;
	static int version_printed;
	void *ring_space;
	dma_addr_t ring_dma;

	if (!version_printed++)
		printk ("%s", version);

	err = pci_enable_device (pdev);
	if (err)
		return err;

	irq = pdev->irq;
	err = pci_request_regions (pdev, "dl2k");
	if (err)
		goto err_out_disable;

	pci_set_master (pdev);
	dev = alloc_etherdev (sizeof (*np));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_res;
	}
	SET_MODULE_OWNER (dev);

#ifdef MEM_MAPPING
	ioaddr = pci_resource_start (pdev, 1);
	ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE);
	if (!ioaddr) {
		err = -ENOMEM;
		goto err_out_dev;
	}
#else
	ioaddr = pci_resource_start (pdev, 0);
#endif
	dev->base_addr = ioaddr;
	dev->irq = irq;
	np = dev->priv;
	np->chip_id = chip_idx;
	np->pdev = pdev;
	spin_lock_init (&np->tx_lock);
	spin_lock_init (&np->rx_lock);

	/* Parse manual configuration */
	np->an_enable = 1;
	np->tx_coalesce = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "auto") == 0 ||
			    strcmp (media[card_idx], "autosense") == 0 ||
			    strcmp (media[card_idx], "0") == 0 ) {
				np->an_enable = 2;
			} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0
				   || strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
				 strcmp (media[card_idx], "6") == 0) {
				np->speed=1000;
				np->full_duplex=1;
			} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
				 strcmp (media[card_idx], "5") == 0) {
				np->speed = 1000;
				np->full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (jumbo[card_idx] != 0) {
			np->jumbo = 1;
			dev->mtu = MAX_JUMBO;
		} else {
			np->jumbo = 0;
			if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
				dev->mtu = mtu[card_idx];
		}
		np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
		    vlan[card_idx] : 0;
		if (rx_coalesce > 0 && rx_timeout > 0) {
			np->rx_coalesce = rx_coalesce;
			np->rx_timeout = rx_timeout;
			np->coalesce = 1;
		}
		np->tx_flow = (tx_flow == 0) ? 0 : 1;
		np->rx_flow = (rx_flow == 0) ? 0 : 1;

		if (tx_coalesce < 1)
			tx_coalesce = 1;
		else if (tx_coalesce > TX_RING_SIZE-1)
			tx_coalesce = TX_RING_SIZE - 1;
	}
	dev->open = &rio_open;
	dev->hard_start_xmit = &start_xmit;
	dev->stop = &rio_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_multicast;
	dev->do_ioctl = &rio_ioctl;
	dev->tx_timeout = &rio_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->change_mtu = &change_mtu;
	pci_set_drvdata (pdev, dev);

	ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	np->tx_ring = (struct netdev_desc *) ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *) ring_space;
	np->rx_ring_dma = ring_dma;

	/* Parse eeprom data */
	parse_eeprom (dev);

	/* Find PHY address */
	err = find_miiphy (dev);
	if (err)
		goto err_out_unmap_rx;

	/* Fiber device? */
	np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
	np->link_status = 0;
	/* Set media and reset PHY */
	if (np->phy_media) {
		/* default 1000mbps_fd for the fiber devices */
		if (np->an_enable == 1) {
			np->an_enable = 0;
			np->speed = 1000;
			np->full_duplex = 1;
		} else if (np->an_enable == 2) {
			np->an_enable = 1;
		}
		mii_set_media_pcs (dev);
	} else {
		/* Auto-Negotiation is mandatory for 1000BASE-T,
		   IEEE 802.3ab Annex 28D page 14 */
		if (np->speed == 1000)
			np->an_enable = 1;
		mii_set_media (dev);
	}
	pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);

	/* Reset all logic functions */
	writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
		ioaddr + ASICCtrl + 2);

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap_rx;

	card_idx++;

	printk (KERN_INFO "%s: %s, %02x:%02x:%02x:%02x:%02x:%02x, IRQ %d\n",
		dev->name, np->name,
		dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], irq);
	if (tx_coalesce > 1)
		printk(KERN_INFO "tx_coalesce:\t%d packets\n",
				tx_coalesce);
	if (np->coalesce)
		printk(KERN_INFO "rx_coalesce:\t%d packets\n"
		       KERN_INFO "rx_timeout: \t%d ns\n",
				np->rx_coalesce, np->rx_timeout*640);
	if (np->vlan)
		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
	return 0;

      err_out_unmap_rx:
	pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
      err_out_unmap_tx:
	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
      err_out_iounmap:
#ifdef MEM_MAPPING
	iounmap ((void *) ioaddr);

      err_out_dev:
#endif
	kfree (dev);

      err_out_res:
	pci_release_regions (pdev);

      err_out_disable:
	pci_disable_device (pdev);
	return err;
}

int
find_miiphy (struct net_device *dev)
{
	int i, phy_found = 0;
	struct netdev_private *np;
	long ioaddr;
	np = dev->priv;
	ioaddr = dev->base_addr;
	np->phy_addr = 1;

	for (i = 31; i >= 0; i--) {
		int mii_status = mii_read (dev, i, 1);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phy_addr = i;
			phy_found++;
		}
	}
	if (!phy_found) {
		printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
		return -ENODEV;
	}
	return 0;
}

int
parse_eeprom (struct net_device *dev)
{
	int i, j;
	long ioaddr = dev->base_addr;
	u8 sromdata[256];
	u8 *psib;
	u32 crc;
	PSROM_t psrom = (PSROM_t) sromdata;
	struct netdev_private *np = dev->priv;

	int cid, next;

	/* Read eeprom */
	for (i = 0; i < 128; i++) {
		((u16 *) sromdata)[i] = le16_to_cpu (read_eeprom (ioaddr, i));
	}

	/* Check CRC */
	crc = ~ether_crc_le (256 - 4, sromdata);
	if (psrom->crc != crc) {
		printk (KERN_ERR "%s: EEPROM data CRC error.\n", dev->name);
		return -1;
	}

	/* Set MAC address */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = psrom->mac_addr[i];

	/* Parse Software Information Block */
	i = 0x30;
	psib = (u8 *) sromdata;
	do {
		cid = psib[i++];
		next = psib[i++];
		if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
			printk (KERN_ERR "Cell data error\n");
			return -1;
		}
		switch (cid) {
		case 0:	/* Format version */
			break;
		case 1:	/* End of cell */
			return 0;
		case 2:	/* Duplex Polarity */
			np->duplex_polarity = psib[i];
			writeb (readb (ioaddr + PhyCtrl) | psib[i],
				ioaddr + PhyCtrl);
			break;
		case 3:	/* Wake Polarity */
			np->wake_polarity = psib[i];
			break;
		case 9:	/* Adapter description */
			j = (next - i > 255) ? 255 : next - i;
			memcpy (np->name, &(psib[i]), j);
			break;
		case 4:
		case 5:
		case 6:
		case 7:
		case 8:	/* Reserved */
			break;
		default:	/* Unknown cell */
			return -1;
		}
		i = next;
	} while (1);

	return 0;
}

static int
rio_open (struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	i = request_irq (dev->irq, &rio_interrupt, SA_SHIRQ, dev->name, dev);
	if (i)
		return i;
	/* DebugCtrl bits 4, 5 and 9 must be set */
	writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);

	/* Jumbo frame */
	if (np->jumbo != 0)
		writew (MAX_JUMBO+14, ioaddr + MaxFrameSize);

	alloc_list (dev);

	/* Get station address */
	for (i = 0; i < 6; i++)
		writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i);

	set_multicast (dev);
	if (np->coalesce) {
		writel (np->rx_coalesce | np->rx_timeout << 16,
			ioaddr + RxDMAIntCtrl);
	}
	/* Set RIO to poll every N*320nsec. */
	writeb (0x20, ioaddr + RxDMAPollPeriod);
	writeb (0xff, ioaddr + TxDMAPollPeriod);
	writeb (0x30, ioaddr + RxDMABurstThresh);
	writeb (0x30, ioaddr + RxDMAUrgentThresh);
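	/* Assuming the 320ns granularity noted above, the values chosen here
	   poll the Rx list every 0x20 * 320ns = 10.24us and the Tx list
	   every 0xff * 320ns ~= 81.6us. */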

	/* clear statistics */
	clear_stats (dev);

	/* VLAN supported */
	if (np->vlan) {
		/* priority field in RxDMAIntCtrl  */
		writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
			ioaddr + RxDMAIntCtrl);
		/* VLANId */
		writew (np->vlan, ioaddr + VLANId);
		/* Length/Type should be 0x8100 */
		writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag);
		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
		   VLAN information is carried in the TFC's VID and CFI fields. */
		writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging,
			ioaddr + MACCtrl);
	}

	/* Enable default interrupts */
	EnableInt ();

	init_timer (&np->timer);
	np->timer.expires = jiffies + 1*HZ;
	np->timer.data = (unsigned long) dev;
	np->timer.function = &rio_timer;
	add_timer (&np->timer);

	/* Start Tx/Rx */
	writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
			ioaddr + MACCtrl);

	netif_start_queue (dev);
	return 0;
}

static void
rio_timer (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = dev->priv;
	unsigned int entry;
	int next_tick = 1*HZ;
	unsigned long flags;

	spin_lock_irqsave(&np->rx_lock, flags);
	/* Recover from the rx ring exhausted condition */
	if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
		printk(KERN_INFO "Trying to recover the exhausted rx ring...\n");
		/* Re-allocate skbuffs to fill the descriptor ring */
		for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
			struct sk_buff *skb;
			entry = np->old_rx % RX_RING_SIZE;
			/* Dropped packets don't need to be re-allocated */
			if (np->rx_skbuff[entry] == NULL) {
				skb = dev_alloc_skb (np->rx_buf_sz);
				if (skb == NULL) {
					np->rx_ring[entry].fraginfo = 0;
					printk (KERN_INFO
						"%s: Still unable to re-allocate Rx skbuff.#%d\n",
						dev->name, entry);
					break;
				}
				np->rx_skbuff[entry] = skb;
				skb->dev = dev;
				/* 16 byte align the IP header */
				skb_reserve (skb, 2);
				np->rx_ring[entry].fraginfo =
				    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->tail, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
			}
			np->rx_ring[entry].fraginfo |=
			    cpu_to_le64 (np->rx_buf_sz) << 48;
			np->rx_ring[entry].status = 0;
		} /* end for */
	} /* end if */
	spin_unlock_irqrestore (&np->rx_lock, flags);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void
rio_tx_timeout (struct net_device *dev)
{
	long ioaddr = dev->base_addr;

	printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
		dev->name, readl (ioaddr + TxStatus));
	rio_free_tx(dev, 0);
	dev->if_port = 0;
	dev->trans_start = jiffies;
}

 /* allocate and initialize Tx and Rx descriptors */
static void
alloc_list (struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->old_rx = np->old_tx = 0;
	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

	/* Initialize Tx descriptors; the TFDListPtr is set up in start_xmit(). */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = 0;
		np->tx_ring[i].status = cpu_to_le64 (TFDDone);
		np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
					      ((i+1)%TX_RING_SIZE) *
					      sizeof (struct netdev_desc));
	}

	/* Initialize Rx descriptors */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
						((i + 1) % RX_RING_SIZE) *
						sizeof (struct netdev_desc));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
		np->rx_skbuff[i] = 0;
	}

	/* Allocate the rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Allocate a fixed-size skbuff */
		struct sk_buff *skb = dev_alloc_skb (np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL) {
			printk (KERN_ERR
				"%s: alloc_list: allocate Rx buffer error! ",
				dev->name);
			break;
		}
		skb->dev = dev;	/* Mark as being used by this device. */
		skb_reserve (skb, 2);	/* 16 byte align the IP header. */
		/* Rubicon now supports 40 bits of addressing space. */
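		/* fraginfo packs the buffer's bus address and length into one
		   64-bit descriptor word: bits 47..0 hold the DMA address and
		   bits 63..48 the fragment length, i.e. roughly
		   fraginfo = dma_addr | ((u64) len << 48). */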
		np->rx_ring[i].fraginfo =
		    cpu_to_le64 ( pci_map_single (
			 	  np->pdev, skb->tail, np->rx_buf_sz,
				  PCI_DMA_FROMDEVICE));
		np->rx_ring[i].fraginfo |= cpu_to_le64 (np->rx_buf_sz) << 48;
	}

	/* Set RFDListPtr */
	writel (cpu_to_le32 (np->rx_ring_dma), dev->base_addr + RFDListPtr0);
	writel (0, dev->base_addr + RFDListPtr1);

	return;
}

static int
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	struct netdev_desc *txdesc;
	unsigned entry;
	u32 ioaddr;
	u64 tfc_vlan_tag = 0;

	if (np->link_status == 0) {	/* Link Down */
		dev_kfree_skb(skb);
		return 0;
	}
	ioaddr = dev->base_addr;
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	if (np->vlan) {
		txdesc->status |=
		    cpu_to_le64 (VLANTagInsert) |
		    (cpu_to_le64 (np->vlan) << 32) |
		    (cpu_to_le64 (skb->priority) << 45);
	}
	txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->fraginfo |= cpu_to_le64 (skb->len) << 48;
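	/* Same packing as on the Rx side: bits 47..0 of fraginfo carry the
	   bus address of the frame, bits 63..48 its length. */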

	/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
	 * Work around: Always use 1 descriptor in 10Mbps mode */
	if (entry % np->tx_coalesce == 0 || np->speed == 10)
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      TxDMAIndicate |
					      (1 << FragCountShift));
	else
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      (1 << FragCountShift));

	/* TxDMAPollNow */
	writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
	/* Schedule ISR */
	writel(10000, ioaddr + CountDown);
	np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
	if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
			< TX_QUEUE_LEN - 1 && np->speed != 10) {
		/* do nothing */
	} else if (!netif_queue_stopped(dev)) {
		netif_stop_queue (dev);
	}

	/* The first TFDListPtr */
	if (readl (dev->base_addr + TFDListPtr0) == 0) {
		writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc),
			dev->base_addr + TFDListPtr0);
		writel (0, dev->base_addr + TFDListPtr1);
	}

	/* NETDEV WATCHDOG timer */
	dev->trans_start = jiffies;
	return 0;
}

static void
rio_interrupt (int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np;
	unsigned int_status;
	long ioaddr;
	int cnt = max_intrloop;

	ioaddr = dev->base_addr;
	np = dev->priv;
	while (1) {
		int_status = readw (ioaddr + IntStatus);
		writew (int_status, ioaddr + IntStatus);
		int_status &= DEFAULT_INTR;
		if (int_status == 0 || --cnt < 0)
			break;
		/* Processing received packets */
		if (int_status & RxDMAComplete)
			receive_packet (dev);
		/* TxDMAComplete interrupt */
		if ((int_status & (TxDMAComplete|IntRequested))) {
			int tx_status;
			tx_status = readl (ioaddr + TxStatus);
			if (tx_status & 0x01)
				tx_error (dev, tx_status);
			/* Free used tx skbuffs */
			rio_free_tx (dev, 1);
		}

		/* Handle uncommon events */
		if (int_status &
		    (HostError | LinkEvent | UpdateStats))
			rio_error (dev, int_status);
	}
	if (np->cur_tx != np->old_tx)
		writel (100, ioaddr + CountDown);
}

static void
rio_free_tx (struct net_device *dev, int irq)
{
	struct netdev_private *np = (struct netdev_private *) dev->priv;
	int entry = np->old_tx % TX_RING_SIZE;
	int tx_use = 0;
	long flag = 0;

	if (irq)
		spin_lock_irqsave(&np->tx_lock, flag);
	else
		spin_lock(&np->tx_lock);
	/* Free used tx skbuffs */
	while (entry != np->cur_tx) {
		struct sk_buff *skb;

		if (!(np->tx_ring[entry].status & TFDDone))
			break;
		skb = np->tx_skbuff[entry];
		pci_unmap_single (np->pdev,
				  np->tx_ring[entry].fraginfo,
				  skb->len, PCI_DMA_TODEVICE);
		if (irq)
			dev_kfree_skb_irq (skb);
		else
			dev_kfree_skb (skb);

		np->tx_skbuff[entry] = 0;
		entry = (entry + 1) % TX_RING_SIZE;
		tx_use++;
	}
	if (irq)
		spin_unlock_irqrestore(&np->tx_lock, flag);
	else
		spin_unlock(&np->tx_lock);
	np->old_tx = entry;

	/* If the ring is no longer full, clear tx_full and
	   call netif_wake_queue() */

	if (netif_queue_stopped(dev) &&
	    ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
	    < TX_QUEUE_LEN - 1 || np->speed == 10)) {
		netif_wake_queue (dev);
	}
}

static void
tx_error (struct net_device *dev, int tx_status)
{
	struct netdev_private *np;
	long ioaddr = dev->base_addr;
	int frame_id;
	int i;

	np = dev->priv;

	frame_id = (tx_status & 0xffff0000);
	printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
		dev->name, tx_status, frame_id);
	np->stats.tx_errors++;
	/* Transmit Underrun */
	if (tx_status & 0x10) {
		np->stats.tx_fifo_errors++;
		writew (readw (ioaddr + TxStartThresh) + 0x10,
			ioaddr + TxStartThresh);
		/* Transmit Underrun requires TxReset, DMAReset and FIFOReset */
		writew (TxReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASICCtrl + 2);
		/* Wait for ResetBusy bit clear */
		for (i = 50; i > 0; i--) {
			if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
				break;
			mdelay (1);
		}
		rio_free_tx (dev, 1);
		/* Reset TFDListPtr */
		writel (np->tx_ring_dma +
			np->old_tx * sizeof (struct netdev_desc),
			dev->base_addr + TFDListPtr0);
		writel (0, dev->base_addr + TFDListPtr1);

		/* Let TxStartThresh stay default value */
	}
	/* Late Collision */
	if (tx_status & 0x04) {
		np->stats.tx_fifo_errors++;
		/* TxReset and clear FIFO */
		writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2);
		/* Wait for the reset to complete */
		for (i = 50; i > 0; i--) {
			if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
				break;
			mdelay (1);
		}
		/* Let TxStartThresh stay default value */
	}
	/* Maximum Collisions */
	if (tx_status & 0x08)
		np->stats.collisions++;
	/* Restart the Tx */
	writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
}

static int
receive_packet (struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *) dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int cnt = 30;

	/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
	while (1) {
		struct netdev_desc *desc = &np->rx_ring[entry];
		int pkt_len;
		u64 frame_status;

		if (!(desc->status & RFDDone) ||
		    !(desc->status & FrameStart) || !(desc->status & FrameEnd))
			break;

		/* Chip omits the CRC. */
		pkt_len = le64_to_cpu (desc->status & 0xffff);
		frame_status = le64_to_cpu (desc->status);
		if (--cnt < 0)
			break;
		pci_dma_sync_single (np->pdev, desc->fraginfo, np->rx_buf_sz,
				     PCI_DMA_FROMDEVICE);
		/* Update rx error statistics, drop packet. */
		if (frame_status & RFS_Errors) {
			np->stats.rx_errors++;
			if (frame_status & (RxRuntFrame | RxLengthError))
				np->stats.rx_length_errors++;
			if (frame_status & RxFCSError)
				np->stats.rx_crc_errors++;
			if (frame_status & RxAlignmentError && np->speed != 1000)
				np->stats.rx_frame_errors++;
			if (frame_status & RxFIFOOverrun)
				np->stats.rx_fifo_errors++;
		} else {
			struct sk_buff *skb;

			/* Small skbuffs for short packets */
			if (pkt_len > copy_thresh) {
				pci_unmap_single (np->pdev, desc->fraginfo,
						  np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE);
				skb_put (skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			} else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
				skb->dev = dev;
				/* 16 byte align the IP header */
				skb_reserve (skb, 2);
				eth_copy_and_sum (skb,
						  np->rx_skbuff[entry]->tail,
						  pkt_len, 0);
				skb_put (skb, pkt_len);
			}
			/* If the copy-path allocation failed, skb is NULL;
			   drop the packet and reuse the old buffer. */
			if (skb != NULL) {
				skb->protocol = eth_type_trans (skb, dev);
				netif_rx (skb);
				dev->last_rx = jiffies;
			}
		}
		entry = (entry + 1) % RX_RING_SIZE;
	}
	spin_lock(&np->rx_lock);
	np->cur_rx = entry;
	/* Re-allocate skbuffs to fill the descriptor ring */
	entry = np->old_rx;
	while (entry != np->cur_rx) {
		struct sk_buff *skb;
		/* Dropped packets don't need to be re-allocated */
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb (np->rx_buf_sz);
			if (skb == NULL) {
				np->rx_ring[entry].fraginfo = 0;
				printk (KERN_INFO
					"%s: receive_packet: "
					"Unable to re-allocate Rx skbuff.#%d\n",
					dev->name, entry);
				break;
			}
			np->rx_skbuff[entry] = skb;
			skb->dev = dev;
			/* 16 byte align the IP header */
			skb_reserve (skb, 2);
			np->rx_ring[entry].fraginfo =
			    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->tail, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
		}
		np->rx_ring[entry].fraginfo |=
		    cpu_to_le64 (np->rx_buf_sz) << 48;
		np->rx_ring[entry].status = 0;
		entry = (entry + 1) % RX_RING_SIZE;
	}
	np->old_rx = entry;
	spin_unlock(&np->rx_lock);
	return 0;
}

static void
rio_error (struct net_device *dev, int int_status)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	u16 macctrl;

	/* Link change event */
	if (int_status & LinkEvent) {
		if (mii_wait_link (dev, 10) == 0) {
			printk (KERN_INFO "%s: Link up\n", dev->name);
			if (np->phy_media)
				mii_get_media_pcs (dev);
			else
				mii_get_media (dev);
			if (np->speed == 1000)
				np->tx_coalesce = tx_coalesce;
			else
				np->tx_coalesce = 1;
			macctrl = 0;
			macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
			macctrl |= (np->full_duplex) ? DuplexSelect : 0;
			macctrl |= (np->tx_flow) ?
				TxFlowControlEnable : 0;
			macctrl |= (np->rx_flow) ?
				RxFlowControlEnable : 0;
			writew(macctrl,	ioaddr + MACCtrl);
			np->link_status = 1;
			netif_carrier_on(dev);
		} else {
			printk (KERN_INFO "%s: Link off\n", dev->name);
			np->link_status = 0;
			netif_carrier_off(dev);
		}
	}

	/* UpdateStats statistics registers */
	if (int_status & UpdateStats) {
		get_stats (dev);
	}

	/* PCI Error: a catastrophic error related to the bus interface
	   occurred; set GlobalReset and HostReset to recover. */
	if (int_status & HostError) {
		printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
			dev->name, int_status);
		writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2);
		mdelay (500);
	}
}

static struct net_device_stats *
get_stats (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	int i;
	unsigned int stat_reg;

	/* All statistics registers need to be acknowledged,
	   else statistic overflow could cause problems */

	np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
	np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
	np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
	np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);

	np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
	np->stats.collisions += readl (ioaddr + SingleColFrames)
			     +  readl (ioaddr + MultiColFrames);

	/* detailed tx errors */
	stat_reg = readw (ioaddr + FramesAbortXSColls);
	np->stats.tx_aborted_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	stat_reg = readw (ioaddr + CarrierSenseErrors);
	np->stats.tx_carrier_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	/* Clear all other statistics registers. */
	readl (ioaddr + McstOctetXmtOk);
	readw (ioaddr + BcstFramesXmtdOk);
	readl (ioaddr + McstFramesXmtdOk);
	readw (ioaddr + BcstFramesRcvdOk);
	readw (ioaddr + MacControlFramesRcvd);
	readw (ioaddr + FrameTooLongErrors);
	readw (ioaddr + InRangeLengthErrors);
	readw (ioaddr + FramesCheckSeqErrors);
	readw (ioaddr + FramesLostRxErrors);
	readl (ioaddr + McstOctetXmtOk);
	readl (ioaddr + BcstOctetXmtOk);
	readl (ioaddr + McstFramesXmtdOk);
	readl (ioaddr + FramesWDeferredXmt);
	readl (ioaddr + LateCollisions);
	readw (ioaddr + BcstFramesXmtdOk);
	readw (ioaddr + MacControlFramesXmtd);
	readw (ioaddr + FramesWEXDeferal);


	for (i = 0x100; i <= 0x150; i += 4)
		readl (ioaddr + i);
	readw (ioaddr + TxJumboFrames);
	readw (ioaddr + RxJumboFrames);
	readw (ioaddr + TCPCheckSumErrors);
	readw (ioaddr + UDPCheckSumErrors);
	readw (ioaddr + IPCheckSumErrors);
	return &np->stats;
}

static int
clear_stats (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	int i;

	/* All statistics registers need to be acknowledged,
	   else statistic overflow could cause problems */
	readl (ioaddr + FramesRcvOk);
	readl (ioaddr + FramesXmtOk);
	readl (ioaddr + OctetRcvOk);
	readl (ioaddr + OctetXmtOk);

	readl (ioaddr + McstFramesRcvdOk);
	readl (ioaddr + SingleColFrames);
	readl (ioaddr + MultiColFrames);
	readl (ioaddr + LateCollisions);
	/* detailed rx errors */
	readw (ioaddr + FrameTooLongErrors);
	readw (ioaddr + InRangeLengthErrors);
	readw (ioaddr + FramesCheckSeqErrors);
	readw (ioaddr + FramesLostRxErrors);

	/* detailed tx errors */
	readw (ioaddr + FramesAbortXSColls);
	readw (ioaddr + CarrierSenseErrors);

	/* Clear all other statistics registers. */
	readl (ioaddr + McstOctetXmtOk);
	readw (ioaddr + BcstFramesXmtdOk);
	readl (ioaddr + McstFramesXmtdOk);
	readw (ioaddr + BcstFramesRcvdOk);
	readw (ioaddr + MacControlFramesRcvd);
	readl (ioaddr + McstOctetXmtOk);
	readl (ioaddr + BcstOctetXmtOk);
	readl (ioaddr + McstFramesXmtdOk);
	readl (ioaddr + FramesWDeferredXmt);
	readw (ioaddr + BcstFramesXmtdOk);
	readw (ioaddr + MacControlFramesXmtd);
	readw (ioaddr + FramesWEXDeferal);

	for (i = 0x100; i <= 0x150; i += 4)
		readl (ioaddr + i);
	readw (ioaddr + TxJumboFrames);
	readw (ioaddr + RxJumboFrames);
	readw (ioaddr + TCPCheckSumErrors);
	readw (ioaddr + UDPCheckSumErrors);
	readw (ioaddr + IPCheckSumErrors);
	return 0;
}


int
change_mtu (struct net_device *dev, int new_mtu)
{
	struct netdev_private *np = dev->priv;
	int max = (np->jumbo) ? MAX_JUMBO : 1536;

	if ((new_mtu < 68) || (new_mtu > max)) {
		return -EINVAL;
	}

	dev->mtu = new_mtu;

	return 0;
}

static void
set_multicast (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	u32 hash_table[2];
	u16 rx_mode = 0;
	int i;
	int bit;
	struct dev_mc_list *mclist;
	struct netdev_private *np = dev->priv;

	hash_table[0] = hash_table[1] = 0;
	/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
	hash_table[1] |= cpu_to_le32(0x02000000);
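	/* Sanity check of the constant above: index 0x39 = 57 lands in word
	   57 / 32 = 1 at bit 57 % 32 = 25, and 1 << 25 = 0x02000000. */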
	if (dev->flags & IFF_PROMISC) {
		/* Receive all frames promiscuously. */
		rx_mode = ReceiveAllFrames;
	} else if ((dev->flags & IFF_ALLMULTI) ||
			(dev->mc_count > multicast_filter_limit)) {
		/* Receive broadcast and multicast frames */
		rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
	} else if (dev->mc_count > 0) {
		/* Receive broadcast frames and multicast frames filtering
		   by Hashtable */
		rx_mode =
		    ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
		for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			i++, mclist=mclist->next) {

			int index = 0;
			int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);

			/* The six most significant bits of the CRC, taken in
			   reverse order, form the index into the hash table */
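			/* For example (hypothetical CRC value): if the top
			   bits crc[31..26] were 100101b, the reversed index
			   would be 101001b = 41, selecting bit 41 % 32 = 9
			   of hash_table[41 / 32 = 1]. */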
			for (bit = 0; bit < 6; bit++)
				if (crc & (1 << (31 - bit)))
					index |= (1 << bit);

			hash_table[index / 32] |= (1 << (index % 32));
		}
	} else {
		rx_mode = ReceiveBroadcast | ReceiveUnicast;
	}
	if (np->vlan) {
		/* ReceiveVLANMatch field in ReceiveMode */
		rx_mode |= ReceiveVLANMatch;
	}

	writel (hash_table[0], ioaddr + HashTable0);
	writel (hash_table[1], ioaddr + HashTable1);
	writew (rx_mode, ioaddr + ReceiveMode);
}

static int
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	int phy_addr;
	struct netdev_private *np = dev->priv;
	struct mii_data *miidata = (struct mii_data *) &rq->ifr_data;

	struct netdev_desc *desc;
	int i;

	phy_addr = np->phy_addr;
	switch (cmd) {
	case SIOCDEVPRIVATE:
		break;

	case SIOCDEVPRIVATE + 1:
		miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
		break;
	case SIOCDEVPRIVATE + 2:
		mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
		break;
	case SIOCDEVPRIVATE + 3:
		break;
	case SIOCDEVPRIVATE + 4:
		break;
	case SIOCDEVPRIVATE + 5:
		netif_stop_queue (dev);
		break;
	case SIOCDEVPRIVATE + 6:
		netif_wake_queue (dev);
		break;
	case SIOCDEVPRIVATE + 7:
		printk
		    ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
		     netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
		     np->old_rx);
		break;
	case SIOCDEVPRIVATE + 8:
		printk("TX ring:\n");
		for (i = 0; i < TX_RING_SIZE; i++) {
			desc = &np->tx_ring[i];
			printk
			    ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
			     i,
			     (u32) (np->tx_ring_dma + i * sizeof (*desc)),
			     (u32) desc->next_desc,
			     (u32) desc->status, (u32) (desc->fraginfo >> 32),
			     (u32) desc->fraginfo);
			printk ("\n");
		}
		printk ("\n");
		break;

	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

#define EEP_READ 0x0200
#define EEP_BUSY 0x8000
/* Read the EEPROM word */
int
read_eeprom (long ioaddr, int eep_addr)
{
	int i = 1000;
	writew (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl);
	while (i-- > 0) {
		if (!(readw (ioaddr + EepromCtrl) & EEP_BUSY)) {
			return readw (ioaddr + EepromData);
		}
	}
	return 0;
}

enum phy_ctrl_bits {
	MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
	MII_DUPLEX = 0x08,
};
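
/* These bits drive the bit-banged MDIO interface through the PhyCtrl
   register: MII_CLK toggles the management clock, MII_DATA1 is the
   data-out bit, and MII_WRITE/MII_READ select the direction. Each
   mii_sendbit()/mii_getbit() call below clocks one bit across the wire. */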

#define mii_delay() readb(ioaddr)
static void
mii_sendbit (struct net_device *dev, u32 data)
{
	long ioaddr = dev->base_addr + PhyCtrl;
	data = (data) ? MII_DATA1 : 0;
	data |= MII_WRITE;
	data |= (readb (ioaddr) & 0xf8) | MII_WRITE;
	writeb (data, ioaddr);
	mii_delay ();
	writeb (data | MII_CLK, ioaddr);
	mii_delay ();
}

static int
mii_getbit (struct net_device *dev)
{
	long ioaddr = dev->base_addr + PhyCtrl;
	u8 data;

	data = (readb (ioaddr) & 0xf8) | MII_READ;
	writeb (data, ioaddr);
	mii_delay ();
	writeb (data | MII_CLK, ioaddr);
	mii_delay ();
	return ((readb (ioaddr) >> 1) & 1);
}

static void
mii_send_bits (struct net_device *dev, u32 data, int len)
{
	int i;
	for (i = len - 1; i >= 0; i--) {
		mii_sendbit (dev, data & (1 << i));
	}
}

static int
mii_read (struct net_device *dev, int phy_addr, int reg_num)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	/* Preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP = 0110'b for read operation */
	cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
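	/* Worked example: for phy_addr = 1, reg_num = 2 this yields
	   cmd = (0x06 << 10) | (1 << 5) | 2 = 0x1822, clocked out MSB-first
	   as the 14 bits 01 10 00001 00010 (ST, OP, ADDR, REG#). */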
	mii_send_bits (dev, cmd, 14);
	/* Turnaround */
	if (mii_getbit (dev))
		goto err_out;
	/* Read data */
	for (i = 0; i < 16; i++) {
		retval |= mii_getbit (dev);
		retval <<= 1;
	}
	/* End cycle */
	mii_getbit (dev);
	return (retval >> 1) & 0xffff;

      err_out:
	return 0;
}
static int
mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
{
	u32 cmd;

	/* Preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
	cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
	mii_send_bits (dev, cmd, 32);
	/* End cycle */
	mii_getbit (dev);
	return 0;
}
static int
mii_wait_link (struct net_device *dev, int wait)
{
	BMSR_t bmsr;
	int phy_addr;
	struct netdev_private *np;

	np = dev->priv;
	phy_addr = np->phy_addr;

	do {
		bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
		if (bmsr.bits.link_status)
			return 0;
		mdelay (1);
	} while (--wait > 0);
	return -1;
}
static int
mii_get_media (struct net_device *dev)
{
	ANAR_t negotiate;
	BMSR_t bmsr;
	BMCR_t bmcr;
	MSCR_t mscr;
	MSSR_t mssr;
	int phy_addr;
	struct netdev_private *np;

	np = dev->priv;
	phy_addr = np->phy_addr;

	bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
	if (np->an_enable) {
		if (!bmsr.bits.an_complete) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		negotiate.image = mii_read (dev, phy_addr, MII_ANAR) &
			mii_read (dev, phy_addr, MII_ANLPAR);
		mscr.image = mii_read (dev, phy_addr, MII_MSCR);
		mssr.image = mii_read (dev, phy_addr, MII_MSSR);
		if (mscr.bits.media_1000BT_FD & mssr.bits.lp_1000BT_FD) {
			np->speed = 1000;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
		} else if (mscr.bits.media_1000BT_HD & mssr.bits.lp_1000BT_HD) {
			np->speed = 1000;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
		} else if (negotiate.bits.media_100BX_FD) {
			np->speed = 100;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
		} else if (negotiate.bits.media_100BX_HD) {
			np->speed = 100;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
		} else if (negotiate.bits.media_10BT_FD) {
			np->speed = 10;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
		} else if (negotiate.bits.media_10BT_HD) {
			np->speed = 10;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
		}
		if (negotiate.bits.pause) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate.bits.asymmetric) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select  */
	} else {
		bmcr.image = mii_read (dev, phy_addr, MII_BMCR);
		if (bmcr.bits.speed100 == 1 && bmcr.bits.speed1000 == 0) {
			printk (KERN_INFO "Operating at 100 Mbps, ");
		} else if (bmcr.bits.speed100 == 0 && bmcr.bits.speed1000 == 0) {
			printk (KERN_INFO "Operating at 10 Mbps, ");
		} else if (bmcr.bits.speed100 == 0 && bmcr.bits.speed1000 == 1) {
			printk (KERN_INFO "Operating at 1000 Mbps, ");
		}
		if (bmcr.bits.duplex_mode) {
			printk ("Full duplex\n");
		} else {
			printk ("Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");

	return 0;
}

static int
mii_set_media (struct net_device *dev)
{
	PHY_SCR_t pscr;
	BMCR_t bmcr;
	BMSR_t bmsr;
	ANAR_t anar;
	int phy_addr;
	struct netdev_private *np;
	np = dev->priv;
	phy_addr = np->phy_addr;

	/* Does user set speed? */
	if (np->an_enable) {
		/* Advertise capabilities */
		bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
		anar.image = mii_read (dev, phy_addr, MII_ANAR);
		anar.bits.media_100BX_FD = bmsr.bits.media_100BX_FD;
		anar.bits.media_100BX_HD = bmsr.bits.media_100BX_HD;
		anar.bits.media_100BT4 = bmsr.bits.media_100BT4;
		anar.bits.media_10BT_FD = bmsr.bits.media_10BT_FD;
		anar.bits.media_10BT_HD = bmsr.bits.media_10BT_HD;
		anar.bits.pause = 1;
		anar.bits.asymmetric = 1;
		mii_write (dev, phy_addr, MII_ANAR, anar.image);

		/* Enable Auto crossover */
		pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr.bits.mdi_crossover_mode = 3;	/* 11'b */
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);

		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
		bmcr.image = 0;
		bmcr.bits.an_enable = 1;
		bmcr.bits.restart_an = 1;
		bmcr.bits.reset = 1;
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* 1) Disable Auto crossover */
		pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr.bits.mdi_crossover_mode = 0;
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);

		/* 2) PHY Reset */
		bmcr.image = mii_read (dev, phy_addr, MII_BMCR);
		bmcr.bits.reset = 1;
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);

		/* 3) Power Down */
		bmcr.image = 0x1940;	/* must be 0x1940 */
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay (10);	/* wait a certain time */

		/* 4) Advertise nothing */
		mii_write (dev, phy_addr, MII_ANAR, 0);

		/* 5) Set media and Power Up */
		bmcr.image = 0;
		bmcr.bits.power_down = 1;
		if (np->speed == 100) {
			bmcr.bits.speed100 = 1;
			bmcr.bits.speed1000 = 0;
			printk (KERN_INFO "Manual 100 Mbps, ");
		} else if (np->speed == 10) {
			bmcr.bits.speed100 = 0;
			bmcr.bits.speed1000 = 0;
			printk (KERN_INFO "Manual 10 Mbps, ");
		}
		if (np->full_duplex) {
			bmcr.bits.duplex_mode = 1;
			printk ("Full duplex\n");
		} else {
			bmcr.bits.duplex_mode = 0;
			printk ("Half duplex\n");
		}
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay(10);
	}
	return 0;
}

static int
mii_get_media_pcs (struct net_device *dev)
{
	ANAR_PCS_t negotiate;
	BMSR_t bmsr;
	BMCR_t bmcr;
	int phy_addr;
	struct netdev_private *np;

	np = dev->priv;
	phy_addr = np->phy_addr;

	bmsr.image = mii_read (dev, phy_addr, PCS_BMSR);
	if (np->an_enable) {
		if (!bmsr.bits.an_complete) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		negotiate.image = mii_read (dev, phy_addr, PCS_ANAR) &
			mii_read (dev, phy_addr, PCS_ANLPAR);
		np->speed = 1000;
		if (negotiate.bits.full_duplex) {
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
			np->full_duplex = 1;
		} else {
			printk (KERN_INFO "Auto 1000 Mbps, half duplex\n");
			np->full_duplex = 0;
		}
		if (negotiate.bits.pause) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate.bits.asymmetric) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select  */
	} else {
		bmcr.image = mii_read (dev, phy_addr, PCS_BMCR);
		printk (KERN_INFO "Operating at 1000 Mbps, ");
		if (bmcr.bits.duplex_mode) {
			printk ("Full duplex\n");
		} else {
			printk ("Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");

	return 0;
}

static int
mii_set_media_pcs (struct net_device *dev)
{
	BMCR_t bmcr;
	ESR_t esr;
	ANAR_PCS_t anar;
	int phy_addr;
	struct netdev_private *np;
	np = dev->priv;
	phy_addr = np->phy_addr;

	/* Auto-Negotiation? */
	if (np->an_enable) {
		/* Advertise capabilities */
		esr.image = mii_read (dev, phy_addr, PCS_ESR);
		anar.image = mii_read (dev, phy_addr, MII_ANAR);
		anar.bits.half_duplex =
			esr.bits.media_1000BT_HD | esr.bits.media_1000BX_HD;
		anar.bits.full_duplex =
			esr.bits.media_1000BT_FD | esr.bits.media_1000BX_FD;
		anar.bits.pause = 1;
		anar.bits.asymmetric = 1;
		mii_write (dev, phy_addr, MII_ANAR, anar.image);

		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
		bmcr.image = 0;
		bmcr.bits.an_enable = 1;
		bmcr.bits.restart_an = 1;
		bmcr.bits.reset = 1;
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* PHY Reset */
		bmcr.image = 0;
		bmcr.bits.reset = 1;
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay(10);
		bmcr.image = 0;
		bmcr.bits.an_enable = 0;
		if (np->full_duplex) {
			bmcr.bits.duplex_mode = 1;
			printk (KERN_INFO "Manual full duplex\n");
		} else {
			bmcr.bits.duplex_mode = 0;
			printk (KERN_INFO "Manual half duplex\n");
		}
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay(10);

		/*  Advertise nothing */
		mii_write (dev, phy_addr, MII_ANAR, 0);
	}
	return 0;
}


static int
rio_close (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	struct sk_buff *skb;
	int i;

	netif_stop_queue (dev);

	/* Disable interrupts */
	writew (0, ioaddr + IntEnable);

	/* Stop Tx and Rx logics */
	writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl);
	synchronize_irq ();
	free_irq (dev->irq, dev);
	del_timer_sync (&np->timer);

	/* Free all the skbuffs in the queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single (np->pdev, np->rx_ring[i].fraginfo,
					  skb->len, PCI_DMA_FROMDEVICE);
			dev_kfree_skb (skb);
			np->rx_skbuff[i] = 0;
		}
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single (np->pdev, np->tx_ring[i].fraginfo,
					  skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb (skb);
			np->tx_skbuff[i] = 0;
		}
	}

	return 0;
}

static void __devexit
rio_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);

	if (dev) {
		struct netdev_private *np = dev->priv;

		unregister_netdev (dev);
		pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
				     np->rx_ring_dma);
		pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
				     np->tx_ring_dma);
#ifdef MEM_MAPPING
		iounmap ((char *) (dev->base_addr));
#endif
		kfree (dev);
		pci_release_regions (pdev);
		pci_disable_device (pdev);
	}
	pci_set_drvdata (pdev, NULL);
}

static struct pci_driver rio_driver = {
	name:		"dl2k",
	id_table:	rio_pci_tbl,
	probe:		rio_probe1,
	remove:		__devexit_p(rio_remove1),
};

static int __init
rio_init (void)
{
	return pci_module_init (&rio_driver);
}

static void __exit
rio_exit (void)
{
	pci_unregister_driver (&rio_driver);
}

module_init (rio_init);
module_exit (rio_exit);

/*

Compile command:

gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c

Read Documentation/networking/dl2k.txt for details.

*/
