1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License.  See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2001 Patton Electronics Company
7 * Copyright (C) 2002 Momentum Computer
8 *
9 * Copyright 2000 MontaVista Software Inc.
10 * Author: MontaVista Software, Inc.
11 *         	stevel@mvista.com or support@mvista.com
12 *
13 *  This program is free software; you can distribute it and/or modify it
14 *  under the terms of the GNU General Public License (Version 2) as
15 *  published by the Free Software Foundation.
16 *
17 *  This program is distributed in the hope it will be useful, but WITHOUT
18 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
20 *  for more details.
21 *
22 *  You should have received a copy of the GNU General Public License along
23 *  with this program; if not, write to the Free Software Foundation, Inc.,
24 *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
25 *
26 * Ethernet driver for the MIPS GT96100 Advanced Communication Controller.
27 *
28 * Modified for the Gallileo/Marvell GT-64240 Communication Controller.
29 *
30 * Support for Rx NAPI, Rx checksum offload, IOCTL and ETHTOOL added
31 * Manish Lachwani (lachwani@pmc-sierra.com) - 09/16/2003
32 *
33 * Modified for later version of Linux 2.4 kernel
34 * Manish Lachwani (lachwani@pmc-sierra.com) - 04/29/2004
35 */
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/sched.h>
39#include <linux/string.h>
40#include <linux/timer.h>
41#include <linux/errno.h>
42#include <linux/in.h>
43#include <linux/ioport.h>
44#include <linux/slab.h>
45#include <linux/interrupt.h>
46#include <linux/pci.h>
47#include <linux/init.h>
48#include <linux/netdevice.h>
49#include <linux/etherdevice.h>
50#include <linux/ethtool.h>
51#include <linux/skbuff.h>
52#include <linux/delay.h>
53#include <linux/ctype.h>
54#include <linux/mii.h>
55
56#include <asm/irq.h>
57#include <asm/bitops.h>
58#include <asm/io.h>
59#include <asm/uaccess.h>
60
61#define DESC_DATA_BE 1
62
63#include "gt64240eth.h"
64
65// enable this port (set hash size to 1/2K)
66//- #define PORT_CONFIG pcrHS
67#define PORT_CONFIG (pcrHS | pcrHD)
68//- #define PORT_CONFIG pcrHS |pcrPM |pcrPBF|pcrHDM
69//- GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, pcrEN | pcrHS);
70//- GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, pcrEN | pcrHS | pcrPM);
71//- GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, pcrEN | pcrHS | pcrPM | 1<<pcrLPBKBit);
72
73// clear all the MIB ctr regs
74#define EXT_CONFIG_CLEAR (pcxrFCTL | pcxrFCTLen | pcxrFLP | pcxrDPLXen | pcxrPRIOrxOverride | pcxrRMIIen)
75
76/*
77 * _debug level:
78 * <= 2	none.
79 *  > 2	some warnings such as queue full, .....
80 *  > 3	lots of change-of-state messages.
81 *  > 4	EXTENSIVE data/descriptor dumps.
82 */
83
84#ifdef GT64240_DEBUG
85static int gt64240_debug = GT64240_DEBUG;
86#else
87static int gt64240_debug = 0;
88#endif
89
90static int debug = -1;
91
92#define GT64240_MSG_ENABLE	(NETIF_MSG_DRV          | \
93				NETIF_MSG_PROBE        | \
94				NETIF_MSG_LINK)
95
96
97/********************************************************/
98
99// prototypes
100static void gt64240_delay(int msec);
101static int gt64240_add_hash_entry(struct net_device *dev,
102				  unsigned char *addr);
103static void read_mib_counters(struct gt64240_private *gp);
104static void dump_MII(struct net_device *dev);
105static void dump_tx_desc(struct net_device *dev, int i);
106static void dump_rx_desc(struct net_device *dev, int i);
107static void dump_hw_addr(unsigned char *addr_str);
108static void update_stats(struct gt64240_private *gp);
109static void abort(struct net_device *dev, u32 abort_bits);
110static void hard_stop(struct net_device *dev);
111static void enable_ether_irq(struct net_device *dev);
112static void disable_ether_irq(struct net_device *dev);
113static int __init gt64240_probe1(unsigned long ioaddr, int irq, int port_num);
114static void reset_tx(struct net_device *dev);
115static void reset_rx(struct net_device *dev);
116static int gt64240_init(struct net_device *dev);
117static int gt64240_open(struct net_device *dev);
118static int gt64240_close(struct net_device *dev);
119static int gt64240_tx(struct sk_buff *skb, struct net_device *dev);
120#ifdef GT64240_NAPI
121static int gt64240_poll(struct net_device *dev, int *budget);
122static int gt64240_rx(struct net_device *dev, u32 status, int budget);
123#else
124static int gt64240_rx(struct net_device *dev, u32 status);
125#endif
126static void gt64240_tx_timeout(struct net_device *dev);
127static void gt64240_set_rx_mode(struct net_device *dev);
128static struct net_device_stats *gt64240_get_stats(struct net_device *dev);
129
130extern char * prom_getcmdline(void);
131extern int prom_get_mac_addrs(unsigned char
132			      station_addr[NUM_INTERFACES][6]);
133
134static char version[] __devinitdata =
135	"gt64240eth.o: version 0.1, <www.patton.com>\n";
136
137// PHY device addresses
138static u32 gt64240_phy_addr[NUM_INTERFACES] __devinitdata = { 0x8, 0x1, 0xa };
139
140// Need real Ethernet addresses -- in parse_mac_addr_options(),
141// these will be replaced by prom_get_mac_addrs() and/or prom_getcmdline().
142static unsigned char gt64240_station_addr[NUM_INTERFACES][6] = {
143	{0x00, 0x01, 0x02, 0x03, 0x04, 0x05},
144	{0x01, 0x02, 0x03, 0x04, 0x05, 0x06},
145	{0x02, 0x03, 0x04, 0x05, 0x06, 0x07}
146};
147
/* Upper bound on events processed per invocation of the interrupt
 * handler before it gives up and re-arms (see gp->intr_work_done). */
static int max_interrupt_work = 32;

/*
 * Base address and interrupt of the GT64240 ethernet controllers.
 * All three ports share IRQ 8, routed through GT PCI1
 * (see enable_ether_irq()/disable_ether_irq()).
 */
static struct {
	unsigned int port;	/* register base address of the port */
	int irq;		/* shared interrupt line */
} gt64240_iflist[NUM_INTERFACES] = {
	{
	GT64240_ETH0_BASE, 8}, {
	GT64240_ETH1_BASE, 8}, {
	GT64240_ETH2_BASE, 8}
};
162
/*
 * Sleep for approximately @ms milliseconds.
 *
 * Sleeps via schedule_timeout(), which is only legal from process
 * context; when called from interrupt context this returns
 * IMMEDIATELY, i.e. the caller gets no delay at all and its polling
 * loop (e.g. in read_MII()/abort()) busy-spins through its timeout.
 * NOTE(review): a modern kernel would use msleep()/set_current_state().
 */
static void gt64240_delay(int ms)
{
	if (in_interrupt())
		return;
	else {
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(ms * HZ / 1000);
	}
}
172
173unsigned char prom_mac_addr_base[6];
174
175int prom_get_mac_addrs(unsigned char station_addr[NUM_INTERFACES][6])
176{
177	memcpy(station_addr[0], prom_mac_addr_base, 6);
178	memcpy(station_addr[1], prom_mac_addr_base, 6);
179	memcpy(station_addr[2], prom_mac_addr_base, 6);
180
181	station_addr[1][5] += 1;
182	station_addr[2][5] += 2;
183
184	return 0;
185}
186
/*
 * Replace the compiled-in placeholder station addresses with real
 * ones derived from the boot PROM (see prom_get_mac_addrs()).
 * Called once from gt64240_probe() before any interface is set up.
 */
void parse_mac_addr_options(void)
{
	prom_get_mac_addrs(gt64240_station_addr);
}
191
/*
 * Read PHY register @reg of PHY @phy through the SMI interface.
 *
 * Returns the 16-bit register value on success, or -1 on timeout
 * (either waiting for a previous SMI transaction to finish, or for
 * the read result to become valid).  Each poll loop waits up to
 * ~20 ms in 1 ms steps (no real delay from interrupt context, see
 * gt64240_delay()).
 *
 * Also installed as the mii_if_info::mdio_read callback, so the
 * signature must stay (dev, phy_id, location).
 */
static int read_MII(struct net_device *dev, int phy, int reg)
{
	int timedout = 20;
	u32 smir = smirOpCode | (phy << smirPhyAdBit) |
	    (reg << smirRegAdBit);

	// wait for last operation to complete
	while ((GT64240_READ(GT64240_ETH_SMI_REG)) & smirBusy) {
		// snooze for 1 msec and check again
		gt64240_delay(1);

		if (--timedout == 0) {
			printk("%s: read_MII busy timeout!!\n", dev->name);
			return -1;
		}
	}

	// start the read transaction
	GT64240_WRITE(GT64240_ETH_SMI_REG, smir);

	timedout = 20;
	// wait for read to complete
	while (!
	       ((smir =
		 GT64240_READ(GT64240_ETH_SMI_REG)) & smirReadValid)) {
		// snooze for 1 msec and check again
		gt64240_delay(1);

		if (--timedout == 0) {
			printk("%s: read_MII timeout!!\n", dev->name);
			return -1;
		}
	}

	return (int) (smir & smirDataMask);
}
227
228static void gp_get_drvinfo (struct net_device *dev,
229				struct ethtool_drvinfo *info)
230{
231	strcpy(info->driver, "gt64260");
232	strcpy(info->version, version);
233}
234
235static int gp_get_settings(struct net_device *dev,
236				struct ethtool_cmd *cmd)
237{
238	struct gt64240_private *gp = netdev_priv(dev);
239	int rc;
240
241	spin_lock_irq(&gp->lock);
242	rc = mii_ethtool_gset(&gp->mii_if, cmd);
243	spin_unlock_irq(&gp->lock);
244	return rc;
245}
246
247static int gp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
248{
249	struct gt64240_private *gp = netdev_priv(dev);
250	int rc;
251
252	spin_lock_irq(&gp->lock);
253	rc = mii_ethtool_sset(&gp->mii_if, cmd);
254	spin_unlock_irq(&gp->lock);
255	return rc;
256}
257
258static int gp_nway_reset(struct net_device *dev)
259{
260	struct gt64240_private *gp = netdev_priv(dev);
261	return mii_nway_restart(&gp->mii_if);
262}
263
264static u32 gp_get_link(struct net_device *dev)
265{
266	struct gt64240_private *gp = netdev_priv(dev);
267	return mii_link_ok(&gp->mii_if);
268}
269
270static u32 gp_get_msglevel(struct net_device *dev)
271{
272	struct gt64240_private *gp = netdev_priv(dev);
273	return gp->msg_enable;
274}
275
276static void gp_set_msglevel(struct net_device *dev, u32 value)
277{
278	struct gt64240_private *gp = netdev_priv(dev);
279	gp->msg_enable = value;
280}
281
/* ethtool entry points, installed via dev->ethtool_ops in
 * gt64240_probe1(). */
static struct ethtool_ops gp_ethtool_ops = {
	.get_drvinfo		= gp_get_drvinfo,
	.get_settings		= gp_get_settings,
	.set_settings		= gp_set_settings,
	.nway_reset		= gp_nway_reset,
	.get_link		= gp_get_link,
	.get_msglevel		= gp_get_msglevel,
	.set_msglevel		= gp_set_msglevel,
};
291
/*
 * Handle MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) through the
 * generic MII helper.  Only valid while the interface is running.
 *
 * NOTE(review): the cast of &rq->ifr_data relies on struct
 * mii_ioctl_data overlaying the ifreq union -- the usual kernel idiom
 * is if_mii(rq); confirm the layout assumption holds on this kernel.
 */
static int gt64240_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gt64240_private *gp = netdev_priv(dev);
	struct mii_ioctl_data *data =
	    (struct mii_ioctl_data *) &rq->ifr_data;
	int retval;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&gp->lock);
	retval = generic_mii_ioctl(&gp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&gp->lock);

	return retval;
}
308
309static void dump_tx_desc(struct net_device *dev, int i)
310{
311	struct gt64240_private *gp = netdev_priv(dev);
312	gt64240_td_t *td = &gp->tx_ring[i];
313
314	printk("%s:tx[%d]: self=%p cmd=%08x, cnt=%4d. bufp=%08x, next=%08x\n",
315	       dev->name, i, td, td->cmdstat, td->byte_cnt, td->buff_ptr,
316	       td->next);
317}
318
319static void dump_rx_desc(struct net_device *dev, int i)
320{
321	struct gt64240_private *gp = netdev_priv(dev);
322	gt64240_rd_t *rd = &gp->rx_ring[i];
323
324	printk("%s:rx_dsc[%d]: self=%p cst=%08x,size=%4d. cnt=%4d. "
325	       "bufp=%08x, next=%08x\n",
326	       dev->name, i, rd, rd->cmdstat, rd->buff_sz, rd->byte_cnt,
327	       rd->buff_ptr, rd->next);
328}
329
// These routines work, just disabled to avoid compile warnings
/*
 * Write @data to PHY register @reg of PHY @phy through the SMI
 * interface.  Waits up to ~20 ms (1 ms steps) for any previous SMI
 * transaction to finish; gives up silently on timeout.  Unlike
 * read_MII() there is no completion wait after issuing the write.
 *
 * Also installed as the mii_if_info::mdio_write callback.
 */
static void write_MII(struct net_device *dev, int phy, int reg, int data)
{
	u32 smir = (phy << smirPhyAdBit) | (reg << smirRegAdBit) | data;
	int timedout = 20;

	// wait for last operation to complete
	while (GT64240_READ(GT64240_ETH_SMI_REG) & smirBusy) {
		// snooze for 1 msec and check again
		gt64240_delay(1);

		if (--timedout == 0) {
			printk("%s: write_MII busy timeout!!\n",
			       dev->name);
			return;
		}
	}

	GT64240_WRITE(GT64240_ETH_SMI_REG, smir);
}
350
351static void dump_MII(struct net_device *dev)
352{
353	struct gt64240_private *gp = netdev_priv(dev);
354	int i, val;
355
356	for (i = 0; i < 7; i++) {
357		if ((val = read_MII(dev, gp->phy_addr, i)) >= 0)
358			printk("%s: MII Reg %d=%x\n", dev->name, i, val);
359	}
360	for (i = 16; i < 21; i++) {
361		if ((val = read_MII(dev, gp->phy_addr, i)) >= 0)
362			printk("%s: MII Reg %d=%x\n", dev->name, i, val);
363	}
364}
365
366
/* Print a 6-byte hardware address as "xx:xx:xx:xx:xx:xx\n". */
static void dump_hw_addr(unsigned char *addr_str)
{
	int i;

	for (i = 0; i < 6; i++)
		printk(i < 5 ? "%2.2x:" : "%2.2x\n", addr_str[i]);
}
375
/*
 * Install @addr (a 6-byte station address) into the port's Rx address
 * hash table so the MAC will accept frames sent to it.
 *
 * The hash function operates on the bit-reversed address bytes; a
 * 256-entry reversal lookup table is built lazily on first call.
 * Collisions are resolved by linear probing over at most
 * HASH_HOP_NUMBER consecutive entries.
 *
 * Returns 0 on success, -1 if no free slot was found within the probe
 * limit.
 *
 * NOTE(review): the lazy table init is not locked -- fine as long as
 * this is only called from single-threaded probe/init paths.
 */
static int gt64240_add_hash_entry(struct net_device *dev,
				  unsigned char *addr)
{
	static unsigned char swapped[256];
	struct gt64240_private *gp;
	u32 value1, value0, *entry;
	unsigned char hash_ea[6];
	static int flag = 0;
	u16 hashResult;
	int i;

	if (flag == 0) {	/* Create table to swap bits in a byte  */
		flag = 1;
		for (i = 0; i < 256; i++) {
			swapped[i] = (i & 0x01) << 7;
			swapped[i] |= (i & 0x02) << 5;
			swapped[i] |= (i & 0x04) << 3;
			swapped[i] |= (i & 0x08) << 1;
			swapped[i] |= (i & 0x10) >> 1;
			swapped[i] |= (i & 0x20) >> 3;
			swapped[i] |= (i & 0x40) >> 5;
			swapped[i] |= (i & 0x80) >> 7;
		}
	}

	for (i = 0; i < 6; i++) {	/* swap bits from mac to create hash mac */
		hash_ea[i] = swapped[addr[i]];
	}

	gp = netdev_priv(dev);

	/* create hash entry address    */
	hashResult = (((hash_ea[5] >> 2) & 0x3F) << 9) & 0x7E00;
	hashResult |= ((hash_ea[4] & 0x7F) << 2) | (hash_ea[5] & 0x03);
	hashResult ^=
	    ((hash_ea[3] & 0xFF) << 1) | ((hash_ea[4] >> 7) & 0x01);
	hashResult ^= ((hash_ea[1] & 0x01) << 8) | (hash_ea[2] & 0xFF);

	value0 = hteValid | hteRD;	/* Create hash table entry value */
	value0 |= (u32) addr[0] << 3;
	value0 |= (u32) addr[1] << 11;
	value0 |= (u32) addr[2] << 19;
	value0 |= ((u32) addr[3] & 0x1f) << 27;

	value1 = ((u32) addr[3] >> 5) & 0x07;
	value1 |= (u32) addr[4] << 3;
	value1 |= (u32) addr[5] << 11;

	/* Insert entry value into hash table, probing up to
	 * HASH_HOP_NUMBER slots.  Each 8-byte entry is two 32-bit
	 * words whose order depends on endianness. */
	for (i = 0; i < HASH_HOP_NUMBER; i++) {
		entry = (u32 *) ((u32) gp->hash_table +
				 (((u32) hashResult & 0x07ff) << 3));
		if ((*entry & hteValid) && !(*entry & hteSkip)) {
			hashResult += 2;	/* oops, occupied, go to next entry */
		} else {
#ifdef __LITTLE_ENDIAN
			entry[1] = value1;
			entry[0] = value0;
#else
			entry[0] = value1;
			entry[1] = value0;
#endif
			break;
		}
	}
	if (i >= HASH_HOP_NUMBER) {
		printk("%s: gt64240_add_hash_entry expired!\n", dev->name);
		return (-1);
	}
	return (0);
}
447
448
449static void read_mib_counters(struct gt64240_private *gp)
450{
451	u32 *mib_regs = (u32 *) & gp->mib;
452	int i;
453
454	for (i = 0; i < sizeof(mib_counters_t) / sizeof(u32); i++)
455		mib_regs[i] =
456		    GT64240ETH_READ(gp,
457				    GT64240_ETH_MIB_COUNT_BASE +
458				    i * sizeof(u32));
459}
460
461
/*
 * Refresh gp->stats (returned by gt64240_get_stats()) from the
 * hardware MIB counters.  Counters the MIB block does not track
 * (tx error breakdown, rx/tx drops, collisions) are maintained
 * incrementally elsewhere, as noted inline.
 */
static void update_stats(struct gt64240_private *gp)
{
	mib_counters_t *mib = &gp->mib;
	struct net_device_stats *stats = &gp->stats;

	read_mib_counters(gp);

	stats->rx_packets = mib->totalFramesReceived;
	stats->tx_packets = mib->framesSent;
	stats->rx_bytes = mib->totalByteReceived;
	stats->tx_bytes = mib->byteSent;
	stats->rx_errors = mib->totalFramesReceived - mib->framesReceived;
	//the tx error counters are incremented by the ISR
	//rx_dropped incremented by gt64240_rx
	//tx_dropped incremented by gt64240_tx
	stats->multicast = mib->multicastFramesReceived;
	// collisions incremented by gt64240_tx_complete
	stats->rx_length_errors = mib->oversizeFrames + mib->fragments;
	// The RxError condition means the Rx DMA encountered a
	// CPU owned descriptor, which, if things are working as
	// they should, means the Rx ring has overflowed.
	stats->rx_over_errors = mib->macRxError;
	stats->rx_crc_errors = mib->cRCError;
}
486
/*
 * Abort any in-progress Rx and/or Tx DMA, selected by @abort_bits
 * (sdcmrAR and/or sdcmrAT), and poll until the hardware clears the
 * abort bits or ~100 ms elapse.
 *
 * Takes gp->lock (non-irqsave) for the register access.
 * NOTE(review): the name shadows the C library abort(); harmless in
 * kernel context but easy to misread.
 */
static void abort(struct net_device *dev, u32 abort_bits)
{
	struct gt64240_private *gp = netdev_priv(dev);
	int timedout = 100;	// wait up to 100 msec for hard stop to complete

	if (gt64240_debug > 3)
		printk("%s: abort\n", dev->name);

	// Return if neither Rx or Tx abort bits are set
	if (!(abort_bits & (sdcmrAR | sdcmrAT)))
		return;

	// make sure only the Rx/Tx abort bits are set
	abort_bits &= (sdcmrAR | sdcmrAT);

	spin_lock(&gp->lock);

	// abort any Rx/Tx DMA immediately
	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM, abort_bits);

	if (gt64240_debug > 3)
		printk("%s: abort: SDMA cmd  = %x/%x\n",
		       dev->name, abort_bits, GT64240ETH_READ(gp,
							      GT64240_ETH_SDMA_COMM));

	// wait for abort to complete
	while ((GT64240ETH_READ(gp, GT64240_ETH_SDMA_COMM)) & abort_bits) {
		// snooze for 1 msec and check again
		gt64240_delay(1);

		if (--timedout == 0) {
			printk("%s: abort timeout!!\n", dev->name);
			break;
		}
	}

	spin_unlock(&gp->lock);
}
525
526
/*
 * Completely stop the port: mask its interrupts, abort Rx and Tx DMA,
 * then clear the port-config register (which drops the port-enable
 * bit along with everything else).
 */
static void hard_stop(struct net_device *dev)
{
	struct gt64240_private *gp = netdev_priv(dev);

	if (gt64240_debug > 3)
		printk("%s: hard stop\n", dev->name);

	disable_ether_irq(dev);

	abort(dev, sdcmrAR | sdcmrAT);

	// disable port
	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, 0);
	if (gt64240_debug > 3)
		printk("%s: gt64240_hard_stop: Port Config=%x\n",
		       dev->name, GT64240ETH_READ(gp,
						  GT64240_ETH_PORT_CONFIG));

}
546
/*
 * Reclaim completed Tx descriptors.  Called from the interrupt
 * handler (with gp->lock held) when a Tx-buffer/Tx-end condition is
 * reported in @status.
 *
 * Walks the ring from tx_next_out up to the hardware's current
 * descriptor pointer, accounting errors and collisions, freeing the
 * transmitted skbs, waking the queue if it was full, and restarting
 * the Tx DMA if it stopped while packets are still queued.  Stops
 * early when the per-interrupt work budget (gp->intr_work_done) is
 * exhausted or when it reaches a descriptor still owned by the DMA.
 */
static void gt64240_tx_complete(struct net_device *dev, u32 status)
{
	struct gt64240_private *gp = netdev_priv(dev);
	int nextOut, cdp;
	gt64240_td_t *td;
	u32 cmdstat;

	// translate the hardware's current-descriptor bus address into
	// a ring index
	cdp = (GT64240ETH_READ(gp, GT64240_ETH_CURR_TX_DESC_PTR0)
	       - gp->tx_ring_dma) / sizeof(gt64240_td_t);

	if (gt64240_debug > 3) {	/*+prk17aug01 */
		nextOut = gp->tx_next_out;
		printk
		    ("%s: tx_complete: TX_PTR0=0x%08x, cdp=%d. nextOut=%d.\n",
		     dev->name, GT64240ETH_READ(gp,
						GT64240_ETH_CURR_TX_DESC_PTR0),
		     cdp, nextOut);
		td = &gp->tx_ring[nextOut];
	}

/*** NEED to check and CLEAR these errors every time thru here: ***/
	if (gt64240_debug > 2) {
		if (GT64240_READ(COMM_UNIT_INTERRUPT_CAUSE))
			printk
			    ("%s: gt64240_tx_complete: CIU Cause=%08x, Mask=%08x, EAddr=%08x\n",
			     dev->name,
			     GT64240_READ(COMM_UNIT_INTERRUPT_CAUSE),
			     GT64240_READ(COMM_UNIT_INTERRUPT_MASK),
			     GT64240_READ(COMM_UNIT_ERROR_ADDRESS));
		GT64240_WRITE(COMM_UNIT_INTERRUPT_CAUSE, 0);
	}
	// Continue until we reach the current descriptor pointer
	for (nextOut = gp->tx_next_out; nextOut != cdp;
	     nextOut = (nextOut + 1) % TX_RING_SIZE) {

		if (--gp->intr_work_done == 0)
			break;

		td = &gp->tx_ring[nextOut];
		cmdstat = td->cmdstat;

		if (cmdstat & (u32) txOwn) {
			// DMA is not finished writing descriptor???
			// Leave and come back later to pick-up where we left off.
			break;
		}
		// increment Tx error stats
		if (cmdstat & (u32) txErrorSummary) {
			if (gt64240_debug > 2)
				printk
				    ("%s: tx_complete: Tx error, cmdstat = %x\n",
				     dev->name, cmdstat);
			gp->stats.tx_errors++;
			if (cmdstat & (u32) txReTxLimit)
				gp->stats.tx_aborted_errors++;
			if (cmdstat & (u32) txUnderrun)
				gp->stats.tx_fifo_errors++;
			if (cmdstat & (u32) txLateCollision)
				gp->stats.tx_window_errors++;
		}

		if (cmdstat & (u32) txCollision)
			gp->stats.collisions +=
			    (unsigned long) ((cmdstat & txReTxCntMask) >>
					     txReTxCntBit);

		// Wake the queue if the ring was full
		if (gp->tx_full) {
			gp->tx_full = 0;
			if (gp->last_psr & psrLink) {
				netif_wake_queue(dev);
			}
		}
		// decrement tx ring buffer count
		if (gp->tx_count)
			gp->tx_count--;

		// free the skb
		if (gp->tx_skbuff[nextOut]) {
			if (gt64240_debug > 3)
				printk
				    ("%s: tx_complete: good Tx, skb=%p\n",
				     dev->name, gp->tx_skbuff[nextOut]);
			dev_kfree_skb_irq(gp->tx_skbuff[nextOut]);
			gp->tx_skbuff[nextOut] = NULL;
		} else {
			printk("%s: tx_complete: no skb!\n", dev->name);
		}
	}

	gp->tx_next_out = nextOut;

	// Tx DMA hit end-of-ring/stopped while packets remain queued:
	// kick it again
	if ((status & icrTxEndLow) && gp->tx_count != 0) {
		// we must restart the DMA
		GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM,
				 sdcmrERD | sdcmrTXDL);
	}
}
645
646static irqreturn_t gt64240_interrupt(int irq, void *dev_id)
647{
648	struct net_device *dev = (struct net_device *) dev_id;
649	struct gt64240_private *gp = netdev_priv(dev);
650	u32 status;
651
652	if (dev == NULL) {
653		printk("%s: isr: null dev ptr\n", dev->name);
654		return IRQ_NONE;
655	}
656
657	spin_lock(&gp->lock);
658
659	if (gt64240_debug > 3)
660		printk("%s: isr: entry\n", dev->name);
661
662	gp->intr_work_done = max_interrupt_work;
663
664	while (gp->intr_work_done > 0) {
665
666		status = GT64240ETH_READ(gp, GT64240_ETH_INT_CAUSE);
667#ifdef GT64240_NAPI
668		/* dont ack Rx interrupts */
669		if (!(status & icrRxBuffer))
670			GT64240ETH_WRITE(gp, GT64240_ETH_INT_CAUSE, 0);
671#else
672		// ACK interrupts
673		GT64240ETH_WRITE(gp, GT64240_ETH_INT_CAUSE, 0);
674#endif
675
676		if (gt64240_debug > 3)
677			printk("%s: isr: work=%d., icr=%x\n", dev->name,
678			       gp->intr_work_done, status);
679
680		if ((status & icrEtherIntSum) == 0) {
681			if (!(status &
682			      (icrTxBufferLow | icrTxBufferHigh |
683			       icrRxBuffer))) {
684				/* exit from the while() loop */
685				break;
686			}
687		}
688
689		if (status & icrMIIPhySTC) {
690			u32 psr =
691			    GT64240ETH_READ(gp, GT64240_ETH_PORT_STATUS);
692			if (gp->last_psr != psr) {
693				printk("%s: port status: 0x%08x\n",
694				       dev->name, psr);
695				printk
696				    ("%s:    %s MBit/s, %s-duplex, flow-control %s, link is %s,\n",
697				     dev->name,
698				     psr & psrSpeed ? "100" : "10",
699				     psr & psrDuplex ? "full" : "half",
700				     psr & psrFctl ? "disabled" :
701				     "enabled",
702				     psr & psrLink ? "up" : "down");
703				printk
704				    ("%s:    TxLowQ is %s, TxHighQ is %s, Transmitter is %s\n",
705				     dev->name,
706				     psr & psrTxLow ? "running" :
707				     "stopped",
708				     psr & psrTxHigh ? "running" :
709				     "stopped",
710				     psr & psrTxInProg ? "on" : "off");
711
712				if ((psr & psrLink) && !gp->tx_full &&
713				    netif_queue_stopped(dev)) {
714					printk
715					    ("%s: isr: Link up, waking queue.\n",
716					     dev->name);
717					netif_wake_queue(dev);
718				} else if (!(psr & psrLink)
719					   && !netif_queue_stopped(dev)) {
720					printk
721					    ("%s: isr: Link down, stopping queue.\n",
722					     dev->name);
723					netif_stop_queue(dev);
724				}
725
726				gp->last_psr = psr;
727			}
728		}
729
730		if (status & (icrTxBufferLow | icrTxEndLow))
731			gt64240_tx_complete(dev, status);
732
733		if (status & icrRxBuffer) {
734#ifdef GT64240_NAPI
735			if (netif_rx_schedule_prep(dev)) {
736				disable_ether_irq(dev);
737				__netif_rx_schedule(dev);
738			}
739#else
740			gt64240_rx(dev, status);
741#endif
742		}
743		// Now check TX errors (RX errors were handled in gt64240_rx)
744		if (status & icrTxErrorLow) {
745			printk("%s: isr: Tx resource error\n", dev->name);
746		}
747
748		if (status & icrTxUdr) {
749			printk("%s: isr: Tx underrun error\n", dev->name);
750		}
751	}
752
753	if (gp->intr_work_done == 0) {
754		// ACK any remaining pending interrupts
755		GT64240ETH_WRITE(gp, GT64240_ETH_INT_CAUSE, 0);
756		if (gt64240_debug > 3)
757			printk("%s: isr: hit max work\n", dev->name);
758	}
759
760	if (gt64240_debug > 3)
761		printk("%s: isr: exit, icr=%x\n",
762		       dev->name, GT64240ETH_READ(gp,
763						  GT64240_ETH_INT_CAUSE));
764
765	spin_unlock(&gp->lock);
766
767	return IRQ_HANDLED;
768}
769
/*
 * Unmask the port's device-level interrupt sources, then route this
 * port's interrupt through GT PCI1 (the line is shared by all ports;
 * each port owns one bit of the PCI1 high mask register).
 */
static void enable_ether_irq(struct net_device *dev)
{
	struct gt64240_private *gp = netdev_priv(dev);
	u32 intMask;

	intMask =
	    icrTxBufferLow | icrTxEndLow | icrTxErrorLow |
	    icrTxBufferHigh | icrTxEndHigh | icrTxErrorHigh | icrTxUdr |
	    icrRxBuffer | icrRxOVR | icrRxError | icrMIIPhySTC |
	    icrEtherIntSum;


//- GT64240ETH_WRITE(gp, GT64240_ETH_INT_CAUSE, 0); /* CLEAR existing ints */
	// unmask device interrupts:
	GT64240ETH_WRITE(gp, GT64240_ETH_INT_MASK, intMask);

	// now route ethernet interrupts to GT PCI1 (eth0 and eth1 will be
	// sharing it).
	intMask = MV_READ(PCI_1INTERRUPT_CAUSE_MASK_REGISTER_HIGH);
	intMask |= 1 << gp->port_num;
	MV_WRITE(PCI_1INTERRUPT_CAUSE_MASK_REGISTER_HIGH, intMask);
}
792
793static void disable_ether_irq(struct net_device *dev)
794{
795	struct gt64240_private *gp = netdev_priv(dev);
796	u32 intMask;
797
798	intMask = MV_READ(PCI_1INTERRUPT_CAUSE_MASK_REGISTER_HIGH);
799	intMask &= ~(1 << gp->port_num);
800	MV_WRITE(PCI_1INTERRUPT_CAUSE_MASK_REGISTER_HIGH, intMask);
801
802	// mask all device interrupts:
803	GT64240ETH_WRITE(gp, GT64240_ETH_INT_MASK, 0);
804}
805
806/*
807 * Probe for a GT64240 ethernet controller.
808 */
809static int __init gt64240_probe(void)
810{
811	int found = 0;
812	int i;
813
814	parse_mac_addr_options();
815
816	for (i = 0; i < NUM_INTERFACES; i++) {
817		unsigned long base_addr = gt64240_iflist[i].port;
818
819		if (check_region(base_addr, GT64240_ETH_IO_SIZE)) {
820			printk("gt64240_probe: ioaddr 0x%lx taken?\n",
821			       base_addr);
822			continue;
823		}
824
825		if (gt64240_probe1(base_addr, gt64240_iflist[i].irq, i) == 0) {
826			/*
827			 * Does not seem to be the "traditional" way folks do
828			 * this, but I want to init both eth ports if at all
829			 * possible!
830			 *
831			 * So, until I find out the "correct" way to do this:
832			 */
833			if (++found == NUM_INTERFACES)	/* That's all of them */
834				return 0;
835		}
836	}
837
838	if (found)
839		return 0;	/* as long as we found at least one! */
840
841	return -ENODEV;
842}
843
844module_init(gt64240_probe);
845
846static int __init gt64240_probe1(unsigned long ioaddr, int irq, int port_num)
847{
848	struct net_device *dev = NULL;
849	static unsigned version_printed = 0;
850	struct gt64240_private *gp = NULL;
851	int retval;
852	u32 cpuConfig;
853
854	dev = alloc_etherdev(sizeof(struct gt64240_private));
855	if (!dev)
856		return -ENOMEM;
857
858	if (irq < 0) {
859		printk
860		    ("gt64240_probe1: irq unknown - probing not supported\n");
861		return -ENODEV;
862	}
863	if (!request_region(ioaddr, GT64240_ETH_IO_SIZE, "gt64240eth"))
864		printk("*** request_region() failed!\n");
865
866	cpuConfig = GT64240_READ(CPU_CONFIGURATION);
867	printk("gt64240_probe1: cpu in %s-endian mode\n",
868	       (cpuConfig & (1 << 12)) ? "little" : "big");
869
870	printk("%s: GT64240 found at ioaddr 0x%lx, irq %d.\n",
871	       dev->name, ioaddr, irq);
872
873	if (gt64240_debug && version_printed++ == 0)
874		printk("%s: %s", dev->name, version);
875
876	/* private struct aligned and zeroed by init_etherdev */
877	/* Fill in the 'dev' fields. */
878	dev->base_addr = ioaddr;
879	dev->irq = irq;
880	memcpy(dev->dev_addr, gt64240_station_addr[port_num],
881	       sizeof(dev->dev_addr));
882
883	printk("%s: HW Address ", dev->name);
884	dump_hw_addr(dev->dev_addr);
885
886	gp = dev->priv;
887
888	gp->msg_enable = (debug < 0 ? GT64240_MSG_ENABLE : debug);
889	gp->port_num = port_num;
890	gp->io_size = GT64240_ETH_IO_SIZE;
891	gp->port_offset = port_num * GT64240_ETH_IO_SIZE;
892	gp->phy_addr = gt64240_phy_addr[port_num];
893
894	printk("%s: GT64240 ethernet port %d\n", dev->name, gp->port_num);
895
896#ifdef GT64240_NAPI
897	printk("Rx NAPI supported \n");
898#endif
899
900/* MII Initialization */
901	gp->mii_if.dev = dev;
902	gp->mii_if.phy_id = dev->base_addr;
903	gp->mii_if.mdio_read = read_MII;
904	gp->mii_if.mdio_write = write_MII;
905	gp->mii_if.advertising = read_MII(dev, gp->phy_addr, MII_ADVERTISE);
906
907	// Allocate Rx and Tx descriptor rings
908	if (gp->rx_ring == NULL) {
909		// All descriptors in ring must be 16-byte aligned
910		gp->rx_ring = dma_alloc_noncoherent(NULL,
911					sizeof(gt64240_rd_t) * RX_RING_SIZE +
912					sizeof(gt64240_td_t) * TX_RING_SIZE,
913					&gp->rx_ring_dma, GFP_KERNEL);
914		if (gp->rx_ring == NULL) {
915			retval = -ENOMEM;
916			goto free_region;
917		}
918
919		gp->tx_ring = (gt64240_td_t *) (gp->rx_ring + RX_RING_SIZE);
920		gp->tx_ring_dma =
921			gp->rx_ring_dma + sizeof(gt64240_rd_t) * RX_RING_SIZE;
922	}
923	// Allocate the Rx Data Buffers
924	if (gp->rx_buff == NULL) {
925		gp->rx_buff = dma_alloc_coherent(NULL,
926				PKT_BUF_SZ * RX_RING_SIZE, &gp->rx_buff_dma,
927				GFP_KERNEL);
928		if (gp->rx_buff == NULL) {
929			dma_free_noncoherent(NULL,
930				sizeof(gt64240_rd_t) * RX_RING_SIZE +
931				sizeof(gt64240_td_t) * TX_RING_SIZE,
932				gp->rx_ring, gp->rx_ring_dma);
933			retval = -ENOMEM;
934			goto free_region;
935		}
936	}
937
938	if (gt64240_debug > 3)
939		printk("%s: gt64240_probe1, rx_ring=%p, tx_ring=%p\n",
940		       dev->name, gp->rx_ring, gp->tx_ring);
941
942	// Allocate Rx Hash Table
943	if (gp->hash_table == NULL) {
944		gp->hash_table = dma_alloc_coherent(NULL,
945				RX_HASH_TABLE_SIZE, &gp->hash_table_dma,
946				GFP_KERNEL);
947		if (gp->hash_table == NULL) {
948			dma_free_noncoherent(NULL,
949				sizeof(gt64240_rd_t) * RX_RING_SIZE +
950				sizeof(gt64240_td_t) * TX_RING_SIZE,
951				gp->rx_ring, gp->rx_ring_dma);
952			dma_free_noncoherent(NULL, PKT_BUF_SZ * RX_RING_SIZE,
953				gp->rx_buff, gp->rx_buff_dma);
954			retval = -ENOMEM;
955			goto free_region;
956		}
957	}
958
959	if (gt64240_debug > 3)
960		printk("%s: gt64240_probe1, hash=%p\n",
961		       dev->name, gp->hash_table);
962
963	spin_lock_init(&gp->lock);
964
965	dev->open = gt64240_open;
966	dev->hard_start_xmit = gt64240_tx;
967	dev->stop = gt64240_close;
968	dev->get_stats = gt64240_get_stats;
969	dev->do_ioctl = gt64240_ioctl;
970	dev->set_multicast_list = gt64240_set_rx_mode;
971	dev->tx_timeout = gt64240_tx_timeout;
972	dev->watchdog_timeo = GT64240ETH_TX_TIMEOUT;
973
974#ifdef GT64240_NAPI
975	dev->poll = gt64240_poll;
976	dev->weight = 64;
977#endif
978	dev->ethtool_ops = &gp_ethtool_ops;
979
980	/* Fill in the fields of the device structure with ethernet values. */
981	return 0;
982
983free_region:
984	release_region(ioaddr, gp->io_size);
985	unregister_netdev(dev);
986	free_netdev(dev);
987	printk("%s: gt64240_probe1 failed.  Returns %d\n",
988	       dev->name, retval);
989	return retval;
990}
991
992
/*
 * Abort Tx DMA and re-initialize the Tx descriptor ring: free any
 * pending skbs, give every descriptor back to the CPU (txOwn is NOT
 * set in the cmdstat written here), link the descriptors into a
 * circular list, point the hardware's low-priority TxCDP at the ring
 * start, and clear the ring indices and packet count.
 */
static void reset_tx(struct net_device *dev)
{
	struct gt64240_private *gp = netdev_priv(dev);
	int i;

	abort(dev, sdcmrAT);

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (gp->tx_skbuff[i]) {
			if (in_interrupt())
				dev_kfree_skb_irq(gp->tx_skbuff[i]);
			else
				dev_kfree_skb(gp->tx_skbuff[i]);
			gp->tx_skbuff[i] = NULL;
		}
//-     gp->tx_ring[i].cmdstat = 0; // CPU owns
		gp->tx_ring[i].cmdstat =
		    (u32) (txGenCRC | txEI | txPad | txFirst | txLast);
		gp->tx_ring[i].byte_cnt = 0;
		gp->tx_ring[i].buff_ptr = 0;
		gp->tx_ring[i].next =
		    gp->tx_ring_dma + sizeof(gt64240_td_t) * (i + 1);
		if (gt64240_debug > 4)
			dump_tx_desc(dev, i);
	}
	/* Wrap the ring. */
	gp->tx_ring[i - 1].next = gp->tx_ring_dma;
	if (gt64240_debug > 4)
		dump_tx_desc(dev, i - 1);

	// setup only the lowest priority TxCDP reg
	GT64240ETH_WRITE(gp, GT64240_ETH_CURR_TX_DESC_PTR0,
			 gp->tx_ring_dma);
//- GT64240ETH_WRITE(gp, GT64240_ETH_CURR_TX_DESC_PTR0, 0);     /* ROLLINS */
//- GT64240ETH_WRITE(gp, GT64240_ETH_CURR_TX_DESC_PTR0,virt_to_phys(&gp->tx_ring[0]));  /* ROLLINS */

	GT64240ETH_WRITE(gp, GT64240_ETH_CURR_TX_DESC_PTR1, 0);

	// init Tx indeces and pkt counter
	gp->tx_next_in = gp->tx_next_out = 0;
	gp->tx_count = 0;
}
1035
/*
 * Abort Rx DMA and re-initialize the Rx descriptor ring: point every
 * descriptor at its slot in the pre-allocated rx_buff area, hand
 * ownership to the device (rxOwn), link the descriptors into a
 * circular list, program the lowest-priority RxFDP/RxCDP register
 * pair (zeroing the other three priority queues), and reset the
 * next-out index.
 */
static void reset_rx(struct net_device *dev)
{
	struct gt64240_private *gp = netdev_priv(dev);
	int i;

	abort(dev, sdcmrAR);

	for (i = 0; i < RX_RING_SIZE; i++) {
		gp->rx_ring[i].next =
		    gp->rx_ring_dma + sizeof(gt64240_rd_t) * (i + 1);
		gp->rx_ring[i].buff_ptr = gp->rx_buff_dma + i * PKT_BUF_SZ;
		gp->rx_ring[i].buff_sz = PKT_BUF_SZ;
		gp->rx_ring[i].byte_cnt = 0;	/* just for debug printk's */
		// Give ownership to device, set first and last, enable interrupt
		gp->rx_ring[i].cmdstat =
		    (uint32_t) (rxFirst | rxLast | rxOwn | rxEI);
		if (gt64240_debug > 4)
			dump_rx_desc(dev, i);
	}
	/* Wrap the ring. */
	gp->rx_ring[i - 1].next = gp->rx_ring_dma;
	if (gt64240_debug > 4)
		dump_rx_desc(dev, i - 1);

	// Setup only the lowest priority RxFDP and RxCDP regs
	for (i = 0; i < 4; i++) {
		if (i == 0) {
			GT64240ETH_WRITE(gp, GT64240_ETH_1ST_RX_DESC_PTR0,
					 gp->rx_ring_dma);
			GT64240ETH_WRITE(gp, GT64240_ETH_CURR_RX_DESC_PTR0,
					 gp->rx_ring_dma);
		} else {
			GT64240ETH_WRITE(gp,
					 GT64240_ETH_1ST_RX_DESC_PTR0 +
					 i * 4, 0);
			GT64240ETH_WRITE(gp,
					 GT64240_ETH_CURR_RX_DESC_PTR0 +
					 i * 4, 0);
		}
	}

	// init Rx NextOut index
	gp->rx_next_out = 0;
}
1080
1081
/*
 * gt64240_init - full (re)initialization of the ethernet port.
 *
 * Sequence: stop the port, program the address hash table, reset the
 * Tx and Rx descriptor rings, reset the PHY, configure the SDMA and
 * port registers, clear the MIB counters, then enable the port, start
 * the Rx DMA and enable interrupts.  Register writes are order
 * sensitive.  Always returns 0 (no failure paths are checked).
 */
static int gt64240_init(struct net_device *dev)
{
	struct gt64240_private *gp = netdev_priv(dev);

	if (gt64240_debug > 3) {
		printk("%s: gt64240_init: dev=%p\n", dev->name, dev);
		printk("%s: gt64240_init: scs0_lo=%04x, scs0_hi=%04x\n",
		       dev->name, GT64240_READ(0x008),
		       GT64240_READ(0x010));
		printk("%s: gt64240_init: scs1_lo=%04x, scs1_hi=%04x\n",
		       dev->name, GT64240_READ(0x208),
		       GT64240_READ(0x210));
		printk("%s: gt64240_init: scs2_lo=%04x, scs2_hi=%04x\n",
		       dev->name, GT64240_READ(0x018),
		       GT64240_READ(0x020));
		printk("%s: gt64240_init: scs3_lo=%04x, scs3_hi=%04x\n",
		       dev->name, GT64240_READ(0x218),
		       GT64240_READ(0x220));
	}
	// Stop and disable Port
	hard_stop(dev);

	/* Mask register for the communication-unit interrupts. */
	GT64240_WRITE(COMM_UNIT_INTERRUPT_MASK, 0x07070777);	/*+prk21aug01 */
	if (gt64240_debug > 2)
		printk
		    ("%s: gt64240_init: CIU Cause=%08x, Mask=%08x, EAddr=%08x\n",
		     dev->name, GT64240_READ(COMM_UNIT_INTERRUPT_CAUSE),
		     GT64240_READ(COMM_UNIT_INTERRUPT_MASK),
		     GT64240_READ(COMM_UNIT_ERROR_ADDRESS));

	// Set-up hash table
	memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE);	// clear it
	gp->hash_mode = 0;
	// Add a single entry to hash table - our ethernet address
	gt64240_add_hash_entry(dev, dev->dev_addr);
	// Set-up DMA ptr to hash table
	GT64240ETH_WRITE(gp, GT64240_ETH_HASH_TBL_PTR, gp->hash_table_dma);
	if (gt64240_debug > 3)
		printk("%s: gt64240_init: Hash Tbl Ptr=%x\n", dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_HASH_TBL_PTR));

	// Setup Tx
	reset_tx(dev);

	if (gt64240_debug > 3)
		printk("%s: gt64240_init: Curr Tx Desc Ptr0=%x\n",
		       dev->name, GT64240ETH_READ(gp,
						  GT64240_ETH_CURR_TX_DESC_PTR0));

	// Setup Rx
	reset_rx(dev);

	if (gt64240_debug > 3)
		printk("%s: gt64240_init: 1st/Curr Rx Desc Ptr0=%x/%x\n",
		       dev->name, GT64240ETH_READ(gp,
						  GT64240_ETH_1ST_RX_DESC_PTR0),
		       GT64240ETH_READ(gp, GT64240_ETH_CURR_RX_DESC_PTR0));

	if (gt64240_debug > 3)
		dump_MII(dev);

	/* force a PHY reset -- self-clearing! */
	write_MII(dev, gp->phy_addr, 0, 0x8000);

	if (gt64240_debug > 3)
		printk("%s: gt64240_init: PhyAD=%x\n", dev->name,
		       GT64240_READ(GT64240_ETH_PHY_ADDR_REG));

	// setup DMA
	// We want the Rx/Tx DMA to write/read data to/from memory in
	// Big Endian mode. Also set DMA Burst Size to 8 64Bit words.
#ifdef DESC_DATA_BE
	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_CONFIG,
			 (0xf << sdcrRCBit) | sdcrRIFB | (3 <<
							  sdcrBSZBit));
#else
	/*
	 * NOTE(review): the comment above says burst size 8 (matching the
	 * 3<<sdcrBSZBit in the BE branch), but this branch programs
	 * 2<<sdcrBSZBit -- confirm which burst size is intended here.
	 */
	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_CONFIG, sdcrBLMR | sdcrBLMT |
//-                  (0xf<<sdcrRCBit) | sdcrRIFB | (3<<sdcrBSZBit));
			 (0xf << sdcrRCBit) | sdcrRIFB | (2 <<
							  sdcrBSZBit));
#endif

	if (gt64240_debug > 3)
		printk("%s: gt64240_init: SDMA Config=%x\n", dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_SDMA_CONFIG));


	if (gt64240_debug > 3)
		printk("%s: gt64240_init: SDMA Cmd =%x\n", dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_SDMA_COMM));

	/* Program the base port configuration (port still disabled). */
	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, PORT_CONFIG);

	if (gt64240_debug > 3)
		printk("%s: gt64240_init: Port Config=%x\n", dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_PORT_CONFIG));

	/*
	 * Disable all Type-of-Service queueing. All Rx packets will be
	 * treated normally and will be sent to the lowest priority
	 * queue.
	 *
	 * Disable flow-control for now. FIX! support flow control?
	 */

	// clear all the MIB ctr regs
	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG_EXT,
			 EXT_CONFIG_CLEAR);
	/* Reading the counters clears them in this mode. */
	read_mib_counters(gp);
	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG_EXT,
			 EXT_CONFIG_CLEAR | pcxrMIBclrMode);

	if (gt64240_debug > 3)
		printk("%s: gt64240_init: Port Config Ext=%x\n", dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_PORT_CONFIG_EXT));

	if (gt64240_debug > 3)
		printk("%s: gt64240_init: Port Command=%x\n", dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_PORT_COMMAND));
	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_COMMAND, 0x0);

	netif_start_queue(dev);

	/* enable the port */
	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG,
			 (PORT_CONFIG | pcrEN));
	if (gt64240_debug > 3)
		printk("%s: gt64240_init: Port Config=%x\n", dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_PORT_CONFIG));
	// start Rx DMA
	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM, sdcmrERD);


	// enable interrupts
	enable_ether_irq(dev);

//---    gp->last_psr |= psrLink;   /* KLUDGE ALERT */

	// we should now be receiving frames
	return 0;
}
1223
1224
1225static int gt64240_open(struct net_device *dev)
1226{
1227	int retval;
1228
1229	if (gt64240_debug > 3)
1230		printk("%s: gt64240_open: dev=%p\n", dev->name, dev);
1231
1232	if ((retval = request_irq(dev->irq, &gt64240_interrupt,
1233				  SA_SHIRQ, dev->name, dev))) {
1234		printk("%s: unable to get IRQ %d\n", dev->name, dev->irq);
1235
1236		return retval;
1237	}
1238	// Initialize and startup the GT-64240 ethernet port
1239	if ((retval = gt64240_init(dev))) {
1240		printk("%s: error in gt64240_open\n", dev->name);
1241		free_irq(dev->irq, dev);
1242
1243		return retval;
1244	}
1245
1246	if (gt64240_debug > 3)
1247		printk("%s: gt64240_open: Initialization done.\n",
1248		       dev->name);
1249
1250	return 0;
1251}
1252
1253static int gt64240_close(struct net_device *dev)
1254{
1255	if (gt64240_debug > 3)
1256		printk("%s: gt64240_close: dev=%p\n", dev->name, dev);
1257
1258	// stop the device
1259	if (netif_device_present(dev)) {
1260		netif_stop_queue(dev);
1261		hard_stop(dev);
1262	}
1263
1264	free_irq(dev->irq, dev);
1265
1266	return 0;
1267}
1268
1269#ifdef GT64240_NAPI
1270/*
1271 * Function will release Tx skbs which are now complete
1272 */
1273static void gt64240_tx_fill(struct net_device *dev, u32 status)
1274{
1275	struct gt64240_private *gp = netdev_priv(dev);
1276	int nextOut, cdp;
1277	gt64240_td_t *td;
1278	u32 cmdstat;
1279
1280	cdp = (GT64240ETH_READ(gp, GT64240_ETH_CURR_TX_DESC_PTR0)
1281	       - gp->tx_ring_dma) / sizeof(gt64240_td_t);
1282
1283	for (nextOut = gp->tx_next_out; nextOut != cdp;
1284	     nextOut = (nextOut + 1) % TX_RING_SIZE) {
1285		if (--gp->intr_work_done == 0)
1286			break;
1287
1288		td = &gp->tx_ring[nextOut];
1289		cmdstat = td->cmdstat;
1290
1291		if (cmdstat & (u32) txOwn)
1292			break;
1293
1294		if (gp->tx_full) {
1295			gp->tx_full = 0;
1296			if (gp->last_psr & psrLink) {
1297				netif_wake_queue(dev);
1298			}
1299		}
1300		// decrement tx ring buffer count
1301		if (gp->tx_count)
1302			gp->tx_count--;
1303
1304		// free the skb
1305		if (gp->tx_skbuff[nextOut]) {
1306			dev_kfree_skb_irq(gp->tx_skbuff[nextOut]);
1307			gp->tx_skbuff[nextOut] = NULL;
1308		}
1309	}
1310
1311	gp->tx_next_out = nextOut;
1312
1313	if ((status & icrTxEndLow) && gp->tx_count != 0)
1314		// we must restart the DMA
1315		GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM,
1316				 sdcmrERD | sdcmrTXDL);
1317}
1318
1319/*
1320 * Main function for NAPI
1321 */
1322static int gt64240_poll(struct net_device *dev, int *budget)
1323{
1324	struct gt64240_private *gp = netdev_priv(dev);
1325	unsigned long flags;
1326	int done = 1, orig_budget, work_done;
1327	u32 status = GT64240ETH_READ(gp, GT64240_ETH_INT_CAUSE);
1328
1329	spin_lock_irqsave(&gp->lock, flags);
1330	gt64240_tx_fill(dev, status);
1331
1332	if (GT64240ETH_READ(gp, GT64240_ETH_CURR_RX_DESC_PTR0) !=
1333	    gp->rx_next_out) {
1334		orig_budget = *budget;
1335		if (orig_budget > dev->quota)
1336			orig_budget = dev->quota;
1337
1338		work_done = gt64240_rx(dev, status, orig_budget);
1339		*budget -= work_done;
1340		dev->quota -= work_done;
1341		if (work_done >= orig_budget)
1342			done = 0;
1343		if (done) {
1344			__netif_rx_complete(dev);
1345			enable_ether_irq(dev);
1346		}
1347	}
1348
1349	spin_unlock_irqrestore(&gp->lock, flags);
1350
1351	return (done ? 0 : 1);
1352}
1353#endif
1354
1355static int gt64240_tx(struct sk_buff *skb, struct net_device *dev)
1356{
1357	struct gt64240_private *gp = netdev_priv(dev);
1358	unsigned long flags;
1359	int nextIn;
1360
1361	spin_lock_irqsave(&gp->lock, flags);
1362
1363	nextIn = gp->tx_next_in;
1364
1365	if (gt64240_debug > 3) {
1366		printk("%s: gt64240_tx: nextIn=%d.\n", dev->name, nextIn);
1367	}
1368
1369	if (gp->tx_count >= TX_RING_SIZE) {
1370		printk("%s: Tx Ring full, pkt dropped.\n", dev->name);
1371		gp->stats.tx_dropped++;
1372		spin_unlock_irqrestore(&gp->lock, flags);
1373		return 1;
1374	}
1375
1376	if (!(gp->last_psr & psrLink)) {
1377		printk("%s: gt64240_tx: Link down, pkt dropped.\n",
1378		       dev->name);
1379		gp->stats.tx_dropped++;
1380		spin_unlock_irqrestore(&gp->lock, flags);
1381//---   dump_MII(dev);          /* KLUDGE ALERT !!! */
1382		return 1;
1383	}
1384
1385	if (gp->tx_ring[nextIn].cmdstat & txOwn) {
1386		printk
1387		    ("%s: gt64240_tx: device owns descriptor, pkt dropped.\n",
1388		     dev->name);
1389		gp->stats.tx_dropped++;
1390		// stop the queue, so Tx timeout can fix it
1391		netif_stop_queue(dev);
1392		spin_unlock_irqrestore(&gp->lock, flags);
1393		return 1;
1394	}
1395	// Prepare the Descriptor at tx_next_in
1396	gp->tx_skbuff[nextIn] = skb;
1397	gp->tx_ring[nextIn].byte_cnt = skb->len;
1398	gp->tx_ring[nextIn].buff_ptr = virt_to_phys(skb->data);
1399
1400	// make sure packet gets written back to memory
1401	dma_cache_wback_inv((unsigned long) (skb->data), skb->len);
1402	mb();
1403
1404	// Give ownership to device, set first and last desc, enable interrupt
1405	// Setting of ownership bit must be *last*!
1406	gp->tx_ring[nextIn].cmdstat =
1407	    txOwn | txGenCRC | txEI | txPad | txFirst | txLast;
1408
1409	if (gt64240_debug > 5) {
1410		dump_tx_desc(dev, nextIn);
1411	}
1412	// increment tx_next_in with wrap
1413	gp->tx_next_in = (nextIn + 1) % TX_RING_SIZE;
1414
1415//+prk20aug01:
1416	if (0) {		/* ROLLINS */
1417		GT64240ETH_WRITE(gp, GT64240_ETH_CURR_TX_DESC_PTR0,
1418				 virt_to_phys(&gp->tx_ring[nextIn]));
1419	}
1420
1421	if (gt64240_debug > 3) {	/*+prk17aug01 */
1422		printk
1423		    ("%s: gt64240_tx: TX_PTR0=0x%08x, EthPortStatus=0x%08x\n",
1424		     dev->name, GT64240ETH_READ(gp,
1425						GT64240_ETH_CURR_TX_DESC_PTR0),
1426		     GT64240ETH_READ(gp, GT64240_ETH_PORT_STATUS));
1427	}
1428	// If DMA is stopped, restart
1429	if (!((GT64240ETH_READ(gp, GT64240_ETH_PORT_STATUS)) & psrTxLow)) {
1430		GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM,
1431				 sdcmrERD | sdcmrTXDL);
1432	}
1433
1434	if (gt64240_debug > 3) {	/*+prk17aug01 */
1435		printk
1436		    ("%s: gt64240_tx: TX_PTR0=0x%08x, EthPortStatus=0x%08x\n",
1437		     dev->name, GT64240ETH_READ(gp,
1438						GT64240_ETH_CURR_TX_DESC_PTR0),
1439		     GT64240ETH_READ(gp, GT64240_ETH_PORT_STATUS));
1440	}
1441	// increment count and stop queue if full
1442	if (++gp->tx_count >= TX_RING_SIZE) {
1443		gp->tx_full = 1;
1444		netif_stop_queue(dev);
1445	}
1446
1447	dev->trans_start = jiffies;
1448	spin_unlock_irqrestore(&gp->lock, flags);
1449
1450	return 0;
1451}
1452
1453
1454static int
1455#ifdef GT64240_NAPI
1456gt64240_rx(struct net_device *dev, u32 status, int budget)
1457#else
1458gt64240_rx(struct net_device *dev, u32 status)
1459#endif
1460{
1461	struct gt64240_private *gp = netdev_priv(dev);
1462	struct sk_buff *skb;
1463	int pkt_len, nextOut, cdp;
1464	gt64240_rd_t *rd;
1465	u32 cmdstat;
1466
1467	if (gt64240_debug > 3)
1468		printk("%s: gt64240_rx: dev=%p, status=%x\n",
1469		       dev->name, dev, status);
1470
1471	cdp = (GT64240ETH_READ(gp, GT64240_ETH_CURR_RX_DESC_PTR0)
1472	       - gp->rx_ring_dma) / sizeof(gt64240_rd_t);
1473
1474	// Continue until we reach the current descriptor pointer
1475	for (nextOut = gp->rx_next_out; nextOut != cdp;
1476	     nextOut = (nextOut + 1) % RX_RING_SIZE) {
1477
1478#ifdef GT64240_NAPI
1479		if (budget <= 0)
1480			break;
1481
1482		budget--;
1483#endif
1484
1485		if (--gp->intr_work_done == 0)
1486			break;
1487
1488		if (gt64240_debug > 4)
1489			dump_rx_desc(dev, nextOut);
1490
1491		rd = &gp->rx_ring[nextOut];
1492		cmdstat = rd->cmdstat;
1493
1494		if (gt64240_debug > 3)
1495			printk("%s: isr: Rx desc cmdstat=%x, nextOut=%d\n",
1496			       dev->name, cmdstat, nextOut);
1497
1498		if (cmdstat & (u32) rxOwn) {
1499			if (gt64240_debug > 2)
1500				printk
1501				    ("%s: gt64240_rx: device owns descriptor!\n",
1502				     dev->name);
1503			// DMA is not finished updating descriptor???
1504			// Leave and come back later to pick-up where we left off.
1505			break;
1506		}
1507		// must be first and last (ie only) buffer of packet
1508		if (!(cmdstat & (u32) rxFirst)
1509		    || !(cmdstat & (u32) rxLast)) {
1510			printk
1511			    ("%s: gt64240_rx: desc not first and last!\n",
1512			     dev->name);
1513			cmdstat |= (u32) rxOwn;
1514			rd->cmdstat = cmdstat;
1515			continue;
1516		}
1517		// Drop this received pkt if there were any errors
1518		if ((cmdstat & (u32) rxErrorSummary)
1519		    || (status & icrRxError)) {
1520			// update the detailed rx error counters that are not covered
1521			// by the MIB counters.
1522			if (cmdstat & (u32) rxOverrun)
1523				gp->stats.rx_fifo_errors++;
1524			cmdstat |= (u32) rxOwn;
1525			rd->cmdstat = cmdstat;
1526			continue;
1527		}
1528
1529		pkt_len = rd->byte_cnt;
1530
1531		/* Create new skb. */
1532//      skb = dev_alloc_skb(pkt_len+2);
1533		skb = dev_alloc_skb(1538);
1534		if (skb == NULL) {
1535			printk("%s: Memory squeeze, dropping packet.\n",
1536			       dev->name);
1537			gp->stats.rx_dropped++;
1538			cmdstat |= (u32) rxOwn;
1539			rd->cmdstat = cmdstat;
1540			continue;
1541		}
1542		skb->dev = dev;
1543		skb_reserve(skb, 2);	/* 16 byte IP header align */
1544		memcpy(skb_put(skb, pkt_len),
1545		       &gp->rx_buff[nextOut * PKT_BUF_SZ], pkt_len);
1546		skb->protocol = eth_type_trans(skb, dev);
1547
1548		/* NIC performed some checksum computation */
1549		skb->ip_summed = CHECKSUM_UNNECESSARY;
1550#ifdef GT64240_NAPI
1551		netif_receive_skb(skb);
1552#else
1553		netif_rx(skb);	/* pass the packet to upper layers */
1554#endif
1555
1556		// now we can release ownership of this desc back to device
1557		cmdstat |= (u32) rxOwn;
1558		rd->cmdstat = cmdstat;
1559
1560		dev->last_rx = jiffies;
1561	}
1562
1563	if (gt64240_debug > 3 && nextOut == gp->rx_next_out)
1564		printk("%s: gt64240_rx: RxCDP did not increment?\n",
1565		       dev->name);
1566
1567	gp->rx_next_out = nextOut;
1568	return 0;
1569}
1570
1571
1572static void gt64240_tx_timeout(struct net_device *dev)
1573{
1574	struct gt64240_private *gp = netdev_priv(dev);
1575	unsigned long flags;
1576
1577	spin_lock_irqsave(&gp->lock, flags);
1578
1579
1580	if (!(gp->last_psr & psrLink)) {
1581		spin_unlock_irqrestore(&gp->lock, flags);
1582	} else {
1583		printk("======------> gt64240_tx_timeout: %d jiffies \n",
1584		       GT64240ETH_TX_TIMEOUT);
1585
1586		disable_ether_irq(dev);
1587		spin_unlock_irqrestore(&gp->lock, flags);
1588		reset_tx(dev);
1589		enable_ether_irq(dev);
1590
1591		netif_wake_queue(dev);
1592	}
1593}
1594
1595
1596static void gt64240_set_rx_mode(struct net_device *dev)
1597{
1598	struct gt64240_private *gp = netdev_priv(dev);
1599	unsigned long flags;
1600	struct dev_mc_list *mcptr;
1601
1602	if (gt64240_debug > 3)
1603		printk("%s: gt64240_set_rx_mode: dev=%p, flags=%x\n",
1604		       dev->name, dev, dev->flags);
1605
1606	// stop the Receiver DMA
1607	abort(dev, sdcmrAR);
1608
1609	spin_lock_irqsave(&gp->lock, flags);
1610
1611	if (dev->flags & IFF_PROMISC)
1612		GT64240ETH_SETBIT(gp, GT64240_ETH_PORT_CONFIG, pcrPM);
1613	else
1614		GT64240ETH_CLRBIT(gp, GT64240_ETH_PORT_CONFIG, pcrPM);
1615/*
1616	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG,
1617		(PORT_CONFIG | pcrPM | pcrEN));
1618*/
1619
1620	memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE);	// clear hash table
1621	// Add our ethernet address
1622	gt64240_add_hash_entry(dev, dev->dev_addr);
1623	if (dev->mc_count) {
1624		for (mcptr = dev->mc_list; mcptr; mcptr = mcptr->next) {
1625			if (gt64240_debug > 2) {
1626				printk("%s: gt64240_set_rx_mode: addr=\n",
1627				       dev->name);
1628				dump_hw_addr(mcptr->dmi_addr);
1629			}
1630			gt64240_add_hash_entry(dev, mcptr->dmi_addr);
1631		}
1632	}
1633
1634	if (gt64240_debug > 3)
1635		printk("%s: gt64240_set_rx: Port Config=%x\n", dev->name,
1636		       GT64240ETH_READ(gp, GT64240_ETH_PORT_CONFIG));
1637
1638	// restart Rx DMA
1639	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM, sdcmrERD);
1640
1641	spin_unlock_irqrestore(&gp->lock, flags);
1642}
1643
1644static struct net_device_stats *gt64240_get_stats(struct net_device *dev)
1645{
1646	struct gt64240_private *gp = netdev_priv(dev);
1647	unsigned long flags;
1648
1649	if (gt64240_debug > 3)
1650		printk("%s: gt64240_get_stats: dev=%p\n", dev->name, dev);
1651
1652	if (netif_device_present(dev)) {
1653		spin_lock_irqsave(&gp->lock, flags);
1654		update_stats(gp);
1655		spin_unlock_irqrestore(&gp->lock, flags);
1656	}
1657
1658	return &gp->stats;
1659}
1660