/*
 * drivers/net/titan_ge.c - Driver for Titan ethernet ports
 *
 * Copyright (C) 2003 PMC-Sierra Inc.
 * Author : Manish Lachwani (lachwani@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

/*
 * The MAC unit of the Titan consists of the following:
 *
 * -> XDMA Engine to move data between memory and the MAC packet FIFO
 * -> FIFO is where the incoming and outgoing data is placed
 * -> TRTG is the unit that pulls the data from the FIFO for Tx and pushes
 *    the data into the FIFO for Rx
 * -> TMAC is the outgoing MAC interface and RMAC is the incoming.
 * -> AFX is the address filtering block
 * -> GMII block to communicate with the PHY
 *
 * Rx will look like the following:
 * GMII --> RMAC --> AFX --> TRTG --> Rx FIFO --> XDMA --> CPU memory
 *
 * Tx will look like the following:
 * CPU memory --> XDMA --> Tx FIFO --> TRTG --> TMAC --> GMII
 *
 * The Titan driver has support for the following performance features:
 * -> Rx side checksumming
 * -> Jumbo Frames
 * -> Interrupt Coalescing
 * -> Rx NAPI
 * -> SKB Recycling
 * -> Transmit/Receive descriptors in SRAM
 * -> Fast routing for IP forwarding
 */

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/in.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/delay.h>
#include <linux/prefetch.h>

/* For MII specific registers, titan_mdio.h should be included */
#include <net/ip.h>

#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/titan_dep.h>

#include "titan_ge.h"
#include "titan_mdio.h"

/* Static Function Declarations */
static int titan_ge_eth_open(struct net_device *);
static void titan_ge_eth_stop(struct net_device *);
static struct net_device_stats *titan_ge_get_stats(struct net_device *);
static int titan_ge_init_rx_desc_ring(titan_ge_port_info *, int, int,
				      unsigned long, unsigned long,
				      unsigned long);
static int titan_ge_init_tx_desc_ring(titan_ge_port_info *, int,
				      unsigned long, unsigned long);

static int titan_ge_open(struct net_device *);
static int titan_ge_start_xmit(struct sk_buff *, struct net_device *);
static int titan_ge_stop(struct net_device *);

static unsigned long titan_ge_tx_coal(unsigned long, int);

static void titan_ge_port_reset(unsigned int);
static int titan_ge_free_tx_queue(titan_ge_port_info *);
static int titan_ge_rx_task(struct net_device *, titan_ge_port_info *);
static int titan_ge_port_start(struct net_device *, titan_ge_port_info *);

static int titan_ge_return_tx_desc(titan_ge_port_info *, int);

/*
 * Some configuration for the FIFO and the XDMA channel needs
 * to be done only once for all the ports. This flag controls
 * that.
 */
static unsigned long config_done;

/*
 * Out-of-memory flag, set once when an Rx buffer allocation fails
 */
static unsigned int oom_flag;

static int titan_ge_poll(struct net_device *netdev, int *budget);

static int titan_ge_receive_queue(struct net_device *, unsigned int);

static struct platform_device *titan_ge_device[3];

/* MAC Address */
extern unsigned char titan_ge_mac_addr_base[6];

unsigned long titan_ge_base;
static unsigned long titan_ge_sram;

static char titan_string[] = "titan";

/*
 * The Titan GE has two alignment requirements:
 * -> skb->data to be cacheline aligned (32 byte)
 * -> IP header alignment to 16 bytes
 *
 * The latter is not implemented. So, that results in an extra copy on
 * the Rx. This is a big performance hog. For the former case, the
 * dev_alloc_skb() has been replaced with titan_ge_alloc_skb(). The size
 * requested is calculated:
 *
 * Ethernet Payload (MTU)    : 1500
 * Ethernet Header           : 14
 * Future Titan change for IP header alignment : 2
 * Cacheline alignment slack : 64
 *
 * Hence, we allocate (1500 + 14 + 2 + 64) = 1580 bytes.  For IP header
 * alignment, we use skb_reserve().
 */

#define ALIGNED_RX_SKB_ADDR(addr) \
	((((unsigned long)(addr) + (64UL - 1UL)) \
	& ~(64UL - 1UL)) - (unsigned long)(addr))

#define titan_ge_alloc_skb(__length, __gfp_flags) \
({      struct sk_buff *__skb; \
	__skb = alloc_skb((__length) + 64, (__gfp_flags)); \
	if (__skb) { \
		int __offset = (int) ALIGNED_RX_SKB_ADDR(__skb->data); \
		if (__offset) \
			skb_reserve(__skb, __offset); \
	} \
	__skb; \
})
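
/*
 * Worked example for the macro above (illustrative address): if
 * alloc_skb() returns skb->data at 0x80104918, then 0x80104918 + 0x3f =
 * 0x80104957, masking with ~0x3f gives 0x80104940, and the macro
 * evaluates to 0x28.  skb_reserve(skb, 0x28) then moves skb->data to
 * the 64-byte boundary 0x80104940; the extra 64 bytes requested by
 * titan_ge_alloc_skb() make room for this shift.
 */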

/*
 * Configure the GMII block of the Titan based on what the PHY tells us
 */
static void titan_ge_gmii_config(int port_num)
{
	unsigned int reg_data = 0, phy_reg;
	int err;

	err = titan_ge_mdio_read(port_num, TITAN_GE_MDIO_PHY_STATUS, &phy_reg);

	if (err == TITAN_GE_MDIO_ERROR) {
		printk(KERN_ERR
		       "Could not read PHY control register 0x11\n");
		printk(KERN_ERR
			"Setting speed to 1000 Mbps and Duplex to Full\n");

		return;
	}

	err = titan_ge_mdio_write(port_num, TITAN_GE_MDIO_PHY_IE, 0);

	if (phy_reg & 0x8000) {
		if (phy_reg & 0x2000) {
			/* Full Duplex and 1000 Mbps */
			TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
					(port_num << 12)), 0x201);
		} else {
			/* Half Duplex and 1000 Mbps */
			TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
					(port_num << 12)), 0x2201);
		}
	}
	if (phy_reg & 0x4000) {
		if (phy_reg & 0x2000) {
			/* Full Duplex and 100 Mbps */
			TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
					(port_num << 12)), 0x100);
		} else {
			/* Half Duplex and 100 Mbps */
			TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
					(port_num << 12)), 0x2100);
		}
	}
	reg_data = TITAN_GE_READ(TITAN_GE_GMII_CONFIG_GENERAL +
				(port_num << 12));
	reg_data |= 0x3;
	TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_GENERAL +
			(port_num << 12)), reg_data);
}

/*
 * Enable the TMAC if it is not already enabled
 */
static void titan_ge_enable_tx(unsigned int port_num)
{
	unsigned long reg_data;

	reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 + (port_num << 12));
	if (!(reg_data & 0x8000)) {
		printk(KERN_NOTICE "TMAC disabled for port %d!\n", port_num);

		reg_data |= 0x0001;	/* Enable TMAC */
		reg_data |= 0x4000;	/* CRC Check Enable */
		reg_data |= 0x2000;	/* Padding enable */
		reg_data |= 0x0800;	/* CRC Add enable */
		reg_data |= 0x0080;	/* PAUSE frame */

		TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 +
				(port_num << 12)), reg_data);
	}
}

/*
 * Tx Timeout function
 */
static void titan_ge_tx_timeout(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);

	printk(KERN_INFO "%s: TX timeout, resetting card\n", netdev->name);

	/* Do the reset outside of interrupt context */
	schedule_work(&titan_ge_eth->tx_timeout_task);
}

/*
 * Update the AFX tables for UC and MC for slice 0 only
 */
static void titan_ge_update_afx(titan_ge_port_info * titan_ge_eth)
{
	int port = titan_ge_eth->port_num;
	unsigned int i;
	volatile unsigned long reg_data = 0;
	u8 p_addr[6];

	memcpy(p_addr, titan_ge_eth->port_mac_addr, 6);

	/* Set the MAC address here for TMAC and RMAC */
	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_HI + (port << 12)),
		       ((p_addr[5] << 8) | p_addr[4]));
	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_MID + (port << 12)),
		       ((p_addr[3] << 8) | p_addr[2]));
	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_LOW + (port << 12)),
		       ((p_addr[1] << 8) | p_addr[0]));

	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_HI + (port << 12)),
		       ((p_addr[5] << 8) | p_addr[4]));
	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_MID + (port << 12)),
		       ((p_addr[3] << 8) | p_addr[2]));
	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_LOW + (port << 12)),
		       ((p_addr[1] << 8) | p_addr[0]));

	TITAN_GE_WRITE((0x112c | (port << 12)), 0x1);
	/* Configure the eight address filters */
	for (i = 0; i < 8; i++) {
		/* Select each of the eight filters */
		TITAN_GE_WRITE((TITAN_GE_AFX_ADDRS_FILTER_CTRL_2 +
				(port << 12)), i);

		/* Configure the match */
		reg_data = 0x9;	/* Forward Enable Bit */
		TITAN_GE_WRITE((TITAN_GE_AFX_ADDRS_FILTER_CTRL_0 +
				(port << 12)), reg_data);

		/* Finally, AFX Exact Match Address Registers */
		TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_LOW + (port << 12)),
			       ((p_addr[1] << 8) | p_addr[0]));
		TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_MID + (port << 12)),
			       ((p_addr[3] << 8) | p_addr[2]));
		TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_HIGH + (port << 12)),
			       ((p_addr[5] << 8) | p_addr[4]));

		/* VLAN id set to 0 */
		TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_VID +
				(port << 12)), 0);
	}
}

/*
 * Actual routine to reset the adapter when the timeout occurred
 */
static void titan_ge_tx_timeout_task(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	int port = titan_ge_eth->port_num;

	printk(KERN_ERR "Titan GE: Transmit timed out. Resetting ...\n");

	/* Dump debug info */
	printk(KERN_ERR "TRTG cause : %x\n",
			TITAN_GE_READ(0x100c + (port << 12)));

	/* Fix this for the other ports */
	printk(KERN_ERR "FIFO cause : %x\n", TITAN_GE_READ(0x482c));
	printk(KERN_ERR "IE cause : %x\n", TITAN_GE_READ(0x0040));
	printk(KERN_ERR "XDMA GDI ERROR : %x\n",
			TITAN_GE_READ(0x5008 + (port << 8)));
	printk(KERN_ERR "CHANNEL ERROR: %x\n",
			TITAN_GE_READ(TITAN_GE_CHANNEL0_INTERRUPT
						+ (port << 8)));

	netif_device_detach(netdev);
	titan_ge_port_reset(titan_ge_eth->port_num);
	titan_ge_port_start(netdev, titan_ge_eth);
	netif_device_attach(netdev);
}

/*
 * Change the MTU of the Ethernet Device
 */
static int titan_ge_change_mtu(struct net_device *netdev, int new_mtu)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned long flags;

	if ((new_mtu > 9500) || (new_mtu < 64))
		return -EINVAL;

	spin_lock_irqsave(&titan_ge_eth->lock, flags);

	netdev->mtu = new_mtu;

	/* Now we have to reopen the interface so that SKBs with the new
	 * size will be allocated */

	if (netif_running(netdev)) {
		titan_ge_eth_stop(netdev);

		if (titan_ge_eth_open(netdev) != TITAN_OK) {
			printk(KERN_ERR
			       "%s: Fatal error on opening device\n",
			       netdev->name);
			spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
			return -1;
		}
	}

	spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
	return 0;
}

/*
 * Titan GbE interrupt handler. All three ports interrupt on a single
 * line. Once an interrupt is triggered, figure out the port and then
 * check the channel.
 */
static irqreturn_t titan_ge_int_handler(int irq, void *dev_id)
{
	struct net_device *netdev = (struct net_device *) dev_id;
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	unsigned int reg_data;
	unsigned int eth_int_cause_error = 0, is;
	unsigned long eth_int_cause1;
	int err = 0;
#ifdef CONFIG_SMP
	unsigned long eth_int_cause2;
#endif

	/* Ack the CPU interrupt */
	switch (port_num) {
	case 0:
		is = OCD_READ(RM9000x2_OCD_INTP0STATUS1);
		OCD_WRITE(RM9000x2_OCD_INTP0CLEAR1, is);

#ifdef CONFIG_SMP
		is = OCD_READ(RM9000x2_OCD_INTP1STATUS1);
		OCD_WRITE(RM9000x2_OCD_INTP1CLEAR1, is);
#endif
		break;

	case 1:
		is = OCD_READ(RM9000x2_OCD_INTP0STATUS0);
		OCD_WRITE(RM9000x2_OCD_INTP0CLEAR0, is);

#ifdef CONFIG_SMP
		is = OCD_READ(RM9000x2_OCD_INTP1STATUS0);
		OCD_WRITE(RM9000x2_OCD_INTP1CLEAR0, is);
#endif
		break;

	case 2:
		is = OCD_READ(RM9000x2_OCD_INTP0STATUS4);
		OCD_WRITE(RM9000x2_OCD_INTP0CLEAR4, is);

#ifdef CONFIG_SMP
		is = OCD_READ(RM9000x2_OCD_INTP1STATUS4);
		OCD_WRITE(RM9000x2_OCD_INTP1CLEAR4, is);
#endif
	}

	eth_int_cause1 = TITAN_GE_READ(TITAN_GE_INTR_XDMA_CORE_A);
#ifdef CONFIG_SMP
	eth_int_cause2 = TITAN_GE_READ(TITAN_GE_INTR_XDMA_CORE_B);
#endif

	/* Spurious interrupt */
#ifdef CONFIG_SMP
	if ((eth_int_cause1 == 0) && (eth_int_cause2 == 0)) {
#else
	if (eth_int_cause1 == 0) {
#endif
		eth_int_cause_error = TITAN_GE_READ(TITAN_GE_CHANNEL0_INTERRUPT +
					(port_num << 8));

		if (eth_int_cause_error == 0)
			return IRQ_NONE;
	}

	/* Handle Tx first. No need to ack interrupts */
#ifdef CONFIG_SMP
	if ((eth_int_cause1 & 0x20202) ||
		(eth_int_cause2 & 0x20202))
#else
	if (eth_int_cause1 & 0x20202)
#endif
		titan_ge_free_tx_queue(titan_ge_eth);

	/* Handle the Rx next */
#ifdef CONFIG_SMP
	if ((eth_int_cause1 & 0x10101) ||
		(eth_int_cause2 & 0x10101)) {
#else
	if (eth_int_cause1 & 0x10101) {
#endif
		if (netif_rx_schedule_prep(netdev)) {
			unsigned int ack;

			ack = TITAN_GE_READ(TITAN_GE_INTR_XDMA_IE);
			/* Disable Tx and Rx both */
			if (port_num == 0)
				ack &= ~(0x3);
			if (port_num == 1)
				ack &= ~(0x300);

			if (port_num == 2)
				ack &= ~(0x30000);

			/* Interrupts have been disabled */
			TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, ack);

			__netif_rx_schedule(netdev);
		}
	}

	/* Handle error interrupts */
	if (eth_int_cause_error && (eth_int_cause_error != 0x2)) {
		printk(KERN_ERR
			"XDMA Channel Error : %x  on port %d\n",
			eth_int_cause_error, port_num);

		printk(KERN_ERR
			"XDMA GDI Hardware error : %x  on port %d\n",
			TITAN_GE_READ(0x5008 + (port_num << 8)), port_num);

		printk(KERN_ERR
			"XDMA currently has %d Rx descriptors\n",
			TITAN_GE_READ(0x5048 + (port_num << 8)));

		printk(KERN_ERR
			"XDMA currently has prefetched %d Rx descriptors\n",
			TITAN_GE_READ(0x505c + (port_num << 8)));

		TITAN_GE_WRITE((TITAN_GE_CHANNEL0_INTERRUPT +
			       (port_num << 8)), eth_int_cause_error);
	}

	/*
	 * PHY interrupt to inform about the changes. Reading the
	 * PHY Status register will clear the interrupt
	 */
	if ((!(eth_int_cause1 & 0x30303)) &&
		(eth_int_cause_error == 0)) {
		err =
		    titan_ge_mdio_read(port_num,
			       TITAN_GE_MDIO_PHY_IS, &reg_data);

		if (reg_data & 0x0400) {
			/* Link status change */
			titan_ge_mdio_read(port_num,
				   TITAN_GE_MDIO_PHY_STATUS, &reg_data);
			if (!(reg_data & 0x0400)) {
				/* Link is down */
				netif_carrier_off(netdev);
				netif_stop_queue(netdev);
			} else {
				/* Link is up */
				netif_carrier_on(netdev);
				netif_wake_queue(netdev);

				/* Enable the queue */
				titan_ge_enable_tx(port_num);
			}
		}
	}

	return IRQ_HANDLED;
}
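
/*
 * Note on the XDMA cause/enable masks used above (derived from the
 * masks in this driver, not from a datasheet): each port owns one byte
 * of the register -- port 0 bits 7:0, port 1 bits 15:8, port 2 bits
 * 23:16 -- with bit 0 of the byte signalling Rx and bit 1 signalling
 * Tx.  Hence 0x10101 tests Rx on all three ports, 0x20202 tests Tx,
 * and 0x30303 tests both.
 */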

/*
 * Multicast and Promiscuous mode set. The
 * set_multi entry point is called whenever the
 * multicast address list or the network interface
 * flags are updated.
 */
static void titan_ge_set_multi(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	unsigned long reg_data;

	reg_data = TITAN_GE_READ(TITAN_GE_AFX_ADDRS_FILTER_CTRL_1 +
				(port_num << 12));

	if (netdev->flags & IFF_PROMISC) {
		reg_data |= 0x2;
	} else if (netdev->flags & IFF_ALLMULTI) {
		reg_data |= 0x01;
		reg_data |= 0x400; /* Use the 64-bit Multicast Hash bin */
	} else {
		reg_data = 0x2;
	}

	TITAN_GE_WRITE((TITAN_GE_AFX_ADDRS_FILTER_CTRL_1 +
			(port_num << 12)), reg_data);
	if (reg_data & 0x01) {
		TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_LOW +
				(port_num << 12)), 0xffff);
		TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_MIDLOW +
				(port_num << 12)), 0xffff);
		TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_MIDHI +
				(port_num << 12)), 0xffff);
		TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_HI +
				(port_num << 12)), 0xffff);
	}
}

/*
 * Open the network device
 */
static int titan_ge_open(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	unsigned int irq = TITAN_ETH_PORT_IRQ - port_num;
	int retval;

	retval = request_irq(irq, titan_ge_int_handler,
		     SA_INTERRUPT | SA_SAMPLE_RANDOM, netdev->name, netdev);

	if (retval != 0) {
		printk(KERN_ERR "Cannot assign IRQ number to TITAN GE\n");
		return retval;
	}

	netdev->irq = irq;
	printk(KERN_INFO "Assigned IRQ %d to port %d\n", irq, port_num);

	spin_lock_irq(&(titan_ge_eth->lock));

	if (titan_ge_eth_open(netdev) != TITAN_OK) {
		spin_unlock_irq(&(titan_ge_eth->lock));
		printk(KERN_ERR "%s: Error opening interface\n", netdev->name);
		free_irq(netdev->irq, netdev);
		return -EBUSY;
	}

	spin_unlock_irq(&(titan_ge_eth->lock));

	return 0;
}

/*
 * Allocate the SKBs for the Rx ring. Also used
 * for refilling the queue
 */
static int titan_ge_rx_task(struct net_device *netdev,
				titan_ge_port_info *titan_ge_port)
{
	struct device *device = &titan_ge_device[titan_ge_port->port_num]->dev;
	volatile titan_ge_rx_desc *rx_desc;
	struct sk_buff *skb;
	int rx_used_desc;
	int count = 0;

	while (titan_ge_port->rx_ring_skbs < titan_ge_port->rx_ring_size) {

		/* First try to get the skb from the recycler */
#ifdef TITAN_GE_JUMBO_FRAMES
		skb = titan_ge_alloc_skb(TITAN_GE_JUMBO_BUFSIZE, GFP_ATOMIC);
#else
		skb = titan_ge_alloc_skb(TITAN_GE_STD_BUFSIZE, GFP_ATOMIC);
#endif
		if (unlikely(!skb)) {
			/* OOM, set the flag */
			printk(KERN_ERR "OOM\n");
			oom_flag = 1;
			break;
		}
		count++;
		skb->dev = netdev;

		titan_ge_port->rx_ring_skbs++;

		rx_used_desc = titan_ge_port->rx_used_desc_q;
		rx_desc = &(titan_ge_port->rx_desc_area[rx_used_desc]);

#ifdef TITAN_GE_JUMBO_FRAMES
		rx_desc->buffer_addr = dma_map_single(device, skb->data,
				TITAN_GE_JUMBO_BUFSIZE - 2, DMA_FROM_DEVICE);
#else
		rx_desc->buffer_addr = dma_map_single(device, skb->data,
				TITAN_GE_STD_BUFSIZE - 2, DMA_FROM_DEVICE);
#endif

		titan_ge_port->rx_skb[rx_used_desc] = skb;
		rx_desc->cmd_sts = TITAN_GE_RX_BUFFER_OWNED;

		titan_ge_port->rx_used_desc_q =
			(rx_used_desc + 1) % TITAN_GE_RX_QUEUE;
	}

	return count;
}
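
/*
 * The count returned above is handed by the callers to the XDMA "Rx
 * descriptor count" register (0x5048 + (port << 8)) so that the
 * hardware knows how many fresh buffers were queued; see
 * titan_ge_port_start() and titan_ge_receive_queue().
 */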

/*
 * Actual init of the Titan GE port. There is one register for
 * the channel configuration
 */
static void titan_port_init(struct net_device *netdev,
			    titan_ge_port_info * titan_ge_eth)
{
	unsigned long reg_data;

	titan_ge_port_reset(titan_ge_eth->port_num);

	/* First reset the TMAC */
	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
	reg_data |= 0x80000000;
	TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);

	udelay(30);

	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
	reg_data &= ~(0xc0000000);
	TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);

	/* Now reset the RMAC */
	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
	reg_data |= 0x00080000;
	TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);

	udelay(30);

	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
	reg_data &= ~(0x000c0000);
	TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);
}

/*
 * Start the port. All the hardware specific configuration
 * for the XDMA, Tx FIFO, Rx FIFO, TMAC, RMAC, TRTG and AFX
 * go here
 */
static int titan_ge_port_start(struct net_device *netdev,
				titan_ge_port_info * titan_port)
{
	volatile unsigned long reg_data, reg_data1;
	int port_num = titan_port->port_num;
	int count = 0;
	unsigned long reg_data_1;

	if (config_done == 0) {
		reg_data = TITAN_GE_READ(0x0004);
		reg_data |= 0x100;
		TITAN_GE_WRITE(0x0004, reg_data);

		reg_data &= ~(0x100);
		TITAN_GE_WRITE(0x0004, reg_data);

		/* Turn on GMII/MII mode and turn off TBI mode */
		reg_data = TITAN_GE_READ(TITAN_GE_TSB_CTRL_1);
		reg_data |= 0x00000700;
		reg_data &= ~(0x00800000); /* Fencing */

		TITAN_GE_WRITE(0x000c, 0x00001100);

		TITAN_GE_WRITE(TITAN_GE_TSB_CTRL_1, reg_data);

		/* Set the CPU Resource Limit register */
		TITAN_GE_WRITE(0x00f8, 0x8);

		/* Be conservative when using the BIU buffers */
		TITAN_GE_WRITE(0x0068, 0x4);
	}

	titan_port->tx_threshold = 0;
	titan_port->rx_threshold = 0;

	/* We need to write the descriptors for Tx and Rx */
	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_TX_DESC + (port_num << 8)),
		       (unsigned long) titan_port->tx_dma);
	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_RX_DESC + (port_num << 8)),
		       (unsigned long) titan_port->rx_dma);

	if (config_done == 0) {
		/* Step 1:  XDMA config	*/
		reg_data = TITAN_GE_READ(TITAN_GE_XDMA_CONFIG);
		reg_data &= ~(0x80000000);      /* clear reset */
		reg_data |= 0x1 << 29;	/* sparse tx descriptor spacing */
		reg_data |= 0x1 << 28;	/* sparse rx descriptor spacing */
		reg_data |= (0x1 << 23) | (0x1 << 24);  /* Descriptor Coherency */
		reg_data |= (0x1 << 21) | (0x1 << 22);  /* Data Coherency */
		TITAN_GE_WRITE(TITAN_GE_XDMA_CONFIG, reg_data);
	}

	/* IR register for the XDMA */
	reg_data = TITAN_GE_READ(TITAN_GE_GDI_INTERRUPT_ENABLE + (port_num << 8));
	reg_data |= 0x80068000; /* No Rx_OOD */
	TITAN_GE_WRITE((TITAN_GE_GDI_INTERRUPT_ENABLE + (port_num << 8)), reg_data);

	/* Start the Tx and Rx XDMA controller */
	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG + (port_num << 8));
	reg_data &= 0x4fffffff;     /* Clear tx reset */
	reg_data &= 0xfff4ffff;     /* Clear rx reset */

#ifdef TITAN_GE_JUMBO_FRAMES
	reg_data |= 0xa0 | 0x30030000;
#else
	reg_data |= 0x40 | 0x20030000;
#endif

#ifndef CONFIG_SMP
	reg_data &= ~(0x10);
	reg_data |= 0x0f; /* All of the packet */
#endif

	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_CONFIG + (port_num << 8)), reg_data);

	/* Rx desc count */
	count = titan_ge_rx_task(netdev, titan_port);
	TITAN_GE_WRITE((0x5048 + (port_num << 8)), count);
	count = TITAN_GE_READ(0x5048 + (port_num << 8));

	udelay(30);

	/*
	 * Step 2:  Configure the SDQPF, i.e. FIFO
	 */
	if (config_done == 0) {
		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_RXFIFO_CTL);
		reg_data = 0x1;
		TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_CTL, reg_data);
		reg_data &= ~(0x1);
		TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_CTL, reg_data);
		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_RXFIFO_CTL);
		TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_CTL, reg_data);

		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_TXFIFO_CTL);
		reg_data = 0x1;
		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_CTL, reg_data);
		reg_data &= ~(0x1);
		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_CTL, reg_data);
		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_TXFIFO_CTL);
		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_CTL, reg_data);
	}
	/*
	 * Enable RX FIFO 0, 4 and 8
	 */
	if (port_num == 0) {
		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_RXFIFO_0);

		reg_data |= 0x100000;
		reg_data |= (0xff << 10);

		TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_0, reg_data);
		/*
		 * BAV2, BAV and DAV settings for the Rx FIFO
		 */
		reg_data1 = TITAN_GE_READ(0x4844);
		reg_data1 |= ((0x10 << 20) | (0x10 << 10) | 0x1);
		TITAN_GE_WRITE(0x4844, reg_data1);

		reg_data &= ~(0x00100000);
		reg_data |= 0x200000;

		TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_0, reg_data);

		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_TXFIFO_0);
		reg_data |= 0x100000;

		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_0, reg_data);

		reg_data |= (0xff << 10);

		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_0, reg_data);

		/*
		 * BAV2, BAV and DAV settings for the Tx FIFO
		 */
		reg_data1 = TITAN_GE_READ(0x4944);
		reg_data1 = ((0x1 << 20) | (0x1 << 10) | 0x10);

		TITAN_GE_WRITE(0x4944, reg_data1);

		reg_data &= ~(0x00100000);
		reg_data |= 0x200000;

		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_0, reg_data);
	}

	if (port_num == 1) {
		reg_data = TITAN_GE_READ(0x4870);

		reg_data |= 0x100000;
		reg_data |= (0xff << 10) | (0xff + 1);

		TITAN_GE_WRITE(0x4870, reg_data);
		/*
		 * BAV2, BAV and DAV settings for the Rx FIFO
		 */
		reg_data1 = TITAN_GE_READ(0x4874);
		reg_data1 |= ((0x10 << 20) | (0x10 << 10) | 0x1);
		TITAN_GE_WRITE(0x4874, reg_data1);

		reg_data &= ~(0x00100000);
		reg_data |= 0x200000;

		TITAN_GE_WRITE(0x4870, reg_data);

		reg_data = TITAN_GE_READ(0x494c);
		reg_data |= 0x100000;

		TITAN_GE_WRITE(0x494c, reg_data);
		reg_data |= (0xff << 10) | (0xff + 1);
		TITAN_GE_WRITE(0x494c, reg_data);

		/*
		 * BAV2, BAV and DAV settings for the Tx FIFO
		 */
		reg_data1 = TITAN_GE_READ(0x4950);
		reg_data1 = ((0x1 << 20) | (0x1 << 10) | 0x10);

		TITAN_GE_WRITE(0x4950, reg_data1);

		reg_data &= ~(0x00100000);
		reg_data |= 0x200000;

		TITAN_GE_WRITE(0x494c, reg_data);
	}

	/*
	 * The Titan 1.2 revision supports port #2
	 */
	if (port_num == 2) {
		/*
		 * Put the descriptors in the SRAM
		 */
		reg_data = TITAN_GE_READ(0x48a0);

		reg_data |= 0x100000;
		reg_data |= (0xff << 10) | (2*(0xff + 1));

		TITAN_GE_WRITE(0x48a0, reg_data);
		/*
		 * BAV2, BAV and DAV settings for the Rx FIFO
		 */
		reg_data1 = TITAN_GE_READ(0x48a4);
		reg_data1 |= ((0x10 << 20) | (0x10 << 10) | 0x1);
		TITAN_GE_WRITE(0x48a4, reg_data1);

		reg_data &= ~(0x00100000);
		reg_data |= 0x200000;

		TITAN_GE_WRITE(0x48a0, reg_data);

		reg_data = TITAN_GE_READ(0x4958);
		reg_data |= 0x100000;

		TITAN_GE_WRITE(0x4958, reg_data);
		reg_data |= (0xff << 10) | (2*(0xff + 1));
		TITAN_GE_WRITE(0x4958, reg_data);

		/*
		 * BAV2, BAV and DAV settings for the Tx FIFO
		 */
		reg_data1 = TITAN_GE_READ(0x495c);
		reg_data1 = ((0x1 << 20) | (0x1 << 10) | 0x10);

		TITAN_GE_WRITE(0x495c, reg_data1);

		reg_data &= ~(0x00100000);
		reg_data |= 0x200000;

		TITAN_GE_WRITE(0x4958, reg_data);
	}


	/*
	 * Step 3:  TRTG block enable
	 */
	reg_data = TITAN_GE_READ(TITAN_GE_TRTG_CONFIG + (port_num << 12));

	/*
	 * This is the 1.2 revision of the chip. It has a fix for the
	 * IP header alignment. Now, the IP header begins at an
	 * aligned address and this won't need an extra copy in the
	 * driver. This performance drawback existed in the previous
	 * versions of the silicon
	 */
	reg_data_1 = TITAN_GE_READ(0x103c + (port_num << 12));
	reg_data_1 |= 0x40000000;
	TITAN_GE_WRITE((0x103c + (port_num << 12)), reg_data_1);

	reg_data_1 |= 0x04000000;
	TITAN_GE_WRITE((0x103c + (port_num << 12)), reg_data_1);

	mdelay(5);

	reg_data_1 &= ~(0x04000000);
	TITAN_GE_WRITE((0x103c + (port_num << 12)), reg_data_1);

	mdelay(5);

	reg_data |= 0x0001;
	TITAN_GE_WRITE((TITAN_GE_TRTG_CONFIG + (port_num << 12)), reg_data);

	/*
	 * Step 4:  Start the Tx activity
	 */
	TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_2 + (port_num << 12)), 0xe197);
#ifdef TITAN_GE_JUMBO_FRAMES
	TITAN_GE_WRITE((0x1258 + (port_num << 12)), 0x4000);
#endif
	reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 + (port_num << 12));
	reg_data |= 0x0001;	/* Enable TMAC */
	reg_data |= 0x6c70;	/* PAUSE also set */

	TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 + (port_num << 12)), reg_data);

	udelay(30);

	/* Destination Address drop bit */
	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_2 + (port_num << 12));
	reg_data |= 0x218;        /* DA_DROP bit and pause */
	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_2 + (port_num << 12)), reg_data);

	TITAN_GE_WRITE((0x1218 + (port_num << 12)), 0x3);

#ifdef TITAN_GE_JUMBO_FRAMES
	TITAN_GE_WRITE((0x1208 + (port_num << 12)), 0x4000);
#endif
	/* Start the Rx activity */
	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 + (port_num << 12));
	reg_data |= 0x0001;	/* RMAC Enable */
	reg_data |= 0x0010;	/* CRC Check enable */
	reg_data |= 0x0040;	/* Min Frame check enable */
	reg_data |= 0x4400;	/* Max Frame check enable */

	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 + (port_num << 12)), reg_data);

	udelay(30);

	/*
	 * Enable the Interrupts for Tx and Rx
	 */
	reg_data1 = TITAN_GE_READ(TITAN_GE_INTR_XDMA_IE);

	if (port_num == 0) {
		reg_data1 |= 0x3;
#ifdef CONFIG_SMP
		TITAN_GE_WRITE(0x0038, 0x003);
#else
		TITAN_GE_WRITE(0x0038, 0x303);
#endif
	}

	if (port_num == 1)
		reg_data1 |= 0x300;

	if (port_num == 2)
		reg_data1 |= 0x30000;

	TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, reg_data1);
	TITAN_GE_WRITE(0x003c, 0x300);

	if (config_done == 0) {
		TITAN_GE_WRITE(0x0024, 0x04000024);	/* IRQ vector */
		TITAN_GE_WRITE(0x0020, 0x000fb000);	/* INTMSG base */
	}

	/* Priority */
	reg_data = TITAN_GE_READ(0x1038 + (port_num << 12));
	reg_data &= ~(0x00f00000);
	TITAN_GE_WRITE((0x1038 + (port_num << 12)), reg_data);

	/* Step 5:  GMII config */
	titan_ge_gmii_config(port_num);

	if (config_done == 0) {
		TITAN_GE_WRITE(0x1a80, 0);
		config_done = 1;
	}

	return TITAN_OK;
}

/*
 * Function to queue the packet for the Ethernet device
 */
static void titan_ge_tx_queue(titan_ge_port_info * titan_ge_eth,
				struct sk_buff * skb)
{
	struct device *device = &titan_ge_device[titan_ge_eth->port_num]->dev;
	unsigned int curr_desc = titan_ge_eth->tx_curr_desc_q;
	volatile titan_ge_tx_desc *tx_curr;
	int port_num = titan_ge_eth->port_num;

	tx_curr = &(titan_ge_eth->tx_desc_area[curr_desc]);
	tx_curr->buffer_addr =
		dma_map_single(device, skb->data, skb_headlen(skb),
			       DMA_TO_DEVICE);

	titan_ge_eth->tx_skb[curr_desc] = (struct sk_buff *) skb;
	tx_curr->buffer_len = skb_headlen(skb);

	/* Last descriptor enables interrupt and changes ownership */
	tx_curr->cmd_sts = 0x1 | (1 << 15) | (1 << 5);

	/* Kick the XDMA to start the transfer from memory to the FIFO */
	TITAN_GE_WRITE((0x5044 + (port_num << 8)), 0x1);

	/* Current descriptor updated */
	titan_ge_eth->tx_curr_desc_q = (curr_desc + 1) % TITAN_GE_TX_QUEUE;

	/* Prefetch the next descriptor */
	prefetch((const void *)
		 &titan_ge_eth->tx_desc_area[titan_ge_eth->tx_curr_desc_q]);
}
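
/*
 * The matching cleanup happens in titan_ge_return_tx_desc(), which
 * walks tx_used_desc_q behind tx_curr_desc_q and frees the skb saved
 * in tx_skb[] above; it is driven from titan_ge_free_tx_queue() on Tx
 * interrupts and on the Tx/Rx thresholds.
 */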

/*
 * Actually does the open of the Ethernet device
 */
static int titan_ge_eth_open(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	struct device *device = &titan_ge_device[port_num]->dev;
	unsigned long reg_data;
	unsigned int phy_reg;
	int err = 0;

	/* Stop the Rx activity */
	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 + (port_num << 12));
	reg_data &= ~(0x00000001);
	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 + (port_num << 12)), reg_data);

	/* Clear the port interrupts */
	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_INTERRUPT + (port_num << 8)), 0x0);

	if (config_done == 0) {
		TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0);
		TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_B, 0);
	}

	/* Set the MAC Address */
	memcpy(titan_ge_eth->port_mac_addr, netdev->dev_addr, 6);

	if (config_done == 0)
		titan_port_init(netdev, titan_ge_eth);

	titan_ge_update_afx(titan_ge_eth);

	/* Allocate the Tx ring now */
	titan_ge_eth->tx_ring_skbs = 0;
	titan_ge_eth->tx_ring_size = TITAN_GE_TX_QUEUE;

	/* Allocate space in the SRAM for the descriptors */
	titan_ge_eth->tx_desc_area = (titan_ge_tx_desc *)
		(titan_ge_sram + TITAN_TX_RING_BYTES * port_num);
	titan_ge_eth->tx_dma = TITAN_SRAM_BASE + TITAN_TX_RING_BYTES * port_num;

	if (!titan_ge_eth->tx_desc_area) {
		printk(KERN_ERR
		       "%s: Cannot allocate Tx Ring (size %d bytes) for port %d\n",
		       netdev->name, TITAN_TX_RING_BYTES, port_num);
		return -ENOMEM;
	}

	memset(titan_ge_eth->tx_desc_area, 0, TITAN_TX_RING_BYTES);

	/* Now initialize the Tx descriptor ring */
	titan_ge_init_tx_desc_ring(titan_ge_eth,
				   titan_ge_eth->tx_ring_size,
				   (unsigned long) titan_ge_eth->tx_desc_area,
				   (unsigned long) titan_ge_eth->tx_dma);

	/* Allocate the Rx ring now */
	titan_ge_eth->rx_ring_size = TITAN_GE_RX_QUEUE;
	titan_ge_eth->rx_ring_skbs = 0;

	titan_ge_eth->rx_desc_area =
		(titan_ge_rx_desc *)(titan_ge_sram + 0x1000 + TITAN_RX_RING_BYTES * port_num);

	titan_ge_eth->rx_dma = TITAN_SRAM_BASE + 0x1000 + TITAN_RX_RING_BYTES * port_num;

	if (!titan_ge_eth->rx_desc_area) {
		printk(KERN_ERR "%s: Cannot allocate Rx Ring (size %d bytes)\n",
		       netdev->name, TITAN_RX_RING_BYTES);

		printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
		       netdev->name);

		dma_free_coherent(device, titan_ge_eth->tx_desc_area_size,
				    (void *) titan_ge_eth->tx_desc_area,
				    titan_ge_eth->tx_dma);

		return -ENOMEM;
	}

	memset(titan_ge_eth->rx_desc_area, 0, TITAN_RX_RING_BYTES);

	/* Now initialize the Rx ring */
#ifdef TITAN_GE_JUMBO_FRAMES
	if ((titan_ge_init_rx_desc_ring
	    (titan_ge_eth, titan_ge_eth->rx_ring_size, TITAN_GE_JUMBO_BUFSIZE,
	     (unsigned long) titan_ge_eth->rx_desc_area, 0,
	      (unsigned long) titan_ge_eth->rx_dma)) == 0)
#else
	if ((titan_ge_init_rx_desc_ring
	     (titan_ge_eth, titan_ge_eth->rx_ring_size, TITAN_GE_STD_BUFSIZE,
	      (unsigned long) titan_ge_eth->rx_desc_area, 0,
	      (unsigned long) titan_ge_eth->rx_dma)) == 0)
#endif
		panic("%s: Error initializing RX Ring\n", netdev->name);

	/* Fill the Rx ring with the SKBs */
	titan_ge_port_start(netdev, titan_ge_eth);

	/*
	 * Check if Interrupt Coalescing needs to be turned on. The
	 * value specified in the register is multiplied by
	 * (8 x 64 nanoseconds) to determine when an interrupt should
	 * be sent to the CPU.
	 */
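
	/*
	 * Worked example (using the 8 x 64 ns unit above): a coalescing
	 * value of 200 delays the interrupt by 200 * 512 ns = 102.4 us.
	 */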

	if (TITAN_GE_TX_COAL) {
		titan_ge_eth->tx_int_coal =
		    titan_ge_tx_coal(TITAN_GE_TX_COAL, port_num);
	}

	err = titan_ge_mdio_read(port_num, TITAN_GE_MDIO_PHY_STATUS, &phy_reg);
	if (err == TITAN_GE_MDIO_ERROR) {
		printk(KERN_ERR
		       "Could not read PHY control register 0x11\n");
		return TITAN_ERROR;
	}
	if (!(phy_reg & 0x0400)) {
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
		return TITAN_ERROR;
	} else {
		netif_carrier_on(netdev);
		netif_start_queue(netdev);
	}

	return TITAN_OK;
}

/*
 * Queue the packet for Tx. Currently no support for zero copy,
 * checksum offload and Scatter Gather. The chip does support
 * Scatter Gather only. But that won't help here since zero copy
 * requires support for Tx checksumming also.
 */
int titan_ge_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned long flags;
	struct net_device_stats *stats;

	stats = &titan_ge_eth->stats;
	spin_lock_irqsave(&titan_ge_eth->lock, flags);

	if ((TITAN_GE_TX_QUEUE - titan_ge_eth->tx_ring_skbs) <=
	    (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
		printk(KERN_ERR "Tx OOD\n");
		return 1;
	}

	titan_ge_tx_queue(titan_ge_eth, skb);
	titan_ge_eth->tx_ring_skbs++;

	if (TITAN_GE_TX_QUEUE <= (titan_ge_eth->tx_ring_skbs + 4)) {
		spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
		titan_ge_free_tx_queue(titan_ge_eth);
		spin_lock_irqsave(&titan_ge_eth->lock, flags);
	}

	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	spin_unlock_irqrestore(&titan_ge_eth->lock, flags);

	netdev->trans_start = jiffies;

	return 0;
}

/*
 * Actually does the Rx. Rx side checksumming supported.
 */
static int titan_ge_rx(struct net_device *netdev, int port_num,
			titan_ge_port_info * titan_ge_port,
		       titan_ge_packet * packet)
{
	int rx_curr_desc, rx_used_desc;
	volatile titan_ge_rx_desc *rx_desc;

	rx_curr_desc = titan_ge_port->rx_curr_desc_q;
	rx_used_desc = titan_ge_port->rx_used_desc_q;

	if (((rx_curr_desc + 1) % TITAN_GE_RX_QUEUE) == rx_used_desc)
		return TITAN_ERROR;

	rx_desc = &(titan_ge_port->rx_desc_area[rx_curr_desc]);

	if (rx_desc->cmd_sts & TITAN_GE_RX_BUFFER_OWNED)
		return TITAN_ERROR;

	packet->skb = titan_ge_port->rx_skb[rx_curr_desc];
	packet->len = (rx_desc->cmd_sts & 0x7fff);

	/*
	 * At this point, we don't know if the checksumming
	 * actually helps relieve the CPU. So, keep it for
	 * port 0 only
	 */
	packet->checksum = ntohs((rx_desc->buffer & 0xffff0000) >> 16);
	packet->cmd_sts = rx_desc->cmd_sts;

	titan_ge_port->rx_curr_desc_q = (rx_curr_desc + 1) % TITAN_GE_RX_QUEUE;

	/* Prefetch the next descriptor */
	prefetch((const void *)
	       &titan_ge_port->rx_desc_area[titan_ge_port->rx_curr_desc_q + 1]);

	return TITAN_OK;
}

/*
 * Free the Tx queue of the used SKBs
 */
static int titan_ge_free_tx_queue(titan_ge_port_info *titan_ge_eth)
{
	unsigned long flags;

	/* Take the lock */
	spin_lock_irqsave(&(titan_ge_eth->lock), flags);

	while (titan_ge_return_tx_desc(titan_ge_eth, titan_ge_eth->port_num) == 0)
		if (titan_ge_eth->tx_ring_skbs != 1)
			titan_ge_eth->tx_ring_skbs--;

	spin_unlock_irqrestore(&titan_ge_eth->lock, flags);

	return TITAN_OK;
}

/*
 * Threshold beyond which we do the cleaning of
 * Tx queue and new allocation for the Rx
 * queue
 */
#define	TX_THRESHOLD	4
#define	RX_THRESHOLD	10

/*
 * Receive the packets and hand them to the kernel.
 */
static int titan_ge_receive_queue(struct net_device *netdev, unsigned int max)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	titan_ge_packet packet;
	struct net_device_stats *stats;
	struct sk_buff *skb;
	unsigned long received_packets = 0;
	unsigned int ack;

	stats = &titan_ge_eth->stats;

	while ((--max)
	       && (titan_ge_rx(netdev, port_num, titan_ge_eth, &packet) == TITAN_OK)) {
		skb = (struct sk_buff *) packet.skb;

		titan_ge_eth->rx_ring_skbs--;

		if (--titan_ge_eth->rx_work_limit < 0)
			break;
		received_packets++;

		stats->rx_packets++;
		stats->rx_bytes += packet.len;

		if ((packet.cmd_sts & TITAN_GE_RX_PERR) ||
			(packet.cmd_sts & TITAN_GE_RX_OVERFLOW_ERROR) ||
			(packet.cmd_sts & TITAN_GE_RX_TRUNC) ||
			(packet.cmd_sts & TITAN_GE_RX_CRC_ERROR)) {
				stats->rx_dropped++;
				dev_kfree_skb_any(skb);

				continue;
		}
		/*
		 * Either support fast path or slow path. Decision
		 * making can really slow down the performance. The
		 * idea is to cut down the number of checks and improve
		 * the fast path.
		 */

		skb_put(skb, packet.len - 2);

		/*
		 * Increment the data pointer by two since that's where
		 * the MAC header starts
		 */
		skb_reserve(skb, 2);
		skb->protocol = eth_type_trans(skb, netdev);
		netif_receive_skb(skb);

		if (titan_ge_eth->rx_threshold > RX_THRESHOLD) {
			ack = titan_ge_rx_task(netdev, titan_ge_eth);
			TITAN_GE_WRITE((0x5048 + (port_num << 8)), ack);
			titan_ge_eth->rx_threshold = 0;
		} else
			titan_ge_eth->rx_threshold++;

		if (titan_ge_eth->tx_threshold > TX_THRESHOLD) {
			titan_ge_eth->tx_threshold = 0;
			titan_ge_free_tx_queue(titan_ge_eth);
		} else
			titan_ge_eth->tx_threshold++;

	}
	return received_packets;
}

/*
 * Enable the Rx side interrupts
 */
static void titan_ge_enable_int(unsigned int port_num,
			titan_ge_port_info *titan_ge_eth,
			struct net_device *netdev)
{
	unsigned long reg_data = TITAN_GE_READ(TITAN_GE_INTR_XDMA_IE);

	if (port_num == 0)
		reg_data |= 0x3;
	if (port_num == 1)
		reg_data |= 0x300;
	if (port_num == 2)
		reg_data |= 0x30000;

	/* Re-enable interrupts */
	TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, reg_data);
}

/*
 * Main function to handle the polling for Rx side NAPI.
 * Receive interrupts have been disabled at this point.
 * The poll schedules the transmit followed by receive.
 */
static int titan_ge_poll(struct net_device *netdev, int *budget)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	int port_num = titan_ge_eth->port_num;
	int work_done = 0;
	unsigned long flags, status;

	titan_ge_eth->rx_work_limit = *budget;
	if (titan_ge_eth->rx_work_limit > netdev->quota)
		titan_ge_eth->rx_work_limit = netdev->quota;

	do {
		/* Do the transmit cleaning work here */
		titan_ge_free_tx_queue(titan_ge_eth);

		/* Ack the Rx interrupts */
		if (port_num == 0)
			TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0x3);
		if (port_num == 1)
			TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0x300);
		if (port_num == 2)
			TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0x30000);

		work_done += titan_ge_receive_queue(netdev, 0);

		/* Out of quota and there is work to be done */
		if (titan_ge_eth->rx_work_limit < 0)
			goto not_done;

		/* Receive alloc_skb could lead to OOM */
		if (oom_flag == 1) {
			oom_flag = 0;
			goto oom;
		}

		status = TITAN_GE_READ(TITAN_GE_INTR_XDMA_CORE_A);
	} while (status & 0x30300);

	/* If we are here, then no more interrupts to process */
	goto done;

not_done:
	*budget -= work_done;
	netdev->quota -= work_done;
	return 1;

oom:
	printk(KERN_ERR "OOM\n");
	netif_rx_complete(netdev);
	return 0;

done:
	/*
	 * No more packets on the poll list. Turn the interrupts
	 * back on and we should be able to catch the new
	 * packets in the interrupt handler
	 */
	if (!work_done)
		work_done = 1;

	*budget -= work_done;
	netdev->quota -= work_done;

	spin_lock_irqsave(&titan_ge_eth->lock, flags);

	/* Remove us from the poll list */
	netif_rx_complete(netdev);

	/* Re-enable interrupts */
	titan_ge_enable_int(port_num, titan_ge_eth, netdev);

	spin_unlock_irqrestore(&titan_ge_eth->lock, flags);

	return 0;
}

/*
 * Close the network device
 */
int titan_ge_stop(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);

	spin_lock_irq(&(titan_ge_eth->lock));
	titan_ge_eth_stop(netdev);
	free_irq(netdev->irq, netdev);
	spin_unlock_irq(&titan_ge_eth->lock);

	return TITAN_OK;
}

/*
 * Free the Tx ring
 */
static void titan_ge_free_tx_rings(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	unsigned int curr;
	unsigned long reg_data;

	/* Stop the Tx DMA */
	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG +
				(port_num << 8));
	reg_data |= 0xc0000000;
	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_CONFIG +
			(port_num << 8)), reg_data);

	/* Disable the TMAC */
	reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 +
				(port_num << 12));
	reg_data &= ~(0x00000001);
	TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 +
			(port_num << 12)), reg_data);

	for (curr = 0;
	     (titan_ge_eth->tx_ring_skbs) && (curr < TITAN_GE_TX_QUEUE);
	     curr++) {
		if (titan_ge_eth->tx_skb[curr]) {
			dev_kfree_skb(titan_ge_eth->tx_skb[curr]);
			titan_ge_eth->tx_ring_skbs--;
		}
	}

	if (titan_ge_eth->tx_ring_skbs != 0)
		printk(KERN_ERR
		       "%s: Error on Tx descriptor free - could not free %d"
		       " descriptors\n", netdev->name,
		       titan_ge_eth->tx_ring_skbs);

#ifndef TITAN_RX_RING_IN_SRAM
	dma_free_coherent(&titan_ge_device[port_num]->dev,
			  titan_ge_eth->tx_desc_area_size,
			  (void *) titan_ge_eth->tx_desc_area,
			  titan_ge_eth->tx_dma);
#endif
}

/*
 * Free the Rx ring
 */
static void titan_ge_free_rx_rings(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	unsigned int curr;
	unsigned long reg_data;

	/* Stop the Rx DMA */
	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG +
				(port_num << 8));
	reg_data |= 0x000c0000;
	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_CONFIG +
			(port_num << 8)), reg_data);

	/* Disable the RMAC */
	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 +
				(port_num << 12));
	reg_data &= ~(0x00000001);
	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 +
			(port_num << 12)), reg_data);

	for (curr = 0;
	     titan_ge_eth->rx_ring_skbs && (curr < TITAN_GE_RX_QUEUE);
	     curr++) {
		if (titan_ge_eth->rx_skb[curr]) {
			dev_kfree_skb(titan_ge_eth->rx_skb[curr]);
			titan_ge_eth->rx_ring_skbs--;
		}
	}

	if (titan_ge_eth->rx_ring_skbs != 0)
		printk(KERN_ERR
		       "%s: Error in freeing Rx Ring. %d skb's still"
		       " stuck in RX Ring - ignoring them\n", netdev->name,
		       titan_ge_eth->rx_ring_skbs);

#ifndef TITAN_RX_RING_IN_SRAM
	dma_free_coherent(&titan_ge_device[port_num]->dev,
			  titan_ge_eth->rx_desc_area_size,
			  (void *) titan_ge_eth->rx_desc_area,
			  titan_ge_eth->rx_dma);
#endif
}

/*
 * Actually does the stop of the Ethernet device
 */
static void titan_ge_eth_stop(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);

	netif_stop_queue(netdev);

	titan_ge_port_reset(titan_ge_eth->port_num);

	titan_ge_free_tx_rings(netdev);
	titan_ge_free_rx_rings(netdev);

	/* Disable the Tx and Rx Interrupts for all channels */
	TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, 0x0);
}

/*
 * Update the MAC address. Note that we have to write the
 * address in three station registers, 16 bits each. And this
 * has to be done for TMAC and RMAC
 */
static void titan_ge_update_mac_address(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
	unsigned int port_num = titan_ge_eth->port_num;
	u8 p_addr[6];

	memcpy(titan_ge_eth->port_mac_addr, netdev->dev_addr, 6);
	memcpy(p_addr, netdev->dev_addr, 6);

	/* Update the Address Filtering Match tables */
	titan_ge_update_afx(titan_ge_eth);

	printk(KERN_INFO "Station MAC : %02x:%02x:%02x:%02x:%02x:%02x\n",
		p_addr[0], p_addr[1], p_addr[2],
		p_addr[3], p_addr[4], p_addr[5]);

	/* Set the MAC address here for TMAC and RMAC */
	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_HI + (port_num << 12)),
		       ((p_addr[5] << 8) | p_addr[4]));
	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_MID + (port_num << 12)),
		       ((p_addr[3] << 8) | p_addr[2]));
	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_LOW + (port_num << 12)),
		       ((p_addr[1] << 8) | p_addr[0]));

	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_HI + (port_num << 12)),
		       ((p_addr[5] << 8) | p_addr[4]));
	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_MID + (port_num << 12)),
		       ((p_addr[3] << 8) | p_addr[2]));
	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_LOW + (port_num << 12)),
		       ((p_addr[1] << 8) | p_addr[0]));
}
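
/*
 * Worked example for the register split above (illustrative address):
 * for 00:11:22:33:44:55 stored as dev_addr[0..5], the three 16-bit
 * station registers receive LOW = 0x1100, MID = 0x3322 and HI = 0x5544.
 */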

/*
 * Set the MAC address of the Ethernet device
 */
static int titan_ge_set_mac_address(struct net_device *dev, void *addr)
{
	titan_ge_port_info *tp = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&tp->lock);
	titan_ge_update_mac_address(dev);
	spin_unlock_irq(&tp->lock);

	return 0;
}

/*
 * Get the Ethernet device stats
 */
static struct net_device_stats *titan_ge_get_stats(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);

	return &titan_ge_eth->stats;
}

/*
 * Initialize the Rx descriptor ring for the Titan GE
 */
static int titan_ge_init_rx_desc_ring(titan_ge_port_info * titan_eth_port,
				      int rx_desc_num,
				      int rx_buff_size,
				      unsigned long rx_desc_base_addr,
				      unsigned long rx_buff_base_addr,
				      unsigned long rx_dma)
{
	volatile titan_ge_rx_desc *rx_desc;
	unsigned long buffer_addr;
	int index;
	unsigned long titan_ge_rx_desc_bus = rx_dma;

	buffer_addr = rx_buff_base_addr;
	rx_desc = (titan_ge_rx_desc *) rx_desc_base_addr;

	/* Check alignment */
	if (rx_buff_base_addr & 0xF)
		return 0;

	/* Check Rx buffer size */
	if ((rx_buff_size < 8) || (rx_buff_size > TITAN_GE_MAX_RX_BUFFER))
		return 0;

	/* 64-bit alignment
	if ((rx_buff_base_addr + rx_buff_size) & 0x7)
		return 0; */

	/* Initialize the Rx desc ring */
	for (index = 0; index < rx_desc_num; index++) {
		titan_ge_rx_desc_bus += sizeof(titan_ge_rx_desc);
		rx_desc[index].cmd_sts = 0;
		rx_desc[index].buffer_addr = buffer_addr;
		titan_eth_port->rx_skb[index] = NULL;
		buffer_addr += rx_buff_size;
	}

	titan_eth_port->rx_curr_desc_q = 0;
	titan_eth_port->rx_used_desc_q = 0;

	titan_eth_port->rx_desc_area = (titan_ge_rx_desc *) rx_desc_base_addr;
	titan_eth_port->rx_desc_area_size =
	    rx_desc_num * sizeof(titan_ge_rx_desc);

	titan_eth_port->rx_dma = rx_dma;

	return TITAN_OK;
}

/*
 * Initialize the Tx descriptor ring. Descriptors in the SRAM
 */
static int titan_ge_init_tx_desc_ring(titan_ge_port_info * titan_ge_port,
				      int tx_desc_num,
				      unsigned long tx_desc_base_addr,
				      unsigned long tx_dma)
{
	titan_ge_tx_desc *tx_desc;
	int index;
	unsigned long titan_ge_tx_desc_bus = tx_dma;

	if (tx_desc_base_addr & 0xF)
		return 0;

	tx_desc = (titan_ge_tx_desc *) tx_desc_base_addr;

	for (index = 0; index < tx_desc_num; index++) {
		titan_ge_port->tx_dma_array[index] =
		    (dma_addr_t) titan_ge_tx_desc_bus;
		titan_ge_tx_desc_bus += sizeof(titan_ge_tx_desc);
		tx_desc[index].cmd_sts = 0x0000;
		tx_desc[index].buffer_len = 0;
		tx_desc[index].buffer_addr = 0x00000000;
		titan_ge_port->tx_skb[index] = NULL;
	}

	titan_ge_port->tx_curr_desc_q = 0;
	titan_ge_port->tx_used_desc_q = 0;

	titan_ge_port->tx_desc_area = (titan_ge_tx_desc *) tx_desc_base_addr;
	titan_ge_port->tx_desc_area_size =
	    tx_desc_num * sizeof(titan_ge_tx_desc);

	titan_ge_port->tx_dma = tx_dma;
	return TITAN_OK;
}

/*
 * Initialize the device as an Ethernet device
 */
static int __init titan_ge_probe(struct device *device)
{
	titan_ge_port_info *titan_ge_eth;
	struct net_device *netdev;
	int port = to_platform_device(device)->id;
	int err;

	netdev = alloc_etherdev(sizeof(titan_ge_port_info));
	if (!netdev) {
		err = -ENOMEM;
		goto out;
	}

	netdev->open = titan_ge_open;
	netdev->stop = titan_ge_stop;
	netdev->hard_start_xmit = titan_ge_start_xmit;
	netdev->get_stats = titan_ge_get_stats;
	netdev->set_multicast_list = titan_ge_set_multi;
	netdev->set_mac_address = titan_ge_set_mac_address;

	/* Tx timeout */
	netdev->tx_timeout = titan_ge_tx_timeout;
	netdev->watchdog_timeo = 2 * HZ;

	/* Set these to very high values */
	netdev->poll = titan_ge_poll;
	netdev->weight = 64;

	netdev->tx_queue_len = TITAN_GE_TX_QUEUE;
	netif_carrier_off(netdev);
	netdev->base_addr = 0;

	netdev->change_mtu = titan_ge_change_mtu;

	titan_ge_eth = netdev_priv(netdev);
	/* Allocation of memory for the driver structures */

	titan_ge_eth->port_num = port;

	/* Configure the Tx timeout handler */
	INIT_WORK(&titan_ge_eth->tx_timeout_task,
		  (void (*)(void *)) titan_ge_tx_timeout_task, netdev);

	spin_lock_init(&titan_ge_eth->lock);

	/* set MAC addresses */
	memcpy(netdev->dev_addr, titan_ge_mac_addr_base, 6);
	netdev->dev_addr[5] += port;

	err = register_netdev(netdev);

	if (err)
		goto out_free_netdev;

	printk(KERN_NOTICE
	       "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	       netdev->name, port, netdev->dev_addr[0],
	       netdev->dev_addr[1], netdev->dev_addr[2],
	       netdev->dev_addr[3], netdev->dev_addr[4],
	       netdev->dev_addr[5]);

	printk(KERN_NOTICE "Rx NAPI supported, Tx Coalescing ON\n");

	return 0;

out_free_netdev:
	free_netdev(netdev);

out:
	return err;
}

static void __devexit titan_device_remove(struct device *device)
{
}

/*
 * Reset the Ethernet port
 */
static void titan_ge_port_reset(unsigned int port_num)
{
	unsigned int reg_data;

	/* Stop the Tx port activity */
	reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 +
				(port_num << 12));
	reg_data &= ~(0x0001);
	TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 +
			(port_num << 12)), reg_data);

	/* Stop the Rx port activity */
	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 +
				(port_num << 12));
	reg_data &= ~(0x0001);
	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 +
			(port_num << 12)), reg_data);
}

/*
 * Return the Tx desc after use by the XDMA
 */
static int titan_ge_return_tx_desc(titan_ge_port_info * titan_ge_eth, int port)
{
	int tx_desc_used;
	struct sk_buff *skb;

	tx_desc_used = titan_ge_eth->tx_used_desc_q;

	/* return right away */
	if (tx_desc_used == titan_ge_eth->tx_curr_desc_q)
		return TITAN_ERROR;

	/* Now the critical stuff */
	skb = titan_ge_eth->tx_skb[tx_desc_used];

	dev_kfree_skb_any(skb);

	titan_ge_eth->tx_skb[tx_desc_used] = NULL;
	titan_ge_eth->tx_used_desc_q =
	    (tx_desc_used + 1) % TITAN_GE_TX_QUEUE;

	return 0;
}

/*
 * Coalescing for the Tx path
 */
static unsigned long titan_ge_tx_coal(unsigned long delay, int port)
{
	unsigned long rx_delay;

	rx_delay = TITAN_GE_READ(TITAN_GE_INT_COALESCING);
	delay = (delay << 16) | rx_delay;

	TITAN_GE_WRITE(TITAN_GE_INT_COALESCING, delay);
	TITAN_GE_WRITE(0x5038, delay);

	return delay;
}
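
/*
 * Illustrative call (hypothetical delay value): titan_ge_tx_coal(200, 0)
 * reads the current Rx delay from TITAN_GE_INT_COALESCING, packs 200
 * into bits 31:16 alongside it and mirrors the result to register
 * 0x5038; with the 8 x 64 ns unit described in titan_ge_eth_open()
 * that is roughly 102 us of Tx interrupt coalescing.
 */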

static struct device_driver titan_soc_driver = {
	.name   = titan_string,
	.bus    = &platform_bus_type,
	.probe  = titan_ge_probe,
	.remove = __devexit_p(titan_device_remove),
};

static void titan_platform_release(struct device *device)
{
	struct platform_device *pldev;

	/* free device */
	pldev = to_platform_device(device);
	kfree(pldev);
}

/*
 * Register the Titan GE with the kernel
 */
static int __init titan_ge_init_module(void)
{
	struct platform_device *pldev;
	unsigned int version, device;
	int i;

	printk(KERN_NOTICE
	       "PMC-Sierra TITAN 10/100/1000 Ethernet Driver\n");

	titan_ge_base = (unsigned long) ioremap(TITAN_GE_BASE, TITAN_GE_SIZE);
	if (!titan_ge_base) {
		printk(KERN_ERR "Mapping Titan GE failed\n");
		goto out;
	}

	device = TITAN_GE_READ(TITAN_GE_DEVICE_ID);
	version = (device & 0x000f0000) >> 16;
	device &= 0x0000ffff;

	printk(KERN_NOTICE "Device Id : %x,  Version : %x\n", device, version);

#ifdef TITAN_RX_RING_IN_SRAM
	titan_ge_sram = (unsigned long) ioremap(TITAN_SRAM_BASE,
						TITAN_SRAM_SIZE);
	if (!titan_ge_sram) {
		printk(KERN_ERR "Mapping Titan SRAM failed\n");
		goto out_unmap_ge;
	}
#endif

	if (driver_register(&titan_soc_driver)) {
		printk(KERN_ERR "Driver registration failed\n");
		goto out_unmap_sram;
	}

	for (i = 0; i < 3; i++) {
		titan_ge_device[i] = NULL;

		if (!(pldev = kzalloc(sizeof(*pldev), GFP_KERNEL)))
			continue;

		pldev->name		= titan_string;
		pldev->id		= i;
		pldev->dev.release	= titan_platform_release;
		titan_ge_device[i]	= pldev;

		if (platform_device_register(pldev)) {
			kfree(pldev);
			titan_ge_device[i] = NULL;
			continue;
		}

		if (!pldev->dev.driver) {
			/*
			 * The driver was not bound to this device, there was
			 * no hardware at this address. Unregister it, as the
			 * release function will take care of freeing the
			 * allocated structure
			 */
			titan_ge_device[i] = NULL;
			platform_device_unregister(pldev);
		}
	}

	return 0;

out_unmap_sram:
	iounmap((void *)titan_ge_sram);

out_unmap_ge:
	iounmap((void *)titan_ge_base);

out:
	return -ENOMEM;
}

/*
 * Unregister the Titan GE from the kernel
 */
static void __exit titan_ge_cleanup_module(void)
{
	int i;

	driver_unregister(&titan_soc_driver);

	for (i = 0; i < 3; i++) {
		if (titan_ge_device[i]) {
			platform_device_unregister(titan_ge_device[i]);
			titan_ge_device[i] = NULL;
		}
	}

	iounmap((void *)titan_ge_sram);
	iounmap((void *)titan_ge_base);
}

MODULE_AUTHOR("Manish Lachwani <lachwani@pmc-sierra.com>");
MODULE_DESCRIPTION("Titan GE Ethernet driver");
MODULE_LICENSE("GPL");

module_init(titan_ge_init_module);
module_exit(titan_ge_cleanup_module);