1/*
2 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 *  by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
8 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
9 *
10 * Released under the GPL
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/string.h>
17#include <linux/ptrace.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/interrupt.h>
22#include <linux/init.h>
23#include <linux/delay.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/skbuff.h>
27#include <linux/spinlock.h>
28#include <linux/mii.h>
29#include <linux/ethtool.h>
30#include <linux/bitops.h>
31#include <linux/dma-mapping.h>
32
33#include <asm/8xx_immap.h>
34#include <asm/pgtable.h>
35#include <asm/mpc8xx.h>
36#include <asm/irq.h>
37#include <asm/uaccess.h>
38#include <asm/commproc.h>
39
40#include "fec_8xx.h"
41
42/*************************************************/
43
/* Maximum number of multicast addresses programmed into the hash filter
 * before falling back to receive-all-multicast
 * (see fec_set_multicast_list()). */
#define FEC_MAX_MULTICAST_ADDRS	64

/*************************************************/

/* Version banner, printed once on first successful probe
 * (see fec_8xx_init_one()). */
static char version[] __devinitdata =
    DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Motorola 8xx FEC ethernet driver");
MODULE_LICENSE("GPL");

/* Debug message bitmap, settable at module load time. */
int fec_8xx_debug = -1;		/* -1 == use FEC_8XX_DEF_MSG_ENABLE as value */
module_param(fec_8xx_debug, int, 0);
MODULE_PARM_DESC(fec_8xx_debug,
		 "FEC 8xx bitmapped debugging message enable value");


/*************************************************/

/*
 * Delay to wait for FEC reset command to complete (in us)
 */
#define FEC_RESET_DELAY		50
67
68/*****************************************************************************************/
69
70static void fec_whack_reset(fec_t * fecp)
71{
72	int i;
73
74	/*
75	 * Whack a reset.  We should wait for this.
76	 */
77	FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
78	for (i = 0;
79	     (FR(fecp, ecntrl) & FEC_ECNTRL_RESET) != 0 && i < FEC_RESET_DELAY;
80	     i++)
81		udelay(1);
82
83	if (i == FEC_RESET_DELAY)
84		printk(KERN_WARNING "FEC Reset timeout!\n");
85
86}
87
88/****************************************************************************/
89
90/*
91 * Transmitter timeout.
92 */
93#define TX_TIMEOUT (2*HZ)
94
95/****************************************************************************/
96
97/*
98 * Returns the CRC needed when filling in the hash table for
99 * multicast group filtering
100 * pAddr must point to a MAC address (6 bytes)
101 */
102static __u32 fec_mulicast_calc_crc(char *pAddr)
103{
104	u8 byte;
105	int byte_count;
106	int bit_count;
107	__u32 crc = 0xffffffff;
108	u8 msb;
109
110	for (byte_count = 0; byte_count < 6; byte_count++) {
111		byte = pAddr[byte_count];
112		for (bit_count = 0; bit_count < 8; bit_count++) {
113			msb = crc >> 31;
114			crc <<= 1;
115			if (msb ^ (byte & 0x1)) {
116				crc ^= FEC_CRC_POLY;
117			}
118			byte >>= 1;
119		}
120	}
121	return (crc);
122}
123
124/*
125 * Set or clear the multicast filter for this adaptor.
126 * Skeleton taken from sunlance driver.
127 * The CPM Ethernet implementation allows Multicast as well as individual
128 * MAC address filtering.  Some of the drivers check to make sure it is
129 * a group multicast address, and discard those that are not.  I guess I
130 * will do the same for now, but just remove the test if you want
131 * individual filtering as well (do the upper net layers want or support
132 * this kind of feature?).
133 */
134static void fec_set_multicast_list(struct net_device *dev)
135{
136	struct fec_enet_private *fep = netdev_priv(dev);
137	fec_t *fecp = fep->fecp;
138	struct dev_mc_list *pmc;
139	__u32 crc;
140	int temp;
141	__u32 csrVal;
142	int hash_index;
143	__u32 hthi, htlo;
144	unsigned long flags;
145
146
147	if ((dev->flags & IFF_PROMISC) != 0) {
148
149		spin_lock_irqsave(&fep->lock, flags);
150		FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
151		spin_unlock_irqrestore(&fep->lock, flags);
152
153		/*
154		 * Log any net taps.
155		 */
156		printk(KERN_WARNING DRV_MODULE_NAME
157		       ": %s: Promiscuous mode enabled.\n", dev->name);
158		return;
159
160	}
161
162	if ((dev->flags & IFF_ALLMULTI) != 0 ||
163	    dev->mc_count > FEC_MAX_MULTICAST_ADDRS) {
164		/*
165		 * Catch all multicast addresses, set the filter to all 1's.
166		 */
167		hthi = 0xffffffffU;
168		htlo = 0xffffffffU;
169	} else {
170		hthi = 0;
171		htlo = 0;
172
173		/*
174		 * Now populate the hash table
175		 */
176		for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next) {
177			crc = fec_mulicast_calc_crc(pmc->dmi_addr);
178			temp = (crc & 0x3f) >> 1;
179			hash_index = ((temp & 0x01) << 4) |
180				     ((temp & 0x02) << 2) |
181				     ((temp & 0x04)) |
182				     ((temp & 0x08) >> 2) |
183				     ((temp & 0x10) >> 4);
184			csrVal = (1 << hash_index);
185			if (crc & 1)
186				hthi |= csrVal;
187			else
188				htlo |= csrVal;
189		}
190	}
191
192	spin_lock_irqsave(&fep->lock, flags);
193	FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
194	FW(fecp, hash_table_high, hthi);
195	FW(fecp, hash_table_low, htlo);
196	spin_unlock_irqrestore(&fep->lock, flags);
197}
198
199static int fec_set_mac_address(struct net_device *dev, void *addr)
200{
201	struct sockaddr *mac = addr;
202	struct fec_enet_private *fep = netdev_priv(dev);
203	struct fec *fecp = fep->fecp;
204	int i;
205	__u32 addrhi, addrlo;
206	unsigned long flags;
207
208	/* Get pointer to SCC area in parameter RAM. */
209	for (i = 0; i < 6; i++)
210		dev->dev_addr[i] = mac->sa_data[i];
211
212	/*
213	 * Set station address.
214	 */
215	addrhi = ((__u32) dev->dev_addr[0] << 24) |
216		 ((__u32) dev->dev_addr[1] << 16) |
217	   	 ((__u32) dev->dev_addr[2] <<  8) |
218	    	  (__u32) dev->dev_addr[3];
219	addrlo = ((__u32) dev->dev_addr[4] << 24) |
220	    	 ((__u32) dev->dev_addr[5] << 16);
221
222	spin_lock_irqsave(&fep->lock, flags);
223	FW(fecp, addr_low, addrhi);
224	FW(fecp, addr_high, addrlo);
225	spin_unlock_irqrestore(&fep->lock, flags);
226
227	return 0;
228}
229
230/*
231 * This function is called to start or restart the FEC during a link
232 * change.  This only happens when switching between half and full
233 * duplex.
234 */
235void fec_restart(struct net_device *dev, int duplex, int speed)
236{
237#ifdef CONFIG_DUET
238	immap_t *immap = (immap_t *) IMAP_ADDR;
239	__u32 cptr;
240#endif
241	struct fec_enet_private *fep = netdev_priv(dev);
242	struct fec *fecp = fep->fecp;
243	const struct fec_platform_info *fpi = fep->fpi;
244	cbd_t *bdp;
245	struct sk_buff *skb;
246	int i;
247	__u32 addrhi, addrlo;
248
249	fec_whack_reset(fep->fecp);
250
251	/*
252	 * Set station address.
253	 */
254	addrhi = ((__u32) dev->dev_addr[0] << 24) |
255		 ((__u32) dev->dev_addr[1] << 16) |
256		 ((__u32) dev->dev_addr[2] <<  8) |
257		 (__u32) dev->dev_addr[3];
258	addrlo = ((__u32) dev->dev_addr[4] << 24) |
259		 ((__u32) dev->dev_addr[5] << 16);
260	FW(fecp, addr_low, addrhi);
261	FW(fecp, addr_high, addrlo);
262
263	/*
264	 * Reset all multicast.
265	 */
266	FW(fecp, hash_table_high, 0);
267	FW(fecp, hash_table_low, 0);
268
269	/*
270	 * Set maximum receive buffer size.
271	 */
272	FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
273	FW(fecp, r_hash, PKT_MAXBUF_SIZE);
274
275	/*
276	 * Set receive and transmit descriptor base.
277	 */
278	FW(fecp, r_des_start, iopa((__u32) (fep->rx_bd_base)));
279	FW(fecp, x_des_start, iopa((__u32) (fep->tx_bd_base)));
280
281	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
282	fep->tx_free = fep->tx_ring;
283	fep->cur_rx = fep->rx_bd_base;
284
285	/*
286	 * Reset SKB receive buffers
287	 */
288	for (i = 0; i < fep->rx_ring; i++) {
289		if ((skb = fep->rx_skbuff[i]) == NULL)
290			continue;
291		fep->rx_skbuff[i] = NULL;
292		dev_kfree_skb(skb);
293	}
294
295	/*
296	 * Initialize the receive buffer descriptors.
297	 */
298	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
299		skb = dev_alloc_skb(ENET_RX_FRSIZE);
300		if (skb == NULL) {
301			printk(KERN_WARNING DRV_MODULE_NAME
302			       ": %s Memory squeeze, unable to allocate skb\n",
303			       dev->name);
304			fep->stats.rx_dropped++;
305			break;
306		}
307		fep->rx_skbuff[i] = skb;
308		skb->dev = dev;
309		CBDW_BUFADDR(bdp, dma_map_single(NULL, skb->data,
310					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
311					 DMA_FROM_DEVICE));
312		CBDW_DATLEN(bdp, 0);	/* zero */
313		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
314			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
315	}
316	/*
317	 * if we failed, fillup remainder
318	 */
319	for (; i < fep->rx_ring; i++, bdp++) {
320		fep->rx_skbuff[i] = NULL;
321		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
322	}
323
324	/*
325	 * Reset SKB transmit buffers.
326	 */
327	for (i = 0; i < fep->tx_ring; i++) {
328		if ((skb = fep->tx_skbuff[i]) == NULL)
329			continue;
330		fep->tx_skbuff[i] = NULL;
331		dev_kfree_skb(skb);
332	}
333
334	/*
335	 * ...and the same for transmit.
336	 */
337	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
338		fep->tx_skbuff[i] = NULL;
339		CBDW_BUFADDR(bdp, virt_to_bus(NULL));
340		CBDW_DATLEN(bdp, 0);
341		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
342	}
343
344	/*
345	 * Enable big endian and don't care about SDMA FC.
346	 */
347	FW(fecp, fun_code, 0x78000000);
348
349	/*
350	 * Set MII speed.
351	 */
352	FW(fecp, mii_speed, fep->fec_phy_speed);
353
354	/*
355	 * Clear any outstanding interrupt.
356	 */
357	FW(fecp, ievent, 0xffc0);
358	FW(fecp, ivec, (fpi->fec_irq / 2) << 29);
359
360	/*
361	 * adjust to speed (only for DUET & RMII)
362	 */
363#ifdef CONFIG_DUET
364	cptr = in_be32(&immap->im_cpm.cp_cptr);
365	switch (fpi->fec_no) {
366	case 0:
367		/*
368		 * check if in RMII mode
369		 */
370		if ((cptr & 0x100) == 0)
371			break;
372
373		if (speed == 10)
374			cptr |= 0x0000010;
375		else if (speed == 100)
376			cptr &= ~0x0000010;
377		break;
378	case 1:
379		/*
380		 * check if in RMII mode
381		 */
382		if ((cptr & 0x80) == 0)
383			break;
384
385		if (speed == 10)
386			cptr |= 0x0000008;
387		else if (speed == 100)
388			cptr &= ~0x0000008;
389		break;
390	default:
391		break;
392	}
393	out_be32(&immap->im_cpm.cp_cptr, cptr);
394#endif
395
396	FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);	/* MII enable */
397	/*
398	 * adjust to duplex mode
399	 */
400	if (duplex) {
401		FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
402		FS(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD enable */
403	} else {
404		FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
405		FC(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD disable */
406	}
407
408	/*
409	 * Enable interrupts we wish to service.
410	 */
411	FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
412	   FEC_ENET_RXF | FEC_ENET_RXB);
413
414	/*
415	 * And last, enable the transmit and receive processing.
416	 */
417	FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
418	FW(fecp, r_des_active, 0x01000000);
419}
420
/*
 * Stop the controller: request a graceful transmit stop, disable the
 * FEC and free every queued socket buffer.  No-op if the controller is
 * already disabled.  The caller in this file (fec_enet_close()) holds
 * fep->lock around this call.
 */
void fec_stop(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	fec_t *fecp = fep->fecp;
	struct sk_buff *skb;
	int i;

	if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
		return;		/* already down */

	/* Request a graceful stop and poll (up to FEC_RESET_DELAY us)
	 * for the corresponding event bit in ievent. */
	FW(fecp, x_cntrl, 0x01);	/* Graceful transmit stop */
	for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
	     i < FEC_RESET_DELAY; i++)
		udelay(1);

	if (i == FEC_RESET_DELAY)
		printk(KERN_WARNING DRV_MODULE_NAME
		       ": %s FEC timeout on graceful transmit stop\n",
		       dev->name);
	/*
	 * Disable FEC. Let only MII interrupts.
	 */
	FW(fecp, imask, 0);
	/*
	 * NOTE(review): this writes ~FEC_ECNTRL_ETHER_EN, i.e. it sets
	 * every bit EXCEPT ETHER_EN rather than only clearing ETHER_EN.
	 * It matches the original driver, but looks suspicious -- confirm
	 * against the 8xx FEC register documentation.
	 */
	FW(fecp, ecntrl, ~FEC_ECNTRL_ETHER_EN);

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0; i < fep->tx_ring; i++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;
		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Reset SKB receive buffers
	 */
	for (i = 0; i < fep->rx_ring; i++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;
		fep->rx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}
}
466
/* common receive function */
/*
 * Drain received frames from the RX ring and hand them to the stack.
 *
 * Serves both as the plain interrupt-driven receive path (budget ==
 * NULL, fpi->use_napi false) and as the NAPI poll body, where *budget
 * limits how many frames may be consumed and is decremented by the
 * number processed.
 *
 * Returns 0 when done (NAPI: polling complete and RX interrupts
 * re-enabled), 1 when NAPI work remains.
 */
static int fec_enet_rx_common(struct net_device *dev, int *budget)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	fec_t *fecp = fep->fecp;
	const struct fec_platform_info *fpi = fep->fpi;
	cbd_t *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	__u16 pkt_len, sc;
	int curidx;
	int rx_work_limit;

	if (fpi->use_napi) {
		rx_work_limit = min(dev->quota, *budget);

		if (!netif_running(dev))
			return 0;
	}

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi*/
	if (fpi->use_napi)
		FW(fecp, ievent, FEC_ENET_RXF | FEC_ENET_RXB);

	/* Process descriptors until one is still owned by the hardware. */
	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			/* Errored frame: recycle the slot's current buffer. */
			skbn = fep->rx_skbuff[curidx];
			BUG_ON(skbn == NULL);

		} else {

			/* napi, got packet but no quota */
			if (fpi->use_napi && --rx_work_limit < 0)
				break;

			skb = fep->rx_skbuff[curidx];
			BUG_ON(skb == NULL);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			/*
			 * Copybreak: small frames are copied into a fresh
			 * skb so the large ring buffer can stay in place.
			 */
			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
								  skbn->data,
								  pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				if (!fpi->use_napi)
					netif_rx(skb);
				else
					netif_receive_skb(skb);
			} else {
				/* Out of memory: drop frame, reuse buffer. */
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		/* Re-arm the descriptor with buffer skbn (new or recycled). */
		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(NULL, skbn->data,
						 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
						 DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		/*
		 * Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		FW(fecp, r_des_active, 0x01000000);
	}

	fep->cur_rx = bdp;

	if (fpi->use_napi) {
		dev->quota -= received;
		*budget -= received;

		if (rx_work_limit < 0)
			return 1;	/* not done */

		/* done */
		netif_rx_complete(dev);

		/* enable RX interrupt bits */
		FS(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB);
	}

	return 0;
}
622
/*
 * Reclaim completed transmit descriptors: update statistics, free the
 * transmitted skbs, advance dirty_tx and wake the queue if it had been
 * stopped for lack of descriptors.  Runs in interrupt context (called
 * from fec_enet_interrupt() on TXF events).
 */
static void fec_enet_tx(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake;
	__u16 sc;

	spin_lock(&fep->lock);
	bdp = fep->dirty_tx;

	do_wake = 0;
	/* Stop at the first descriptor still owned by the hardware. */
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {

		dirtyidx = bdp - fep->tx_bd_base;

		/* Whole ring already reclaimed -- nothing outstanding. */
		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
			fep->stats.tx_errors++;
			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s HEY! Enet xmit interrupt and TX_READY.\n",
			       dev->name);

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	spin_unlock(&fep->lock);

	if (do_wake && netif_queue_stopped(dev))
		netif_wake_queue(dev);
}
704
705/*
706 * The interrupt handler.
707 * This is called from the MPC core interrupt.
708 */
709static irqreturn_t
710fec_enet_interrupt(int irq, void *dev_id)
711{
712	struct net_device *dev = dev_id;
713	struct fec_enet_private *fep;
714	const struct fec_platform_info *fpi;
715	fec_t *fecp;
716	__u32 int_events;
717	__u32 int_events_napi;
718
719	if (unlikely(dev == NULL))
720		return IRQ_NONE;
721
722	fep = netdev_priv(dev);
723	fecp = fep->fecp;
724	fpi = fep->fpi;
725
726	/*
727	 * Get the interrupt events that caused us to be here.
728	 */
729	while ((int_events = FR(fecp, ievent) & FR(fecp, imask)) != 0) {
730
731		if (!fpi->use_napi)
732			FW(fecp, ievent, int_events);
733		else {
734			int_events_napi = int_events & ~(FEC_ENET_RXF | FEC_ENET_RXB);
735			FW(fecp, ievent, int_events_napi);
736		}
737
738		if ((int_events & (FEC_ENET_HBERR | FEC_ENET_BABR |
739				   FEC_ENET_BABT | FEC_ENET_EBERR)) != 0)
740			printk(KERN_WARNING DRV_MODULE_NAME
741			       ": %s FEC ERROR(s) 0x%x\n",
742			       dev->name, int_events);
743
744		if ((int_events & FEC_ENET_RXF) != 0) {
745			if (!fpi->use_napi)
746				fec_enet_rx_common(dev, NULL);
747			else {
748				if (netif_rx_schedule_prep(dev)) {
749					/* disable rx interrupts */
750					FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB);
751					__netif_rx_schedule(dev);
752				} else {
753					printk(KERN_ERR DRV_MODULE_NAME
754					       ": %s driver bug! interrupt while in poll!\n",
755					       dev->name);
756					FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB);
757				}
758			}
759		}
760
761		if ((int_events & FEC_ENET_TXF) != 0)
762			fec_enet_tx(dev);
763	}
764
765	return IRQ_HANDLED;
766}
767
768/* This interrupt occurs when the PHY detects a link change. */
769static irqreturn_t
770fec_mii_link_interrupt(int irq, void *dev_id)
771{
772	struct net_device *dev = dev_id;
773	struct fec_enet_private *fep;
774	const struct fec_platform_info *fpi;
775
776	if (unlikely(dev == NULL))
777		return IRQ_NONE;
778
779	fep = netdev_priv(dev);
780	fpi = fep->fpi;
781
782	if (!fpi->use_mdio)
783		return IRQ_NONE;
784
785	/*
786	 * Acknowledge the interrupt if possible. If we have not
787	 * found the PHY yet we can't process or acknowledge the
788	 * interrupt now. Instead we ignore this interrupt for now,
789	 * which we can do since it is edge triggered. It will be
790	 * acknowledged later by fec_enet_open().
791	 */
792	if (!fep->phy)
793		return IRQ_NONE;
794
795	fec_mii_ack_int(dev);
796	fec_mii_link_status_change_check(dev, 0);
797
798	return IRQ_HANDLED;
799}
800
801
802/**********************************************************************************/
803
/*
 * Queue a frame for transmission: claim the next TX descriptor, map the
 * skb for DMA and hand the descriptor to the hardware.  Stops the queue
 * when the ring fills; fec_enet_tx() restarts it on completion.
 *
 * Returns 0 on success, 1 if no descriptor was available (should not
 * happen, since the queue is stopped when the ring is full).
 */
static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	fec_t *fecp = fep->fecp;
	cbd_t *bdp;
	int curidx;
	unsigned long flags;

	spin_lock_irqsave(&fep->tx_lock, flags);

	/*
	 * Fill in a Tx ring entry
	 */
	bdp = fep->cur_tx;

	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fep->tx_lock, flags);

		/*
		 * Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		printk(KERN_WARNING DRV_MODULE_NAME
		       ": %s tx queue full!.\n", dev->name);
		return 1;
	}

	curidx = bdp - fep->tx_bd_base;
	/*
	 * Clear all of the status flags.
	 */
	CBDC_SC(bdp, BD_ENET_TX_STATS);

	/*
	 * Save skb pointer.
	 */
	fep->tx_skbuff[curidx] = skb;

	fep->stats.tx_bytes += skb->len;

	/*
	 * Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(NULL, skb->data,
					 skb->len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, skb->len);

	dev->trans_start = jiffies;

	/*
	 * If this was the last BD in the ring, start at the beginning again.
	 */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		fep->cur_tx++;
	else
		fep->cur_tx = fep->tx_bd_base;

	/* Ring now full: throttle the stack until a TX completes. */
	if (!--fep->tx_free)
		netif_stop_queue(dev);

	/*
	 * Trigger transmission start
	 */
	CBDS_SC(bdp, BD_ENET_TX_READY | BD_ENET_TX_INTR |
		BD_ENET_TX_LAST | BD_ENET_TX_TC);
	FW(fecp, x_des_active, 0x01000000);

	spin_unlock_irqrestore(&fep->tx_lock, flags);

	return 0;
}
876
877static void fec_timeout(struct net_device *dev)
878{
879	struct fec_enet_private *fep = netdev_priv(dev);
880
881	fep->stats.tx_errors++;
882
883	if (fep->tx_free)
884		netif_wake_queue(dev);
885
886	/* check link status again */
887	fec_mii_link_status_change_check(dev, 0);
888}
889
890static int fec_enet_open(struct net_device *dev)
891{
892	struct fec_enet_private *fep = netdev_priv(dev);
893	const struct fec_platform_info *fpi = fep->fpi;
894	unsigned long flags;
895
896	/* Install our interrupt handler. */
897	if (request_irq(fpi->fec_irq, fec_enet_interrupt, 0, "fec", dev) != 0) {
898		printk(KERN_ERR DRV_MODULE_NAME
899		       ": %s Could not allocate FEC IRQ!", dev->name);
900		return -EINVAL;
901	}
902
903	/* Install our phy interrupt handler */
904	if (fpi->phy_irq != -1 &&
905		request_irq(fpi->phy_irq, fec_mii_link_interrupt, 0, "fec-phy",
906				dev) != 0) {
907		printk(KERN_ERR DRV_MODULE_NAME
908		       ": %s Could not allocate PHY IRQ!", dev->name);
909		free_irq(fpi->fec_irq, dev);
910		return -EINVAL;
911	}
912
913	if (fpi->use_mdio) {
914		fec_mii_startup(dev);
915		netif_carrier_off(dev);
916		fec_mii_link_status_change_check(dev, 1);
917	} else {
918		spin_lock_irqsave(&fep->lock, flags);
919		fec_restart(dev, 1, 100);
920		spin_unlock_irqrestore(&fep->lock, flags);
921
922		netif_carrier_on(dev);
923		netif_start_queue(dev);
924	}
925	return 0;
926}
927
/*
 * Bring the interface down: stop the queue, shut down MDIO/PHY
 * handling, quiesce the controller and release both interrupt lines.
 * Counterpart to fec_enet_open().  Always returns 0.
 */
static int fec_enet_close(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	const struct fec_platform_info *fpi = fep->fpi;
	unsigned long flags;

	/* Stop the stack from submitting further frames. */
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	/* Shut the PHY machinery down first when MDIO is in use. */
	if (fpi->use_mdio)
		fec_mii_shutdown(dev);

	/* Quiesce the controller and free all ring buffers. */
	spin_lock_irqsave(&fep->lock, flags);
	fec_stop(dev);
	spin_unlock_irqrestore(&fep->lock, flags);

	/* release any irqs */
	if (fpi->phy_irq != -1)
		free_irq(fpi->phy_irq, dev);
	free_irq(fpi->fec_irq, dev);

	return 0;
}
951
952static struct net_device_stats *fec_enet_get_stats(struct net_device *dev)
953{
954	struct fec_enet_private *fep = netdev_priv(dev);
955	return &fep->stats;
956}
957
/*
 * NAPI poll entry point; delegates to the common receive path.
 * Returns non-zero while more RX work remains within *budget.
 */
static int fec_enet_poll(struct net_device *dev, int *budget)
{
	return fec_enet_rx_common(dev, budget);
}
962
963/*************************************************************************/
964
965static void fec_get_drvinfo(struct net_device *dev,
966			    struct ethtool_drvinfo *info)
967{
968	strcpy(info->driver, DRV_MODULE_NAME);
969	strcpy(info->version, DRV_MODULE_VERSION);
970}
971
972static int fec_get_regs_len(struct net_device *dev)
973{
974	return sizeof(fec_t);
975}
976
977static void fec_get_regs(struct net_device *dev, struct ethtool_regs *regs,
978			 void *p)
979{
980	struct fec_enet_private *fep = netdev_priv(dev);
981	unsigned long flags;
982
983	if (regs->len < sizeof(fec_t))
984		return;
985
986	regs->version = 0;
987	spin_lock_irqsave(&fep->lock, flags);
988	memcpy_fromio(p, fep->fecp, sizeof(fec_t));
989	spin_unlock_irqrestore(&fep->lock, flags);
990}
991
992static int fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
993{
994	struct fec_enet_private *fep = netdev_priv(dev);
995	unsigned long flags;
996	int rc;
997
998	spin_lock_irqsave(&fep->lock, flags);
999	rc = mii_ethtool_gset(&fep->mii_if, cmd);
1000	spin_unlock_irqrestore(&fep->lock, flags);
1001
1002	return rc;
1003}
1004
1005static int fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1006{
1007	struct fec_enet_private *fep = netdev_priv(dev);
1008	unsigned long flags;
1009	int rc;
1010
1011	spin_lock_irqsave(&fep->lock, flags);
1012	rc = mii_ethtool_sset(&fep->mii_if, cmd);
1013	spin_unlock_irqrestore(&fep->lock, flags);
1014
1015	return rc;
1016}
1017
1018static int fec_nway_reset(struct net_device *dev)
1019{
1020	struct fec_enet_private *fep = netdev_priv(dev);
1021	return mii_nway_restart(&fep->mii_if);
1022}
1023
1024static __u32 fec_get_msglevel(struct net_device *dev)
1025{
1026	struct fec_enet_private *fep = netdev_priv(dev);
1027	return fep->msg_enable;
1028}
1029
1030static void fec_set_msglevel(struct net_device *dev, __u32 value)
1031{
1032	struct fec_enet_private *fep = netdev_priv(dev);
1033	fep->msg_enable = value;
1034}
1035
/* ethtool operations exported by this driver (hooked up in
 * fec_8xx_init_one()). */
static const struct ethtool_ops fec_ethtool_ops = {
	.get_drvinfo	= fec_get_drvinfo,
	.get_regs_len	= fec_get_regs_len,
	.get_settings	= fec_get_settings,
	.set_settings	= fec_set_settings,
	.nway_reset	= fec_nway_reset,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= fec_get_msglevel,
	.set_msglevel	= fec_set_msglevel,
	.get_tx_csum	= ethtool_op_get_tx_csum,
	.set_tx_csum	= ethtool_op_set_tx_csum,	/* local! */
	.get_sg		= ethtool_op_get_sg,
	.set_sg		= ethtool_op_set_sg,
	.get_regs	= fec_get_regs,
};
1051
1052static int fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1053{
1054	struct fec_enet_private *fep = netdev_priv(dev);
1055	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
1056	unsigned long flags;
1057	int rc;
1058
1059	if (!netif_running(dev))
1060		return -EINVAL;
1061
1062	spin_lock_irqsave(&fep->lock, flags);
1063	rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL);
1064	spin_unlock_irqrestore(&fep->lock, flags);
1065	return rc;
1066}
1067
/*
 * Probe and register one FEC instance described by fpi.  On success
 * *devp receives the newly created net_device and 0 is returned; on
 * failure a negative errno is returned and all partially completed
 * setup is undone via the err: path.
 */
int fec_8xx_init_one(const struct fec_platform_info *fpi,
		     struct net_device **devp)
{
	immap_t *immap = (immap_t *) IMAP_ADDR;
	static int fec_8xx_version_printed = 0;
	struct net_device *dev = NULL;
	struct fec_enet_private *fep = NULL;
	fec_t *fecp = NULL;
	int i;
	int err = 0;
	int registered = 0;
	__u32 siel;

	*devp = NULL;

	/* Select the register block for the requested controller. */
	switch (fpi->fec_no) {
	case 0:
		fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec;
		break;
#ifdef CONFIG_DUET
	case 1:
		fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec2;
		break;
#endif
	default:
		return -EINVAL;
	}

	if (fec_8xx_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* Private area is followed by the rx/tx skb pointer arrays. */
	i = sizeof(*fep) + (sizeof(struct sk_buff **) *
			    (fpi->rx_ring + fpi->tx_ring));

	dev = alloc_etherdev(i);
	if (!dev) {
		err = -ENOMEM;
		goto err;
	}
	SET_MODULE_OWNER(dev);

	fep = netdev_priv(dev);

	/* partial reset of FEC */
	fec_whack_reset(fecp);

	/* point rx_skbuff, tx_skbuff */
	fep->rx_skbuff = (struct sk_buff **)&fep[1];
	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

	fep->fecp = fecp;
	fep->fpi = fpi;

	/* init locks */
	spin_lock_init(&fep->lock);
	spin_lock_init(&fep->tx_lock);

	/*
	 * Set the Ethernet address.
	 */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = fpi->macaddr[i];

	/* One DMA-coherent region holds both descriptor rings. */
	fep->ring_base = dma_alloc_coherent(NULL,
					    (fpi->tx_ring + fpi->rx_ring) *
					    sizeof(cbd_t), &fep->ring_mem_addr,
					    GFP_KERNEL);
	if (fep->ring_base == NULL) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s dma alloc failed.\n", dev->name);
		err = -ENOMEM;
		goto err;
	}

	/*
	 * Set receive and transmit descriptor base.
	 */
	fep->rx_bd_base = fep->ring_base;
	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

	/* initialize ring size variables */
	fep->tx_ring = fpi->tx_ring;
	fep->rx_ring = fpi->rx_ring;

	/* SIU interrupt */
	if (fpi->phy_irq != -1 &&
		(fpi->phy_irq >= SIU_IRQ0 && fpi->phy_irq < SIU_LEVEL7)) {

		/*
		 * NOTE(review): the SIEL bit is set for even phy_irq
		 * numbers and cleared for odd ones -- presumably selecting
		 * the edge/level sensing mode; confirm against the MPC8xx
		 * SIU documentation.
		 */
		siel = in_be32(&immap->im_siu_conf.sc_siel);
		if ((fpi->phy_irq & 1) == 0)
			siel |= (0x80000000 >> fpi->phy_irq);
		else
			siel &= ~(0x80000000 >> (fpi->phy_irq & ~1));
		out_be32(&immap->im_siu_conf.sc_siel, siel);
	}

	/*
	 * The FEC Ethernet specific entries in the device structure.
	 */
	dev->open = fec_enet_open;
	dev->hard_start_xmit = fec_enet_start_xmit;
	dev->tx_timeout = fec_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->stop = fec_enet_close;
	dev->get_stats = fec_enet_get_stats;
	dev->set_multicast_list = fec_set_multicast_list;
	dev->set_mac_address = fec_set_mac_address;
	if (fpi->use_napi) {
		dev->poll = fec_enet_poll;
		dev->weight = fpi->napi_weight;
	}
	dev->ethtool_ops = &fec_ethtool_ops;
	dev->do_ioctl = fec_ioctl;

	/*
	 * MII clock divider derived from sys_clk (rounded up to the next
	 * 2.5 MHz step) -- presumably targeting the standard ~2.5 MHz MDIO
	 * clock; verify against the FEC MII_SPEED register description.
	 */
	fep->fec_phy_speed =
	    ((((fpi->sys_clk + 4999999) / 2500000) / 2) & 0x3F) << 1;

	init_timer(&fep->phy_timer_list);

	/* partial reset of FEC so that only MII works */
	FW(fecp, mii_speed, fep->fec_phy_speed);
	FW(fecp, ievent, 0xffc0);
	FW(fecp, ivec, (fpi->fec_irq / 2) << 29);
	FW(fecp, imask, 0);
	FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);	/* MII enable */
	FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err != 0)
		goto err;
	registered = 1;

	if (fpi->use_mdio) {
		fep->mii_if.dev = dev;
		fep->mii_if.mdio_read = fec_mii_read;
		fep->mii_if.mdio_write = fec_mii_write;
		fep->mii_if.phy_id_mask = 0x1f;
		fep->mii_if.reg_num_mask = 0x1f;
		fep->mii_if.phy_id = fec_mii_phy_id_detect(dev);
	}

	*devp = dev;

	return 0;

      err:
	if (dev != NULL) {
		if (fecp != NULL)
			fec_whack_reset(fecp);

		if (registered)
			unregister_netdev(dev);

		if (fep != NULL) {
			if (fep->ring_base)
				dma_free_coherent(NULL,
						  (fpi->tx_ring +
						   fpi->rx_ring) *
						  sizeof(cbd_t), fep->ring_base,
						  fep->ring_mem_addr);
		}
		free_netdev(dev);
	}
	return err;
}
1235
1236int fec_8xx_cleanup_one(struct net_device *dev)
1237{
1238	struct fec_enet_private *fep = netdev_priv(dev);
1239	fec_t *fecp = fep->fecp;
1240	const struct fec_platform_info *fpi = fep->fpi;
1241
1242	fec_whack_reset(fecp);
1243
1244	unregister_netdev(dev);
1245
1246	dma_free_coherent(NULL, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
1247			  fep->ring_base, fep->ring_mem_addr);
1248
1249	free_netdev(dev);
1250
1251	return 0;
1252}
1253
1254/**************************************************************************************/
1255/**************************************************************************************/
1256/**************************************************************************************/
1257
1258static int __init fec_8xx_init(void)
1259{
1260	return fec_8xx_platform_init();
1261}
1262
1263static void __exit fec_8xx_cleanup(void)
1264{
1265	fec_8xx_platform_cleanup();
1266}
1267
1268/**************************************************************************************/
1269/**************************************************************************************/
1270/**************************************************************************************/
1271
/* Register the module entry and exit handlers with the kernel. */
module_init(fec_8xx_init);
module_exit(fec_8xx_cleanup);
1274