1/*
2 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
3 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
4 *
5 * This version of the driver is specific to the FADS implementation,
6 * since the board contains control registers external to the processor
7 * for the control of the LevelOne LXT970 transceiver.  The MPC860T manual
8 * describes connections using the internal parallel port I/O, which
9 * is basically all of Port D.
10 *
11 * Includes support for the following PHYs: QS6612, LXT970, LXT971/2.
12 *
13 * Right now, I am very wasteful with the buffers.  I allocate memory
14 * pages and then divide them into 2K frame buffers.  This way I know I
15 * have buffers large enough to hold one frame within one buffer descriptor.
16 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
17 * will be much more memory efficient and will easily handle lots of
18 * small packets.
19 *
20 * Much better multiple PHY support by Magnus Damm.
21 * Copyright (c) 2000 Ericsson Radio Systems AB.
22 *
23 * Make use of MII for PHY control configurable.
24 * Some fixes.
25 * Copyright (c) 2000-2002 Wolfgang Denk, DENX Software Engineering.
26 *
27 * Support for AMD AM79C874 added.
28 * Thomas Lange, thomas@corelatus.com
29 */
30
31#include <linux/kernel.h>
32#include <linux/sched.h>
33#include <linux/string.h>
34#include <linux/ptrace.h>
35#include <linux/errno.h>
36#include <linux/ioport.h>
37#include <linux/slab.h>
38#include <linux/interrupt.h>
39#include <linux/pci.h>
40#include <linux/init.h>
41#include <linux/delay.h>
42#include <linux/netdevice.h>
43#include <linux/etherdevice.h>
44#include <linux/skbuff.h>
45#include <linux/spinlock.h>
46#include <linux/bitops.h>
47#ifdef CONFIG_FEC_PACKETHOOK
48#include <linux/pkthook.h>
49#endif
50
51#include <asm/8xx_immap.h>
52#include <asm/pgtable.h>
53#include <asm/mpc8xx.h>
54#include <asm/irq.h>
55#include <asm/uaccess.h>
56#include <asm/commproc.h>
57
58#ifdef	CONFIG_USE_MDIO
59/* Forward declarations of some structures to support different PHYs
60*/
61
typedef struct {
	uint mii_data;	/* MII command word, built with mk_mii_read()/mk_mii_write() */
	void (*funct)(uint mii_reg, struct net_device *dev);	/* optional; called with the MII data register contents when the command completes */
} phy_cmd_t;

typedef struct {
	uint id;	/* PHY identifier this entry matches */
	char *name;	/* human-readable PHY name for log messages */

	/* Command lists (each terminated by mk_mii_end) run at the
	 * corresponding stage of the PHY's life cycle.
	 */
	const phy_cmd_t *config;	/* initial configuration */
	const phy_cmd_t *startup;	/* enable interrupts, start autonegotiation */
	const phy_cmd_t *ack_int;	/* acknowledge interrupt, read status */
	const phy_cmd_t *shutdown;	/* disable interrupts */
} phy_info_t;
76#endif	/* CONFIG_USE_MDIO */
77
78/* The number of Tx and Rx buffers.  These are allocated from the page
79 * pool.  The code may assume these are power of two, so it is best
80 * to keep them that size.
81 * We don't need to allocate pages for the transmitter.  We just use
82 * the skbuffer directly.
83 */
#ifdef CONFIG_ENET_BIG_BUFFERS
/* Larger rings: more pages of 2K receive frames and a deeper Tx ring. */
#define FEC_ENET_RX_PAGES	16
#define FEC_ENET_RX_FRSIZE	2048
#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define TX_RING_SIZE		16	/* Must be power of two */
#define TX_RING_MOD_MASK	15	/*   for this to work */
#else
/* Default (smaller) ring sizes. */
#define FEC_ENET_RX_PAGES	4
#define FEC_ENET_RX_FRSIZE	2048
#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define TX_RING_SIZE		8	/* Must be power of two */
#define TX_RING_MOD_MASK	7	/*   for this to work */
#endif
99
/* Interrupt events/masks (FEC IEVENT/IMASK register bits).
*/
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

/* Bit definitions for the FEC Ethernet control (ECNTRL), receive
 * control (RCNTRL) and transmit control (TCNTRL) registers.
*/
#define FEC_ECNTRL_PINMUX	0x00000004
#define FEC_ECNTRL_ETHER_EN	0x00000002
#define FEC_ECNTRL_RESET	0x00000001

#define FEC_RCNTRL_BC_REJ	0x00000010
#define FEC_RCNTRL_PROM		0x00000008
#define FEC_RCNTRL_MII_MODE	0x00000004
#define FEC_RCNTRL_DRT		0x00000002
#define FEC_RCNTRL_LOOP		0x00000001

#define FEC_TCNTRL_FDEN		0x00000004
#define FEC_TCNTRL_HBC		0x00000002
#define FEC_TCNTRL_GTS		0x00000001

/* Delay to wait for FEC reset command to complete (in us)
*/
#define FEC_RESET_DELAY		50

/* The FEC stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520
138
139/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
140 * tx_bd_base always point to the base of the buffer descriptors.  The
141 * cur_rx and cur_tx point to the currently available buffer.
142 * The dirty_tx tracks the current buffer that is being sent by the
143 * controller.  The cur_tx and dirty_tx are equal under both completely
144 * empty and completely full conditions.  The empty/ready indicator in
145 * the buffer descriptor determines the actual condition.
146 */
struct fec_enet_private {
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct	sk_buff* tx_skbuff[TX_RING_SIZE];
	ushort	skb_cur;	/* next tx_skbuff slot to fill (see start_xmit) */
	ushort	skb_dirty;	/* next tx_skbuff slot to reclaim (see fec_enet_tx) */

	/* CPM dual port RAM relative addresses.
	*/
	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
	cbd_t	*tx_bd_base;
	cbd_t	*cur_rx, *cur_tx;		/* The next free ring entry */
	cbd_t	*dirty_tx;	/* The ring entries to be free()ed. */

	/* Virtual addresses for the receive buffers because we can't
	 * do a __va() on them anymore.
	 */
	unsigned char *rx_vaddr[RX_RING_SIZE];

	struct	net_device_stats stats;	/* interface statistics */
	uint	tx_full;		/* nonzero when the Tx ring has no free entry */
	spinlock_t lock;		/* guards ring/MII state shared with the IRQ path */

#ifdef	CONFIG_USE_MDIO
	uint	phy_id;		/* identifier read from the PHY */
	uint	phy_id_done;
	uint	phy_status;	/* PHY_CONF_*/PHY_STAT_* flags, see mii_parse_*() */
	uint	phy_speed;
	phy_info_t	*phy;	/* matched entry from the phy_info_* tables */
	struct work_struct phy_task;
	struct net_device *dev;

	uint	sequence_done;

	uint	phy_addr;	/* MII bus address, merged into each command word */
#endif	/* CONFIG_USE_MDIO */

	int	link;		/* 1 when the PHY reports link up */
	int	old_link;
	int	full_duplex;

#ifdef CONFIG_FEC_PACKETHOOK
	unsigned long	ph_lock;	/* bit 0 used as a simple claim flag */
	fec_ph_func	*ph_rxhandler;	/* called for received frames matching ph_proto */
	fec_ph_func	*ph_txhandler;	/* called for transmitted frames matching ph_proto */
	__u16		ph_proto;	/* ethertype the hooks filter on */
	volatile __u32	*ph_regaddr;	/* optional register sampled around rx/tx */
	void 		*ph_priv;	/* opaque cookie passed back to the hooks */
#endif
};
196
/* Driver entry points and internal helpers defined below. */
static int fec_enet_open(struct net_device *dev);
static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
#ifdef	CONFIG_USE_MDIO
static void fec_enet_mii(struct net_device *dev);
#endif	/* CONFIG_USE_MDIO */
static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
#ifdef CONFIG_FEC_PACKETHOOK
static void  fec_enet_tx(struct net_device *dev, __u32 regval);
static void  fec_enet_rx(struct net_device *dev, __u32 regval);
#else
static void  fec_enet_tx(struct net_device *dev);
static void  fec_enet_rx(struct net_device *dev);
#endif
static int fec_enet_close(struct net_device *dev);
static struct net_device_stats *fec_enet_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
/* Station address as three 16-bit words.  NOTE(review): file-scope, so a
 * single copy is shared; presumably filled in by probe/init code outside
 * this chunk -- confirm against the rest of the file. */
static	ushort	my_enet_addr[3];
216
217#ifdef	CONFIG_USE_MDIO
218/* MII processing.  We keep this as simple as possible.  Requests are
219 * placed on the list (if there is room).  When the request is finished
220 * by the MII, an optional function may be called.
221 */
typedef struct mii_list {
	uint	mii_regval;	/* command word written to fec_mii_data */
	void	(*mii_func)(uint val, struct net_device *dev);	/* completion callback, may be NULL */
	struct	mii_list *mii_next;
} mii_list_t;

/* Fixed pool of pending MII requests plus free/pending list heads.
 * NOTE(review): these are file-scope (shared by all FEC instances) and
 * are protected by fep->lock in mii_queue()/fec_enet_mii().
 */
#define		NMII	20
mii_list_t	mii_cmds[NMII];
mii_list_t	*mii_free;
mii_list_t	*mii_head;
mii_list_t	*mii_tail;

static int	mii_queue(struct net_device *dev, int request,
				void (*func)(uint, struct net_device *));

/* Make MII read/write commands for the FEC.
*/
#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
						(VAL & 0xffff))
#define mk_mii_end	0	/* terminates a phy_cmd_t list */
#endif	/* CONFIG_USE_MDIO */

/* Transmitter timeout.
*/
#define TX_TIMEOUT (2*HZ)
248
249#ifdef	CONFIG_USE_MDIO
250/* Register definitions for the PHY.
251*/
252
/* Standard MII management registers (IEEE 802.3). */
#define MII_REG_CR          0  /* Control Register                         */
#define MII_REG_SR          1  /* Status Register                          */
#define MII_REG_PHYIR1      2  /* PHY Identification Register 1            */
#define MII_REG_PHYIR2      3  /* PHY Identification Register 2            */
#define MII_REG_ANAR        4  /* A-N Advertisement Register               */
#define MII_REG_ANLPAR      5  /* A-N Link Partner Ability Register        */
#define MII_REG_ANER        6  /* A-N Expansion Register                   */
#define MII_REG_ANNPTR      7  /* A-N Next Page Transmit Register          */
#define MII_REG_ANLPRNPR    8  /* A-N Link Partner Received Next Page Reg. */

/* values for phy_status */

#define PHY_CONF_ANE	0x0001  /* 1 auto-negotiation enabled */
#define PHY_CONF_LOOP	0x0002  /* 1 loopback mode enabled */
#define PHY_CONF_SPMASK	0x00f0  /* mask for speed */
#define PHY_CONF_10HDX	0x0010  /* 10 Mbit half duplex supported */
#define PHY_CONF_10FDX	0x0020  /* 10 Mbit full duplex supported */
#define PHY_CONF_100HDX	0x0040  /* 100 Mbit half duplex supported */
#define PHY_CONF_100FDX	0x0080  /* 100 Mbit full duplex supported */

#define PHY_STAT_LINK	0x0100  /* 1 up - 0 down */
#define PHY_STAT_FAULT	0x0200  /* 1 remote fault */
#define PHY_STAT_ANC	0x0400  /* 1 auto-negotiation complete	*/
#define PHY_STAT_SPMASK	0xf000  /* mask for speed */
#define PHY_STAT_10HDX	0x1000  /* 10 Mbit half duplex selected	*/
#define PHY_STAT_10FDX	0x2000  /* 10 Mbit full duplex selected	*/
#define PHY_STAT_100HDX	0x4000  /* 100 Mbit half duplex selected */
#define PHY_STAT_100FDX	0x8000  /* 100 Mbit full duplex selected */
281#endif	/* CONFIG_USE_MDIO */
282
283#ifdef CONFIG_FEC_PACKETHOOK
284int
285fec_register_ph(struct net_device *dev, fec_ph_func *rxfun, fec_ph_func *txfun,
286		__u16 proto, volatile __u32 *regaddr, void *priv)
287{
288	struct fec_enet_private *fep;
289	int retval = 0;
290
291	fep = dev->priv;
292
293	if (test_and_set_bit(0, (void*)&fep->ph_lock) != 0) {
294		/* Someone is messing with the packet hook */
295		return -EAGAIN;
296	}
297	if (fep->ph_rxhandler != NULL || fep->ph_txhandler != NULL) {
298		retval = -EBUSY;
299		goto out;
300	}
301	fep->ph_rxhandler = rxfun;
302	fep->ph_txhandler = txfun;
303	fep->ph_proto = proto;
304	fep->ph_regaddr = regaddr;
305	fep->ph_priv = priv;
306
307	out:
308	fep->ph_lock = 0;
309
310	return retval;
311}
312
313
314int
315fec_unregister_ph(struct net_device *dev)
316{
317	struct fec_enet_private *fep;
318	int retval = 0;
319
320	fep = dev->priv;
321
322	if (test_and_set_bit(0, (void*)&fep->ph_lock) != 0) {
323		/* Someone is messing with the packet hook */
324		return -EAGAIN;
325	}
326
327	fep->ph_rxhandler = fep->ph_txhandler = NULL;
328	fep->ph_proto = 0;
329	fep->ph_regaddr = NULL;
330	fep->ph_priv = NULL;
331
332	fep->ph_lock = 0;
333
334	return retval;
335}
336
337EXPORT_SYMBOL(fec_register_ph);
338EXPORT_SYMBOL(fec_unregister_ph);
339
340#endif /* CONFIG_FEC_PACKETHOOK */
341
/* Queue one skb on the Tx ring and kick the FEC transmitter.
 * Returns 0 on success; returns 1 (busy) when the link is down or the
 * ring is unexpectedly full, in which case the skb is NOT consumed and
 * the core will retry it later.
 */
static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep;
	volatile fec_t	*fecp;
	volatile cbd_t	*bdp;

	fep = dev->priv;
	fecp = (volatile fec_t*)dev->base_addr;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return 1;
	}

	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

#ifndef final_version
	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since dev->tbusy should be set.
		 */
		printk("%s: tx queue full!.\n", dev->name);
		return 1;
	}
#endif

	/* Clear all of the status flags.
	 */
	bdp->cbd_sc &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer.  The FEC DMAs from the
	 * physical address of the skb data; the skb itself is transmitted
	 * in place (no copy).
	*/
	bdp->cbd_bufaddr = __pa(skb->data);
	bdp->cbd_datlen = skb->len;

	/* Save skb pointer so fec_enet_tx() can free it after completion.
	*/
	fep->tx_skbuff[fep->skb_cur] = skb;

	fep->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	flush_dcache_range((unsigned long)skb->data,
			   (unsigned long)skb->data + skb->len);

	/* disable interrupts while triggering transmit */
	spin_lock_irq(&fep->lock);

	/* Send it on its way.  Tell FEC its ready, interrupt when done,
	 * its the last BD of the frame, and to put the CRC on the end.
	 * The READY bit must be set only after the buffer fields above
	 * are complete, since the FEC may pick the descriptor up at once.
	 */

	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);

	dev->trans_start = jiffies;

	/* Trigger transmission start */
	fecp->fec_x_des_active = 0x01000000;

	/* If this was the last BD in the ring, start at the beginning again.
	*/
	if (bdp->cbd_sc & BD_ENET_TX_WRAP) {
		bdp = fep->tx_bd_base;
	} else {
		bdp++;
	}

	/* If the following descriptor is still owned by the FEC the ring
	 * is full: stop the queue until fec_enet_tx() frees an entry.
	 */
	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		netif_stop_queue(dev);
		fep->tx_full = 1;
	}

	fep->cur_tx = (cbd_t *)bdp;

	spin_unlock_irq(&fep->lock);

	return 0;
}
426
/* Transmit watchdog: called by the network core when a transmit has not
 * completed within TX_TIMEOUT.  Logs the event (and, in debug builds,
 * dumps both buffer-descriptor rings), then restarts the queue if the
 * ring is not actually full.
 */
static void
fec_timeout(struct net_device *dev)
{
	struct fec_enet_private *fep = dev->priv;

	printk("%s: transmit timed out.\n", dev->name);
	fep->stats.tx_errors++;
#ifndef final_version
	{
	int	i;
	cbd_t	*bdp;

	printk("Ring data dump: cur_tx %lx%s, dirty_tx %lx cur_rx: %lx\n",
	       (unsigned long)fep->cur_tx, fep->tx_full ? " (full)" : "",
	       (unsigned long)fep->dirty_tx,
	       (unsigned long)fep->cur_rx);

	/* Dump every Tx descriptor: status word, length, buffer address. */
	bdp = fep->tx_bd_base;
	printk(" tx: %u buffers\n",  TX_RING_SIZE);
	for (i = 0 ; i < TX_RING_SIZE; i++) {
		printk("  %08x: %04x %04x %08x\n",
		       (uint) bdp,
		       bdp->cbd_sc,
		       bdp->cbd_datlen,
		       bdp->cbd_bufaddr);
		bdp++;
	}

	/* Same for the Rx ring. */
	bdp = fep->rx_bd_base;
	printk(" rx: %lu buffers\n",  RX_RING_SIZE);
	for (i = 0 ; i < RX_RING_SIZE; i++) {
		printk("  %08x: %04x %04x %08x\n",
		       (uint) bdp,
		       bdp->cbd_sc,
		       bdp->cbd_datlen,
		       bdp->cbd_bufaddr);
		bdp++;
	}
	}
#endif
	if (!fep->tx_full)
		netif_wake_queue(dev);
}
470
/* The interrupt handler.
 * This is called from the MPC core interrupt.
 * Reads and acknowledges all pending FEC events, then dispatches to the
 * rx, tx and MII service routines.  Loops until no events remain so
 * events raised while servicing are not lost.
 */
static	irqreturn_t
fec_enet_interrupt(int irq, void * dev_id)
{
	struct	net_device *dev = dev_id;
	volatile fec_t	*fecp;
	uint	int_events;
#ifdef CONFIG_FEC_PACKETHOOK
	struct	fec_enet_private *fep = dev->priv;
	__u32 regval;

	/* Sample the hook register as early as possible so the handlers
	 * see a value close to the moment of the interrupt. */
	if (fep->ph_regaddr) regval = *fep->ph_regaddr;
#endif
	fecp = (volatile fec_t*)dev->base_addr;

	/* Get the interrupt events that caused us to be here.
	 * Writing the value back acknowledges (clears) those events.
	*/
	while ((int_events = fecp->fec_ievent) != 0) {
		fecp->fec_ievent = int_events;
		if ((int_events & (FEC_ENET_HBERR | FEC_ENET_BABR |
				   FEC_ENET_BABT | FEC_ENET_EBERR)) != 0) {
			printk("FEC ERROR %x\n", int_events);
		}

		/* Handle receive event in its own function.
		 */
		if (int_events & FEC_ENET_RXF) {
#ifdef CONFIG_FEC_PACKETHOOK
			fec_enet_rx(dev, regval);
#else
			fec_enet_rx(dev);
#endif
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		   descriptors. FEC handles all errors, we just discover
		   them as part of the transmit process.
		*/
		if (int_events & FEC_ENET_TXF) {
#ifdef CONFIG_FEC_PACKETHOOK
			fec_enet_tx(dev, regval);
#else
			fec_enet_tx(dev);
#endif
		}

		if (int_events & FEC_ENET_MII) {
#ifdef	CONFIG_USE_MDIO
			fec_enet_mii(dev);
#else
printk("%s[%d] %s: unexpected FEC_ENET_MII event\n", __FILE__,__LINE__,__FUNCTION__);
#endif	/* CONFIG_USE_MDIO */
		}

	}
	return IRQ_RETVAL(IRQ_HANDLED);
}
530
531
/* Reclaim completed Tx descriptors: record per-error statistics, invoke
 * the optional packet hook, free the transmitted skbs and advance
 * dirty_tx.  Wakes the queue if the ring was previously full.
 * Runs from interrupt context (called by fec_enet_interrupt).
 */
static void
#ifdef CONFIG_FEC_PACKETHOOK
fec_enet_tx(struct net_device *dev, __u32 regval)
#else
fec_enet_tx(struct net_device *dev)
#endif
{
	struct	fec_enet_private *fep;
	volatile cbd_t	*bdp;
	struct	sk_buff	*skb;

	fep = dev->priv;
	/* lock while transmitting */
	spin_lock(&fep->lock);
	bdp = fep->dirty_tx;

	/* Walk descriptors the FEC has released (READY cleared). */
	while ((bdp->cbd_sc&BD_ENET_TX_READY) == 0) {
		/* cur_tx == dirty_tx means the ring is either empty or
		 * completely full; tx_full disambiguates.  Stop on empty. */
		if (bdp == fep->cur_tx && fep->tx_full == 0) break;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (bdp->cbd_sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			fep->stats.tx_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_HB)  /* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_LC)  /* Late collision */
				fep->stats.tx_window_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_RL)  /* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_UN)  /* Underrun */
				fep->stats.tx_fifo_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */
				fep->stats.tx_carrier_errors++;
		} else {
#ifdef CONFIG_FEC_PACKETHOOK
			/* Packet hook ... */
			if (fep->ph_txhandler &&
			    ((struct ethhdr *)skb->data)->h_proto
			    == fep->ph_proto) {
				fep->ph_txhandler((__u8*)skb->data, skb->len,
						  regval, fep->ph_priv);
			}
#endif
			fep->stats.tx_packets++;
		}

#ifndef final_version
		if (bdp->cbd_sc & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");
#endif
		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (bdp->cbd_sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq (skb/*, FREE_WRITE*/);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted.
		 */
		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(dev))
				netif_wake_queue(dev);
		}
#ifdef CONFIG_FEC_PACKETHOOK
		/* Re-read register. Not exactly guaranteed to be correct,
		   but... */
		if (fep->ph_regaddr) regval = *fep->ph_regaddr;
#endif
	}
	fep->dirty_tx = (cbd_t *)bdp;
	spin_unlock(&fep->lock);
}
620
621
622/* During a receive, the cur_rx points to the current incoming buffer.
623 * When we update through the ring, if the next incoming buffer has
624 * not been given to the system, we just set the empty indicator,
625 * effectively tossing the packet.
626 */
627static void
628#ifdef CONFIG_FEC_PACKETHOOK
629fec_enet_rx(struct net_device *dev, __u32 regval)
630#else
631fec_enet_rx(struct net_device *dev)
632#endif
633{
634	struct	fec_enet_private *fep;
635	volatile fec_t	*fecp;
636	volatile cbd_t *bdp;
637	struct	sk_buff	*skb;
638	ushort	pkt_len;
639	__u8 *data;
640
641	fep = dev->priv;
642	fecp = (volatile fec_t*)dev->base_addr;
643
644	/* First, grab all of the stats for the incoming packet.
645	 * These get messed up if we get called due to a busy condition.
646	 */
647	bdp = fep->cur_rx;
648
649while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
650
651#ifndef final_version
652	/* Since we have allocated space to hold a complete frame,
653	 * the last indicator should be set.
654	 */
655	if ((bdp->cbd_sc & BD_ENET_RX_LAST) == 0)
656		printk("FEC ENET: rcv is not +last\n");
657#endif
658
659	/* Check for errors. */
660	if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
661			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
662		fep->stats.rx_errors++;
663		if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
664		/* Frame too long or too short. */
665			fep->stats.rx_length_errors++;
666		}
667		if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
668			fep->stats.rx_frame_errors++;
669		if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
670			fep->stats.rx_crc_errors++;
671		if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
672			fep->stats.rx_crc_errors++;
673	}
674
675	/* Report late collisions as a frame error.
676	 * On this error, the BD is closed, but we don't know what we
677	 * have in the buffer.  So, just drop this frame on the floor.
678	 */
679	if (bdp->cbd_sc & BD_ENET_RX_CL) {
680		fep->stats.rx_errors++;
681		fep->stats.rx_frame_errors++;
682		goto rx_processing_done;
683	}
684
685	/* Process the incoming frame.
686	 */
687	fep->stats.rx_packets++;
688	pkt_len = bdp->cbd_datlen;
689	fep->stats.rx_bytes += pkt_len;
690	data = fep->rx_vaddr[bdp - fep->rx_bd_base];
691
692#ifdef CONFIG_FEC_PACKETHOOK
693	/* Packet hook ... */
694	if (fep->ph_rxhandler) {
695		if (((struct ethhdr *)data)->h_proto == fep->ph_proto) {
696			switch (fep->ph_rxhandler(data, pkt_len, regval,
697						  fep->ph_priv)) {
698			case 1:
699				goto rx_processing_done;
700				break;
701			case 0:
702				break;
703			default:
704				fep->stats.rx_errors++;
705				goto rx_processing_done;
706			}
707		}
708	}
709
710	/* If it wasn't filtered - copy it to an sk buffer. */
711#endif
712
713	/* This does 16 byte alignment, exactly what we need.
714	 * The packet length includes FCS, but we don't want to
715	 * include that when passing upstream as it messes up
716	 * bridging applications.
717	 */
718	skb = dev_alloc_skb(pkt_len-4);
719
720	if (skb == NULL) {
721		printk("%s: Memory squeeze, dropping packet.\n", dev->name);
722		fep->stats.rx_dropped++;
723	} else {
724		skb_put(skb,pkt_len-4);	/* Make room */
725		eth_copy_and_sum(skb, data, pkt_len-4, 0);
726		skb->protocol=eth_type_trans(skb,dev);
727		netif_rx(skb);
728	}
729  rx_processing_done:
730
731	/* Clear the status flags for this buffer.
732	*/
733	bdp->cbd_sc &= ~BD_ENET_RX_STATS;
734
735	/* Mark the buffer empty.
736	*/
737	bdp->cbd_sc |= BD_ENET_RX_EMPTY;
738
739	/* Update BD pointer to next entry.
740	*/
741	if (bdp->cbd_sc & BD_ENET_RX_WRAP)
742		bdp = fep->rx_bd_base;
743	else
744		bdp++;
745
746	/* Doing this here will keep the FEC running while we process
747	 * incoming frames.  On a heavily loaded network, we should be
748	 * able to keep up at the expense of system resources.
749	 */
750	fecp->fec_r_des_active = 0x01000000;
751#ifdef CONFIG_FEC_PACKETHOOK
752	/* Re-read register. Not exactly guaranteed to be correct,
753	   but... */
754	if (fep->ph_regaddr) regval = *fep->ph_regaddr;
755#endif
756   } /* while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) */
757	fep->cur_rx = (cbd_t *)bdp;
758
759}
760
761
#ifdef	CONFIG_USE_MDIO
/* MII transfer-complete handler: pops the finished request off the
 * pending list, invokes its callback with the MII data register value,
 * returns the node to the free list, and starts the next queued request
 * (the FEC performs one MII transaction at a time).
 * Called from interrupt context on FEC_ENET_MII.
 */
static void
fec_enet_mii(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile fec_t	*ep;
	mii_list_t	*mip;
	uint		mii_reg;

	fep = (struct fec_enet_private *)dev->priv;
	ep = &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec);
	mii_reg = ep->fec_mii_data;

	if ((mip = mii_head) == NULL) {
		/* Interrupt with nothing queued -- should not happen. */
		printk("MII and no head!\n");
		return;
	}

	if (mip->mii_func != NULL)
		(*(mip->mii_func))(mii_reg, dev);

	/* Unlink the completed request and recycle its node. */
	mii_head = mip->mii_next;
	mip->mii_next = mii_free;
	mii_free = mip;

	/* Kick off the next queued MII command, if any. */
	if ((mip = mii_head) != NULL) {
		ep->fec_mii_data = mip->mii_regval;

	}
}
792
/* Queue an MII command word for execution, with an optional completion
 * callback.  If the bus is idle the command is written to the hardware
 * immediately; otherwise it waits on the pending list and is started by
 * fec_enet_mii().  Returns 0 on success, 1 if the request pool is
 * exhausted.
 */
static int
mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
{
	struct fec_enet_private *fep;
	unsigned long	flags;
	mii_list_t	*mip;
	int		retval;

	/* Add PHY address to register command.
	*/
	fep = dev->priv;
	regval |= fep->phy_addr << 23;

	retval = 0;

	/* lock while modifying mii_list */
	spin_lock_irqsave(&fep->lock, flags);

	if ((mip = mii_free) != NULL) {
		mii_free = mip->mii_next;
		mip->mii_regval = regval;
		mip->mii_func = func;
		mip->mii_next = NULL;
		if (mii_head) {
			/* Bus busy: append to the pending list. */
			mii_tail->mii_next = mip;
			mii_tail = mip;
		} else {
			/* Bus idle: start the transaction right away. */
			mii_head = mii_tail = mip;
			(&(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec))->fec_mii_data = regval;
		}
	} else {
		/* No free request nodes. */
		retval = 1;
	}

	spin_unlock_irqrestore(&fep->lock, flags);

	return(retval);
}
831
832static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
833{
834	int k;
835
836	if(!c)
837		return;
838
839	for(k = 0; (c+k)->mii_data != mk_mii_end; k++)
840		mii_queue(dev, (c+k)->mii_data, (c+k)->funct);
841}
842
843static void mii_parse_sr(uint mii_reg, struct net_device *dev)
844{
845	struct fec_enet_private *fep = dev->priv;
846	volatile uint *s = &(fep->phy_status);
847
848	*s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
849
850	if (mii_reg & 0x0004)
851		*s |= PHY_STAT_LINK;
852	if (mii_reg & 0x0010)
853		*s |= PHY_STAT_FAULT;
854	if (mii_reg & 0x0020)
855		*s |= PHY_STAT_ANC;
856
857	fep->link = (*s & PHY_STAT_LINK) ? 1 : 0;
858}
859
860static void mii_parse_cr(uint mii_reg, struct net_device *dev)
861{
862	struct fec_enet_private *fep = dev->priv;
863	volatile uint *s = &(fep->phy_status);
864
865	*s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP);
866
867	if (mii_reg & 0x1000)
868		*s |= PHY_CONF_ANE;
869	if (mii_reg & 0x4000)
870		*s |= PHY_CONF_LOOP;
871}
872
873static void mii_parse_anar(uint mii_reg, struct net_device *dev)
874{
875	struct fec_enet_private *fep = dev->priv;
876	volatile uint *s = &(fep->phy_status);
877
878	*s &= ~(PHY_CONF_SPMASK);
879
880	if (mii_reg & 0x0020)
881		*s |= PHY_CONF_10HDX;
882	if (mii_reg & 0x0040)
883		*s |= PHY_CONF_10FDX;
884	if (mii_reg & 0x0080)
885		*s |= PHY_CONF_100HDX;
886	if (mii_reg & 0x00100)
887		*s |= PHY_CONF_100FDX;
888}
889
890/* ------------------------------------------------------------------------- */
891/* The Level one LXT970 is used by many boards				     */
892
893#ifdef CONFIG_FEC_LXT970
894
/* LXT970 vendor-specific MII registers. */
#define MII_LXT970_MIRROR    16  /* Mirror register           */
#define MII_LXT970_IER       17  /* Interrupt Enable Register */
#define MII_LXT970_ISR       18  /* Interrupt Status Register */
#define MII_LXT970_CONFIG    19  /* Configuration Register    */
#define MII_LXT970_CSR       20  /* Chip Status Register      */
900
901static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
902{
903	struct fec_enet_private *fep = dev->priv;
904	volatile uint *s = &(fep->phy_status);
905
906	*s &= ~(PHY_STAT_SPMASK);
907
908	if (mii_reg & 0x0800) {
909		if (mii_reg & 0x1000)
910			*s |= PHY_STAT_100FDX;
911		else
912			*s |= PHY_STAT_100HDX;
913	}
914	else {
915		if (mii_reg & 0x1000)
916			*s |= PHY_STAT_10FDX;
917		else
918			*s |= PHY_STAT_10HDX;
919	}
920}
921
/* PHY descriptor for the LXT970: command lists for each driver phase. */
static phy_info_t phy_info_lxt970 = {
	0x07810000,	/* id */
	"LXT970",	/* name */

	(const phy_cmd_t []) {  /* config */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup - enable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* ack_int */
		/* read SR and ISR to acknowledge */

		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT970_ISR), NULL },

		/* find out the current status */

		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
		{ mk_mii_end, }
	},
};
952
953#endif /* CONFIG_FEC_LXT970 */
954
955/* ------------------------------------------------------------------------- */
956/* The Level one LXT971 is used on some of my custom boards                  */
957
958#ifdef CONFIG_FEC_LXT971
959
960/* register definitions for the 971 */
961
/* LXT971 vendor-specific MII registers. */
#define MII_LXT971_PCR       16  /* Port Control Register     */
#define MII_LXT971_SR2       17  /* Status Register 2         */
#define MII_LXT971_IER       18  /* Interrupt Enable Register */
#define MII_LXT971_ISR       19  /* Interrupt Status Register */
#define MII_LXT971_LCR       20  /* LED Control Register      */
#define MII_LXT971_TCR       30  /* Transmit Control Register */
968
969/*
970 * I had some nice ideas of running the MDIO faster...
971 * The 971 should support 8MHz and I tried it, but things acted really
972 * weird, so 2.5 MHz ought to be enough for anyone...
973 */
974
975static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
976{
977	struct fec_enet_private *fep = dev->priv;
978	volatile uint *s = &(fep->phy_status);
979
980	*s &= ~(PHY_STAT_SPMASK);
981
982	if (mii_reg & 0x4000) {
983		if (mii_reg & 0x0200)
984			*s |= PHY_STAT_100FDX;
985		else
986			*s |= PHY_STAT_100HDX;
987	}
988	else {
989		if (mii_reg & 0x0200)
990			*s |= PHY_STAT_10FDX;
991		else
992			*s |= PHY_STAT_10HDX;
993	}
994	if (mii_reg & 0x0008)
995		*s |= PHY_STAT_FAULT;
996}
997
/* PHY descriptor for the LXT971: command lists for each driver phase. */
static phy_info_t phy_info_lxt971 = {
	0x0001378e,	/* id */
	"LXT971",	/* name */

	(const phy_cmd_t []) {  /* config */
//		{ mk_mii_write(MII_REG_ANAR, 0x021), NULL }, /* 10  Mbps, HD */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup - enable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */

		/* Somehow does the 971 tell me that the link is down
		 * the first read after power-up.
		 * read here to get a valid value in ack_int */

		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* ack_int */
		/* find out the current status */

		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },

		/* we only need to read ISR to acknowledge */

		{ mk_mii_read(MII_LXT971_ISR), NULL },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
		{ mk_mii_end, }
	},
};
1035
#endif /* CONFIG_FEC_LXT971 */
1037
1038
1039/* ------------------------------------------------------------------------- */
1040/* The Quality Semiconductor QS6612 is used on the RPX CLLF                  */
1041
1042#ifdef CONFIG_FEC_QS6612
1043
1044/* register definitions */
1045
/* QS6612 vendor-specific MII registers. */
#define MII_QS6612_MCR       17  /* Mode Control Register      */
#define MII_QS6612_FTR       27  /* Factory Test Register      */
#define MII_QS6612_MCO       28  /* Misc. Control Register     */
#define MII_QS6612_ISR       29  /* Interrupt Source Register  */
#define MII_QS6612_IMR       30  /* Interrupt Mask Register    */
#define MII_QS6612_PCR       31  /* 100BaseTx PHY Control Reg. */
1052
1053static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
1054{
1055	struct fec_enet_private *fep = dev->priv;
1056	volatile uint *s = &(fep->phy_status);
1057
1058	*s &= ~(PHY_STAT_SPMASK);
1059
1060	switch((mii_reg >> 2) & 7) {
1061	case 1: *s |= PHY_STAT_10HDX; break;
1062	case 2: *s |= PHY_STAT_100HDX; break;
1063	case 5: *s |= PHY_STAT_10FDX; break;
1064	case 6: *s |= PHY_STAT_100FDX; break;
1065	}
1066}
1067
/* PHY descriptor for the QS6612: command lists for each driver phase. */
static phy_info_t phy_info_qs6612 = {
	0x00181440,	/* id */
	"QS6612",	/* name */

	(const phy_cmd_t []) {  /* config */
//	{ mk_mii_write(MII_REG_ANAR, 0x061), NULL }, /* 10  Mbps */

		/* The PHY powers up isolated on the RPX,
		 * so send a command to allow operation.
		 */

		{ mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },

		/* parse cr and anar to get some info */

		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup - enable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* ack_int */

		/* we need to read ISR, SR and ANER to acknowledge */

		{ mk_mii_read(MII_QS6612_ISR), NULL },
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_REG_ANER), NULL },

		/* read pcr to get info */

		{ mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
		{ mk_mii_end, }
	},
};
1110
1111#endif /* CONFIG_FEC_QS6612 */
1112
1113/* ------------------------------------------------------------------------- */
1114/* The Advanced Micro Devices AM79C874 is used on the ICU862		     */
1115
1116#ifdef CONFIG_FEC_AM79C874
1117
1118/* register definitions for the 79C874 */
1119
1120#define MII_AM79C874_MFR	16  /* Miscellaneous Features Register      */
1121#define MII_AM79C874_ICSR	17  /* Interrupt Control/Status Register    */
1122#define MII_AM79C874_DR		18  /* Diagnostic Register		    */
1123#define MII_AM79C874_PMLR	19  /* Power Management & Loopback Register */
1124#define MII_AM79C874_MCR	21  /* Mode Control Register		    */
1125#define MII_AM79C874_DC		23  /* Disconnect Counter		    */
1126#define MII_AM79C874_REC	24  /* Receiver Error Counter		    */
1127
1128static void mii_parse_amd79c874_dr(uint mii_reg, struct net_device *dev, uint data)
1129{
1130	volatile struct fec_enet_private *fep = dev->priv;
1131	uint s = fep->phy_status;
1132
1133	s &= ~(PHY_STAT_SPMASK);
1134
1135	/* Register 18: Bit 10 is data rate, 11 is Duplex */
1136	switch ((mii_reg >> 10) & 3) {
1137	case 0:	s |= PHY_STAT_10HDX;	break;
1138	case 1:	s |= PHY_STAT_100HDX;	break;
1139	case 2:	s |= PHY_STAT_10FDX;	break;
1140	case 3:	s |= PHY_STAT_100FDX;	break;
1141	}
1142
1143	fep->phy_status = s;
1144}
1145
/* PHY driver table entry for the AMD AM79C874 (ID 0x00022561), used
 * on the ICU862.  Command lists follow the same config / startup /
 * ack_int / shutdown layout consumed by mii_do_cmd().
 */
static phy_info_t phy_info_amd79c874 = {
	0x00022561,
	"AM79C874",

	(const phy_cmd_t []) {  /* config */
//		{ mk_mii_write(MII_REG_ANAR, 0x021), NULL }, /* 10  Mbps, HD */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup - enable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* ack_int */
		/* find out the current status */

		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_amd79c874_dr },

		/* we only need to read ICSR to acknowledge */

		{ mk_mii_read(MII_AM79C874_ICSR), NULL },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	},
};
1177
1178#endif /* CONFIG_FEC_AM79C874 */
1179
/* All PHY drivers compiled into this build.  mii_discover_phy3()
 * scans this table linearly to match the probed PHY ID, so it must
 * remain NULL-terminated.
 */
static phy_info_t *phy_info[] = {

#ifdef CONFIG_FEC_LXT970
	&phy_info_lxt970,
#endif /* CONFIG_FEC_LXT970 */

#ifdef CONFIG_FEC_LXT971
	&phy_info_lxt971,
#endif /* CONFIG_FEC_LXT971 */

#ifdef CONFIG_FEC_QS6612
	&phy_info_qs6612,
#endif /* CONFIG_FEC_QS6612 */

#ifdef CONFIG_FEC_AM79C874
	&phy_info_amd79c874,
#endif /* CONFIG_FEC_AM79C874 */

	NULL
};
1200
1201static void mii_display_status(struct net_device *dev)
1202{
1203	struct fec_enet_private *fep = dev->priv;
1204	volatile uint *s = &(fep->phy_status);
1205
1206	if (!fep->link && !fep->old_link) {
1207		/* Link is still down - don't print anything */
1208		return;
1209	}
1210
1211	printk("%s: status: ", dev->name);
1212
1213	if (!fep->link) {
1214		printk("link down");
1215	} else {
1216		printk("link up");
1217
1218		switch(*s & PHY_STAT_SPMASK) {
1219		case PHY_STAT_100FDX: printk(", 100 Mbps Full Duplex"); break;
1220		case PHY_STAT_100HDX: printk(", 100 Mbps Half Duplex"); break;
1221		case PHY_STAT_10FDX: printk(", 10 Mbps Full Duplex"); break;
1222		case PHY_STAT_10HDX: printk(", 10 Mbps Half Duplex"); break;
1223		default:
1224			printk(", Unknown speed/duplex");
1225		}
1226
1227		if (*s & PHY_STAT_ANC)
1228			printk(", auto-negotiation complete");
1229	}
1230
1231	if (*s & PHY_STAT_FAULT)
1232		printk(", remote fault");
1233
1234	printk(".\n");
1235}
1236
1237static void mii_display_config(struct work_struct *work)
1238{
1239	struct fec_enet_private *fep =
1240		container_of(work, struct fec_enet_private, phy_task);
1241	struct net_device *dev = fep->dev;
1242	volatile uint *s = &(fep->phy_status);
1243
1244	printk("%s: config: auto-negotiation ", dev->name);
1245
1246	if (*s & PHY_CONF_ANE)
1247		printk("on");
1248	else
1249		printk("off");
1250
1251	if (*s & PHY_CONF_100FDX)
1252		printk(", 100FDX");
1253	if (*s & PHY_CONF_100HDX)
1254		printk(", 100HDX");
1255	if (*s & PHY_CONF_10FDX)
1256		printk(", 10FDX");
1257	if (*s & PHY_CONF_10HDX)
1258		printk(", 10HDX");
1259	if (!(*s & PHY_CONF_SPMASK))
1260		printk(", No speed/duplex selected?");
1261
1262	if (*s & PHY_CONF_LOOP)
1263		printk(", loopback enabled");
1264
1265	printk(".\n");
1266
1267	fep->sequence_done = 1;
1268}
1269
1270static void mii_relink(struct work_struct *work)
1271{
1272	struct fec_enet_private *fep =
1273		container_of(work, struct fec_enet_private, phy_task);
1274	struct net_device *dev = fep->dev;
1275	int duplex;
1276
1277	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
1278	mii_display_status(dev);
1279	fep->old_link = fep->link;
1280
1281	if (fep->link) {
1282		duplex = 0;
1283		if (fep->phy_status
1284		    & (PHY_STAT_100FDX | PHY_STAT_10FDX))
1285			duplex = 1;
1286		fec_restart(dev, duplex);
1287	}
1288	else
1289		fec_stop(dev);
1290
1291
1292}
1293
/* MII completion hook: defer the link-change handling to process
 * context by scheduling mii_relink() on the shared phy_task work
 * item.  (fec_restart()/fec_stop() must not run in interrupt
 * context.)  Note phy_task is shared with mii_queue_config(), so at
 * most one of relink/config can be pending at a time.
 */
static void mii_queue_relink(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = dev->priv;

	/* Stash the device for the work handler before arming the work. */
	fep->dev = dev;
	INIT_WORK(&fep->phy_task, mii_relink);
	schedule_work(&fep->phy_task);
}
1302
/* MII completion hook: defer printing of the PHY configuration to
 * process context via mii_display_config(), which also sets
 * sequence_done to release fec_enet_open().  Shares phy_task with
 * mii_queue_relink() (see note there).
 */
static void mii_queue_config(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = dev->priv;

	/* Stash the device for the work handler before arming the work. */
	fep->dev = dev;
	INIT_WORK(&fep->phy_task, mii_display_config);
	schedule_work(&fep->phy_task);
}
1311
1312
1313
/* One-shot MII command lists: each reads the (harmless) control
 * register purely to obtain a completion callback that schedules the
 * relink / config work item in process context.
 */
phy_cmd_t phy_cmd_relink[] = { { mk_mii_read(MII_REG_CR), mii_queue_relink },
			       { mk_mii_end, } };
phy_cmd_t phy_cmd_config[] = { { mk_mii_read(MII_REG_CR), mii_queue_config },
			       { mk_mii_end, } };
1318
1319
1320
1321/* Read remainder of PHY ID.
1322*/
1323static void
1324mii_discover_phy3(uint mii_reg, struct net_device *dev)
1325{
1326	struct fec_enet_private *fep;
1327	int	i;
1328
1329	fep = dev->priv;
1330	fep->phy_id |= (mii_reg & 0xffff);
1331
1332	for(i = 0; phy_info[i]; i++)
1333		if(phy_info[i]->id == (fep->phy_id >> 4))
1334			break;
1335
1336	if(!phy_info[i])
1337		panic("%s: PHY id 0x%08x is not supported!\n",
1338		      dev->name, fep->phy_id);
1339
1340	fep->phy = phy_info[i];
1341	fep->phy_id_done = 1;
1342
1343	printk("%s: Phy @ 0x%x, type %s (0x%08x)\n",
1344		dev->name, fep->phy_addr, fep->phy->name, fep->phy_id);
1345}
1346
1347/* Scan all of the MII PHY addresses looking for someone to respond
1348 * with a valid ID.  This usually happens quickly.
1349 */
1350static void
1351mii_discover_phy(uint mii_reg, struct net_device *dev)
1352{
1353	struct fec_enet_private *fep;
1354	uint	phytype;
1355
1356	fep = dev->priv;
1357
1358	if ((phytype = (mii_reg & 0xffff)) != 0xffff) {
1359
1360		/* Got first part of ID, now get remainder.
1361		*/
1362		fep->phy_id = phytype << 16;
1363		mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), mii_discover_phy3);
1364	} else {
1365		fep->phy_addr++;
1366		if (fep->phy_addr < 32) {
1367			mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
1368							mii_discover_phy);
1369		} else {
1370			printk("fec: No PHY device found.\n");
1371		}
1372	}
1373}
1374#endif	/* CONFIG_USE_MDIO */
1375
/* This interrupt occurs when the PHY detects a link change.
 *
 * On RPX CLASSIC boards the handler is installed through the CPM
 * interrupt controller and returns void; on everything else it is a
 * regular Linux IRQ handler returning irqreturn_t -- hence the split
 * prototype below.
*/
static
#ifdef CONFIG_RPXCLASSIC
void mii_link_interrupt(void *dev_id)
#else
irqreturn_t mii_link_interrupt(int irq, void * dev_id)
#endif
{
#ifdef	CONFIG_USE_MDIO
	struct	net_device *dev = dev_id;
	struct fec_enet_private *fep = dev->priv;
	volatile immap_t *immap = (immap_t *)IMAP_ADDR;
	volatile fec_t *fecp = &(immap->im_cpm.cp_fec);
	/* Snapshot ECNTRL so the original state can be restored below. */
	unsigned int ecntrl = fecp->fec_ecntrl;

	/* We need the FEC enabled to access the MII
	*/
	if ((ecntrl & FEC_ECNTRL_ETHER_EN) == 0) {
		fecp->fec_ecntrl |= FEC_ECNTRL_ETHER_EN;
	}
#endif	/* CONFIG_USE_MDIO */



#ifdef	CONFIG_USE_MDIO
	/* Acknowledge the PHY interrupt, then queue the relink command
	 * list, whose callback restarts the FEC and prints the status.
	 */
	mii_do_cmd(dev, fep->phy->ack_int);
	mii_do_cmd(dev, phy_cmd_relink);  /* restart and display status */

	if ((ecntrl & FEC_ECNTRL_ETHER_EN) == 0) {
		fecp->fec_ecntrl = ecntrl;	/* restore old settings */
	}
#else
printk("%s[%d] %s: unexpected Link interrupt\n", __FILE__,__LINE__,__FUNCTION__);
#endif	/* CONFIG_USE_MDIO */

#ifndef CONFIG_RPXCLASSIC
	return IRQ_RETVAL(IRQ_HANDLED);
#endif	/* CONFIG_RPXCLASSIC */
}
1416
/* Network device open: bring the interface up.
 *
 * With MDIO: run the PHY's ack_int and config command lists, queue
 * the config-display command, then spin (yielding via schedule())
 * until the completion work sets sequence_done, start the PHY
 * interrupts, and enable the TX queue.  Returns 0 on success or
 * -ENODEV if PHY discovery never matched a supported PHY.
 * Without MDIO: just assume link-up and start the queue.
 */
static int
fec_enet_open(struct net_device *dev)
{
	struct fec_enet_private *fep = dev->priv;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

#ifdef	CONFIG_USE_MDIO
	fep->sequence_done = 0;
	fep->link = 0;

	if (fep->phy) {
		mii_do_cmd(dev, fep->phy->ack_int);
		mii_do_cmd(dev, fep->phy->config);
		mii_do_cmd(dev, phy_cmd_config);  /* display configuration */
		/* Busy-wait (cooperatively) until mii_display_config()
		 * signals that the config sequence has completed.
		 */
		while(!fep->sequence_done)
			schedule();

		mii_do_cmd(dev, fep->phy->startup);
		netif_start_queue(dev);
		return 0;		/* Success */
	}
	return -ENODEV;		/* No PHY we understand */
#else
	fep->link = 1;
	netif_start_queue(dev);
	return 0;	/* Success */
#endif	/* CONFIG_USE_MDIO */

}
1449
/* Network device stop: halt the TX queue and gracefully stop the
 * controller.  Ring buffers and PHY state are left as-is (see the
 * matching caveat in fec_enet_open()).
 */
static int
fec_enet_close(struct net_device *dev)
{
	/* Don't know what to do yet.
	*/
	netif_stop_queue(dev);
	fec_stop(dev);

	return 0;
}
1460
1461static struct net_device_stats *fec_enet_get_stats(struct net_device *dev)
1462{
1463	struct fec_enet_private *fep = (struct fec_enet_private *)dev->priv;
1464
1465	return &fep->stats;
1466}
1467
/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 *
 * NOTE(review): two gaps relative to a full implementation, left
 * as-is to preserve behavior:
 *  1. When IFF_ALLMULTI is dropped, the all-ones hash written below is
 *     never cleared, so the controller keeps accepting all multicast
 *     until the next fec_restart() -- confirm whether that is intended.
 *  2. Per-address multicast filtering (hashing each entry of the
 *     device's multicast list) is not implemented; only the ALLMULTI
 *     catch-all is handled.
 */

static void set_multicast_list(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile fec_t *ep;

	fep = (struct fec_enet_private *)dev->priv;
	/* FEC register block inside the MPC8xx internal memory map. */
	ep = &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec);

	if (dev->flags&IFF_PROMISC) {

		/* Log any net taps. */
		printk("%s: Promiscuous mode enabled.\n", dev->name);
		ep->fec_r_cntrl |= FEC_RCNTRL_PROM;
	} else {

		ep->fec_r_cntrl &= ~FEC_RCNTRL_PROM;

		if (dev->flags & IFF_ALLMULTI) {
			/* Catch all multicast addresses, so set the
			 * filter to all 1's.
			 */
			ep->fec_hash_table_high = 0xffffffff;
			ep->fec_hash_table_low = 0xffffffff;
		}
	}
}
1504
1505/* Initialize the FEC Ethernet on 860T.
1506 */
1507static int __init fec_enet_init(void)
1508{
1509	struct net_device *dev;
1510	struct fec_enet_private *fep;
1511	int i, j, k, err;
1512	unsigned char	*eap, *iap, *ba;
1513	dma_addr_t	mem_addr;
1514	volatile	cbd_t	*bdp;
1515	cbd_t		*cbd_base;
1516	volatile	immap_t	*immap;
1517	volatile	fec_t	*fecp;
1518	bd_t		*bd;
1519#ifdef CONFIG_SCC_ENET
1520	unsigned char	tmpaddr[6];
1521#endif
1522
1523	immap = (immap_t *)IMAP_ADDR;	/* pointer to internal registers */
1524
1525	bd = (bd_t *)__res;
1526
1527	dev = alloc_etherdev(sizeof(*fep));
1528	if (!dev)
1529		return -ENOMEM;
1530
1531	fep = dev->priv;
1532
1533	fecp = &(immap->im_cpm.cp_fec);
1534
1535	/* Whack a reset.  We should wait for this.
1536	*/
1537	fecp->fec_ecntrl = FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET;
1538	for (i = 0;
1539	     (fecp->fec_ecntrl & FEC_ECNTRL_RESET) && (i < FEC_RESET_DELAY);
1540	     ++i) {
1541		udelay(1);
1542	}
1543	if (i == FEC_RESET_DELAY) {
1544		printk ("FEC Reset timeout!\n");
1545	}
1546
1547	/* Set the Ethernet address.  If using multiple Enets on the 8xx,
1548	 * this needs some work to get unique addresses.
1549	 */
1550	eap = (unsigned char *)my_enet_addr;
1551	iap = bd->bi_enetaddr;
1552
1553#ifdef CONFIG_SCC_ENET
1554	/*
1555         * If a board has Ethernet configured both on a SCC and the
1556         * FEC, it needs (at least) 2 MAC addresses (we know that Sun
1557         * disagrees, but anyway). For the FEC port, we create
1558         * another address by setting one of the address bits above
1559         * something that would have (up to now) been allocated.
1560	 */
1561	for (i=0; i<6; i++)
1562		tmpaddr[i] = *iap++;
1563	tmpaddr[3] |= 0x80;
1564	iap = tmpaddr;
1565#endif
1566
1567	for (i=0; i<6; i++) {
1568		dev->dev_addr[i] = *eap++ = *iap++;
1569	}
1570
1571	/* Allocate memory for buffer descriptors.
1572	*/
1573	if (((RX_RING_SIZE + TX_RING_SIZE) * sizeof(cbd_t)) > PAGE_SIZE) {
1574		printk("FEC init error.  Need more space.\n");
1575		printk("FEC initialization failed.\n");
1576		return 1;
1577	}
1578	cbd_base = (cbd_t *)dma_alloc_coherent(dev->class_dev.dev, PAGE_SIZE,
1579					       &mem_addr, GFP_KERNEL);
1580
1581	/* Set receive and transmit descriptor base.
1582	*/
1583	fep->rx_bd_base = cbd_base;
1584	fep->tx_bd_base = cbd_base + RX_RING_SIZE;
1585
1586	fep->skb_cur = fep->skb_dirty = 0;
1587
1588	/* Initialize the receive buffer descriptors.
1589	*/
1590	bdp = fep->rx_bd_base;
1591	k = 0;
1592	for (i=0; i<FEC_ENET_RX_PAGES; i++) {
1593
1594		/* Allocate a page.
1595		*/
1596		ba = (unsigned char *)dma_alloc_coherent(dev->class_dev.dev,
1597							 PAGE_SIZE,
1598							 &mem_addr,
1599							 GFP_KERNEL);
1600		/* BUG: no check for failure */
1601
1602		/* Initialize the BD for every fragment in the page.
1603		*/
1604		for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
1605			bdp->cbd_sc = BD_ENET_RX_EMPTY;
1606			bdp->cbd_bufaddr = mem_addr;
1607			fep->rx_vaddr[k++] = ba;
1608			mem_addr += FEC_ENET_RX_FRSIZE;
1609			ba += FEC_ENET_RX_FRSIZE;
1610			bdp++;
1611		}
1612	}
1613
1614	/* Set the last buffer to wrap.
1615	*/
1616	bdp--;
1617	bdp->cbd_sc |= BD_SC_WRAP;
1618
1619#ifdef CONFIG_FEC_PACKETHOOK
1620	fep->ph_lock = 0;
1621	fep->ph_rxhandler = fep->ph_txhandler = NULL;
1622	fep->ph_proto = 0;
1623	fep->ph_regaddr = NULL;
1624	fep->ph_priv = NULL;
1625#endif
1626
1627	/* Install our interrupt handler.
1628	*/
1629	if (request_irq(FEC_INTERRUPT, fec_enet_interrupt, 0, "fec", dev) != 0)
1630		panic("Could not allocate FEC IRQ!");
1631
1632#ifdef CONFIG_RPXCLASSIC
1633	/* Make Port C, bit 15 an input that causes interrupts.
1634	*/
1635	immap->im_ioport.iop_pcpar &= ~0x0001;
1636	immap->im_ioport.iop_pcdir &= ~0x0001;
1637	immap->im_ioport.iop_pcso  &= ~0x0001;
1638	immap->im_ioport.iop_pcint |=  0x0001;
1639	cpm_install_handler(CPMVEC_PIO_PC15, mii_link_interrupt, dev);
1640
1641	/* Make LEDS reflect Link status.
1642	*/
1643	*((uint *) RPX_CSR_ADDR) &= ~BCSR2_FETHLEDMODE;
1644#endif
1645
1646#ifdef PHY_INTERRUPT
1647	((immap_t *)IMAP_ADDR)->im_siu_conf.sc_siel |=
1648		(0x80000000 >> PHY_INTERRUPT);
1649
1650	if (request_irq(PHY_INTERRUPT, mii_link_interrupt, 0, "mii", dev) != 0)
1651		panic("Could not allocate MII IRQ!");
1652#endif
1653
1654	dev->base_addr = (unsigned long)fecp;
1655
1656	/* The FEC Ethernet specific entries in the device structure. */
1657	dev->open = fec_enet_open;
1658	dev->hard_start_xmit = fec_enet_start_xmit;
1659	dev->tx_timeout = fec_timeout;
1660	dev->watchdog_timeo = TX_TIMEOUT;
1661	dev->stop = fec_enet_close;
1662	dev->get_stats = fec_enet_get_stats;
1663	dev->set_multicast_list = set_multicast_list;
1664
1665#ifdef	CONFIG_USE_MDIO
1666	for (i=0; i<NMII-1; i++)
1667		mii_cmds[i].mii_next = &mii_cmds[i+1];
1668	mii_free = mii_cmds;
1669#endif	/* CONFIG_USE_MDIO */
1670
1671	/* Configure all of port D for MII.
1672	*/
1673	immap->im_ioport.iop_pdpar = 0x1fff;
1674
1675	/* Bits moved from Rev. D onward.
1676	*/
1677	if ((mfspr(SPRN_IMMR) & 0xffff) < 0x0501)
1678		immap->im_ioport.iop_pddir = 0x1c58;	/* Pre rev. D */
1679	else
1680		immap->im_ioport.iop_pddir = 0x1fff;	/* Rev. D and later */
1681
1682#ifdef	CONFIG_USE_MDIO
1683	/* Set MII speed to 2.5 MHz
1684	*/
1685	fecp->fec_mii_speed = fep->phy_speed =
1686		(( (bd->bi_intfreq + 500000) / 2500000 / 2 ) & 0x3F ) << 1;
1687#else
1688	fecp->fec_mii_speed = 0;	/* turn off MDIO */
1689#endif	/* CONFIG_USE_MDIO */
1690
1691	err = register_netdev(dev);
1692	if (err) {
1693		free_netdev(dev);
1694		return err;
1695	}
1696
1697	printk ("%s: FEC ENET Version 0.2, FEC irq %d"
1698#ifdef PHY_INTERRUPT
1699		", MII irq %d"
1700#endif
1701		", addr ",
1702		dev->name, FEC_INTERRUPT
1703#ifdef PHY_INTERRUPT
1704		, PHY_INTERRUPT
1705#endif
1706	);
1707	for (i=0; i<6; i++)
1708		printk("%02x%c", dev->dev_addr[i], (i==5) ? '\n' : ':');
1709
1710#ifdef	CONFIG_USE_MDIO	    /* start in full duplex mode, and negotiate speed */
1711	fec_restart (dev, 1);
1712#else			/* always use half duplex mode only */
1713	fec_restart (dev, 0);
1714#endif
1715
1716#ifdef	CONFIG_USE_MDIO
1717	/* Queue up command to detect the PHY and initialize the
1718	 * remainder of the interface.
1719	 */
1720	fep->phy_id_done = 0;
1721	fep->phy_addr = 0;
1722	mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);
1723#endif	/* CONFIG_USE_MDIO */
1724
1725	return 0;
1726}
1727module_init(fec_enet_init);
1728
/* This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 *
 * Sequence: reset the controller, reprogram station address and
 * filters, rebuild both BD rings (dropping any in-flight TX skbs),
 * select half/full duplex, then re-enable interrupts and the
 * receiver.  Register write order follows the MPC860T FEC
 * initialization sequence and must not be rearranged.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
	struct fec_enet_private *fep;
	int i;
	volatile	cbd_t	*bdp;
	volatile	immap_t	*immap;
	volatile	fec_t	*fecp;

	immap = (immap_t *)IMAP_ADDR;	/* pointer to internal registers */

	fecp = &(immap->im_cpm.cp_fec);

	fep = dev->priv;

	/* Whack a reset.  We should wait for this.
	*/
	fecp->fec_ecntrl = FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET;
	for (i = 0;
	     (fecp->fec_ecntrl & FEC_ECNTRL_RESET) && (i < FEC_RESET_DELAY);
	     ++i) {
		udelay(1);
	}
	if (i == FEC_RESET_DELAY) {
		printk ("FEC Reset timeout!\n");
	}

	/* Set station address.
	*/
	fecp->fec_addr_low  = (my_enet_addr[0] << 16) | my_enet_addr[1];
	fecp->fec_addr_high =  my_enet_addr[2];

	/* Reset all multicast.
	*/
	fecp->fec_hash_table_high = 0;
	fecp->fec_hash_table_low  = 0;

	/* Set maximum receive buffer size.
	*/
	fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
	fecp->fec_r_hash = PKT_MAXBUF_SIZE;

	/* Set receive and transmit descriptor base.
	*/
	fecp->fec_r_des_start = iopa((uint)(fep->rx_bd_base));
	fecp->fec_x_des_start = iopa((uint)(fep->tx_bd_base));

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers.  Any packet still queued for
	 * transmission is dropped here.
	*/
	fep->skb_cur = fep->skb_dirty = 0;
	for (i=0; i<=TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i] != NULL) {
			dev_kfree_skb(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Initialize the receive buffer descriptors.
	*/
	bdp = fep->rx_bd_base;
	for (i=0; i<RX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page.
		*/
		bdp->cbd_sc = BD_ENET_RX_EMPTY;
		bdp++;
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit.
	*/
	bdp = fep->tx_bd_base;
	for (i=0; i<TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page.
		*/
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Enable MII mode, selecting full or half duplex as requested.
	*/
	if (duplex) {
		fecp->fec_r_cntrl = FEC_RCNTRL_MII_MODE;	/* MII enable */
		fecp->fec_x_cntrl = FEC_TCNTRL_FDEN;		/* FD enable */
	}
	else {
		fecp->fec_r_cntrl = FEC_RCNTRL_MII_MODE | FEC_RCNTRL_DRT;
		fecp->fec_x_cntrl = 0;
	}
	fep->full_duplex = duplex;

	/* Enable big endian and don't care about SDMA FC.
	*/
	fecp->fec_fun_code = 0x78000000;

#ifdef	CONFIG_USE_MDIO
	/* Set MII speed (computed once in fec_enet_init).
	*/
	fecp->fec_mii_speed = fep->phy_speed;
#endif	/* CONFIG_USE_MDIO */

	/* Clear any outstanding interrupt.
	*/
	fecp->fec_ievent = 0xffc0;

	fecp->fec_ivec = (FEC_INTERRUPT/2) << 29;

	/* Enable interrupts we wish to service.
	*/
	fecp->fec_imask = ( FEC_ENET_TXF | FEC_ENET_TXB |
			    FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII );

	/* And last, enable the transmit and receive processing.
	*/
	fecp->fec_ecntrl = FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN;
	fecp->fec_r_des_active = 0x01000000;
}
1864
/* Gracefully stop the FEC: request a graceful transmit stop, wait for
 * the controller to signal it, then disable the Ethernet block while
 * keeping the MII interface alive (interrupt unmasked and MII clock
 * programmed) so PHY management still works while the link is down.
 */
static void
fec_stop(struct net_device *dev)
{
	volatile	immap_t	*immap;
	volatile	fec_t	*fecp;
	struct fec_enet_private *fep;
	int i;

	immap = (immap_t *)IMAP_ADDR;	/* pointer to internal registers */

	fecp = &(immap->im_cpm.cp_fec);

	if ((fecp->fec_ecntrl & FEC_ECNTRL_ETHER_EN) == 0)
		return;	/* already down */

	fep = dev->priv;


	fecp->fec_x_cntrl = 0x01;	/* Graceful transmit stop */

	/* Poll for the graceful-stop event bit, bounded by the same
	 * timeout used for controller reset.
	 */
	for (i = 0;
	     ((fecp->fec_ievent & 0x10000000) == 0) && (i < FEC_RESET_DELAY);
	     ++i) {
		udelay(1);
	}
	if (i == FEC_RESET_DELAY) {
		printk ("FEC timeout on graceful transmit stop\n");
	}

	/* Clear outstanding MII command interrupts.
	*/
	fecp->fec_ievent = FEC_ENET_MII;

	/* Enable MII command finished interrupt
	*/
	fecp->fec_ivec = (FEC_INTERRUPT/2) << 29;
	fecp->fec_imask = FEC_ENET_MII;

#ifdef	CONFIG_USE_MDIO
	/* Set MII speed.
	*/
	fecp->fec_mii_speed = fep->phy_speed;
#endif	/* CONFIG_USE_MDIO */

	/* Disable FEC
	*/
	fecp->fec_ecntrl &= ~(FEC_ECNTRL_ETHER_EN);
}
1913