1/*
2 * Fast Ethernet Controller (FCC) driver for Motorola MPC8260.
3 * Copyright (c) 2000 MontaVista Software, Inc.   Dan Malek (dmalek@jlc.net)
4 *
5 * This version of the driver is a combination of the 8xx fec and
6 * 8260 SCC Ethernet drivers.  This version has some additional
7 * configuration options, which should probably be moved out of
8 * here.  This driver currently works for the EST SBC8260,
9 * SBS Diablo/BCM, Embedded Planet RPX6, TQM8260, and others.
10 *
11 * Right now, I am very wasteful with the buffers.  I allocate memory
12 * pages and then divide them into 2K frame buffers.  This way I know I
13 * have buffers large enough to hold one frame within one buffer descriptor.
14 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
15 * will be much more memory efficient and will easily handle lots of
16 * small packets.  Since this is a cache coherent processor and CPM,
17 * I could also preallocate SKB's and use them directly on the interface.
18 *
19 * 2004-12	Leo Li (leoli@freescale.com)
20 * - Rework the FCC clock configuration part, make it easier to configure.
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/sched.h>
26#include <linux/string.h>
27#include <linux/ptrace.h>
28#include <linux/errno.h>
29#include <linux/ioport.h>
30#include <linux/slab.h>
31#include <linux/interrupt.h>
32#include <linux/init.h>
33#include <linux/delay.h>
34#include <linux/netdevice.h>
35#include <linux/etherdevice.h>
36#include <linux/skbuff.h>
37#include <linux/spinlock.h>
38#include <linux/mii.h>
39#include <linux/workqueue.h>
40#include <linux/bitops.h>
41
42#include <asm/immap_cpm2.h>
43#include <asm/pgtable.h>
44#include <asm/mpc8260.h>
45#include <asm/irq.h>
46#include <asm/uaccess.h>
47#include <asm/signal.h>
48
49/* We can't use the PHY interrupt if we aren't using MDIO. */
50#if !defined(CONFIG_USE_MDIO)
51#undef PHY_INTERRUPT
52#endif
53
54/* If we have a PHY interrupt, we will advertise both full-duplex and half-
55 * duplex capabilities.  If we don't have a PHY interrupt, then we will only
56 * advertise half-duplex capabilities.
57 */
58#define MII_ADVERTISE_HALF	(ADVERTISE_100HALF | ADVERTISE_10HALF | \
59				 ADVERTISE_CSMA)
60#define MII_ADVERTISE_ALL	(ADVERTISE_100FULL | ADVERTISE_10FULL | \
61				 MII_ADVERTISE_HALF)
62#ifdef PHY_INTERRUPT
63#define MII_ADVERTISE_DEFAULT	MII_ADVERTISE_ALL
64#else
65#define MII_ADVERTISE_DEFAULT	MII_ADVERTISE_HALF
66#endif
67#include <asm/cpm2.h>
68
69/* The transmitter timeout
70 */
71#define TX_TIMEOUT	(2*HZ)
72
73#ifdef	CONFIG_USE_MDIO
74/* Forward declarations of some structures to support different PHYs */
75
76typedef struct {
77	uint mii_data;
78	void (*funct)(uint mii_reg, struct net_device *dev);
79} phy_cmd_t;
80
81typedef struct {
82	uint id;
83	char *name;
84
85	const phy_cmd_t *config;
86	const phy_cmd_t *startup;
87	const phy_cmd_t *ack_int;
88	const phy_cmd_t *shutdown;
89} phy_info_t;
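/* For illustration only (a hypothetical example, not part of the driver): a
 * command table is a mk_mii_end-terminated array of phy_cmd_t entries that
 * mii_do_cmd() walks, performing each MII transfer and passing the result
 * to the optional callback:
 *
 *	static const phy_cmd_t example_startup[] = {
 *		{ mk_mii_read(MII_BMSR), mii_parse_sr },
 *		{ mk_mii_end, }
 *	};
 *	mii_do_cmd(dev, example_startup);
 */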
90
91/* values for phy_status */
92
93#define PHY_CONF_ANE	0x0001  /* 1 auto-negotiation enabled */
94#define PHY_CONF_LOOP	0x0002  /* 1 loopback mode enabled */
95#define PHY_CONF_SPMASK	0x00f0  /* mask for speed */
96#define PHY_CONF_10HDX	0x0010  /* 10 Mbit half duplex supported */
97#define PHY_CONF_10FDX	0x0020  /* 10 Mbit full duplex supported */
98#define PHY_CONF_100HDX	0x0040  /* 100 Mbit half duplex supported */
99#define PHY_CONF_100FDX	0x0080  /* 100 Mbit full duplex supported */
100
101#define PHY_STAT_LINK	0x0100  /* 1 up - 0 down */
102#define PHY_STAT_FAULT	0x0200  /* 1 remote fault */
103#define PHY_STAT_ANC	0x0400  /* 1 auto-negotiation complete	*/
104#define PHY_STAT_SPMASK	0xf000  /* mask for speed */
105#define PHY_STAT_10HDX	0x1000  /* 10 Mbit half duplex selected	*/
106#define PHY_STAT_10FDX	0x2000  /* 10 Mbit full duplex selected	*/
107#define PHY_STAT_100HDX	0x4000  /* 100 Mbit half duplex selected */
108#define PHY_STAT_100FDX	0x8000  /* 100 Mbit full duplex selected */
109#endif	/* CONFIG_USE_MDIO */
110
111/* The number of Tx and Rx buffers.  These are allocated from the page
112 * pool.  The code may assume these are powers of two, so it is best
113 * to keep them that way.
114 * We don't need to allocate pages for the transmitter.  We just use
115 * the skbuffer directly.
116 */
117#define FCC_ENET_RX_PAGES	16
118#define FCC_ENET_RX_FRSIZE	2048
119#define FCC_ENET_RX_FRPPG	(PAGE_SIZE / FCC_ENET_RX_FRSIZE)
120#define RX_RING_SIZE		(FCC_ENET_RX_FRPPG * FCC_ENET_RX_PAGES)
121#define TX_RING_SIZE		16	/* Must be power of two */
122#define TX_RING_MOD_MASK	15	/*   for this to work */
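/* A minimal compile-time sanity check (added for illustration): the ring
 * index arithmetic below, e.g. (skb_cur + 1) & TX_RING_MOD_MASK, only wraps
 * correctly when TX_RING_SIZE is a power of two and the mask is one less.
 */
#if (TX_RING_SIZE & (TX_RING_SIZE - 1)) != 0
#error "TX_RING_SIZE must be a power of two"
#endif
#if TX_RING_MOD_MASK != (TX_RING_SIZE - 1)
#error "TX_RING_MOD_MASK must be TX_RING_SIZE - 1"
#endif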
123
124/* The FCC stores dest/src/type, data, and checksum for receive packets.
125 * size includes support for VLAN
126 */
127#define PKT_MAXBUF_SIZE		1522
128#define PKT_MINBUF_SIZE		64
129
130/* Maximum input DMA size.  Must be (or at least should be) a multiple of 4.
131 * size includes support for VLAN
132 */
133#define PKT_MAXDMA_SIZE		1524
134
135/* Maximum input buffer size.  Must be a multiple of 32.
136*/
137#define PKT_MAXBLR_SIZE		1536
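/* Another illustrative sanity check: with the values above, a maximum VLAN
 * frame (1518 + 4 = 1522 bytes) fits in one 1536-byte receive buffer, the
 * buffer length is a multiple of 32 as required for MRBLR, and the DMA limit
 * is a multiple of 4.
 */
#if (PKT_MAXBLR_SIZE % 32) != 0
#error "PKT_MAXBLR_SIZE must be a multiple of 32"
#endif
#if (PKT_MAXDMA_SIZE % 4) != 0
#error "PKT_MAXDMA_SIZE must be a multiple of 4"
#endif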
138
139static int fcc_enet_open(struct net_device *dev);
140static int fcc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
141static int fcc_enet_rx(struct net_device *dev);
142static irqreturn_t fcc_enet_interrupt(int irq, void *dev_id);
143static int fcc_enet_close(struct net_device *dev);
144static struct net_device_stats *fcc_enet_get_stats(struct net_device *dev);
145/* static void set_multicast_list(struct net_device *dev); */
146static void fcc_restart(struct net_device *dev, int duplex);
147static void fcc_stop(struct net_device *dev);
148static int fcc_enet_set_mac_address(struct net_device *dev, void *addr);
149
150/* These will be configurable for the FCC choice.
151 * Multiple ports can be configured.  There is little choice among the
152 * I/O pins to the PHY, except the clocks.  We will need some board
153 * dependent clock selection.
154 * Why in the hell did I put these inside #ifdef's?  I dunno, maybe to
155 * help show what pins are used for each device.
156 */
157
158/* Since the CLK setting changes greatly from board to board, I changed
159 * it to an easier scheme.  You just need to specify which CLK number to use.
160 * Note that only limited choices can be made on each port.
161 */
162
163/* FCC1 Clock Source Configuration.  These are board specific.
164   Can only choose from CLK9-12. */
165#ifdef CONFIG_SBC82xx
166#define F1_RXCLK	9
167#define F1_TXCLK	10
168#elif defined(CONFIG_ADS8272)
169#define F1_RXCLK	11
170#define F1_TXCLK	10
171#else
172#define F1_RXCLK	12
173#define F1_TXCLK	11
174#endif
175
176/* FCC2 Clock Source Configuration.  These are board specific.
177   Can only choose from CLK13-16. */
178#ifdef CONFIG_ADS8272
179#define F2_RXCLK	15
180#define F2_TXCLK	16
181#else
182#define F2_RXCLK	13
183#define F2_TXCLK	14
184#endif
185
186/* FCC3 Clock Source Configuration.  These are board specific.
187   Can only choose from CLK13-16. */
188#define F3_RXCLK	15
189#define F3_TXCLK	16
190
191/* Automatically generates register configurations */
192#define PC_CLK(x)	((uint)(1<<(x-1)))	/* FCC CLK I/O ports */
193
194#define CMXFCR_RF1CS(x)	((uint)((x-5)<<27))	/* FCC1 Receive Clock Source */
195#define CMXFCR_TF1CS(x)	((uint)((x-5)<<24))	/* FCC1 Transmit Clock Source */
196#define CMXFCR_RF2CS(x)	((uint)((x-9)<<19))	/* FCC2 Receive Clock Source */
197#define CMXFCR_TF2CS(x) ((uint)((x-9)<<16))	/* FCC2 Transmit Clock Source */
198#define CMXFCR_RF3CS(x)	((uint)((x-9)<<11))	/* FCC3 Receive Clock Source */
199#define CMXFCR_TF3CS(x) ((uint)((x-9)<<8))	/* FCC3 Transmit Clock Source */
200
201#define PC_F1RXCLK	PC_CLK(F1_RXCLK)
202#define PC_F1TXCLK	PC_CLK(F1_TXCLK)
203#define CMX1_CLK_ROUTE	(CMXFCR_RF1CS(F1_RXCLK) | CMXFCR_TF1CS(F1_TXCLK))
204#define CMX1_CLK_MASK	((uint)0xff000000)
205
206#define PC_F2RXCLK	PC_CLK(F2_RXCLK)
207#define PC_F2TXCLK	PC_CLK(F2_TXCLK)
208#define CMX2_CLK_ROUTE	(CMXFCR_RF2CS(F2_RXCLK) | CMXFCR_TF2CS(F2_TXCLK))
209#define CMX2_CLK_MASK	((uint)0x00ff0000)
210
211#define PC_F3RXCLK	PC_CLK(F3_RXCLK)
212#define PC_F3TXCLK	PC_CLK(F3_TXCLK)
213#define CMX3_CLK_ROUTE	(CMXFCR_RF3CS(F3_RXCLK) | CMXFCR_TF3CS(F3_TXCLK))
214#define CMX3_CLK_MASK	((uint)0x0000ff00)
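/* Worked example (illustrative, using the default non-board-specific choice
 * of F1_RXCLK = 12 and F1_TXCLK = 11): the macros above expand to
 *
 *	PC_F1RXCLK     = 1 << 11                 = 0x00000800
 *	PC_F1TXCLK     = 1 << 10                 = 0x00000400
 *	CMX1_CLK_ROUTE = (12-5)<<27 | (11-5)<<24
 *	               = 0x38000000 | 0x06000000 = 0x3e000000
 *
 * which is what init_fcc_ioports() later masks into CMXFCR using
 * CMX1_CLK_MASK (0xff000000).
 */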
215
216
217/* I/O Pin assignment for FCC1.  I don't yet know the best way to do this,
218 * but there is little variation among the choices.
219 */
220#define PA1_COL		((uint)0x00000001)
221#define PA1_CRS		((uint)0x00000002)
222#define PA1_TXER	((uint)0x00000004)
223#define PA1_TXEN	((uint)0x00000008)
224#define PA1_RXDV	((uint)0x00000010)
225#define PA1_RXER	((uint)0x00000020)
226#define PA1_TXDAT	((uint)0x00003c00)
227#define PA1_RXDAT	((uint)0x0003c000)
228#define PA1_PSORA_BOUT	(PA1_RXDAT | PA1_TXDAT)
229#define PA1_PSORA_BIN	(PA1_COL | PA1_CRS | PA1_TXER | PA1_TXEN | \
230				PA1_RXDV | PA1_RXER)
231#define PA1_DIRA_BOUT	(PA1_RXDAT | PA1_CRS | PA1_COL | PA1_RXER | PA1_RXDV)
232#define PA1_DIRA_BIN	(PA1_TXDAT | PA1_TXEN | PA1_TXER)
233
234
235/* I/O Pin assignment for FCC2.  I don't yet know the best way to do this,
236 * but there is little variation among the choices.
237 */
238#define PB2_TXER	((uint)0x00000001)
239#define PB2_RXDV	((uint)0x00000002)
240#define PB2_TXEN	((uint)0x00000004)
241#define PB2_RXER	((uint)0x00000008)
242#define PB2_COL		((uint)0x00000010)
243#define PB2_CRS		((uint)0x00000020)
244#define PB2_TXDAT	((uint)0x000003c0)
245#define PB2_RXDAT	((uint)0x00003c00)
246#define PB2_PSORB_BOUT	(PB2_RXDAT | PB2_TXDAT | PB2_CRS | PB2_COL | \
247				PB2_RXER | PB2_RXDV | PB2_TXER)
248#define PB2_PSORB_BIN	(PB2_TXEN)
249#define PB2_DIRB_BOUT	(PB2_RXDAT | PB2_CRS | PB2_COL | PB2_RXER | PB2_RXDV)
250#define PB2_DIRB_BIN	(PB2_TXDAT | PB2_TXEN | PB2_TXER)
251
252
253/* I/O Pin assignment for FCC3.  I don't yet know the best way to do this,
254 * but there is little variation among the choices.
255 */
256#define PB3_RXDV	((uint)0x00004000)
257#define PB3_RXER	((uint)0x00008000)
258#define PB3_TXER	((uint)0x00010000)
259#define PB3_TXEN	((uint)0x00020000)
260#define PB3_COL		((uint)0x00040000)
261#define PB3_CRS		((uint)0x00080000)
262#ifndef CONFIG_RPX8260
263#define PB3_TXDAT	((uint)0x0f000000)
264#define PC3_TXDAT	((uint)0x00000000)
265#else
266#define PB3_TXDAT	((uint)0x0f000000)
267#define PC3_TXDAT	0
268#endif
269#define PB3_RXDAT	((uint)0x00f00000)
270#define PB3_PSORB_BOUT	(PB3_RXDAT | PB3_TXDAT | PB3_CRS | PB3_COL | \
271				PB3_RXER | PB3_RXDV | PB3_TXER | PB3_TXEN)
272#define PB3_PSORB_BIN	(0)
273#define PB3_DIRB_BOUT	(PB3_RXDAT | PB3_CRS | PB3_COL | PB3_RXER | PB3_RXDV)
274#define PB3_DIRB_BIN	(PB3_TXDAT | PB3_TXEN | PB3_TXER)
275
276#define PC3_PSORC_BOUT	(PC3_TXDAT)
277#define PC3_PSORC_BIN	(0)
278#define PC3_DIRC_BOUT	(0)
279#define PC3_DIRC_BIN	(PC3_TXDAT)
280
281
282/* MII status/control serial interface.
283*/
284#if defined(CONFIG_RPX8260)
285/* The EP8260 doesn't use Port C for MDIO */
286#define PC_MDIO		((uint)0x00000000)
287#define PC_MDCK		((uint)0x00000000)
288#elif defined(CONFIG_TQM8260)
289/* TQM8260 has MDIO and MDCK on PC30 and PC31 respectively */
290#define PC_MDIO		((uint)0x00000002)
291#define PC_MDCK		((uint)0x00000001)
292#elif defined(CONFIG_ADS8272)
293#define PC_MDIO		((uint)0x00002000)
294#define PC_MDCK		((uint)0x00001000)
295#elif defined(CONFIG_EST8260) || defined(CONFIG_ADS8260) || defined(CONFIG_PQ2FADS)
296#define PC_MDIO		((uint)0x00400000)
297#define PC_MDCK		((uint)0x00200000)
298#else
299#define PC_MDIO		((uint)0x00000004)
300#define PC_MDCK		((uint)0x00000020)
301#endif
302
303#if defined(CONFIG_USE_MDIO) && (!defined(PC_MDIO) || !defined(PC_MDCK))
304#error "Must define PC_MDIO and PC_MDCK if using MDIO"
305#endif
306
307/* PHY addresses */
308/* default to dynamic config of phy addresses */
309#define FCC1_PHY_ADDR 0
310#ifdef CONFIG_PQ2FADS
311#define FCC2_PHY_ADDR 0
312#else
313#define FCC2_PHY_ADDR 2
314#endif
315#define FCC3_PHY_ADDR 3
316
317/* A table of information for supporting FCCs.  This does two things.
318 * First, we know how many FCCs we have and they are always externally
319 * numbered from zero.  Second, it holds control register and I/O
320 * information that could be different among board designs.
321 */
322typedef struct fcc_info {
323	uint	fc_fccnum;
324	uint	fc_phyaddr;
325	uint	fc_cpmblock;
326	uint	fc_cpmpage;
327	uint	fc_proff;
328	uint	fc_interrupt;
329	uint	fc_trxclocks;
330	uint	fc_clockroute;
331	uint	fc_clockmask;
332	uint	fc_mdio;
333	uint	fc_mdck;
334} fcc_info_t;
335
336static fcc_info_t fcc_ports[] = {
337#ifdef CONFIG_FCC1_ENET
338	{ 0, FCC1_PHY_ADDR, CPM_CR_FCC1_SBLOCK, CPM_CR_FCC1_PAGE, PROFF_FCC1, SIU_INT_FCC1,
339		(PC_F1RXCLK | PC_F1TXCLK), CMX1_CLK_ROUTE, CMX1_CLK_MASK,
340		PC_MDIO, PC_MDCK },
341#endif
342#ifdef CONFIG_FCC2_ENET
343	{ 1, FCC2_PHY_ADDR, CPM_CR_FCC2_SBLOCK, CPM_CR_FCC2_PAGE, PROFF_FCC2, SIU_INT_FCC2,
344		(PC_F2RXCLK | PC_F2TXCLK), CMX2_CLK_ROUTE, CMX2_CLK_MASK,
345		PC_MDIO, PC_MDCK },
346#endif
347#ifdef CONFIG_FCC3_ENET
348	{ 2, FCC3_PHY_ADDR, CPM_CR_FCC3_SBLOCK, CPM_CR_FCC3_PAGE, PROFF_FCC3, SIU_INT_FCC3,
349		(PC_F3RXCLK | PC_F3TXCLK), CMX3_CLK_ROUTE, CMX3_CLK_MASK,
350		PC_MDIO, PC_MDCK },
351#endif
352};
353
354/* The FCC buffer descriptors track the ring buffers.  The rx_bd_base and
355 * tx_bd_base always point to the base of the buffer descriptors.  The
356 * cur_rx and cur_tx point to the currently available buffer.
357 * The dirty_tx tracks the current buffer that is being sent by the
358 * controller.  The cur_tx and dirty_tx are equal under both completely
359 * empty and completely full conditions.  The empty/ready indicator in
360 * the buffer descriptor determines the actual condition.
361 */
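/* Concretely (an illustrative reading of the code below): with TX_RING_SIZE
 * of 16, cur_tx == dirty_tx can mean either "all 16 BDs free" (tx_free == 16,
 * ring empty) or "no BDs free" (tx_free == 0, ring full); tx_free and the
 * per-BD ready bit are what distinguish the two cases.
 */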
362struct fcc_enet_private {
363	/* The saved address of a sent-in-place packet/buffer, for later dev_kfree_skb(). */
364	struct	sk_buff* tx_skbuff[TX_RING_SIZE];
365	ushort	skb_cur;
366	ushort	skb_dirty;
367
368	/* CPM dual port RAM relative addresses.
369	*/
370	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
371	cbd_t	*tx_bd_base;
372	cbd_t	*cur_rx, *cur_tx;		/* The next free ring entry */
373	cbd_t	*dirty_tx;	/* The ring entries to be free()ed. */
374	volatile fcc_t	*fccp;
375	volatile fcc_enet_t	*ep;
376	struct	net_device_stats stats;
377	uint	tx_free;
378	spinlock_t lock;
379
380#ifdef	CONFIG_USE_MDIO
381	uint	phy_id;
382	uint	phy_id_done;
383	uint	phy_status;
384	phy_info_t	*phy;
385	struct work_struct phy_relink;
386	struct work_struct phy_display_config;
387	struct net_device *dev;
388
389	uint	sequence_done;
390
391	uint	phy_addr;
392#endif	/* CONFIG_USE_MDIO */
393
394	int	link;
395	int	old_link;
396	int	full_duplex;
397
398	fcc_info_t	*fip;
399};
400
401static void init_fcc_shutdown(fcc_info_t *fip, struct fcc_enet_private *cep,
402	volatile cpm2_map_t *immap);
403static void init_fcc_startup(fcc_info_t *fip, struct net_device *dev);
404static void init_fcc_ioports(fcc_info_t *fip, volatile iop_cpm2_t *io,
405	volatile cpm2_map_t *immap);
406static void init_fcc_param(fcc_info_t *fip, struct net_device *dev,
407	volatile cpm2_map_t *immap);
408
409#ifdef	CONFIG_USE_MDIO
410static int	mii_queue(struct net_device *dev, int request, void (*func)(uint, struct net_device *));
411static uint	mii_send_receive(fcc_info_t *fip, uint cmd);
412static void	mii_do_cmd(struct net_device *dev, const phy_cmd_t *c);
413
414/* Make MII read/write commands for the FCC.
415*/
416#define mk_mii_read(REG)	(0x60020000 | (((REG) & 0x1f) << 18))
417#define mk_mii_write(REG, VAL)	(0x50020000 | (((REG) & 0x1f) << 18) | \
418						((VAL) & 0xffff))
419#define mk_mii_end	0
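/* For reference, a sketch of how these constants line up with the 32-bit MII
 * management frame (assuming the usual clause-22 layout):
 *
 *	bits 31-30  ST   start, 01
 *	bits 29-28  OP   10 = read (the 0x6... constant), 01 = write (0x5...)
 *	bits 27-23  PHY address, OR'ed in later by mii_queue()
 *	bits 22-18  register number, the (REG & 0x1f) << 18 above
 *	bits 17-16  TA   turnaround, the 0x00020000 constant
 *	bits 15-0   data (write value, or the bits returned on a read)
 */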
420#endif	/* CONFIG_USE_MDIO */
421
422
423static int
424fcc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
425{
426	struct fcc_enet_private *cep = (struct fcc_enet_private *)dev->priv;
427	volatile cbd_t	*bdp;
428
429	/* Fill in a Tx ring entry */
430	bdp = cep->cur_tx;
431
432#ifndef final_version
433	if (!cep->tx_free || (bdp->cbd_sc & BD_ENET_TX_READY)) {
434		/* Ooops.  All transmit buffers are full.  Bail out.
435		 * This should not happen, since the tx queue should be stopped.
436		 */
437		printk("%s: tx queue full!\n", dev->name);
438		return 1;
439	}
440#endif
441
442	/* Clear all of the status flags. */
443	bdp->cbd_sc &= ~BD_ENET_TX_STATS;
444
445	/* If the frame is short, tell CPM to pad it. */
446	if (skb->len <= ETH_ZLEN)
447		bdp->cbd_sc |= BD_ENET_TX_PAD;
448	else
449		bdp->cbd_sc &= ~BD_ENET_TX_PAD;
450
451	/* Set buffer length and buffer pointer. */
452	bdp->cbd_datlen = skb->len;
453	bdp->cbd_bufaddr = __pa(skb->data);
454
455	spin_lock_irq(&cep->lock);
456
457	/* Save skb pointer. */
458	cep->tx_skbuff[cep->skb_cur] = skb;
459
460	cep->stats.tx_bytes += skb->len;
461	cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK;
462
463	/* Send it on its way.  Tell the CPM it's ready, interrupt when done,
464	 * it's the last BD of the frame, and to put the CRC on the end.
465	 */
466	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC);
467
468	dev->trans_start = jiffies;
469
470	/* If this was the last BD in the ring, start at the beginning again. */
471	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
472		bdp = cep->tx_bd_base;
473	else
474		bdp++;
475
476	if (!--cep->tx_free)
477		netif_stop_queue(dev);
478
479	cep->cur_tx = (cbd_t *)bdp;
480
481	spin_unlock_irq(&cep->lock);
482
483	return 0;
484}
485
486
487static void
488fcc_enet_timeout(struct net_device *dev)
489{
490	struct fcc_enet_private *cep = (struct fcc_enet_private *)dev->priv;
491
492	printk("%s: transmit timed out.\n", dev->name);
493	cep->stats.tx_errors++;
494#ifndef final_version
495	{
496		int	i;
497		cbd_t	*bdp;
498		printk(" Ring data dump: cur_tx %p tx_free %d cur_rx %p.\n",
499		       cep->cur_tx, cep->tx_free,
500		       cep->cur_rx);
501		bdp = cep->tx_bd_base;
502		printk(" Tx @base %p :\n", bdp);
503		for (i = 0 ; i < TX_RING_SIZE; i++, bdp++)
504			printk("%04x %04x %08x\n",
505			       bdp->cbd_sc,
506			       bdp->cbd_datlen,
507			       bdp->cbd_bufaddr);
508		bdp = cep->rx_bd_base;
509		printk(" Rx @base %p :\n", bdp);
510		for (i = 0 ; i < RX_RING_SIZE; i++, bdp++)
511			printk("%04x %04x %08x\n",
512			       bdp->cbd_sc,
513			       bdp->cbd_datlen,
514			       bdp->cbd_bufaddr);
515	}
516#endif
517	if (cep->tx_free)
518		netif_wake_queue(dev);
519}
520
521/* The interrupt handler. */
522static irqreturn_t
523fcc_enet_interrupt(int irq, void * dev_id)
524{
525	struct	net_device *dev = dev_id;
526	volatile struct	fcc_enet_private *cep;
527	volatile cbd_t	*bdp;
528	ushort	int_events;
529	int	must_restart;
530
531	cep = (struct fcc_enet_private *)dev->priv;
532
533	/* Get the interrupt events that caused us to be here.
534	*/
535	int_events = cep->fccp->fcc_fcce;
536	cep->fccp->fcc_fcce = (int_events & cep->fccp->fcc_fccm);
537	must_restart = 0;
538
539#ifdef PHY_INTERRUPT
540	/* We have to be careful here to make sure that we aren't
541	 * interrupted by a PHY interrupt.
542	 */
543	disable_irq_nosync(PHY_INTERRUPT);
544#endif
545
546	/* Handle receive event in its own function.
547	*/
548	if (int_events & FCC_ENET_RXF)
549		fcc_enet_rx(dev_id);
550
551	/* Check for a transmit error.  The manual is a little unclear
552	 * about this, so keep the debug code until I get it figured out.  It
553	 * appears that if TXE is set, then TXB is not set.  However,
554	 * if carrier sense is lost during frame transmission, the TXE
555	 * bit is set, "and continues the buffer transmission normally."
556	 * I don't know if "normally" implies TXB is set when the buffer
557	 * descriptor is closed.....trial and error :-).
558	 */
559
560	/* Transmit OK, or non-fatal error.  Update the buffer descriptors.
561	*/
562	if (int_events & (FCC_ENET_TXE | FCC_ENET_TXB)) {
563	    spin_lock(&cep->lock);
564	    bdp = cep->dirty_tx;
565	    while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) {
566		if (cep->tx_free == TX_RING_SIZE)
567		    break;
568
569		if (bdp->cbd_sc & BD_ENET_TX_HB)	/* No heartbeat */
570			cep->stats.tx_heartbeat_errors++;
571		if (bdp->cbd_sc & BD_ENET_TX_LC)	/* Late collision */
572			cep->stats.tx_window_errors++;
573		if (bdp->cbd_sc & BD_ENET_TX_RL)	/* Retrans limit */
574			cep->stats.tx_aborted_errors++;
575		if (bdp->cbd_sc & BD_ENET_TX_UN)	/* Underrun */
576			cep->stats.tx_fifo_errors++;
577		if (bdp->cbd_sc & BD_ENET_TX_CSL)	/* Carrier lost */
578			cep->stats.tx_carrier_errors++;
579
580
581		/* Lost heartbeat and lost carrier are not really bad errors.
582		 * The others require a restart transmit command.
583		 */
584		if (bdp->cbd_sc &
585		    (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
586			must_restart = 1;
587			cep->stats.tx_errors++;
588		}
589
590		cep->stats.tx_packets++;
591
592		/* Deferred means some collisions occurred during transmit,
593		 * but we eventually sent the packet OK.
594		 */
595		if (bdp->cbd_sc & BD_ENET_TX_DEF)
596			cep->stats.collisions++;
597
598		/* Free the sk buffer associated with this last transmit. */
599		dev_kfree_skb_irq(cep->tx_skbuff[cep->skb_dirty]);
600		cep->tx_skbuff[cep->skb_dirty] = NULL;
601		cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK;
602
603		/* Update pointer to next buffer descriptor to be transmitted. */
604		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
605			bdp = cep->tx_bd_base;
606		else
607			bdp++;
608
609		/* I don't know if we can be held off from processing these
610		 * interrupts for more than one frame time.  I really hope
611		 * not.  In such a case, we would now want to check the
612		 * currently available BD (cur_tx) and determine if any
613		 * buffers between the dirty_tx and cur_tx have also been
614		 * sent.  We would want to process anything in between that
615		 * does not have BD_ENET_TX_READY set.
616		 */
617
618		/* Since we have freed up a buffer, the ring is no longer
619		 * full.
620		 */
621		if (!cep->tx_free++) {
622			if (netif_queue_stopped(dev)) {
623				netif_wake_queue(dev);
624			}
625		}
626
627		cep->dirty_tx = (cbd_t *)bdp;
628	    }
629
630	    if (must_restart) {
631		volatile cpm_cpm2_t *cp;
632
633		cep->fccp->fcc_gfmr &= ~FCC_GFMR_ENT;
634		udelay(10); /* wait a few microseconds just on principle */
635		cep->fccp->fcc_gfmr |=  FCC_GFMR_ENT;
636
637		cp = cpmp;
638		cp->cp_cpcr =
639		    mk_cr_cmd(cep->fip->fc_cpmpage, cep->fip->fc_cpmblock,
640		    		0x0c, CPM_CR_RESTART_TX) | CPM_CR_FLG;
641		while (cp->cp_cpcr & CPM_CR_FLG);
642	    }
643	    spin_unlock(&cep->lock);
644	}
645
646	/* Check for receive busy, i.e. packets coming but no place to
647	 * put them.
648	 */
649	if (int_events & FCC_ENET_BSY) {
650		cep->fccp->fcc_fcce = FCC_ENET_BSY;
651		cep->stats.rx_dropped++;
652	}
653
654#ifdef PHY_INTERRUPT
655	enable_irq(PHY_INTERRUPT);
656#endif
657	return IRQ_HANDLED;
658}
659
660/* During a receive, the cur_rx points to the current incoming buffer.
661 * When we update through the ring, if the next incoming buffer has
662 * not been given to the system, we just set the empty indicator,
663 * effectively tossing the packet.
664 */
665static int
666fcc_enet_rx(struct net_device *dev)
667{
668	struct	fcc_enet_private *cep;
669	volatile cbd_t	*bdp;
670	struct	sk_buff *skb;
671	ushort	pkt_len;
672
673	cep = (struct fcc_enet_private *)dev->priv;
674
675	/* First, grab all of the stats for the incoming packet.
676	 * These get messed up if we get called due to a busy condition.
677	 */
678	bdp = cep->cur_rx;
679
680for (;;) {
681	if (bdp->cbd_sc & BD_ENET_RX_EMPTY)
682		break;
683
684#ifndef final_version
685	/* Since we have allocated space to hold a complete frame, both
686	 * the first and last indicators should be set.
687	 */
688	if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) !=
689		(BD_ENET_RX_FIRST | BD_ENET_RX_LAST))
690			printk("CPM ENET: rcv is not first+last\n");
691#endif
692
693	/* Frame too long or too short. */
694	if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
695		cep->stats.rx_length_errors++;
696	if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
697		cep->stats.rx_frame_errors++;
698	if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
699		cep->stats.rx_crc_errors++;
700	if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
701		cep->stats.rx_crc_errors++;
702	if (bdp->cbd_sc & BD_ENET_RX_CL)	/* Late Collision */
703		cep->stats.rx_frame_errors++;
704
705	if (!(bdp->cbd_sc &
706	      (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | BD_ENET_RX_CR
707	       | BD_ENET_RX_OV | BD_ENET_RX_CL)))
708	{
709		/* Process the incoming frame. */
710		cep->stats.rx_packets++;
711
712		/* Remove the FCS from the packet length. */
713		pkt_len = bdp->cbd_datlen - 4;
714		cep->stats.rx_bytes += pkt_len;
715
716		/* This does 16 byte alignment, much more than we need. */
717		skb = dev_alloc_skb(pkt_len);
718
719		if (skb == NULL) {
720			printk("%s: Memory squeeze, dropping packet.\n", dev->name);
721			cep->stats.rx_dropped++;
722		}
723		else {
724			skb_put(skb,pkt_len);	/* Make room */
725			eth_copy_and_sum(skb,
726				(unsigned char *)__va(bdp->cbd_bufaddr),
727				pkt_len, 0);
728			skb->protocol=eth_type_trans(skb,dev);
729			netif_rx(skb);
730		}
731	}
732
733	/* Clear the status flags for this buffer. */
734	bdp->cbd_sc &= ~BD_ENET_RX_STATS;
735
736	/* Mark the buffer empty. */
737	bdp->cbd_sc |= BD_ENET_RX_EMPTY;
738
739	/* Update BD pointer to next entry. */
740	if (bdp->cbd_sc & BD_ENET_RX_WRAP)
741		bdp = cep->rx_bd_base;
742	else
743		bdp++;
744
745   }
746	cep->cur_rx = (cbd_t *)bdp;
747
748	return 0;
749}
750
751static int
752fcc_enet_close(struct net_device *dev)
753{
754#ifdef	CONFIG_USE_MDIO
755	struct fcc_enet_private *fep = dev->priv;
756#endif
757
758	netif_stop_queue(dev);
759	fcc_stop(dev);
760#ifdef	CONFIG_USE_MDIO
761	if (fep->phy)
762		mii_do_cmd(dev, fep->phy->shutdown);
763#endif
764
765	return 0;
766}
767
768static struct net_device_stats *fcc_enet_get_stats(struct net_device *dev)
769{
770	struct fcc_enet_private *cep = (struct fcc_enet_private *)dev->priv;
771
772	return &cep->stats;
773}
774
775#ifdef	CONFIG_USE_MDIO
776
777/* NOTE: Most of the following comes from the FEC driver for 860. The
778 * overall structure of MII code has been retained (as it's proved stable
779 * and well-tested), but actual transfer requests are processed "at once"
780 * instead of being queued (there's no interrupt-driven MII transfer
781 * mechanism, one has to toggle the data/clock bits manually).
782 */
783static int
784mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
785{
786	struct fcc_enet_private *fep;
787	int		retval, tmp;
788
789	/* Add PHY address to register command. */
790	fep = dev->priv;
791	regval |= fep->phy_addr << 23;
792
793	retval = 0;
794
795	tmp = mii_send_receive(fep->fip, regval);
796	if (func)
797		func(tmp, dev);
798
799	return retval;
800}
801
802static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
803{
804	int k;
805
806	if(!c)
807		return;
808
809	for(k = 0; (c+k)->mii_data != mk_mii_end; k++)
810		mii_queue(dev, (c+k)->mii_data, (c+k)->funct);
811}
812
813static void mii_parse_sr(uint mii_reg, struct net_device *dev)
814{
815	volatile struct fcc_enet_private *fep = dev->priv;
816	uint s = fep->phy_status;
817
818	s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
819
820	if (mii_reg & BMSR_LSTATUS)
821		s |= PHY_STAT_LINK;
822	if (mii_reg & BMSR_RFAULT)
823		s |= PHY_STAT_FAULT;
824	if (mii_reg & BMSR_ANEGCOMPLETE)
825		s |= PHY_STAT_ANC;
826
827	fep->phy_status = s;
828}
829
830static void mii_parse_cr(uint mii_reg, struct net_device *dev)
831{
832	volatile struct fcc_enet_private *fep = dev->priv;
833	uint s = fep->phy_status;
834
835	s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP);
836
837	if (mii_reg & BMCR_ANENABLE)
838		s |= PHY_CONF_ANE;
839	if (mii_reg & BMCR_LOOPBACK)
840		s |= PHY_CONF_LOOP;
841
842	fep->phy_status = s;
843}
844
845static void mii_parse_anar(uint mii_reg, struct net_device *dev)
846{
847	volatile struct fcc_enet_private *fep = dev->priv;
848	uint s = fep->phy_status;
849
850	s &= ~(PHY_CONF_SPMASK);
851
852	if (mii_reg & ADVERTISE_10HALF)
853		s |= PHY_CONF_10HDX;
854	if (mii_reg & ADVERTISE_10FULL)
855		s |= PHY_CONF_10FDX;
856	if (mii_reg & ADVERTISE_100HALF)
857		s |= PHY_CONF_100HDX;
858	if (mii_reg & ADVERTISE_100FULL)
859		s |= PHY_CONF_100FDX;
860
861	fep->phy_status = s;
862}
863
864/* ------------------------------------------------------------------------- */
865/* Generic PHY support.  Should work for all PHYs, but does not support link
866 * change interrupts.
867 */
868#ifdef CONFIG_FCC_GENERIC_PHY
869
870static phy_info_t phy_info_generic = {
871	0x00000000, /* 0-->match any PHY */
872	"GENERIC",
873
874	(const phy_cmd_t []) {  /* config */
875		/* advertise only half-duplex capabilities */
876		{ mk_mii_write(MII_ADVERTISE, MII_ADVERTISE_HALF),
877			mii_parse_anar },
878
879		/* enable auto-negotiation */
880		{ mk_mii_write(MII_BMCR, BMCR_ANENABLE), mii_parse_cr },
881		{ mk_mii_end, }
882	},
883	(const phy_cmd_t []) {  /* startup */
884		/* restart auto-negotiation */
885		{ mk_mii_write(MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART),
886			NULL },
887		{ mk_mii_end, }
888	},
889	(const phy_cmd_t []) { /* ack_int */
890		/* We don't actually use the ack_int table with a generic
891		 * PHY, but putting a reference to mii_parse_sr here keeps
892		 * us from getting a compiler warning about unused static
893		 * functions in the case where we only compile in generic
894		 * PHY support.
895		 */
896		{ mk_mii_read(MII_BMSR), mii_parse_sr },
897		{ mk_mii_end, }
898	},
899	(const phy_cmd_t []) {  /* shutdown */
900		{ mk_mii_end, }
901	},
902};
903#endif	/* ifdef CONFIG_FCC_GENERIC_PHY */
904
905/* ------------------------------------------------------------------------- */
906/* The Level One LXT970 is used by many boards				     */
907
908#ifdef CONFIG_FCC_LXT970
909
910#define MII_LXT970_MIRROR    16  /* Mirror register           */
911#define MII_LXT970_IER       17  /* Interrupt Enable Register */
912#define MII_LXT970_ISR       18  /* Interrupt Status Register */
913#define MII_LXT970_CONFIG    19  /* Configuration Register    */
914#define MII_LXT970_CSR       20  /* Chip Status Register      */
915
916static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
917{
918	volatile struct fcc_enet_private *fep = dev->priv;
919	uint s = fep->phy_status;
920
921	s &= ~(PHY_STAT_SPMASK);
922
923	if (mii_reg & 0x0800) {
924		if (mii_reg & 0x1000)
925			s |= PHY_STAT_100FDX;
926		else
927			s |= PHY_STAT_100HDX;
928	} else {
929		if (mii_reg & 0x1000)
930			s |= PHY_STAT_10FDX;
931		else
932			s |= PHY_STAT_10HDX;
933	}
934
935	fep->phy_status = s;
936}
937
938static phy_info_t phy_info_lxt970 = {
939	0x07810000,
940	"LXT970",
941
942	(const phy_cmd_t []) {  /* config */
943		{ mk_mii_read(MII_BMCR), mii_parse_cr },
944		{ mk_mii_read(MII_ADVERTISE), mii_parse_anar },
945		{ mk_mii_end, }
946	},
947	(const phy_cmd_t []) {  /* startup - enable interrupts */
948		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
949		{ mk_mii_write(MII_BMCR, 0x1200), NULL }, /* autonegotiate */
950		{ mk_mii_end, }
951	},
952	(const phy_cmd_t []) { /* ack_int */
953		/* read SR and ISR to acknowledge */
954
955		{ mk_mii_read(MII_BMSR), mii_parse_sr },
956		{ mk_mii_read(MII_LXT970_ISR), NULL },
957
958		/* find out the current status */
959
960		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
961		{ mk_mii_end, }
962	},
963	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
964		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
965		{ mk_mii_end, }
966	},
967};
968
969#endif /* CONFIG_FCC_LXT970 */
970
971/* ------------------------------------------------------------------------- */
972/* The Level One LXT971 is used on some of my custom boards                  */
973
974#ifdef CONFIG_FCC_LXT971
975
976/* register definitions for the 971 */
977
978#define MII_LXT971_PCR       16  /* Port Control Register     */
979#define MII_LXT971_SR2       17  /* Status Register 2         */
980#define MII_LXT971_IER       18  /* Interrupt Enable Register */
981#define MII_LXT971_ISR       19  /* Interrupt Status Register */
982#define MII_LXT971_LCR       20  /* LED Control Register      */
983#define MII_LXT971_TCR       30  /* Transmit Control Register */
984
985/*
986 * I had some nice ideas of running the MDIO faster...
987 * The 971 should support 8MHz and I tried it, but things acted really
988 * weird, so 2.5 MHz ought to be enough for anyone...
989 */
990
991static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
992{
993	volatile struct fcc_enet_private *fep = dev->priv;
994	uint s = fep->phy_status;
995
996	s &= ~(PHY_STAT_SPMASK);
997
998	if (mii_reg & 0x4000) {
999		if (mii_reg & 0x0200)
1000			s |= PHY_STAT_100FDX;
1001		else
1002			s |= PHY_STAT_100HDX;
1003	} else {
1004		if (mii_reg & 0x0200)
1005			s |= PHY_STAT_10FDX;
1006		else
1007			s |= PHY_STAT_10HDX;
1008	}
1009	if (mii_reg & 0x0008)
1010		s |= PHY_STAT_FAULT;
1011
1012	fep->phy_status = s;
1013}
1014
1015static phy_info_t phy_info_lxt971 = {
1016	0x0001378e,
1017	"LXT971",
1018
1019	(const phy_cmd_t []) {  /* config */
1020		/* configure link capabilities to advertise */
1021		{ mk_mii_write(MII_ADVERTISE, MII_ADVERTISE_DEFAULT),
1022			mii_parse_anar },
1023
1024		/* enable auto-negotiation */
1025		{ mk_mii_write(MII_BMCR, BMCR_ANENABLE), mii_parse_cr },
1026		{ mk_mii_end, }
1027	},
1028	(const phy_cmd_t []) {  /* startup - enable interrupts */
1029		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
1030
1031		/* restart auto-negotiation */
1032		{ mk_mii_write(MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART),
1033			NULL },
1034		{ mk_mii_end, }
1035	},
1036	(const phy_cmd_t []) { /* ack_int */
1037		/* find out the current status */
1038		{ mk_mii_read(MII_BMSR), NULL },
1039		{ mk_mii_read(MII_BMSR), mii_parse_sr },
1040		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
1041
1042		/* we only need to read ISR to acknowledge */
1043		{ mk_mii_read(MII_LXT971_ISR), NULL },
1044		{ mk_mii_end, }
1045	},
1046	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
1047		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
1048		{ mk_mii_end, }
1049	},
1050};
1051
1052#endif /* CONFIG_FCC_LXT971 */
1053
1054/* ------------------------------------------------------------------------- */
1055/* The Quality Semiconductor QS6612 is used on the RPX CLLF                  */
1056
1057#ifdef CONFIG_FCC_QS6612
1058
1059/* register definitions */
1060
1061#define MII_QS6612_MCR       17  /* Mode Control Register      */
1062#define MII_QS6612_FTR       27  /* Factory Test Register      */
1063#define MII_QS6612_MCO       28  /* Misc. Control Register     */
1064#define MII_QS6612_ISR       29  /* Interrupt Source Register  */
1065#define MII_QS6612_IMR       30  /* Interrupt Mask Register    */
1066#define MII_QS6612_PCR       31  /* 100BaseTx PHY Control Reg. */
1067
1068static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
1069{
1070	volatile struct fcc_enet_private *fep = dev->priv;
1071	uint s = fep->phy_status;
1072
1073	s &= ~(PHY_STAT_SPMASK);
1074
1075	switch((mii_reg >> 2) & 7) {
1076	case 1: s |= PHY_STAT_10HDX;  break;
1077	case 2: s |= PHY_STAT_100HDX; break;
1078	case 5: s |= PHY_STAT_10FDX;  break;
1079	case 6: s |= PHY_STAT_100FDX; break;
1080	}
1081
1082	fep->phy_status = s;
1083}
1084
1085static phy_info_t phy_info_qs6612 = {
1086	0x00181440,
1087	"QS6612",
1088
1089	(const phy_cmd_t []) {  /* config */
1090//	{ mk_mii_write(MII_ADVERTISE, 0x061), NULL }, /* 10  Mbps */
1091
1092		/* The PHY powers up isolated on the RPX,
1093		 * so send a command to allow operation.
1094		 */
1095
1096		{ mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },
1097
1098		/* parse cr and anar to get some info */
1099
1100		{ mk_mii_read(MII_BMCR), mii_parse_cr },
1101		{ mk_mii_read(MII_ADVERTISE), mii_parse_anar },
1102		{ mk_mii_end, }
1103	},
1104	(const phy_cmd_t []) {  /* startup - enable interrupts */
1105		{ mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
1106		{ mk_mii_write(MII_BMCR, 0x1200), NULL }, /* autonegotiate */
1107		{ mk_mii_end, }
1108	},
1109	(const phy_cmd_t []) { /* ack_int */
1110
1111		/* we need to read ISR, SR and ANER to acknowledge */
1112
1113		{ mk_mii_read(MII_QS6612_ISR), NULL },
1114		{ mk_mii_read(MII_BMSR), mii_parse_sr },
1115		{ mk_mii_read(MII_EXPANSION), NULL },
1116
1117		/* read pcr to get info */
1118
1119		{ mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
1120		{ mk_mii_end, }
1121	},
1122	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
1123		{ mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
1124		{ mk_mii_end, }
1125	},
1126};
1127
1128
1129#endif /* CONFIG_FCC_QS6612 */
1130
1131
1132/* ------------------------------------------------------------------------- */
1133/* The Davicom DM9131 is used on the HYMOD board			     */
1134
1135#ifdef CONFIG_FCC_DM9131
1136
1137/* register definitions */
1138
1139#define MII_DM9131_ACR		16	/* Aux. Config Register		*/
1140#define MII_DM9131_ACSR		17	/* Aux. Config/Status Register	*/
1141#define MII_DM9131_10TCSR	18	/* 10BaseT Config/Status Reg.	*/
1142#define MII_DM9131_INTR		21	/* Interrupt Register		*/
1143#define MII_DM9131_RECR		22	/* Receive Error Counter Reg.	*/
1144#define MII_DM9131_DISCR	23	/* Disconnect Counter Register	*/
1145
1146static void mii_parse_dm9131_acsr(uint mii_reg, struct net_device *dev)
1147{
1148	volatile struct fcc_enet_private *fep = dev->priv;
1149	uint s = fep->phy_status;
1150
1151	s &= ~(PHY_STAT_SPMASK);
1152
1153	switch ((mii_reg >> 12) & 0xf) {
1154	case 1: s |= PHY_STAT_10HDX;  break;
1155	case 2: s |= PHY_STAT_10FDX;  break;
1156	case 4: s |= PHY_STAT_100HDX; break;
1157	case 8: s |= PHY_STAT_100FDX; break;
1158	}
1159
1160	fep->phy_status = s;
1161}
1162
1163static phy_info_t phy_info_dm9131 = {
1164	0x00181b80,
1165	"DM9131",
1166
1167	(const phy_cmd_t []) {  /* config */
1168		/* parse cr and anar to get some info */
1169		{ mk_mii_read(MII_BMCR), mii_parse_cr },
1170		{ mk_mii_read(MII_ADVERTISE), mii_parse_anar },
1171		{ mk_mii_end, }
1172	},
1173	(const phy_cmd_t []) {  /* startup - enable interrupts */
1174		{ mk_mii_write(MII_DM9131_INTR, 0x0002), NULL },
1175		{ mk_mii_write(MII_BMCR, 0x1200), NULL }, /* autonegotiate */
1176		{ mk_mii_end, }
1177	},
1178	(const phy_cmd_t []) { /* ack_int */
1179
1180		/* we need to read INTR, SR and ANER to acknowledge */
1181
1182		{ mk_mii_read(MII_DM9131_INTR), NULL },
1183		{ mk_mii_read(MII_BMSR), mii_parse_sr },
1184		{ mk_mii_read(MII_EXPANSION), NULL },
1185
1186		/* read acsr to get info */
1187
1188		{ mk_mii_read(MII_DM9131_ACSR), mii_parse_dm9131_acsr },
1189		{ mk_mii_end, }
1190	},
1191	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
1192		{ mk_mii_write(MII_DM9131_INTR, 0x0f00), NULL },
1193		{ mk_mii_end, }
1194	},
1195};
1196
1197
1198#endif /* CONFIG_FCC_DM9131 */
1199#ifdef CONFIG_FCC_DM9161
1200/* ------------------------------------------------------------------------- */
1201/* DM9161 Control register values */
1202#define MIIM_DM9161_CR_STOP     0x0400
1203#define MIIM_DM9161_CR_RSTAN    0x1200
1204
1205#define MIIM_DM9161_SCR         0x10
1206#define MIIM_DM9161_SCR_INIT    0x0610
1207
1208/* DM9161 Specified Configuration and Status Register */
1209#define MIIM_DM9161_SCSR        0x11
1210#define MIIM_DM9161_SCSR_100F   0x8000
1211#define MIIM_DM9161_SCSR_100H   0x4000
1212#define MIIM_DM9161_SCSR_10F    0x2000
1213#define MIIM_DM9161_SCSR_10H    0x1000
1214/* DM9161 10BT register */
1215#define MIIM_DM9161_10BTCSR 	0x12
1216#define MIIM_DM9161_10BTCSR_INIT 0x7800
1217/* DM9161 Interrupt Register */
1218#define MIIM_DM9161_INTR        0x15
1219#define MIIM_DM9161_INTR_PEND           0x8000
1220#define MIIM_DM9161_INTR_DPLX_MASK      0x0800
1221#define MIIM_DM9161_INTR_SPD_MASK       0x0400
1222#define MIIM_DM9161_INTR_LINK_MASK      0x0200
1223#define MIIM_DM9161_INTR_MASK           0x0100
1224#define MIIM_DM9161_INTR_DPLX_CHANGE    0x0010
1225#define MIIM_DM9161_INTR_SPD_CHANGE     0x0008
1226#define MIIM_DM9161_INTR_LINK_CHANGE    0x0004
1227#define MIIM_DM9161_INTR_INIT           0x0000
1228#define MIIM_DM9161_INTR_STOP   \
1229(MIIM_DM9161_INTR_DPLX_MASK | MIIM_DM9161_INTR_SPD_MASK \
1230  | MIIM_DM9161_INTR_LINK_MASK | MIIM_DM9161_INTR_MASK)
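/* Illustrative expansion: MIIM_DM9161_INTR_STOP works out to
 * 0x0800 | 0x0400 | 0x0200 | 0x0100 = 0x0f00, i.e. all four interrupt
 * mask bits set, which masks (disables) those interrupt sources when the
 * shutdown command list below writes it.
 */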
1231
1232static void mii_parse_dm9161_sr(uint mii_reg, struct net_device * dev)
1233{
1234	volatile struct fcc_enet_private *fep = dev->priv;
1235	uint regstat,  timeout=0xffff;
1236
1237	while(!(mii_reg & 0x0020) && timeout--)
1238	{
1239		regstat=mk_mii_read(MII_BMSR);
1240	        regstat |= fep->phy_addr <<23;
1241	        mii_reg = mii_send_receive(fep->fip,regstat);
1242	}
1243
1244	mii_parse_sr(mii_reg, dev);
1245}
1246
1247static void mii_parse_dm9161_scsr(uint mii_reg, struct net_device * dev)
1248{
1249	volatile struct fcc_enet_private *fep = dev->priv;
1250	uint s = fep->phy_status;
1251
1252	s &= ~(PHY_STAT_SPMASK);
1253	switch((mii_reg >>12) & 0xf) {
1254		case 1:
1255		{
1256			s |= PHY_STAT_10HDX;
1257			printk("10BaseT Half Duplex\n");
1258			break;
1259		}
1260		case 2:
1261		{
1262			s |= PHY_STAT_10FDX;
1263		        printk("10BaseT Full Duplex\n");
1264			break;
1265		}
1266		case 4:
1267	        {
1268			s |= PHY_STAT_100HDX;
1269		        printk("100BaseT Half Duplex\n");
1270			break;
1271		}
1272		case 8:
1273		{
1274			s |= PHY_STAT_100FDX;
1275			printk("100BaseT Full Duplex\n");
1276			break;
1277		}
1278	}
1279
1280	fep->phy_status = s;
1281
1282}
1283
1284static void mii_dm9161_wait(uint mii_reg, struct net_device *dev)
1285{
1286	int timeout = HZ;
1287
1288	/* Davicom takes a bit to come up after a reset,
1289	 * so wait here for a bit */
1290	schedule_timeout_uninterruptible(timeout);
1291}
1292
1293static phy_info_t phy_info_dm9161 = {
1294        0x00181b88,
1295        "Davicom DM9161E",
1296        (const phy_cmd_t[]) { /* config */
1297                { mk_mii_write(MII_BMCR, MIIM_DM9161_CR_STOP), NULL},
1298                /* Do not bypass the scrambler/descrambler */
1299                { mk_mii_write(MIIM_DM9161_SCR, MIIM_DM9161_SCR_INIT), NULL},
1300		/* Configure 10BTCSR register */
1301		{ mk_mii_write(MIIM_DM9161_10BTCSR, MIIM_DM9161_10BTCSR_INIT),NULL},
1302                /* Configure some basic stuff */
1303                { mk_mii_write(MII_BMCR, 0x1000), NULL},
1304		{ mk_mii_read(MII_BMCR), mii_parse_cr },
1305		{ mk_mii_read(MII_ADVERTISE), mii_parse_anar },
1306		{ mk_mii_end,}
1307        },
1308       (const phy_cmd_t[]) { /* startup */
1309                /* Restart Auto Negotiation */
1310                { mk_mii_write(MII_BMCR, MIIM_DM9161_CR_RSTAN), NULL},
1311                /* Status is read once to clear old link state */
1312                { mk_mii_read(MII_BMSR), mii_dm9161_wait},
1313                /* Auto-negotiate */
1314                { mk_mii_read(MII_BMSR), mii_parse_dm9161_sr},
1315                /* Read the status */
1316                { mk_mii_read(MIIM_DM9161_SCSR), mii_parse_dm9161_scsr},
1317                /* Clear any pending interrupts */
1318                { mk_mii_read(MIIM_DM9161_INTR), NULL},
1319                /* Enable Interrupts */
1320                { mk_mii_write(MIIM_DM9161_INTR, MIIM_DM9161_INTR_INIT), NULL},
1321                { mk_mii_end,}
1322        },
1323       (const phy_cmd_t[]) { /* ack_int */
1324                { mk_mii_read(MIIM_DM9161_INTR), NULL},
1325                { mk_mii_end,}
1326        },
1327        (const phy_cmd_t[]) { /* shutdown */
1328	        { mk_mii_read(MIIM_DM9161_INTR),NULL},
1329                { mk_mii_write(MIIM_DM9161_INTR, MIIM_DM9161_INTR_STOP), NULL},
1330	        { mk_mii_end,}
1331	},
1332};
1333#endif /* CONFIG_FCC_DM9161 */
1334
1335static phy_info_t *phy_info[] = {
1336
1337#ifdef CONFIG_FCC_LXT970
1338	&phy_info_lxt970,
1339#endif /* CONFIG_FCC_LXT970 */
1340
1341#ifdef CONFIG_FCC_LXT971
1342	&phy_info_lxt971,
1343#endif /* CONFIG_FCC_LXT971 */
1344
1345#ifdef CONFIG_FCC_QS6612
1346	&phy_info_qs6612,
1347#endif /* CONFIG_FCC_QS6612 */
1348
1349#ifdef CONFIG_FCC_DM9131
1350	&phy_info_dm9131,
1351#endif /* CONFIG_FCC_DM9131 */
1352
1353#ifdef CONFIG_FCC_DM9161
1354	&phy_info_dm9161,
1355#endif /* CONFIG_FCC_DM9161 */
1356
1357#ifdef CONFIG_FCC_GENERIC_PHY
1358	/* Generic PHY support.  This must be the last PHY in the table.
1359	 * It will be used to support any PHY that doesn't match a previous
1360	 * entry in the table.
1361	 */
1362	&phy_info_generic,
1363#endif /* CONFIG_FCC_GENERIC_PHY */
1364
1365	NULL
1366};
1367
1368static void mii_display_status(struct work_struct *work)
1369{
1370	volatile struct fcc_enet_private *fep =
1371		container_of(work, struct fcc_enet_private, phy_relink);
1372	struct net_device *dev = fep->dev;
1373	uint s = fep->phy_status;
1374
1375	if (!fep->link && !fep->old_link) {
1376		/* Link is still down - don't print anything */
1377		return;
1378	}
1379
1380	printk("%s: status: ", dev->name);
1381
1382	if (!fep->link) {
1383		printk("link down");
1384	} else {
1385		printk("link up");
1386
1387		switch(s & PHY_STAT_SPMASK) {
1388		case PHY_STAT_100FDX: printk(", 100 Mbps Full Duplex"); break;
1389		case PHY_STAT_100HDX: printk(", 100 Mbps Half Duplex"); break;
1390		case PHY_STAT_10FDX:  printk(", 10 Mbps Full Duplex");  break;
1391		case PHY_STAT_10HDX:  printk(", 10 Mbps Half Duplex");  break;
1392		default:
1393			printk(", Unknown speed/duplex");
1394		}
1395
1396		if (s & PHY_STAT_ANC)
1397			printk(", auto-negotiation complete");
1398	}
1399
1400	if (s & PHY_STAT_FAULT)
1401		printk(", remote fault");
1402
1403	printk(".\n");
1404}
1405
1406static void mii_display_config(struct work_struct *work)
1407{
1408	volatile struct fcc_enet_private *fep =
1409		container_of(work, struct fcc_enet_private,
1410			     phy_display_config);
1411	struct net_device *dev = fep->dev;
1412	uint s = fep->phy_status;
1413
1414	printk("%s: config: auto-negotiation ", dev->name);
1415
1416	if (s & PHY_CONF_ANE)
1417		printk("on");
1418	else
1419		printk("off");
1420
1421	if (s & PHY_CONF_100FDX)
1422		printk(", 100FDX");
1423	if (s & PHY_CONF_100HDX)
1424		printk(", 100HDX");
1425	if (s & PHY_CONF_10FDX)
1426		printk(", 10FDX");
1427	if (s & PHY_CONF_10HDX)
1428		printk(", 10HDX");
1429	if (!(s & PHY_CONF_SPMASK))
1430		printk(", No speed/duplex selected?");
1431
1432	if (s & PHY_CONF_LOOP)
1433		printk(", loopback enabled");
1434
1435	printk(".\n");
1436
1437	fep->sequence_done = 1;
1438}
1439
1440static void mii_relink(struct net_device *dev)
1441{
1442	struct fcc_enet_private *fep = dev->priv;
1443	int duplex = 0;
1444
1445	fep->old_link = fep->link;
1446	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
1447
1448#ifdef MDIO_DEBUG
1449	printk("  mii_relink:  link=%d\n", fep->link);
1450#endif
1451
1452	if (fep->link) {
1453		if (fep->phy_status
1454		    & (PHY_STAT_100FDX | PHY_STAT_10FDX))
1455			duplex = 1;
1456		fcc_restart(dev, duplex);
1457#ifdef MDIO_DEBUG
1458		printk("  mii_relink:  duplex=%d\n", duplex);
1459#endif
1460	}
1461}
1462
1463static void mii_queue_relink(uint mii_reg, struct net_device *dev)
1464{
1465	struct fcc_enet_private *fep = dev->priv;
1466
1467	mii_relink(dev);
1468
1469	schedule_work(&fep->phy_relink);
1470}
1471
1472static void mii_queue_config(uint mii_reg, struct net_device *dev)
1473{
1474	struct fcc_enet_private *fep = dev->priv;
1475
1476	schedule_work(&fep->phy_display_config);
1477}
1478
1479phy_cmd_t phy_cmd_relink[] = { { mk_mii_read(MII_BMCR), mii_queue_relink },
1480			       { mk_mii_end, } };
1481phy_cmd_t phy_cmd_config[] = { { mk_mii_read(MII_BMCR), mii_queue_config },
1482			       { mk_mii_end, } };
1483
1484
1485/* Read remainder of PHY ID.
1486*/
1487static void
1488mii_discover_phy3(uint mii_reg, struct net_device *dev)
1489{
1490	struct fcc_enet_private *fep;
1491	int	i;
1492
1493	fep = dev->priv;
1494	printk("mii_reg: %08x\n", mii_reg);
1495	fep->phy_id |= (mii_reg & 0xffff);
1496
1497	for(i = 0; phy_info[i]; i++)
1498		if((phy_info[i]->id == (fep->phy_id >> 4)) || !phy_info[i]->id)
1499			break;
1500
1501	if(!phy_info[i])
1502		panic("%s: PHY id 0x%08x is not supported!\n",
1503		      dev->name, fep->phy_id);
1504
1505	fep->phy = phy_info[i];
1506	fep->phy_id_done = 1;
1507
1508	printk("%s: Phy @ 0x%x, type %s (0x%08x)\n",
1509		dev->name, fep->phy_addr, fep->phy->name, fep->phy_id);
1510}
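/* Illustrative example of the ID assembly above: a PHY whose PHYSID1 reads
 * 0x0013 and PHYSID2 reads 0x78e2 yields phy_id = 0x001378e2; dropping the
 * low revision nibble (phy_id >> 4 = 0x0001378e) is what gets compared
 * against the ids in phy_info[], matching the LXT971 entry in this case.
 */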
1511
1512/* Scan all of the MII PHY addresses looking for someone to respond
1513 * with a valid ID.  This usually happens quickly.
1514 */
1515static void
1516mii_discover_phy(uint mii_reg, struct net_device *dev)
1517{
1518	struct fcc_enet_private *fep;
1519	uint	phytype;
1520
1521	fep = dev->priv;
1522
1523	if ((phytype = (mii_reg & 0xffff)) != 0xffff) {
1524
1525		/* Got first part of ID, now get remainder. */
1526		fep->phy_id = phytype << 16;
1527		mii_queue(dev, mk_mii_read(MII_PHYSID2), mii_discover_phy3);
1528	} else {
1529		fep->phy_addr++;
1530		if (fep->phy_addr < 32) {
1531			mii_queue(dev, mk_mii_read(MII_PHYSID1),
1532							mii_discover_phy);
1533		} else {
1534			printk("fec: No PHY device found.\n");
1535		}
1536	}
1537}
1538#endif	/* CONFIG_USE_MDIO */
1539
1540#ifdef PHY_INTERRUPT
1541/* This interrupt occurs when the PHY detects a link change. */
1542static irqreturn_t
1543mii_link_interrupt(int irq, void * dev_id)
1544{
1545	struct	net_device *dev = dev_id;
1546	struct fcc_enet_private *fep = dev->priv;
1547	fcc_info_t *fip = fep->fip;
1548
1549	if (fep->phy) {
1550		/* We don't want to be interrupted by an FCC
1551		 * interrupt here.
1552		 */
1553		disable_irq_nosync(fip->fc_interrupt);
1554
1555		mii_do_cmd(dev, fep->phy->ack_int);
1556		/* restart and display status */
1557		mii_do_cmd(dev, phy_cmd_relink);
1558
1559		enable_irq(fip->fc_interrupt);
1560	}
1561	return IRQ_HANDLED;
1562}
1563#endif	/* ifdef PHY_INTERRUPT */
1564
1565
1566
1567/* Set the individual MAC address.
1568 */
1569int fcc_enet_set_mac_address(struct net_device *dev, void *p)
1570{
1571	struct sockaddr *addr= (struct sockaddr *) p;
1572	struct fcc_enet_private *cep;
1573	volatile fcc_enet_t *ep;
1574	unsigned char *eap;
1575	int i;
1576
1577	cep = (struct fcc_enet_private *)(dev->priv);
1578	ep = cep->ep;
1579
1580        if (netif_running(dev))
1581                return -EBUSY;
1582
1583        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1584
1585	eap = (unsigned char *) &(ep->fen_paddrh);
1586	for (i=5; i>=0; i--)
1587		*eap++ = addr->sa_data[i];
1588
1589        return 0;
1590}
1591
1592
1593/* Initialize the CPM Ethernet on FCC.
1594 */
1595static int __init fec_enet_init(void)
1596{
1597	struct net_device *dev;
1598	struct fcc_enet_private *cep;
1599	fcc_info_t	*fip;
1600	int	i, np, err;
1601	volatile	cpm2_map_t		*immap;
1602	volatile	iop_cpm2_t	*io;
1603
1604	immap = (cpm2_map_t *)CPM_MAP_ADDR;	/* and to internal registers */
1605	io = &immap->im_ioport;
1606
1607	np = sizeof(fcc_ports) / sizeof(fcc_info_t);
1608	fip = fcc_ports;
1609
1610	while (np-- > 0) {
1611		/* Create an Ethernet device instance.
1612		*/
1613		dev = alloc_etherdev(sizeof(*cep));
1614		if (!dev)
1615			return -ENOMEM;
1616
1617		cep = dev->priv;
1618		spin_lock_init(&cep->lock);
1619		cep->fip = fip;
1620
1621		init_fcc_shutdown(fip, cep, immap);
1622		init_fcc_ioports(fip, io, immap);
1623		init_fcc_param(fip, dev, immap);
1624
1625		dev->base_addr = (unsigned long)(cep->ep);
1626
1627		/* The CPM Ethernet specific entries in the device
1628		 * structure.
1629		 */
1630		dev->open = fcc_enet_open;
1631		dev->hard_start_xmit = fcc_enet_start_xmit;
1632		dev->tx_timeout = fcc_enet_timeout;
1633		dev->watchdog_timeo = TX_TIMEOUT;
1634		dev->stop = fcc_enet_close;
1635		dev->get_stats = fcc_enet_get_stats;
1636		/* dev->set_multicast_list = set_multicast_list; */
1637		dev->set_mac_address = fcc_enet_set_mac_address;
1638
1639		init_fcc_startup(fip, dev);
1640
1641		err = register_netdev(dev);
1642		if (err) {
1643			free_netdev(dev);
1644			return err;
1645		}
1646
1647		printk("%s: FCC ENET Version 0.3, ", dev->name);
1648		for (i=0; i<5; i++)
1649			printk("%02x:", dev->dev_addr[i]);
1650		printk("%02x\n", dev->dev_addr[5]);
1651
1652#ifdef	CONFIG_USE_MDIO
1653		/* Queue up command to detect the PHY and initialize the
1654	 	* remainder of the interface.
1655	 	*/
1656		cep->phy_id_done = 0;
1657		cep->phy_addr = fip->fc_phyaddr;
1658		mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy);
1659		INIT_WORK(&cep->phy_relink, mii_display_status);
1660		INIT_WORK(&cep->phy_display_config, mii_display_config);
1661		cep->dev = dev;
1662#endif	/* CONFIG_USE_MDIO */
1663
1664		fip++;
1665	}
1666
1667	return 0;
1668}
1669module_init(fec_enet_init);
1670
1671/* Make sure the device is shut down during initialization.
1672*/
1673static void __init
1674init_fcc_shutdown(fcc_info_t *fip, struct fcc_enet_private *cep,
1675						volatile cpm2_map_t *immap)
1676{
1677	volatile	fcc_enet_t	*ep;
1678	volatile	fcc_t		*fccp;
1679
1680	/* Get pointer to FCC area in parameter RAM.
1681	*/
1682	ep = (fcc_enet_t *)(&immap->im_dprambase[fip->fc_proff]);
1683
1684	/* And another to the FCC register area.
1685	*/
1686	fccp = (volatile fcc_t *)(&immap->im_fcc[fip->fc_fccnum]);
1687	cep->fccp = fccp;		/* Keep the pointers handy */
1688	cep->ep = ep;
1689
1690	/* Disable receive and transmit in case someone left it running.
1691	*/
1692	fccp->fcc_gfmr &= ~(FCC_GFMR_ENR | FCC_GFMR_ENT);
1693}
1694
1695/* Initialize the I/O pins for the FCC Ethernet.
1696*/
1697static void __init
1698init_fcc_ioports(fcc_info_t *fip, volatile iop_cpm2_t *io,
1699						volatile cpm2_map_t *immap)
1700{
1701
1702	/* FCC1 pins are on port A/C.  FCC2/3 are port B/C.
1703	*/
1704	if (fip->fc_proff == PROFF_FCC1) {
1705		/* Configure port A and C pins for FCC1 Ethernet.
1706		 */
1707		io->iop_pdira &= ~PA1_DIRA_BOUT;
1708		io->iop_pdira |= PA1_DIRA_BIN;
1709		io->iop_psora &= ~PA1_PSORA_BOUT;
1710		io->iop_psora |= PA1_PSORA_BIN;
1711		io->iop_ppara |= (PA1_DIRA_BOUT | PA1_DIRA_BIN);
1712	}
1713	if (fip->fc_proff == PROFF_FCC2) {
1714		/* Configure port B and C pins for FCC Ethernet.
1715		 */
1716		io->iop_pdirb &= ~PB2_DIRB_BOUT;
1717		io->iop_pdirb |= PB2_DIRB_BIN;
1718		io->iop_psorb &= ~PB2_PSORB_BOUT;
1719		io->iop_psorb |= PB2_PSORB_BIN;
1720		io->iop_pparb |= (PB2_DIRB_BOUT | PB2_DIRB_BIN);
1721	}
1722	if (fip->fc_proff == PROFF_FCC3) {
1723		/* Configure port B and C pins for FCC Ethernet.
1724		 */
1725		io->iop_pdirb &= ~PB3_DIRB_BOUT;
1726		io->iop_pdirb |= PB3_DIRB_BIN;
1727		io->iop_psorb &= ~PB3_PSORB_BOUT;
1728		io->iop_psorb |= PB3_PSORB_BIN;
1729		io->iop_pparb |= (PB3_DIRB_BOUT | PB3_DIRB_BIN);
1730
1731		io->iop_pdirc &= ~PC3_DIRC_BOUT;
1732		io->iop_pdirc |= PC3_DIRC_BIN;
1733		io->iop_psorc &= ~PC3_PSORC_BOUT;
1734		io->iop_psorc |= PC3_PSORC_BIN;
1735		io->iop_pparc |= (PC3_DIRC_BOUT | PC3_DIRC_BIN);
1736
1737	}
1738
1739	/* Port C has clocks......
1740	*/
1741	io->iop_psorc &= ~(fip->fc_trxclocks);
1742	io->iop_pdirc &= ~(fip->fc_trxclocks);
1743	io->iop_pparc |= fip->fc_trxclocks;
1744
1745#ifdef	CONFIG_USE_MDIO
1746	/* ....and the MII serial clock/data.
1747	*/
1748	io->iop_pdatc |= (fip->fc_mdio | fip->fc_mdck);
1749	io->iop_podrc &= ~(fip->fc_mdio | fip->fc_mdck);
1750	io->iop_pdirc |= (fip->fc_mdio | fip->fc_mdck);
1751	io->iop_pparc &= ~(fip->fc_mdio | fip->fc_mdck);
1752#endif	/* CONFIG_USE_MDIO */
1753
1754	/* Configure Serial Interface clock routing.
1755	 * First, clear all FCC bits to zero,
1756	 * then set the ones we want.
1757	 */
1758	immap->im_cpmux.cmx_fcr &= ~(fip->fc_clockmask);
1759	immap->im_cpmux.cmx_fcr |= fip->fc_clockroute;
1760}
1761
1762static void __init
1763init_fcc_param(fcc_info_t *fip, struct net_device *dev,
1764						volatile cpm2_map_t *immap)
1765{
1766	unsigned char	*eap;
1767	unsigned long	mem_addr;
1768	bd_t		*bd;
1769	int		i, j;
1770	struct		fcc_enet_private *cep;
1771	volatile	fcc_enet_t	*ep;
1772	volatile	cbd_t		*bdp;
1773	volatile	cpm_cpm2_t	*cp;
1774
1775	cep = (struct fcc_enet_private *)(dev->priv);
1776	ep = cep->ep;
1777	cp = cpmp;
1778
1779	bd = (bd_t *)__res;
1780
1781	/* Zero the whole thing.....I must have missed some individually.
1782	 * It works when I do this.
1783	 */
1784	memset((char *)ep, 0, sizeof(fcc_enet_t));
1785
1786	/* Allocate space for the buffer descriptors from regular memory.
1787	 * Initialize base addresses for the buffer descriptors.
1788	 */
1789	cep->rx_bd_base = kmalloc(sizeof(cbd_t) * RX_RING_SIZE,
1790			GFP_KERNEL | GFP_DMA);
1791	ep->fen_genfcc.fcc_rbase = __pa(cep->rx_bd_base);
1792	cep->tx_bd_base = kmalloc(sizeof(cbd_t) * TX_RING_SIZE,
1793			GFP_KERNEL | GFP_DMA);
1794	ep->fen_genfcc.fcc_tbase = __pa(cep->tx_bd_base);
1795
1796	cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
1797	cep->cur_rx = cep->rx_bd_base;
1798
1799	ep->fen_genfcc.fcc_rstate = (CPMFCR_GBL | CPMFCR_EB) << 24;
1800	ep->fen_genfcc.fcc_tstate = (CPMFCR_GBL | CPMFCR_EB) << 24;
1801
1802	/* Set maximum bytes per receive buffer.
1803	 * It must be a multiple of 32.
1804	 */
1805	ep->fen_genfcc.fcc_mrblr = PKT_MAXBLR_SIZE;
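	/* For example, a 1518-byte maximum frame would round up to a
	 * 1536-byte receive buffer.
	 */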
1806
1807	/* Allocate space in the reserved FCC area of DPRAM for the
1808	 * internal buffers.  No one uses this space (yet), so we
1809	 * can do this.  Later, we will add resource management for
1810	 * this area.
1811	 */
1812	mem_addr = CPM_FCC_SPECIAL_BASE + (fip->fc_fccnum * 128);
1813	ep->fen_genfcc.fcc_riptr = mem_addr;
1814	ep->fen_genfcc.fcc_tiptr = mem_addr+32;
1815	ep->fen_padptr = mem_addr+64;
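	/* Fill the 32-byte pad buffer (the pad characters used when short
	 * frames are padded out) with 0x88.
	 */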
1816	memset((char *)(&(immap->im_dprambase[(mem_addr+64)])), 0x88, 32);
1817
1818	ep->fen_genfcc.fcc_rbptr = 0;
1819	ep->fen_genfcc.fcc_tbptr = 0;
1820	ep->fen_genfcc.fcc_rcrc = 0;
1821	ep->fen_genfcc.fcc_tcrc = 0;
1822	ep->fen_genfcc.fcc_res1 = 0;
1823	ep->fen_genfcc.fcc_res2 = 0;
1824
1825	ep->fen_camptr = 0;	/* CAM isn't used in this driver */
1826
1827	/* Set CRC preset and mask.
1828	*/
1829	ep->fen_cmask = 0xdebb20e3;
1830	ep->fen_cpres = 0xffffffff;
1831
1832	ep->fen_crcec = 0;	/* CRC Error counter */
1833	ep->fen_alec = 0;	/* alignment error counter */
1834	ep->fen_disfc = 0;	/* discard frame counter */
1835	ep->fen_retlim = 15;	/* Retry limit threshold */
1836	ep->fen_pper = 0;	/* Normal persistence */
1837
1838	/* Clear hash filter tables.
1839	*/
1840	ep->fen_gaddrh = 0;
1841	ep->fen_gaddrl = 0;
1842	ep->fen_iaddrh = 0;
1843	ep->fen_iaddrl = 0;
1844
1845	/* Clear the Out-of-sequence TxBD.
1846	*/
1847	ep->fen_tfcstat = 0;
1848	ep->fen_tfclen = 0;
1849	ep->fen_tfcptr = 0;
1850
1851	ep->fen_mflr = PKT_MAXBUF_SIZE;   /* maximum frame length register */
1852	ep->fen_minflr = PKT_MINBUF_SIZE;  /* minimum frame length register */
1853
1854	/* Set the Ethernet station address.
1855	 *
1856	 * The address is supplied in the board information structure, so we
1857	 * copy it into the controller.  So far we have only been given one
1858	 * Ethernet address, so we make each FCC's address unique by setting
1859	 * a few bits in the upper byte of the non-static part.  The loop
1860	 * walks the address backwards, so the bytes land in the controller
1861	 * in reverse order.
	 */
1862	eap = (unsigned char *)&(ep->fen_paddrh);
1863	for (i=5; i>=0; i--) {
1864
1865/*
1866 * The EP8260 only uses FCC3, so we can safely give it the real
1867 * MAC address unmodified (hence the CONFIG_RPX8260 exclusion below).
1868 */
1869#ifdef CONFIG_SBC82xx
1870		if (i == 5) {
1871			/* bd->bi_enetaddr holds the SCC0 address; the FCC
1872			   devices count up from there */
1873			dev->dev_addr[i] = bd->bi_enetaddr[i] & ~3;
1874			dev->dev_addr[i] += 1 + fip->fc_fccnum;
1875			*eap++ = dev->dev_addr[i];
1876		}
1877#else
1878#ifndef CONFIG_RPX8260
1879		if (i == 3) {
1880			dev->dev_addr[i] = bd->bi_enetaddr[i];
1881			dev->dev_addr[i] |= (1 << (7 - fip->fc_fccnum));
1882			*eap++ = dev->dev_addr[i];
1883		} else
1884#endif
1885		{
1886			*eap++ = dev->dev_addr[i] = bd->bi_enetaddr[i];
1887		}
1888#endif
1889	}
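	/* Illustrative example (not a real address): with a board address
	 * of 08:00:3e:03:44:55, boards that derive per-FCC addresses above
	 * would use 08:00:3e:83:44:55 for FCC1, 08:00:3e:43:44:55 for FCC2
	 * and 08:00:3e:23:44:55 for FCC3.
	 */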
1890
1891	ep->fen_taddrh = 0;
1892	ep->fen_taddrm = 0;
1893	ep->fen_taddrl = 0;
1894
1895	ep->fen_maxd1 = PKT_MAXDMA_SIZE;	/* maximum DMA1 length */
1896	ep->fen_maxd2 = PKT_MAXDMA_SIZE;	/* maximum DMA2 length */
1897
1898	/* Clear stat counters, in case we ever enable RMON.
1899	*/
1900	ep->fen_octc = 0;
1901	ep->fen_colc = 0;
1902	ep->fen_broc = 0;
1903	ep->fen_mulc = 0;
1904	ep->fen_uspc = 0;
1905	ep->fen_frgc = 0;
1906	ep->fen_ospc = 0;
1907	ep->fen_jbrc = 0;
1908	ep->fen_p64c = 0;
1909	ep->fen_p65c = 0;
1910	ep->fen_p128c = 0;
1911	ep->fen_p256c = 0;
1912	ep->fen_p512c = 0;
1913	ep->fen_p1024c = 0;
1914
1915	ep->fen_rfthr = 0;	/* Suggested by manual */
1916	ep->fen_rfcnt = 0;
1917	ep->fen_cftype = 0;
1918
1919	/* Now allocate the host memory pages and initialize the
1920	 * buffer descriptors.
1921	 */
1922	bdp = cep->tx_bd_base;
1923	for (i=0; i<TX_RING_SIZE; i++) {
1924
1925		/* Initialize every transmit BD in the ring.
1926		*/
1927		bdp->cbd_sc = 0;
1928		bdp->cbd_datlen = 0;
1929		bdp->cbd_bufaddr = 0;
1930		bdp++;
1931	}
1932
1933	/* Set the last buffer to wrap.
1934	*/
1935	bdp--;
1936	bdp->cbd_sc |= BD_SC_WRAP;
1937
1938	bdp = cep->rx_bd_base;
1939	for (i=0; i<FCC_ENET_RX_PAGES; i++) {
1940
1941		/* Allocate a page.
1942		*/
1943		mem_addr = __get_free_page(GFP_KERNEL);
1944
1945		/* Initialize the BD for every fragment in the page.
1946		*/
1947		for (j=0; j<FCC_ENET_RX_FRPPG; j++) {
1948			bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;
1949			bdp->cbd_datlen = 0;
1950			bdp->cbd_bufaddr = __pa(mem_addr);
1951			mem_addr += FCC_ENET_RX_FRSIZE;
1952			bdp++;
1953		}
1954	}
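	/* Each page is carved into FCC_ENET_RX_FRPPG buffers of
	 * FCC_ENET_RX_FRSIZE bytes each (e.g. 2KB buffers on a 4KB page
	 * give two receive BDs per page).
	 */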
1955
1956	/* Set the last buffer to wrap.
1957	*/
1958	bdp--;
1959	bdp->cbd_sc |= BD_SC_WRAP;
1960
1961	/* Re-initialize the channel now.  We do this later than the manual
1962	 * describes because we have only just finished the BD setup.  The
1963	 * CP clears CPM_CR_FLG once the command is done, hence the busy-wait.
1964	 */
1965	cp->cp_cpcr = mk_cr_cmd(fip->fc_cpmpage, fip->fc_cpmblock, 0x0c,
1966			CPM_CR_INIT_TRX) | CPM_CR_FLG;
1967	while (cp->cp_cpcr & CPM_CR_FLG);
1968
1969	cep->skb_cur = cep->skb_dirty = 0;
1970}
1971
1972/* Let 'er rip.
1973*/
1974static void __init
1975init_fcc_startup(fcc_info_t *fip, struct net_device *dev)
1976{
1977	volatile fcc_t	*fccp;
1978	struct fcc_enet_private *cep;
1979
1980	cep = (struct fcc_enet_private *)(dev->priv);
1981	fccp = cep->fccp;
1982
1983#ifdef CONFIG_RPX8260
1984#ifdef PHY_INTERRUPT
1985	/* Route PHY interrupt to IRQ.  The following code only works for
1986	 * IRQ1 - IRQ7.  It does not work for Port C interrupts.
1987	 */
1988	*((volatile u_char *) (RPX_CSR_ADDR + 13)) &= ~BCSR13_FETH_IRQMASK;
1989	*((volatile u_char *) (RPX_CSR_ADDR + 13)) |=
1990		((PHY_INTERRUPT - SIU_INT_IRQ1 + 1) << 4);
1991#endif
1992	/* Initialize MDIO pins. */
1993	*((volatile u_char *) (RPX_CSR_ADDR + 4)) &= ~BCSR4_MII_MDC;
1994	*((volatile u_char *) (RPX_CSR_ADDR + 4)) |=
1995		BCSR4_MII_READ | BCSR4_MII_MDIO;
1996	/* Enable external LXT971 PHY. */
1997	*((volatile u_char *) (RPX_CSR_ADDR + 4)) |= BCSR4_EN_PHY;
1998	udelay(1000);
1999	*((volatile u_char *) (RPX_CSR_ADDR + 4)) |= BCSR4_EN_MII;
2000	udelay(1000);
2001#endif	/* ifdef CONFIG_RPX8260 */
2002
2003	fccp->fcc_fcce = 0xffff;	/* Clear any pending events */
2004
2005	/* Leave FCC interrupts masked for now.  Will be unmasked by
2006	 * fcc_restart().
2007	 */
2008	fccp->fcc_fccm = 0;
2009
2010	/* Install our interrupt handler.
2011	*/
2012	if (request_irq(fip->fc_interrupt, fcc_enet_interrupt, 0, "fenet",
2013				dev) < 0)
2014		printk(KERN_ERR "Can't get FCC IRQ %d\n", fip->fc_interrupt);
2015
2016#ifdef	PHY_INTERRUPT
2017#ifdef CONFIG_ADS8272
2018	if (request_irq(PHY_INTERRUPT, mii_link_interrupt, IRQF_SHARED,
2019				"mii", dev) < 0)
2020		printk(KERN_CRIT "Can't get MII IRQ %d\n", PHY_INTERRUPT);
2021#else
2022	/* Make IRQn edge triggered.  This does not work if PHY_INTERRUPT is
2023	 * on Port C.
2024	 */
2025	((volatile cpm2_map_t *) CPM_MAP_ADDR)->im_intctl.ic_siexr |=
2026		(1 << (14 - (PHY_INTERRUPT - SIU_INT_IRQ1)));
2027
2028	if (request_irq(PHY_INTERRUPT, mii_link_interrupt, 0,
2029							"mii", dev) < 0)
2030		printk(KERN_CRIT "Can't get MII IRQ %d\n", PHY_INTERRUPT);
2031#endif
2032#endif	/* PHY_INTERRUPT */
2033
2034	/* Set GFMR for Ethernet operating mode with transmit clock invert.
2035	 */
2036	fccp->fcc_gfmr = (FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);
2037
2038	/* Set sync/delimiters.
2039	*/
2040	fccp->fcc_fdsr = 0xd555;
2041
2042	/* Set protocol-specific processing mode for Ethernet.
2043	 * Full-duplex operation is configured later, in fcc_restart(),
2044	 * once the duplex mode has been determined.
2045	 */
2046	fccp->fcc_fpsmr = FCC_PSMR_ENCRC;
2047
2048#ifdef CONFIG_PQ2ADS
2049	/* Enable the PHY. */
2050	*(volatile uint *)(BCSR_ADDR + 4) &= ~BCSR1_FETHIEN;
2051	*(volatile uint *)(BCSR_ADDR + 4) |=  BCSR1_FETH_RST;
2052#endif
2053#if defined(CONFIG_PQ2ADS) || defined(CONFIG_PQ2FADS)
2054	/* Enable the 2nd PHY. */
2055	*(volatile uint *)(BCSR_ADDR + 12) &= ~BCSR3_FETHIEN2;
2056	*(volatile uint *)(BCSR_ADDR + 12) |=  BCSR3_FETH2_RST;
2057#endif
2058
2059#if defined(CONFIG_USE_MDIO) || defined(CONFIG_TQM8260)
2060	/* start in full duplex mode, and negotiate speed
2061	 */
2062	fcc_restart (dev, 1);
2063#else
2064	/* start in half duplex mode
2065	 */
2066	fcc_restart (dev, 0);
2067#endif
2068}
2069
2070#ifdef	CONFIG_USE_MDIO
2071/* MII command/status interface.
2072 * Not every detail is described here; the protocol definition can be
2073 * found in many other places, including the data sheet of most PHY
2074 * parts.
2075 * One wonders what "they" were thinking when they put an I2C controller
2076 * in the CPM but left MDIO to be bit-banged by toggling these pins.
2077 */
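/* For reference, the 32-bit command words shifted out by
 * mii_send_receive() below follow the IEEE 802.3 clause 22 frame layout:
 * ST(01) OP(10=read, 01=write) PHYAD(5 bits) REGAD(5 bits) TA(2) DATA(16).
 * A minimal sketch of how such command words could be built; these
 * example macros are illustrative only and are not used in this file:
 */
#define EXAMPLE_MII_READ(phy, reg)	(0x60000000 | \
		(((phy) & 0x1f) << 23) | (((reg) & 0x1f) << 18))
#define EXAMPLE_MII_WRITE(phy, reg, val) (0x50020000 | \
		(((phy) & 0x1f) << 23) | (((reg) & 0x1f) << 18) | \
		((val) & 0xffff))
/* For instance, passing EXAMPLE_MII_READ(0, 1) to mii_send_receive()
 * would read the BMSR of the PHY at address 0; the register value ends
 * up in the low 16 bits of the returned word.
 */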
2078#ifdef CONFIG_RPX8260
2079	/* The EP8260 has the MDIO pins in a BCSR instead of on Port C
2080	 * like most other boards.
2081	 */
2082#define MDIO_ADDR ((volatile u_char *)(RPX_CSR_ADDR + 4))
2083#define MAKE_MDIO_OUTPUT *MDIO_ADDR &= ~BCSR4_MII_READ
2084#define MAKE_MDIO_INPUT  *MDIO_ADDR |=  BCSR4_MII_READ | BCSR4_MII_MDIO
2085#define OUT_MDIO(bit)				\
2086	if (bit)				\
2087		*MDIO_ADDR |=  BCSR4_MII_MDIO;	\
2088	else					\
2089		*MDIO_ADDR &= ~BCSR4_MII_MDIO;
2090#define IN_MDIO (*MDIO_ADDR & BCSR4_MII_MDIO)
2091#define OUT_MDC(bit)				\
2092	if (bit)				\
2093		*MDIO_ADDR |=  BCSR4_MII_MDC;	\
2094	else					\
2095		*MDIO_ADDR &= ~BCSR4_MII_MDC;
2096#else	/* ifdef CONFIG_RPX8260 */
2097	/* This is for the usual case where the MDIO pins are on Port C.
2098	 */
2099#define MDIO_ADDR (((volatile cpm2_map_t *)CPM_MAP_ADDR)->im_ioport)
2100#define MAKE_MDIO_OUTPUT MDIO_ADDR.iop_pdirc |= fip->fc_mdio
2101#define MAKE_MDIO_INPUT MDIO_ADDR.iop_pdirc &= ~fip->fc_mdio
2102#define OUT_MDIO(bit)				\
2103	if (bit)				\
2104		MDIO_ADDR.iop_pdatc |= fip->fc_mdio;	\
2105	else					\
2106		MDIO_ADDR.iop_pdatc &= ~fip->fc_mdio;
2107#define IN_MDIO ((MDIO_ADDR.iop_pdatc) & fip->fc_mdio)
2108#define OUT_MDC(bit)				\
2109	if (bit)				\
2110		MDIO_ADDR.iop_pdatc |= fip->fc_mdck;	\
2111	else					\
2112		MDIO_ADDR.iop_pdatc &= ~fip->fc_mdck;
2113#endif	/* ifdef CONFIG_RPX8260 */
2114
2115static uint
2116mii_send_receive(fcc_info_t *fip, uint cmd)
2117{
2118	uint		retval;
2119	int		read_op, i, off;
2120	const int	us = 1;
2121
2122	read_op = ((cmd & 0xf0000000) == 0x60000000);
2123
2124	/* Write preamble
2125	 */
2126	OUT_MDIO(1);
2127	MAKE_MDIO_OUTPUT;
2128	OUT_MDIO(1);
2129	for (i = 0; i < 32; i++)
2130	{
2131		udelay(us);
2132		OUT_MDC(1);
2133		udelay(us);
2134		OUT_MDC(0);
2135	}
2136
2137	/* Shift out the frame: all 32 bits for a write, but only the 14
2138	 * header bits for a read, then release MDIO for the turnaround. */
2139	for (i = 0, off = 31; i < (read_op ? 14 : 32); i++, --off)
2140	{
2141		OUT_MDIO((cmd >> off) & 0x00000001);
2142		udelay(us);
2143		OUT_MDC(1);
2144		udelay(us);
2145		OUT_MDC(0);
2146	}
2147
2148	retval = cmd;
2149
2150	if (read_op)
2151	{
2152		retval >>= 16;
2153
2154		MAKE_MDIO_INPUT;
2155		udelay(us);
2156		OUT_MDC(1);
2157		udelay(us);
2158		OUT_MDC(0);
2159
2160		for (i = 0; i < 16; i++)
2161		{
2162			udelay(us);
2163			OUT_MDC(1);
2164			udelay(us);
2165			retval <<= 1;
2166			if (IN_MDIO)
2167				retval++;
2168			OUT_MDC(0);
2169		}
2170	}
2171
2172	MAKE_MDIO_INPUT;
2173	udelay(us);
2174	OUT_MDC(1);
2175	udelay(us);
2176	OUT_MDC(0);
2177
2178	return retval;
2179}
2180#endif	/* CONFIG_USE_MDIO */
2181
2182static void
2183fcc_stop(struct net_device *dev)
2184{
2185	struct fcc_enet_private	*fep = (struct fcc_enet_private *)(dev->priv);
2186	volatile fcc_t	*fccp = fep->fccp;
2187	fcc_info_t *fip = fep->fip;
2188	volatile fcc_enet_t *ep = fep->ep;
2189	volatile cpm_cpm2_t *cp = cpmp;
2190	volatile cbd_t *bdp;
2191	int i;
2192
2193	if ((fccp->fcc_gfmr & (FCC_GFMR_ENR | FCC_GFMR_ENT)) == 0)
2194		return;	/* already down */
2195
2196	fccp->fcc_fccm = 0;
2197
2198	/* issue the graceful stop tx command */
2199	while (cp->cp_cpcr & CPM_CR_FLG);
2200	cp->cp_cpcr = mk_cr_cmd(fip->fc_cpmpage, fip->fc_cpmblock,
2201				0x0c, CPM_CR_GRA_STOP_TX) | CPM_CR_FLG;
2202	while (cp->cp_cpcr & CPM_CR_FLG);
2203
2204	/* Disable transmit/receive */
2205	fccp->fcc_gfmr &= ~(FCC_GFMR_ENR | FCC_GFMR_ENT);
2206
2207	/* ack the graceful stop event, then issue the restart tx command */
2208	fccp->fcc_fcce = FCC_ENET_GRA;
2209	while (cp->cp_cpcr & CPM_CR_FLG);
2210	cp->cp_cpcr = mk_cr_cmd(fip->fc_cpmpage, fip->fc_cpmblock,
2211				0x0c, CPM_CR_RESTART_TX) | CPM_CR_FLG;
2212	while (cp->cp_cpcr & CPM_CR_FLG);
2213
2214	/* free tx buffers */
2215	fep->skb_cur = fep->skb_dirty = 0;
2216	for (i=0; i<=TX_RING_MOD_MASK; i++) {
2217		if (fep->tx_skbuff[i] != NULL) {
2218			dev_kfree_skb(fep->tx_skbuff[i]);
2219			fep->tx_skbuff[i] = NULL;
2220		}
2221	}
2222	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
2223	fep->tx_free = TX_RING_SIZE;
2224	ep->fen_genfcc.fcc_tbptr = ep->fen_genfcc.fcc_tbase;
2225
2226	/* Initialize the tx buffer descriptors. */
2227	bdp = fep->tx_bd_base;
2228	for (i=0; i<TX_RING_SIZE; i++) {
2229		bdp->cbd_sc = 0;
2230		bdp->cbd_datlen = 0;
2231		bdp->cbd_bufaddr = 0;
2232		bdp++;
2233	}
2234	/* Set the last buffer to wrap. */
2235	bdp--;
2236	bdp->cbd_sc |= BD_SC_WRAP;
2237}
2238
2239static void
2240fcc_restart(struct net_device *dev, int duplex)
2241{
2242	struct fcc_enet_private	*fep = (struct fcc_enet_private *)(dev->priv);
2243	volatile fcc_t	*fccp = fep->fccp;
2244
2245	/* stop any transmissions in progress */
2246	fcc_stop(dev);
2247
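	/* Full duplex is selected by setting both FDE (full-duplex enable)
	 * and LPB in FPSMR; half duplex clears both.
	 */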
2248	if (duplex)
2249		fccp->fcc_fpsmr |= FCC_PSMR_FDE | FCC_PSMR_LPB;
2250	else
2251		fccp->fcc_fpsmr &= ~(FCC_PSMR_FDE | FCC_PSMR_LPB);
2252
2253	/* Enable interrupts for transmit error, complete frame
2254	 * received, and any transmit buffer for which we have set
2255	 * the interrupt flag.
2256	 */
2257	fccp->fcc_fccm = (FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);
2258
2259	/* Enable transmit/receive */
2260	fccp->fcc_gfmr |= FCC_GFMR_ENR | FCC_GFMR_ENT;
2261}
2262
2263static int
2264fcc_enet_open(struct net_device *dev)
2265{
2266	struct fcc_enet_private *fep = dev->priv;
2267
2268#ifdef	CONFIG_USE_MDIO
2269	fep->sequence_done = 0;
2270	fep->link = 0;
2271
2272	if (fep->phy) {
2273		fcc_restart(dev, 0);	/* always start in half-duplex */
2274		mii_do_cmd(dev, fep->phy->ack_int);
2275		mii_do_cmd(dev, fep->phy->config);
2276		mii_do_cmd(dev, phy_cmd_config);  /* display configuration */
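		/* Wait for the MII command sequence to complete. */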
2277		while(!fep->sequence_done)
2278			schedule();
2279
2280		mii_do_cmd(dev, fep->phy->startup);
2281		netif_start_queue(dev);
2282		return 0;		/* Success */
2283	}
2284	return -ENODEV;		/* No PHY we understand */
2285#else
2286	fep->link = 1;
2287	fcc_restart(dev, 0);	/* always start in half-duplex */
2288	netif_start_queue(dev);
2289	return 0;					/* Always succeed */
2290#endif	/* CONFIG_USE_MDIO */
2291}
2292