/*
 * drivers/net/ibm_newemac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 * 	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 * 	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>

#include "core.h"

/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. a TX buffer passed from the network stack can be split into
 * several BDs, and dma_map_single/dma_map_page can be used to map a particular
 * BD), maintaining such information adds overhead.
 * The current DMA API implementation for 4xx processors only ensures cache
 * coherency; the dma_unmap_???? routines are empty and are likely to stay this
 * way.
 * I decided to omit the dma_unmap_??? calls because I don't want to add
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make the code API-correct and efficient
 * at the same time and didn't come up with code I liked :(.                --ebs
 */
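
/* For reference, a minimal sketch of what the API-correct teardown would look
 * like if the unmap calls were kept (assuming one mapping per BD plus a
 * recorded length and direction, none of which the driver tracks today):
 *
 *	dma_unmap_single(&dev->ofdev->dev, dev->tx_desc[i].data_ptr,
 *			 dev->tx_desc[i].data_len, DMA_TO_DEVICE);
 *
 * On 4xx this expands to a no-op, which is why omitting it is harmless here.
 */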

#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)
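
/* For example, with NUM_TX_BUFF at 64 (the usual CONFIG_IBM_NEW_EMAC_TXB
 * default; see core.h and Kconfig for the authoritative value), the stopped
 * queue is woken in emac_poll_tx() once fewer than 16 descriptors remain
 * outstanding.
 */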

/* If the packet size is less than this number, we allocate a small skb and
 * copy the packet contents into it instead of just sending the original big
 * skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
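
/* This threshold is a Kconfig knob; 256 bytes is its usual default (an
 * assumption -- check CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD in your .config).
 * Small frames are copied into a right-sized skb while larger frames are
 * passed up in the original receive buffer.
 */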

static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);

/* This is the wait queue used to wait on any event related to probe, that
 * is, discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);

/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */

#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];

/* How long should I wait for dependent devices? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)

/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
				  EMAC_FTR_460EX_PHY_CLK_FIX |
				  EMAC_FTR_440EP_PHY_CLK_FIX))
		DBG(dev, "%s" NL, error);
	else if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
			error);
}

static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}

static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}

/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
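
/* Sanity check on the numbers above: a maximum-size standard frame occupies
 * 1518 bytes + 8 bytes preamble + 12 bytes inter-frame gap = 1538 byte times
 * on the wire, i.e. 1538 * 8 / 10 Mbit/s ~= 1230 us at 10 Mbit/s, ~123 us at
 * 100 Mbit/s and ~12.3 us at 1000 Mbit/s. The jumbo value corresponds to a
 * ~9000-byte frame: (9018 + 20) * 8 / 1000 Mbit/s ~= 72 us.
 */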

static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};

/* Please keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_mode == PHY_MODE_GMII ||
		phy_mode == PHY_MODE_RGMII ||
		phy_mode == PHY_MODE_SGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_MODE_SGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}

static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}

static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}

static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}

static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}

static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}

static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}

static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* A 40x erratum suggests stopping the RX channel before
		 * reset; we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
	/* Enable internal clock source */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_ETH_CFG,
			    0, SDR0_ETH_CFG_ECS << dev->cell_index);
#endif

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	 /* Enable external clock source */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_ETH_CFG,
			    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
#endif

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}

static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	u32 gaht_temp[regs];
	struct netdev_hw_addr *ha;
	int i;

	DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));

	memset(gaht_temp, 0, sizeof (gaht_temp));

	netdev_for_each_mc_addr(ha, dev->ndev) {
		int slot, reg, mask;
		DBG2(dev, "mc %pM" NL, ha->addr);

		slot = EMAC_XAHT_CRC_TO_SLOT(dev,
					     ether_crc(ETH_ALEN, ha->addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}

static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_RMR_BASE;
	else
	    r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI ||
			 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
		r |= EMAC_RMR_PMME;
	else if (!netdev_mc_empty(ndev))
		r |= EMAC_RMR_MAE;

	return r;
}

static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 16384:
		ret |= EMAC4_MR1_TFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}

static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}

static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ( (high & 0x3ff) << 6);
	else
		return (low << 23) | ( (high & 0x1ff) << 7);
}
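
/* Both encoders above pack hardware-specific register fields:
 * - emac_calc_trtr() expresses the transmit request threshold in 64-byte
 *   units minus one, so e.g. a 1024-byte threshold encodes as
 *   (1024 >> 6) - 1 = 15 before being shifted into place;
 * - emac_calc_rwmr() packs the RX low/high water marks into their EMAC4 or
 *   EMAC3-style bit positions (the field widths differ, hence the two masks).
 */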

static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
				(dev->phy.gpcs_address != 0xffffffff) ?
				 dev->phy.gpcs_address : dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* An erratum on 40x forces us NOT to use integrated flow control;
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* A PAUSE frame is sent when the RX FIFO reaches its high-water mark;
	   there should still be enough space in the FIFO to allow our link
	   partner time to process this frame and also time to send a PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book"; 100Mbps, ignoring preamble and
	   inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);
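
	/* Worked example (values assumed for illustration): with a 4096-byte
	 * RX FIFO and a 16-byte fifo_entry_size, the low-water mark is
	 * 4096 / 8 / 16 = 32 FIFO entries (512 bytes) and the high-water mark
	 * is 4096 / 4 / 16 = 64 entries (1024 bytes), matching the comment
	 * above; fifo_entry_size itself comes from the device-tree probe.
	 */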

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						  EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) {
		if (dev->phy.gpcs_address != 0xffffffff)
			emac_mii_reset_gpcs(&dev->phy);
		else
			emac_mii_reset_phy(&dev->phy);
	}

	return 0;
}

static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}

static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}

static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}

static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}


static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
};

static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}

static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}

static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read((dev->mdio_instance &&
				dev->phy.gpcs_address != id) ?
				dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write((dev->mdio_instance &&
			   dev->phy.gpcs_address != id) ?
			   dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}

/* Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid a full
	 * EMAC reset.
	 *
	 * There is a real problem with the EMAC4 core if we use the MWSW_001
	 * bit in the MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after the EMAC reset, it
	 * never happens, resulting in a hung TX (it'll be recovered by the
	 * TX timeout handler eventually, but this is just gross).
	 * So we either have to do a full TX reset or try to cheat here :)
	 *
	 * The only required change is to the RX mode register, so I *think*
	 * all we need is just to stop the RX channel. This seems to work on
	 * all tested SoCs.                                            --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}

/* Tx lock BH */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}
	__emac_set_multicast_list(dev);
}

static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}

/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
		return -EINVAL;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}

static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}

static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}

static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
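
/* A note on the +2/-2 dance above (also used in emac_resize_rx_ring() and
 * the copy path in emac_poll_rx()): skb_reserve() pushes skb->data 2 bytes
 * past a word boundary, and the descriptor is given
 * phys(skb->data - 2) + 2, i.e. the frame still lands at skb->data. The
 * 2-byte offset makes the IP header behind the 14-byte Ethernet header
 * 4-byte aligned, while the DMA mapping itself starts word-aligned at
 * skb->data - 2.
 */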

static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add_global(ndev, default_mcast_addr);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}

/* BHs disabled */

static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of(to_delayed_work(work),
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}

static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}

/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}

static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
		(skb->ip_summed == CHECKSUM_PARTIAL)) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}

static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If this conditional makes a significant
	 * perf difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return NETDEV_TX_OK;
}

/* Tx lock BH */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}

static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
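
/* For example (chunk size assumed for illustration): if MAL_MAX_TX_SIZE
 * were 4096, a single 5000-byte DMA mapping handed to emac_xmit_split()
 * would be spread over two BDs of 4096 and 904 bytes, with MAL_TX_CTRL_LAST
 * set only on the final chunk of the final fragment.
 */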
1371
1372/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
1373static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1374{
1375	struct emac_instance *dev = netdev_priv(ndev);
1376	int nr_frags = skb_shinfo(skb)->nr_frags;
1377	int len = skb->len, chunk;
1378	int slot, i;
1379	u16 ctrl;
1380	u32 pd;
1381
1382	/* This is common "fast" path */
1383	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1384		return emac_start_xmit(skb, ndev);
1385
1386	len -= skb->data_len;
1387
1388	/* Note, this is only an *estimation*, we can still run out of empty
1389	 * slots because of the additional fragmentation into
1390	 * MAL_MAX_TX_SIZE-sized chunks
1391	 */
1392	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1393		goto stop_queue;
1394
1395	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1396	    emac_tx_csum(dev, skb);
1397	slot = dev->tx_slot;
1398
1399	/* skb data */
1400	dev->tx_skb[slot] = NULL;
1401	chunk = min(len, MAL_MAX_TX_SIZE);
1402	dev->tx_desc[slot].data_ptr = pd =
1403	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1404	dev->tx_desc[slot].data_len = (u16) chunk;
1405	len -= chunk;
1406	if (unlikely(len))
1407		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1408				       ctrl);
1409	/* skb fragments */
1410	for (i = 0; i < nr_frags; ++i) {
1411		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1412		len = frag->size;
1413
1414		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1415			goto undo_frame;
1416
1417		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
1418				  DMA_TO_DEVICE);
1419
1420		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1421				       ctrl);
1422	}
1423
1424	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1425
1426	/* Attach skb to the last slot so we don't release it too early */
1427	dev->tx_skb[slot] = skb;
1428
1429	/* Send the packet out */
1430	if (dev->tx_slot == NUM_TX_BUFF - 1)
1431		ctrl |= MAL_TX_CTRL_WRAP;
1432	wmb();
1433	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1434	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1435
1436	return emac_xmit_finish(dev, skb->len);
1437
1438 undo_frame:
1439	/* Well, too bad. Our previous estimation was overly optimistic.
1440	 * Undo everything.
1441	 */
1442	while (slot != dev->tx_slot) {
1443		dev->tx_desc[slot].ctrl = 0;
1444		--dev->tx_cnt;
1445		if (--slot < 0)
1446			slot = NUM_TX_BUFF - 1;
1447	}
1448	++dev->estats.tx_undo;
1449
1450 stop_queue:
1451	netif_stop_queue(ndev);
1452	DBG2(dev, "stopped TX queue" NL);
1453	return NETDEV_TX_BUSY;
1454}
1455
1456/* Tx lock BHs */
1457static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1458{
1459	struct emac_error_stats *st = &dev->estats;
1460
1461	DBG(dev, "BD TX error %04x" NL, ctrl);
1462
1463	++st->tx_bd_errors;
1464	if (ctrl & EMAC_TX_ST_BFCS)
1465		++st->tx_bd_bad_fcs;
1466	if (ctrl & EMAC_TX_ST_LCS)
1467		++st->tx_bd_carrier_loss;
1468	if (ctrl & EMAC_TX_ST_ED)
1469		++st->tx_bd_excessive_deferral;
1470	if (ctrl & EMAC_TX_ST_EC)
1471		++st->tx_bd_excessive_collisions;
1472	if (ctrl & EMAC_TX_ST_LC)
1473		++st->tx_bd_late_collision;
1474	if (ctrl & EMAC_TX_ST_MC)
1475		++st->tx_bd_multple_collisions;
1476	if (ctrl & EMAC_TX_ST_SC)
1477		++st->tx_bd_single_collision;
1478	if (ctrl & EMAC_TX_ST_UR)
1479		++st->tx_bd_underrun;
1480	if (ctrl & EMAC_TX_ST_SQE)
1481		++st->tx_bd_sqe;
1482}
1483
1484static void emac_poll_tx(void *param)
1485{
1486	struct emac_instance *dev = param;
1487	u32 bad_mask;
1488
1489	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1490
1491	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1492		bad_mask = EMAC_IS_BAD_TX_TAH;
1493	else
1494		bad_mask = EMAC_IS_BAD_TX;
1495
1496	netif_tx_lock_bh(dev->ndev);
1497	if (dev->tx_cnt) {
1498		u16 ctrl;
1499		int slot = dev->ack_slot, n = 0;
1500	again:
1501		ctrl = dev->tx_desc[slot].ctrl;
1502		if (!(ctrl & MAL_TX_CTRL_READY)) {
1503			struct sk_buff *skb = dev->tx_skb[slot];
1504			++n;
1505
1506			if (skb) {
1507				dev_kfree_skb(skb);
1508				dev->tx_skb[slot] = NULL;
1509			}
1510			slot = (slot + 1) % NUM_TX_BUFF;
1511
1512			if (unlikely(ctrl & bad_mask))
1513				emac_parse_tx_error(dev, ctrl);
1514
1515			if (--dev->tx_cnt)
1516				goto again;
1517		}
1518		if (n) {
1519			dev->ack_slot = slot;
1520			if (netif_queue_stopped(dev->ndev) &&
1521			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1522				netif_wake_queue(dev->ndev);
1523
1524			DBG2(dev, "tx %d pkts" NL, n);
1525		}
1526	}
1527	netif_tx_unlock_bh(dev->ndev);
1528}
1529
1530static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1531				       int len)
1532{
1533	struct sk_buff *skb = dev->rx_skb[slot];
1534
1535	DBG2(dev, "recycle %d %d" NL, slot, len);
1536
1537	if (len)
1538		dma_map_single(&dev->ofdev->dev, skb->data - 2,
1539			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1540
1541	dev->rx_desc[slot].data_len = 0;
1542	wmb();
1543	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1544	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1545}
1546
1547static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1548{
1549	struct emac_error_stats *st = &dev->estats;
1550
1551	DBG(dev, "BD RX error %04x" NL, ctrl);
1552
1553	++st->rx_bd_errors;
1554	if (ctrl & EMAC_RX_ST_OE)
1555		++st->rx_bd_overrun;
1556	if (ctrl & EMAC_RX_ST_BP)
1557		++st->rx_bd_bad_packet;
1558	if (ctrl & EMAC_RX_ST_RP)
1559		++st->rx_bd_runt_packet;
1560	if (ctrl & EMAC_RX_ST_SE)
1561		++st->rx_bd_short_event;
1562	if (ctrl & EMAC_RX_ST_AE)
1563		++st->rx_bd_alignment_error;
1564	if (ctrl & EMAC_RX_ST_BFCS)
1565		++st->rx_bd_bad_fcs;
1566	if (ctrl & EMAC_RX_ST_PTL)
1567		++st->rx_bd_packet_too_long;
1568	if (ctrl & EMAC_RX_ST_ORE)
1569		++st->rx_bd_out_of_range;
1570	if (ctrl & EMAC_RX_ST_IRE)
1571		++st->rx_bd_in_range;
1572}
1573
1574static inline void emac_rx_csum(struct emac_instance *dev,
1575				struct sk_buff *skb, u16 ctrl)
1576{
1577#ifdef CONFIG_IBM_NEW_EMAC_TAH
1578	if (!ctrl && dev->tah_dev) {
1579		skb->ip_summed = CHECKSUM_UNNECESSARY;
1580		++dev->stats.rx_packets_csum;
1581	}
1582#endif
1583}
1584
1585static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1586{
1587	if (likely(dev->rx_sg_skb != NULL)) {
1588		int len = dev->rx_desc[slot].data_len;
1589		int tot_len = dev->rx_sg_skb->len + len;
1590
1591		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1592			++dev->estats.rx_dropped_mtu;
1593			dev_kfree_skb(dev->rx_sg_skb);
1594			dev->rx_sg_skb = NULL;
1595		} else {
1596			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1597					 dev->rx_skb[slot]->data, len);
1598			skb_put(dev->rx_sg_skb, len);
1599			emac_recycle_rx_skb(dev, slot, len);
1600			return 0;
1601		}
1602	}
1603	emac_recycle_rx_skb(dev, slot, 0);
1604	return -1;
1605}
1606
1607/* NAPI poll context */
1608static int emac_poll_rx(void *param, int budget)
1609{
1610	struct emac_instance *dev = param;
1611	int slot = dev->rx_slot, received = 0;
1612
1613	DBG2(dev, "poll_rx(%d)" NL, budget);
1614
1615 again:
1616	while (budget > 0) {
1617		int len;
1618		struct sk_buff *skb;
1619		u16 ctrl = dev->rx_desc[slot].ctrl;
1620
1621		if (ctrl & MAL_RX_CTRL_EMPTY)
1622			break;
1623
1624		skb = dev->rx_skb[slot];
1625		mb();
1626		len = dev->rx_desc[slot].data_len;
1627
1628		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1629			goto sg;
1630
1631		ctrl &= EMAC_BAD_RX_MASK;
1632		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1633			emac_parse_rx_error(dev, ctrl);
1634			++dev->estats.rx_dropped_error;
1635			emac_recycle_rx_skb(dev, slot, 0);
1636			len = 0;
1637			goto next;
1638		}
1639
1640		if (len < ETH_HLEN) {
1641			++dev->estats.rx_dropped_stack;
1642			emac_recycle_rx_skb(dev, slot, len);
1643			goto next;
1644		}
1645
1646		if (len && len < EMAC_RX_COPY_THRESH) {
1647			struct sk_buff *copy_skb =
1648			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1649			if (unlikely(!copy_skb))
1650				goto oom;
1651
1652			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1653			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1654					 len + 2);
1655			emac_recycle_rx_skb(dev, slot, len);
1656			skb = copy_skb;
1657		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1658			goto oom;
1659
1660		skb_put(skb, len);
1661	push_packet:
1662		skb->protocol = eth_type_trans(skb, dev->ndev);
1663		emac_rx_csum(dev, skb, ctrl);
1664
1665		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1666			++dev->estats.rx_dropped_stack;
1667	next:
1668		++dev->stats.rx_packets;
1669	skip:
1670		dev->stats.rx_bytes += len;
1671		slot = (slot + 1) % NUM_RX_BUFF;
1672		--budget;
1673		++received;
1674		continue;
1675	sg:
1676		if (ctrl & MAL_RX_CTRL_FIRST) {
1677			BUG_ON(dev->rx_sg_skb);
1678			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1679				DBG(dev, "rx OOM %d" NL, slot);
1680				++dev->estats.rx_dropped_oom;
1681				emac_recycle_rx_skb(dev, slot, 0);
1682			} else {
1683				dev->rx_sg_skb = skb;
1684				skb_put(skb, len);
1685			}
1686		} else if (!emac_rx_sg_append(dev, slot) &&
1687			   (ctrl & MAL_RX_CTRL_LAST)) {
1688
1689			skb = dev->rx_sg_skb;
1690			dev->rx_sg_skb = NULL;
1691
1692			ctrl &= EMAC_BAD_RX_MASK;
1693			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1694				emac_parse_rx_error(dev, ctrl);
1695				++dev->estats.rx_dropped_error;
1696				dev_kfree_skb(skb);
1697				len = 0;
1698			} else
1699				goto push_packet;
1700		}
1701		goto skip;
1702	oom:
1703		DBG(dev, "rx OOM %d" NL, slot);
1704		/* Drop the packet and recycle skb */
1705		++dev->estats.rx_dropped_oom;
1706		emac_recycle_rx_skb(dev, slot, 0);
1707		goto next;
1708	}
1709
1710	if (received) {
1711		DBG2(dev, "rx %d BDs" NL, received);
1712		dev->rx_slot = slot;
1713	}
1714
1715	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1716		mb();
1717		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1718			DBG2(dev, "rx restart" NL);
1719			received = 0;
1720			goto again;
1721		}
1722
1723		if (dev->rx_sg_skb) {
1724			DBG2(dev, "dropping partial rx packet" NL);
1725			++dev->estats.rx_dropped_error;
1726			dev_kfree_skb(dev->rx_sg_skb);
1727			dev->rx_sg_skb = NULL;
1728		}
1729
1730		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1731		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1732		emac_rx_enable(dev);
1733		dev->rx_slot = 0;
1734	}
1735	return received;
1736}
1737
1738/* NAPI poll context */
1739static int emac_peek_rx(void *param)
1740{
1741	struct emac_instance *dev = param;
1742
1743	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1744}
1745
1746/* NAPI poll context */
1747static int emac_peek_rx_sg(void *param)
1748{
1749	struct emac_instance *dev = param;
1750
1751	int slot = dev->rx_slot;
1752	while (1) {
1753		u16 ctrl = dev->rx_desc[slot].ctrl;
1754		if (ctrl & MAL_RX_CTRL_EMPTY)
1755			return 0;
1756		else if (ctrl & MAL_RX_CTRL_LAST)
1757			return 1;
1758
1759		slot = (slot + 1) % NUM_RX_BUFF;
1760
1761		/* I'm just being paranoid here :) */
1762		if (unlikely(slot == dev->rx_slot))
1763			return 0;
1764	}
1765}
1766
1767/* Hard IRQ */
1768static void emac_rxde(void *param)
1769{
1770	struct emac_instance *dev = param;
1771
1772	++dev->estats.rx_stopped;
1773	emac_rx_disable_async(dev);
1774}
1775
1776/* Hard IRQ */
1777static irqreturn_t emac_irq(int irq, void *dev_instance)
1778{
1779	struct emac_instance *dev = dev_instance;
1780	struct emac_regs __iomem *p = dev->emacp;
1781	struct emac_error_stats *st = &dev->estats;
1782	u32 isr;
1783
1784	spin_lock(&dev->lock);
1785
1786	isr = in_be32(&p->isr);
1787	out_be32(&p->isr, isr);
1788
1789	DBG(dev, "isr = %08x" NL, isr);
1790
1791	if (isr & EMAC4_ISR_TXPE)
1792		++st->tx_parity;
1793	if (isr & EMAC4_ISR_RXPE)
1794		++st->rx_parity;
1795	if (isr & EMAC4_ISR_TXUE)
1796		++st->tx_underrun;
1797	if (isr & EMAC4_ISR_RXOE)
1798		++st->rx_fifo_overrun;
1799	if (isr & EMAC_ISR_OVR)
1800		++st->rx_overrun;
1801	if (isr & EMAC_ISR_BP)
1802		++st->rx_bad_packet;
1803	if (isr & EMAC_ISR_RP)
1804		++st->rx_runt_packet;
1805	if (isr & EMAC_ISR_SE)
1806		++st->rx_short_event;
1807	if (isr & EMAC_ISR_ALE)
1808		++st->rx_alignment_error;
1809	if (isr & EMAC_ISR_BFCS)
1810		++st->rx_bad_fcs;
1811	if (isr & EMAC_ISR_PTLE)
1812		++st->rx_packet_too_long;
1813	if (isr & EMAC_ISR_ORE)
1814		++st->rx_out_of_range;
1815	if (isr & EMAC_ISR_IRE)
1816		++st->rx_in_range;
1817	if (isr & EMAC_ISR_SQE)
1818		++st->tx_sqe;
1819	if (isr & EMAC_ISR_TE)
1820		++st->tx_errors;
1821
1822	spin_unlock(&dev->lock);
1823
1824	return IRQ_HANDLED;
1825}
1826
1827static struct net_device_stats *emac_stats(struct net_device *ndev)
1828{
1829	struct emac_instance *dev = netdev_priv(ndev);
1830	struct emac_stats *st = &dev->stats;
1831	struct emac_error_stats *est = &dev->estats;
1832	struct net_device_stats *nst = &dev->nstats;
1833	unsigned long flags;
1834
1835	DBG2(dev, "stats" NL);
1836
1837	/* Compute "legacy" statistics */
1838	spin_lock_irqsave(&dev->lock, flags);
1839	nst->rx_packets = (unsigned long)st->rx_packets;
1840	nst->rx_bytes = (unsigned long)st->rx_bytes;
1841	nst->tx_packets = (unsigned long)st->tx_packets;
1842	nst->tx_bytes = (unsigned long)st->tx_bytes;
1843	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1844					  est->rx_dropped_error +
1845					  est->rx_dropped_resize +
1846					  est->rx_dropped_mtu);
1847	nst->tx_dropped = (unsigned long)est->tx_dropped;
1848
1849	nst->rx_errors = (unsigned long)est->rx_bd_errors;
1850	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1851					      est->rx_fifo_overrun +
1852					      est->rx_overrun);
1853	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1854					       est->rx_alignment_error);
1855	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1856					     est->rx_bad_fcs);
1857	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1858						est->rx_bd_short_event +
1859						est->rx_bd_packet_too_long +
1860						est->rx_bd_out_of_range +
1861						est->rx_bd_in_range +
1862						est->rx_runt_packet +
1863						est->rx_short_event +
1864						est->rx_packet_too_long +
1865						est->rx_out_of_range +
1866						est->rx_in_range);
1867
1868	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1869	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1870					      est->tx_underrun);
1871	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1872	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1873					  est->tx_bd_excessive_collisions +
1874					  est->tx_bd_late_collision +
1875					  est->tx_bd_multple_collisions);
1876	spin_unlock_irqrestore(&dev->lock, flags);
1877	return nst;
1878}
1879
1880static struct mal_commac_ops emac_commac_ops = {
1881	.poll_tx = &emac_poll_tx,
1882	.poll_rx = &emac_poll_rx,
1883	.peek_rx = &emac_peek_rx,
1884	.rxde = &emac_rxde,
1885};
1886
1887static struct mal_commac_ops emac_commac_sg_ops = {
1888	.poll_tx = &emac_poll_tx,
1889	.poll_rx = &emac_poll_rx,
1890	.peek_rx = &emac_peek_rx_sg,
1891	.rxde = &emac_rxde,
1892};
1893
1894/* Ethtool support */
1895static int emac_ethtool_get_settings(struct net_device *ndev,
1896				     struct ethtool_cmd *cmd)
1897{
1898	struct emac_instance *dev = netdev_priv(ndev);
1899
1900	cmd->supported = dev->phy.features;
1901	cmd->port = PORT_MII;
1902	cmd->phy_address = dev->phy.address;
1903	cmd->transceiver =
1904	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1905
1906	mutex_lock(&dev->link_lock);
1907	cmd->advertising = dev->phy.advertising;
1908	cmd->autoneg = dev->phy.autoneg;
1909	cmd->speed = dev->phy.speed;
1910	cmd->duplex = dev->phy.duplex;
1911	mutex_unlock(&dev->link_lock);
1912
1913	return 0;
1914}
1915
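/*
 * set_settings: validate the request against the PHY feature mask, then
 * either force speed/duplex or restart autonegotiation with the masked
 * advertised modes.  Illustrative userspace usage (hypothetical
 * interface name):
 *
 *	ethtool -s eth0 autoneg off speed 100 duplex full
 *	ethtool -s eth0 autoneg on
 */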
1916static int emac_ethtool_set_settings(struct net_device *ndev,
1917				     struct ethtool_cmd *cmd)
1918{
1919	struct emac_instance *dev = netdev_priv(ndev);
1920	u32 f = dev->phy.features;
1921
1922	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1923	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1924
1925	/* Basic sanity checks */
1926	if (dev->phy.address < 0)
1927		return -EOPNOTSUPP;
1928	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1929		return -EINVAL;
1930	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1931		return -EINVAL;
1932	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1933		return -EINVAL;
1934
1935	if (cmd->autoneg == AUTONEG_DISABLE) {
1936		switch (cmd->speed) {
1937		case SPEED_10:
1938			if (cmd->duplex == DUPLEX_HALF &&
1939			    !(f & SUPPORTED_10baseT_Half))
1940				return -EINVAL;
1941			if (cmd->duplex == DUPLEX_FULL &&
1942			    !(f & SUPPORTED_10baseT_Full))
1943				return -EINVAL;
1944			break;
1945		case SPEED_100:
1946			if (cmd->duplex == DUPLEX_HALF &&
1947			    !(f & SUPPORTED_100baseT_Half))
1948				return -EINVAL;
1949			if (cmd->duplex == DUPLEX_FULL &&
1950			    !(f & SUPPORTED_100baseT_Full))
1951				return -EINVAL;
1952			break;
1953		case SPEED_1000:
1954			if (cmd->duplex == DUPLEX_HALF &&
1955			    !(f & SUPPORTED_1000baseT_Half))
1956				return -EINVAL;
1957			if (cmd->duplex == DUPLEX_FULL &&
1958			    !(f & SUPPORTED_1000baseT_Full))
1959				return -EINVAL;
1960			break;
1961		default:
1962			return -EINVAL;
1963		}
1964
1965		mutex_lock(&dev->link_lock);
1966		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1967						cmd->duplex);
1968		mutex_unlock(&dev->link_lock);
1969
1970	} else {
1971		if (!(f & SUPPORTED_Autoneg))
1972			return -EINVAL;
1973
1974		mutex_lock(&dev->link_lock);
1975		dev->phy.def->ops->setup_aneg(&dev->phy,
1976					      (cmd->advertising & f) |
1977					      (dev->phy.advertising &
1978					       (ADVERTISED_Pause |
1979						ADVERTISED_Asym_Pause)));
1980		mutex_unlock(&dev->link_lock);
1981	}
1982	emac_force_link_update(dev);
1983
1984	return 0;
1985}
1986
1987static void emac_ethtool_get_ringparam(struct net_device *ndev,
1988				       struct ethtool_ringparam *rp)
1989{
1990	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1991	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1992}
1993
1994static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1995					struct ethtool_pauseparam *pp)
1996{
1997	struct emac_instance *dev = netdev_priv(ndev);
1998
1999	mutex_lock(&dev->link_lock);
2000	if ((dev->phy.features & SUPPORTED_Autoneg) &&
2001	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2002		pp->autoneg = 1;
2003
2004	if (dev->phy.duplex == DUPLEX_FULL) {
2005		if (dev->phy.pause)
2006			pp->rx_pause = pp->tx_pause = 1;
2007		else if (dev->phy.asym_pause)
2008			pp->tx_pause = 1;
2009	}
2010	mutex_unlock(&dev->link_lock);
2011}
2012
2013static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
2014{
2015	struct emac_instance *dev = netdev_priv(ndev);
2016
2017	return dev->tah_dev != NULL;
2018}
2019
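/*
 * Register-dump layout produced for "ethtool -d": a global header is
 * followed by the MAL dump, then this EMAC's sub-header and register
 * block, then optional ZMII/RGMII/TAH blocks flagged in
 * hdr->components.  get_regs_len must stay in sync with the dump
 * functions below.
 */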
2020static int emac_get_regs_len(struct emac_instance *dev)
2021{
2022	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2023		return sizeof(struct emac_ethtool_regs_subhdr) +
2024			EMAC4_ETHTOOL_REGS_SIZE(dev);
2025	else
2026		return sizeof(struct emac_ethtool_regs_subhdr) +
2027			EMAC_ETHTOOL_REGS_SIZE(dev);
2028}
2029
2030static int emac_ethtool_get_regs_len(struct net_device *ndev)
2031{
2032	struct emac_instance *dev = netdev_priv(ndev);
2033	int size;
2034
2035	size = sizeof(struct emac_ethtool_regs_hdr) +
2036		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2037	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2038		size += zmii_get_regs_len(dev->zmii_dev);
2039	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2040		size += rgmii_get_regs_len(dev->rgmii_dev);
2041	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2042		size += tah_get_regs_len(dev->tah_dev);
2043
2044	return size;
2045}
2046
2047static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2048{
2049	struct emac_ethtool_regs_subhdr *hdr = buf;
2050
2051	hdr->index = dev->cell_index;
2052	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2053		hdr->version = EMAC4_ETHTOOL_REGS_VER;
2054		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2055		return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
2056	} else {
2057		hdr->version = EMAC_ETHTOOL_REGS_VER;
2058		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2059		return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
2060	}
2061}
2062
2063static void emac_ethtool_get_regs(struct net_device *ndev,
2064				  struct ethtool_regs *regs, void *buf)
2065{
2066	struct emac_instance *dev = netdev_priv(ndev);
2067	struct emac_ethtool_regs_hdr *hdr = buf;
2068
2069	hdr->components = 0;
2070	buf = hdr + 1;
2071
2072	buf = mal_dump_regs(dev->mal, buf);
2073	buf = emac_dump_regs(dev, buf);
2074	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2075		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2076		buf = zmii_dump_regs(dev->zmii_dev, buf);
2077	}
2078	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2079		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2080		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2081	}
2082	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2083		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2084		buf = tah_dump_regs(dev->tah_dev, buf);
2085	}
2086}
2087
2088static int emac_ethtool_nway_reset(struct net_device *ndev)
2089{
2090	struct emac_instance *dev = netdev_priv(ndev);
2091	int res = 0;
2092
2093	DBG(dev, "nway_reset" NL);
2094
2095	if (dev->phy.address < 0)
2096		return -EOPNOTSUPP;
2097
2098	mutex_lock(&dev->link_lock);
2099	if (!dev->phy.autoneg) {
2100		res = -EINVAL;
2101		goto out;
2102	}
2103
2104	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2105 out:
2106	mutex_unlock(&dev->link_lock);
2107	emac_force_link_update(dev);
2108	return res;
2109}
2110
2111static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2112{
2113	if (stringset == ETH_SS_STATS)
2114		return EMAC_ETHTOOL_STATS_COUNT;
2115	else
2116		return -EINVAL;
2117}
2118
2119static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 *buf)
2121{
2122	if (stringset == ETH_SS_STATS)
2123		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2124}
2125
2126static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2127					   struct ethtool_stats *estats,
					   u64 *tmp_stats)
2129{
2130	struct emac_instance *dev = netdev_priv(ndev);
2131
2132	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2133	tmp_stats += sizeof(dev->stats) / sizeof(u64);
2134	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2135}
2136
2137static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2138				     struct ethtool_drvinfo *info)
2139{
2140	struct emac_instance *dev = netdev_priv(ndev);
2141
	/* info fields are fixed-size; bound all copies (full_name can be long) */
	strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	info->fw_version[0] = '\0';
	snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
		 dev->cell_index, dev->ofdev->dev.of_node->full_name);
2147	info->regdump_len = emac_ethtool_get_regs_len(ndev);
2148}
2149
2150static const struct ethtool_ops emac_ethtool_ops = {
2151	.get_settings = emac_ethtool_get_settings,
2152	.set_settings = emac_ethtool_set_settings,
2153	.get_drvinfo = emac_ethtool_get_drvinfo,
2154
2155	.get_regs_len = emac_ethtool_get_regs_len,
2156	.get_regs = emac_ethtool_get_regs,
2157
2158	.nway_reset = emac_ethtool_nway_reset,
2159
2160	.get_ringparam = emac_ethtool_get_ringparam,
2161	.get_pauseparam = emac_ethtool_get_pauseparam,
2162
2163	.get_rx_csum = emac_ethtool_get_rx_csum,
2164
2165	.get_strings = emac_ethtool_get_strings,
2166	.get_sset_count = emac_ethtool_get_sset_count,
2167	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2168
2169	.get_link = ethtool_op_get_link,
2170	.get_tx_csum = ethtool_op_get_tx_csum,
2171	.get_sg = ethtool_op_get_sg,
2172};
2173
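/*
 * Legacy MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG), as used by
 * tools such as mii-tool.  Only the PHY this driver bound to is
 * accessible; data->phy_id is ignored for reads and writes.
 */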
2174static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2175{
2176	struct emac_instance *dev = netdev_priv(ndev);
2177	struct mii_ioctl_data *data = if_mii(rq);
2178
2179	DBG(dev, "ioctl %08x" NL, cmd);
2180
2181	if (dev->phy.address < 0)
2182		return -EOPNOTSUPP;
2183
2184	switch (cmd) {
2185	case SIOCGMIIPHY:
2186		data->phy_id = dev->phy.address;
2187		/* Fall through */
2188	case SIOCGMIIREG:
2189		data->val_out = emac_mdio_read(ndev, dev->phy.address,
2190					       data->reg_num);
2191		return 0;
2192
2193	case SIOCSMIIREG:
2194		emac_mdio_write(ndev, dev->phy.address, data->reg_num,
2195				data->val_in);
2196		return 0;
2197	default:
2198		return -EOPNOTSUPP;
2199	}
2200}
2201
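/*
 * Device-tree dependency tracking for probe ordering: each EMAC may
 * reference a MAL, ZMII, RGMII, TAH and MDIO node by phandle, plus the
 * previously-probed EMAC when several share a PHY bus.  Probing blocks
 * (with a timeout) until all referenced devices have bound a driver.
 */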
2202struct emac_depentry {
2203	u32			phandle;
2204	struct device_node	*node;
2205	struct platform_device	*ofdev;
2206	void			*drvdata;
2207};
2208
2209#define	EMAC_DEP_MAL_IDX	0
2210#define	EMAC_DEP_ZMII_IDX	1
2211#define	EMAC_DEP_RGMII_IDX	2
2212#define	EMAC_DEP_TAH_IDX	3
2213#define	EMAC_DEP_MDIO_IDX	4
2214#define	EMAC_DEP_PREV_IDX	5
2215#define	EMAC_DEP_COUNT		6
2216
2217static int __devinit emac_check_deps(struct emac_instance *dev,
2218				     struct emac_depentry *deps)
2219{
2220	int i, there = 0;
2221	struct device_node *np;
2222
2223	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, all right */
2225		if (deps[i].phandle == 0) {
2226			there++;
2227			continue;
2228		}
2229		/* special case for blist as the dependency might go away */
2230		if (i == EMAC_DEP_PREV_IDX) {
2231			np = *(dev->blist - 1);
2232			if (np == NULL) {
2233				deps[i].phandle = 0;
2234				there++;
2235				continue;
2236			}
2237			if (deps[i].node == NULL)
2238				deps[i].node = of_node_get(np);
2239		}
2240		if (deps[i].node == NULL)
2241			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2242		if (deps[i].node == NULL)
2243			continue;
2244		if (deps[i].ofdev == NULL)
2245			deps[i].ofdev = of_find_device_by_node(deps[i].node);
2246		if (deps[i].ofdev == NULL)
2247			continue;
2248		if (deps[i].drvdata == NULL)
2249			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2250		if (deps[i].drvdata != NULL)
2251			there++;
2252	}
2253	return (there == EMAC_DEP_COUNT);
2254}
2255
2256static void emac_put_deps(struct emac_instance *dev)
2257{
2258	if (dev->mal_dev)
2259		of_dev_put(dev->mal_dev);
2260	if (dev->zmii_dev)
2261		of_dev_put(dev->zmii_dev);
2262	if (dev->rgmii_dev)
2263		of_dev_put(dev->rgmii_dev);
2264	if (dev->mdio_dev)
2265		of_dev_put(dev->mdio_dev);
2266	if (dev->tah_dev)
2267		of_dev_put(dev->tah_dev);
2268}
2269
2270static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2271					unsigned long action, void *data)
2272{
	/* We are only interested in devices getting bound to a driver */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return NOTIFY_DONE;
2277}
2278
2279static struct notifier_block emac_of_bus_notifier __devinitdata = {
2280	.notifier_call = emac_of_bus_notify
2281};
2282
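/*
 * Wait for all dependencies to show up.  The bus notifier above wakes
 * emac_probe_wait whenever any platform device binds a driver, and
 * emac_check_deps() re-evaluates the table; wait_event_timeout() bounds
 * the wait by EMAC_PROBE_DEP_TIMEOUT.
 */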
2283static int __devinit emac_wait_deps(struct emac_instance *dev)
2284{
2285	struct emac_depentry deps[EMAC_DEP_COUNT];
2286	int i, err;
2287
2288	memset(&deps, 0, sizeof(deps));
2289
2290	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2291	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2292	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2293	if (dev->tah_ph)
2294		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2295	if (dev->mdio_ph)
2296		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2297	if (dev->blist && dev->blist > emac_boot_list)
2298		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2299	bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
2300	wait_event_timeout(emac_probe_wait,
2301			   emac_check_deps(dev, deps),
2302			   EMAC_PROBE_DEP_TIMEOUT);
2303	bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
2304	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2305	for (i = 0; i < EMAC_DEP_COUNT; i++) {
2306		if (deps[i].node)
2307			of_node_put(deps[i].node);
2308		if (err && deps[i].ofdev)
2309			of_dev_put(deps[i].ofdev);
2310	}
2311	if (err == 0) {
2312		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2313		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2314		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2315		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2316		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2317	}
2318	if (deps[EMAC_DEP_PREV_IDX].ofdev)
2319		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2320	return err;
2321}
2322
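/*
 * Small helper to fetch a u32 device-tree property.  When @fatal is set
 * a missing property is logged; callers typically fall back to a
 * default otherwise, e.g.:
 *
 *	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
 *		dev->max_mtu = 1500;
 */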
2323static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2324					 u32 *val, int fatal)
2325{
2326	int len;
2327	const u32 *prop = of_get_property(np, name, &len);
2328	if (prop == NULL || len < sizeof(u32)) {
2329		if (fatal)
2330			printk(KERN_ERR "%s: missing %s property\n",
2331			       np->full_name, name);
2332		return -ENODEV;
2333	}
2334	*val = *prop;
2335	return 0;
2336}
2337
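/*
 * PHY discovery: if neither "phy-address" nor "phy-map" is given the
 * link is assumed fixed (no MDIO-visible PHY).  Otherwise scan the 32
 * possible MDIO addresses, skipping those masked off by phy-map or
 * already claimed in busy_phy_map, and probe the first address whose
 * BMCR read looks sane.
 */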
2338static int __devinit emac_init_phy(struct emac_instance *dev)
2339{
2340	struct device_node *np = dev->ofdev->dev.of_node;
2341	struct net_device *ndev = dev->ndev;
2342	u32 phy_map, adv;
2343	int i;
2344
2345	dev->phy.dev = ndev;
2346	dev->phy.mode = dev->phy_mode;
2347
2348	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2349		emac_reset(dev);
2350
2351		dev->phy.address = -1;
2352		dev->phy.features = SUPPORTED_MII;
2353		if (emac_phy_supports_gige(dev->phy_mode))
2354			dev->phy.features |= SUPPORTED_1000baseT_Full;
2355		else
2356			dev->phy.features |= SUPPORTED_100baseT_Full;
2357		dev->phy.pause = 1;
2358
2359		return 0;
2360	}
2361
2362	mutex_lock(&emac_phy_map_lock);
2363	phy_map = dev->phy_map | busy_phy_map;
2364
2365	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2366
2367	dev->phy.mdio_read = emac_mdio_read;
2368	dev->phy.mdio_write = emac_mdio_write;
2369
	/* Enable internal clock source on 440GX */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	emac_rx_clk_tx(dev);

2382	/* Configure EMAC with defaults so we can at least use MDIO
2383	 * This is needed mostly for 440GX
2384	 */
2385	if (emac_phy_gpcs(dev->phy.mode)) {
2386		dev->phy.gpcs_address = dev->gpcs_address;
2387		if (dev->phy.gpcs_address == 0xffffffff)
2388			dev->phy.address = dev->cell_index;
2389	}
2390
2391	emac_configure(dev);
2392
2393	if (dev->phy_address != 0xffffffff)
2394		phy_map = ~(1 << dev->phy_address);
2395
2396	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2397		if (!(phy_map & 1)) {
2398			int r;
2399			busy_phy_map |= 1 << i;
2400
2401			/* Quick check if there is a PHY at the address */
2402			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2403			if (r == 0xffff || r < 0)
2404				continue;
2405			if (!emac_mii_phy_probe(&dev->phy, i))
2406				break;
2407		}
2408
2409	/* Enable external clock source */
2410#ifdef CONFIG_PPC_DCR_NATIVE
2411	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2412		dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2413#endif
2414	mutex_unlock(&emac_phy_map_lock);
2415	if (i == 0x20) {
2416		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2417		return -ENXIO;
2418	}
2419
2420	/* Init PHY */
2421	if (dev->phy.def->ops->init)
2422		dev->phy.def->ops->init(&dev->phy);
2423
2424	/* Disable any PHY features not supported by the platform */
2425	dev->phy.def->features &= ~dev->phy_feat_exc;
2426
2427	/* Setup initial link parameters */
2428	if (dev->phy.features & SUPPORTED_Autoneg) {
2429		adv = dev->phy.features;
2430		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2431			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2432		/* Restart autonegotiation */
2433		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2434	} else {
2435		u32 f = dev->phy.def->features;
2436		int speed = SPEED_10, fd = DUPLEX_HALF;
2437
2438		/* Select highest supported speed/duplex */
2439		if (f & SUPPORTED_1000baseT_Full) {
2440			speed = SPEED_1000;
2441			fd = DUPLEX_FULL;
2442		} else if (f & SUPPORTED_1000baseT_Half)
2443			speed = SPEED_1000;
2444		else if (f & SUPPORTED_100baseT_Full) {
2445			speed = SPEED_100;
2446			fd = DUPLEX_FULL;
2447		} else if (f & SUPPORTED_100baseT_Half)
2448			speed = SPEED_100;
2449		else if (f & SUPPORTED_10baseT_Full)
2450			fd = DUPLEX_FULL;
2451
2452		/* Force link parameters */
2453		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2454	}
2455	return 0;
2456}
2457
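/*
 * Parse per-EMAC configuration from the device tree.  Missing optional
 * properties fall back to conservative defaults.  A minimal,
 * illustrative node (property values are hypothetical):
 *
 *	EMAC0: ethernet@ef600800 {
 *		compatible = "ibm,emac4";
 *		cell-index = <0>;
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		max-frame-size = <1500>;
 *		phy-mode = "rgmii";
 *		local-mac-address = [00 00 00 00 00 00];
 *	};
 *
 * The parent node must also carry a "clock-frequency" property (OPB
 * bus frequency), which is treated as fatal when absent.
 */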
2458static int __devinit emac_init_config(struct emac_instance *dev)
2459{
2460	struct device_node *np = dev->ofdev->dev.of_node;
2461	const void *p;
2462	unsigned int plen;
2463	const char *pm, *phy_modes[] = {
2464		[PHY_MODE_NA] = "",
2465		[PHY_MODE_MII] = "mii",
2466		[PHY_MODE_RMII] = "rmii",
2467		[PHY_MODE_SMII] = "smii",
2468		[PHY_MODE_RGMII] = "rgmii",
2469		[PHY_MODE_TBI] = "tbi",
2470		[PHY_MODE_GMII] = "gmii",
2471		[PHY_MODE_RTBI] = "rtbi",
2472		[PHY_MODE_SGMII] = "sgmii",
2473	};
2474
2475	/* Read config from device-tree */
2476	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2477		return -ENXIO;
2478	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2479		return -ENXIO;
2480	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2481		return -ENXIO;
2482	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2483		return -ENXIO;
2484	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2485		dev->max_mtu = 1500;
2486	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2487		dev->rx_fifo_size = 2048;
2488	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2489		dev->tx_fifo_size = 2048;
2490	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2491		dev->rx_fifo_size_gige = dev->rx_fifo_size;
2492	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2493		dev->tx_fifo_size_gige = dev->tx_fifo_size;
2494	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2495		dev->phy_address = 0xffffffff;
2496	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2497		dev->phy_map = 0xffffffff;
2498	if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2499		dev->gpcs_address = 0xffffffff;
2500	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2501		return -ENXIO;
2502	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2503		dev->tah_ph = 0;
2504	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2505		dev->tah_port = 0;
2506	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2507		dev->mdio_ph = 0;
2508	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2509		dev->zmii_ph = 0;
2510	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2511		dev->zmii_port = 0xffffffff;
2512	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2513		dev->rgmii_ph = 0;
2514	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2515		dev->rgmii_port = 0xffffffff;
2516	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2517		dev->fifo_entry_size = 16;
2518	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2519		dev->mal_burst_size = 256;
2520
2521	/* PHY mode needs some decoding */
2522	dev->phy_mode = PHY_MODE_NA;
2523	pm = of_get_property(np, "phy-mode", &plen);
2524	if (pm != NULL) {
2525		int i;
2526		for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2527			if (!strcasecmp(pm, phy_modes[i])) {
2528				dev->phy_mode = i;
2529				break;
2530			}
2531	}
2532
2533	/* Backward compat with non-final DT */
2534	if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2535		u32 nmode = *(const u32 *)pm;
2536		if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2537			dev->phy_mode = nmode;
2538	}
2539
2540	/* Check EMAC version */
2541	if (of_device_is_compatible(np, "ibm,emac4sync")) {
2542		dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2543		if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2544		    of_device_is_compatible(np, "ibm,emac-460gt"))
2545			dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2546		if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2547		    of_device_is_compatible(np, "ibm,emac-405exr"))
2548			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2549	} else if (of_device_is_compatible(np, "ibm,emac4")) {
2550		dev->features |= EMAC_FTR_EMAC4;
2551		if (of_device_is_compatible(np, "ibm,emac-440gx"))
2552			dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2553	} else {
2554		if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2555		    of_device_is_compatible(np, "ibm,emac-440gr"))
2556			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2557		if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2558#ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL
2559			dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2560#else
2561			printk(KERN_ERR "%s: Flow control not disabled!\n",
2562					np->full_name);
2563			return -ENXIO;
2564#endif
2565		}
2566
2567	}
2568
2569	/* Fixup some feature bits based on the device tree */
2570	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2571		dev->features |= EMAC_FTR_STACR_OC_INVERT;
2572	if (of_get_property(np, "has-new-stacr-staopc", NULL))
2573		dev->features |= EMAC_FTR_HAS_NEW_STACR;
2574
2575	/* CAB lacks the appropriate properties */
2576	if (of_device_is_compatible(np, "ibm,emac-axon"))
2577		dev->features |= EMAC_FTR_HAS_NEW_STACR |
2578			EMAC_FTR_STACR_OC_INVERT;
2579
2580	/* Enable TAH/ZMII/RGMII features as found */
2581	if (dev->tah_ph != 0) {
2582#ifdef CONFIG_IBM_NEW_EMAC_TAH
2583		dev->features |= EMAC_FTR_HAS_TAH;
2584#else
2585		printk(KERN_ERR "%s: TAH support not enabled !\n",
2586		       np->full_name);
2587		return -ENXIO;
2588#endif
2589	}
2590
2591	if (dev->zmii_ph != 0) {
2592#ifdef CONFIG_IBM_NEW_EMAC_ZMII
2593		dev->features |= EMAC_FTR_HAS_ZMII;
2594#else
2595		printk(KERN_ERR "%s: ZMII support not enabled !\n",
2596		       np->full_name);
2597		return -ENXIO;
2598#endif
2599	}
2600
2601	if (dev->rgmii_ph != 0) {
2602#ifdef CONFIG_IBM_NEW_EMAC_RGMII
2603		dev->features |= EMAC_FTR_HAS_RGMII;
2604#else
2605		printk(KERN_ERR "%s: RGMII support not enabled !\n",
2606		       np->full_name);
2607		return -ENXIO;
2608#endif
2609	}
2610
2611	/* Read MAC-address */
2612	p = of_get_property(np, "local-mac-address", NULL);
2613	if (p == NULL) {
2614		printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2615		       np->full_name);
2616		return -ENXIO;
2617	}
2618	memcpy(dev->ndev->dev_addr, p, 6);
2619
2620	/* IAHT and GAHT filter parameterization */
2621	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2622		dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2623		dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2624	} else {
2625		dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2626		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2627	}
2628
2629	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2630	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2631	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2632	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
2633	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);
2634
2635	return 0;
2636}
2637
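/*
 * Two ndo tables: the gige variant is selected when the PHY mode can do
 * gigabit and differs only in using the scatter/gather transmit path
 * and a driver-specific MTU handler.
 */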
2638static const struct net_device_ops emac_netdev_ops = {
2639	.ndo_open		= emac_open,
2640	.ndo_stop		= emac_close,
2641	.ndo_get_stats		= emac_stats,
2642	.ndo_set_multicast_list	= emac_set_multicast_list,
2643	.ndo_do_ioctl		= emac_ioctl,
2644	.ndo_tx_timeout		= emac_tx_timeout,
2645	.ndo_validate_addr	= eth_validate_addr,
2646	.ndo_set_mac_address	= eth_mac_addr,
2647	.ndo_start_xmit		= emac_start_xmit,
2648	.ndo_change_mtu		= eth_change_mtu,
2649};
2650
2651static const struct net_device_ops emac_gige_netdev_ops = {
2652	.ndo_open		= emac_open,
2653	.ndo_stop		= emac_close,
2654	.ndo_get_stats		= emac_stats,
2655	.ndo_set_multicast_list	= emac_set_multicast_list,
2656	.ndo_do_ioctl		= emac_ioctl,
2657	.ndo_tx_timeout		= emac_tx_timeout,
2658	.ndo_validate_addr	= eth_validate_addr,
2659	.ndo_set_mac_address	= eth_mac_addr,
2660	.ndo_start_xmit		= emac_start_xmit_sg,
2661	.ndo_change_mtu		= emac_change_mtu,
2662};
2663
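/*
 * Probe sequence: parse config, map IRQs and registers, wait for
 * dependent MAL/ZMII/RGMII/TAH/MDIO devices, register with the MAL,
 * attach the bridges, find the PHY and finally register the netdev.
 * The error labels below unwind in exactly the reverse order.
 */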
2664static int __devinit emac_probe(struct platform_device *ofdev,
2665				const struct of_device_id *match)
2666{
2667	struct net_device *ndev;
2668	struct emac_instance *dev;
2669	struct device_node *np = ofdev->dev.of_node;
2670	struct device_node **blist = NULL;
2671	int err, i;
2672
	/* Skip unused/unwired EMACs.  We leave the check for an unused
2674	 * property here for now, but new flat device trees should set a
2675	 * status property to "disabled" instead.
2676	 */
2677	if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2678		return -ENODEV;
2679
2680	/* Find ourselves in the bootlist if we are there */
2681	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2682		if (emac_boot_list[i] == np)
2683			blist = &emac_boot_list[i];
2684
2685	/* Allocate our net_device structure */
2686	err = -ENOMEM;
2687	ndev = alloc_etherdev(sizeof(struct emac_instance));
2688	if (!ndev) {
2689		printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2690		       np->full_name);
2691		goto err_gone;
2692	}
2693	dev = netdev_priv(ndev);
2694	dev->ndev = ndev;
2695	dev->ofdev = ofdev;
2696	dev->blist = blist;
2697	SET_NETDEV_DEV(ndev, &ofdev->dev);
2698
2699	/* Initialize some embedded data structures */
2700	mutex_init(&dev->mdio_lock);
2701	mutex_init(&dev->link_lock);
2702	spin_lock_init(&dev->lock);
2703	INIT_WORK(&dev->reset_work, emac_reset_work);
2704
2705	/* Init various config data based on device-tree */
2706	err = emac_init_config(dev);
2707	if (err != 0)
2708		goto err_free;
2709
2710	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2711	dev->emac_irq = irq_of_parse_and_map(np, 0);
2712	dev->wol_irq = irq_of_parse_and_map(np, 1);
	if (dev->emac_irq == NO_IRQ) {
		printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
		err = -ENODEV;
		goto err_free;
	}
2717	ndev->irq = dev->emac_irq;
2718
2719	/* Map EMAC regs */
	if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
		printk(KERN_ERR "%s: Can't get registers address\n",
		       np->full_name);
		err = -ENXIO;
		goto err_irq_unmap;
	}
	/* TODO: request_mem_region */
2726	dev->emacp = ioremap(dev->rsrc_regs.start,
2727			     dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
2728	if (dev->emacp == NULL) {
2729		printk(KERN_ERR "%s: Can't map device registers!\n",
2730		       np->full_name);
2731		err = -ENOMEM;
2732		goto err_irq_unmap;
2733	}
2734
2735	/* Wait for dependent devices */
2736	err = emac_wait_deps(dev);
2737	if (err) {
2738		printk(KERN_ERR
2739		       "%s: Timeout waiting for dependent devices\n",
2740		       np->full_name);
		/* display more info about what's missing? */
2742		goto err_reg_unmap;
2743	}
2744	dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2745	if (dev->mdio_dev != NULL)
2746		dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2747
2748	/* Register with MAL */
2749	dev->commac.ops = &emac_commac_ops;
2750	dev->commac.dev = dev;
2751	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2752	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2753	err = mal_register_commac(dev->mal, &dev->commac);
2754	if (err) {
2755		printk(KERN_ERR "%s: failed to register with mal %s!\n",
2756		       np->full_name, dev->mal_dev->dev.of_node->full_name);
2757		goto err_rel_deps;
2758	}
2759	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2760	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2761
2762	/* Get pointers to BD rings */
2763	dev->tx_desc =
2764	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2765	dev->rx_desc =
2766	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2767
2768	DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2769	DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2770
2771	/* Clean rings */
2772	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2773	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2774	memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2775	memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2776
2777	/* Attach to ZMII, if needed */
2778	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2779	    (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2780		goto err_unreg_commac;
2781
2782	/* Attach to RGMII, if needed */
2783	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2784	    (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2785		goto err_detach_zmii;
2786
2787	/* Attach to TAH, if needed */
2788	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2789	    (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2790		goto err_detach_rgmii;
2791
2792	/* Set some link defaults before we can find out real parameters */
2793	dev->phy.speed = SPEED_100;
2794	dev->phy.duplex = DUPLEX_FULL;
2795	dev->phy.autoneg = AUTONEG_DISABLE;
2796	dev->phy.pause = dev->phy.asym_pause = 0;
2797	dev->stop_timeout = STOP_TIMEOUT_100;
2798	INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2799
2800	/* Find PHY if any */
2801	err = emac_init_phy(dev);
2802	if (err != 0)
2803		goto err_detach_tah;
2804
2805	if (dev->tah_dev)
2806		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2807	ndev->watchdog_timeo = 5 * HZ;
2808	if (emac_phy_supports_gige(dev->phy_mode)) {
2809		ndev->netdev_ops = &emac_gige_netdev_ops;
2810		dev->commac.ops = &emac_commac_sg_ops;
2811	} else
2812		ndev->netdev_ops = &emac_netdev_ops;
2813	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2814
2815	netif_carrier_off(ndev);
2816	netif_stop_queue(ndev);
2817
2818	err = register_netdev(ndev);
2819	if (err) {
2820		printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2821		       np->full_name, err);
2822		goto err_detach_tah;
2823	}
2824
	/* Set our drvdata last as we don't want it visible until we are
	 * fully initialized
	 */
2828	wmb();
2829	dev_set_drvdata(&ofdev->dev, dev);
2830
	/* There's a new kid in town! Let's tell everybody */
2832	wake_up_all(&emac_probe_wait);
2833
2835	printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
2836	       ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
2837
2838	if (dev->phy_mode == PHY_MODE_SGMII)
2839		printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
2840
	if (dev->phy.address >= 0)
		printk(KERN_INFO "%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);
2844
2845	emac_dbg_register(dev);
2846
2847	/* Life is good */
2848	return 0;
2849
2850	/* I have a bad feeling about this ... */
2851
2852 err_detach_tah:
2853	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2854		tah_detach(dev->tah_dev, dev->tah_port);
2855 err_detach_rgmii:
2856	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2857		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2858 err_detach_zmii:
2859	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2860		zmii_detach(dev->zmii_dev, dev->zmii_port);
2861 err_unreg_commac:
2862	mal_unregister_commac(dev->mal, &dev->commac);
2863 err_rel_deps:
2864	emac_put_deps(dev);
2865 err_reg_unmap:
2866	iounmap(dev->emacp);
2867 err_irq_unmap:
2868	if (dev->wol_irq != NO_IRQ)
2869		irq_dispose_mapping(dev->wol_irq);
2870	if (dev->emac_irq != NO_IRQ)
2871		irq_dispose_mapping(dev->emac_irq);
2872 err_free:
2873	free_netdev(ndev);
2874 err_gone:
2875	/* if we were on the bootlist, remove us as we won't show up and
2876	 * wake up all waiters to notify them in case they were waiting
2877	 * on us
2878	 */
2879	if (blist) {
2880		*blist = NULL;
2881		wake_up_all(&emac_probe_wait);
2882	}
2883	return err;
2884}
2885
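/*
 * Teardown mirrors emac_probe() in reverse; the netdev is unregistered
 * first so no new traffic or ioctls can arrive while we detach.
 */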
2886static int __devexit emac_remove(struct platform_device *ofdev)
2887{
2888	struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2889
2890	DBG(dev, "remove" NL);
2891
2892	dev_set_drvdata(&ofdev->dev, NULL);
2893
2894	unregister_netdev(dev->ndev);
2895
2896	flush_scheduled_work();
2897
2898	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2899		tah_detach(dev->tah_dev, dev->tah_port);
2900	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2901		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2902	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2903		zmii_detach(dev->zmii_dev, dev->zmii_port);
2904
2905	mal_unregister_commac(dev->mal, &dev->commac);
2906	emac_put_deps(dev);
2907
2908	emac_dbg_unregister(dev);
2909	iounmap(dev->emacp);
2910
2911	if (dev->wol_irq != NO_IRQ)
2912		irq_dispose_mapping(dev->wol_irq);
2913	if (dev->emac_irq != NO_IRQ)
2914		irq_dispose_mapping(dev->emac_irq);
2915
2916	free_netdev(dev->ndev);
2917
2918	return 0;
2919}
2920
2921static struct of_device_id emac_match[] =
2922{
2923	{
2924		.type		= "network",
2925		.compatible	= "ibm,emac",
2926	},
2927	{
2928		.type		= "network",
2929		.compatible	= "ibm,emac4",
2930	},
2931	{
2932		.type		= "network",
2933		.compatible	= "ibm,emac4sync",
2934	},
2935	{},
2936};
2937MODULE_DEVICE_TABLE(of, emac_match);
2938
2939static struct of_platform_driver emac_driver = {
2940	.driver = {
2941		.name = "emac",
2942		.owner = THIS_MODULE,
2943		.of_match_table = emac_match,
2944	},
2945	.probe = emac_probe,
2946	.remove = emac_remove,
2947};
2948
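/*
 * Build the boot list of EMAC nodes sorted by cell-index so device
 * naming is stable regardless of probe order; a bubble sort is plenty
 * for EMAC_BOOT_LIST_SIZE entries.
 */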
2949static void __init emac_make_bootlist(void)
2950{
2951	struct device_node *np = NULL;
2952	int j, max, i = 0, k;
2953	int cell_indices[EMAC_BOOT_LIST_SIZE];
2954
2955	/* Collect EMACs */
	while ((np = of_find_all_nodes(np)) != NULL) {
2957		const u32 *idx;
2958
2959		if (of_match_node(emac_match, np) == NULL)
2960			continue;
2961		if (of_get_property(np, "unused", NULL))
2962			continue;
2963		idx = of_get_property(np, "cell-index", NULL);
2964		if (idx == NULL)
2965			continue;
2966		cell_indices[i] = *idx;
2967		emac_boot_list[i++] = of_node_get(np);
2968		if (i >= EMAC_BOOT_LIST_SIZE) {
2969			of_node_put(np);
2970			break;
2971		}
2972	}
2973	max = i;
2974
2975	/* Bubble sort them (doh, what a creative algorithm :-) */
2976	for (i = 0; max > 1 && (i < (max - 1)); i++)
2977		for (j = i; j < max; j++) {
2978			if (cell_indices[i] > cell_indices[j]) {
2979				np = emac_boot_list[i];
2980				emac_boot_list[i] = emac_boot_list[j];
2981				emac_boot_list[j] = np;
2982				k = cell_indices[i];
2983				cell_indices[i] = cell_indices[j];
2984				cell_indices[j] = k;
2985			}
2986		}
2987}
2988
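/*
 * Module init: bring up debug support and the boot list, then the
 * helper drivers (MAL, ZMII, RGMII, TAH) before the EMAC driver itself;
 * emac_exit() below tears everything down in reverse.
 */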
2989static int __init emac_init(void)
2990{
2991	int rc;
2992
2993	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2994
2995	/* Init debug stuff */
2996	emac_init_debug();
2997
2998	/* Build EMAC boot list */
2999	emac_make_bootlist();
3000
3001	/* Init submodules */
3002	rc = mal_init();
3003	if (rc)
3004		goto err;
3005	rc = zmii_init();
3006	if (rc)
3007		goto err_mal;
3008	rc = rgmii_init();
3009	if (rc)
3010		goto err_zmii;
3011	rc = tah_init();
3012	if (rc)
3013		goto err_rgmii;
3014	rc = of_register_platform_driver(&emac_driver);
3015	if (rc)
3016		goto err_tah;
3017
3018	return 0;
3019
3020 err_tah:
3021	tah_exit();
3022 err_rgmii:
3023	rgmii_exit();
3024 err_zmii:
3025	zmii_exit();
3026 err_mal:
3027	mal_exit();
3028 err:
3029	return rc;
3030}
3031
3032static void __exit emac_exit(void)
3033{
3034	int i;
3035
3036	of_unregister_platform_driver(&emac_driver);
3037
3038	tah_exit();
3039	rgmii_exit();
3040	zmii_exit();
3041	mal_exit();
3042	emac_fini_debug();
3043
3044	/* Destroy EMAC boot list */
3045	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3046		if (emac_boot_list[i])
3047			of_node_put(emac_boot_list[i]);
3048}
3049
3050module_init(emac_init);
3051module_exit(emac_exit);
3052