/*
 * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
 *
 * 2005-2009 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
 * available in the GRLIB VHDL IP core library.
 *
 * Full documentation of both cores can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * The Gigabit version supports scatter/gather DMA, any alignment of
 * buffers and checksum offloading.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Contributors: Kristoffer Glembo
 *               Daniel Hellstrom
 *               Marko Isomaki
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/io.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/byteorder.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif

#include "greth.h"

#define GRETH_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

static int greth_debug = -1;	/* -1 == use GRETH_DEF_MSG_ENABLE as value */
module_param(greth_debug, int, 0);
MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");

/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
static int macaddr[6];
module_param_array(macaddr, int, NULL, 0);
MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");

static int greth_edcl = 1;
module_param(greth_edcl, int, 0);
MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");

static int greth_open(struct net_device *dev);
static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
	   struct net_device *dev);
static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
	   struct net_device *dev);
static int greth_rx(struct net_device *dev, int limit);
static int greth_rx_gbit(struct net_device *dev, int limit);
static void greth_clean_tx(struct net_device *dev);
static void greth_clean_tx_gbit(struct net_device *dev);
static irqreturn_t greth_interrupt(int irq, void *dev_id);
static int greth_close(struct net_device *dev);
static int greth_set_mac_add(struct net_device *dev, void *p);
static void greth_set_multicast_list(struct net_device *dev);

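/*
 * GRETH registers and buffer descriptors are big-endian, so all accesses
 * go through __raw_readl/__raw_writel combined with explicit byte swaps
 * instead of readl/writel.
 */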
#define GRETH_REGLOAD(a)	    (be32_to_cpu(__raw_readl(&(a))))
#define GRETH_REGSAVE(a, v)         (__raw_writel(cpu_to_be32(v), &(a)))
#define GRETH_REGORIN(a, v)         (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
#define GRETH_REGANDIN(a, v)        (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))

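/* Ring indices wrap with a power-of-two mask rather than a modulo. */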
#define NEXT_TX(N)      (((N) + 1) & GRETH_TXBD_NUM_MASK)
#define SKIP_TX(N, C)   (((N) + C) & GRETH_TXBD_NUM_MASK)
#define NEXT_RX(N)      (((N) + 1) & GRETH_RXBD_NUM_MASK)

static void greth_print_rx_packet(void *addr, int len)
{
	print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
			addr, len, true);
}

static void greth_print_tx_packet(struct sk_buff *skb)
{
	int i;
	int length;

	if (skb_shinfo(skb)->nr_frags == 0)
		length = skb->len;
	else
		length = skb_headlen(skb);

	print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
			skb->data, length, true);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
			       phys_to_virt(page_to_phys(skb_shinfo(skb)->frags[i].page)) +
			       skb_shinfo(skb)->frags[i].page_offset,
			       skb_shinfo(skb)->frags[i].size, true);
	}
}

static inline void greth_enable_tx(struct greth_private *greth)
{
	wmb();
	GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
}

static inline void greth_disable_tx(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
}

static inline void greth_enable_rx(struct greth_private *greth)
{
	wmb();
	GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
}

static inline void greth_disable_rx(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
}

static inline void greth_enable_irqs(struct greth_private *greth)
{
	GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
}

static inline void greth_disable_irqs(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI|GRETH_TXI));
}

static inline void greth_write_bd(u32 *bd, u32 val)
{
	__raw_writel(cpu_to_be32(val), bd);
}

static inline u32 greth_read_bd(u32 *bd)
{
	return be32_to_cpu(__raw_readl(bd));
}

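/* Free every RX/TX buffer and its DMA mapping; used on close and when
 * ring initialization fails halfway. */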
static void greth_clean_rings(struct greth_private *greth)
{
	int i;
	struct greth_bd *rx_bdp = greth->rx_bd_base;
	struct greth_bd *tx_bdp = greth->tx_bd_base;

	if (greth->gbit_mac) {

		/* Free and unmap RX buffers */
		for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
			if (greth->rx_skbuff[i] != NULL) {
				dev_kfree_skb(greth->rx_skbuff[i]);
				dma_unmap_single(greth->dev,
						 greth_read_bd(&rx_bdp->addr),
						 MAX_FRAME_SIZE+NET_IP_ALIGN,
						 DMA_FROM_DEVICE);
			}
		}

		/* TX buffers */
		while (greth->tx_free < GRETH_TXBD_NUM) {

			struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
			int nr_frags = skb_shinfo(skb)->nr_frags;
			tx_bdp = greth->tx_bd_base + greth->tx_last;
			greth->tx_last = NEXT_TX(greth->tx_last);

			dma_unmap_single(greth->dev,
					 greth_read_bd(&tx_bdp->addr),
					 skb_headlen(skb),
					 DMA_TO_DEVICE);

			for (i = 0; i < nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
				tx_bdp = greth->tx_bd_base + greth->tx_last;

				dma_unmap_page(greth->dev,
					       greth_read_bd(&tx_bdp->addr),
					       frag->size,
					       DMA_TO_DEVICE);

				greth->tx_last = NEXT_TX(greth->tx_last);
			}
			greth->tx_free += nr_frags+1;
			dev_kfree_skb(skb);
		}

	} else { /* 10/100 Mbps MAC */

		for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
			kfree(greth->rx_bufs[i]);
			dma_unmap_single(greth->dev,
					 greth_read_bd(&rx_bdp->addr),
					 MAX_FRAME_SIZE,
					 DMA_FROM_DEVICE);
		}
		for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
			kfree(greth->tx_bufs[i]);
			dma_unmap_single(greth->dev,
					 greth_read_bd(&tx_bdp->addr),
					 MAX_FRAME_SIZE,
					 DMA_TO_DEVICE);
		}
	}
}

static int greth_init_rings(struct greth_private *greth)
{
	struct sk_buff *skb;
	struct greth_bd *rx_bd, *tx_bd;
	u32 dma_addr;
	int i;

	rx_bd = greth->rx_bd_base;
	tx_bd = greth->tx_bd_base;

	/* Initialize descriptor rings and buffers */
	if (greth->gbit_mac) {

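		/* The GBit MAC does scatter/gather DMA straight to/from SKB
		 * data, so map one full-size SKB per RX descriptor. */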
		for (i = 0; i < GRETH_RXBD_NUM; i++) {
			skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
			if (skb == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}
			skb_reserve(skb, NET_IP_ALIGN);
			dma_addr = dma_map_single(greth->dev,
						  skb->data,
						  MAX_FRAME_SIZE+NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth->rx_skbuff[i] = skb;
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}

	} else {

		/* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
		for (i = 0; i < GRETH_RXBD_NUM; i++) {

			greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

			if (greth->rx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->rx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}
		for (i = 0; i < GRETH_TXBD_NUM; i++) {

			greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

			if (greth->tx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->tx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_TO_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&tx_bd[i].addr, dma_addr);
			greth_write_bd(&tx_bd[i].stat, 0);
		}
	}
	greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
		       greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);

	/* Initialize pointers. */
	greth->rx_cur = 0;
	greth->tx_next = 0;
	greth->tx_last = 0;
	greth->tx_free = GRETH_TXBD_NUM;

	/* Initialize descriptor base address */
	GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
	GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);

	return 0;

cleanup:
	greth_clean_rings(greth);
	return -ENOMEM;
}

static int greth_open(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	int err;

	err = greth_init_rings(greth);
	if (err) {
		if (netif_msg_ifup(greth))
			dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
		return err;
	}

	err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
	if (err) {
		if (netif_msg_ifup(greth))
			dev_err(&dev->dev, "Could not allocate interrupt %d\n", greth->irq);
		greth_clean_rings(greth);
		return err;
	}

	if (netif_msg_ifup(greth))
		dev_dbg(&dev->dev, " starting queue\n");
	netif_start_queue(dev);

	napi_enable(&greth->napi);

	greth_enable_irqs(greth);
	greth_enable_tx(greth);
	greth_enable_rx(greth);
	return 0;
}

static int greth_close(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);

	napi_disable(&greth->napi);

	greth_disable_tx(greth);

	netif_stop_queue(dev);

	free_irq(greth->irq, (void *) dev);

	greth_clean_rings(greth);

	return 0;
}

static netdev_tx_t
greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	int err = NETDEV_TX_OK;
	u32 status, dma_addr;

	bdp = greth->tx_bd_base + greth->tx_next;

	if (unlikely(greth->tx_free <= 0)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

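	/* 10/100 MAC: copy the frame into the fixed DMA buffer that
	 * greth_init_rings() attached to this descriptor. */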
	dma_addr = greth_read_bd(&bdp->addr);

	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);

	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);

	status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);

	/* Wrap around descriptor ring */
	if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
		status |= GRETH_BD_WR;
	}

	greth->tx_next = NEXT_TX(greth->tx_next);
	greth->tx_free--;

	/* No more descriptors */
	if (unlikely(greth->tx_free == 0)) {

		/* Free transmitted descriptors */
		greth_clean_tx(dev);

		/* If nothing was cleaned, stop queue & wait for irq */
		if (unlikely(greth->tx_free == 0)) {
			status |= GRETH_BD_IE;
			netif_stop_queue(dev);
		}
	}

	/* Write descriptor control word and enable transmission */
	greth_write_bd(&bdp->stat, status);
	greth_enable_tx(greth);

out:
	dev_kfree_skb(skb);
	return err;
}

static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	u32 status = 0, dma_addr;
	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (greth->tx_free < nr_frags + 1) {
		netif_stop_queue(dev);
		err = NETDEV_TX_BUSY;
		goto out;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	/* Save skb pointer. */
	greth->tx_skbuff[greth->tx_next] = skb;

	/* Linear buf */
	if (nr_frags != 0)
		status = GRETH_TXBD_MORE;

	status |= GRETH_TXBD_CSALL;
	status |= skb_headlen(skb) & GRETH_BD_LEN;
	if (greth->tx_next == GRETH_TXBD_NUM_MASK)
		status |= GRETH_BD_WR;

	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat, status);
	dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
		goto map_error;

	greth_write_bd(&bdp->addr, dma_addr);

	curr_tx = NEXT_TX(greth->tx_next);

	/* Frags */
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		greth->tx_skbuff[curr_tx] = NULL;
		bdp = greth->tx_bd_base + curr_tx;

		status = GRETH_TXBD_CSALL;
		status |= frag->size & GRETH_BD_LEN;

		/* Wrap around descriptor ring */
		if (curr_tx == GRETH_TXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		/* More fragments left */
		if (i < nr_frags - 1)
			status |= GRETH_TXBD_MORE;

		/* ... last fragment, check if out of descriptors  */
		else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {

			/* Enable interrupts and stop queue */
			status |= GRETH_BD_IE;
			netif_stop_queue(dev);
		}

		greth_write_bd(&bdp->stat, status);

		dma_addr = dma_map_page(greth->dev,
					frag->page,
					frag->page_offset,
					frag->size,
					DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
			goto frag_map_error;

		greth_write_bd(&bdp->addr, dma_addr);

		curr_tx = NEXT_TX(curr_tx);
	}

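	/* Addresses and lengths are published first; the EN bits are set
	 * below in a second pass so the MAC never sees a half-built chain. */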
	wmb();

	/* Enable the descriptors that we configured ...  */
	for (i = 0; i < nr_frags + 1; i++) {
		bdp = greth->tx_bd_base + greth->tx_next;
		greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
		greth->tx_next = NEXT_TX(greth->tx_next);
		greth->tx_free--;
	}

	greth_enable_tx(greth);

	return NETDEV_TX_OK;

frag_map_error:
	/* Unmap SKB mappings that succeeded; index with SKIP_TX so the
	 * walk stays correct when the ring wraps. */
	for (i = 0; SKIP_TX(greth->tx_next, i) != curr_tx; i++) {
		bdp = greth->tx_bd_base + SKIP_TX(greth->tx_next, i);
		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
				 DMA_TO_DEVICE);
	}
map_error:
	if (net_ratelimit())
		dev_warn(greth->dev, "Could not create TX DMA mapping\n");
	dev_kfree_skb(skb);
out:
	return err;
}

static irqreturn_t greth_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct greth_private *greth;
	u32 status;
	irqreturn_t retval = IRQ_NONE;

	greth = netdev_priv(dev);

	spin_lock(&greth->devlock);

	/* Get the interrupt events that caused us to be here. */
	status = GRETH_REGLOAD(greth->regs->status);

	/* Handle rx and tx interrupts through poll */
	if (status & (GRETH_INT_RX | GRETH_INT_TX)) {

		/* Clear interrupt status */
		GRETH_REGORIN(greth->regs->status,
			      status & (GRETH_INT_RX | GRETH_INT_TX));

		retval = IRQ_HANDLED;

		/* Disable interrupts and schedule poll() */
		greth_disable_irqs(greth);
		napi_schedule(&greth->napi);
	}

	mmiowb();
	spin_unlock(&greth->devlock);

	return retval;
}

static void greth_clean_tx(struct net_device *dev)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	u32 stat;

	greth = netdev_priv(dev);

	while (1) {
		bdp = greth->tx_bd_base + greth->tx_last;
		stat = greth_read_bd(&bdp->stat);

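		/* EN still set means the MAC has not finished this descriptor. */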
		if (unlikely(stat & GRETH_BD_EN))
			break;

		if (greth->tx_free == GRETH_TXBD_NUM)
			break;

		/* Check status for errors */
		if (unlikely(stat & GRETH_TXBD_STATUS)) {
			dev->stats.tx_errors++;
			if (stat & GRETH_TXBD_ERR_AL)
				dev->stats.tx_aborted_errors++;
			if (stat & GRETH_TXBD_ERR_UE)
				dev->stats.tx_fifo_errors++;
		}
		dev->stats.tx_packets++;
		greth->tx_last = NEXT_TX(greth->tx_last);
		greth->tx_free++;
	}

	if (greth->tx_free > 0) {
		netif_wake_queue(dev);
	}
}

static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
{
	/* Check status for errors */
	if (unlikely(stat & GRETH_TXBD_STATUS)) {
		dev->stats.tx_errors++;
		if (stat & GRETH_TXBD_ERR_AL)
			dev->stats.tx_aborted_errors++;
		if (stat & GRETH_TXBD_ERR_UE)
			dev->stats.tx_fifo_errors++;
		if (stat & GRETH_TXBD_ERR_LC)
			dev->stats.tx_aborted_errors++;
	}
	dev->stats.tx_packets++;
}

static void greth_clean_tx_gbit(struct net_device *dev)
{
	struct greth_private *greth;
	struct greth_bd *bdp, *bdp_last_frag;
	struct sk_buff *skb;
	u32 stat;
	int nr_frags, i;

	greth = netdev_priv(dev);

	while (greth->tx_free < GRETH_TXBD_NUM) {

		skb = greth->tx_skbuff[greth->tx_last];

		nr_frags = skb_shinfo(skb)->nr_frags;

		/* We only clean fully completed SKBs */
		bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
		stat = greth_read_bd(&bdp_last_frag->stat);

		if (stat & GRETH_BD_EN)
			break;

		greth->tx_skbuff[greth->tx_last] = NULL;

		greth_update_tx_stats(dev, stat);

		bdp = greth->tx_bd_base + greth->tx_last;

		greth->tx_last = NEXT_TX(greth->tx_last);

		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 skb_headlen(skb),
				 DMA_TO_DEVICE);

		for (i = 0; i < nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			bdp = greth->tx_bd_base + greth->tx_last;

			dma_unmap_page(greth->dev,
				       greth_read_bd(&bdp->addr),
				       frag->size,
				       DMA_TO_DEVICE);

			greth->tx_last = NEXT_TX(greth->tx_last);
		}
		greth->tx_free += nr_frags+1;
		dev_kfree_skb(skb);
	}
	if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
		netif_wake_queue(dev);
	}
}

static int greth_pending_packets(struct greth_private *greth)
{
	struct greth_bd *bdp;
	u32 status;
	bdp = greth->rx_bd_base + greth->rx_cur;
	status = greth_read_bd(&bdp->stat);
	if (status & GRETH_BD_EN)
		return 0;
	else
		return 1;
}

static int greth_rx(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb;
	int pkt_len;
	int bad, count;
	u32 status, dma_addr;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		status = greth_read_bd(&bdp->stat);
		dma_addr = greth_read_bd(&bdp->addr);
		bad = 0;

		if (unlikely(status & GRETH_BD_EN)) {
			break;
		}

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {
			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			}
			if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			}
			if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}
		if (unlikely(bad)) {
			dev->stats.rx_errors++;
		} else {

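			/* Copy the frame out of the fixed DMA buffer into a
			 * freshly allocated SKB. */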
			pkt_len = status & GRETH_BD_LEN;

			skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

			if (unlikely(skb == NULL)) {

				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - packet dropped\n");

				dev->stats.rx_dropped++;

			} else {
				skb_reserve(skb, NET_IP_ALIGN);
				skb->dev = dev;

				dma_sync_single_for_cpu(greth->dev,
							dma_addr,
							pkt_len,
							DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);

				memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				netif_receive_skb(skb);
			}
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);

		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);

		greth_enable_rx(greth);

		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}

static inline int hw_checksummed(u32 status)
{
	if (status & GRETH_RXBD_IP_FRAG)
		return 0;

	if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
		return 0;

	if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
		return 0;

	if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
		return 0;

	return 1;
}

static int greth_rx_gbit(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb, *newskb;
	int pkt_len;
	int bad, count = 0;
	u32 status, dma_addr;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		skb = greth->rx_skbuff[greth->rx_cur];
		status = greth_read_bd(&bdp->stat);
		bad = 0;

		if (status & GRETH_BD_EN)
			break;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {

			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			} else if (status &
				   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			} else if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}

		/* Allocate a new skb to replace the current one; skipped for
		 * bad frames so a fresh SKB is not leaked on the error path. */
		if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
			skb_reserve(newskb, NET_IP_ALIGN);

			dma_addr = dma_map_single(greth->dev,
						  newskb->data,
						  MAX_FRAME_SIZE + NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (!dma_mapping_error(greth->dev, dma_addr)) {
				/* Process the incoming frame. */
				pkt_len = status & GRETH_BD_LEN;

				dma_unmap_single(greth->dev,
						 greth_read_bd(&bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);

				skb_put(skb, pkt_len);

				if (greth->flags & GRETH_FLAG_RX_CSUM && hw_checksummed(status))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb->ip_summed = CHECKSUM_NONE;

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				netif_receive_skb(skb);

				greth->rx_skbuff[greth->rx_cur] = newskb;
				greth_write_bd(&bdp->addr, dma_addr);
			} else {
				if (net_ratelimit())
					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
				dev_kfree_skb(newskb);
				dev->stats.rx_dropped++;
			}
		} else {
			if (!bad && net_ratelimit())
				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
			dev->stats.rx_dropped++;
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);
		greth_enable_rx(greth);
		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}

static int greth_poll(struct napi_struct *napi, int budget)
{
	struct greth_private *greth;
	int work_done = 0;
	greth = container_of(napi, struct greth_private, napi);

	if (greth->gbit_mac) {
		greth_clean_tx_gbit(greth->netdev);
	} else {
		greth_clean_tx(greth->netdev);
	}

restart_poll:
	if (greth->gbit_mac) {
		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
	} else {
		work_done += greth_rx(greth->netdev, budget - work_done);
	}

	if (work_done < budget) {

		napi_complete(napi);

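		/* Re-check for frames that arrived between the last RX pass
		 * and napi_complete() so none are stranded with IRQs off. */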
		if (greth_pending_packets(greth)) {
			napi_reschedule(napi);
			goto restart_poll;
		}
	}

	greth_enable_irqs(greth);
	return work_done;
}

static int greth_set_mac_add(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct greth_private *greth;
	struct greth_regs *regs;

	greth = netdev_priv(dev);
	regs = (struct greth_regs *) greth->regs;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	GRETH_REGSAVE(regs->esa_msb, addr->sa_data[0] << 8 | addr->sa_data[1]);
	GRETH_REGSAVE(regs->esa_lsb,
		      addr->sa_data[2] << 24 | addr->sa_data[3] << 16 |
		      addr->sa_data[4] << 8 | addr->sa_data[5]);
	return 0;
}

static u32 greth_hash_get_index(__u8 *addr)
{
	return (ether_crc(6, addr)) & 0x3F;
}

static void greth_set_hash_filter(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct greth_private *greth = netdev_priv(dev);
	struct greth_regs *regs = (struct greth_regs *) greth->regs;
	u32 mc_filter[2];
	unsigned int bitnr;

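	/* 64-bit hash filter: the bit index for each address is the low six
	 * bits of its Ethernet CRC (see greth_hash_get_index). */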
	mc_filter[0] = mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = greth_hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
	GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
}

static void greth_set_multicast_list(struct net_device *dev)
{
	int cfg;
	struct greth_private *greth = netdev_priv(dev);
	struct greth_regs *regs = (struct greth_regs *) greth->regs;

	cfg = GRETH_REGLOAD(regs->control);
	if (dev->flags & IFF_PROMISC)
		cfg |= GRETH_CTRL_PR;
	else
		cfg &= ~GRETH_CTRL_PR;

	if (greth->multicast) {
		if (dev->flags & IFF_ALLMULTI) {
			GRETH_REGSAVE(regs->hash_msb, -1);
			GRETH_REGSAVE(regs->hash_lsb, -1);
			cfg |= GRETH_CTRL_MCEN;
			GRETH_REGSAVE(regs->control, cfg);
			return;
		}

		if (netdev_mc_empty(dev)) {
			cfg &= ~GRETH_CTRL_MCEN;
			GRETH_REGSAVE(regs->control, cfg);
			return;
		}

		/* Setup multicast filter */
		greth_set_hash_filter(dev);
		cfg |= GRETH_CTRL_MCEN;
	}
	GRETH_REGSAVE(regs->control, cfg);
}

static u32 greth_get_msglevel(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	return greth->msg_enable;
}

static void greth_set_msglevel(struct net_device *dev, u32 value)
{
	struct greth_private *greth = netdev_priv(dev);
	greth->msg_enable = value;
}

static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phy = greth->phy;

	if (!phy)
		return -ENODEV;

	return phy_ethtool_gset(phy, cmd);
}

static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phy = greth->phy;

	if (!phy)
		return -ENODEV;

	return phy_ethtool_sset(phy, cmd);
}

static int greth_get_regs_len(struct net_device *dev)
{
	return sizeof(struct greth_regs);
}

static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct greth_private *greth = netdev_priv(dev);

	strncpy(info->driver, dev_driver_string(greth->dev), 32);
	strncpy(info->version, "revision: 1.0", 32);
	strncpy(info->bus_info, greth->dev->bus->name, 32);
	strncpy(info->fw_version, "N/A", 32);
	info->eedump_len = 0;
	info->regdump_len = sizeof(struct greth_regs);
}

static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
	int i;
	struct greth_private *greth = netdev_priv(dev);
	u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
	u32 *buff = p;

	for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
		buff[i] = greth_read_bd(&greth_regs[i]);
}

static u32 greth_get_rx_csum(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	return (greth->flags & GRETH_FLAG_RX_CSUM) != 0;
}

static int greth_set_rx_csum(struct net_device *dev, u32 data)
{
	struct greth_private *greth = netdev_priv(dev);

	spin_lock_bh(&greth->devlock);

	if (data)
		greth->flags |= GRETH_FLAG_RX_CSUM;
	else
		greth->flags &= ~GRETH_FLAG_RX_CSUM;

	spin_unlock_bh(&greth->devlock);

	return 0;
}

static u32 greth_get_tx_csum(struct net_device *dev)
{
	return (dev->features & NETIF_F_IP_CSUM) != 0;
}

static int greth_set_tx_csum(struct net_device *dev, u32 data)
{
	netif_tx_lock_bh(dev);
	ethtool_op_set_tx_csum(dev, data);
	netif_tx_unlock_bh(dev);
	return 0;
}

static const struct ethtool_ops greth_ethtool_ops = {
	.get_msglevel		= greth_get_msglevel,
	.set_msglevel		= greth_set_msglevel,
	.get_settings		= greth_get_settings,
	.set_settings		= greth_set_settings,
	.get_drvinfo		= greth_get_drvinfo,
	.get_regs_len		= greth_get_regs_len,
	.get_regs		= greth_get_regs,
	.get_rx_csum		= greth_get_rx_csum,
	.set_rx_csum		= greth_set_rx_csum,
	.get_tx_csum		= greth_get_tx_csum,
	.set_tx_csum		= greth_set_tx_csum,
	.get_link		= ethtool_op_get_link,
};

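/* Not const: ndo_start_xmit and ndo_set_multicast_list are replaced at
 * probe time when the GBit and multicast capabilities are detected. */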
static struct net_device_ops greth_netdev_ops = {
	.ndo_open		= greth_open,
	.ndo_stop		= greth_close,
	.ndo_start_xmit		= greth_start_xmit,
	.ndo_set_mac_address	= greth_set_mac_add,
	.ndo_validate_addr	= eth_validate_addr,
};

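/* Busy-wait (roughly 40 ms worst case) for the MDIO state machine to go idle. */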
static inline int wait_for_mdio(struct greth_private *greth)
{
	unsigned long timeout = jiffies + 4*HZ/100;
	while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
		if (time_after(jiffies, timeout))
			return 0;
	}
	return 1;
}

static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	struct greth_private *greth = bus->priv;
	int data;

	if (!wait_for_mdio(greth))
		return -EBUSY;

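	/* MDIO register layout: data in bits 31:16, PHY address in 15:11,
	 * register address in 10:6; bit 1 starts a read, bit 0 a write. */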
	GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);

	if (!wait_for_mdio(greth))
		return -EBUSY;

	if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
		data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
		return data;
	} else {
		return -1;
	}
}

static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
	struct greth_private *greth = bus->priv;

	if (!wait_for_mdio(greth))
		return -EBUSY;

	GRETH_REGSAVE(greth->regs->mdio,
		      ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);

	if (!wait_for_mdio(greth))
		return -EBUSY;

	return 0;
}

static int greth_mdio_reset(struct mii_bus *bus)
{
	return 0;
}

static void greth_link_change(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phydev = greth->phy;
	unsigned long flags;

	int status_change = 0;

	spin_lock_irqsave(&greth->devlock, flags);

	if (phydev->link) {

		if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {

			GRETH_REGANDIN(greth->regs->control,
				       ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB));

			if (phydev->duplex)
				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD);

			if (phydev->speed == SPEED_100)
				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP);
			else if (phydev->speed == SPEED_1000)
				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB);

			greth->speed = phydev->speed;
			greth->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != greth->link) {
		if (!phydev->link) {
			greth->speed = 0;
			greth->duplex = -1;
		}
		greth->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&greth->devlock, flags);

	if (status_change) {
		if (phydev->link)
			pr_debug("%s: link up (%d/%s)\n",
				dev->name, phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			pr_debug("%s: link down\n", dev->name);
	}
}

static int greth_mdio_probe(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phy = NULL;
	int ret;

	/* Find the first PHY */
	phy = phy_find_first(greth->mdio);

	if (!phy) {
		if (netif_msg_probe(greth))
			dev_err(&dev->dev, "no PHY found\n");
		return -ENXIO;
	}

	ret = phy_connect_direct(dev, phy, &greth_link_change,
			0, greth->gbit_mac ?
			PHY_INTERFACE_MODE_GMII :
			PHY_INTERFACE_MODE_MII);
	if (ret) {
		if (netif_msg_ifup(greth))
			dev_err(&dev->dev, "could not attach to PHY\n");
		return ret;
	}

	if (greth->gbit_mac)
		phy->supported &= PHY_GBIT_FEATURES;
	else
		phy->supported &= PHY_BASIC_FEATURES;

	phy->advertising = phy->supported;

	greth->link = 0;
	greth->speed = 0;
	greth->duplex = -1;
	greth->phy = phy;

	return 0;
}

static inline int phy_aneg_done(struct phy_device *phydev)
{
	int retval;

	retval = phy_read(phydev, MII_BMSR);

	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}

static int greth_mdio_init(struct greth_private *greth)
{
	int ret, phy;
	unsigned long timeout;

	greth->mdio = mdiobus_alloc();
	if (!greth->mdio) {
		return -ENOMEM;
	}

	greth->mdio->name = "greth-mdio";
	snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
	greth->mdio->read = greth_mdio_read;
	greth->mdio->write = greth_mdio_write;
	greth->mdio->reset = greth_mdio_reset;
	greth->mdio->priv = greth;

	greth->mdio->irq = greth->mdio_irqs;

	for (phy = 0; phy < PHY_MAX_ADDR; phy++)
		greth->mdio->irq[phy] = PHY_POLL;

	ret = mdiobus_register(greth->mdio);
	if (ret) {
		goto error;
	}

	ret = greth_mdio_probe(greth->netdev);
	if (ret) {
		if (netif_msg_probe(greth))
			dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
		goto unreg_mdio;
	}

	phy_start(greth->phy);

	/* If Ethernet debug link is used make autoneg happen right away */
	if (greth->edcl && greth_edcl == 1) {
		phy_start_aneg(greth->phy);
		timeout = jiffies + 6*HZ;
		while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
		}
		genphy_read_status(greth->phy);
		greth_link_change(greth->netdev);
	}

	return 0;

unreg_mdio:
	mdiobus_unregister(greth->mdio);
error:
	mdiobus_free(greth->mdio);
	return ret;
}

/* Initialize the GRETH MAC */
static int __devinit greth_of_probe(struct platform_device *ofdev, const struct of_device_id *match)
{
	struct net_device *dev;
	struct greth_private *greth;
	struct greth_regs *regs;

	int i;
	int err;
	int tmp;
	unsigned long timeout;

	dev = alloc_etherdev(sizeof(struct greth_private));

	if (dev == NULL)
		return -ENOMEM;

	greth = netdev_priv(dev);
	greth->netdev = dev;
	greth->dev = &ofdev->dev;

	if (greth_debug > 0)
		greth->msg_enable = greth_debug;
	else
		greth->msg_enable = GRETH_DEF_MSG_ENABLE;

	spin_lock_init(&greth->devlock);

	greth->regs = of_ioremap(&ofdev->resource[0], 0,
				 resource_size(&ofdev->resource[0]),
				 "grlib-greth regs");

	if (greth->regs == NULL) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "ioremap failure.\n");
		err = -EIO;
		goto error1;
	}

	regs = (struct greth_regs *) greth->regs;
	greth->irq = ofdev->archdata.irqs[0];

	dev_set_drvdata(greth->dev, dev);
	SET_NETDEV_DEV(dev, greth->dev);

	if (netif_msg_probe(greth))
		dev_dbg(greth->dev, "resetting controller.\n");

	/* Reset the controller. */
	GRETH_REGSAVE(regs->control, GRETH_RESET);

	/* Wait for MAC to reset itself */
	timeout = jiffies + HZ/100;
	while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
		if (time_after(jiffies, timeout)) {
			err = -EIO;
			if (netif_msg_probe(greth))
				dev_err(greth->dev, "timeout when waiting for reset.\n");
			goto error2;
		}
	}

	/* Get default PHY address  */
	greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;

	/* Check if we have GBIT capable MAC */
	tmp = GRETH_REGLOAD(regs->control);
	greth->gbit_mac = (tmp >> 27) & 1;

	/* Check for multicast capability */
	greth->multicast = (tmp >> 25) & 1;

	greth->edcl = (tmp >> 31) & 1;

	/* If we have EDCL we disable the EDCL speed-duplex FSM so
	 * it doesn't interfere with the software */
	if (greth->edcl != 0)
		GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);

	/* Check if MAC can handle MDIO interrupts */
	greth->mdio_int_en = (tmp >> 26) & 1;

	err = greth_mdio_init(greth);
	if (err) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "failed to register MDIO bus\n");
		goto error2;
	}

	/* Allocate TX descriptor ring in coherent memory */
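	/* Each descriptor table is a 1 KiB area that the MAC requires to be
	 * 1 KiB aligned; dma_alloc_coherent guarantees that for this size. */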
	greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
								   1024,
								   &greth->tx_bd_base_phys,
								   GFP_KERNEL);

	if (!greth->tx_bd_base) {
		if (netif_msg_probe(greth))
			dev_err(&dev->dev, "could not allocate descriptor memory.\n");
		err = -ENOMEM;
		goto error3;
	}

	memset(greth->tx_bd_base, 0, 1024);

	/* Allocate RX descriptor ring in coherent memory */
	greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
								   1024,
								   &greth->rx_bd_base_phys,
								   GFP_KERNEL);

	if (!greth->rx_bd_base) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "could not allocate descriptor memory.\n");
		err = -ENOMEM;
		goto error4;
	}

	memset(greth->rx_bd_base, 0, 1024);

	/* Get MAC address from: module param, OF property or ID prom */
	for (i = 0; i < 6; i++) {
		if (macaddr[i] != 0)
			break;
	}
	if (i == 6) {
		const unsigned char *addr;
		int len;
		addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
					&len);
		if (addr != NULL && len == 6) {
			for (i = 0; i < 6; i++)
				macaddr[i] = (unsigned int) addr[i];
		} else {
#ifdef CONFIG_SPARC
			for (i = 0; i < 6; i++)
				macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
#endif
		}
	}

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = macaddr[i];

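	/* Bump the last octet so a second GRETH probed later gets a
	 * distinct default address. */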
	macaddr[5]++;

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "no valid ethernet address, aborting.\n");
		err = -EINVAL;
		goto error5;
	}

	GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
		      dev->dev_addr[4] << 8 | dev->dev_addr[5]);

	/* Clear all pending interrupts except PHY irq */
	GRETH_REGSAVE(regs->status, 0xFF);

	if (greth->gbit_mac) {
		dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HIGHDMA;
		greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
		greth->flags = GRETH_FLAG_RX_CSUM;
	}

	if (greth->multicast) {
		greth_netdev_ops.ndo_set_multicast_list = greth_set_multicast_list;
		dev->flags |= IFF_MULTICAST;
	} else {
		dev->flags &= ~IFF_MULTICAST;
	}

	dev->netdev_ops = &greth_netdev_ops;
	dev->ethtool_ops = &greth_ethtool_ops;

	if (register_netdev(dev)) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "netdevice registration failed.\n");
		err = -ENOMEM;
		goto error5;
	}

	/* setup NAPI */
	netif_napi_add(dev, &greth->napi, greth_poll, 64);

	return 0;

error5:
	dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
error4:
	dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
error3:
	mdiobus_unregister(greth->mdio);
error2:
	of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
error1:
	free_netdev(dev);
	return err;
}

static int __devexit greth_of_remove(struct platform_device *of_dev)
{
	struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
	struct greth_private *greth = netdev_priv(ndev);

	/* Free descriptor areas */
	dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);

	dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);

	dev_set_drvdata(&of_dev->dev, NULL);

	if (greth->phy)
		phy_stop(greth->phy);
	mdiobus_unregister(greth->mdio);

	unregister_netdev(ndev);
	free_netdev(ndev);

	of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));

	return 0;
}

static struct of_device_id greth_of_match[] = {
	{
		.name = "GAISLER_ETHMAC",
	},
	{},
};

MODULE_DEVICE_TABLE(of, greth_of_match);

static struct of_platform_driver greth_of_driver = {
	.driver = {
		.name = "grlib-greth",
		.owner = THIS_MODULE,
		.of_match_table = greth_of_match,
	},
	.probe = greth_of_probe,
	.remove = __devexit_p(greth_of_remove),
};

static int __init greth_init(void)
{
	return of_register_platform_driver(&greth_of_driver);
}

static void __exit greth_cleanup(void)
{
	of_unregister_platform_driver(&greth_of_driver);
}

module_init(greth_init);
module_exit(greth_cleanup);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
MODULE_LICENSE("GPL");