/*
 * OpenGrok source-browser navigation header (Home | History | Annotate |
 * Line# | Navigate | Raw | Download), captured from a listing of
 * asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/net/.
 * Kept as a comment so the file remains valid C.
 */
1/*
2 * Network device driver for the BMAC ethernet controller on
3 * Apple Powermacs.  Assumes it's under a DBDMA controller.
4 *
5 * Copyright (C) 1998 Randy Gobbel.
6 *
7 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
8 * dynamic procfs inode.
9 */
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/netdevice.h>
13#include <linux/etherdevice.h>
14#include <linux/delay.h>
15#include <linux/string.h>
16#include <linux/timer.h>
17#include <linux/proc_fs.h>
18#include <linux/init.h>
19#include <linux/spinlock.h>
20#include <linux/crc32.h>
21#include <linux/bitrev.h>
22#include <linux/ethtool.h>
23#include <linux/slab.h>
24#include <asm/prom.h>
25#include <asm/dbdma.h>
26#include <asm/io.h>
27#include <asm/page.h>
28#include <asm/pgtable.h>
29#include <asm/machdep.h>
30#include <asm/pmac_feature.h>
31#include <asm/macio.h>
32#include <asm/irq.h>
33
34#include "bmac.h"
35
36#define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
37#define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
38
39/*
40 * CRC polynomial - used in working out multicast filter bits.
41 */
42#define ENET_CRCPOLY 0x04c11db7
43
44/* switch to use multicast code lifted from sunhme driver */
45#define SUNHME_MULTICAST
46
47#define N_RX_RING	64
48#define N_TX_RING	32
49#define MAX_TX_ACTIVE	1
50#define ETHERCRC	4
51#define ETHERMINPACKET	64
52#define ETHERMTU	1500
53#define RX_BUFLEN	(ETHERMTU + 14 + ETHERCRC + 2)
54#define TX_TIMEOUT	HZ	/* 1 second */
55
56/* Bits in transmit DMA status */
57#define TX_DMA_ERR	0x80
58
59#define XXDEBUG(args)
60
/* Per-device private state, kept in netdev_priv(dev). */
struct bmac_data {
	/* volatile struct bmac *bmac; */
	struct sk_buff_head *queue;
	volatile struct dbdma_regs __iomem *tx_dma;	/* transmit DBDMA channel registers */
	int tx_dma_intr;				/* transmit DMA interrupt line */
	volatile struct dbdma_regs __iomem *rx_dma;	/* receive DBDMA channel registers */
	int rx_dma_intr;				/* receive DMA interrupt line */
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct macio_dev *mdev;			/* underlying macio device */
	int is_bmac_plus;			/* non-zero for the BMAC+ variant */
	struct sk_buff *rx_bufs[N_RX_RING];	/* skb posted in each rx ring slot */
	int rx_fill;				/* last rx slot that was (re)filled */
	int rx_empty;				/* next rx slot to reap */
	struct sk_buff *tx_bufs[N_TX_RING];	/* skb in flight for each tx ring slot */
	int tx_fill;				/* next free tx slot */
	int tx_empty;				/* oldest in-flight tx slot */
	unsigned char tx_fullup;		/* set when the tx ring is full */
	struct timer_list tx_timeout;		/* transmit watchdog timer */
	int timeout_active;			/* tx_timeout timer is pending */
	int sleeping;				/* chip is suspended/powered down */
	int opened;				/* interface is up */
	unsigned short hash_use_count[64];	/* per-hash-bit multicast refcounts */
	unsigned short hash_table_mask[4];	/* software copy of the 64-bit hash filter */
	spinlock_t lock;			/* protects ring indices and chip access */
};
87
88
89static unsigned char *bmac_emergency_rxbuf;
90
91/*
92 * Number of bytes of private data per BMAC: allow enough for
93 * the rx and tx dma commands plus a branch dma command each,
94 * and another 16 bytes to allow us to align the dma command
95 * buffers on a 16 byte boundary.
96 */
97#define PRIV_BYTES	(sizeof(struct bmac_data) \
98	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
99	+ sizeof(struct sk_buff_head))
100
101static int bmac_open(struct net_device *dev);
102static int bmac_close(struct net_device *dev);
103static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
104static void bmac_set_multicast(struct net_device *dev);
105static void bmac_reset_and_enable(struct net_device *dev);
106static void bmac_start_chip(struct net_device *dev);
107static void bmac_init_chip(struct net_device *dev);
108static void bmac_init_registers(struct net_device *dev);
109static void bmac_enable_and_reset_chip(struct net_device *dev);
110static int bmac_set_address(struct net_device *dev, void *addr);
111static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
112static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
113static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
114static void bmac_set_timeout(struct net_device *dev);
115static void bmac_tx_timeout(unsigned long data);
116static int bmac_output(struct sk_buff *skb, struct net_device *dev);
117static void bmac_start(struct net_device *dev);
118
119#define	DBDMA_SET(x)	( ((x) | (x) << 16) )
120#define	DBDMA_CLEAR(x)	( (x) << 16)
121
/*
 * Byte-reversed 32-bit store (stwbrx) to a DBDMA register: the
 * PowerPC CPU is big-endian while the DBDMA registers are
 * little-endian.
 */
static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
}
127
/*
 * Byte-reversed 32-bit load (lwbrx) from a DBDMA register; converse
 * of dbdma_st32().
 */
static inline unsigned long
dbdma_ld32(volatile __u32 __iomem *a)
{
	__u32 swap;
	__asm__ volatile ("lwbrx %0,0,%1" :  "=r" (swap) : "r" (a));
	return swap;
}
135
/*
 * Kick a DBDMA channel: set RUN and WAKE, clear PAUSE and DEAD, so
 * the engine (re)starts processing its command list.
 */
static void
dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
	eieio();
}
143
/*
 * Stop a DBDMA channel: clear every control bit, then spin until the
 * status register shows RUN has dropped.  NOTE(review): the wait is
 * unbounded; it relies on the hardware always acking the stop.
 */
static void
dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
	eieio();
	while (dbdma_ld32(&dmap->status) & RUN)
		eieio();
}
153
/*
 * Fill in one in-memory DBDMA command descriptor (fields are
 * little-endian) and clear its transfer-status and residual-count
 * fields so it reads as not-yet-run.
 */
static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
	     unsigned short cmd, unsigned count, unsigned long addr,
	     unsigned long cmd_dep)
{
	out_le16(&cp->command, cmd);
	out_le16(&cp->req_count, count);
	out_le32(&cp->phy_addr, addr);
	out_le32(&cp->cmd_dep, cmd_dep);
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->res_count, 0);
}
166
/* Write a 16-bit little-endian BMAC register at base_addr + reg_offset. */
static inline
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
{
	out_le16((void __iomem *)dev->base_addr + reg_offset, data);
}
172
173
/* Read a 16-bit little-endian BMAC register at base_addr + reg_offset. */
static inline
unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
{
	return in_le16((void __iomem *)dev->base_addr + reg_offset);
}
179
/*
 * Quiesce both DMA channels (if already mapped), then ask the PMU
 * feature layer to power up and reset the BMAC cell.
 */
static void
bmac_enable_and_reset_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	if (rd)
		dbdma_reset(rd);
	if (td)
		dbdma_reset(td);

	/* last argument 1 = enable; bmac_suspend() calls this with 0 */
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
}
194
195#define MIFDELAY	udelay(10)
196
197static unsigned int
198bmac_mif_readbits(struct net_device *dev, int nb)
199{
200	unsigned int val = 0;
201
202	while (--nb >= 0) {
203		bmwrite(dev, MIFCSR, 0);
204		MIFDELAY;
205		if (bmread(dev, MIFCSR) & 8)
206			val |= 1 << nb;
207		bmwrite(dev, MIFCSR, 1);
208		MIFDELAY;
209	}
210	bmwrite(dev, MIFCSR, 0);
211	MIFDELAY;
212	bmwrite(dev, MIFCSR, 1);
213	MIFDELAY;
214	return val;
215}
216
217static void
218bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
219{
220	int b;
221
222	while (--nb >= 0) {
223		b = (val & (1 << nb))? 6: 4;
224		bmwrite(dev, MIFCSR, b);
225		MIFDELAY;
226		bmwrite(dev, MIFCSR, b|1);
227		MIFDELAY;
228	}
229}
230
/*
 * Read one PHY register over the bit-banged MII management interface:
 * 32-bit preamble of ones, start/read opcode bits, 10 address bits
 * (PHY + register), bus turnaround, then 17 clocked-in bits
 * (turnaround bit plus 16 data bits).  Returns the 17-bit raw value;
 * callers appear to use the low 16 bits.
 */
static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
	unsigned int val;

	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);	/* preamble */
	bmac_mif_writebits(dev, 6, 4);		/* start + read opcode */
	bmac_mif_writebits(dev, addr, 10);	/* PHY and register address */
	bmwrite(dev, MIFCSR, 2);		/* release the data line */
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	val = bmac_mif_readbits(dev, 17);
	bmwrite(dev, MIFCSR, 4);		/* back to idle/output mode */
	MIFDELAY;
	return val;
}
250
/*
 * Write one PHY register over the bit-banged MII management
 * interface: preamble, start/write opcode, 10 address bits,
 * turnaround (10b), 16 data bits, then idle bits.
 */
static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);	/* preamble */
	bmac_mif_writebits(dev, 5, 4);		/* start + write opcode */
	bmac_mif_writebits(dev, addr, 10);	/* PHY and register address */
	bmac_mif_writebits(dev, 2, 2);		/* turnaround */
	bmac_mif_writebits(dev, val, 16);	/* data */
	bmac_mif_writebits(dev, 3, 2);		/* idle */
}
263
/*
 * Bring the MAC registers to a known state: reset the RX and TX
 * sections, configure the transceiver interface (plain BMAC only),
 * zero the statistics counters and hash filter, program the station
 * address, enable the FIFOs and the normal interrupt set.
 */
static void
bmac_init_registers(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile unsigned short regValue;
	unsigned short *pWord16;
	int i;

	/* XXDEBUG(("bmac: enter init_registers\n")); */

	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, TXRST, TxResetBit);

	/* Poll (bounded, up to ~1s) for the TX reset bit to self-clear. */
	i = 100;
	do {
		--i;
		udelay(10000);
		regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
	} while ((regValue & TxResetBit) && i > 0);

	/* Plain BMAC (not BMAC+): select the serial transceiver mode. */
	if (!bp->is_bmac_plus) {
		regValue = bmread(dev, XCVRIF);
		regValue |= ClkBit | SerialMode | COLActiveLow;
		bmwrite(dev, XCVRIF, regValue);
		udelay(10000);
	}

	/* Seed for the backoff random-number generator. */
	bmwrite(dev, RSEED, (unsigned short)0x1968);

	regValue = bmread(dev, XIFC);
	regValue |= TxOutputEnable;
	bmwrite(dev, XIFC, regValue);

	bmread(dev, PAREG);	/* read-to-clear, value discarded */

	/* set collision counters to 0 */
	bmwrite(dev, NCCNT, 0);
	bmwrite(dev, NTCNT, 0);
	bmwrite(dev, EXCNT, 0);
	bmwrite(dev, LTCNT, 0);

	/* set rx counters to 0 */
	bmwrite(dev, FRCNT, 0);
	bmwrite(dev, LECNT, 0);
	bmwrite(dev, AECNT, 0);
	bmwrite(dev, FECNT, 0);
	bmwrite(dev, RXCV, 0);

	/* set tx fifo information */
	bmwrite(dev, TXTH, 4);	/* 4 octets before tx starts */

	bmwrite(dev, TXFIFOCSR, 0);	/* first disable txFIFO */
	bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

	/* set rx fifo information */
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

	//bmwrite(dev, TXCFG, TxMACEnable);	       	/* TxNeverGiveUp maybe later */
	bmread(dev, STATUS);		/* read it just to clear it */

	/* zero out the chip Hash Filter registers */
	for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]); 	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]); 	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]); 	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]); 	/* bits 63 - 48 */

	/* Program the station address as three 16-bit words. */
	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

	bmwrite(dev, INTDISABLE, EnableNormal);
}
341
342
343
/*
 * Start packet flow: kick the receive DMA channel, then enable the
 * transmitter and receiver sections of the MAC, preserving whatever
 * config bits (e.g. promiscuous) are already set.
 */
static void
bmac_start_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	unsigned short	oldConfig;

	/* enable rx dma channel */
	dbdma_continue(rd);

	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	/* turn on rx plus any other bits already on (promiscuous possibly) */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	udelay(20000);
}
362
/*
 * Dump the 32 PHY registers for debugging, then (BMAC+ only) make
 * sure autonegotiation is set up: program the advertisement register
 * from the PHY's reported capabilities and restart autoneg if the
 * advertisement or the autoneg-enable bit was wrong.  Register
 * numbers/bits follow the standard MII layout (0 = control,
 * 1 = status, 4 = advertisement) — assumed, not shown here.
 */
static void
bmac_init_phy(struct net_device *dev)
{
	unsigned int addr;
	struct bmac_data *bp = netdev_priv(dev);

	printk(KERN_DEBUG "phy registers:");
	for (addr = 0; addr < 32; ++addr) {
		if ((addr & 7) == 0)
			printk(KERN_DEBUG);
		printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
	}
	printk(KERN_CONT "\n");

	if (bp->is_bmac_plus) {
		unsigned int capable, ctrl;

		ctrl = bmac_mif_read(dev, 0);
		/* map status capability bits into advertisement format */
		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
		if (bmac_mif_read(dev, 4) != capable ||
		    (ctrl & 0x1000) == 0) {
			bmac_mif_write(dev, 4, capable);
			bmac_mif_write(dev, 0, 0x1200);	/* enable + restart autoneg */
		} else
			bmac_mif_write(dev, 0, 0x1000);	/* autoneg enable */
	}
}
390
/* Full chip bring-up: PHY first, then the MAC registers. */
static void bmac_init_chip(struct net_device *dev)
{
	bmac_init_phy(dev);
	bmac_init_registers(dev);
}
396
397#ifdef CONFIG_PM
/*
 * macio suspend hook: detach the interface, cancel the tx watchdog,
 * mask all three interrupt lines, and if the device was open, stop
 * the MAC and both DMA channels and free all posted ring buffers.
 * Finally power the BMAC cell down via the feature call.
 */
static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	unsigned short config;
	int i;

	netif_device_detach(dev);
	/* prolly should wait for dma to finish & turn off the chip */
	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active) {
		del_timer(&bp->tx_timeout);
		bp->timeout_active = 0;
	}
	disable_irq(dev->irq);
	disable_irq(bp->tx_dma_intr);
	disable_irq(bp->rx_dma_intr);
	bp->sleeping = 1;	/* blocks e.g. bmac_set_multicast until resume */
	spin_unlock_irqrestore(&bp->lock, flags);
	if (bp->opened) {
		volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
		volatile struct dbdma_regs __iomem *td = bp->tx_dma;

		/* disable the receiver and transmitter in the MAC */
		config = bmread(dev, RXCFG);
		bmwrite(dev, RXCFG, (config & ~RxMACEnable));
		config = bmread(dev, TXCFG);
		bmwrite(dev, TXCFG, (config & ~TxMACEnable));
		bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
		/* disable rx and tx dma */
		st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		/* free some skb's */
		for (i=0; i<N_RX_RING; i++) {
			if (bp->rx_bufs[i] != NULL) {
				dev_kfree_skb(bp->rx_bufs[i]);
				bp->rx_bufs[i] = NULL;
			}
		}
		for (i = 0; i<N_TX_RING; i++) {
			if (bp->tx_bufs[i] != NULL) {
				dev_kfree_skb(bp->tx_bufs[i]);
				bp->tx_bufs[i] = NULL;
			}
		}
	}
	/* power the cell down (last argument 0 = disable) */
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	return 0;
}
447
/*
 * macio resume hook: re-initialize the chip if the interface was
 * open, then unmask the interrupts and reattach the interface.
 */
static int bmac_resume(struct macio_dev *mdev)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	/* see if this is enough */
	if (bp->opened)
		bmac_reset_and_enable(dev);

	enable_irq(dev->irq);
	enable_irq(bp->tx_dma_intr);
	enable_irq(bp->rx_dma_intr);
	netif_device_attach(dev);

	return 0;
}
464#endif /* CONFIG_PM */
465
/*
 * ndo_set_mac_address hook: copy the new station address into
 * dev->dev_addr and program it into the MADD0..2 registers as three
 * 16-bit words, under the device lock.
 *
 * NOTE(review): @addr is read as a raw 6-byte array; the netdev core
 * normally passes a struct sockaddr whose sa_data holds the address —
 * verify the offset against the callers.
 */
static int bmac_set_address(struct net_device *dev, void *addr)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned char *p = addr;
	unsigned short *pWord16;
	unsigned long flags;
	int i;

	XXDEBUG(("bmac: enter set_address\n"));
	spin_lock_irqsave(&bp->lock, flags);

	for (i = 0; i < 6; ++i) {
		dev->dev_addr[i] = p[i];
	}
	/* load up the hardware address */
	pWord16  = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	spin_unlock_irqrestore(&bp->lock, flags);
	XXDEBUG(("bmac: exit set_address\n"));
	return 0;
}
490
/*
 * (Re)arm the transmit watchdog timer to fire TX_TIMEOUT (1s) from
 * now; any previously pending instance is cancelled first.
 */
static inline void bmac_set_timeout(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active)
		del_timer(&bp->tx_timeout);
	bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	bp->tx_timeout.function = bmac_tx_timeout;
	bp->tx_timeout.data = (unsigned long) dev;
	add_timer(&bp->tx_timeout);
	bp->timeout_active = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
}
506
/*
 * Build a single OUTPUT_LAST DBDMA command that transmits the whole
 * skb from its (bus-translated) data address, interrupting on
 * completion.
 */
static void
bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	void *vaddr;
	unsigned long baddr;
	unsigned long len;

	len = skb->len;
	vaddr = skb->data;
	baddr = virt_to_bus(vaddr);

	dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
}
520
/*
 * Build a single INPUT_LAST DBDMA command receiving into the skb's
 * buffer.  With a NULL skb (allocation failure) the shared emergency
 * buffer is used, so the channel keeps running but the data is lost.
 */
static void
bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;

	dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
		     virt_to_bus(addr), 0);
}
529
/*
 * Reset the transmit ring: clear all commands, append a branch
 * command that loops the list back to its start, reset the DMA
 * channel and point it at the list.  Slot state is reset to empty.
 */
static void
bmac_init_tx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

	bp->tx_empty = 0;
	bp->tx_fill = 0;
	bp->tx_fullup = 0;

	/* put a branch at the end of the tx command list */
	dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

	/* reset tx dma */
	dbdma_reset(td);
	out_le32(&td->wait_sel, 0x00200020);
	out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
}
550
/*
 * Reset the receive ring: make sure every slot has an skb (the
 * 2-byte reserve aligns the payload; slots whose allocation fails
 * fall back to the emergency buffer inside bmac_construct_rxbuff),
 * append the loop-back branch, and point the reset DMA channel at the
 * list.  Always returns 1.
 */
static int
bmac_init_rx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	int i;
	struct sk_buff *skb;

	/* initialize list of sk_buffs for receiving and set up recv dma */
	memset((char *)bp->rx_cmds, 0,
	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
	for (i = 0; i < N_RX_RING; i++) {
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(skb, 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
	}

	bp->rx_empty = 0;
	bp->rx_fill = i;

	/* Put a branch back to the beginning of the receive command list */
	dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

	/* start rx dma */
	dbdma_reset(rd);
	out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));

	return 1;
}
583
584
/*
 * Queue one skb on the transmit ring and kick the DMA channel.
 * Returns 0 on success; returns -1 (after stopping the queue) when
 * the ring is full — note the skb is NOT consumed in that case, the
 * caller keeps ownership.
 */
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	int i;

	/* see if there's a free slot in the tx ring */
	/* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
	/* 	     bp->tx_empty, bp->tx_fill)); */
	i = bp->tx_fill + 1;
	if (i >= N_TX_RING)
		i = 0;
	if (i == bp->tx_empty) {
		netif_stop_queue(dev);
		bp->tx_fullup = 1;
		XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
		return -1;		/* can't take it at the moment */
	}

	/* park a STOP in the next slot so the engine halts after this frame */
	dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

	bp->tx_bufs[bp->tx_fill] = skb;
	bp->tx_fill = i;

	dev->stats.tx_bytes += skb->len;

	dbdma_continue(td);

	return 0;
}
617
618static int rxintcount;
619
/*
 * Receive DMA interrupt: reap every descriptor the channel has
 * completed, hand good frames to the stack, refill each slot and
 * restart the channel.
 */
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	int i, nb, stat;
	struct sk_buff *skb;
	unsigned int residual;
	int last;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (++rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr\n"));
	}

	last = -1;
	i = bp->rx_empty;

	while (1) {
		cp = &bp->rx_cmds[i];
		stat = ld_le16(&cp->xfer_status);
		residual = ld_le16(&cp->res_count);
		/* ACTIVE clear => the engine has not finished this slot yet */
		if ((stat & ACTIVE) == 0)
			break;
		/* bytes received = buffer size - residual - 2 alignment bytes */
		nb = RX_BUFLEN - residual - 2;
		if (nb < (ETHERMINPACKET - ETHERCRC)) {
			skb = NULL;	/* runt: count it, drop it */
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
		} else {
			skb = bp->rx_bufs[i];
			bp->rx_bufs[i] = NULL;
		}
		if (skb != NULL) {
			nb -= ETHERCRC;	/* chip delivered the CRC (RxCRCNoStrip) */
			skb_put(skb, nb);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			++dev->stats.rx_packets;
			dev->stats.rx_bytes += nb;
		} else {
			++dev->stats.rx_dropped;
		}
		/* refill the slot; on allocation failure bmac_construct_rxbuff
		 * falls back to the emergency buffer (data discarded) */
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(bp->rx_bufs[i], 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
		/* re-arm the descriptor for the next pass */
		st_le16(&cp->res_count, 0);
		st_le16(&cp->xfer_status, 0);
		last = i;
		if (++i >= N_RX_RING) i = 0;
	}

	if (last != -1) {
		bp->rx_fill = last;
		bp->rx_empty = i;
	}

	dbdma_continue(rd);
	spin_unlock_irqrestore(&bp->lock, flags);

	if (rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr done\n"));
	}
	return IRQ_HANDLED;
}
691
692static int txintcount;
693
/*
 * Transmit DMA interrupt: walk the ring from tx_empty, free the skbs
 * of completed commands, wake the queue, then call bmac_start()
 * (defined later in this file) to push any queued output.
 */
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_cmd *cp;
	int stat;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (txintcount++ < 10) {
		XXDEBUG(("bmac_txdma_intr\n"));
	}

	/*     del_timer(&bp->tx_timeout); */
	/*     bp->timeout_active = 0; */

	while (1) {
		cp = &bp->tx_cmds[bp->tx_empty];
		stat = ld_le16(&cp->xfer_status);
		if (txintcount < 10) {
			XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
		}
		if (!(stat & ACTIVE)) {
			/*
			 * status field might not have been filled by DBDMA
			 */
			/* only stop if the engine really is still on this command */
			if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
				break;
		}

		if (bp->tx_bufs[bp->tx_empty]) {
			++dev->stats.tx_packets;
			dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
		}
		bp->tx_bufs[bp->tx_empty] = NULL;
		bp->tx_fullup = 0;
		netif_wake_queue(dev);
		if (++bp->tx_empty >= N_TX_RING)
			bp->tx_empty = 0;
		if (bp->tx_empty == bp->tx_fill)
			break;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (txintcount < 10) {
		XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
	}

	bmac_start(dev);
	return IRQ_HANDLED;
}
747
748#ifndef SUNHME_MULTICAST
/* Real fast bit-reversal algorithm, 6-bit values */
/* reverse6[i] is i with its low 6 bits mirrored (lookup table). */
static int reverse6[64] = {
	0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
	0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
	0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
	0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
	0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
	0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
	0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
	0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
};
760
761static unsigned int
762crc416(unsigned int curval, unsigned short nxtval)
763{
764	register unsigned int counter, cur = curval, next = nxtval;
765	register int high_crc_set, low_data_set;
766
767	/* Swap bytes */
768	next = ((next & 0x00FF) << 8) | (next >> 8);
769
770	/* Compute bit-by-bit */
771	for (counter = 0; counter < 16; ++counter) {
772		/* is high CRC bit set? */
773		if ((cur & 0x80000000) == 0) high_crc_set = 0;
774		else high_crc_set = 1;
775
776		cur = cur << 1;
777
778		if ((next & 0x0001) == 0) low_data_set = 0;
779		else low_data_set = 1;
780
781		next = next >> 1;
782
783		/* do the XOR */
784		if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
785	}
786	return cur;
787}
788
/*
 * CRC of a 6-byte ethernet address, processed as three 16-bit words
 * (bits 47-32, 31-16, 15-0) starting from an all-ones seed.
 */
static unsigned int
bmac_crc(unsigned short *address)
{
	unsigned int crc = 0xffffffff;
	int word;

	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	for (word = 0; word < 3; ++word)
		crc = crc416(crc, address[word]);
	return crc;
}
801
802/*
803 * Add requested mcast addr to BMac's hash table filter.
804 *
805 */
806
807static void
808bmac_addhash(struct bmac_data *bp, unsigned char *addr)
809{
810	unsigned int	 crc;
811	unsigned short	 mask;
812
813	if (!(*addr)) return;
814	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
815	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
816	if (bp->hash_use_count[crc]++) return; /* This bit is already set */
817	mask = crc % 16;
818	mask = (unsigned char)1 << mask;
819	bp->hash_use_count[crc/16] |= mask;
820}
821
822static void
823bmac_removehash(struct bmac_data *bp, unsigned char *addr)
824{
825	unsigned int crc;
826	unsigned char mask;
827
828	/* Now, delete the address from the filter copy, as indicated */
829	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
830	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
831	if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
832	if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
833	mask = crc % 16;
834	mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
835	bp->hash_table_mask[crc/16] &= mask;
836}
837
838/*
839 * Sync the adapter with the software copy of the multicast mask
840 *  (logical address filter).
841 */
842
/*
 * Disable the MAC receiver and wait until the chip acknowledges by
 * clearing RxMACEnable on readback.  NOTE(review): the wait is
 * unbounded — it relies on the hardware always acking.
 */
static void
bmac_rx_off(struct net_device *dev)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg &= ~RxMACEnable;
	bmwrite(dev, RXCFG, rx_cfg);
	do {
		rx_cfg = bmread(dev, RXCFG);
	}  while (rx_cfg & RxMACEnable);
}
855
/*
 * Re-enable the MAC receiver with the requested hash-filter and
 * promiscuous settings; the RX section and FIFO are reset first.
 * Returns the RXCFG value written.
 * NOTE(review): not declared static although it is only used in this
 * file — confirm no external users before changing linkage.
 */
unsigned short
bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg |= RxMACEnable;
	if (hash_enable) rx_cfg |= RxHashFilterEnable;
	else rx_cfg &= ~RxHashFilterEnable;
	if (promisc_enable) rx_cfg |= RxPromiscEnable;
	else rx_cfg &= ~RxPromiscEnable;
	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
	bmwrite(dev, RXCFG, rx_cfg );
	return rx_cfg;
}
873
/* Copy the software hash filter (hash_table_mask[]) into the chip. */
static void
bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
{
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
}
882
883
884/* Set or clear the multicast filter for this adaptor.
885    num_addrs == -1	Promiscuous mode, receive all packets
886    num_addrs == 0	Normal mode, clear multicast list
887    num_addrs > 0	Multicast mode, receive normal and MC packets, and do
888			best-effort filtering.
889 */
890static void bmac_set_multicast(struct net_device *dev)
891{
892	struct netdev_hw_addr *ha;
893	struct bmac_data *bp = netdev_priv(dev);
894	int num_addrs = netdev_mc_count(dev);
895	unsigned short rx_cfg;
896	int i;
897
898	if (bp->sleeping)
899		return;
900
901	XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
902
903	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
904		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
905		bmac_update_hash_table_mask(dev, bp);
906		rx_cfg = bmac_rx_on(dev, 1, 0);
907		XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n"));
908	} else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
909		rx_cfg = bmread(dev, RXCFG);
910		rx_cfg |= RxPromiscEnable;
911		bmwrite(dev, RXCFG, rx_cfg);
912		rx_cfg = bmac_rx_on(dev, 0, 1);
913		XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
914	} else {
915		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
916		for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
917		if (num_addrs == 0) {
918			rx_cfg = bmac_rx_on(dev, 0, 0);
919			XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
920		} else {
921			netdev_for_each_mc_addr(ha, dev)
922				bmac_addhash(bp, ha->addr);
923			bmac_update_hash_table_mask(dev, bp);
924			rx_cfg = bmac_rx_on(dev, 1, 0);
925			XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
926		}
927	}
928	/* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
929}
930#else /* ifdef SUNHME_MULTICAST */
931
932/* The version of set_multicast below was lifted from sunhme.c */
933
/*
 * ndo_set_multicast_list hook (SUNHME_MULTICAST variant, the one
 * actually compiled): program the 64-bit hash filter directly.
 * all-multi/too many addresses -> all hash bits set; promisc ->
 * RxPromiscEnable; otherwise derive a 6-bit index per address from
 * the little-endian CRC, as in sunhme.c.
 */
static void bmac_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	char *addrs;
	int i;
	unsigned short rx_cfg;
	u32 crc;

	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		bmwrite(dev, BHASH0, 0xffff);
		bmwrite(dev, BHASH1, 0xffff);
		bmwrite(dev, BHASH2, 0xffff);
		bmwrite(dev, BHASH3, 0xffff);
	} else if(dev->flags & IFF_PROMISC) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
	} else {
		u16 hash_table[4];

		/* leaving promisc mode: make sure the bit is off again */
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg &= ~RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);

		for(i = 0; i < 4; i++) hash_table[i] = 0;

		netdev_for_each_mc_addr(ha, dev) {
			addrs = ha->addr;

			/* skip anything that is not a group address */
			if(!(*addrs & 1))
				continue;

			crc = ether_crc_le(6, addrs);
			crc >>= 26;	/* keep the top 6 bits */
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		bmwrite(dev, BHASH0, hash_table[0]);
		bmwrite(dev, BHASH1, hash_table[1]);
		bmwrite(dev, BHASH2, hash_table[2]);
		bmwrite(dev, BHASH3, hash_table[3]);
	}
}
976#endif /* SUNHME_MULTICAST */
977
978static int miscintcount;
979
/*
 * Miscellaneous (MAC status) interrupt: read STATUS — reading it
 * clears the latched bits (see bmac_init_registers) — and fold the
 * error indications into the netdev statistics.
 */
static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	unsigned int status = bmread(dev, STATUS);
	if (miscintcount++ < 10) {
		XXDEBUG(("bmac_misc_intr\n"));
	}
	/* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
	/*     bmac_txdma_intr_inner(irq, dev_id); */
	/*   if (status & FrameReceived) dev->stats.rx_dropped++; */
	if (status & RxErrorMask) dev->stats.rx_errors++;
	if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
	if (status & RxLenCntExp) dev->stats.rx_length_errors++;
	if (status & RxOverFlow) dev->stats.rx_over_errors++;
	if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;

	/*   if (status & FrameSent) dev->stats.tx_dropped++; */
	if (status & TxErrorMask) dev->stats.tx_errors++;
	if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
	if (status & TxNormalCollExp) dev->stats.collisions++;
	return IRQ_HANDLED;
}
1002
1003/*
1004 * Procedure for reading EEPROM
1005 */
1006#define SROMAddressLength	5
1007#define DataInOn		0x0008
1008#define DataInOff		0x0000
1009#define Clk			0x0002
1010#define ChipSelect		0x0001
1011#define SDIShiftCount		3
1012#define SD0ShiftCount		2
1013#define	DelayValue		1000	/* number of microseconds */
1014#define SROMStartOffset		10	/* this is in words */
1015#define SROMReadCount		3	/* number of words to read from SROM */
1016#define SROMAddressBits		6
1017#define EnetAddressOffset	20
1018
/*
 * Clock one bit out of the SROM: raise the clock with chip-select
 * held, sample the data-out line (SD0), drop the clock.  Returns the
 * sampled bit (0 or 1).
 */
static unsigned char
bmac_clock_out_bit(struct net_device *dev)
{
	unsigned short         data;
	unsigned short         val;

	bmwrite(dev, SROMCSR, ChipSelect | Clk);
	udelay(DelayValue);

	data = bmread(dev, SROMCSR);
	udelay(DelayValue);
	val = (data >> SD0ShiftCount) & 1;

	bmwrite(dev, SROMCSR, ChipSelect);
	udelay(DelayValue);

	return val;
}
1037
/*
 * Clock one bit (0 or 1) into the SROM: drive the data-in line (SDI)
 * with chip-select held, pulse the clock high then low.  Any other
 * value of @val is silently ignored.
 */
static void
bmac_clock_in_bit(struct net_device *dev, unsigned int val)
{
	unsigned short data;

	if (val != 0 && val != 1) return;

	data = (val << SDIShiftCount);
	bmwrite(dev, SROMCSR, data | ChipSelect  );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect);
	udelay(DelayValue);
}
1055
/*
 * Deselect/reset the serial EEPROM, then clock in the read opcode
 * bits (1, 1, 0) so the next bits shifted in form the read address.
 */
static void
reset_and_select_srom(struct net_device *dev)
{
	/* first reset */
	bmwrite(dev, SROMCSR, 0);
	udelay(DelayValue);

	/* send it the read command (110) */
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 0);
}
1068
/*
 * Clock an @addr_len-bit address out to the SROM (MSB first), clock
 * the addressed 16-bit word back in (MSB first), and deselect the
 * SROM.  Assumes reset_and_select_srom() was called just before.
 */
static unsigned short
read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
{
	unsigned short data, val;
	int i;

	/* send out the address we want to read from */
	for (i = 0; i < addr_len; i++)	{
		val = addr >> (addr_len-i-1);
		bmac_clock_in_bit(dev, val & 1);
	}

	/* Now read in the 16-bit data */
	data = 0;
	for (i = 0; i < 16; i++)	{
		val = bmac_clock_out_bit(dev);
		data <<= 1;
		data |= val;
	}
	bmwrite(dev, SROMCSR, 0);	/* deselect */

	return data;
}
1092
1093/*
1094 * It looks like Cogent and SMC use different methods for calculating
1095 * checksums. What a pain..
1096 */
1097
/*
 * Read the checksum word stored in the SROM and byte-swap it.
 * NOTE(review): this is effectively a stub — the stored checksum is
 * never compared against anything and the function always reports
 * success (0); per the comment above, the vendors' checksum
 * algorithms differ.
 */
static int
bmac_verify_checksum(struct net_device *dev)
{
	unsigned short data, storedCS;

	reset_and_select_srom(dev);
	data = read_srom(dev, 3, SROMAddressBits);
	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);

	return 0;
}
1109
1110
1111static void
1112bmac_get_station_address(struct net_device *dev, unsigned char *ea)
1113{
1114	int i;
1115	unsigned short data;
1116
1117	for (i = 0; i < 6; i++)
1118		{
1119			reset_and_select_srom(dev);
1120			data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
1121			ea[2*i]   = bitrev8(data & 0x0ff);
1122			ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
1123		}
1124}
1125
/*
 * Fully reinitialise the chip under the driver lock: power it up and
 * reset it, rebuild both DMA rings, reprogram and start the MAC, then
 * unmask the normal interrupt set and clear the sleeping flag.
 */
static void bmac_reset_and_enable(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&bp->lock, flags);
	bmac_enable_and_reset_chip(dev);
	bmac_init_tx_ring(bp);
	bmac_init_rx_ring(bp);
	bmac_init_chip(dev);
	bmac_start_chip(dev);
	bmwrite(dev, INTDISABLE, EnableNormal);
	bp->sleeping = 0;

	/*
	 * It seems that the bmac can't receive until it's transmitted
	 * a packet.  So we give it a dummy packet to transmit.
	 */
	skb = dev_alloc_skb(ETHERMINPACKET);
	if (skb != NULL) {
		/* build a minimum-size zeroed frame addressed to ourselves */
		data = skb_put(skb, ETHERMINPACKET);
		memset(data, 0, ETHERMINPACKET);
		memcpy(data, dev->dev_addr, 6);		/* destination = our MAC */
		memcpy(data+6, dev->dev_addr, 6);	/* source = our MAC */
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}
1156static void bmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1157{
1158	struct bmac_data *bp = netdev_priv(dev);
1159	strcpy(info->driver, "bmac");
1160	strcpy(info->bus_info, dev_name(&bp->mdev->ofdev.dev));
1161}
1162
/* ethtool operations: only driver info and link state are supported */
static const struct ethtool_ops bmac_ethtool_ops = {
	.get_drvinfo		= bmac_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};
1167
/* net_device operations; generic eth_* helpers handle MTU/addr checks */
static const struct net_device_ops bmac_netdev_ops = {
	.ndo_open		= bmac_open,
	.ndo_stop		= bmac_close,
	.ndo_start_xmit		= bmac_output,
	.ndo_set_multicast_list	= bmac_set_multicast,
	.ndo_set_mac_address	= bmac_set_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
1177
/*
 * macio probe routine: map the MAC register window and both DBDMA
 * channel windows, read the MAC address from the device tree, hook up
 * the three interrupt lines, and register the net device.
 *
 * match->data is 0 for a plain BMAC and non-zero for a BMAC+.
 * On any failure, resources are unwound via the goto ladder at the
 * bottom and -ENODEV/-ENOMEM is returned.
 */
static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	int j, rev, ret;
	struct bmac_data *bp;
	const unsigned char *prop_addr;
	unsigned char addr[6];
	struct net_device *dev;
	int is_bmac_plus = ((int)match->data) != 0;

	/* we need the MAC registers plus the TX and RX DMA channels,
	 * and one interrupt for each */
	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
		return -ENODEV;
	}
	/* the MAC address lives in the OF device tree; some firmware
	 * uses "local-mac-address" instead of "mac-address" */
	prop_addr = of_get_property(macio_get_of_node(mdev),
			"mac-address", NULL);
	if (prop_addr == NULL) {
		prop_addr = of_get_property(macio_get_of_node(mdev),
				"local-mac-address", NULL);
		if (prop_addr == NULL) {
			printk(KERN_ERR "BMAC: Can't get mac-address\n");
			return -ENODEV;
		}
	}
	memcpy(addr, prop_addr, sizeof(addr));

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev) {
		printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
	macio_set_drvdata(mdev, dev);

	bp->mdev = mdev;
	spin_lock_init(&bp->lock);

	if (macio_request_resources(mdev, "bmac")) {
		printk(KERN_ERR "BMAC: can't request IO resource !\n");
		goto out_free;
	}

	/* map the MAC register window (resource 0) */
	dev->base_addr = (unsigned long)
		ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
	if (dev->base_addr == 0)
		goto out_release;

	dev->irq = macio_irq(mdev, 0);

	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	/* addresses beginning 00:A0 are apparently stored bit-reversed
	 * in the device tree -- TODO(review): confirm this heuristic */
	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j)
		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];

	/* Enable chip without interrupts for now */
	/* NOTE(review): this repeats the enable/reset + mask done just
	 * above; looks redundant but is kept as-is -- verify on
	 * hardware before removing */
	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	dev->netdev_ops = &bmac_netdev_ops;
	dev->ethtool_ops = &bmac_ethtool_ops;

	/* read the station address back from the SROM and (nominally)
	 * check the SROM checksum */
	bmac_get_station_address(dev, addr);
	if (bmac_verify_checksum(dev) != 0)
		goto err_out_iounmap;

	bp->is_bmac_plus = is_bmac_plus;
	/* map the TX (resource 1) and RX (resource 2) DBDMA windows */
	bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
	if (!bp->tx_dma)
		goto err_out_iounmap;
	bp->tx_dma_intr = macio_irq(mdev, 1);
	bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
	if (!bp->rx_dma)
		goto err_out_iounmap_tx;
	bp->rx_dma_intr = macio_irq(mdev, 2);

	/* DMA command rings and the tx skb queue live in the memory
	 * tacked onto the end of bmac_data (PRIV_BYTES) */
	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;

	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
	skb_queue_head_init(bp->queue);

	/* handler/expiry are presumably set when the timer is armed
	 * elsewhere -- TODO(review): confirm */
	init_timer(&bp->tx_timeout);

	ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
		goto err_out_iounmap_rx;
	}
	ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
		goto err_out_irq0;
	}
	ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
		goto err_out_irq1;
	}

	/* Mask chip interrupts and disable chip, will be
	 * re-enabled on open()
	 */
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	if (register_netdev(dev) != 0) {
		printk(KERN_ERR "BMAC: Ethernet registration failed\n");
		goto err_out_irq2;
	}

	printk(KERN_INFO "%s: BMAC%s at %pM",
	       dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
	XXDEBUG((", base_addr=%#0lx", dev->base_addr));
	printk("\n");

	return 0;

	/* error unwinding, in reverse order of acquisition */
err_out_irq2:
	free_irq(bp->rx_dma_intr, dev);
err_out_irq1:
	free_irq(bp->tx_dma_intr, dev);
err_out_irq0:
	free_irq(dev->irq, dev);
err_out_iounmap_rx:
	iounmap(bp->rx_dma);
err_out_iounmap_tx:
	iounmap(bp->tx_dma);
err_out_iounmap:
	iounmap((void __iomem *)dev->base_addr);
out_release:
	macio_release_resources(mdev);
out_free:
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	free_netdev(dev);

	return -ENODEV;
}
1318
/*
 * ndo_open: bring the interface up.  bmac_reset_and_enable() does the
 * real work; the misc interrupt (left disabled by probe) is unmasked
 * only once the chip is fully initialised.
 */
static int bmac_open(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	/* XXDEBUG(("bmac: enter open\n")); */
	/* reset the chip */
	bp->opened = 1;
	bmac_reset_and_enable(dev);
	enable_irq(dev->irq);
	return 0;
}
1329
1330static int bmac_close(struct net_device *dev)
1331{
1332	struct bmac_data *bp = netdev_priv(dev);
1333	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
1334	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
1335	unsigned short config;
1336	int i;
1337
1338	bp->sleeping = 1;
1339
1340	/* disable rx and tx */
1341	config = bmread(dev, RXCFG);
1342	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
1343
1344	config = bmread(dev, TXCFG);
1345	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
1346
1347	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
1348
1349	/* disable rx and tx dma */
1350	st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
1351	st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
1352
1353	/* free some skb's */
1354	XXDEBUG(("bmac: free rx bufs\n"));
1355	for (i=0; i<N_RX_RING; i++) {
1356		if (bp->rx_bufs[i] != NULL) {
1357			dev_kfree_skb(bp->rx_bufs[i]);
1358			bp->rx_bufs[i] = NULL;
1359		}
1360	}
1361	XXDEBUG(("bmac: free tx bufs\n"));
1362	for (i = 0; i<N_TX_RING; i++) {
1363		if (bp->tx_bufs[i] != NULL) {
1364			dev_kfree_skb(bp->tx_bufs[i]);
1365			bp->tx_bufs[i] = NULL;
1366		}
1367	}
1368	XXDEBUG(("bmac: all bufs freed\n"));
1369
1370	bp->opened = 0;
1371	disable_irq(dev->irq);
1372	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
1373
1374	return 0;
1375}
1376
1377static void
1378bmac_start(struct net_device *dev)
1379{
1380	struct bmac_data *bp = netdev_priv(dev);
1381	int i;
1382	struct sk_buff *skb;
1383	unsigned long flags;
1384
1385	if (bp->sleeping)
1386		return;
1387
1388	spin_lock_irqsave(&bp->lock, flags);
1389	while (1) {
1390		i = bp->tx_fill + 1;
1391		if (i >= N_TX_RING)
1392			i = 0;
1393		if (i == bp->tx_empty)
1394			break;
1395		skb = skb_dequeue(bp->queue);
1396		if (skb == NULL)
1397			break;
1398		bmac_transmit_packet(skb, dev);
1399	}
1400	spin_unlock_irqrestore(&bp->lock, flags);
1401}
1402
1403static int
1404bmac_output(struct sk_buff *skb, struct net_device *dev)
1405{
1406	struct bmac_data *bp = netdev_priv(dev);
1407	skb_queue_tail(bp->queue, skb);
1408	bmac_start(dev);
1409	return NETDEV_TX_OK;
1410}
1411
/*
 * Transmit watchdog (timer callback; data is the net_device pointer).
 * Stops the MAC and TX DMA, resets the chip, restarts the RX DMA from
 * its current command, drops the stuck TX frame, and restarts the TX
 * ring if more packets are pending.  Runs under the driver lock.
 */
static void bmac_tx_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	unsigned short config, oldConfig;
	int i;

	XXDEBUG(("bmac: tx_timeout called\n"));
	spin_lock_irqsave(&bp->lock, flags);
	bp->timeout_active = 0;

	/* update various counters */
/*     	bmac_handle_misc_intrs(bp, 0); */

	cp = &bp->tx_cmds[bp->tx_empty];
/*	XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
/* 	   ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
/* 	   mb->pr, mb->xmtfs, mb->fifofc)); */

	/* turn off both tx and rx and reset the chip */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
	bmac_enable_and_reset_chip(dev);

	/* restart rx dma from the command it was on when we stopped it */
	cp = bus_to_virt(ld_le32(&rd->cmdptr));
	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

	/* fix up the transmit side: drop the frame that timed out */
	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
	i = bp->tx_empty;
	++dev->stats.tx_errors;
	if (i != bp->tx_fill) {
		dev_kfree_skb(bp->tx_bufs[i]);
		bp->tx_bufs[i] = NULL;
		if (++i >= N_TX_RING) i = 0;
		bp->tx_empty = i;
	}
	bp->tx_fullup = 0;
	netif_wake_queue(dev);
	/* if more frames are pending, restart the TX DMA on the next one */
	if (i != bp->tx_fill) {
		cp = &bp->tx_cmds[i];
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, DBDMA_SET(RUN));
		/* 	bmac_set_timeout(dev); */
		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
	}

	/* turn it back on */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	spin_unlock_irqrestore(&bp->lock, flags);
}
1482
1483
1484
1485static int __devexit bmac_remove(struct macio_dev *mdev)
1486{
1487	struct net_device *dev = macio_get_drvdata(mdev);
1488	struct bmac_data *bp = netdev_priv(dev);
1489
1490	unregister_netdev(dev);
1491
1492       	free_irq(dev->irq, dev);
1493	free_irq(bp->tx_dma_intr, dev);
1494	free_irq(bp->rx_dma_intr, dev);
1495
1496	iounmap((void __iomem *)dev->base_addr);
1497	iounmap(bp->tx_dma);
1498	iounmap(bp->rx_dma);
1499
1500	macio_release_resources(mdev);
1501
1502	free_netdev(dev);
1503
1504	return 0;
1505}
1506
/*
 * Device-tree match table: plain "bmac" nodes (data 0) and "bmac+"
 * compatible network nodes (data 1); probe uses data to tell them apart.
 */
static struct of_device_id bmac_match[] =
{
	{
	.name 		= "bmac",
	.data		= (void *)0,
	},
	{
	.type		= "network",
	.compatible	= "bmac+",
	.data		= (void *)1,
	},
	{},
};
MODULE_DEVICE_TABLE (of, bmac_match);
1521
/* macio bus driver glue; suspend/resume only when power management is on */
static struct macio_driver bmac_driver =
{
	.driver = {
		.name 		= "bmac",
		.owner		= THIS_MODULE,
		.of_match_table	= bmac_match,
	},
	.probe		= bmac_probe,
	.remove		= bmac_remove,
#ifdef CONFIG_PM
	.suspend	= bmac_suspend,
	.resume		= bmac_resume,
#endif
};
1536
1537
1538static int __init bmac_init(void)
1539{
1540	if (bmac_emergency_rxbuf == NULL) {
1541		bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
1542		if (bmac_emergency_rxbuf == NULL) {
1543			printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
1544			return -ENOMEM;
1545		}
1546	}
1547
1548	return macio_register_driver(&bmac_driver);
1549}
1550
1551static void __exit bmac_exit(void)
1552{
1553	macio_unregister_driver(&bmac_driver);
1554
1555	kfree(bmac_emergency_rxbuf);
1556	bmac_emergency_rxbuf = NULL;
1557}
1558
/* Module metadata and entry points */
MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
MODULE_LICENSE("GPL");

module_init(bmac_init);
module_exit(bmac_exit);
1565