/*
 * Network device driver for the BMAC ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1998 Randy Gobbel.
 *
 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
 * dynamic procfs inode.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
#include <asm/irq.h>

#include "bmac.h"

#define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))

/*
 * CRC polynomial - used in working out multicast filter bits.
 */
#define ENET_CRCPOLY 0x04c11db7

/* Define this to use the multicast code lifted from the sunhme driver */
#define SUNHME_MULTICAST

#define N_RX_RING	64
#define N_TX_RING	32
#define MAX_TX_ACTIVE	1
#define ETHERCRC	4
#define ETHERMINPACKET	64
#define ETHERMTU	1500
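/* Receive buffer: Ethernet header (14) + MTU of payload + FCS, plus 2
 * bytes of slack to cover the skb_reserve(skb, 2) alignment offset
 * used by the receive path. */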
#define RX_BUFLEN	(ETHERMTU + 14 + ETHERCRC + 2)
#define TX_TIMEOUT	HZ	/* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

#define XXDEBUG(args)	/* debug printks compiled out; define as "printk args" to enable */

struct bmac_data {
	/* volatile struct bmac *bmac; */
	struct sk_buff_head *queue;
	volatile struct dbdma_regs __iomem *tx_dma;
	int tx_dma_intr;
	volatile struct dbdma_regs __iomem *rx_dma;
	int rx_dma_intr;
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct macio_dev *mdev;
	int is_bmac_plus;
	struct sk_buff *rx_bufs[N_RX_RING];
	int rx_fill;
	int rx_empty;
	struct sk_buff *tx_bufs[N_TX_RING];
	int tx_fill;
	int tx_empty;
	unsigned char tx_fullup;
	struct net_device_stats stats;
	struct timer_list tx_timeout;
	int timeout_active;
	int sleeping;
	int opened;
	unsigned short hash_use_count[64];
	unsigned short hash_table_mask[4];
	spinlock_t lock;
};

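/*
 * Fallback receive buffer: bmac_construct_rxbuff() points a ring slot
 * here when dev_alloc_skb() fails, so the DBDMA engine always has
 * somewhere valid to deposit an incoming frame (which is then dropped).
 */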
static unsigned char *bmac_emergency_rxbuf;

/*
 * Number of bytes of private data per BMAC: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct bmac_data) \
	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
	+ sizeof(struct sk_buff_head))

static int bmac_open(struct net_device *dev);
static int bmac_close(struct net_device *dev);
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *bmac_stats(struct net_device *dev);
static void bmac_set_multicast(struct net_device *dev);
static void bmac_reset_and_enable(struct net_device *dev);
static void bmac_start_chip(struct net_device *dev);
static void bmac_init_chip(struct net_device *dev);
static void bmac_init_registers(struct net_device *dev);
static void bmac_enable_and_reset_chip(struct net_device *dev);
static int bmac_set_address(struct net_device *dev, void *addr);
static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(unsigned long data);
static int bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);

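/*
 * DBDMA control-register convention: the top 16 bits of a write select
 * which status/control bits to change, the bottom 16 bits give their
 * new values, so DBDMA_SET(x) sets the bits in x and DBDMA_CLEAR(x)
 * clears them.  The accessors below use the byte-reversing lwbrx/stwbrx
 * instructions because the DBDMA registers are little-endian while the
 * CPU runs big-endian.
 */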
#define	DBDMA_SET(x)	( ((x) | (x) << 16) )
#define	DBDMA_CLEAR(x)	( (x) << 16)

static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
}

static inline unsigned long
dbdma_ld32(volatile __u32 __iomem *a)
{
	__u32 swap;
	__asm__ volatile ("lwbrx %0,0,%1" :  "=r" (swap) : "r" (a));
	return swap;
}

static void
dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
	eieio();
}

static void
dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
	eieio();
	while (dbdma_ld32(&dmap->status) & RUN)
		eieio();
}

static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
	     unsigned short cmd, unsigned count, unsigned long addr,
	     unsigned long cmd_dep)
{
	out_le16(&cp->command, cmd);
	out_le16(&cp->req_count, count);
	out_le32(&cp->phy_addr, addr);
	out_le32(&cp->cmd_dep, cmd_dep);
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->res_count, 0);
}

static inline
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data)
{
	out_le16((void __iomem *)dev->base_addr + reg_offset, data);
}

static inline
unsigned short bmread(struct net_device *dev, unsigned long reg_offset)
{
	return in_le16((void __iomem *)dev->base_addr + reg_offset);
}

static void
bmac_enable_and_reset_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	if (rd)
		dbdma_reset(rd);
	if (td)
		dbdma_reset(td);

	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
}

#define MIFDELAY	udelay(10)
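
/*
 * Bit-banged MII (MIF) access to the transceiver.  From the way the
 * code below drives MIFCSR, bit 0 appears to be the MDC clock, bit 1
 * the outgoing MDIO data, bit 2 the output enable and bit 3 the
 * incoming MDIO data.  A frame is a preamble of 32 ones, a 4-bit
 * start/opcode, a 10-bit PHY-plus-register address, then 16 data bits
 * (plus a turnaround bit on reads, hence the 17-bit read below).
 */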
static unsigned int
bmac_mif_readbits(struct net_device *dev, int nb)
{
	unsigned int val = 0;

	while (--nb >= 0) {
		bmwrite(dev, MIFCSR, 0);
		MIFDELAY;
		if (bmread(dev, MIFCSR) & 8)
			val |= 1 << nb;
		bmwrite(dev, MIFCSR, 1);
		MIFDELAY;
	}
	bmwrite(dev, MIFCSR, 0);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	return val;
}

static void
bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
{
	int b;

	while (--nb >= 0) {
		b = (val & (1 << nb))? 6: 4;
		bmwrite(dev, MIFCSR, b);
		MIFDELAY;
		bmwrite(dev, MIFCSR, b|1);
		MIFDELAY;
	}
}

static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
	unsigned int val;

	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 6, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmwrite(dev, MIFCSR, 2);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	val = bmac_mif_readbits(dev, 17);
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	return val;
}

static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 5, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmac_mif_writebits(dev, 2, 2);
	bmac_mif_writebits(dev, val, 16);
	bmac_mif_writebits(dev, 3, 2);
}

static void
bmac_init_registers(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile unsigned short regValue;
	unsigned short *pWord16;
	int i;

	/* XXDEBUG(("bmac: enter init_registers\n")); */

	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, TXRST, TxResetBit);

	i = 100;
	do {
		--i;
		udelay(10000);
		regValue = bmread(dev, TXRST); /* wait for reset to clear...acknowledge */
	} while ((regValue & TxResetBit) && i > 0);

	if (!bp->is_bmac_plus) {
		regValue = bmread(dev, XCVRIF);
		regValue |= ClkBit | SerialMode | COLActiveLow;
		bmwrite(dev, XCVRIF, regValue);
		udelay(10000);
	}

	bmwrite(dev, RSEED, (unsigned short)0x1968);

	regValue = bmread(dev, XIFC);
	regValue |= TxOutputEnable;
	bmwrite(dev, XIFC, regValue);

	bmread(dev, PAREG);

	/* set collision counters to 0 */
	bmwrite(dev, NCCNT, 0);
	bmwrite(dev, NTCNT, 0);
	bmwrite(dev, EXCNT, 0);
	bmwrite(dev, LTCNT, 0);

	/* set rx counters to 0 */
	bmwrite(dev, FRCNT, 0);
	bmwrite(dev, LECNT, 0);
	bmwrite(dev, AECNT, 0);
	bmwrite(dev, FECNT, 0);
	bmwrite(dev, RXCV, 0);

	/* set tx fifo information */
	bmwrite(dev, TXTH, 4);	/* 4 octets before tx starts */

	bmwrite(dev, TXFIFOCSR, 0);	/* first disable txFIFO */
	bmwrite(dev, TXFIFOCSR, TxFIFOEnable);

	/* set rx fifo information */
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable);

	/* bmwrite(dev, TXCFG, TxMACEnable); */		/* TxNeverGiveUp maybe later */
	bmread(dev, STATUS);		/* read it just to clear it */

	/* zero out the chip Hash Filter registers */
	for (i = 0; i < 4; i++) bp->hash_table_mask[i] = 0;
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */

	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

	bmwrite(dev, INTDISABLE, EnableNormal);
}

static void
bmac_start_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	unsigned short	oldConfig;

	/* enable rx dma channel */
	dbdma_continue(rd);

	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable);

	/* turn on rx plus any other bits already on (promiscuous possibly) */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable);
	udelay(20000);
}

static void
bmac_init_phy(struct net_device *dev)
{
	unsigned int addr;
	struct bmac_data *bp = netdev_priv(dev);

	printk(KERN_DEBUG "phy registers:");
	for (addr = 0; addr < 32; ++addr) {
		if ((addr & 7) == 0)
			printk("\n" KERN_DEBUG);
		printk(" %.4x", bmac_mif_read(dev, addr));
	}
	printk("\n");
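	/*
	 * On BMAC+ the transceiver is a standard MII PHY: register 1
	 * holds the capability bits, register 4 the autonegotiation
	 * advertisement, and in register 0 bit 0x1000 enables
	 * autonegotiation while 0x1200 also restarts it.
	 */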
	if (bp->is_bmac_plus) {
		unsigned int capable, ctrl;

		ctrl = bmac_mif_read(dev, 0);
		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
		if (bmac_mif_read(dev, 4) != capable
		    || (ctrl & 0x1000) == 0) {
			bmac_mif_write(dev, 4, capable);
			bmac_mif_write(dev, 0, 0x1200);
		} else
			bmac_mif_write(dev, 0, 0x1000);
	}
}

static void bmac_init_chip(struct net_device *dev)
{
	bmac_init_phy(dev);
	bmac_init_registers(dev);
}

#ifdef CONFIG_PM
static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	unsigned short config;
	int i;

	netif_device_detach(dev);
	/* probably should wait for dma to finish & turn off the chip */
	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active) {
		del_timer(&bp->tx_timeout);
		bp->timeout_active = 0;
	}
	disable_irq(dev->irq);
	disable_irq(bp->tx_dma_intr);
	disable_irq(bp->rx_dma_intr);
	bp->sleeping = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
	if (bp->opened) {
		volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
		volatile struct dbdma_regs __iomem *td = bp->tx_dma;

		config = bmread(dev, RXCFG);
		bmwrite(dev, RXCFG, (config & ~RxMACEnable));
		config = bmread(dev, TXCFG);
		bmwrite(dev, TXCFG, (config & ~TxMACEnable));
		bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
		/* disable rx and tx dma */
		st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		/* free some skb's */
		for (i = 0; i < N_RX_RING; i++) {
			if (bp->rx_bufs[i] != NULL) {
				dev_kfree_skb(bp->rx_bufs[i]);
				bp->rx_bufs[i] = NULL;
			}
		}
		for (i = 0; i < N_TX_RING; i++) {
			if (bp->tx_bufs[i] != NULL) {
				dev_kfree_skb(bp->tx_bufs[i]);
				bp->tx_bufs[i] = NULL;
			}
		}
	}
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	return 0;
}

static int bmac_resume(struct macio_dev *mdev)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	/* see if this is enough */
	if (bp->opened)
		bmac_reset_and_enable(dev);

	enable_irq(dev->irq);
	enable_irq(bp->tx_dma_intr);
	enable_irq(bp->rx_dma_intr);
	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */

static int bmac_set_address(struct net_device *dev, void *addr)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned char *p = addr;
	unsigned short *pWord16;
	unsigned long flags;
	int i;

	XXDEBUG(("bmac: enter set_address\n"));
	spin_lock_irqsave(&bp->lock, flags);

	for (i = 0; i < 6; ++i) {
		dev->dev_addr[i] = p[i];
	}
	/* load up the hardware address */
	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	spin_unlock_irqrestore(&bp->lock, flags);
	XXDEBUG(("bmac: exit set_address\n"));
	return 0;
}

static inline void bmac_set_timeout(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active)
		del_timer(&bp->tx_timeout);
	bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	bp->tx_timeout.function = bmac_tx_timeout;
	bp->tx_timeout.data = (unsigned long) dev;
	add_timer(&bp->tx_timeout);
	bp->timeout_active = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
}
static void
bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	void *vaddr;
	unsigned long baddr;
	unsigned long len;

	len = skb->len;
	vaddr = skb->data;
	baddr = virt_to_bus(vaddr);

	dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
}

static void
bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;

	dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
		     virt_to_bus(addr), 0);
}

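/*
 * Each DMA ring is laid out as N command descriptors followed by one
 * DBDMA branch command that jumps back to the start, so the channel
 * loops over the ring without CPU help.  tx_fill is where the next
 * frame goes in; tx_empty is the oldest not-yet-reclaimed slot.
 */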
static void
bmac_init_tx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

	bp->tx_empty = 0;
	bp->tx_fill = 0;
	bp->tx_fullup = 0;

	/* put a branch at the end of the tx command list */
	dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

	/* reset tx dma */
	dbdma_reset(td);
	out_le32(&td->wait_sel, 0x00200020);
	out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
}

static int
bmac_init_rx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	int i;
	struct sk_buff *skb;

	/* initialize list of sk_buffs for receiving and set up recv dma */
	memset((char *)bp->rx_cmds, 0,
	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
	for (i = 0; i < N_RX_RING; i++) {
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(skb, 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
	}

	bp->rx_empty = 0;
	bp->rx_fill = i;

	/* Put a branch back to the beginning of the receive command list */
	dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

	/* start rx dma */
	dbdma_reset(rd);
	out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));

	return 1;
}

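/*
 * One slot is always left unused: the ring counts as full when
 * advancing tx_fill would make it equal tx_empty, which is how an
 * empty ring (tx_fill == tx_empty) is told apart from a full one.
 */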
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	int i;

	/* see if there's a free slot in the tx ring */
	/* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
	/* 	     bp->tx_empty, bp->tx_fill)); */
	i = bp->tx_fill + 1;
	if (i >= N_TX_RING)
		i = 0;
	if (i == bp->tx_empty) {
		netif_stop_queue(dev);
		bp->tx_fullup = 1;
		XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
		return -1;		/* can't take it at the moment */
	}

	dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

	bp->tx_bufs[bp->tx_fill] = skb;
	bp->tx_fill = i;

	bp->stats.tx_bytes += skb->len;

	dbdma_continue(td);

	return 0;
}

static int rxintcount;

static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	int i, nb, stat;
	struct sk_buff *skb;
	unsigned int residual;
	int last;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (++rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr\n"));
	}

	last = -1;
	i = bp->rx_empty;

	while (1) {
		cp = &bp->rx_cmds[i];
		stat = ld_le16(&cp->xfer_status);
		residual = ld_le16(&cp->res_count);
		if ((stat & ACTIVE) == 0)
			break;
		nb = RX_BUFLEN - residual - 2;
		if (nb < (ETHERMINPACKET - ETHERCRC)) {
			skb = NULL;
			bp->stats.rx_length_errors++;
			bp->stats.rx_errors++;
		} else {
			skb = bp->rx_bufs[i];
			bp->rx_bufs[i] = NULL;
		}
		if (skb != NULL) {
			nb -= ETHERCRC;
			skb_put(skb, nb);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			++bp->stats.rx_packets;
			bp->stats.rx_bytes += nb;
		} else {
			++bp->stats.rx_dropped;
		}
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(bp->rx_bufs[i], 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
		st_le16(&cp->res_count, 0);
		st_le16(&cp->xfer_status, 0);
		last = i;
		if (++i >= N_RX_RING) i = 0;
	}

	if (last != -1) {
		bp->rx_fill = last;
		bp->rx_empty = i;
	}

	dbdma_continue(rd);
	spin_unlock_irqrestore(&bp->lock, flags);

	if (rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr done\n"));
	}
	return IRQ_HANDLED;
}

static int txintcount;

static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_cmd *cp;
	int stat;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (txintcount++ < 10) {
		XXDEBUG(("bmac_txdma_intr\n"));
	}

	/*     del_timer(&bp->tx_timeout); */
	/*     bp->timeout_active = 0; */

	while (1) {
		cp = &bp->tx_cmds[bp->tx_empty];
		stat = ld_le16(&cp->xfer_status);
		if (txintcount < 10) {
			XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
		}
		if (!(stat & ACTIVE)) {
			/*
			 * status field might not have been filled by DBDMA
			 */
			if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
				break;
		}

		if (bp->tx_bufs[bp->tx_empty]) {
			++bp->stats.tx_packets;
			dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
		}
		bp->tx_bufs[bp->tx_empty] = NULL;
		bp->tx_fullup = 0;
		netif_wake_queue(dev);
		if (++bp->tx_empty >= N_TX_RING)
			bp->tx_empty = 0;
		if (bp->tx_empty == bp->tx_fill)
			break;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (txintcount < 10) {
		XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
	}

	bmac_start(dev);
	return IRQ_HANDLED;
}

static struct net_device_stats *bmac_stats(struct net_device *dev)
{
	struct bmac_data *p = netdev_priv(dev);

	return &p->stats;
}

#ifndef SUNHME_MULTICAST
/* Real fast bit-reversal algorithm, 6-bit values */
static int reverse6[64] = {
	0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
	0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
	0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
	0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
	0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
	0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
	0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
	0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
};

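/*
 * crc416() advances a CRC-32 (polynomial ENET_CRCPOLY, MSB first) by
 * 16 bits of data; bmac_crc() chains three of these over the 48-bit
 * station address.  The filter then uses the top 6 CRC bits,
 * bit-reversed via reverse6[], as an index into the 64-bit hash table.
 */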
static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
	register unsigned int counter, cur = curval, next = nxtval;
	register int high_crc_set, low_data_set;

	/* Swap bytes */
	next = ((next & 0x00FF) << 8) | (next >> 8);

	/* Compute bit-by-bit */
	for (counter = 0; counter < 16; ++counter) {
		/* is high CRC bit set? */
		if ((cur & 0x80000000) == 0) high_crc_set = 0;
		else high_crc_set = 1;

		cur = cur << 1;

		if ((next & 0x0001) == 0) low_data_set = 0;
		else low_data_set = 1;

		next = next >> 1;

		/* do the XOR */
		if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
	}
	return cur;
}

static unsigned int
bmac_crc(unsigned short *address)
{
	unsigned int newcrc;

	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	newcrc = crc416(0xffffffff, *address);	/* address bits 47 - 32 */
	newcrc = crc416(newcrc, address[1]);	/* address bits 31 - 16 */
	newcrc = crc416(newcrc, address[2]);	/* address bits 15 - 0  */

	return newcrc;
}

/*
 * Add requested mcast addr to BMac's hash table filter.
 */

static void
bmac_addhash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int	 crc;
	unsigned short	 mask;

	if (!(*addr)) return;
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc]++) return; /* This bit is already set */
	mask = crc % 16;
	mask = (unsigned short)1 << mask;
	bp->hash_table_mask[crc/16] |= mask;
}

static void
bmac_removehash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int crc;
	unsigned short mask;

	/* Now, delete the address from the filter copy, as indicated */
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
	if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
	mask = crc % 16;
	mask = ((unsigned short)1 << mask) ^ 0xffff; /* To turn off bit */
	bp->hash_table_mask[crc/16] &= mask;
}

/*
 * Sync the adapter with the software copy of the multicast mask
 *  (logical address filter).
 */

static void
bmac_rx_off(struct net_device *dev)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg &= ~RxMACEnable;
	bmwrite(dev, RXCFG, rx_cfg);
	do {
		rx_cfg = bmread(dev, RXCFG);
	} while (rx_cfg & RxMACEnable);
}

static unsigned short
bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg |= RxMACEnable;
	if (hash_enable) rx_cfg |= RxHashFilterEnable;
	else rx_cfg &= ~RxHashFilterEnable;
	if (promisc_enable) rx_cfg |= RxPromiscEnable;
	else rx_cfg &= ~RxPromiscEnable;
	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable);
	bmwrite(dev, RXCFG, rx_cfg);
	return rx_cfg;
}

static void
bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
{
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
}

/* Set or clear the multicast filter for this adaptor.
    num_addrs == -1	Promiscuous mode, receive all packets
    num_addrs == 0	Normal mode, clear multicast list
    num_addrs > 0	Multicast mode, receive normal and MC packets, and do
			best-effort filtering.
 */
static void bmac_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *dmi;
	struct bmac_data *bp = netdev_priv(dev);
	int num_addrs = dev->mc_count;
	unsigned short rx_cfg;
	int i;

	if (bp->sleeping)
		return;

	XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));

	if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		for (i = 0; i < 4; i++) bp->hash_table_mask[i] = 0xffff;
		bmac_update_hash_table_mask(dev, bp);
		rx_cfg = bmac_rx_on(dev, 1, 0);
		XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
	} else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
		rx_cfg = bmac_rx_on(dev, 0, 1);
		XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
	} else {
		for (i = 0; i < 4; i++) bp->hash_table_mask[i] = 0;
		for (i = 0; i < 64; i++) bp->hash_use_count[i] = 0;
		if (num_addrs == 0) {
			rx_cfg = bmac_rx_on(dev, 0, 0);
			XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
		} else {
			for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next)
				bmac_addhash(bp, dmi->dmi_addr);
			bmac_update_hash_table_mask(dev, bp);
			rx_cfg = bmac_rx_on(dev, 1, 0);
			XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
		}
	}
	/* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
}
#else /* ifdef SUNHME_MULTICAST */

/* The version of set_multicast below was lifted from sunhme.c */

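/*
 * The hash table is 64 bits wide, split across the four 16-bit
 * BHASH0..BHASH3 registers.  Each multicast address is hashed with the
 * little-endian Ethernet CRC; the top 6 bits of the CRC select one of
 * the 64 filter bits.
 */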
static void bmac_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *dmi = dev->mc_list;
	char *addrs;
	int i;
	unsigned short rx_cfg;
	u32 crc;

	if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		bmwrite(dev, BHASH0, 0xffff);
		bmwrite(dev, BHASH1, 0xffff);
		bmwrite(dev, BHASH2, 0xffff);
		bmwrite(dev, BHASH3, 0xffff);
	} else if (dev->flags & IFF_PROMISC) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
	} else {
		u16 hash_table[4];

		rx_cfg = bmread(dev, RXCFG);
		rx_cfg &= ~RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);

		for (i = 0; i < 4; i++) hash_table[i] = 0;

		for (i = 0; i < dev->mc_count; i++) {
			addrs = dmi->dmi_addr;
			dmi = dmi->next;

			if (!(*addrs & 1))
				continue;

			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		bmwrite(dev, BHASH0, hash_table[0]);
		bmwrite(dev, BHASH1, hash_table[1]);
		bmwrite(dev, BHASH2, hash_table[2]);
		bmwrite(dev, BHASH3, hash_table[3]);
	}
}
#endif /* SUNHME_MULTICAST */

static int miscintcount;

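/*
 * The "misc" interrupt reports MAC-level events.  Reading STATUS both
 * fetches and clears the latched bits (see the "read it just to clear
 * it" note in bmac_init_registers), which are then folded into the
 * error counters.
 */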
static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	unsigned int status = bmread(dev, STATUS);

	if (miscintcount++ < 10) {
		XXDEBUG(("bmac_misc_intr\n"));
	}
	/* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
	/*     bmac_txdma_intr_inner(irq, dev_id); */
	/*   if (status & FrameReceived) bp->stats.rx_dropped++; */
	if (status & RxErrorMask) bp->stats.rx_errors++;
	if (status & RxCRCCntExp) bp->stats.rx_crc_errors++;
	if (status & RxLenCntExp) bp->stats.rx_length_errors++;
	if (status & RxOverFlow) bp->stats.rx_over_errors++;
	if (status & RxAlignCntExp) bp->stats.rx_frame_errors++;

	/*   if (status & FrameSent) bp->stats.tx_dropped++; */
	if (status & TxErrorMask) bp->stats.tx_errors++;
	if (status & TxUnderrun) bp->stats.tx_fifo_errors++;
	if (status & TxNormalCollExp) bp->stats.collisions++;
	return IRQ_HANDLED;
}

/*
 * Procedure for reading EEPROM
 */
#define SROMAddressLength	5
#define DataInOn		0x0008
#define DataInOff		0x0000
#define Clk			0x0002
#define ChipSelect		0x0001
#define SDIShiftCount		3
#define SD0ShiftCount		2
#define	DelayValue		1000	/* number of microseconds */
#define SROMStartOffset		10	/* this is in words */
#define SROMReadCount		3	/* number of words to read from SROM */
#define SROMAddressBits		6
#define EnetAddressOffset	20
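
/*
 * The station address lives in a small serial (microwire-style) EEPROM
 * that is bit-banged through SROMCSR: ChipSelect and Clk frame each
 * bit, data goes out on bit SDIShiftCount and comes back on bit
 * SD0ShiftCount.  A read is the 110 opcode, a word address, then 16
 * data bits clocked out MSB first.
 */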

static unsigned char
bmac_clock_out_bit(struct net_device *dev)
{
	unsigned short data;
	unsigned short val;

	bmwrite(dev, SROMCSR, ChipSelect | Clk);
	udelay(DelayValue);

	data = bmread(dev, SROMCSR);
	udelay(DelayValue);
	val = (data >> SD0ShiftCount) & 1;

	bmwrite(dev, SROMCSR, ChipSelect);
	udelay(DelayValue);

	return val;
}

static void
bmac_clock_in_bit(struct net_device *dev, unsigned int val)
{
	unsigned short data;

	if (val != 0 && val != 1) return;

	data = (val << SDIShiftCount);
	bmwrite(dev, SROMCSR, data | ChipSelect);
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect | Clk);
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect);
	udelay(DelayValue);
}

static void
reset_and_select_srom(struct net_device *dev)
{
	/* first reset */
	bmwrite(dev, SROMCSR, 0);
	udelay(DelayValue);

	/* send it the read command (110) */
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 0);
}

static unsigned short
read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
{
	unsigned short data, val;
	int i;

	/* send out the address we want to read from */
	for (i = 0; i < addr_len; i++) {
		val = addr >> (addr_len-i-1);
		bmac_clock_in_bit(dev, val & 1);
	}

	/* Now read in the 16-bit data */
	data = 0;
	for (i = 0; i < 16; i++) {
		val = bmac_clock_out_bit(dev);
		data <<= 1;
		data |= val;
	}
	bmwrite(dev, SROMCSR, 0);

	return data;
}

/*
 * Cogent and SMC apparently use different methods for calculating
 * the checksum, so the stored value is read here but not actually
 * verified.  What a pain..
 */

static int
bmac_verify_checksum(struct net_device *dev)
{
	unsigned short data, storedCS;

	reset_and_select_srom(dev);
	data = read_srom(dev, 3, SROMAddressBits);
	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);

	return 0;
}

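/*
 * The EEPROM stores each byte of the Ethernet address bit-reversed,
 * hence the bitrev8() on both halves of every 16-bit word read.
 */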
static void
bmac_get_station_address(struct net_device *dev, unsigned char *ea)
{
	int i;
	unsigned short data;

	/* 3 words of 2 bytes each cover the 6-byte address; looping to 6
	 * would overrun the caller's 6-byte buffer */
	for (i = 0; i < 3; i++) {
		reset_and_select_srom(dev);
		data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
		ea[2*i]   = bitrev8(data & 0x0ff);
		ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
	}
}

static void bmac_reset_and_enable(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&bp->lock, flags);
	bmac_enable_and_reset_chip(dev);
	bmac_init_tx_ring(bp);
	bmac_init_rx_ring(bp);
	bmac_init_chip(dev);
	bmac_start_chip(dev);
	bmwrite(dev, INTDISABLE, EnableNormal);
	bp->sleeping = 0;

	/*
	 * It seems that the bmac can't receive until it's transmitted
	 * a packet.  So we give it a dummy packet to transmit.
	 */
	skb = dev_alloc_skb(ETHERMINPACKET);
	if (skb != NULL) {
		data = skb_put(skb, ETHERMINPACKET);
		memset(data, 0, ETHERMINPACKET);
		memcpy(data, dev->dev_addr, 6);
		memcpy(data+6, dev->dev_addr, 6);
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}

static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	int j, rev, ret;
	struct bmac_data *bp;
	const unsigned char *prop_addr;
	unsigned char addr[6];
	struct net_device *dev;
	int is_bmac_plus = ((int)match->data) != 0;

	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
		return -ENODEV;
	}
	prop_addr = of_get_property(macio_get_of_node(mdev),
			"mac-address", NULL);
	if (prop_addr == NULL) {
		prop_addr = of_get_property(macio_get_of_node(mdev),
				"local-mac-address", NULL);
		if (prop_addr == NULL) {
			printk(KERN_ERR "BMAC: Can't get mac-address\n");
			return -ENODEV;
		}
	}
	memcpy(addr, prop_addr, sizeof(addr));

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev) {
		printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
	macio_set_drvdata(mdev, dev);

	bp->mdev = mdev;
	spin_lock_init(&bp->lock);

	if (macio_request_resources(mdev, "bmac")) {
		printk(KERN_ERR "BMAC: can't request IO resource !\n");
		goto out_free;
	}

	dev->base_addr = (unsigned long)
		ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
	if (dev->base_addr == 0)
		goto out_release;

	dev->irq = macio_irq(mdev, 0);

	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

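	/*
	 * Some firmware apparently stores the OF mac-address property
	 * bit-reversed; addresses with the 00:A0 vendor prefix are
	 * assumed to be such and are flipped back here.
	 */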
	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j)
		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];

	/* Enable chip without interrupts for now */
	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	dev->open = bmac_open;
	dev->stop = bmac_close;
	dev->hard_start_xmit = bmac_output;
	dev->get_stats = bmac_stats;
	dev->set_multicast_list = bmac_set_multicast;
	dev->set_mac_address = bmac_set_address;

	bmac_get_station_address(dev, addr);
	if (bmac_verify_checksum(dev) != 0)
		goto err_out_iounmap;

	bp->is_bmac_plus = is_bmac_plus;
	bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
	if (!bp->tx_dma)
		goto err_out_iounmap;
	bp->tx_dma_intr = macio_irq(mdev, 1);
	bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
	if (!bp->rx_dma)
		goto err_out_iounmap_tx;
	bp->rx_dma_intr = macio_irq(mdev, 2);

	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;

	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
	skb_queue_head_init(bp->queue);

	init_timer(&bp->tx_timeout);

	ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
		goto err_out_iounmap_rx;
	}
	ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
		goto err_out_irq0;
	}
	ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
		goto err_out_irq1;
	}

	/* Mask chip interrupts and disable chip, will be
	 * re-enabled on open()
	 */
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	if (register_netdev(dev) != 0) {
		printk(KERN_ERR "BMAC: Ethernet registration failed\n");
		goto err_out_irq2;
	}

	printk(KERN_INFO "%s: BMAC%s at", dev->name, (is_bmac_plus? "+": ""));
	for (j = 0; j < 6; ++j)
		printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
	XXDEBUG((", base_addr=%#0lx", dev->base_addr));
	printk("\n");

	return 0;

err_out_irq2:
	free_irq(bp->rx_dma_intr, dev);
err_out_irq1:
	free_irq(bp->tx_dma_intr, dev);
err_out_irq0:
	free_irq(dev->irq, dev);
err_out_iounmap_rx:
	iounmap(bp->rx_dma);
err_out_iounmap_tx:
	iounmap(bp->tx_dma);
err_out_iounmap:
	iounmap((void __iomem *)dev->base_addr);
out_release:
	macio_release_resources(mdev);
out_free:
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	free_netdev(dev);

	return -ENODEV;
}

static int bmac_open(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	/* XXDEBUG(("bmac: enter open\n")); */
	/* reset the chip */
	bp->opened = 1;
	bmac_reset_and_enable(dev);
	enable_irq(dev->irq);
	return 0;
}

static int bmac_close(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	unsigned short config;
	int i;

	bp->sleeping = 1;

	/* disable rx and tx */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));

	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));

	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */

	/* disable rx and tx dma */
	st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
	st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */

	/* free some skb's */
	XXDEBUG(("bmac: free rx bufs\n"));
	for (i = 0; i < N_RX_RING; i++) {
		if (bp->rx_bufs[i] != NULL) {
			dev_kfree_skb(bp->rx_bufs[i]);
			bp->rx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: free tx bufs\n"));
	for (i = 0; i < N_TX_RING; i++) {
		if (bp->tx_bufs[i] != NULL) {
			dev_kfree_skb(bp->tx_bufs[i]);
			bp->tx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: all bufs freed\n"));

	bp->opened = 0;
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	return 0;
}

static void
bmac_start(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	if (bp->sleeping)
		return;

	spin_lock_irqsave(&bp->lock, flags);
	while (1) {
		i = bp->tx_fill + 1;
		if (i >= N_TX_RING)
			i = 0;
		if (i == bp->tx_empty)
			break;
		skb = skb_dequeue(bp->queue);
		if (skb == NULL)
			break;
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}

static int
bmac_output(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	skb_queue_tail(bp->queue, skb);
	bmac_start(dev);
	return 0;
}

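/*
 * Transmit watchdog: if a frame sits in the ring for more than
 * TX_TIMEOUT jiffies, reset the chip, restart receive DMA where it
 * left off, drop the stuck frame, and restart transmit DMA from the
 * next descriptor.
 */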
static void bmac_tx_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	unsigned short config, oldConfig;
	int i;

	XXDEBUG(("bmac: tx_timeout called\n"));
	spin_lock_irqsave(&bp->lock, flags);
	bp->timeout_active = 0;

	/* update various counters */
	/* bmac_handle_misc_intrs(bp, 0); */

	cp = &bp->tx_cmds[bp->tx_empty];
	/* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
	/*	   ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
	/*	   mb->pr, mb->xmtfs, mb->fifofc)); */

	/* turn off both tx and rx and reset the chip */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
	bmac_enable_and_reset_chip(dev);

	/* restart rx dma */
	cp = bus_to_virt(ld_le32(&rd->cmdptr));
	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

	/* fix up the transmit side */
	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
	i = bp->tx_empty;
	++bp->stats.tx_errors;
	if (i != bp->tx_fill) {
		dev_kfree_skb(bp->tx_bufs[i]);
		bp->tx_bufs[i] = NULL;
		if (++i >= N_TX_RING) i = 0;
		bp->tx_empty = i;
	}
	bp->tx_fullup = 0;
	netif_wake_queue(dev);
	if (i != bp->tx_fill) {
		cp = &bp->tx_cmds[i];
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, DBDMA_SET(RUN));
		/* bmac_set_timeout(dev); */
		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
	}

	/* turn it back on */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable);
	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable);

	spin_unlock_irqrestore(&bp->lock, flags);
}

static int __devexit bmac_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(bp->tx_dma_intr, dev);
	free_irq(bp->rx_dma_intr, dev);

	iounmap((void __iomem *)dev->base_addr);
	iounmap(bp->tx_dma);
	iounmap(bp->rx_dma);

	macio_release_resources(mdev);

	free_netdev(dev);

	return 0;
}

static struct of_device_id bmac_match[] =
{
	{
	.name		= "bmac",
	.data		= (void *)0,
	},
	{
	.type		= "network",
	.compatible	= "bmac+",
	.data		= (void *)1,
	},
	{},
};
MODULE_DEVICE_TABLE(of, bmac_match);

static struct macio_driver bmac_driver =
{
	.name		= "bmac",
	.match_table	= bmac_match,
	.probe		= bmac_probe,
	.remove		= bmac_remove,
#ifdef CONFIG_PM
	.suspend	= bmac_suspend,
	.resume		= bmac_resume,
#endif
};

static int __init bmac_init(void)
{
	if (bmac_emergency_rxbuf == NULL) {
		bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
		if (bmac_emergency_rxbuf == NULL) {
			printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
			return -ENOMEM;
		}
	}

	return macio_register_driver(&bmac_driver);
}

static void __exit bmac_exit(void)
{
	macio_unregister_driver(&bmac_driver);

	kfree(bmac_emergency_rxbuf);
	bmac_emergency_rxbuf = NULL;
}

MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
MODULE_LICENSE("GPL");

module_init(bmac_init);
module_exit(bmac_exit);