1/*
2 * Network device driver for the BMAC ethernet controller on
3 * Apple Powermacs.  Assumes it's under a DBDMA controller.
4 *
5 * Copyright (C) 1998 Randy Gobbel.
6 *
7 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
8 * dynamic procfs inode.
9 */
10#include <linux/config.h>
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/delay.h>
16#include <linux/string.h>
17#include <linux/timer.h>
18#include <linux/proc_fs.h>
19#include <linux/init.h>
20#include <linux/crc32.h>
21#include <asm/prom.h>
22#include <asm/dbdma.h>
23#include <asm/io.h>
24#include <asm/page.h>
25#include <asm/pgtable.h>
26#include <asm/machdep.h>
27#include <asm/pmac_feature.h>
28#include <asm/irq.h>
29#ifdef CONFIG_PMAC_PBOOK
30#include <linux/adb.h>
31#include <linux/pmu.h>
32#endif /* CONFIG_PMAC_PBOOK */
33#include "bmac.h"
34
35#define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
36#define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
37
38/*
39 * CRC polynomial - used in working out multicast filter bits.
40 */
41#define ENET_CRCPOLY 0x04c11db7
42
43/* switch to use multicast code lifted from sunhme driver */
44#define SUNHME_MULTICAST
45
46#define N_RX_RING	64
47#define N_TX_RING	32
48#define MAX_TX_ACTIVE	1
49#define ETHERCRC	4
50#define ETHERMINPACKET	64
51#define ETHERMTU	1500
52#define RX_BUFLEN	(ETHERMTU + 14 + ETHERCRC + 2)
53#define TX_TIMEOUT	HZ	/* 1 second */
54
55/* Bits in transmit DMA status */
56#define TX_DMA_ERR	0x80
57
58#define XXDEBUG(args)
59
/* Per-interface driver state, stored in dev->priv (sized by PRIV_BYTES). */
struct bmac_data {
	/* volatile struct bmac *bmac; */
	struct sk_buff_head *queue;		/* tx backlog queue (lives in the PRIV_BYTES area) */
	volatile struct dbdma_regs *tx_dma;	/* transmit DBDMA channel registers */
	int tx_dma_intr;			/* transmit DMA channel interrupt number */
	volatile struct dbdma_regs *rx_dma;	/* receive DBDMA channel registers */
	int rx_dma_intr;			/* receive DMA channel interrupt number */
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct device_node *node;		/* Open Firmware node, used for feature calls */
	int is_bmac_plus;			/* nonzero for the bmac+ variant (has MII PHY) */
	struct sk_buff *rx_bufs[N_RX_RING];	/* receive buffers, one per ring slot */
	int rx_fill;				/* last filled rx ring slot */
	int rx_empty;				/* next rx ring slot to harvest */
	struct sk_buff *tx_bufs[N_TX_RING];	/* in-flight transmit buffers */
	int tx_fill;				/* next free tx ring slot */
	int tx_empty;				/* next tx ring slot to reap */
	unsigned char tx_fullup;		/* set when the tx ring is full and the queue stopped */
	struct net_device_stats stats;		/* interface statistics returned by bmac_stats() */
	struct timer_list tx_timeout;		/* watchdog timer for stuck transmits */
	int timeout_active;			/* nonzero while tx_timeout is pending */
	int sleeping;				/* set while the machine is suspended (PMU sleep) */
	int opened;				/* nonzero between open() and close() */
	unsigned short hash_use_count[64];	/* refcount per multicast hash bin */
	unsigned short hash_table_mask[4];	/* software copy of the 64-bit hash filter */
	struct net_device *next_bmac;		/* link in the list of probed bmac devices */
};
87
/* Name/offset pair for one chip register, used when dumping register state
 * (presumably for /proc/net/bmac via bmac_proc_info -- TODO confirm). */
typedef struct bmac_reg_entry {
	char *name;
	unsigned short reg_offset;
} bmac_reg_entry_t;

#define N_REG_ENTRIES 31
94
/* Table of chip registers to dump, in chip-offset order (offsets from bmac.h). */
static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
	{"MEMADD", MEMADD},
	{"MEMDATAHI", MEMDATAHI},
	{"MEMDATALO", MEMDATALO},
	{"TXPNTR", TXPNTR},
	{"RXPNTR", RXPNTR},
	{"IPG1", IPG1},
	{"IPG2", IPG2},
	{"ALIMIT", ALIMIT},
	{"SLOT", SLOT},
	{"PALEN", PALEN},
	{"PAPAT", PAPAT},
	{"TXSFD", TXSFD},
	{"JAM", JAM},
	{"TXCFG", TXCFG},
	{"TXMAX", TXMAX},
	{"TXMIN", TXMIN},
	{"PAREG", PAREG},
	{"DCNT", DCNT},
	{"NCCNT", NCCNT},
	{"NTCNT", NTCNT},
	{"EXCNT", EXCNT},
	{"LTCNT", LTCNT},
	{"TXSM", TXSM},
	{"RXCFG", RXCFG},
	{"RXMAX", RXMAX},
	{"RXMIN", RXMIN},
	{"FRCNT", FRCNT},
	{"AECNT", AECNT},
	{"FECNT", FECNT},
	{"RXSM", RXSM},
	{"RXCV", RXCV}
};
128
129static struct net_device *bmac_devs;
130static unsigned char *bmac_emergency_rxbuf;
131
132#ifdef CONFIG_PMAC_PBOOK
133static int bmac_sleep_notify(struct pmu_sleep_notifier *self, int when);
134static struct pmu_sleep_notifier bmac_sleep_notifier = {
135	bmac_sleep_notify, SLEEP_LEVEL_NET,
136};
137#endif
138
139/*
140 * Number of bytes of private data per BMAC: allow enough for
141 * the rx and tx dma commands plus a branch dma command each,
142 * and another 16 bytes to allow us to align the dma command
143 * buffers on a 16 byte boundary.
144 */
145#define PRIV_BYTES	(sizeof(struct bmac_data) \
146	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
147	+ sizeof(struct sk_buff_head))
148
149static unsigned char bitrev(unsigned char b);
150static void bmac_probe1(struct device_node *bmac, int is_bmac_plus);
151static int bmac_open(struct net_device *dev);
152static int bmac_close(struct net_device *dev);
153static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
154static struct net_device_stats *bmac_stats(struct net_device *dev);
155static void bmac_set_multicast(struct net_device *dev);
156static void bmac_reset_and_enable(struct net_device *dev);
157static void bmac_start_chip(struct net_device *dev);
158static void bmac_init_chip(struct net_device *dev);
159static void bmac_init_registers(struct net_device *dev);
160static void bmac_enable_and_reset_chip(struct net_device *dev);
161static int bmac_set_address(struct net_device *dev, void *addr);
162static void bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs);
163static void bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs);
164static void bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs);
165static void bmac_set_timeout(struct net_device *dev);
166static void bmac_tx_timeout(unsigned long data);
167static int bmac_proc_info ( char *buffer, char **start, off_t offset, int length);
168static int bmac_output(struct sk_buff *skb, struct net_device *dev);
169static void bmac_start(struct net_device *dev);
170
171#define	DBDMA_SET(x)	( ((x) | (x) << 16) )
172#define	DBDMA_CLEAR(x)	( (x) << 16)
173
/*
 * Byte-swapped 32-bit store ("stwbrx"): writes x to *a in little-endian
 * order, as required by the little-endian DBDMA register block.
 */
static inline void
dbdma_st32(volatile unsigned long *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
	return;
}
180
/*
 * Byte-swapped 32-bit load ("lwbrx"): reads *a in little-endian order,
 * the counterpart of dbdma_st32() for DBDMA register reads.
 */
static inline unsigned long
dbdma_ld32(volatile unsigned long *a)
{
	unsigned long swap;
	__asm__ volatile ("lwbrx %0,0,%1" :  "=r" (swap) : "r" (a));
	return swap;
}
188
/*
 * Kick a DBDMA channel: set RUN and WAKE, clear PAUSE and DEAD, so the
 * channel (re)starts processing its command list.
 */
static void
dbdma_continue(volatile struct dbdma_regs *dmap)
{
	dbdma_st32((volatile unsigned long *)&dmap->control,
		   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
	eieio();	/* order the MMIO write before subsequent accesses */
}
196
/*
 * Stop a DBDMA channel: clear all control bits, then spin until the
 * channel's RUN status bit drops, confirming it has actually halted.
 */
static void
dbdma_reset(volatile struct dbdma_regs *dmap)
{
	dbdma_st32((volatile unsigned long *)&dmap->control,
		   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
	eieio();
	while (dbdma_ld32((volatile unsigned long *)&dmap->status) & RUN)
		eieio();	/* busy-wait; each poll is an uncached MMIO read */
}
206
/*
 * Fill in one DBDMA command descriptor (all fields little-endian).
 * xfer_status/res_count are cleared last so the controller does not see
 * a stale "complete" status on a partly written command.
 */
static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
	     unsigned short cmd, unsigned count, unsigned long addr,
	     unsigned long cmd_dep)
{
	out_le16(&cp->command, cmd);
	out_le16(&cp->req_count, count);
	out_le32(&cp->phy_addr, addr);
	out_le32(&cp->cmd_dep, cmd_dep);
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->res_count, 0);
}
219
/* Write a 16-bit value to a BMAC chip register (little-endian MMIO at
 * dev->base_addr + reg_offset). */
static inline
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
{
	out_le16((void *)dev->base_addr + reg_offset, data);
}
225
226
/* Read a 16-bit BMAC chip register (little-endian MMIO). */
static inline
volatile unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
{
	return in_le16((void *)dev->base_addr + reg_offset);
}
232
/*
 * Quiesce both DMA channels, then ask the platform feature layer to
 * power up / reset the BMAC cell.  DMA must be stopped first so no
 * descriptor fetches hit the chip while it is being reset.
 */
static void
bmac_enable_and_reset_chip(struct net_device *dev)
{
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	volatile struct dbdma_regs *rd = bp->rx_dma;
	volatile struct dbdma_regs *td = bp->tx_dma;

	if (rd)
		dbdma_reset(rd);
	if (td)
		dbdma_reset(td);

	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 1);
}
247
248#define MIFDELAY	udelay(10)
249
/*
 * Clock nb bits in from the PHY over the bit-banged MIF interface,
 * MSB first.  Each bit is sampled while the clock (MIFCSR bit 0) is low;
 * data-in is MIFCSR bit 3.  A trailing idle clock cycle is issued after
 * the last bit.
 */
static unsigned int
bmac_mif_readbits(struct net_device *dev, int nb)
{
	unsigned int val = 0;

	while (--nb >= 0) {
		bmwrite(dev, MIFCSR, 0);	/* clock low */
		MIFDELAY;
		if (bmread(dev, MIFCSR) & 8)	/* sample data-in */
			val |= 1 << nb;
		bmwrite(dev, MIFCSR, 1);	/* clock high */
		MIFDELAY;
	}
	bmwrite(dev, MIFCSR, 0);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	return val;
}
269
/*
 * Clock the low nb bits of val out to the PHY, MSB first.  Data-out is
 * MIFCSR bit 2 (so 6 = data high, 4 = data low, with output enabled);
 * bit 0 is toggled low->high to clock each bit.
 */
static void
bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
{
	int b;

	while (--nb >= 0) {
		b = (val & (1 << nb))? 6: 4;
		bmwrite(dev, MIFCSR, b);	/* present data, clock low */
		MIFDELAY;
		bmwrite(dev, MIFCSR, b|1);	/* clock high */
		MIFDELAY;
	}
}
283
/*
 * Read one 16-bit PHY register over the bit-banged MIF.  Follows the MII
 * frame format: 32-bit preamble of ones, start+read opcode (0110),
 * 10 address bits (PHY addr + register), then 17 clocked-in bits
 * (turnaround bit plus 16 data bits).
 */
static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
	unsigned int val;

	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);	/* preamble */
	bmac_mif_writebits(dev, 6, 4);		/* start + read op */
	bmac_mif_writebits(dev, addr, 10);	/* phy + reg address */
	bmwrite(dev, MIFCSR, 2);		/* release data line for turnaround */
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	val = bmac_mif_readbits(dev, 17);	/* turnaround + 16 data bits */
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	return val;
}
303
/*
 * Write one 16-bit PHY register over the bit-banged MIF: preamble,
 * start+write opcode (0101), 10 address bits, turnaround (10), then the
 * 16 data bits and two idle bits.
 */
static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);	/* preamble */
	bmac_mif_writebits(dev, 5, 4);		/* start + write op */
	bmac_mif_writebits(dev, addr, 10);	/* phy + reg address */
	bmac_mif_writebits(dev, 2, 2);		/* turnaround */
	bmac_mif_writebits(dev, val, 16);	/* data */
	bmac_mif_writebits(dev, 3, 2);
}
316
/*
 * Bring the MAC out of reset and program its registers to a known state:
 * reset rx/tx, configure the transceiver interface (non-bmac+ only),
 * clear counters, enable the FIFOs, clear the hash filter, load the
 * station address, and enable receive config + normal interrupts.
 * Register write order follows the hardware bring-up sequence and must
 * not be rearranged.
 */
static void
bmac_init_registers(struct net_device *dev)
{
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	volatile unsigned short regValue;
	unsigned short *pWord16;
	int i;

	/* XXDEBUG(("bmac: enter init_registers\n")); */

	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, TXRST, TxResetBit);

	/* poll (up to 100 * 10ms) for the tx reset bit to self-clear */
	i = 100;
	do {
		--i;
		udelay(10000);
		regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
	} while ((regValue & TxResetBit) && i > 0);

	/* plain bmac: select the serial (non-MII) transceiver interface */
	if (!bp->is_bmac_plus) {
		regValue = bmread(dev, XCVRIF);
		regValue |= ClkBit | SerialMode | COLActiveLow;
		bmwrite(dev, XCVRIF, regValue);
		udelay(10000);
	}

	bmwrite(dev, RSEED, (unsigned short)0x1968);	/* backoff random seed */

	regValue = bmread(dev, XIFC);
	regValue |= TxOutputEnable;
	bmwrite(dev, XIFC, regValue);

	bmread(dev, PAREG);

	/* set collision counters to 0 */
	bmwrite(dev, NCCNT, 0);
	bmwrite(dev, NTCNT, 0);
	bmwrite(dev, EXCNT, 0);
	bmwrite(dev, LTCNT, 0);

	/* set rx counters to 0 */
	bmwrite(dev, FRCNT, 0);
	bmwrite(dev, LECNT, 0);
	bmwrite(dev, AECNT, 0);
	bmwrite(dev, FECNT, 0);
	bmwrite(dev, RXCV, 0);

	/* set tx fifo information */
	bmwrite(dev, TXTH, 4);	/* 4 octets before tx starts */

	bmwrite(dev, TXFIFOCSR, 0);	/* first disable txFIFO */
	bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

	/* set rx fifo information */
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

	//bmwrite(dev, TXCFG, TxMACEnable);	       	/* TxNeverGiveUp maybe later */
	bmread(dev, STATUS);		/* read it just to clear it */

	/* zero out the chip Hash Filter registers */
	for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]); 	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]); 	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]); 	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]); 	/* bits 63 - 48 */

	/* load the station address as three 16-bit words */
	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

	bmwrite(dev, INTDISABLE, EnableNormal);

	return;
}
396
397
398
/*
 * Start packet flow: kick the rx DMA channel, then enable the MAC's
 * transmitter and receiver (preserving any other config bits, e.g.
 * promiscuous mode, already set in TXCFG/RXCFG).
 */
static void
bmac_start_chip(struct net_device *dev)
{
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	volatile struct dbdma_regs *rd = bp->rx_dma;
	unsigned short	oldConfig;

	/* enable rx dma channel */
	dbdma_continue(rd);

	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	/* turn on rx plus any other bits already on (promiscuous possibly) */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	udelay(20000);	/* let the MAC settle before traffic starts */
}
417
/*
 * Dump the 32 PHY registers for debugging and, on bmac+, make sure
 * autonegotiation is enabled: if the advertisement register (4) does not
 * match our capabilities or autoneg (control bit 0x1000) is off, rewrite
 * the advertisement and restart autoneg (0x1200 = enable + restart).
 */
static void
bmac_init_phy(struct net_device *dev)
{
	unsigned int addr;
	struct bmac_data *bp = (struct bmac_data *) dev->priv;

	printk(KERN_DEBUG "phy registers:");
	for (addr = 0; addr < 32; ++addr) {
		if ((addr & 7) == 0)
			printk("\n" KERN_DEBUG);	/* 8 registers per output line */
		printk(" %.4x", bmac_mif_read(dev, addr));
	}
	printk("\n");
	if (bp->is_bmac_plus) {
		unsigned int capable, ctrl;

		ctrl = bmac_mif_read(dev, 0);
		/* map status-register ability bits into advertisement format */
		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
		if (bmac_mif_read(dev, 4) != capable
		    || (ctrl & 0x1000) == 0) {
			bmac_mif_write(dev, 4, capable);
			bmac_mif_write(dev, 0, 0x1200);	/* autoneg enable + restart */
		} else
			bmac_mif_write(dev, 0, 0x1000);	/* autoneg enable */
	}
}
444
/* Full chip init: PHY first, then the MAC registers. */
static void
bmac_init_chip(struct net_device *dev)
{
	bmac_init_phy(dev);
	bmac_init_registers(dev);
}
451
452#ifdef CONFIG_PMAC_PBOOK
/*
 * PMU sleep notifier.  On PBOOK_SLEEP_NOW: detach the interface, cancel
 * the tx watchdog, mask our interrupts, disable the MAC and both DMA
 * channels, free all ring buffers, and power the cell down.  On
 * PBOOK_WAKE: re-init the chip (if the device was open), unmask the
 * interrupts and reattach.  Always reports PBOOK_SLEEP_OK.
 */
static int
bmac_sleep_notify(struct pmu_sleep_notifier *self, int when)
{
	struct bmac_data *bp;
	unsigned long flags;
	unsigned short config;
	struct net_device* dev = bmac_devs;
	int i;

	if (bmac_devs == 0)
		return PBOOK_SLEEP_OK;

	bp = (struct bmac_data *) dev->priv;

	switch (when) {
	case PBOOK_SLEEP_REQUEST:
		break;
	case PBOOK_SLEEP_REJECT:
		break;
	case PBOOK_SLEEP_NOW:
		netif_device_detach(dev);
		/* prolly should wait for dma to finish & turn off the chip */
		save_flags(flags); cli();
		if (bp->timeout_active) {
			del_timer(&bp->tx_timeout);
			bp->timeout_active = 0;
		}
		disable_irq(dev->irq);
		disable_irq(bp->tx_dma_intr);
		disable_irq(bp->rx_dma_intr);
		bp->sleeping = 1;
		restore_flags(flags);
		if (bp->opened) {
			volatile struct dbdma_regs *rd = bp->rx_dma;
			volatile struct dbdma_regs *td = bp->tx_dma;

			/* disable the MAC before stopping DMA */
			config = bmread(dev, RXCFG);
			bmwrite(dev, RXCFG, (config & ~RxMACEnable));
			config = bmread(dev, TXCFG);
			bmwrite(dev, TXCFG, (config & ~TxMACEnable));
			bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
			/* disable rx and tx dma */
			st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
			st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
			/* free some skb's */
			for (i=0; i<N_RX_RING; i++) {
				if (bp->rx_bufs[i] != NULL) {
					dev_kfree_skb(bp->rx_bufs[i]);
					bp->rx_bufs[i] = NULL;
				}
			}
			for (i = 0; i<N_TX_RING; i++) {
				if (bp->tx_bufs[i] != NULL) {
					dev_kfree_skb(bp->tx_bufs[i]);
					bp->tx_bufs[i] = NULL;
				}
			}
		}
		/* power the cell down */
		pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);
		break;
	case PBOOK_WAKE:
		/* see if this is enough */
		if (bp->opened)
			bmac_reset_and_enable(dev);
		enable_irq(dev->irq);
		enable_irq(bp->tx_dma_intr);
		enable_irq(bp->rx_dma_intr);
		netif_device_attach(dev);
		break;
	}
	return PBOOK_SLEEP_OK;
}
525#endif
526
/*
 * Change the station address.  Copies the 6-byte MAC from addr into
 * dev->dev_addr and loads it into the chip's MADD registers as three
 * 16-bit words, with interrupts disabled around the update.
 * Always returns 0.
 */
static int bmac_set_address(struct net_device *dev, void *addr)
{
	unsigned char *p = addr;
	unsigned short *pWord16;
	unsigned long flags;
	int i;

	XXDEBUG(("bmac: enter set_address\n"));
	save_flags(flags); cli();

	for (i = 0; i < 6; ++i) {
		dev->dev_addr[i] = p[i];
	}
	/* load up the hardware address */
	pWord16  = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	restore_flags(flags);
	XXDEBUG(("bmac: exit set_address\n"));
	return 0;
}
550
/*
 * (Re)arm the transmit watchdog to fire TX_TIMEOUT jiffies from now,
 * cancelling any previously pending instance first.  Runs with
 * interrupts disabled to keep timer state and timeout_active coherent.
 */
static inline void bmac_set_timeout(struct net_device *dev)
{
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	unsigned long flags;

	save_flags(flags);
	cli();
	if (bp->timeout_active)
		del_timer(&bp->tx_timeout);
	bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	bp->tx_timeout.function = bmac_tx_timeout;
	bp->tx_timeout.data = (unsigned long) dev;
	add_timer(&bp->tx_timeout);
	bp->timeout_active = 1;
	restore_flags(flags);
}
567
/*
 * Build the DBDMA command that transmits skb: a single OUTPUT_LAST
 * transfer of the whole packet, interrupting on completion.
 */
static void
bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	void *vaddr;
	unsigned long baddr;
	unsigned long len;

	len = skb->len;
	vaddr = skb->data;
	baddr = virt_to_bus(vaddr);	/* DBDMA needs a bus address */

	dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
}
581
/*
 * Build the DBDMA command that receives into skb->data.  If skb is NULL
 * (allocation failed), point the command at the shared emergency buffer
 * so the ring keeps running; that packet's data is simply discarded.
 */
static void
bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;

	dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
		     virt_to_bus(addr), 0);
}
590
/*
 * Bit-reverse one byte of an ethernet hardware address: bit 0 of the
 * input becomes bit 7 of the result and vice versa.
 */
static unsigned char
bitrev(unsigned char b)
{
	unsigned int result = 0;
	int bit;

	for (bit = 0; bit < 8; ++bit) {
		result = (result << 1) | (b & 1);
		b >>= 1;
	}
	return result;
}
601
602
/*
 * Reset the transmit ring: clear all N_TX_RING+1 commands, reset the
 * fill/empty indices, append a branch-back-to-start command so the
 * command list forms a ring, then reset the tx DMA channel and point it
 * at the list.  Does not start the channel.
 */
static void
bmac_init_tx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs *td = bp->tx_dma;

	memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

	bp->tx_empty = 0;
	bp->tx_fill = 0;
	bp->tx_fullup = 0;

	/* put a branch at the end of the tx command list */
	dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

	/* reset tx dma */
	dbdma_reset(td);
	out_le32(&td->wait_sel, 0x00200020);	/* wait-condition select -- TODO confirm meaning */
	out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
}
623
/*
 * Reset the receive ring: allocate an skb for every slot that lacks one
 * (a failed allocation leaves the slot on the emergency buffer, handled
 * by bmac_construct_rxbuff), build the receive commands plus the
 * branch-back command, then reset the rx DMA channel and point it at the
 * list.  Always returns 1.
 */
static int
bmac_init_rx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs *rd = bp->rx_dma;
	int i;
	struct sk_buff *skb;

	/* initialize list of sk_buffs for receiving and set up recv dma */
	memset((char *)bp->rx_cmds, 0,
	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
	for (i = 0; i < N_RX_RING; i++) {
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(skb, 2);	/* align IP header on 16-byte boundary */
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
	}

	bp->rx_empty = 0;
	bp->rx_fill = i;

	/* Put a branch back to the beginning of the receive command list */
	dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

	/* start rx dma */
	dbdma_reset(rd);
	out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));

	return 1;
}
656
657
/*
 * Queue skb on the transmit ring and kick the tx DMA channel.
 * Returns 0 on success; returns -1 (after stopping the queue and setting
 * tx_fullup) when the ring has no free slot.  One slot is always kept as
 * a STOP command so the DMA engine pauses at the ring's current end.
 */
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	volatile struct dbdma_regs *td = bp->tx_dma;
	int i;

	/* see if there's a free slot in the tx ring */
	/* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
	/* 	     bp->tx_empty, bp->tx_fill)); */
	i = bp->tx_fill + 1;
	if (i >= N_TX_RING)
		i = 0;
	if (i == bp->tx_empty) {
		netif_stop_queue(dev);
		bp->tx_fullup = 1;
		XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
		return -1;		/* can't take it at the moment */
	}

	/* place the STOP in the next slot before arming this one */
	dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

	bp->tx_bufs[bp->tx_fill] = skb;
	bp->tx_fill = i;

	bp->stats.tx_bytes += skb->len;

	dbdma_continue(td);

	return 0;
}
690
691static int rxintcount;
692
/*
 * Receive DMA interrupt.  Harvests completed ring slots starting at
 * rx_empty: each finished packet (runts are counted as length errors and
 * dropped) is passed to the stack via netif_rx(), the slot gets a fresh
 * skb (or the emergency buffer on allocation failure), and its DBDMA
 * command is re-armed.  Finally the channel is kicked to keep receiving.
 */
static void bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	volatile struct dbdma_regs *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	int i, nb, stat;
	struct sk_buff *skb;
	unsigned int residual;
	int last;
	unsigned long flags;

	save_flags(flags); cli();

	if (++rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr\n"));
	}

	last = -1;
	i = bp->rx_empty;

	while (1) {
		cp = &bp->rx_cmds[i];
		stat = ld_le16(&cp->xfer_status);
		residual = ld_le16(&cp->res_count);
		if ((stat & ACTIVE) == 0)
			break;	/* this slot hasn't completed yet */
		/* bytes received = buffer size - residual - 2-byte pad */
		nb = RX_BUFLEN - residual - 2;
		if (nb < (ETHERMINPACKET - ETHERCRC)) {
			skb = NULL;	/* runt: drop, keep the slot's buffer */
			bp->stats.rx_length_errors++;
			bp->stats.rx_errors++;
		} else {
			skb = bp->rx_bufs[i];
			bp->rx_bufs[i] = NULL;
		}
		if (skb != NULL) {
			nb -= ETHERCRC;	/* chip left the CRC in the buffer (RxCRCNoStrip) */
			skb_put(skb, nb);
			skb->dev = dev;
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			++bp->stats.rx_packets;
			bp->stats.rx_bytes += nb;
		} else {
			++bp->stats.rx_dropped;
		}
		/* NOTE(review): last_rx was already set in the success path above;
		 * this unconditional store also stamps dropped packets. */
		dev->last_rx = jiffies;
		/* re-stock the slot; NULL skb falls back to the emergency buffer */
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(bp->rx_bufs[i], 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
		st_le16(&cp->res_count, 0);
		st_le16(&cp->xfer_status, 0);
		last = i;
		if (++i >= N_RX_RING) i = 0;
	}

	if (last != -1) {
		bp->rx_fill = last;
		bp->rx_empty = i;
	}

	restore_flags(flags);

	dbdma_continue(rd);	/* keep the channel running */

	if (rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr done\n"));
	}
}
767
768static int txintcount;
769
/*
 * Transmit DMA interrupt.  Reaps completed slots from tx_empty forward,
 * freeing each transmitted skb and re-waking the queue, then restarts
 * any backlog via bmac_start().  Because the DBDMA engine may not have
 * written the status word yet, a slot with a clear ACTIVE bit is only
 * treated as incomplete if the channel's command pointer is still on it.
 */
static void bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	volatile struct dbdma_cmd *cp;
	int stat;
	unsigned long flags;

	save_flags(flags); cli();

	if (txintcount++ < 10) {
		XXDEBUG(("bmac_txdma_intr\n"));
	}

	/*     del_timer(&bp->tx_timeout); */
	/*     bp->timeout_active = 0; */

	while (1) {
		cp = &bp->tx_cmds[bp->tx_empty];
		stat = ld_le16(&cp->xfer_status);
		if (txintcount < 10) {
			XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
		}
		if (!(stat & ACTIVE)) {
			/*
			 * status field might not have been filled by DBDMA
			 */
			if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
				break;	/* engine still on this command: not done */
		}

		if (bp->tx_bufs[bp->tx_empty]) {
			++bp->stats.tx_packets;
			dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
		}
		bp->tx_bufs[bp->tx_empty] = NULL;
		bp->tx_fullup = 0;
		netif_wake_queue(dev);	/* a slot just freed up */
		if (++bp->tx_empty >= N_TX_RING)
			bp->tx_empty = 0;
		if (bp->tx_empty == bp->tx_fill)
			break;	/* ring drained */
	}

	restore_flags(flags);

	if (txintcount < 10) {
		XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
	}

	bmac_start(dev);	/* push any queued packets into the freed slots */
}
822
823static struct net_device_stats *bmac_stats(struct net_device *dev)
824{
825	struct bmac_data *p = (struct bmac_data *) dev->priv;
826
827	return &p->stats;
828}
829
830#ifndef SUNHME_MULTICAST
831/* Real fast bit-reversal algorithm, 6-bit values */
832static int reverse6[64] = {
833	0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
834	0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
835	0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
836	0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
837	0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
838	0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
839	0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
840	0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
841};
842
843static unsigned int
844crc416(unsigned int curval, unsigned short nxtval)
845{
846	register unsigned int counter, cur = curval, next = nxtval;
847	register int high_crc_set, low_data_set;
848
849	/* Swap bytes */
850	next = ((next & 0x00FF) << 8) | (next >> 8);
851
852	/* Compute bit-by-bit */
853	for (counter = 0; counter < 16; ++counter) {
854		/* is high CRC bit set? */
855		if ((cur & 0x80000000) == 0) high_crc_set = 0;
856		else high_crc_set = 1;
857
858		cur = cur << 1;
859
860		if ((next & 0x0001) == 0) low_data_set = 0;
861		else low_data_set = 1;
862
863		next = next >> 1;
864
865		/* do the XOR */
866		if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
867	}
868	return cur;
869}
870
/*
 * Compute the CRC-32 of a 6-byte ethernet address, taken as three
 * 16-bit words, starting from the all-ones seed.
 */
static unsigned int
bmac_crc(unsigned short *address)
{
	unsigned int crc = 0xffffffff;
	int word;

	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	for (word = 0; word < 3; ++word)
		crc = crc416(crc, address[word]);
	return crc;
}
883
884/*
885 * Add requested mcast addr to BMac's hash table filter.
886 *
887 */
888
889static void
890bmac_addhash(struct bmac_data *bp, unsigned char *addr)
891{
892	unsigned int	 crc;
893	unsigned short	 mask;
894
895	if (!(*addr)) return;
896	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
897	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
898	if (bp->hash_use_count[crc]++) return; /* This bit is already set */
899	mask = crc % 16;
900	mask = (unsigned char)1 << mask;
901	bp->hash_use_count[crc/16] |= mask;
902}
903
904static void
905bmac_removehash(struct bmac_data *bp, unsigned char *addr)
906{
907	unsigned int crc;
908	unsigned char mask;
909
910	/* Now, delete the address from the filter copy, as indicated */
911	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
912	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
913	if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
914	if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
915	mask = crc % 16;
916	mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
917	bp->hash_table_mask[crc/16] &= mask;
918}
919
920/*
921 * Sync the adapter with the software copy of the multicast mask
922 *  (logical address filter).
923 */
924
/*
 * Disable the receiver and busy-wait until the chip confirms the
 * RxMACEnable bit has cleared.
 */
static void
bmac_rx_off(struct net_device *dev)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg &= ~RxMACEnable;
	bmwrite(dev, RXCFG, rx_cfg);
	do {
		rx_cfg = bmread(dev, RXCFG);
	}  while (rx_cfg & RxMACEnable);	/* NOTE(review): no timeout on this poll */
}
937
/*
 * Enable the receiver with the requested hash-filter and promiscuous
 * settings.  Resets the rx side and re-enables the rx FIFO before
 * writing the final config.  Returns the RXCFG value written.
 */
unsigned short
bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg |= RxMACEnable;
	if (hash_enable) rx_cfg |= RxHashFilterEnable;
	else rx_cfg &= ~RxHashFilterEnable;
	if (promisc_enable) rx_cfg |= RxPromiscEnable;
	else rx_cfg &= ~RxPromiscEnable;
	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
	bmwrite(dev, RXCFG, rx_cfg );
	return rx_cfg;
}
955
/* Push the software copy of the 64-bit hash filter into the chip's four
 * BHASH registers (BHASH3 holds the low word, BHASH0 the high word). */
static void
bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
{
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
}
964
965
966/* Set or clear the multicast filter for this adaptor.
967    num_addrs == -1	Promiscuous mode, receive all packets
968    num_addrs == 0	Normal mode, clear multicast list
969    num_addrs > 0	Multicast mode, receive normal and MC packets, and do
970			best-effort filtering.
971 */
/* Set or clear the multicast filter for this adaptor.
    num_addrs == -1	Promiscuous mode, receive all packets
    num_addrs == 0	Normal mode, clear multicast list
    num_addrs > 0	Multicast mode, receive normal and MC packets, and do
			best-effort filtering.
   (This variant is compiled out while SUNHME_MULTICAST is defined.)
 */
static void bmac_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *dmi;
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	int num_addrs = dev->mc_count;
	unsigned short rx_cfg;
	int i;

	if (bp->sleeping)
		return;	/* chip is powered down; filter is reloaded on wake */

	XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));

	if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		/* accept all multicast: saturate the hash filter */
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
		bmac_update_hash_table_mask(dev, bp);
		rx_cfg = bmac_rx_on(dev, 1, 0);
		/* NOTE(review): XXDEBUG format has %08x but no rx_cfg argument
		 * (harmless -- XXDEBUG expands to nothing) */
		XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n"));
	} else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
		/* NOTE(review): this RXCFG write is redundant -- bmac_rx_on()
		 * below sets RxPromiscEnable again */
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
		rx_cfg = bmac_rx_on(dev, 0, 1);
		XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
	} else {
		/* rebuild the filter from scratch for the current list */
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
		for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
		if (num_addrs == 0) {
			rx_cfg = bmac_rx_on(dev, 0, 0);
			XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
		} else {
			for (dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next)
				bmac_addhash(bp, dmi->dmi_addr);
			bmac_update_hash_table_mask(dev, bp);
			rx_cfg = bmac_rx_on(dev, 1, 0);
			XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
		}
	}
	/* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
}
1012#else /* ifdef SUNHME_MULTICAST */
1013
1014/* The version of set_multicast below was lifted from sunhme.c */
1015
/*
 * Set the multicast filter (variant lifted from sunhme.c; this is the
 * one compiled in, since SUNHME_MULTICAST is defined above).  ALLMULTI
 * or >64 addresses saturates the hash registers; IFF_PROMISC just sets
 * the promiscuous bit; otherwise the hash table is rebuilt from the
 * mc_list using the standard little-endian ethernet CRC.
 */
static void bmac_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *dmi = dev->mc_list;
	char *addrs;
	int i, j, bit, byte;
	unsigned short rx_cfg;
	u32 crc;

	if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		/* accept all multicast */
		bmwrite(dev, BHASH0, 0xffff);
		bmwrite(dev, BHASH1, 0xffff);
		bmwrite(dev, BHASH2, 0xffff);
		bmwrite(dev, BHASH3, 0xffff);
	} else if(dev->flags & IFF_PROMISC) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
	} else {
		u16 hash_table[4];

		/* leaving promiscuous mode: clear the bit */
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg &= ~RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);

		for(i = 0; i < 4; i++) hash_table[i] = 0;

		for(i = 0; i < dev->mc_count; i++) {
			addrs = dmi->dmi_addr;
			dmi = dmi->next;

			if(!(*addrs & 1))
				continue;	/* skip non-multicast addresses */

			/* top 6 bits of the little-endian CRC pick the filter bit */
			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		bmwrite(dev, BHASH0, hash_table[0]);
		bmwrite(dev, BHASH1, hash_table[1]);
		bmwrite(dev, BHASH2, hash_table[2]);
		bmwrite(dev, BHASH3, hash_table[3]);
	}
}
1059#endif /* SUNHME_MULTICAST */
1060
1061static int miscintcount;
1062
/*
 * Miscellaneous chip interrupt: read (and thereby clear) the STATUS
 * register and translate its error bits into net_device_stats counters.
 */
static void bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = (struct bmac_data *)dev->priv;
	unsigned int status = bmread(dev, STATUS);	/* read clears the latched bits */
	if (miscintcount++ < 10) {
		XXDEBUG(("bmac_misc_intr\n"));
	}
	/* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
	/*     bmac_txdma_intr_inner(irq, dev_id, regs); */
	/*   if (status & FrameReceived) bp->stats.rx_dropped++; */
	if (status & RxErrorMask) bp->stats.rx_errors++;
	if (status & RxCRCCntExp) bp->stats.rx_crc_errors++;
	if (status & RxLenCntExp) bp->stats.rx_length_errors++;
	if (status & RxOverFlow) bp->stats.rx_over_errors++;
	if (status & RxAlignCntExp) bp->stats.rx_frame_errors++;

	/*   if (status & FrameSent) bp->stats.tx_dropped++; */
	if (status & TxErrorMask) bp->stats.tx_errors++;
	if (status & TxUnderrun) bp->stats.tx_fifo_errors++;
	if (status & TxNormalCollExp) bp->stats.collisions++;
}
1085
1086/*
1087 * Procedure for reading EEPROM
1088 */
1089#define SROMAddressLength	5
1090#define DataInOn		0x0008
1091#define DataInOff		0x0000
1092#define Clk			0x0002
1093#define ChipSelect		0x0001
1094#define SDIShiftCount		3
1095#define SD0ShiftCount		2
1096#define	DelayValue		1000	/* number of microseconds */
1097#define SROMStartOffset		10	/* this is in words */
1098#define SROMReadCount		3	/* number of words to read from SROM */
1099#define SROMAddressBits		6
1100#define EnetAddressOffset	20
1101
/*
 * Clock one data bit out of the SROM: raise the clock with chip select
 * held, sample the data-out line (bit SD0ShiftCount), drop the clock.
 * Returns the sampled bit (0 or 1).
 */
static unsigned char
bmac_clock_out_bit(struct net_device *dev)
{
	unsigned short         data;
	unsigned short         val;

	bmwrite(dev, SROMCSR, ChipSelect | Clk);
	udelay(DelayValue);

	data = bmread(dev, SROMCSR);
	udelay(DelayValue);
	val = (data >> SD0ShiftCount) & 1;

	bmwrite(dev, SROMCSR, ChipSelect);
	udelay(DelayValue);

	return val;
}
1120
1121static void
1122bmac_clock_in_bit(struct net_device *dev, unsigned int val)
1123{
1124	unsigned short data;
1125
1126	if (val != 0 && val != 1) return;
1127
1128	data = (val << SDIShiftCount);
1129	bmwrite(dev, SROMCSR, data | ChipSelect  );
1130	udelay(DelayValue);
1131
1132	bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
1133	udelay(DelayValue);
1134
1135	bmwrite(dev, SROMCSR, data | ChipSelect);
1136	udelay(DelayValue);
1137}
1138
1139static void
1140reset_and_select_srom(struct net_device *dev)
1141{
1142	/* first reset */
1143	bmwrite(dev, SROMCSR, 0);
1144	udelay(DelayValue);
1145
1146	/* send it the read command (110) */
1147	bmac_clock_in_bit(dev, 1);
1148	bmac_clock_in_bit(dev, 1);
1149	bmac_clock_in_bit(dev, 0);
1150}
1151
1152static unsigned short
1153read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
1154{
1155	unsigned short data, val;
1156	int i;
1157
1158	/* send out the address we want to read from */
1159	for (i = 0; i < addr_len; i++)	{
1160		val = addr >> (addr_len-i-1);
1161		bmac_clock_in_bit(dev, val & 1);
1162	}
1163
1164	/* Now read in the 16-bit data */
1165	data = 0;
1166	for (i = 0; i < 16; i++)	{
1167		val = bmac_clock_out_bit(dev);
1168		data <<= 1;
1169		data |= val;
1170	}
1171	bmwrite(dev, SROMCSR, 0);
1172
1173	return data;
1174}
1175
1176/*
1177 * It looks like Cogent and SMC use different methods for calculating
1178 * checksums. What a pain..
1179 */
1180
1181static int
1182bmac_verify_checksum(struct net_device *dev)
1183{
1184	unsigned short data, storedCS;
1185
1186	reset_and_select_srom(dev);
1187	data = read_srom(dev, 3, SROMAddressBits);
1188	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);
1189
1190	return 0;
1191}
1192
1193
1194static void
1195bmac_get_station_address(struct net_device *dev, unsigned char *ea)
1196{
1197	int i;
1198	unsigned short data;
1199
1200	for (i = 0; i < 6; i++)
1201		{
1202			reset_and_select_srom(dev);
1203			data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
1204			ea[2*i]   = bitrev(data & 0x0ff);
1205			ea[2*i+1] = bitrev((data >> 8) & 0x0ff);
1206		}
1207}
1208
/*
 * Fully reinitialise the chip: reset it, rebuild both DMA rings,
 * reprogram and start the chip, then unmask the normal interrupt
 * sources.  Runs with interrupts disabled so the rings and hardware
 * state stay consistent.
 */
static void bmac_reset_and_enable(struct net_device *dev)
{
	struct bmac_data *bp = dev->priv;
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	save_flags(flags); cli();
	bmac_enable_and_reset_chip(dev);
	bmac_init_tx_ring(bp);
	bmac_init_rx_ring(bp);
	bmac_init_chip(dev);
	bmac_start_chip(dev);
	bmwrite(dev, INTDISABLE, EnableNormal);
	bp->sleeping = 0;

	/*
	 * It seems that the bmac can't receive until it's transmitted
	 * a packet.  So we give it a dummy packet to transmit.
	 */
	skb = dev_alloc_skb(ETHERMINPACKET);
	if (skb != NULL) {
		data = skb_put(skb, ETHERMINPACKET);
		memset(data, 0, ETHERMINPACKET);
		/* Destination and source both set to our own address. */
		memcpy(data, dev->dev_addr, 6);
		memcpy(data+6, dev->dev_addr, 6);
		bmac_transmit_packet(skb, dev);
	}
	restore_flags(flags);
}
1239
1240static int __init bmac_probe(void)
1241{
1242	struct device_node *bmac;
1243
1244	MOD_INC_USE_COUNT;
1245
1246	for (bmac = find_devices("bmac"); bmac != 0; bmac = bmac->next)
1247		bmac_probe1(bmac, 0);
1248	for (bmac = find_compatible_devices("network", "bmac+"); bmac != 0;
1249	     bmac = bmac->next)
1250		bmac_probe1(bmac, 1);
1251
1252	if (bmac_devs != 0) {
1253		proc_net_create ("bmac", 0, bmac_proc_info);
1254#ifdef CONFIG_PMAC_PBOOK
1255		pmu_register_sleep_notifier(&bmac_sleep_notifier);
1256#endif
1257	}
1258
1259	MOD_DEC_USE_COUNT;
1260
1261	return bmac_devs? 0: -ENODEV;
1262}
1263
1264static void __init bmac_probe1(struct device_node *bmac, int is_bmac_plus)
1265{
1266	int j, rev, ret;
1267	struct bmac_data *bp;
1268	unsigned char *addr;
1269	struct net_device *dev;
1270
1271	if (bmac->n_addrs != 3 || bmac->n_intrs != 3) {
1272		printk(KERN_ERR "can't use BMAC %s: need 3 addrs and 3 intrs\n",
1273		       bmac->full_name);
1274		return;
1275	}
1276	addr = get_property(bmac, "mac-address", NULL);
1277	if (addr == NULL) {
1278		addr = get_property(bmac, "local-mac-address", NULL);
1279		if (addr == NULL) {
1280			printk(KERN_ERR "Can't get mac-address for BMAC %s\n",
1281			       bmac->full_name);
1282			return;
1283		}
1284	}
1285
1286	if (bmac_emergency_rxbuf == NULL) {
1287		bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
1288		if (bmac_emergency_rxbuf == NULL) {
1289			printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
1290			return;
1291		}
1292	}
1293
1294	dev = init_etherdev(NULL, PRIV_BYTES);
1295	if (!dev) {
1296		printk(KERN_ERR "init_etherdev failed, out of memory for BMAC %s\n",
1297		       bmac->full_name);
1298		return;
1299	}
1300	bp = (struct bmac_data *) dev->priv;
1301	SET_MODULE_OWNER(dev);
1302	bp->node = bmac;
1303
1304	if (!request_OF_resource(bmac, 0, " (bmac)")) {
1305		printk(KERN_ERR "BMAC: can't request IO resource !\n");
1306		goto err_out;
1307	}
1308	if (!request_OF_resource(bmac, 1, " (bmac tx dma)")) {
1309		printk(KERN_ERR "BMAC: can't request TX DMA resource !\n");
1310		goto err_out;
1311	}
1312
1313	if (!request_OF_resource(bmac, 2, " (bmac rx dma)")) {
1314		printk(KERN_ERR "BMAC: can't request RX DMA resource !\n");
1315		goto err_out;
1316	}
1317	dev->base_addr = (unsigned long)
1318		ioremap(bmac->addrs[0].address, bmac->addrs[0].size);
1319	if (!dev->base_addr)
1320		goto err_out;
1321	dev->irq = bmac->intrs[0].line;
1322
1323	bmac_enable_and_reset_chip(dev);
1324	bmwrite(dev, INTDISABLE, DisableAll);
1325
1326	printk(KERN_INFO "%s: BMAC%s at", dev->name, (is_bmac_plus? "+": ""));
1327	rev = addr[0] == 0 && addr[1] == 0xA0;
1328	for (j = 0; j < 6; ++j) {
1329		dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];
1330		printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
1331	}
1332	XXDEBUG((", base_addr=%#0lx", dev->base_addr));
1333	printk("\n");
1334
1335	/* Enable chip without interrupts for now */
1336	bmac_enable_and_reset_chip(dev);
1337	bmwrite(dev, INTDISABLE, DisableAll);
1338
1339	dev->open = bmac_open;
1340	dev->stop = bmac_close;
1341	dev->hard_start_xmit = bmac_output;
1342	dev->get_stats = bmac_stats;
1343	dev->set_multicast_list = bmac_set_multicast;
1344	dev->set_mac_address = bmac_set_address;
1345
1346	bmac_get_station_address(dev, addr);
1347	if (bmac_verify_checksum(dev) != 0)
1348		goto err_out_iounmap;
1349
1350	bp->is_bmac_plus = is_bmac_plus;
1351	bp->tx_dma = (volatile struct dbdma_regs *)
1352		ioremap(bmac->addrs[1].address, bmac->addrs[1].size);
1353	if (!bp->tx_dma)
1354		goto err_out_iounmap;
1355	bp->tx_dma_intr = bmac->intrs[1].line;
1356	bp->rx_dma = (volatile struct dbdma_regs *)
1357		ioremap(bmac->addrs[2].address, bmac->addrs[2].size);
1358	if (!bp->rx_dma)
1359		goto err_out_iounmap_tx;
1360	bp->rx_dma_intr = bmac->intrs[2].line;
1361
1362	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
1363	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;
1364
1365	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
1366	skb_queue_head_init(bp->queue);
1367
1368	memset((char *) bp->tx_cmds, 0,
1369	       (N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
1370	/*     init_timer(&bp->tx_timeout); */
1371	/*     bp->timeout_active = 0; */
1372
1373	ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
1374	if (ret) {
1375		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
1376		goto err_out_iounmap_rx;
1377	}
1378	ret = request_irq(bmac->intrs[1].line, bmac_txdma_intr, 0, "BMAC-txdma", dev);
1379	if (ret) {
1380		printk(KERN_ERR "BMAC: can't get irq %d\n", bmac->intrs[1].line);
1381		goto err_out_irq0;
1382	}
1383	ret = request_irq(bmac->intrs[2].line, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
1384	if (ret) {
1385		printk(KERN_ERR "BMAC: can't get irq %d\n", bmac->intrs[2].line);
1386		goto err_out_irq1;
1387	}
1388
1389	/* Mask chip interrupts and disable chip, will be
1390	 * re-enabled on open()
1391	 */
1392	disable_irq(dev->irq);
1393	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);
1394
1395	bp->next_bmac = bmac_devs;
1396	bmac_devs = dev;
1397	return;
1398
1399err_out_irq1:
1400	free_irq(bmac->intrs[1].line, dev);
1401err_out_irq0:
1402	free_irq(dev->irq, dev);
1403err_out_iounmap_rx:
1404	iounmap((void *)bp->rx_dma);
1405err_out_iounmap_tx:
1406	iounmap((void *)bp->tx_dma);
1407err_out_iounmap:
1408	iounmap((void *)dev->base_addr);
1409err_out:
1410	if (bp->node) {
1411		release_OF_resource(bp->node, 0);
1412		release_OF_resource(bp->node, 1);
1413		release_OF_resource(bp->node, 2);
1414		pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);
1415	}
1416	unregister_netdev(dev);
1417	kfree(dev);
1418}
1419
1420static int bmac_open(struct net_device *dev)
1421{
1422	struct bmac_data *bp = (struct bmac_data *) dev->priv;
1423	/* XXDEBUG(("bmac: enter open\n")); */
1424	/* reset the chip */
1425	bp->opened = 1;
1426	bmac_reset_and_enable(dev);
1427	enable_irq(dev->irq);
1428	dev->flags |= IFF_RUNNING;
1429	return 0;
1430}
1431
1432static int bmac_close(struct net_device *dev)
1433{
1434	struct bmac_data *bp = (struct bmac_data *) dev->priv;
1435	volatile struct dbdma_regs *rd = bp->rx_dma;
1436	volatile struct dbdma_regs *td = bp->tx_dma;
1437	unsigned short config;
1438	int i;
1439
1440	bp->sleeping = 1;
1441	dev->flags &= ~(IFF_UP | IFF_RUNNING);
1442
1443	/* disable rx and tx */
1444	config = bmread(dev, RXCFG);
1445	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
1446
1447	config = bmread(dev, TXCFG);
1448	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
1449
1450	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
1451
1452	/* disable rx and tx dma */
1453	st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
1454	st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
1455
1456	/* free some skb's */
1457	XXDEBUG(("bmac: free rx bufs\n"));
1458	for (i=0; i<N_RX_RING; i++) {
1459		if (bp->rx_bufs[i] != NULL) {
1460			dev_kfree_skb(bp->rx_bufs[i]);
1461			bp->rx_bufs[i] = NULL;
1462		}
1463	}
1464	XXDEBUG(("bmac: free tx bufs\n"));
1465	for (i = 0; i<N_TX_RING; i++) {
1466		if (bp->tx_bufs[i] != NULL) {
1467			dev_kfree_skb(bp->tx_bufs[i]);
1468			bp->tx_bufs[i] = NULL;
1469		}
1470	}
1471	XXDEBUG(("bmac: all bufs freed\n"));
1472
1473	bp->opened = 0;
1474	disable_irq(dev->irq);
1475	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);
1476
1477	return 0;
1478}
1479
1480static void
1481bmac_start(struct net_device *dev)
1482{
1483	struct bmac_data *bp = dev->priv;
1484	int i;
1485	struct sk_buff *skb;
1486	unsigned long flags;
1487
1488	if (bp->sleeping)
1489		return;
1490
1491	save_flags(flags); cli();
1492	while (1) {
1493		i = bp->tx_fill + 1;
1494		if (i >= N_TX_RING)
1495			i = 0;
1496		if (i == bp->tx_empty)
1497			break;
1498		skb = skb_dequeue(bp->queue);
1499		if (skb == NULL)
1500			break;
1501		bmac_transmit_packet(skb, dev);
1502	}
1503	restore_flags(flags);
1504}
1505
1506static int
1507bmac_output(struct sk_buff *skb, struct net_device *dev)
1508{
1509	struct bmac_data *bp = dev->priv;
1510	skb_queue_tail(bp->queue, skb);
1511	bmac_start(dev);
1512	return 0;
1513}
1514
/*
 * Transmit watchdog, run as a timer callback with the net_device as
 * its argument.  A transmit has been stuck too long: reset the chip,
 * restart receive DMA from where it left off, drop the stuck frame,
 * restart the transmit ring and re-enable the MAC.
 */
static void bmac_tx_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	volatile struct dbdma_regs *td = bp->tx_dma;
	volatile struct dbdma_regs *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	unsigned short config, oldConfig;
	int i;

	XXDEBUG(("bmac: tx_timeout called\n"));
	save_flags(flags); cli();
	bp->timeout_active = 0;

	/* update various counters */
/*     	bmac_handle_misc_intrs(bp, 0); */

	cp = &bp->tx_cmds[bp->tx_empty];
/*	XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
/* 	   ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
/* 	   mb->pr, mb->xmtfs, mb->fifofc)); */

	/* turn off both tx and rx and reset the chip */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
	bmac_enable_and_reset_chip(dev);

	/* restart rx dma */
	/* Resume from the command the RX channel was on when it stopped. */
	cp = bus_to_virt(ld_le32(&rd->cmdptr));
	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

	/* fix up the transmit side */
	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
	i = bp->tx_empty;
	++bp->stats.tx_errors;
	/* Drop the frame that timed out and advance the empty pointer. */
	if (i != bp->tx_fill) {
		dev_kfree_skb(bp->tx_bufs[i]);
		bp->tx_bufs[i] = NULL;
		if (++i >= N_TX_RING) i = 0;
		bp->tx_empty = i;
	}
	bp->tx_fullup = 0;
	netif_wake_queue(dev);
	/* If more frames are pending, restart the TX channel on the next one. */
	if (i != bp->tx_fill) {
		cp = &bp->tx_cmds[i];
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, DBDMA_SET(RUN));
		/* 	bmac_set_timeout(dev); */
		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
	}

	/* turn it back on */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	restore_flags(flags);
}
1585
1586
/*
 * /proc/net/bmac read routine (old-style get_info interface): dump
 * the register set of the first registered BMAC.  The offset/length
 * arithmetic implements the classic /proc windowing protocol, where
 * the kernel calls back repeatedly with increasing `offset'.
 */
static int
bmac_proc_info(char *buffer, char **start, off_t offset, int length)
{
	int len = 0;
	off_t pos   = 0;
	off_t begin = 0;
	int i;

	if (bmac_devs == NULL)
		return (-ENOSYS);

	len += sprintf(buffer, "BMAC counters & registers\n");

	for (i = 0; i<N_REG_ENTRIES; i++) {
		len += sprintf(buffer + len, "%s: %#08x\n",
			       reg_entries[i].name,
			       bmread(bmac_devs, reg_entries[i].reg_offset));
		pos = begin + len;

		/* Everything before `offset' was consumed by earlier calls;
		 * discard it and remember where this window begins. */
		if (pos < offset) {
			len = 0;
			begin = pos;
		}

		/* Enough output generated to satisfy this read. */
		if (pos > offset+length) break;
	}

	*start = buffer + (offset - begin);
	len -= (offset - begin);

	if (len > length) len = length;

	return len;
}
1621
1622
1623MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
1624MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
1625MODULE_LICENSE("GPL");
1626EXPORT_NO_SYMBOLS;
1627
1628static void __exit bmac_cleanup (void)
1629{
1630	struct bmac_data *bp;
1631	struct net_device *dev;
1632
1633	if (bmac_emergency_rxbuf != NULL) {
1634		kfree(bmac_emergency_rxbuf);
1635		bmac_emergency_rxbuf = NULL;
1636	}
1637
1638	if (bmac_devs == 0)
1639		return;
1640#ifdef CONFIG_PMAC_PBOOK
1641	pmu_unregister_sleep_notifier(&bmac_sleep_notifier);
1642#endif
1643	proc_net_remove("bmac");
1644
1645	do {
1646		dev = bmac_devs;
1647		bp = (struct bmac_data *) dev->priv;
1648		bmac_devs = bp->next_bmac;
1649
1650		unregister_netdev(dev);
1651
1652		release_OF_resource(bp->node, 0);
1653		release_OF_resource(bp->node, 1);
1654		release_OF_resource(bp->node, 2);
1655		free_irq(dev->irq, dev);
1656		free_irq(bp->tx_dma_intr, dev);
1657		free_irq(bp->rx_dma_intr, dev);
1658
1659		kfree(dev);
1660	} while (bmac_devs != NULL);
1661}
1662
1663module_init(bmac_probe);
1664module_exit(bmac_cleanup);
1665