/*
 *	Driver for the Macintosh 68K onboard MACE controller with PSC
 *	driven DMA. The MACE driver code is derived from mace.c. The
 *	Mac68k theory of operation is courtesy of the MacBSD wizards.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Copyright (C) 1996 Paul Mackerras.
 *	Copyright (C) 1998 Alan Cox <alan@redhat.com>
 *
 *	Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
 *
 *	Copyright (C) 2007 Finn Thain
 *
 *	Converted to DMA API, converted to unified driver model,
 *	sync'd some routines with mace.c and fixed various bugs.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
#include "mace.h"

static char mac_mace_string[] = "macmace";
static struct platform_device *mac_mace_device;

#define N_TX_BUFF_ORDER	0
#define N_TX_RING	(1 << N_TX_BUFF_ORDER)
#define N_RX_BUFF_ORDER	3
#define N_RX_RING	(1 << N_RX_BUFF_ORDER)

#define TX_TIMEOUT	HZ

#define MACE_BUFF_SIZE	0x800

#define BROKEN_ADDRCHG_REV	0x0941
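
/*
 * Ring geometry: a single 2 KB transmit buffer (N_TX_RING == 1) and eight
 * 2 KB receive buffers (N_RX_RING == 8), each MACE_BUFF_SIZE bytes,
 * allocated as DMA-coherent memory in mace_open().
 *
 * BROKEN_ADDRCHG_REV identifies a MACE revision for which this driver
 * skips the ADDRCHG handshake when loading the physical or logical
 * address registers (see __mace_set_address() and mace_set_multicast()).
 */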

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE	(void *)(0x50F1C000)
#define MACE_PROM	(void *)(0x50F08001)

struct mace_data {
	volatile struct mace *mace;
	unsigned char *tx_ring;
	dma_addr_t tx_ring_phys;
	unsigned char *rx_ring;
	dma_addr_t rx_ring_phys;
	int dma_intr;			/* PSC DMA interrupt number */
	struct net_device_stats stats;
	int rx_slot, rx_tail;		/* active RX command set (0x00/0x10), next ring slot to drain */
	int tx_slot, tx_sloti, tx_count; /* TX command set for submit, for completion, free buffers */
	int chipid;			/* MACE revision, for the ADDRCHG workaround */
	struct device *device;
};

struct mace_frame {
	u8	rcvcnt;
	u8	pad1;
	u8	rcvsts;
	u8	pad2;
	u8	rntpc;
	u8	pad3;
	u8	rcvcc;
	u8	pad4;
	u32	pad5;
	u32	pad6;
	u8	data[1];
	/* And frame continues.. */
};
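
/*
 * The receive DMA deposits a MACE status header in front of each frame:
 * rcvcnt holds the low 8 bits of the byte count and the low nibble of
 * rcvsts holds bits 8-11, so mace_dma_rx_frame() recovers the length as
 *
 *	frame_length = mf->rcvcnt + ((mf->rcvsts & 0x0F) << 8);
 *
 * The upper bits of rcvsts are the RS_* error flags; rntpc and rcvcc
 * appear to be the runt packet and receive collision counters.
 */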

#define PRIV_BYTES	sizeof(struct mace_data)

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *mace_stats(struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * Load a receive DMA channel with a base address and ring length
 */
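
/*
 * The PSC control and command words used below are magic values inherited
 * from the MacBSD driver; the PSC itself is undocumented. The pattern in
 * this driver suggests that writing 0x8800 to a channel CTL register stops
 * the channel and 0x0400 restarts it, that CMD 0x9800 arms a command set
 * once its address and length registers are loaded, and that CMD 0x1100
 * or 0x0100 cancels or pauses a set.
 */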

static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = netdev_priv(dev);

	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}

/*
 * Reset the receive DMA subsystem
 */
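
/*
 * Each PSC DMA channel has two command sets, PSC_SET0 and PSC_SET1, whose
 * registers sit 0x10 apart. Both sets are loaded with the same ring base
 * here; rx_slot records which set is currently active and is toggled by
 * XORing with 0x10 when a set is exhausted (see mace_dma_intr()).
 */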

static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;

	mace->maccc = maccc & ~ENRCV;

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}

/*
 * Reset the transmit DMA subsystem
 */
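
/*
 * Transmit is simpler: there is only one 2 KB buffer, so no ring base
 * needs to be preloaded. tx_slot and tx_sloti select the PSC command set
 * used for the next submission and the next completion respectively, and
 * tx_count (0 or 1 here) tracks how many transmit buffers are free.
 */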

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;
	mace->maccc = maccc & ~ENXMT;

	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}

/*
 * Disable DMA
 */
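
/*
 * Both command sets of both the read and the write channel are cancelled
 * here, so no DMA can complete while the chip is being (re)configured.
 */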

static void mace_dma_off(struct net_device *dev)
{
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}

/*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintrash has a MACE (the AV Macintoshes).
 */

static int __devinit mace_probe(struct platform_device *pdev)
{
	int j;
	struct mace_data *mp;
	unsigned char *addr;
	struct net_device *dev;
	unsigned char checksum = 0;
	static int found = 0;
	int err;

	if (found || macintosh_config->ether_type != MAC_ETHER_MACE)
		return -ENODEV;

	found = 1;	/* prevent 'finding' one on every device probe */

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);

	mp->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	SET_MODULE_OWNER(dev);

	dev->base_addr = (u32)MACE_BASE;
	mp->mace = (volatile struct mace *) MACE_BASE;

	dev->irq = IRQ_MAC_MACE;
	mp->dma_intr = IRQ_MAC_MACE_DMA;

	mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;

	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd
	 * together. Due to the usual peculiar apple brain damage
	 * the bytes are spaced 16 bytes apart and their bits are
	 * reversed.
	 */
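	/*
	 * In other words, PROM byte j lives at MACE_PROM + (j << 4) and must
	 * be bit-reversed; bytes 0-5 form the station address and bytes 6-7
	 * bring the XOR checksum of all eight bytes up to 0xFF.
	 */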

	addr = (void *)MACE_PROM;

	for (j = 0; j < 6; ++j) {
		u8 v = bitrev8(addr[j<<4]);
		checksum ^= v;
		dev->dev_addr[j] = v;
	}
	for (; j < 8; ++j) {
		checksum ^= bitrev8(addr[j<<4]);
	}

	if (checksum != 0xFF) {
		free_netdev(dev);
		return -ENODEV;
	}

	memset(&mp->stats, 0, sizeof(mp->stats));

	dev->open		= mace_open;
	dev->stop		= mace_close;
	dev->hard_start_xmit	= mace_xmit_start;
	dev->tx_timeout		= mace_tx_timeout;
	dev->watchdog_timeo	= TX_TIMEOUT;
	dev->get_stats		= mace_stats;
	dev->set_multicast_list	= mace_set_multicast;
	dev->set_mac_address	= mace_set_address;

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}

	printk(KERN_INFO "%s: 68K MACE, hardware address %.2X", dev->name, dev->dev_addr[0]);
	for (j = 1 ; j < 6 ; j++) printk(":%.2X", dev->dev_addr[j]);
	printk("\n");

	return 0;
}

/*
 * Reset the chip.
 */

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;

	/* soft-reset the chip */
	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "macmace: cannot reset chip!\n");
		return;
	}

	mb->maccc = 0;	/* turn off tx, rx */
	mb->imr = 0xFF;	/* disable all intrs for now */
	i = mb->ir;	/* reading the interrupt register clears pending interrupts */

	mb->biucc = XMTSP_64;
	mb->utr = RTRD;
	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;

	mb->xmtfc = AUTO_PAD_XMIT; /* auto-pad short frames */
	mb->rcvfc = 0;

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = LOGADDR;
	else {
		mb->iac = ADDRCHG | LOGADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;

	mb->plscc = PORTSEL_AUI;
}

/*
 * Load the station address into the MACE controller.
 */
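
/*
 * On most MACE revisions, changing PADR or LADRF requires setting ADDRCHG
 * along with the register-select bit in IAC and then polling until the
 * chip clears ADDRCHG again. Revision BROKEN_ADDRCHG_REV apparently does
 * not complete that handshake, so for it the driver writes the select bit
 * alone and skips the polling.
 */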

static void __mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned char *p = addr;
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = PHYADDR;
	else {
		mb->iac = ADDRCHG | PHYADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i] = p[i];
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;
}

static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;
	u8 maccc;

	local_irq_save(flags);

	maccc = mb->maccc;

	__mace_set_address(dev, addr);

	mb->maccc = maccc;

	local_irq_restore(flags);

	return 0;
}

/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 */
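
/*
 * Open: reset the chip, claim both the MACE and the PSC DMA interrupts,
 * allocate the coherent TX and RX rings, prime both DMA channels, then
 * enable the transmitter and receiver. Note that RCVINT is left masked in
 * the IMR: received frames are handled from the PSC DMA interrupt, not
 * from the MACE interrupt.
 */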

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
			N_TX_RING * MACE_BUFF_SIZE,
			&mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
		goto out1;
	}

	mp->rx_ring = dma_alloc_coherent(mp->device,
			N_RX_RING * MACE_BUFF_SIZE,
			&mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
		goto out2;
	}

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;
	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
	                  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}

/*
 * Shut down the mace and its interrupt channel
 */

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable rx and tx	 */
	mb->imr = 0xFF;		/* disable all irqs	 */
	mace_dma_off(dev);	/* disable rx and tx dma */

	return 0;
}

/*
 * Transmit a frame
 */
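
/*
 * With only a single transmit buffer, the queue is stopped on every
 * transmit and woken again from mace_interrupt() once tx_count shows a
 * free buffer. The frame data is copied into the DMA-coherent tx_ring to
 * avoid alignment and caching problems with the skb data.
 */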

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Stop the queue since there's only the one buffer */

	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	mp->stats.tx_packets++;
	mp->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	mp->tx_slot ^= 0x10;

	dev_kfree_skb(skb);

	dev->trans_start = jiffies;
	return NETDEV_TX_OK;
}

static struct net_device_stats *mace_stats(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	return &mp->stats;
}

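/*
 * The MACE filters multicast traffic through a 64-bit logical address
 * filter (the LADRF registers): the top six bits of the little-endian
 * CRC-32 of each multicast address select one bit in the filter. In
 * promiscuous mode the filter is bypassed by setting PROM instead.
 */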
static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i, j;
	u32 crc;
	u8 maccc;
	unsigned long flags;

	local_irq_save(flags);
	maccc = mb->maccc;
	mb->maccc &= ~PROM;

	if (dev->flags & IFF_PROMISC) {
		mb->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct dev_mc_list *dmi = dev->mc_list;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++) {
				multicast_filter[i] = 0xFF;
			}
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			for (i = 0; i < dev->mc_count; i++) {
				crc = ether_crc_le(6, dmi->dmi_addr);
				j = crc >> 26;	/* bit number in multicast_filter */
				multicast_filter[j >> 3] |= 1 << (j & 7);
				dmi = dmi->next;
			}
		}

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			mb->iac = LOGADDR;
		else {
			mb->iac = ADDRCHG | LOGADDR;
			while ((mb->iac & ADDRCHG) != 0)
				;
		}
		for (i = 0; i < 8; ++i)
			mb->ladrf = multicast_filter[i];
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			mb->iac = 0;
	}

	mb->maccc = maccc;
	local_irq_restore(flags);
}

static void mace_handle_misc_intrs(struct mace_data *mp, int intr)
{
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		mp->stats.rx_missed_errors += 256;
	mp->stats.rx_missed_errors += mb->mpc;   /* reading clears it */
	if (intr & RNTPCO)
		mp->stats.rx_length_errors += 256;
	mp->stats.rx_length_errors += mb->rntpc; /* reading clears it */
	if (intr & CERR)
		++mp->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "macmace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
}

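/*
 * MACE interrupt: counts errors and handles transmit completion status.
 * Receive completion never arrives here (RCVINT is masked); frames are
 * picked up in mace_dma_intr() instead. Local interrupts are disabled so
 * the DMA handler cannot run concurrently and race on tx_count.
 */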
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int intr, fs;
	unsigned long flags;

	/* don't want the dma interrupt handler to fire */
	local_irq_save(flags);

	intr = mb->ir; /* read interrupt register */
	mace_handle_misc_intrs(mp, intr);

	if (intr & XMTINT) {
		fs = mb->xmtfs;
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
			mace_reset(dev);
		}
		/* dma should have finished */
		if (!mp->tx_count) {
			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++mp->stats.tx_errors;
			if (fs & LCAR)
				++mp->stats.tx_carrier_errors;
			else if (fs & (UFLO|LCOL|RTRY)) {
				++mp->stats.tx_aborted_errors;
				if (mb->xmtfs & UFLO) {
					printk(KERN_ERR "%s: DMA underrun.\n", dev->name);
					mp->stats.tx_fifo_errors++;
					mace_txdma_reset(dev);
				}
			}
		}
	}

	if (mp->tx_count)
		netif_wake_queue(dev);

	local_irq_restore(flags);

	return IRQ_HANDLED;
}

static void mace_tx_timeout(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;

	local_irq_save(flags);

	/* turn off both tx and rx and reset the chip */
	mb->maccc = 0;
	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
	mace_txdma_reset(dev);
	mace_reset(dev);

	/* restart rx dma */
	mace_rxdma_reset(dev);

	mp->tx_count = N_TX_RING;
	netif_wake_queue(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;

	local_irq_restore(flags);
}

/*
 * Handle a newly arrived frame
 */
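
/*
 * Every received frame is copied out of the DMA ring into a freshly
 * allocated skb; two bytes are reserved at the head so that the IP header
 * ends up longword aligned after the 14-byte Ethernet header.
 */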

static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct mace_data *mp = netdev_priv(dev);
	struct sk_buff *skb;
	unsigned int frame_status = mf->rcvsts;

	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
		mp->stats.rx_errors++;
		if (frame_status & RS_OFLO) {
			printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name);
			mp->stats.rx_fifo_errors++;
		}
		if (frame_status & RS_CLSN)
			mp->stats.collisions++;
		if (frame_status & RS_FRAMERR)
			mp->stats.rx_frame_errors++;
		if (frame_status & RS_FCSERR)
			mp->stats.rx_crc_errors++;
	} else {
		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8);

		skb = dev_alloc_skb(frame_length + 2);
		if (!skb) {
			mp->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);
		memcpy(skb_put(skb, frame_length), mf->data, frame_length);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->last_rx = jiffies;
		mp->stats.rx_packets++;
		mp->stats.rx_bytes += frame_length;
	}
}

/*
 * The PSC has passed us a DMA interrupt event.
 */
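
/*
 * For each channel, status bit 0x2000 appears to flag an error and causes
 * a full channel reset, while 0x0100 signals completion on the active
 * command set. On the read side the LEN register counts down the ring
 * slots still free, so head = N_RX_RING - left is the index one past the
 * last slot the PSC has filled; slots between rx_tail and head are drained
 * and then either the same set is re-armed or, if the ring is exhausted,
 * the other set takes over.
 */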

static irqreturn_t mace_dma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does */

	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY))
		;
	if (!(baka & 0x60000000))
		return IRQ_NONE;

	/*
	 * Process the read queue
	 */

	status = psc_read_word(PSC_ENETRD_CTL);

	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;

		/* Loop through the ring buffer and process new packets */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame *) (mp->rx_ring
				+ (mp->rx_tail * MACE_BUFF_SIZE)));
			mp->rx_tail++;
		}

		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
	}
	return IRQ_HANDLED;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");

static int __devexit mac_mace_device_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mace_data *mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(IRQ_MAC_MACE_DMA, dev);

	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
	                  mp->rx_ring, mp->rx_ring_phys);
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
	                  mp->tx_ring, mp->tx_ring_phys);

	free_netdev(dev);

	return 0;
}

static struct platform_driver mac_mace_driver = {
	.probe  = mace_probe,
	.remove = __devexit_p(mac_mace_device_remove),
	.driver	= {
		.name = mac_mace_string,
	},
};

static int __init mac_mace_init_module(void)
{
	int err;

	if ((err = platform_driver_register(&mac_mace_driver))) {
		printk(KERN_ERR "Driver registration failed\n");
		return err;
	}

	mac_mace_device = platform_device_alloc(mac_mace_string, 0);
	if (!mac_mace_device)
		goto out_unregister;

	if (platform_device_add(mac_mace_device)) {
		platform_device_put(mac_mace_device);
		mac_mace_device = NULL;
	}

	return 0;

out_unregister:
	platform_driver_unregister(&mac_mace_driver);

	return -ENOMEM;
}

static void __exit mac_mace_cleanup_module(void)
{
	platform_driver_unregister(&mac_mace_driver);

	if (mac_mace_device) {
		platform_device_unregister(mac_mace_device);
		mac_mace_device = NULL;
	}
}

module_init(mac_mace_init_module);
module_exit(mac_mace_cleanup_module);