1/* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
2 *
3 *	(c) Copyright 1998 Red Hat Software Inc
4 *	Written by Alan Cox.
5 *	Further debugging by Carl Drougge.
6 *      Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
7 *      Heavily modified by Richard Procter <rnp@paradise.net.nz>
8 *
9 *	Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
10 *	(for the MCA stuff) written by Wim Dumon.
11 *
12 *	Thanks to 3Com for making this possible by providing me with the
13 *	documentation.
14 *
15 *	This software may be used and distributed according to the terms
16 *	of the GNU General Public License, incorporated herein by reference.
17 *
18 */
19
20#define DRV_NAME		"3c527"
21#define DRV_VERSION		"0.7-SMP"
22#define DRV_RELDATE		"2003/09/21"
23
24static const char *version =
25DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n";
26
27/**
28 * DOC: Traps for the unwary
29 *
30 *	The diagram (Figure 1-1) and the POS summary disagree with the
31 *	"Interrupt Level" section in the manual.
32 *
 *	The manual contradicts itself when describing the minimum number
 *	of buffers in the 'configure lists' command.
35 *	My card accepts a buffer config of 4/4.
36 *
37 *	Setting the SAV BP bit does not save bad packets, but
38 *	only enables RX on-card stats collection.
39 *
40 *	The documentation in places seems to miss things. In actual fact
41 *	I've always eventually found everything is documented, it just
42 *	requires careful study.
43 *
44 * DOC: Theory Of Operation
45 *
46 *	The 3com 3c527 is a 32bit MCA bus mastering adapter with a large
47 *	amount of on board intelligence that housekeeps a somewhat dumber
48 *	Intel NIC. For performance we want to keep the transmit queue deep
49 *	as the card can transmit packets while fetching others from main
50 *	memory by bus master DMA. Transmission and reception are driven by
51 *	circular buffer queues.
52 *
53 *	The mailboxes can be used for controlling how the card traverses
 *	its buffer rings, but are used only for initial setup in this
55 *	implementation.  The exec mailbox allows a variety of commands to
56 *	be executed. Each command must complete before the next is
57 *	executed. Primarily we use the exec mailbox for controlling the
58 *	multicast lists.  We have to do a certain amount of interesting
59 *	hoop jumping as the multicast list changes can occur in interrupt
60 *	state when the card has an exec command pending. We defer such
61 *	events until the command completion interrupt.
62 *
63 *	A copy break scheme (taken from 3c59x.c) is employed whereby
64 *	received frames exceeding a configurable length are passed
 *	directly to the higher networking layers without incurring a copy,
66 *	in what amounts to a time/space trade-off.
67 *
68 *	The card also keeps a large amount of statistical information
69 *	on-board. In a perfect world, these could be used safely at no
70 *	cost. However, lacking information to the contrary, processing
71 *	them without races would involve so much extra complexity as to
72 *	make it unworthwhile to do so. In the end, a hybrid SW/HW
73 *	implementation was made necessary --- see mc32_update_stats().
74 *
75 * DOC: Notes
76 *
77 *	It should be possible to use two or more cards, but at this stage
78 *	only by loading two copies of the same module.
79 *
80 *	The on-board 82586 NIC has trouble receiving multiple
81 *	back-to-back frames and so is likely to drop packets from fast
82 *	senders.
83**/
84
85#include <linux/module.h>
86
87#include <linux/errno.h>
88#include <linux/netdevice.h>
89#include <linux/etherdevice.h>
90#include <linux/if_ether.h>
91#include <linux/init.h>
92#include <linux/kernel.h>
93#include <linux/types.h>
94#include <linux/fcntl.h>
95#include <linux/interrupt.h>
96#include <linux/mca-legacy.h>
97#include <linux/ioport.h>
98#include <linux/in.h>
99#include <linux/skbuff.h>
100#include <linux/slab.h>
101#include <linux/string.h>
102#include <linux/wait.h>
103#include <linux/ethtool.h>
104#include <linux/completion.h>
105#include <linux/bitops.h>
106
107#include <asm/semaphore.h>
108#include <asm/uaccess.h>
109#include <asm/system.h>
110#include <asm/io.h>
111#include <asm/dma.h>
112
113#include "3c527.h"
114
115MODULE_LICENSE("GPL");
116
117/*
118 * The name of the card. Is used for messages and in the requests for
119 * io regions, irqs and dma channels
120 */
121static const char* cardname = DRV_NAME;
122
123/* use 0 for production, 1 for verification, >2 for debug */
124#ifndef NET_DEBUG
125#define NET_DEBUG 2
126#endif
127
128#undef DEBUG_IRQ
129
130static unsigned int mc32_debug = NET_DEBUG;
131
132/* The number of low I/O ports used by the ethercard. */
133#define MC32_IO_EXTENT	8
134
135/* As implemented, values must be a power-of-2 -- 4/8/16/32 */
136#define TX_RING_LEN     32       /* Typically the card supports 37  */
137#define RX_RING_LEN     8        /*     "       "        "          */
138
139/* Copy break point, see above for details.
140 * Setting to > 1512 effectively disables this feature.	*/
141#define RX_COPYBREAK    200      /* Value from 3c59x.c */
142
143static const int WORKAROUND_82586=1;
144
145/* Pointers to buffers and their on-card records */
/*
 * One host-side ring slot: a cached pointer into the card's shared-memory
 * descriptor plus the sk_buff currently attached to that descriptor
 * (NULL when no buffer is attached).
 */
struct mc32_ring_desc
{
	volatile struct skb_header *p;	/* On-card descriptor (shared memory) */
	struct sk_buff *skb;		/* Host buffer bound to this slot */
};
151
152/* Information that needs to be kept for each board. */
struct mc32_local
{
	int slot;		/* MCA slot number this board occupies */

	u32 base;		/* Bus address of the card's shared memory window */
	struct net_device_stats net_stats;	/* Software-maintained statistics */
	volatile struct mc32_mailbox *rx_box;	/* Receive command mailbox */
	volatile struct mc32_mailbox *tx_box;	/* Transmit command mailbox */
	volatile struct mc32_mailbox *exec_box;	/* Execute command mailbox */
        volatile struct mc32_stats *stats;    /* Start of on-card statistics */
        u16 tx_chain;           /* Transmit list start offset */
	u16 rx_chain;           /* Receive list start offset */
        u16 tx_len;             /* Transmit list count */
        u16 rx_len;             /* Receive list count */

	u16 xceiver_desired_state; /* HALTED or RUNNING */
	u16 cmd_nonblocking;    /* Thread is uninterested in command result */
	u16 mc_reload_wait;	/* A multicast load request is pending */
	u32 mc_list_valid;	/* True when the mclist is set */

	struct mc32_ring_desc tx_ring[TX_RING_LEN];	/* Host Transmit ring */
	struct mc32_ring_desc rx_ring[RX_RING_LEN];	/* Host Receive ring */

	atomic_t tx_count;	/* buffers left */
	atomic_t tx_ring_head;  /* index to tx en-queue end */
	u16 tx_ring_tail;       /* index to tx de-queue end */

	u16 rx_ring_tail;       /* index to rx de-queue end */

	struct semaphore cmd_mutex;    /* Serialises issuing of execute commands */
        struct completion execution_cmd; /* Card has completed an execute command */
	struct completion xceiver_cmd;   /* Card has completed a tx or rx command */
};
186
187/* The station (ethernet) address prefix, used for a sanity check. */
188#define SA_ADDR0 0x02
189#define SA_ADDR1 0x60
190#define SA_ADDR2 0xAC
191
/* Maps an MCA POS adapter ID to a human-readable name for slot probing. */
struct mca_adapters_t {
	unsigned int	id;	/* POS adapter ID */
	char		*name;	/* Name registered with the MCA core */
};

/* Adapters this driver recognises; the zero-ID entry terminates the list. */
static const struct mca_adapters_t mc32_adapters[] = {
	{ 0x0041, "3COM EtherLink MC/32" },
	{ 0x8EF5, "IBM High Performance Lan Adapter" },
	{ 0x0000, NULL }
};
202
203
204/* Macros for ring index manipulations */
205static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); };
206static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); };
207
208static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); };
209
210
211/* Index to functions, as function prototypes. */
212static int	mc32_probe1(struct net_device *dev, int ioaddr);
213static int      mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
214static int	mc32_open(struct net_device *dev);
215static void	mc32_timeout(struct net_device *dev);
216static int	mc32_send_packet(struct sk_buff *skb, struct net_device *dev);
217static irqreturn_t mc32_interrupt(int irq, void *dev_id);
218static int	mc32_close(struct net_device *dev);
219static struct	net_device_stats *mc32_get_stats(struct net_device *dev);
220static void	mc32_set_multicast_list(struct net_device *dev);
221static void	mc32_reset_multicast_list(struct net_device *dev);
222static const struct ethtool_ops netdev_ethtool_ops;
223
224static void cleanup_card(struct net_device *dev)
225{
226	struct mc32_local *lp = netdev_priv(dev);
227	unsigned slot = lp->slot;
228	mca_mark_as_unused(slot);
229	mca_set_adapter_name(slot, NULL);
230	free_irq(dev->irq, dev);
231	release_region(dev->base_addr, MC32_IO_EXTENT);
232}
233
234/**
235 * mc32_probe 	-	Search for supported boards
236 * @unit: interface number to use
237 *
238 * Because MCA bus is a real bus and we can scan for cards we could do a
239 * single scan for all boards here. Right now we use the passed in device
240 * structure and scan for only one board. This needs fixing for modules
241 * in particular.
242 */
243
struct net_device *__init mc32_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local));
	static int current_mca_slot = -1;
	int i;
	int err;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	/* unit >= 0 means the caller chose the interface number */
	if (unit >= 0)
		sprintf(dev->name, "eth%d", unit);

	SET_MODULE_OWNER(dev);

	/* Do not check any supplied i/o locations.
	   POS registers usually don't fail :) */

	/* MCA cards have POS registers.
	   Autodetecting MCA cards is extremely simple.
	   Just search for the card. */

	for(i = 0; (mc32_adapters[i].name != NULL); i++) {
		current_mca_slot =
			mca_find_unused_adapter(mc32_adapters[i].id, 0);

		if(current_mca_slot != MCA_NOTFOUND) {
			if(!mc32_probe1(dev, current_mca_slot))
			{
				/* Probe succeeded: claim the slot, then try
				   to register the interface. On failure,
				   unwind everything probe1 set up. */
				mca_set_adapter_name(current_mca_slot,
						mc32_adapters[i].name);
				mca_mark_as_used(current_mca_slot);
				err = register_netdev(dev);
				if (err) {
					cleanup_card(dev);
					free_netdev(dev);
					dev = ERR_PTR(err);
				}
				return dev;
			}

		}
	}
	/* No supported adapter found in any slot */
	free_netdev(dev);
	return ERR_PTR(-ENODEV);
}
290
291/**
292 * mc32_probe1	-	Check a given slot for a board and test the card
293 * @dev:  Device structure to fill in
294 * @slot: The MCA bus slot being used by this card
295 *
296 * Decode the slot data and configure the card structures. Having done this we
297 * can reset the card and configure it. The card does a full self test cycle
298 * in firmware so we have to wait for it to return and post us either a
299 * failure case or some addresses we use to find the board internals.
300 */
301
302static int __init mc32_probe1(struct net_device *dev, int slot)
303{
304	static unsigned version_printed;
305	int i, err;
306	u8 POS;
307	u32 base;
308	struct mc32_local *lp = netdev_priv(dev);
309	static u16 mca_io_bases[]={
310		0x7280,0x7290,
311		0x7680,0x7690,
312		0x7A80,0x7A90,
313		0x7E80,0x7E90
314	};
315	static u32 mca_mem_bases[]={
316		0x00C0000,
317		0x00C4000,
318		0x00C8000,
319		0x00CC000,
320		0x00D0000,
321		0x00D4000,
322		0x00D8000,
323		0x00DC000
324	};
325	static char *failures[]={
326		"Processor instruction",
327		"Processor data bus",
328		"Processor data bus",
329		"Processor data bus",
330		"Adapter bus",
331		"ROM checksum",
332		"Base RAM",
333		"Extended RAM",
334		"82586 internal loopback",
335		"82586 initialisation failure",
336		"Adapter list configuration error"
337	};
338
339	/* Time to play MCA games */
340
341	if (mc32_debug  &&  version_printed++ == 0)
342		printk(KERN_DEBUG "%s", version);
343
344	printk(KERN_INFO "%s: %s found in slot %d:", dev->name, cardname, slot);
345
346	POS = mca_read_stored_pos(slot, 2);
347
348	if(!(POS&1))
349	{
350		printk(" disabled.\n");
351		return -ENODEV;
352	}
353
354	/* Fill in the 'dev' fields. */
355	dev->base_addr = mca_io_bases[(POS>>1)&7];
356	dev->mem_start = mca_mem_bases[(POS>>4)&7];
357
358	POS = mca_read_stored_pos(slot, 4);
359	if(!(POS&1))
360	{
361		printk("memory window disabled.\n");
362		return -ENODEV;
363	}
364
365	POS = mca_read_stored_pos(slot, 5);
366
367	i=(POS>>4)&3;
368	if(i==3)
369	{
370		printk("invalid memory window.\n");
371		return -ENODEV;
372	}
373
374	i*=16384;
375	i+=16384;
376
377	dev->mem_end=dev->mem_start + i;
378
379	dev->irq = ((POS>>2)&3)+9;
380
381	if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
382	{
383		printk("io 0x%3lX, which is busy.\n", dev->base_addr);
384		return -EBUSY;
385	}
386
387	printk("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
388		dev->base_addr, dev->irq, dev->mem_start, i/1024);
389
390
391	/* We ought to set the cache line size here.. */
392
393
394	/*
395	 *	Go PROM browsing
396	 */
397
398	printk("%s: Address ", dev->name);
399
400	/* Retrieve and print the ethernet address. */
401	for (i = 0; i < 6; i++)
402	{
403		mca_write_pos(slot, 6, i+12);
404		mca_write_pos(slot, 7, 0);
405
406		printk(" %2.2x", dev->dev_addr[i] = mca_read_pos(slot,3));
407	}
408
409	mca_write_pos(slot, 6, 0);
410	mca_write_pos(slot, 7, 0);
411
412	POS = mca_read_stored_pos(slot, 4);
413
414	if(POS&2)
415		printk(" : BNC port selected.\n");
416	else
417		printk(" : AUI port selected.\n");
418
419	POS=inb(dev->base_addr+HOST_CTRL);
420	POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
421	POS&=~HOST_CTRL_INTE;
422	outb(POS, dev->base_addr+HOST_CTRL);
423	/* Reset adapter */
424	udelay(100);
425	/* Reset off */
426	POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
427	outb(POS, dev->base_addr+HOST_CTRL);
428
429	udelay(300);
430
431	/*
432	 *	Grab the IRQ
433	 */
434
435	err = request_irq(dev->irq, &mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev);
436	if (err) {
437		release_region(dev->base_addr, MC32_IO_EXTENT);
438		printk(KERN_ERR "%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
439		goto err_exit_ports;
440	}
441
442	memset(lp, 0, sizeof(struct mc32_local));
443	lp->slot = slot;
444
445	i=0;
446
447	base = inb(dev->base_addr);
448
449	while(base == 0xFF)
450	{
451		i++;
452		if(i == 1000)
453		{
454			printk(KERN_ERR "%s: failed to boot adapter.\n", dev->name);
455			err = -ENODEV;
456			goto err_exit_irq;
457		}
458		udelay(1000);
459		if(inb(dev->base_addr+2)&(1<<5))
460			base = inb(dev->base_addr);
461	}
462
463	if(base>0)
464	{
465		if(base < 0x0C)
466			printk(KERN_ERR "%s: %s%s.\n", dev->name, failures[base-1],
467				base<0x0A?" test failure":"");
468		else
469			printk(KERN_ERR "%s: unknown failure %d.\n", dev->name, base);
470		err = -ENODEV;
471		goto err_exit_irq;
472	}
473
474	base=0;
475	for(i=0;i<4;i++)
476	{
477		int n=0;
478
479		while(!(inb(dev->base_addr+2)&(1<<5)))
480		{
481			n++;
482			udelay(50);
483			if(n>100)
484			{
485				printk(KERN_ERR "%s: mailbox read fail (%d).\n", dev->name, i);
486				err = -ENODEV;
487				goto err_exit_irq;
488			}
489		}
490
491		base|=(inb(dev->base_addr)<<(8*i));
492	}
493
494	lp->exec_box=isa_bus_to_virt(dev->mem_start+base);
495
496	base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];
497
498	lp->base = dev->mem_start+base;
499
500	lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
501	lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);
502
503	lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);
504
505	/*
506	 *	Descriptor chains (card relative)
507	 */
508
509	lp->tx_chain 		= lp->exec_box->data[8];   /* Transmit list start offset */
510	lp->rx_chain 		= lp->exec_box->data[10];  /* Receive list start offset */
511	lp->tx_len 		= lp->exec_box->data[9];   /* Transmit list count */
512	lp->rx_len 		= lp->exec_box->data[11];  /* Receive list count */
513
514	init_MUTEX_LOCKED(&lp->cmd_mutex);
515	init_completion(&lp->execution_cmd);
516	init_completion(&lp->xceiver_cmd);
517
518	printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
519		dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
520
521	dev->open		= mc32_open;
522	dev->stop		= mc32_close;
523	dev->hard_start_xmit	= mc32_send_packet;
524	dev->get_stats		= mc32_get_stats;
525	dev->set_multicast_list = mc32_set_multicast_list;
526	dev->tx_timeout		= mc32_timeout;
527	dev->watchdog_timeo	= HZ*5;	/* Board does all the work */
528	dev->ethtool_ops	= &netdev_ethtool_ops;
529
530	return 0;
531
532err_exit_irq:
533	free_irq(dev->irq, dev);
534err_exit_ports:
535	release_region(dev->base_addr, MC32_IO_EXTENT);
536	return err;
537}
538
539
540/**
541 *	mc32_ready_poll		-	wait until we can feed it a command
542 *	@dev:	The device to wait for
543 *
544 *	Wait until the card becomes ready to accept a command via the
545 *	command register. This tells us nothing about the completion
546 *	status of any pending commands and takes very little time at all.
547 */
548
549static inline void mc32_ready_poll(struct net_device *dev)
550{
551	int ioaddr = dev->base_addr;
552	while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
553}
554
555
556/**
557 *	mc32_command_nowait	-	send a command non blocking
558 *	@dev: The 3c527 to issue the command to
559 *	@cmd: The command word to write to the mailbox
560 *	@data: A data block if the command expects one
561 *	@len: Length of the data block
562 *
563 *	Send a command from interrupt state. If there is a command
564 *	currently being executed then we return an error of -1. It
565 *	simply isn't viable to wait around as commands may be
566 *	slow. This can theoretically be starved on SMP, but it's hard
567 *	to see a realistic situation.  We do not wait for the command
568 *	to complete --- we rely on the interrupt handler to tidy up
569 *	after us.
570 */
571
static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	int ret = -1;

	/* Non-blocking: if another command currently owns the exec
	   mailbox, give up immediately rather than spin (we may be in
	   interrupt state). */
	if (down_trylock(&lp->cmd_mutex) == 0)
	{
		lp->cmd_nonblocking=1;	/* tells the irq handler to up() cmd_mutex for us */
		lp->exec_box->mbox=0;
		lp->exec_box->mbox=cmd;
		memcpy((void *)lp->exec_box->data, data, len);
		barrier();	/* the memcpy forgot the volatile so be sure */

		/* Send the command */
		mc32_ready_poll(dev);
		outb(1<<6, ioaddr+HOST_CMD);

		ret = 0;

		/* Interrupt handler will signal mutex on completion */
	}

	return ret;
}
597
598
599/**
600 *	mc32_command	-	send a command and sleep until completion
601 *	@dev: The 3c527 card to issue the command to
602 *	@cmd: The command word to write to the mailbox
603 *	@data: A data block if the command expects one
604 *	@len: Length of the data block
605 *
606 *	Sends exec commands in a user context. This permits us to wait around
607 *	for the replies and also to wait for the command buffer to complete
608 *	from a previous command before we execute our command. After our
609 *	command completes we will attempt any pending multicast reload
610 *	we blocked off by hogging the exec buffer.
611 *
612 *	You feed the card a command, you wait, it interrupts you get a
613 *	reply. All well and good. The complication arises because you use
614 *	commands for filter list changes which come in at bh level from things
615 *	like IPV6 group stuff.
616 */
617
static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	int ret = 0;

	/* Blocks until any previous command (including a non-blocking
	   one) has released the exec mailbox. */
	down(&lp->cmd_mutex);

	/*
	 *     My Turn
	 */

	lp->cmd_nonblocking=0;
	lp->exec_box->mbox=0;
	lp->exec_box->mbox=cmd;
	memcpy((void *)lp->exec_box->data, data, len);
	barrier();	/* the memcpy forgot the volatile so be sure */

	mc32_ready_poll(dev);
	outb(1<<6, ioaddr+HOST_CMD);

	/* Sleep until the irq handler reports command completion */
	wait_for_completion(&lp->execution_cmd);

	/* Bit 13 set in the mailbox means the card rejected/failed it */
	if(lp->exec_box->mbox&(1<<13))
		ret = -1;

	up(&lp->cmd_mutex);

	/*
	 *	A multicast set got blocked - try it now
         */

	if(lp->mc_reload_wait)
	{
		mc32_reset_multicast_list(dev);
	}

	return ret;
}
657
658
659/**
660 *	mc32_start_transceiver	-	tell board to restart tx/rx
661 *	@dev: The 3c527 card to issue the command to
662 *
663 *	This may be called from the interrupt state, where it is used
664 *	to restart the rx ring if the card runs out of rx buffers.
665 *
666 * 	We must first check if it's ok to (re)start the transceiver. See
667 *      mc32_close for details.
668 */
669
static void mc32_start_transceiver(struct net_device *dev) {

	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	/* Ignore RX overflow on device closure */
	if (lp->xceiver_desired_state==HALTED)
		return;

	/* Give the card the offset to the post-EOL-bit RX descriptor */
	mc32_ready_poll(dev);
	lp->rx_box->mbox=0;
	lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
	outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);

	/* Then kick the transmitter; must wait for command-ready again
	   between the two mailbox writes. */
	mc32_ready_poll(dev);
	lp->tx_box->mbox=0;
	outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD);   /* card ignores this on RX restart */

	/* We are not interrupted on start completion */
}
691
692
693/**
694 *	mc32_halt_transceiver	-	tell board to stop tx/rx
695 *	@dev: The 3c527 card to issue the command to
696 *
697 *	We issue the commands to halt the card's transceiver. In fact,
698 *	after some experimenting we now simply tell the card to
699 *	suspend. When issuing aborts occasionally odd things happened.
700 *
701 *	We then sleep until the card has notified us that both rx and
702 *	tx have been suspended.
703 */
704
705static void mc32_halt_transceiver(struct net_device *dev)
706{
707	struct mc32_local *lp = netdev_priv(dev);
708	int ioaddr = dev->base_addr;
709
710	mc32_ready_poll(dev);
711	lp->rx_box->mbox=0;
712	outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
713	wait_for_completion(&lp->xceiver_cmd);
714
715	mc32_ready_poll(dev);
716	lp->tx_box->mbox=0;
717	outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
718	wait_for_completion(&lp->xceiver_cmd);
719}
720
721
722/**
723 *	mc32_load_rx_ring	-	load the ring of receive buffers
724 *	@dev: 3c527 to build the ring for
725 *
 *	This initialises the on-card and driver data structures to
727 *	the point where mc32_start_transceiver() can be called.
728 *
729 *	The card sets up the receive ring for us. We are required to use the
730 *	ring it provides, although the size of the ring is configurable.
731 *
732 * 	We allocate an sk_buff for each ring entry in turn and
 * 	initialise its house-keeping info. At the same time, we read
734 * 	each 'next' pointer in our rx_ring array. This reduces slow
735 * 	shared-memory reads and makes it easy to access predecessor
736 * 	descriptors.
737 *
738 *	We then set the end-of-list bit for the last entry so that the
739 * 	card will know when it has run out of buffers.
740 */
741
742static int mc32_load_rx_ring(struct net_device *dev)
743{
744	struct mc32_local *lp = netdev_priv(dev);
745	int i;
746	u16 rx_base;
747	volatile struct skb_header *p;
748
749	rx_base=lp->rx_chain;
750
751	for(i=0; i<RX_RING_LEN; i++) {
752		lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
753		if (lp->rx_ring[i].skb==NULL) {
754			for (;i>=0;i--)
755				kfree_skb(lp->rx_ring[i].skb);
756			return -ENOBUFS;
757		}
758		skb_reserve(lp->rx_ring[i].skb, 18);
759
760		p=isa_bus_to_virt(lp->base+rx_base);
761
762		p->control=0;
763		p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
764		p->status=0;
765		p->length=1532;
766
767		lp->rx_ring[i].p=p;
768		rx_base=p->next;
769	}
770
771	lp->rx_ring[i-1].p->control |= CONTROL_EOL;
772
773	lp->rx_ring_tail=0;
774
775	return 0;
776}
777
778
779/**
780 *	mc32_flush_rx_ring	-	free the ring of receive buffers
781 *	@lp: Local data of 3c527 to flush the rx ring of
782 *
783 *	Free the buffer for each ring slot. This may be called
784 *      before mc32_load_rx_ring(), eg. on error in mc32_open().
785 *      Requires rx skb pointers to point to a valid skb, or NULL.
786 */
787
788static void mc32_flush_rx_ring(struct net_device *dev)
789{
790	struct mc32_local *lp = netdev_priv(dev);
791	int i;
792
793	for(i=0; i < RX_RING_LEN; i++)
794	{
795		if (lp->rx_ring[i].skb) {
796			dev_kfree_skb(lp->rx_ring[i].skb);
797			lp->rx_ring[i].skb = NULL;
798		}
799		lp->rx_ring[i].p=NULL;
800	}
801}
802
803
804/**
805 *	mc32_load_tx_ring	-	load transmit ring
806 *	@dev: The 3c527 card to issue the command to
807 *
808 *	This sets up the host transmit data-structures.
809 *
 *	First, we obtain from the card its current position in the tx
811 *	ring, so that we will know where to begin transmitting
812 *	packets.
813 *
814 * 	Then, we read the 'next' pointers from the on-card tx ring into
815 *  	our tx_ring array to reduce slow shared-mem reads. Finally, we
 * 	initialise the tx house keeping variables.
817 *
818 */
819
820static void mc32_load_tx_ring(struct net_device *dev)
821{
822	struct mc32_local *lp = netdev_priv(dev);
823	volatile struct skb_header *p;
824	int i;
825	u16 tx_base;
826
827	tx_base=lp->tx_box->data[0];
828
829	for(i=0 ; i<TX_RING_LEN ; i++)
830	{
831		p=isa_bus_to_virt(lp->base+tx_base);
832		lp->tx_ring[i].p=p;
833		lp->tx_ring[i].skb=NULL;
834
835		tx_base=p->next;
836	}
837
838	/* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
839	/* see mc32_tx_ring */
840
841	atomic_set(&lp->tx_count, TX_RING_LEN-1);
842	atomic_set(&lp->tx_ring_head, 0);
843	lp->tx_ring_tail=0;
844}
845
846
847/**
848 *	mc32_flush_tx_ring 	-	free transmit ring
849 *	@lp: Local data of 3c527 to flush the tx ring of
850 *
 *      If the ring is non-empty, zip over it, freeing any
852 *      allocated skb_buffs.  The tx ring house-keeping variables are
853 *      then reset. Requires rx skb pointers to point to a valid skb,
854 *      or NULL.
855 */
856
857static void mc32_flush_tx_ring(struct net_device *dev)
858{
859	struct mc32_local *lp = netdev_priv(dev);
860	int i;
861
862	for (i=0; i < TX_RING_LEN; i++)
863	{
864		if (lp->tx_ring[i].skb)
865		{
866			dev_kfree_skb(lp->tx_ring[i].skb);
867			lp->tx_ring[i].skb = NULL;
868		}
869	}
870
871	atomic_set(&lp->tx_count, 0);
872	atomic_set(&lp->tx_ring_head, 0);
873	lp->tx_ring_tail=0;
874}
875
876
877
static int mc32_open(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct mc32_local *lp = netdev_priv(dev);
	u8 one=1;
	u8 regs;
	u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};

	/*
	 *	Interrupts enabled
	 */

	regs=inb(ioaddr+HOST_CTRL);
	regs|=HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	/*
	 *      Allow ourselves to issue commands
	 *      (cmd_mutex was created locked in mc32_probe1)
	 */

	up(&lp->cmd_mutex);


	/*
	 *	Send the indications on command
	 */

	mc32_command(dev, 4, &one, 2);

	/*
	 *	Poke it to make sure it's really dead.
	 */

	mc32_halt_transceiver(dev);
	mc32_flush_tx_ring(dev);

	/*
	 *	Ask card to set up on-card descriptors to our spec
	 */

	if(mc32_command(dev, 8, descnumbuffs, 4)) {
		printk("%s: %s rejected our buffer configuration!\n",
	 	       dev->name, cardname);
		mc32_close(dev);
		return -ENOBUFS;
	}

	/* Report new configuration */
	mc32_command(dev, 6, NULL, 0);

	/* Re-read the chain info the card rebuilt for our ring sizes */
	lp->tx_chain 		= lp->exec_box->data[8];   /* Transmit list start offset */
	lp->rx_chain 		= lp->exec_box->data[10];  /* Receive list start offset */
	lp->tx_len 		= lp->exec_box->data[9];   /* Transmit list count */
	lp->rx_len 		= lp->exec_box->data[11];  /* Receive list count */

	/* Set Network Address */
	mc32_command(dev, 1, dev->dev_addr, 6);

	/* Set the filters */
	mc32_set_multicast_list(dev);

	/* NOTE(review): command 0x0D is presumably an 82586 rx quirk
	   workaround; its semantics are not documented here. */
	if (WORKAROUND_82586) {
		u16 zero_word=0;
		mc32_command(dev, 0x0D, &zero_word, 2);
	}

	mc32_load_tx_ring(dev);

	if(mc32_load_rx_ring(dev))
	{
		mc32_close(dev);
		return -ENOBUFS;
	}

	lp->xceiver_desired_state = RUNNING;

	/* And finally, set the ball rolling... */
	mc32_start_transceiver(dev);

	netif_start_queue(dev);

	return 0;
}
961
962
963/**
964 *	mc32_timeout	-	handle a timeout from the network layer
965 *	@dev: 3c527 that timed out
966 *
967 *	Handle a timeout on transmit from the 3c527. This normally means
968 *	bad things as the hardware handles cable timeouts and mess for
969 *	us.
970 *
971 */
972
973static void mc32_timeout(struct net_device *dev)
974{
975	printk(KERN_WARNING "%s: transmit timed out?\n", dev->name);
976	/* Try to restart the adaptor. */
977	netif_wake_queue(dev);
978}
979
980
981/**
982 *	mc32_send_packet	-	queue a frame for transmit
983 *	@skb: buffer to transmit
984 *	@dev: 3c527 to send it out of
985 *
986 *	Transmit a buffer. This normally means throwing the buffer onto
987 *	the transmit queue as the queue is quite large. If the queue is
988 *	full then we set tx_busy and return. Once the interrupt handler
989 *	gets messages telling it to reclaim transmit queue entries, we will
990 *	clear tx_busy and the kernel will start calling this again.
991 *
992 *      We do not disable interrupts or acquire any locks; this can
993 *      run concurrently with mc32_tx_ring(), and the function itself
994 *      is serialised at a higher layer. However, similarly for the
995 *      card itself, we must ensure that we update tx_ring_head only
996 *      after we've established a valid packet on the tx ring (and
997 *      before we let the card "see" it, to prevent it racing with the
998 *      irq handler).
999 *
1000 */
1001
static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	u32 head = atomic_read(&lp->tx_ring_head);

	volatile struct skb_header *p, *np;

	netif_stop_queue(dev);

	/* Ring full: non-zero return asks the network layer to retry */
	if(atomic_read(&lp->tx_count)==0) {
		return 1;
	}

	/* skb_padto frees the skb itself on failure */
	if (skb_padto(skb, ETH_ZLEN)) {
		netif_wake_queue(dev);
		return 0;
	}

	atomic_dec(&lp->tx_count);

	/* P is the last sending/sent buffer as a pointer */
	p=lp->tx_ring[head].p;

	head = next_tx(head);

	/* NP is the buffer we will be loading */
	np=lp->tx_ring[head].p;

	/* We will need this to flush the buffer out */
	lp->tx_ring[head].skb=skb;

	np->length      = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
	np->data	= isa_virt_to_bus(skb->data);
	np->status	= 0;
	/* New descriptor carries EOL until we hand it over below */
	np->control     = CONTROL_EOP | CONTROL_EOL;
	wmb();		/* descriptor must be complete before it becomes visible */

	/*
	 * The new frame has been setup; we can now
	 * let the interrupt handler and card "see" it
	 */

	atomic_set(&lp->tx_ring_head, head);
	/* Clearing EOL on the previous descriptor lets the card advance */
	p->control     &= ~CONTROL_EOL;

	netif_wake_queue(dev);
	return 0;
}
1050
1051
1052/**
1053 *	mc32_update_stats	-	pull off the on board statistics
1054 *	@dev: 3c527 to service
1055 *
1056 *
1057 *	Query and reset the on-card stats. There's the small possibility
1058 *	of a race here, which would result in an underestimation of
1059 *	actual errors. As such, we'd prefer to keep all our stats
1060 *	collection in software. As a rule, we do. However it can't be
1061 *	used for rx errors and collisions as, by default, the card discards
1062 *	bad rx packets.
1063 *
1064 *	Setting the SAV BP in the rx filter command supposedly
1065 *	stops this behaviour. However, testing shows that it only seems to
1066 *	enable the collation of on-card rx statistics --- the driver
1067 *	never sees an RX descriptor with an error status set.
1068 *
1069 */
1070
1071static void mc32_update_stats(struct net_device *dev)
1072{
1073	struct mc32_local *lp = netdev_priv(dev);
1074	volatile struct mc32_stats *st = lp->stats;
1075
1076	u32 rx_errors=0;
1077
1078	rx_errors+=lp->net_stats.rx_crc_errors   +=st->rx_crc_errors;
1079	                                           st->rx_crc_errors=0;
1080	rx_errors+=lp->net_stats.rx_fifo_errors  +=st->rx_overrun_errors;
1081	                                           st->rx_overrun_errors=0;
1082	rx_errors+=lp->net_stats.rx_frame_errors +=st->rx_alignment_errors;
1083 	                                           st->rx_alignment_errors=0;
1084	rx_errors+=lp->net_stats.rx_length_errors+=st->rx_tooshort_errors;
1085	                                           st->rx_tooshort_errors=0;
1086	rx_errors+=lp->net_stats.rx_missed_errors+=st->rx_outofresource_errors;
1087	                                           st->rx_outofresource_errors=0;
1088        lp->net_stats.rx_errors=rx_errors;
1089
1090	/* Number of packets which saw one collision */
1091	lp->net_stats.collisions+=st->dataC[10];
1092	st->dataC[10]=0;
1093
1094	/* Number of packets which saw 2--15 collisions */
1095	lp->net_stats.collisions+=st->dataC[11];
1096	st->dataC[11]=0;
1097}
1098
1099
1100/**
1101 *	mc32_rx_ring	-	process the receive ring
1102 *	@dev: 3c527 that needs its receive ring processing
1103 *
1104 *
1105 *	We have received one or more indications from the card that a
1106 *	receive has completed. The buffer ring thus contains dirty
1107 *	entries. We walk the ring by iterating over the circular rx_ring
1108 *	array, starting at the next dirty buffer (which happens to be the
1109 *	one we finished up at last time around).
1110 *
1111 *	For each completed packet, we will either copy it and pass it up
1112 * 	the stack or, if the packet is near MTU sized, we allocate
1113 *	another buffer and flip the old one up the stack.
1114 *
1115 *	We must succeed in keeping a buffer on the ring. If necessary we
1116 *	will toss a received packet rather than lose a ring entry. Once
1117 *	the first uncompleted descriptor is found, we move the
1118 *	End-Of-List bit to include the buffers just processed.
1119 *
1120 */
1121
static void mc32_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *p;
	u16 rx_ring_tail;	/* walk cursor over the circular ring */
	u16 rx_old_tail;	/* starting point, kept for EOL fixup */
	int x=0;		/* bounds the walk (see while below) */

	rx_old_tail = rx_ring_tail = lp->rx_ring_tail;

	do
	{
		/* Descriptor shared with the card, hence volatile */
		p=lp->rx_ring[rx_ring_tail].p;

		if(!(p->status & (1<<7))) { /* Not COMPLETED */
			break;
		}
		if(p->status & (1<<6)) /* COMPLETED_OK */
		{

			u16 length=p->length;
			struct sk_buff *skb;
			struct sk_buff *newskb;

			/* Try to save time by avoiding a copy on big frames */

			if ((length > RX_COPYBREAK)
			    && ((newskb=dev_alloc_skb(1532)) != NULL))
			{
				/* Flip the filled skb up the stack and hand
				   the freshly allocated one to the card */
				skb=lp->rx_ring[rx_ring_tail].skb;
				skb_put(skb, length);

				skb_reserve(newskb,18);
				lp->rx_ring[rx_ring_tail].skb=newskb;
				p->data=isa_virt_to_bus(newskb->data);
			}
			else
			{
				/* Small frame (or allocation failed above):
				   copy into a fresh skb, keep the ring buffer */
				skb=dev_alloc_skb(length+2);

				if(skb==NULL) {
					/* Can't pass it up; recycle the ring
					   entry rather than lose it */
					lp->net_stats.rx_dropped++;
					goto dropped;
				}

				/* +2/reserve(2) aligns the IP header */
				skb_reserve(skb,2);
				memcpy(skb_put(skb, length),
				       lp->rx_ring[rx_ring_tail].skb->data, length);
			}

			skb->protocol=eth_type_trans(skb,dev);
			dev->last_rx = jiffies;
 			lp->net_stats.rx_packets++;
 			lp->net_stats.rx_bytes += length;
			netif_rx(skb);
		}

	dropped:
		/* Re-arm the descriptor for the card to refill */
		p->length = 1532;
		p->status = 0;

		rx_ring_tail=next_rx(rx_ring_tail);
	}
        while(x++<48);

	/* If there was actually a frame to be processed, place the EOL bit */
	/* at the descriptor prior to the one to be filled next */

	if (rx_ring_tail != rx_old_tail)
	{
		lp->rx_ring[prev_rx(rx_ring_tail)].p->control |=  CONTROL_EOL;
		lp->rx_ring[prev_rx(rx_old_tail)].p->control  &= ~CONTROL_EOL;

		lp->rx_ring_tail=rx_ring_tail;
	}
}
1198
1199
1200/**
1201 *	mc32_tx_ring	-	process completed transmits
1202 *	@dev: 3c527 that needs its transmit ring processing
1203 *
1204 *
1205 *	This operates in a similar fashion to mc32_rx_ring. We iterate
1206 *	over the transmit ring. For each descriptor which has been
1207 *	processed by the card, we free its associated buffer and note
1208 *	any errors. This continues until the transmit ring is emptied
1209 *	or we reach a descriptor that hasn't yet been processed by the
1210 *	card.
1211 *
1212 */
1213
static void mc32_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *np;	/* descriptor shared with the card */

	/*
	 * We rely on head==tail to mean 'queue empty'.
	 * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
	 * tx_ring_head wrapping to tail and confusing a 'queue empty'
	 * condition with 'queue full'
	 */

	while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
	{
		u16 t;

		t=next_tx(lp->tx_ring_tail);
		np=lp->tx_ring[t].p;

		if(!(np->status & (1<<7)))
		{
			/* Not COMPLETED */
			break;
		}
		lp->net_stats.tx_packets++;
		if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
		{
			lp->net_stats.tx_errors++;

			/* Low nibble of status encodes the failure cause */
			switch(np->status&0x0F)
			{
				case 1:
					lp->net_stats.tx_aborted_errors++;
					break; /* Max collisions */
				case 2:
					lp->net_stats.tx_fifo_errors++;
					break;
				case 3:
					lp->net_stats.tx_carrier_errors++;
					break;
				case 4:
					lp->net_stats.tx_window_errors++;
					break;  /* CTS Lost */
				case 5:
					lp->net_stats.tx_aborted_errors++;
					break; /* Transmit timeout */
			}
		}
		/* Packets are sent in order - this is
		    basically a FIFO queue of buffers matching
		    the card ring */
		lp->net_stats.tx_bytes+=lp->tx_ring[t].skb->len;
		dev_kfree_skb_irq(lp->tx_ring[t].skb);
		lp->tx_ring[t].skb=NULL;
		atomic_inc(&lp->tx_count);
		/* A slot just freed up; let the stack queue more work */
		netif_wake_queue(dev);

		lp->tx_ring_tail=t;
	}

}
1275
1276
1277/**
1278 *	mc32_interrupt		-	handle an interrupt from a 3c527
1279 *	@irq: Interrupt number
1280 *	@dev_id: 3c527 that requires servicing
1281 *	@regs: Registers (unused)
1282 *
1283 *
1284 *	An interrupt is raised whenever the 3c527 writes to the command
1285 *	register. This register contains the message it wishes to send us
1286 *	packed into a single byte field. We keep reading status entries
1287 *	until we have processed all the control items, but simply count
1288 *	transmit and receive reports. When all reports are in we empty the
1289 *	transceiver rings as appropriate. This saves the overhead of
1290 *	multiple command requests.
1291 *
1292 *	Because MCA is level-triggered, we shouldn't miss indications.
1293 *	Therefore, we needn't ask the card to suspend interrupts within
1294 *	this handler. The card receives an implicit acknowledgment of the
1295 *	current interrupt when we read the command register.
1296 *
1297 */
1298
static irqreturn_t mc32_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mc32_local *lp;
	int ioaddr, status, boguscount = 0;	/* boguscount guards against a stuck CWR bit */
	int rx_event = 0;
	int tx_event = 0;

	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	/* See what's cooking: drain status bytes while the card has
	   a command-written indication pending */

	while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
	{
		/* Reading the command register implicitly acks the interrupt */
		status=inb(ioaddr+HOST_CMD);

#ifdef DEBUG_IRQ
		printk("Status TX%d RX%d EX%d OV%d BC%d\n",
			(status&7), (status>>3)&7, (status>>6)&1,
			(status>>7)&1, boguscount);
#endif

		/* Bits 0-2: transmit-side indication */
		switch(status&7)
		{
			case 0:
				break;
			case 6: /* TX fail */
			case 2:	/* TX ok */
				tx_event = 1;
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			default:
				printk("%s: strange tx ack %d\n", dev->name, status&7);
		}
		status>>=3;
		/* Bits 3-5: receive-side indication */
		switch(status&7)
		{
			case 0:
				break;
			case 2:	/* RX */
				rx_event=1;
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			case 6:
				/* Out of RX buffers stat */
				/* Must restart rx */
				lp->net_stats.rx_dropped++;
				mc32_rx_ring(dev);
				mc32_start_transceiver(dev);
				break;
			default:
				printk("%s: strange rx ack %d\n",
					dev->name, status&7);
		}
		status>>=3;
		/* Bit 6: command execution completed */
		if(status&1)
		{
			/*
			 * No thread is waiting: we need to tidy
			 * up ourself.
			 */

			if (lp->cmd_nonblocking) {
				up(&lp->cmd_mutex);
				if (lp->mc_reload_wait)
					mc32_reset_multicast_list(dev);
			}
			else complete(&lp->execution_cmd);
		}
		/* Bit 7: on-card statistics counter about to overflow */
		if(status&2)
		{
			/*
			 *	We get interrupted once per
			 *	counter that is about to overflow.
			 */

			mc32_update_stats(dev);
		}
	}


	/*
	 *	Process the transmit and receive rings
         */

	if(tx_event)
		mc32_tx_ring(dev);

	if(rx_event)
		mc32_rx_ring(dev);

	return IRQ_HANDLED;
}
1399
1400
1401/**
1402 *	mc32_close	-	user configuring the 3c527 down
1403 *	@dev: 3c527 card to shut down
1404 *
1405 *	The 3c527 is a bus mastering device. We must be careful how we
1406 *	shut it down. It may also be running shared interrupt so we have
1407 *	to be sure to silence it properly
1408 *
1409 *	We indicate that the card is closing to the rest of the
1410 *	driver.  Otherwise, it is possible that the card may run out
1411 *	of receive buffers and restart the transceiver while we're
1412 *	trying to close it.
1413 *
1414 *	We abort any receive and transmits going on and then wait until
1415 *	any pending exec commands have completed in other code threads.
1416 *	In theory we can't get here while that is true, in practice I am
1417 *	paranoid
1418 *
1419 *	We turn off the interrupt enable for the board to be sure it can't
 *	interfere with other devices.
1421 */
1422
static int mc32_close(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	u8 regs;
	u16 one=1;

	/* Tell the rest of the driver we are going down, and stop the
	   stack from handing us more packets */
	lp->xceiver_desired_state = HALTED;
	netif_stop_queue(dev);

	/*
	 *	Send the indications on command (handy debug check)
	 */

	mc32_command(dev, 4, &one, 2);

	/* Shut down the transceiver */

	mc32_halt_transceiver(dev);

	/* Ensure we issue no more commands beyond this point */

	down(&lp->cmd_mutex);

	/* Ok the card is now stopping */

	/* Mask the board's interrupt enable so it cannot raise further
	   interrupts (it may be on a shared line) */
	regs=inb(ioaddr+HOST_CTRL);
	regs&=~HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	/* Free the buffers still sitting on the rings */
	mc32_flush_rx_ring(dev);
	mc32_flush_tx_ring(dev);

	/* Pull the final on-card statistics before the card goes away */
	mc32_update_stats(dev);

	return 0;
}
1461
1462
1463/**
1464 *	mc32_get_stats		-	hand back stats to network layer
1465 *	@dev: The 3c527 card to handle
1466 *
1467 *	We've collected all the stats we can in software already. Now
1468 *	it's time to update those kept on-card and return the lot.
1469 *
1470 */
1471
1472static struct net_device_stats *mc32_get_stats(struct net_device *dev)
1473{
1474	struct mc32_local *lp = netdev_priv(dev);
1475
1476	mc32_update_stats(dev);
1477	return &lp->net_stats;
1478}
1479
1480
1481/**
1482 *	do_mc32_set_multicast_list	-	attempt to update multicasts
1483 *	@dev: 3c527 device to load the list on
1484 *	@retry: indicates this is not the first call.
1485 *
1486 *
1487 * 	Actually set or clear the multicast filter for this adaptor. The
1488 *	locking issues are handled by this routine. We have to track
1489 *	state as it may take multiple calls to get the command sequence
1490 *	completed. We just keep trying to schedule the loads until we
1491 *	manage to process them all.
1492 *
1493 *	num_addrs == -1	Promiscuous mode, receive all packets
1494 *
1495 *	num_addrs == 0	Normal mode, clear multicast list
1496 *
1497 *	num_addrs > 0	Multicast mode, receive normal and MC packets,
1498 *			and do best-effort filtering.
1499 *
1500 *	See mc32_update_stats() regards setting the SAV BP bit.
1501 *
1502 */
1503
static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
{
	struct mc32_local *lp = netdev_priv(dev);
	u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */

	if (dev->flags&IFF_PROMISC)
		/* Enable promiscuous mode */
		filt |= 1;
	else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > 10)
	{
		/* Too many addresses to filter: fall back to promiscuous */
		dev->flags|=IFF_PROMISC;
		filt |= 1;
	}
	else if(dev->mc_count)
	{
		/* 62 bytes = 2-byte count + 10 addresses * 6 bytes; safe
		   because mc_count > 10 takes the promiscuous branch above */
		unsigned char block[62];
		unsigned char *bp;
		struct dev_mc_list *dmc=dev->mc_list;

		int i;

		/* A fresh call (retry==0) overrides any in-progress load */
		if(retry==0)
			lp->mc_list_valid = 0;
		if(!lp->mc_list_valid)
		{
			/* Little-endian 16-bit address count, then the
			   addresses back to back */
			block[1]=0;
			block[0]=dev->mc_count;
			bp=block+2;

			for(i=0;i<dev->mc_count;i++)
			{
				memcpy(bp, dmc->dmi_addr, 6);
				bp+=6;
				dmc=dmc->next;
			}
			/* Card busy: note that a reload is pending and let the
			   interrupt handler retry us later */
			if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
			{
				lp->mc_reload_wait = 1;
				return;
			}
			lp->mc_list_valid=1;
		}
	}

	/* Now set the receive filter mode itself */
	if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
	{
		lp->mc_reload_wait = 1;
	}
	else {
		lp->mc_reload_wait = 0;
	}
}
1556
1557
1558/**
1559 *	mc32_set_multicast_list	-	queue multicast list update
1560 *	@dev: The 3c527 to use
1561 *
1562 *	Commence loading the multicast list. This is called when the kernel
1563 *	changes the lists. It will override any pending list we are trying to
1564 *	load.
1565 */
1566
static void mc32_set_multicast_list(struct net_device *dev)
{
	/* retry == 0: a fresh request from the kernel, which
	   invalidates any partially loaded list. */
	do_mc32_set_multicast_list(dev, 0);
}
1571
1572
1573/**
1574 *	mc32_reset_multicast_list	-	reset multicast list
1575 *	@dev: The 3c527 to use
1576 *
1577 *	Attempt the next step in loading the multicast lists. If this attempt
1578 *	fails to complete then it will be scheduled and this function called
1579 *	again later from elsewhere.
1580 */
1581
static void mc32_reset_multicast_list(struct net_device *dev)
{
	/* retry == 1: continue a previously deferred load rather
	   than starting over. */
	do_mc32_set_multicast_list(dev, 1);
}
1586
1587static void netdev_get_drvinfo(struct net_device *dev,
1588			       struct ethtool_drvinfo *info)
1589{
1590	strcpy(info->driver, DRV_NAME);
1591	strcpy(info->version, DRV_VERSION);
1592	sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
1593}
1594
1595static u32 netdev_get_msglevel(struct net_device *dev)
1596{
1597	return mc32_debug;
1598}
1599
1600static void netdev_set_msglevel(struct net_device *dev, u32 level)
1601{
1602	mc32_debug = level;
1603}
1604
/* Minimal ethtool support: driver identity plus get/set of the
   driver-wide debug message level. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};
1610
1611#ifdef MODULE
1612
1613static struct net_device *this_device;
1614
1615/**
1616 *	init_module		-	entry point
1617 *
1618 *	Probe and locate a 3c527 card. This really should probe and locate
1619 *	all the 3c527 cards in the machine not just one of them. Yes you can
1620 *	insmod multiple modules for now but it's a hack.
1621 */
1622
1623int __init init_module(void)
1624{
1625	this_device = mc32_probe(-1);
1626	if (IS_ERR(this_device))
1627		return PTR_ERR(this_device);
1628	return 0;
1629}
1630
1631/**
1632 *	cleanup_module	-	free resources for an unload
1633 *
1634 *	Unloading time. We release the MCA bus resources and the interrupt
1635 *	at which point everything is ready to unload. The card must be stopped
1636 *	at this point or we would not have been called. When we unload we
1637 *	leave the card stopped but not totally shut down. When the card is
1638 *	initialized it must be rebooted or the rings reloaded before any
1639 *	transmit operations are allowed to start scribbling into memory.
1640 */
1641
void __exit cleanup_module(void)
{
	/* Tear down in strict reverse order of setup: detach from the
	   network layer, release the card's bus/IRQ resources, then
	   free the net_device itself. */
	unregister_netdev(this_device);
	cleanup_card(this_device);
	free_netdev(this_device);
}
1648
1649#endif /* MODULE */
1650