1/* $Id: sunlance.c,v 1.1.1.1 2008/10/15 03:26:40 james26_jang Exp $
2 * lance.c: Linux/Sparc/Lance driver
3 *
4 *	Written 1995, 1996 by Miguel de Icaza
5 * Sources:
6 *	The Linux  depca driver
7 *	The Linux  lance driver.
8 *	The Linux  skeleton driver.
9 *	The NetBSD Sparc/Lance driver.
10 *	Theo de Raadt (deraadt@openbsd.org)
11 *	NCR92C990 Lan Controller manual
12 *
13 * 1.4:
14 *	Added support to run with a ledma on the Sun4m
15 *
16 * 1.5:
17 *	Added multiple card detection.
18 *
19 *	 4/17/96: Burst sizes and tpe selection on sun4m by Eddie C. Dost
20 *		  (ecd@skynet.be)
21 *
22 *	 5/15/96: auto carrier detection on sun4m by Eddie C. Dost
23 *		  (ecd@skynet.be)
24 *
25 *	 5/17/96: lebuffer on scsi/ether cards now work David S. Miller
26 *		  (davem@caip.rutgers.edu)
27 *
28 *	 5/29/96: override option 'tpe-link-test?', if it is 'false', as
29 *		  this disables auto carrier detection on sun4m. Eddie C. Dost
30 *		  (ecd@skynet.be)
31 *
32 * 1.7:
33 *	 6/26/96: Bug fix for multiple ledmas, miguel.
34 *
35 * 1.8:
36 *		  Stole multicast code from depca.c, fixed lance_tx.
37 *
38 * 1.9:
39 *	 8/21/96: Fixed the multicast code (Pedro Roque)
40 *
41 *	 8/28/96: Send fake packet in lance_open() if auto_select is true,
42 *		  so we can detect the carrier loss condition in time.
43 *		  Eddie C. Dost (ecd@skynet.be)
44 *
45 *	 9/15/96: Align rx_buf so that eth_copy_and_sum() won't cause an
46 *		  MNA trap during chksum_partial_copy(). (ecd@skynet.be)
47 *
48 *	11/17/96: Handle LE_C0_MERR in lance_interrupt(). (ecd@skynet.be)
49 *
50 *	12/22/96: Don't loop forever in lance_rx() on incomplete packets.
51 *		  This was the sun4c killer. Shit, stupid bug.
52 *		  (ecd@skynet.be)
53 *
54 * 1.10:
55 *	 1/26/97: Modularize driver. (ecd@skynet.be)
56 *
57 * 1.11:
58 *	12/27/97: Added sun4d support. (jj@sunsite.mff.cuni.cz)
59 *
60 * 1.12:
61 * 	 11/3/99: Fixed SMP race in lance_start_xmit found by davem.
62 * 	          Anton Blanchard (anton@progsoc.uts.edu.au)
63 * 2.00: 11/9/99: Massive overhaul and port to new SBUS driver interfaces.
64 *		  David S. Miller (davem@redhat.com)
65 * 2.01:
66 *      11/08/01: Use library crc32 functions (Matt_Domsch@dell.com)
67 *
68 */
69
#undef DEBUG_DRIVER

/* Version banner; printed once at probe time. */
static char version[] =
	"sunlance.c:v2.01 08/Nov/01 Miguel de Icaza (miguel@nuclecu.unam.mx)\n";

/* Name handed to request_irq() and resource registration. */
static char lancestr[] = "LANCE";
76
77#include <linux/config.h>
78#include <linux/module.h>
79
80#include <linux/kernel.h>
81#include <linux/sched.h>
82#include <linux/types.h>
83#include <linux/fcntl.h>
84#include <linux/interrupt.h>
85#include <linux/ptrace.h>
86#include <linux/ioport.h>
87#include <linux/in.h>
88#include <linux/slab.h>
89#include <linux/string.h>
90#include <linux/delay.h>
91#include <linux/init.h>
92#include <linux/crc32.h>
93#include <asm/system.h>
94#include <asm/bitops.h>
95#include <asm/io.h>
96#include <asm/dma.h>
97#include <asm/pgtable.h>
98#include <linux/errno.h>
99#include <asm/byteorder.h>	/* Used by the checksum routines */
100
101/* Used for the temporal inet entries and routing */
102#include <linux/socket.h>
103#include <linux/route.h>
104
105#include <asm/idprom.h>
106#include <asm/sbus.h>
107#include <asm/openprom.h>
108#include <asm/oplib.h>
109#include <asm/auxio.h>		/* For tpe-link-test? setting */
110#include <asm/irq.h>
111
112#include <linux/netdevice.h>
113#include <linux/etherdevice.h>
114#include <linux/skbuff.h>
115
/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif

/* LANCE control/status register indices, selected by writing to RAP. */
#define LE_CSR0 0	/* Status and control */
#define LE_CSR1 1	/* Init block address, low word */
#define LE_CSR2 2	/* Init block address, high byte */
#define LE_CSR3 3	/* Bus-master interface mode */

#define LE_MO_PROM      0x8000  /* Enable promiscuous mode */

/* CSR0 status/control bits. */
#define	LE_C0_ERR	0x8000	/* Error: set if BAB, SQE, MISS or ME is set */
#define	LE_C0_BABL	0x4000	/* BAB:  Babble: tx timeout. */
#define	LE_C0_CERR	0x2000	/* SQE:  Signal quality error */
#define	LE_C0_MISS	0x1000	/* MISS: Missed a packet */
#define	LE_C0_MERR	0x0800	/* ME:   Memory error */
#define	LE_C0_RINT	0x0400	/* Received interrupt */
#define	LE_C0_TINT	0x0200	/* Transmitter Interrupt */
#define	LE_C0_IDON	0x0100	/* IFIN: Init finished. */
#define	LE_C0_INTR	0x0080	/* Interrupt or error */
#define	LE_C0_INEA	0x0040	/* Interrupt enable */
#define	LE_C0_RXON	0x0020	/* Receiver on */
#define	LE_C0_TXON	0x0010	/* Transmitter on */
#define	LE_C0_TDMD	0x0008	/* Transmitter demand */
#define	LE_C0_STOP	0x0004	/* Stop the card */
#define	LE_C0_STRT	0x0002	/* Start the card */
#define	LE_C0_INIT	0x0001	/* Init the card */

/* CSR3 bus-master mode bits. */
#define	LE_C3_BSWP	0x4     /* SWAP */
#define	LE_C3_ACON	0x2	/* ALE Control */
#define	LE_C3_BCON	0x1	/* Byte control */

/* Receive message descriptor 1 */
#define LE_R1_OWN       0x80    /* Who owns the entry */
#define LE_R1_ERR       0x40    /* Error: if FRA, OFL, CRC or BUF is set */
#define LE_R1_FRA       0x20    /* FRA: Frame error */
#define LE_R1_OFL       0x10    /* OFL: Frame overflow */
#define LE_R1_CRC       0x08    /* CRC error */
#define LE_R1_BUF       0x04    /* BUF: Buffer error */
#define LE_R1_SOP       0x02    /* Start of packet */
#define LE_R1_EOP       0x01    /* End of packet */
#define LE_R1_POK       0x03    /* Packet is complete: SOP + EOP */

/* Transmit message descriptor 1 bits. */
#define LE_T1_OWN       0x80    /* Lance owns the packet */
#define LE_T1_ERR       0x40    /* Error summary */
#define LE_T1_EMORE     0x10    /* Error: more than one retry needed */
#define LE_T1_EONE      0x08    /* Error: one retry needed */
#define LE_T1_EDEF      0x04    /* Error: deferred */
#define LE_T1_SOP       0x02    /* Start of packet */
#define LE_T1_EOP       0x01    /* End of packet */
#define LE_T1_POK	0x03	/* Packet is complete: SOP + EOP */

/* Transmit descriptor misc-word (tmd3) error bits. */
#define LE_T3_BUF       0x8000  /* Buffer error */
#define LE_T3_UFL       0x4000  /* Error underflow */
#define LE_T3_LCOL      0x1000  /* Error late collision */
#define LE_T3_CLOS      0x0800  /* Error carrier loss */
#define LE_T3_RTY       0x0400  /* Error retry */
#define LE_T3_TDR       0x03ff  /* Time Domain Reflectometry counter */

/* Ring geometry derived from the LOG sizes above.  The *_LEN_BITS
 * values land in bits 29-31 of the init block ring length words.
 */
#define TX_RING_SIZE			(1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK		(TX_RING_SIZE - 1)
#define TX_RING_LEN_BITS		((LANCE_LOG_TX_BUFFERS) << 29)
#define TX_NEXT(__x)			(((__x)+1) & TX_RING_MOD_MASK)

#define RX_RING_SIZE			(1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK		(RX_RING_SIZE - 1)
#define RX_RING_LEN_BITS		((LANCE_LOG_RX_BUFFERS) << 29)
#define RX_NEXT(__x)			(((__x)+1) & RX_RING_MOD_MASK)

/* One buffer per descriptor, large enough for a full ethernet frame. */
#define PKT_BUF_SZ		1544
#define RX_BUFF_SIZE            PKT_BUF_SZ
#define TX_BUFF_SIZE            PKT_BUF_SZ
190
/* Receive message descriptor, laid out exactly as the chip reads it
 * from the init block -- do not reorder or repack fields.
 */
struct lance_rx_desc {
	u16	rmd0;		/* low address of packet */
	u8	rmd1_bits;	/* descriptor bits */
	u8	rmd1_hadr;	/* high address of packet */
	s16	length;		/* This length is 2s complement (negative)!
				 * Buffer length
				 */
	u16	mblength;	/* This is the actual number of bytes received */
};
200
/* Transmit message descriptor, chip-defined layout -- do not reorder. */
struct lance_tx_desc {
	u16	tmd0;		/* low address of packet */
	u8 	tmd1_bits;	/* descriptor bits */
	u8 	tmd1_hadr;	/* high address of packet */
	s16 	length;		/* Length is 2s complement (negative)! */
	u16 	misc;		/* tmd3: error bits and TDR counter */
};
208
/* The LANCE initialization block, described in databook. */
/* On the Sparc, this block should be on a DMA region     */
/* The header layout (mode through tx_len) is fixed by the chip;
 * the rings and packet buffers that follow are placed here so a
 * single DVMA/PIO mapping covers everything the chip touches.
 */
struct lance_init_block {
	u16	mode;		/* Pre-set mode (reg. 15) */
	u8	phys_addr[6];	/* Physical ethernet address */
	u32	filter[2];	/* Multicast filter. */

	/* Receive and transmit ring base, along with extra bits. */
	u16	rx_ptr;		/* receive descriptor addr */
	u16	rx_len;		/* receive len and high addr */
	u16	tx_ptr;		/* transmit descriptor addr */
	u16	tx_len;		/* transmit len and high addr */

	/* The Tx and Rx ring entries must aligned on 8-byte boundaries. */
	struct lance_rx_desc brx_ring[RX_RING_SIZE];
	struct lance_tx_desc btx_ring[TX_RING_SIZE];

	u8	tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
	u8	pad[2];		/* align rx_buf for copy_and_sum(). */
	u8	rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
};
230
/* Byte offset of a ring descriptor (libdesc_offset) or packet buffer
 * (libbuff_offset) within struct lance_init_block.  Classic
 * cast-null-pointer offsetof idiom; results are fed to the chip,
 * which addresses everything relative to the init block base.
 */
#define libdesc_offset(rt, elem) \
((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem])))))

#define libbuff_offset(rt, elem) \
((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem][0])))))
236
/* Per-device driver state, hung off dev->priv. */
struct lance_private {
	unsigned long	lregs;		/* Lance RAP/RDP regs.		*/
	unsigned long	dregs;		/* DMA controller regs.		*/
	volatile struct lance_init_block *init_block;

	spinlock_t	lock;		/* guards tx-ring bookkeeping	*/

	int		rx_new, tx_new;	/* next ring slot to use	*/
	int		rx_old, tx_old;	/* oldest outstanding slot	*/

	struct net_device_stats	stats;
	struct sbus_dma *ledma;	/* If set this points to ledma	*/
	char		tpe;		/* cable-selection is TPE	*/
	char		auto_select;	/* cable-selection by carrier	*/
	char		burst_sizes;	/* ledma SBus burst sizes	*/
	char		pio_buffer;	/* init block in PIO space?	*/

	unsigned short	busmaster_regval;

	/* dvma vs pio method variants, chosen at probe time. */
	void (*init_ring)(struct net_device *);
	void (*rx)(struct net_device *);
	void (*tx)(struct net_device *);

	char	       	       *name;
	dma_addr_t		init_block_dvma;
	struct net_device      *dev;		  /* Backpointer	*/
	struct lance_private   *next_module;
	struct sbus_dev	       *sdev;
	struct timer_list       multicast_timer;
};
267
/* Free tx descriptors.  Always keeps one slot empty so a full ring is
 * distinguishable from an empty one.  Evaluates 'lp' from the
 * enclosing scope.
 */
#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
			lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
			lp->tx_old - lp->tx_new-1)

/* Lance registers. */
#define RDP		0x00UL		/* register data port		*/
#define RAP		0x02UL		/* register address port	*/
#define LANCE_REG_SIZE	0x04UL

/* Halt the chip: select CSR0 via RAP, then set the STOP bit. */
#define STOP_LANCE(__lp) \
do {	unsigned long __base = (__lp)->lregs; \
	sbus_writew(LE_CSR0,	__base + RAP); \
	sbus_writew(LE_C0_STOP,	__base + RDP); \
} while (0)

int sparc_lance_debug = 2;

/* The Lance uses 24 bit addresses */
/* On the Sun4c the DVMA will provide the remaining bytes for us */
/* On the Sun4m we have to instruct the ledma to provide them    */
/* Even worse, on scsi/ether SBUS cards, the init block and the
 * transmit/receive buffers are addresses as offsets from absolute
 * zero on the lebuffer PIO area. -DaveM
 */

#define LANCE_ADDR(x) ((long)(x) & ~0xff000000)

/* Head of the list of probed devices, linked via next_module. */
static struct lance_private *root_lance_dev;
296
/* Load the CSR registers */
/* Program CSR1/CSR2 with the init block address (0 when the block
 * lives at offset 0 of a lebuffer PIO area, otherwise the 24-bit
 * masked DVMA address) and CSR3 with the bus-master mode, then leave
 * RAP pointing back at CSR0 for subsequent status accesses.
 */
static void load_csrs(struct lance_private *lp)
{
	u32 leptr;

	if (lp->pio_buffer)
		leptr = 0;
	else
		leptr = LANCE_ADDR(lp->init_block_dvma);

	sbus_writew(LE_CSR1,		  lp->lregs + RAP);
	sbus_writew(leptr & 0xffff,	  lp->lregs + RDP);
	sbus_writew(LE_CSR2,		  lp->lregs + RAP);
	sbus_writew(leptr >> 16,	  lp->lregs + RDP);
	sbus_writew(LE_CSR3,		  lp->lregs + RAP);
	sbus_writew(lp->busmaster_regval, lp->lregs + RDP);

	/* Point back to csr0 */
	sbus_writew(LE_CSR0, lp->lregs + RAP);
}
317
318/* Setup the Lance Rx and Tx rings */
319static void lance_init_ring_dvma(struct net_device *dev)
320{
321	struct lance_private *lp = (struct lance_private *) dev->priv;
322	volatile struct lance_init_block *ib = lp->init_block;
323	dma_addr_t aib = lp->init_block_dvma;
324	__u32 leptr;
325	int i;
326
327	/* Lock out other processes while setting up hardware */
328	netif_stop_queue(dev);
329	lp->rx_new = lp->tx_new = 0;
330	lp->rx_old = lp->tx_old = 0;
331
332	/* Copy the ethernet address to the lance init block
333	 * Note that on the sparc you need to swap the ethernet address.
334	 */
335	ib->phys_addr [0] = dev->dev_addr [1];
336	ib->phys_addr [1] = dev->dev_addr [0];
337	ib->phys_addr [2] = dev->dev_addr [3];
338	ib->phys_addr [3] = dev->dev_addr [2];
339	ib->phys_addr [4] = dev->dev_addr [5];
340	ib->phys_addr [5] = dev->dev_addr [4];
341
342	/* Setup the Tx ring entries */
343	for (i = 0; i <= TX_RING_SIZE; i++) {
344		leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i));
345		ib->btx_ring [i].tmd0      = leptr;
346		ib->btx_ring [i].tmd1_hadr = leptr >> 16;
347		ib->btx_ring [i].tmd1_bits = 0;
348		ib->btx_ring [i].length    = 0xf000; /* The ones required by tmd2 */
349		ib->btx_ring [i].misc      = 0;
350	}
351
352	/* Setup the Rx ring entries */
353	for (i = 0; i < RX_RING_SIZE; i++) {
354		leptr = LANCE_ADDR(aib + libbuff_offset(rx_buf, i));
355
356		ib->brx_ring [i].rmd0      = leptr;
357		ib->brx_ring [i].rmd1_hadr = leptr >> 16;
358		ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
359		ib->brx_ring [i].length    = -RX_BUFF_SIZE | 0xf000;
360		ib->brx_ring [i].mblength  = 0;
361	}
362
363	/* Setup the initialization block */
364
365	/* Setup rx descriptor pointer */
366	leptr = LANCE_ADDR(aib + libdesc_offset(brx_ring, 0));
367	ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16);
368	ib->rx_ptr = leptr;
369
370	/* Setup tx descriptor pointer */
371	leptr = LANCE_ADDR(aib + libdesc_offset(btx_ring, 0));
372	ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16);
373	ib->tx_ptr = leptr;
374}
375
376static void lance_init_ring_pio(struct net_device *dev)
377{
378	struct lance_private *lp = (struct lance_private *) dev->priv;
379	volatile struct lance_init_block *ib = lp->init_block;
380	u32 leptr;
381	int i;
382
383	/* Lock out other processes while setting up hardware */
384	netif_stop_queue(dev);
385	lp->rx_new = lp->tx_new = 0;
386	lp->rx_old = lp->tx_old = 0;
387
388	/* Copy the ethernet address to the lance init block
389	 * Note that on the sparc you need to swap the ethernet address.
390	 */
391	sbus_writeb(dev->dev_addr[1], &ib->phys_addr[0]);
392	sbus_writeb(dev->dev_addr[0], &ib->phys_addr[1]);
393	sbus_writeb(dev->dev_addr[3], &ib->phys_addr[2]);
394	sbus_writeb(dev->dev_addr[2], &ib->phys_addr[3]);
395	sbus_writeb(dev->dev_addr[5], &ib->phys_addr[4]);
396	sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]);
397
398	/* Setup the Tx ring entries */
399	for (i = 0; i <= TX_RING_SIZE; i++) {
400		leptr = libbuff_offset(tx_buf, i);
401		sbus_writew(leptr,	&ib->btx_ring [i].tmd0);
402		sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr);
403		sbus_writeb(0,		&ib->btx_ring [i].tmd1_bits);
404
405		/* The ones required by tmd2 */
406		sbus_writew(0xf000,	&ib->btx_ring [i].length);
407		sbus_writew(0,		&ib->btx_ring [i].misc);
408	}
409
410	/* Setup the Rx ring entries */
411	for (i = 0; i < RX_RING_SIZE; i++) {
412		leptr = libbuff_offset(rx_buf, i);
413
414		sbus_writew(leptr,	&ib->brx_ring [i].rmd0);
415		sbus_writeb(leptr >> 16,&ib->brx_ring [i].rmd1_hadr);
416		sbus_writeb(LE_R1_OWN,	&ib->brx_ring [i].rmd1_bits);
417		sbus_writew(-RX_BUFF_SIZE|0xf000,
418			    &ib->brx_ring [i].length);
419		sbus_writew(0,		&ib->brx_ring [i].mblength);
420	}
421
422	/* Setup the initialization block */
423
424	/* Setup rx descriptor pointer */
425	leptr = libdesc_offset(brx_ring, 0);
426	sbus_writew((LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16),
427		    &ib->rx_len);
428	sbus_writew(leptr, &ib->rx_ptr);
429
430	/* Setup tx descriptor pointer */
431	leptr = libdesc_offset(btx_ring, 0);
432	sbus_writew((LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16),
433		    &ib->tx_len);
434	sbus_writew(leptr, &ib->tx_ptr);
435}
436
/* Prepare the sun4m ledma engine before (re)starting the LANCE:
 * wait for the FIFO to drain, program the SBus burst size, set the
 * cable selection, and invalidate the FIFO.
 */
static void init_restart_ledma(struct lance_private *lp)
{
	u32 csr = sbus_readl(lp->dregs + DMA_CSR);

	if (!(csr & DMA_HNDL_ERROR)) {
		/* E-Cache draining */
		while (sbus_readl(lp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN)
			barrier();
	}

	csr = sbus_readl(lp->dregs + DMA_CSR);
	csr &= ~DMA_E_BURSTS;
	if (lp->burst_sizes & DMA_BURST32)
		csr |= DMA_E_BURST32;
	else
		csr |= DMA_E_BURST16;

	csr |= (DMA_DSBL_RD_DRN | DMA_DSBL_WR_INV | DMA_FIFO_INV);

	/* NOTE(review): DMA_EN_ENETAUI is set for TPE here despite the
	 * "AUI" name -- presumably the bit is active-low AUI select;
	 * confirm against the ledma documentation.
	 */
	if (lp->tpe)
		csr |= DMA_EN_ENETAUI;
	else
		csr &= ~DMA_EN_ENETAUI;
	udelay(20);
	sbus_writel(csr, lp->dregs + DMA_CSR);
	udelay(200);
}
464
/* Kick off chip initialization and wait (bounded busy-wait) for it to
 * complete, then start the chip with interrupts enabled.
 * Returns 0 on success, -1 if init timed out or errored.
 */
static int init_restart_lance(struct lance_private *lp)
{
	u16 regval = 0;
	int i;

	if (lp->dregs)
		init_restart_ledma(lp);

	sbus_writew(LE_CSR0,	lp->lregs + RAP);
	sbus_writew(LE_C0_INIT,	lp->lregs + RDP);

	/* Wait for the lance to complete initialization */
	for (i = 0; i < 100; i++) {
		regval = sbus_readw(lp->lregs + RDP);

		if (regval & (LE_C0_ERR | LE_C0_IDON))
			break;
		barrier();
	}
	if (i == 100 || (regval & LE_C0_ERR)) {
		printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n",
		       i, regval);
		if (lp->dregs)
			printk("dcsr=%8.8x\n", sbus_readl(lp->dregs + DMA_CSR));
		return -1;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	sbus_writew(LE_C0_IDON,			lp->lregs + RDP);
	sbus_writew(LE_C0_INEA | LE_C0_STRT,	lp->lregs + RDP);

	/* Also let the ledma raise interrupts, if present. */
	if (lp->dregs) {
		u32 csr = sbus_readl(lp->dregs + DMA_CSR);

		csr |= DMA_INT_ENAB;
		sbus_writel(csr, lp->dregs + DMA_CSR);
	}

	return 0;
}
505
/* Drain all completed frames from the rx ring (DVMA variant).
 * Called from lance_interrupt().  Walks descriptors until one still
 * owned by the chip is found; each processed descriptor is handed
 * back to the chip by setting LE_R1_OWN.
 */
static void lance_rx_dvma(struct net_device *dev)
{
	struct lance_private *lp = (struct lance_private *) dev->priv;
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_rx_desc *rd;
	u8 bits;
	int len, entry = lp->rx_new;
	struct sk_buff *skb;

	for (rd = &ib->brx_ring [entry];
	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
	     rd = &ib->brx_ring [entry]) {

		/* We got an incomplete frame? */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			lp->stats.rx_over_errors++;
			lp->stats.rx_errors++;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
			if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP) lp->stats.rx_errors++;
		} else {
			/* -4 strips the trailing FCS the chip stores. */
			len = (rd->mblength & 0xfff) - 4;
			skb = dev_alloc_skb(len + 2);

			if (skb == NULL) {
				/* Out of memory: drop this frame but
				 * recycle its descriptor so rx keeps going.
				 */
				printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
				       dev->name);
				lp->stats.rx_dropped++;
				rd->mblength = 0;
				rd->rmd1_bits = LE_R1_OWN;
				lp->rx_new = RX_NEXT(entry);
				return;
			}

			lp->stats.rx_bytes += len;

			skb->dev = dev;
			skb_reserve(skb, 2);		/* 16 byte align */
			skb_put(skb, len);		/* make room */
			eth_copy_and_sum(skb,
					 (unsigned char *)&(ib->rx_buf [entry][0]),
					 len, 0);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			lp->stats.rx_packets++;
		}

		/* Return the packet to the pool */
		rd->mblength = 0;
		rd->rmd1_bits = LE_R1_OWN;
		entry = RX_NEXT(entry);
	}

	lp->rx_new = entry;
}
568
/* Reclaim completed tx descriptors (DVMA variant), update statistics,
 * and restart the chip on fatal transmit errors (carrier loss with
 * auto-select, buffer error, underflow).  Takes lp->lock; called from
 * lance_interrupt().
 */
static void lance_tx_dvma(struct net_device *dev)
{
	struct lance_private *lp = (struct lance_private *) dev->priv;
	volatile struct lance_init_block *ib = lp->init_block;
	int i, j;

	spin_lock(&lp->lock);

	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		volatile struct lance_tx_desc *td = &ib->btx_ring [i];
		u8 bits = td->tmd1_bits;

		/* If we hit a packet not owned by us, stop */
		if (bits & LE_T1_OWN)
			break;

		if (bits & LE_T1_ERR) {
			u16 status = td->misc;

			lp->stats.tx_errors++;
			if (status & LE_T3_RTY)  lp->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				lp->stats.tx_carrier_errors++;
				/* Toggle TPE/AUI and restart, hoping the
				 * other connector has carrier.
				 */
				if (lp->auto_select) {
					lp->tpe = 1 - lp->tpe;
					printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
					       dev->name, lp->tpe?"TPE":"AUI");
					STOP_LANCE(lp);
					lp->init_ring(dev);
					load_csrs(lp);
					init_restart_lance(lp);
					goto out;
				}
			}

			/* Buffer errors and underflows turn off the
			 * transmitter, restart the adapter.
			 */
			if (status & (LE_T3_BUF|LE_T3_UFL)) {
				lp->stats.tx_fifo_errors++;

				printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
				       dev->name);
				STOP_LANCE(lp);
				lp->init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				goto out;
			}
		} else if ((bits & LE_T1_POK) == LE_T1_POK) {
			/*
			 * So we don't count the packet more than once.
			 */
			td->tmd1_bits = bits & ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (bits & LE_T1_EONE)
				lp->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (bits & LE_T1_EMORE)
				lp->stats.collisions += 2;

			lp->stats.tx_packets++;
		}

		j = TX_NEXT(j);
	}
	lp->tx_old = j;
out:
	/* Descriptors were freed (or the ring was rebuilt); let the
	 * stack transmit again if it had been throttled.
	 */
	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL > 0)
		netif_wake_queue(dev);

	spin_unlock(&lp->lock);
}
648
649static void lance_piocopy_to_skb(struct sk_buff *skb, volatile void *piobuf, int len)
650{
651	u16 *p16 = (u16 *) skb->data;
652	u32 *p32;
653	u8 *p8;
654	unsigned long pbuf = (unsigned long) piobuf;
655
656	/* We know here that both src and dest are on a 16bit boundry. */
657	*p16++ = sbus_readw(pbuf);
658	p32 = (u32 *) p16;
659	pbuf += 2;
660	len -= 2;
661
662	while (len >= 4) {
663		*p32++ = sbus_readl(pbuf);
664		pbuf += 4;
665		len -= 4;
666	}
667	p8 = (u8 *) p32;
668	if (len >= 2) {
669		p16 = (u16 *) p32;
670		*p16++ = sbus_readw(pbuf);
671		pbuf += 2;
672		len -= 2;
673		p8 = (u8 *) p16;
674	}
675	if (len >= 1)
676		*p8 = sbus_readb(pbuf);
677}
678
/* Drain all completed frames from the rx ring (PIO variant).  Same
 * protocol as lance_rx_dvma(), but every descriptor and buffer access
 * goes through sbus_read*/sbus_write*().
 */
static void lance_rx_pio(struct net_device *dev)
{
	struct lance_private *lp = (struct lance_private *) dev->priv;
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_rx_desc *rd;
	unsigned char bits;
	int len, entry;
	struct sk_buff *skb;

	entry = lp->rx_new;
	for (rd = &ib->brx_ring [entry];
	     !((bits = sbus_readb(&rd->rmd1_bits)) & LE_R1_OWN);
	     rd = &ib->brx_ring [entry]) {

		/* We got an incomplete frame? */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			lp->stats.rx_over_errors++;
			lp->stats.rx_errors++;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
			if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP) lp->stats.rx_errors++;
		} else {
			/* -4 strips the trailing FCS the chip stores. */
			len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
			skb = dev_alloc_skb(len + 2);

			if (skb == NULL) {
				/* Out of memory: drop this frame but
				 * recycle its descriptor so rx keeps going.
				 */
				printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
				       dev->name);
				lp->stats.rx_dropped++;
				sbus_writew(0, &rd->mblength);
				sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
				lp->rx_new = RX_NEXT(entry);
				return;
			}

			lp->stats.rx_bytes += len;

			skb->dev = dev;
			skb_reserve (skb, 2);		/* 16 byte align */
			skb_put(skb, len);		/* make room */
			lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			lp->stats.rx_packets++;
		}

		/* Return the packet to the pool */
		sbus_writew(0, &rd->mblength);
		sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
		entry = RX_NEXT(entry);
	}

	lp->rx_new = entry;
}
740
741static void lance_tx_pio(struct net_device *dev)
742{
743	struct lance_private *lp = (struct lance_private *) dev->priv;
744	volatile struct lance_init_block *ib = lp->init_block;
745	int i, j;
746
747	spin_lock(&lp->lock);
748
749	j = lp->tx_old;
750	for (i = j; i != lp->tx_new; i = j) {
751		volatile struct lance_tx_desc *td = &ib->btx_ring [i];
752		u8 bits = sbus_readb(&td->tmd1_bits);
753
754		/* If we hit a packet not owned by us, stop */
755		if (bits & LE_T1_OWN)
756			break;
757
758		if (bits & LE_T1_ERR) {
759			u16 status = sbus_readw(&td->misc);
760
761			lp->stats.tx_errors++;
762			if (status & LE_T3_RTY)  lp->stats.tx_aborted_errors++;
763			if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;
764
765			if (status & LE_T3_CLOS) {
766				lp->stats.tx_carrier_errors++;
767				if (lp->auto_select) {
768					lp->tpe = 1 - lp->tpe;
769					printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
770					       dev->name, lp->tpe?"TPE":"AUI");
771					STOP_LANCE(lp);
772					lp->init_ring(dev);
773					load_csrs(lp);
774					init_restart_lance(lp);
775					goto out;
776				}
777			}
778
779			/* Buffer errors and underflows turn off the
780			 * transmitter, restart the adapter.
781			 */
782			if (status & (LE_T3_BUF|LE_T3_UFL)) {
783				lp->stats.tx_fifo_errors++;
784
785				printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
786				       dev->name);
787				STOP_LANCE(lp);
788				lp->init_ring(dev);
789				load_csrs(lp);
790				init_restart_lance(lp);
791				goto out;
792			}
793		} else if ((bits & LE_T1_POK) == LE_T1_POK) {
794			/*
795			 * So we don't count the packet more than once.
796			 */
797			sbus_writeb(bits & ~(LE_T1_POK), &td->tmd1_bits);
798
799			/* One collision before packet was sent. */
800			if (bits & LE_T1_EONE)
801				lp->stats.collisions++;
802
803			/* More than one collision, be optimistic. */
804			if (bits & LE_T1_EMORE)
805				lp->stats.collisions += 2;
806
807			lp->stats.tx_packets++;
808		}
809
810		j = TX_NEXT(j);
811	}
812	lp->tx_old = j;
813
814	if (netif_queue_stopped(dev) &&
815	    TX_BUFFS_AVAIL > 0)
816		netif_wake_queue(dev);
817out:
818	spin_unlock(&lp->lock);
819}
820
/* Interrupt handler: acknowledge CSR0 interrupt sources, dispatch to
 * the rx/tx method variants, account error bits, and on a memory
 * error (LE_C0_MERR) fully reinitialize the chip.  Re-enables chip
 * interrupts (INEA) on exit.
 */
static void lance_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct lance_private *lp = (struct lance_private *)dev->priv;
	int csr0;

	sbus_writew(LE_CSR0, lp->lregs + RAP);
	csr0 = sbus_readw(lp->lregs + RDP);

	/* Acknowledge all the interrupt sources ASAP */
	sbus_writew(csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT),
		    lp->lregs + RDP);

	if ((csr0 & LE_C0_ERR) != 0) {
		/* Clear the error condition */
		sbus_writew((LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
			     LE_C0_CERR | LE_C0_MERR),
			    lp->lregs + RDP);
	}

	if (csr0 & LE_C0_RINT)
		lp->rx(dev);

	if (csr0 & LE_C0_TINT)
		lp->tx(dev);

	if (csr0 & LE_C0_BABL)
		lp->stats.tx_errors++;

	if (csr0 & LE_C0_MISS)
		lp->stats.rx_errors++;

	if (csr0 & LE_C0_MERR) {
		if (lp->dregs) {
			u32 addr = sbus_readl(lp->dregs + DMA_ADDR);

			printk(KERN_ERR "%s: Memory error, status %04x, addr %06x\n",
			       dev->name, csr0, addr & 0xffffff);
		} else {
			printk(KERN_ERR "%s: Memory error, status %04x\n",
			       dev->name, csr0);
		}

		sbus_writew(LE_C0_STOP, lp->lregs + RDP);

		/* Invalidate the ledma FIFO before restarting. */
		if (lp->dregs) {
			u32 dma_csr = sbus_readl(lp->dregs + DMA_CSR);

			dma_csr |= DMA_FIFO_INV;
			sbus_writel(dma_csr, lp->dregs + DMA_CSR);
		}

		lp->init_ring(dev);
		load_csrs(lp);
		init_restart_lance(lp);
		netif_wake_queue(dev);
	}

	sbus_writew(LE_C0_INEA, lp->lregs + RDP);
}
881
/* Build a fake network packet and send it to ourselves. */
/* Queues a minimal (ETH_ZLEN) self-addressed frame on the tx ring so
 * that, right after open with auto_select, carrier loss shows up as a
 * tx error immediately instead of waiting for real traffic.  The
 * caller kicks the transmitter (LE_C0_TDMD).
 */
static void build_fake_packet(struct lance_private *lp)
{
	struct net_device *dev = lp->dev;
	volatile struct lance_init_block *ib = lp->init_block;
	u16 *packet;
	struct ethhdr *eth;
	int i, entry;

	entry = lp->tx_new & TX_RING_MOD_MASK;
	packet = (u16 *) &(ib->tx_buf[entry][0]);
	eth = (struct ethhdr *) packet;
	if (lp->pio_buffer) {
		/* PIO buffer: zero the frame and fill the header with
		 * explicit sbus stores.
		 */
		for (i = 0; i < (ETH_ZLEN / sizeof(u16)); i++)
			sbus_writew(0, &packet[i]);
		for (i = 0; i < 6; i++) {
			sbus_writeb(dev->dev_addr[i], &eth->h_dest[i]);
			sbus_writeb(dev->dev_addr[i], &eth->h_source[i]);
		}
		sbus_writew((-ETH_ZLEN) | 0xf000, &ib->btx_ring[entry].length);
		sbus_writew(0, &ib->btx_ring[entry].misc);
		sbus_writeb(LE_T1_POK|LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits);
	} else {
		memset(packet, 0, ETH_ZLEN);
		for (i = 0; i < 6; i++) {
			eth->h_dest[i] = dev->dev_addr[i];
			eth->h_source[i] = dev->dev_addr[i];
		}
		ib->btx_ring[entry].length = (-ETH_ZLEN) | 0xf000;
		ib->btx_ring[entry].misc = 0;
		/* Hand the descriptor to the chip last. */
		ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
	}
	lp->tx_new = TX_NEXT(entry);
}
916
/* Most recently opened device; debugging aid (not read in this file).
 * Idiom fix: initialize the pointer with NULL rather than 0.
 */
struct net_device *last_dev = NULL;
918
/* Bring the interface up: grab the (shared) IRQ, program the ledma
 * with the high address byte of the buffers, clear mode/multicast
 * filter, build the rings, and start the chip.  With auto_select a
 * fake self-addressed packet is queued so carrier loss is detected
 * right away.  Returns 0 on success or a negative errno.
 */
static int lance_open(struct net_device *dev)
{
	struct lance_private *lp = (struct lance_private *)dev->priv;
	volatile struct lance_init_block *ib = lp->init_block;
	int status = 0;

	last_dev = dev;

	STOP_LANCE(lp);

	if (request_irq(dev->irq, &lance_interrupt, SA_SHIRQ,
			lancestr, (void *) dev)) {
		printk(KERN_ERR "Lance: Can't get irq %s\n", __irq_itoa(dev->irq));
		return -EAGAIN;
	}

	/* On the 4m, setup the ledma to provide the upper bits for buffers */
	if (lp->dregs) {
		u32 regval = lp->init_block_dvma & 0xff000000;

		sbus_writel(regval, lp->dregs + DMA_TEST);
	}

	/* Set mode and clear multicast filter only at device open,
	 * so that lance_init_ring() called at any error will not
	 * forget multicast filters.
	 *
	 * BTW it is common bug in all lance drivers! --ANK
	 */
	if (lp->pio_buffer) {
		sbus_writew(0, &ib->mode);
		sbus_writel(0, &ib->filter[0]);
		sbus_writel(0, &ib->filter[1]);
	} else {
		ib->mode = 0;
		ib->filter [0] = 0;
		ib->filter [1] = 0;
	}

	lp->init_ring(dev);
	load_csrs(lp);

	netif_start_queue(dev);

	status = init_restart_lance(lp);
	if (!status && lp->auto_select) {
		build_fake_packet(lp);
		/* Demand transmission of the fake packet now. */
		sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP);
	}

	return status;
}
971
/* Bring the interface down: stop the queue, kill the pending
 * multicast timer, halt the chip, and release the IRQ.
 */
static int lance_close(struct net_device *dev)
{
	struct lance_private *lp = (struct lance_private *) dev->priv;

	netif_stop_queue(dev);
	del_timer_sync(&lp->multicast_timer);

	STOP_LANCE(lp);

	free_irq(dev->irq, (void *) dev);
	return 0;
}
984
/* Full chip reset: stop the LANCE, hard-reset the ledma (if present)
 * and reprogram its buffer high-address byte, rebuild the rings, and
 * restart.  Returns init_restart_lance()'s status (0 or -1).
 */
static int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = (struct lance_private *) dev->priv;
	int status;

	STOP_LANCE(lp);

	/* On the 4m, reset the dma too */
	if (lp->dregs) {
		u32 csr, addr;

		printk(KERN_ERR "resetting ledma\n");
		csr = sbus_readl(lp->dregs + DMA_CSR);
		sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
		udelay(200);
		sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR);

		addr = lp->init_block_dvma & 0xff000000;
		sbus_writel(addr, lp->dregs + DMA_TEST);
	}
	lp->init_ring(dev);
	load_csrs(lp);
	/* Reset the tx watchdog timestamp. */
	dev->trans_start = jiffies;
	status = init_restart_lance(lp);
	return status;
}
1011
1012static void lance_piocopy_from_skb(volatile void *dest, unsigned char *src, int len)
1013{
1014	unsigned long piobuf = (unsigned long) dest;
1015	u32 *p32;
1016	u16 *p16;
1017	u8 *p8;
1018
1019	switch ((unsigned long)src & 0x3) {
1020	case 0:
1021		p32 = (u32 *) src;
1022		while (len >= 4) {
1023			sbus_writel(*p32, piobuf);
1024			p32++;
1025			piobuf += 4;
1026			len -= 4;
1027		}
1028		src = (char *) p32;
1029		break;
1030	case 1:
1031	case 3:
1032		p8 = (u8 *) src;
1033		while (len >= 4) {
1034			u32 val;
1035
1036			val  = p8[0] << 24;
1037			val |= p8[1] << 16;
1038			val |= p8[2] << 8;
1039			val |= p8[3];
1040			sbus_writel(val, piobuf);
1041			p8 += 4;
1042			piobuf += 4;
1043			len -= 4;
1044		}
1045		src = (char *) p8;
1046		break;
1047	case 2:
1048		p16 = (u16 *) src;
1049		while (len >= 4) {
1050			u32 val = p16[0]<<16 | p16[1];
1051			sbus_writel(val, piobuf);
1052			p16 += 2;
1053			piobuf += 4;
1054			len -= 4;
1055		}
1056		src = (char *) p16;
1057		break;
1058	};
1059	if (len >= 2) {
1060		u16 val = src[0] << 8 | src[1];
1061		sbus_writew(val, piobuf);
1062		src += 2;
1063		piobuf += 2;
1064		len -= 2;
1065	}
1066	if (len >= 1)
1067		sbus_writeb(src[0], piobuf);
1068}
1069
/* Zero-fill a PIO transmit buffer region using sbus_* accessors,
 * aligning up to word stores for the bulk of the region.
 */
static void lance_piozero(volatile void *dest, int len)
{
	unsigned long p = (unsigned long) dest;

	/* Align the destination to a 2-byte boundary. */
	if (p & 1) {
		sbus_writeb(0, p);
		p += 1;
		len -= 1;
		if (len == 0)
			return;
	}
	/* A single remaining byte cannot take a halfword store. */
	if (len == 1) {
		sbus_writeb(0, p);
		return;
	}
	/* Align the destination to a 4-byte boundary. */
	if (p & 2) {
		sbus_writew(0, p);
		p += 2;
		len -= 2;
		if (len == 0)
			return;
	}
	/* Clear the bulk with word stores, then mop up the tail. */
	while (len >= 4) {
		sbus_writel(0, p);
		p += 4;
		len -= 4;
	}
	if (len >= 2) {
		sbus_writew(0, p);
		p += 2;
		len -= 2;
	}
	if (len >= 1)
		sbus_writeb(0, p);
}
1105
1106static void lance_tx_timeout(struct net_device *dev)
1107{
1108	struct lance_private *lp = (struct lance_private *) dev->priv;
1109
1110	printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
1111	       dev->name, sbus_readw(lp->lregs + RDP));
1112	lance_reset(dev);
1113	netif_wake_queue(dev);
1114}
1115
/* Queue one packet for transmission.
 *
 * Copies the skb into the next free slot of the transmit ring (via
 * PIO accessors for lebuffer cards, plain memcpy for DVMA cards),
 * hands the descriptor to the chip by setting the OWN bit, and kicks
 * the transmitter.  Runs under lp->lock with local interrupts off.
 * Always returns 0 and consumes the skb.
 */
static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = (struct lance_private *) dev->priv;
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen, len;

	skblen = skb->len;

	/* Pad short frames up to the Ethernet minimum length. */
	len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;

	spin_lock_irq(&lp->lock);

	lp->stats.tx_bytes += len;

	entry = lp->tx_new & TX_RING_MOD_MASK;
	if (lp->pio_buffer) {
		/* Descriptor length is stored negated; 0xf000 forces the
		 * required high bits of the length field.
		 */
		sbus_writew((-len) | 0xf000, &ib->btx_ring[entry].length);
		sbus_writew(0, &ib->btx_ring[entry].misc);
		lance_piocopy_from_skb(&ib->tx_buf[entry][0], skb->data, skblen);
		if (len != skblen)
			lance_piozero(&ib->tx_buf[entry][skblen], len - skblen);
		/* Setting OWN last hands the buffer to the chip. */
		sbus_writeb(LE_T1_POK | LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits);
	} else {
		ib->btx_ring [entry].length = (-len) | 0xf000;
		ib->btx_ring [entry].misc = 0;
		memcpy((char *)&ib->tx_buf [entry][0], skb->data, skblen);
		if (len != skblen)
			memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen);
		ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
	}

	lp->tx_new = TX_NEXT(entry);

	/* Ring full: stop the queue until completed slots are reaped. */
	if (TX_BUFFS_AVAIL <= 0)
		netif_stop_queue(dev);

	/* Kick the lance: transmit now */
	sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP);

	/* Read back CSR to invalidate the E-Cache.
	 * This is needed, because DMA_DSBL_WR_INV is set.
	 */
	if (lp->dregs)
		sbus_readw(lp->lregs + RDP);

	spin_unlock_irq(&lp->lock);

	dev->trans_start = jiffies;
	dev_kfree_skb(skb);

	return 0;
}
1168
1169static struct net_device_stats *lance_get_stats(struct net_device *dev)
1170{
1171	struct lance_private *lp = (struct lance_private *) dev->priv;
1172
1173	return &lp->stats;
1174}
1175
1176/* taken from the depca driver */
1177static void lance_load_multicast(struct net_device *dev)
1178{
1179	struct lance_private *lp = (struct lance_private *) dev->priv;
1180	volatile struct lance_init_block *ib = lp->init_block;
1181	volatile u16 *mcast_table = (u16 *) &ib->filter;
1182	struct dev_mc_list *dmi = dev->mc_list;
1183	char *addrs;
1184	int i;
1185	u32 crc;
1186
1187	/* set all multicast bits */
1188	if (dev->flags & IFF_ALLMULTI) {
1189		if (lp->pio_buffer) {
1190			sbus_writel(0xffffffff, &ib->filter[0]);
1191			sbus_writel(0xffffffff, &ib->filter[1]);
1192		} else {
1193			ib->filter [0] = 0xffffffff;
1194			ib->filter [1] = 0xffffffff;
1195		}
1196		return;
1197	}
1198	/* clear the multicast filter */
1199	if (lp->pio_buffer) {
1200		sbus_writel(0, &ib->filter[0]);
1201		sbus_writel(0, &ib->filter[1]);
1202	} else {
1203		ib->filter [0] = 0;
1204		ib->filter [1] = 0;
1205	}
1206
1207	/* Add addresses */
1208	for (i = 0; i < dev->mc_count; i++) {
1209		addrs = dmi->dmi_addr;
1210		dmi   = dmi->next;
1211
1212		/* multicast address? */
1213		if (!(*addrs & 1))
1214			continue;
1215		crc = ether_crc_le(6, addrs);
1216		crc = crc >> 26;
1217		if (lp->pio_buffer) {
1218			u16 tmp = sbus_readw(&mcast_table[crc>>4]);
1219			tmp |= 1 << (crc & 0xf);
1220			sbus_writew(tmp, &mcast_table[crc>>4]);
1221		} else {
1222			mcast_table [crc >> 4] |= 1 << (crc & 0xf);
1223		}
1224	}
1225}
1226
/* Install the current multicast/promiscuous configuration.
 *
 * The chip must be idle to rewrite the init block, so if transmits
 * are still outstanding we defer via multicast_timer (this function
 * can be invoked from interrupt context, where we cannot wait).
 * Otherwise: stop the chip, rebuild the rings, update the mode word
 * and (unless promiscuous) the hash filter, then restart.
 */
static void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = (struct lance_private *) dev->priv;
	volatile struct lance_init_block *ib = lp->init_block;
	u16 mode;

	if (!netif_running(dev))
		return;

	/* Transmits pending: retry shortly instead of blocking. */
	if (lp->tx_old != lp->tx_new) {
		mod_timer(&lp->multicast_timer, jiffies + 4);
		netif_wake_queue(dev);
		return;
	}

	netif_stop_queue(dev);

	STOP_LANCE(lp);
	lp->init_ring(dev);

	/* Read-modify-write the mode word through the proper accessor. */
	if (lp->pio_buffer)
		mode = sbus_readw(&ib->mode);
	else
		mode = ib->mode;
	if (dev->flags & IFF_PROMISC) {
		mode |= LE_MO_PROM;
		if (lp->pio_buffer)
			sbus_writew(mode, &ib->mode);
		else
			ib->mode = mode;
	} else {
		mode &= ~LE_MO_PROM;
		if (lp->pio_buffer)
			sbus_writew(mode, &ib->mode);
		else
			ib->mode = mode;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);
	netif_wake_queue(dev);
}
1269
/* Timer callback: retry a multicast update deferred by a busy chip. */
static void lance_set_multicast_retry(unsigned long _opaque)
{
	lance_set_multicast((struct net_device *) _opaque);
}
1276
1277static void lance_free_hwresources(struct lance_private *lp)
1278{
1279	if (lp->lregs)
1280		sbus_iounmap(lp->lregs, LANCE_REG_SIZE);
1281	if (lp->init_block != NULL) {
1282		if (lp->pio_buffer) {
1283			sbus_iounmap((unsigned long)lp->init_block,
1284				     sizeof(struct lance_init_block));
1285		} else {
1286			sbus_free_consistent(lp->sdev,
1287					     sizeof(struct lance_init_block),
1288					     (void *)lp->init_block,
1289					     lp->init_block_dvma);
1290		}
1291	}
1292}
1293
1294static int __init sparc_lance_init(struct net_device *dev,
1295				   struct sbus_dev *sdev,
1296				   struct sbus_dma *ledma,
1297				   struct sbus_dev *lebuffer)
1298{
1299	static unsigned version_printed;
1300	struct lance_private *lp = NULL;
1301	int    i;
1302
1303	if (dev == NULL) {
1304		dev = init_etherdev (0, sizeof (struct lance_private) + 8);
1305	} else {
1306		dev->priv = kmalloc(sizeof (struct lance_private) + 8,
1307				    GFP_KERNEL);
1308		if (dev->priv == NULL)
1309			return -ENOMEM;
1310		memset(dev->priv, 0, sizeof (struct lance_private) + 8);
1311	}
1312	if (sparc_lance_debug && version_printed++ == 0)
1313		printk (KERN_INFO "%s", version);
1314
1315	printk(KERN_INFO "%s: LANCE ", dev->name);
1316
1317	/* Make certain the data structures used by the LANCE are aligned. */
1318	dev->priv = (void *)(((unsigned long)dev->priv + 7) & ~7);
1319	lp = (struct lance_private *) dev->priv;
1320	spin_lock_init(&lp->lock);
1321
1322	/* Copy the IDPROM ethernet address to the device structure, later we
1323	 * will copy the address in the device structure to the lance
1324	 * initialization block.
1325	 */
1326	for (i = 0; i < 6; i++)
1327		printk("%2.2x%c", dev->dev_addr[i] = idprom->id_ethaddr[i],
1328		       i == 5 ? ' ': ':');
1329	printk("\n");
1330
1331	/* Get the IO region */
1332	lp->lregs = sbus_ioremap(&sdev->resource[0], 0,
1333				 LANCE_REG_SIZE, lancestr);
1334	if (lp->lregs == 0UL) {
1335		printk(KERN_ERR "%s: Cannot map SunLance registers.\n",
1336		       dev->name);
1337		goto fail;
1338	}
1339
1340	lp->sdev = sdev;
1341	if (lebuffer) {
1342		lp->init_block = (volatile struct lance_init_block *)
1343			sbus_ioremap(&lebuffer->resource[0], 0,
1344				     sizeof(struct lance_init_block), "lebuffer");
1345		if (lp->init_block == NULL) {
1346			printk(KERN_ERR "%s: Cannot map SunLance PIO buffer.\n",
1347			       dev->name);
1348			goto fail;
1349		}
1350		lp->init_block_dvma = 0;
1351		lp->pio_buffer = 1;
1352		lp->init_ring = lance_init_ring_pio;
1353		lp->rx = lance_rx_pio;
1354		lp->tx = lance_tx_pio;
1355	} else {
1356		lp->init_block = (volatile struct lance_init_block *)
1357			sbus_alloc_consistent(sdev, sizeof(struct lance_init_block),
1358					      &lp->init_block_dvma);
1359		if (lp->init_block == NULL ||
1360		    lp->init_block_dvma == 0) {
1361			printk(KERN_ERR "%s: Cannot allocate consistent DMA memory.\n",
1362			       dev->name);
1363			goto fail;
1364		}
1365		lp->pio_buffer = 0;
1366		lp->init_ring = lance_init_ring_dvma;
1367		lp->rx = lance_rx_dvma;
1368		lp->tx = lance_tx_dvma;
1369	}
1370	lp->busmaster_regval = prom_getintdefault(sdev->prom_node,
1371						  "busmaster-regval",
1372						  (LE_C3_BSWP | LE_C3_ACON |
1373						   LE_C3_BCON));
1374
1375	lp->name = lancestr;
1376	lp->ledma = ledma;
1377
1378	lp->burst_sizes = 0;
1379	if (lp->ledma) {
1380		char prop[6];
1381		unsigned int sbmask;
1382		u32 csr;
1383
1384		/* Find burst-size property for ledma */
1385		lp->burst_sizes = prom_getintdefault(ledma->sdev->prom_node,
1386						     "burst-sizes", 0);
1387
1388		/* ledma may be capable of fast bursts, but sbus may not. */
1389		sbmask = prom_getintdefault(ledma->sdev->bus->prom_node,
1390					    "burst-sizes", DMA_BURSTBITS);
1391		lp->burst_sizes &= sbmask;
1392
1393		/* Get the cable-selection property */
1394		memset(prop, 0, sizeof(prop));
1395		prom_getstring(ledma->sdev->prom_node, "cable-selection",
1396			       prop, sizeof(prop));
1397		if (prop[0] == 0) {
1398			int topnd, nd;
1399
1400			printk(KERN_INFO "%s: using auto-carrier-detection.\n",
1401			       dev->name);
1402
1403			topnd = prom_getchild(prom_root_node);
1404
1405			nd = prom_searchsiblings(topnd, "options");
1406			if (!nd)
1407				goto no_link_test;
1408
1409			if (!prom_node_has_property(nd, "tpe-link-test?"))
1410				goto no_link_test;
1411
1412			memset(prop, 0, sizeof(prop));
1413			prom_getstring(nd, "tpe-link-test?", prop,
1414				       sizeof(prop));
1415
1416			if (strcmp(prop, "true")) {
1417				printk(KERN_NOTICE "%s: warning: overriding option "
1418				       "'tpe-link-test?'\n", dev->name);
1419				printk(KERN_NOTICE "%s: warning: mail any problems "
1420				       "to ecd@skynet.be\n", dev->name);
1421				set_auxio(AUXIO_LINK_TEST, 0);
1422			}
1423no_link_test:
1424			lp->auto_select = 1;
1425			lp->tpe = 0;
1426		} else if (!strcmp(prop, "aui")) {
1427			lp->auto_select = 0;
1428			lp->tpe = 0;
1429		} else {
1430			lp->auto_select = 0;
1431			lp->tpe = 1;
1432		}
1433
1434		lp->dregs = ledma->regs;
1435
1436		/* Reset ledma */
1437		csr = sbus_readl(lp->dregs + DMA_CSR);
1438		sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
1439		udelay(200);
1440		sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR);
1441	} else
1442		lp->dregs = 0;
1443
1444	/* This should never happen. */
1445	if ((unsigned long)(lp->init_block->brx_ring) & 0x07) {
1446		printk(KERN_ERR "%s: ERROR: Rx and Tx rings not on even boundary.\n",
1447		       dev->name);
1448		goto fail;
1449	}
1450
1451	lp->dev = dev;
1452	SET_MODULE_OWNER(dev);
1453	dev->open = &lance_open;
1454	dev->stop = &lance_close;
1455	dev->hard_start_xmit = &lance_start_xmit;
1456	dev->tx_timeout = &lance_tx_timeout;
1457	dev->watchdog_timeo = 5*HZ;
1458	dev->get_stats = &lance_get_stats;
1459	dev->set_multicast_list = &lance_set_multicast;
1460
1461	dev->irq = sdev->irqs[0];
1462
1463	dev->dma = 0;
1464	ether_setup(dev);
1465
1466	/* We cannot sleep if the chip is busy during a
1467	 * multicast list update event, because such events
1468	 * can occur from interrupts (ex. IPv6).  So we
1469	 * use a timer to try again later when necessary. -DaveM
1470	 */
1471	init_timer(&lp->multicast_timer);
1472	lp->multicast_timer.data = (unsigned long) dev;
1473	lp->multicast_timer.function = &lance_set_multicast_retry;
1474
1475	dev->ifindex = dev_new_index();
1476	lp->next_module = root_lance_dev;
1477	root_lance_dev = lp;
1478
1479	return 0;
1480
1481fail:
1482	if (lp != NULL)
1483		lance_free_hwresources(lp);
1484	return -ENODEV;
1485}
1486
1487/* On 4m, find the associated dma for the lance chip */
1488static inline struct sbus_dma *find_ledma(struct sbus_dev *sdev)
1489{
1490	struct sbus_dma *p;
1491
1492	for_each_dvma(p) {
1493		if (p->sdev == sdev)
1494			return p;
1495	}
1496	return NULL;
1497}
1498
1499#ifdef CONFIG_SUN4
1500
1501#include <asm/sun4paddr.h>
1502
1503/* Find all the lance cards on the system and initialize them */
1504static int __init sparc_lance_probe(void)
1505{
1506	static struct sbus_dev sdev;
1507	static int called;
1508
1509	root_lance_dev = NULL;
1510
1511	if (called)
1512		return -ENODEV;
1513	called++;
1514
1515	if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) ||
1516	    (idprom->id_machtype == (SM_SUN4|SM_4_470))) {
1517		memset(&sdev, 0, sizeof(sdev));
1518		sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr;
1519		sdev.irqs[0] = 6;
1520		return sparc_lance_init(NULL, &sdev, 0, 0);
1521	}
1522	return -ENODEV;
1523}
1524
1525#else /* !CONFIG_SUN4 */
1526
1527/* Find all the lance cards on the system and initialize them */
1528static int __init sparc_lance_probe(void)
1529{
1530	struct sbus_bus *bus;
1531	struct sbus_dev *sdev = 0;
1532	struct net_device *dev = NULL;
1533	struct sbus_dma *ledma = 0;
1534	static int called;
1535	int cards = 0, v;
1536
1537	root_lance_dev = NULL;
1538
1539	if (called)
1540		return -ENODEV;
1541	called++;
1542
1543	for_each_sbus (bus) {
1544		for_each_sbusdev (sdev, bus) {
1545			if (cards)
1546				dev = NULL;
1547			if (strcmp(sdev->prom_name, "le") == 0) {
1548				cards++;
1549				if ((v = sparc_lance_init(dev, sdev, 0, 0)))
1550					return v;
1551				continue;
1552			}
1553			if (strcmp(sdev->prom_name, "ledma") == 0) {
1554				cards++;
1555				ledma = find_ledma(sdev);
1556				if ((v = sparc_lance_init(dev, sdev->child,
1557							  ledma, 0)))
1558					return v;
1559				continue;
1560			}
1561			if (strcmp(sdev->prom_name, "lebuffer") == 0){
1562				cards++;
1563				if ((v = sparc_lance_init(dev, sdev->child,
1564							  0, sdev)))
1565					return v;
1566				continue;
1567			}
1568		} /* for each sbusdev */
1569	} /* for each sbus */
1570	if (!cards)
1571		return -ENODEV;
1572	return 0;
1573}
1574#endif /* !CONFIG_SUN4 */
1575
1576static void __exit sparc_lance_cleanup(void)
1577{
1578	struct lance_private *lp;
1579
1580	while (root_lance_dev) {
1581		lp = root_lance_dev->next_module;
1582
1583		unregister_netdev(root_lance_dev->dev);
1584		lance_free_hwresources(root_lance_dev);
1585		kfree(root_lance_dev->dev);
1586		root_lance_dev = lp;
1587	}
1588}
1589
/* Module entry/exit points and license declaration. */
module_init(sparc_lance_probe);
module_exit(sparc_lance_cleanup);
MODULE_LICENSE("GPL");
1593