1/*
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2000, 2001
4 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34/*
35 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
36 * for FreeBSD. Datasheets are available from:
37 *
38 * http://www.national.com/ds/DP/DP83820.pdf
39 * http://www.national.com/ds/DP/DP83821.pdf
40 *
41 * These chips are used on several low cost gigabit ethernet NICs
42 * sold by D-Link, Addtron, SMC and Asante. Both parts are
43 * virtually the same, except the 83820 is a 64-bit/32-bit part,
44 * while the 83821 is 32-bit only.
45 *
46 * Many cards also use National gigE transceivers, such as the
47 * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
48 * contains a full register description that applies to all of these
49 * components:
50 *
51 * http://www.national.com/ds/DP/DP83861.pdf
52 *
53 * Written by Bill Paul <wpaul@bsdi.com>
54 * BSDi Open Source Solutions
55 */
56
57/*
58 * The NatSemi DP83820 and 83821 controllers are enhanced versions
59 * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
60 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
61 * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
62 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
63 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
64 * matching buffers, one perfect address filter buffer and interrupt
65 * moderation. The 83820 supports both 64-bit and 32-bit addressing
66 * and data transfers: the 64-bit support can be toggled on or off
67 * via software. This affects the size of certain fields in the DMA
68 * descriptors.
69 *
70 * There are two bugs/misfeatures in the 83820/83821 that I have
71 * discovered so far:
72 *
73 * - Receive buffers must be aligned on 64-bit boundaries, which means
74 *   you must resort to copying data in order to fix up the payload
75 *   alignment.
76 *
77 * - In order to transmit jumbo frames larger than 8170 bytes, you have
78 *   to turn off transmit checksum offloading, because the chip can't
79 *   compute the checksum on an outgoing frame unless it fits entirely
80 *   within the TX FIFO, which is only 8192 bytes in size. If you have
81 *   TX checksum offload enabled and you attempt to transmit a
82 *   frame larger than 8170 bytes, the transmitter will wedge.
83 *
84 * To work around the latter problem, TX checksum offload is disabled
85 * if the user selects an MTU of 8152 (8170 - 18) or larger.
86 */
87
88#include <sys/cdefs.h>
89__FBSDID("$FreeBSD: head/sys/dev/nge/if_nge.c 113038 2003-04-03 21:36:33Z obrien $");
90
91#include <sys/param.h>
92#include <sys/systm.h>
93#include <sys/sockio.h>
94#include <sys/mbuf.h>
95#include <sys/malloc.h>
96#include <sys/kernel.h>
97#include <sys/socket.h>
98
99#include <net/if.h>
100#include <net/if_arp.h>
101#include <net/ethernet.h>
102#include <net/if_dl.h>
103#include <net/if_media.h>
104#include <net/if_types.h>
105#include <net/if_vlan_var.h>
106
107#include <net/bpf.h>
108
109#include <vm/vm.h>              /* for vtophys */
110#include <vm/pmap.h>            /* for vtophys */
111#include <machine/clock.h>      /* for DELAY */
112#include <machine/bus_pio.h>
113#include <machine/bus_memio.h>
114#include <machine/bus.h>
115#include <machine/resource.h>
116#include <sys/bus.h>
117#include <sys/rman.h>
118
119#include <dev/mii/mii.h>
120#include <dev/mii/miivar.h>
121
122#include <pci/pcireg.h>
123#include <pci/pcivar.h>
124
125#define NGE_USEIOSPACE
126
127#include <dev/nge/if_ngereg.h>
128
129MODULE_DEPEND(nge, miibus, 1, 1, 1);
130
131/* "controller miibus0" required.  See GENERIC if you get errors here. */
132#include "miibus_if.h"
133
134#define NGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
135
136/*
137 * Various supported device vendors/types and their names.
138 */
139static struct nge_type nge_devs[] = {
140	{ NGE_VENDORID, NGE_DEVICEID,
141	    "National Semiconductor Gigabit Ethernet" },
142	{ 0, 0, NULL }
143};
144
145static int nge_probe(device_t);
146static int nge_attach(device_t);
147static int nge_detach(device_t);
148
149static int nge_alloc_jumbo_mem(struct nge_softc *);
150static void nge_free_jumbo_mem(struct nge_softc *);
151static void *nge_jalloc(struct nge_softc *);
152static void nge_jfree(void *, void *);
153
154static int nge_newbuf(struct nge_softc *, struct nge_desc *, struct mbuf *);
155static int nge_encap(struct nge_softc *, struct mbuf *, u_int32_t *);
156static void nge_rxeof(struct nge_softc *);
157static void nge_txeof(struct nge_softc *);
158static void nge_intr(void *);
159static void nge_tick(void *);
160static void nge_start(struct ifnet *);
161static int nge_ioctl(struct ifnet *, u_long, caddr_t);
162static void nge_init(void *);
163static void nge_stop(struct nge_softc *);
164static void nge_watchdog(struct ifnet *);
165static void nge_shutdown(device_t);
166static int nge_ifmedia_upd(struct ifnet *);
167static void nge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
168
169static void nge_delay(struct nge_softc *);
170static void nge_eeprom_idle(struct nge_softc *);
171static void nge_eeprom_putbyte(struct nge_softc *, int);
172static void nge_eeprom_getword(struct nge_softc *, int, u_int16_t *);
173static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int, int);
174
175static void nge_mii_sync(struct nge_softc *);
176static void nge_mii_send(struct nge_softc *, u_int32_t, int);
177static int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
178static int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);
179
180static int nge_miibus_readreg(device_t, int, int);
181static int nge_miibus_writereg(device_t, int, int, int);
182static void nge_miibus_statchg(device_t);
183
184static void nge_setmulti(struct nge_softc *);
185static u_int32_t nge_crc(struct nge_softc *, caddr_t);
186static void nge_reset(struct nge_softc *);
187static int nge_list_rx_init(struct nge_softc *);
188static int nge_list_tx_init(struct nge_softc *);
189
190#ifdef NGE_USEIOSPACE
191#define NGE_RES			SYS_RES_IOPORT
192#define NGE_RID			NGE_PCI_LOIO
193#else
194#define NGE_RES			SYS_RES_MEMORY
195#define NGE_RID			NGE_PCI_LOMEM
196#endif
197
198static device_method_t nge_methods[] = {
199	/* Device interface */
200	DEVMETHOD(device_probe,		nge_probe),
201	DEVMETHOD(device_attach,	nge_attach),
202	DEVMETHOD(device_detach,	nge_detach),
203	DEVMETHOD(device_shutdown,	nge_shutdown),
204
205	/* bus interface */
206	DEVMETHOD(bus_print_child,	bus_generic_print_child),
207	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
208
209	/* MII interface */
210	DEVMETHOD(miibus_readreg,	nge_miibus_readreg),
211	DEVMETHOD(miibus_writereg,	nge_miibus_writereg),
212	DEVMETHOD(miibus_statchg,	nge_miibus_statchg),
213
214	{ 0, 0 }
215};
216
217static driver_t nge_driver = {
218	"nge",
219	nge_methods,
220	sizeof(struct nge_softc)
221};
222
223static devclass_t nge_devclass;
224
225DRIVER_MODULE(if_nge, pci, nge_driver, nge_devclass, 0, 0);
226DRIVER_MODULE(miibus, nge, miibus_driver, miibus_devclass, 0, 0);
227
228#define NGE_SETBIT(sc, reg, x)				\
229	CSR_WRITE_4(sc, reg,				\
230		CSR_READ_4(sc, reg) | (x))
231
232#define NGE_CLRBIT(sc, reg, x)				\
233	CSR_WRITE_4(sc, reg,				\
234		CSR_READ_4(sc, reg) & ~(x))
235
236#define SIO_SET(x)					\
237	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))
238
239#define SIO_CLR(x)					\
240	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))
241
242static void
243nge_delay(sc)
244	struct nge_softc	*sc;
245{
246	int			idx;
247
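	/*
	 * Kill a little time by issuing harmless reads of the CSR
	 * register; the 300/33 presumably aims for roughly 300ns
	 * worth of reads on a 33MHz PCI bus.
	 */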
248	for (idx = (300 / 33) + 1; idx > 0; idx--)
249		CSR_READ_4(sc, NGE_CSR);
250
251	return;
252}
253
254static void
255nge_eeprom_idle(sc)
256	struct nge_softc	*sc;
257{
258	register int		i;
259
260	SIO_SET(NGE_MEAR_EE_CSEL);
261	nge_delay(sc);
262	SIO_SET(NGE_MEAR_EE_CLK);
263	nge_delay(sc);
264
265	for (i = 0; i < 25; i++) {
266		SIO_CLR(NGE_MEAR_EE_CLK);
267		nge_delay(sc);
268		SIO_SET(NGE_MEAR_EE_CLK);
269		nge_delay(sc);
270	}
271
272	SIO_CLR(NGE_MEAR_EE_CLK);
273	nge_delay(sc);
274	SIO_CLR(NGE_MEAR_EE_CSEL);
275	nge_delay(sc);
276	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
277
278	return;
279}
280
281/*
282 * Send a read command and address to the EEPROM.
283 */
284static void
285nge_eeprom_putbyte(sc, addr)
286	struct nge_softc	*sc;
287	int			addr;
288{
289	register int		d, i;
290
291	d = addr | NGE_EECMD_READ;
292
293	/*
294	 * Feed in each bit and strobe the clock.
295	 */
296	for (i = 0x400; i; i >>= 1) {
297		if (d & i) {
298			SIO_SET(NGE_MEAR_EE_DIN);
299		} else {
300			SIO_CLR(NGE_MEAR_EE_DIN);
301		}
302		nge_delay(sc);
303		SIO_SET(NGE_MEAR_EE_CLK);
304		nge_delay(sc);
305		SIO_CLR(NGE_MEAR_EE_CLK);
306		nge_delay(sc);
307	}
308
309	return;
310}
311
312/*
313 * Read a word of data stored in the EEPROM at address 'addr.'
314 */
315static void
316nge_eeprom_getword(sc, addr, dest)
317	struct nge_softc	*sc;
318	int			addr;
319	u_int16_t		*dest;
320{
321	register int		i;
322	u_int16_t		word = 0;
323
324	/* Force EEPROM to idle state. */
325	nge_eeprom_idle(sc);
326
327	/* Enter EEPROM access mode. */
328	nge_delay(sc);
329	SIO_CLR(NGE_MEAR_EE_CLK);
330	nge_delay(sc);
331	SIO_SET(NGE_MEAR_EE_CSEL);
332	nge_delay(sc);
333
334	/*
335	 * Send address of word we want to read.
336	 */
337	nge_eeprom_putbyte(sc, addr);
338
339	/*
340	 * Start reading bits from EEPROM.
341	 */
342	for (i = 0x8000; i; i >>= 1) {
343		SIO_SET(NGE_MEAR_EE_CLK);
344		nge_delay(sc);
345		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
346			word |= i;
347		nge_delay(sc);
348		SIO_CLR(NGE_MEAR_EE_CLK);
349		nge_delay(sc);
350	}
351
352	/* Turn off EEPROM access mode. */
353	nge_eeprom_idle(sc);
354
355	*dest = word;
356
357	return;
358}
359
360/*
361 * Read a sequence of words from the EEPROM.
362 */
363static void
364nge_read_eeprom(sc, dest, off, cnt, swap)
365	struct nge_softc	*sc;
366	caddr_t			dest;
367	int			off;
368	int			cnt;
369	int			swap;
370{
371	int			i;
372	u_int16_t		word = 0, *ptr;
373
374	for (i = 0; i < cnt; i++) {
375		nge_eeprom_getword(sc, off + i, &word);
376		ptr = (u_int16_t *)(dest + (i * 2));
377		if (swap)
378			*ptr = ntohs(word);
379		else
380			*ptr = word;
381	}
382
383	return;
384}
385
386/*
387 * Sync the PHYs by setting data bit and strobing the clock 32 times.
388 */
389static void
390nge_mii_sync(sc)
391	struct nge_softc		*sc;
392{
393	register int		i;
394
395	SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);
396
397	for (i = 0; i < 32; i++) {
398		SIO_SET(NGE_MEAR_MII_CLK);
399		DELAY(1);
400		SIO_CLR(NGE_MEAR_MII_CLK);
401		DELAY(1);
402	}
403
404	return;
405}
406
407/*
408 * Clock a series of bits through the MII.
409 */
410static void
411nge_mii_send(sc, bits, cnt)
412	struct nge_softc		*sc;
413	u_int32_t		bits;
414	int			cnt;
415{
416	int			i;
417
418	SIO_CLR(NGE_MEAR_MII_CLK);
419
420	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
421		if (bits & i) {
422			SIO_SET(NGE_MEAR_MII_DATA);
423		} else {
424			SIO_CLR(NGE_MEAR_MII_DATA);
425		}
426		DELAY(1);
427		SIO_CLR(NGE_MEAR_MII_CLK);
428		DELAY(1);
429		SIO_SET(NGE_MEAR_MII_CLK);
430	}
431}
432
433/*
434 * Read a PHY register through the MII.
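 * The management frame is bit-banged through the MEAR register: a
 * 2-bit start delimiter, a 2-bit read opcode, a 5-bit PHY address
 * and a 5-bit register address, followed by a turnaround cycle and
 * 16 data bits clocked back from the PHY.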
435 */
436static int
437nge_mii_readreg(sc, frame)
438	struct nge_softc		*sc;
439	struct nge_mii_frame	*frame;
440
441{
442	int			i, ack, s;
443
444	s = splimp();
445
446	/*
447	 * Set up frame for RX.
448	 */
449	frame->mii_stdelim = NGE_MII_STARTDELIM;
450	frame->mii_opcode = NGE_MII_READOP;
451	frame->mii_turnaround = 0;
452	frame->mii_data = 0;
453
454	CSR_WRITE_4(sc, NGE_MEAR, 0);
455
456	/*
457 	 * Turn on data xmit.
458	 */
459	SIO_SET(NGE_MEAR_MII_DIR);
460
461	nge_mii_sync(sc);
462
463	/*
464	 * Send command/address info.
465	 */
466	nge_mii_send(sc, frame->mii_stdelim, 2);
467	nge_mii_send(sc, frame->mii_opcode, 2);
468	nge_mii_send(sc, frame->mii_phyaddr, 5);
469	nge_mii_send(sc, frame->mii_regaddr, 5);
470
471	/* Idle bit */
472	SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
473	DELAY(1);
474	SIO_SET(NGE_MEAR_MII_CLK);
475	DELAY(1);
476
477	/* Turn off xmit. */
478	SIO_CLR(NGE_MEAR_MII_DIR);
479	/* Check for ack */
480	SIO_CLR(NGE_MEAR_MII_CLK);
481	DELAY(1);
482	ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
483	SIO_SET(NGE_MEAR_MII_CLK);
484	DELAY(1);
485
486	/*
487	 * Now try reading data bits. If the ack failed, we still
488	 * need to clock through 16 cycles to keep the PHY(s) in sync.
489	 */
490	if (ack) {
491		for(i = 0; i < 16; i++) {
492			SIO_CLR(NGE_MEAR_MII_CLK);
493			DELAY(1);
494			SIO_SET(NGE_MEAR_MII_CLK);
495			DELAY(1);
496		}
497		goto fail;
498	}
499
500	for (i = 0x8000; i; i >>= 1) {
501		SIO_CLR(NGE_MEAR_MII_CLK);
502		DELAY(1);
503		if (!ack) {
504			if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
505				frame->mii_data |= i;
506			DELAY(1);
507		}
508		SIO_SET(NGE_MEAR_MII_CLK);
509		DELAY(1);
510	}
511
512fail:
513
514	SIO_CLR(NGE_MEAR_MII_CLK);
515	DELAY(1);
516	SIO_SET(NGE_MEAR_MII_CLK);
517	DELAY(1);
518
519	splx(s);
520
521	if (ack)
522		return(1);
523	return(0);
524}
525
526/*
527 * Write to a PHY register through the MII.
528 */
529static int
530nge_mii_writereg(sc, frame)
531	struct nge_softc		*sc;
532	struct nge_mii_frame	*frame;
533
534{
535	int			s;
536
537	s = splimp();
538	/*
539	 * Set up frame for TX.
540	 */
541
542	frame->mii_stdelim = NGE_MII_STARTDELIM;
543	frame->mii_opcode = NGE_MII_WRITEOP;
544	frame->mii_turnaround = NGE_MII_TURNAROUND;
545
546	/*
547 	 * Turn on data output.
548	 */
549	SIO_SET(NGE_MEAR_MII_DIR);
550
551	nge_mii_sync(sc);
552
553	nge_mii_send(sc, frame->mii_stdelim, 2);
554	nge_mii_send(sc, frame->mii_opcode, 2);
555	nge_mii_send(sc, frame->mii_phyaddr, 5);
556	nge_mii_send(sc, frame->mii_regaddr, 5);
557	nge_mii_send(sc, frame->mii_turnaround, 2);
558	nge_mii_send(sc, frame->mii_data, 16);
559
560	/* Idle bit. */
561	SIO_SET(NGE_MEAR_MII_CLK);
562	DELAY(1);
563	SIO_CLR(NGE_MEAR_MII_CLK);
564	DELAY(1);
565
566	/*
567	 * Turn off xmit.
568	 */
569	SIO_CLR(NGE_MEAR_MII_DIR);
570
571	splx(s);
572
573	return(0);
574}
575
576static int
577nge_miibus_readreg(dev, phy, reg)
578	device_t		dev;
579	int			phy, reg;
580{
581	struct nge_softc	*sc;
582	struct nge_mii_frame	frame;
583
584	sc = device_get_softc(dev);
585
586	bzero((char *)&frame, sizeof(frame));
587
588	frame.mii_phyaddr = phy;
589	frame.mii_regaddr = reg;
590	nge_mii_readreg(sc, &frame);
591
592	return(frame.mii_data);
593}
594
595static int
596nge_miibus_writereg(dev, phy, reg, data)
597	device_t		dev;
598	int			phy, reg, data;
599{
600	struct nge_softc	*sc;
601	struct nge_mii_frame	frame;
602
603	sc = device_get_softc(dev);
604
605	bzero((char *)&frame, sizeof(frame));
606
607	frame.mii_phyaddr = phy;
608	frame.mii_regaddr = reg;
609	frame.mii_data = data;
610	nge_mii_writereg(sc, &frame);
611
612	return(0);
613}
614
615static void
616nge_miibus_statchg(dev)
617	device_t		dev;
618{
619	int			status;
620	struct nge_softc	*sc;
621	struct mii_data		*mii;
622
623	sc = device_get_softc(dev);
624	if (sc->nge_tbi) {
625		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
626		    == IFM_AUTO) {
627			status = CSR_READ_4(sc, NGE_TBI_ANLPAR);
628			if (status == 0 || status & NGE_TBIANAR_FDX) {
629				NGE_SETBIT(sc, NGE_TX_CFG,
630				    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
631				NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
632			} else {
633				NGE_CLRBIT(sc, NGE_TX_CFG,
634				    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
635				NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
636			}
637
638		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
639			!= IFM_FDX) {
640			NGE_CLRBIT(sc, NGE_TX_CFG,
641			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
642			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
643		} else {
644			NGE_SETBIT(sc, NGE_TX_CFG,
645			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
646			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
647		}
648	} else {
649		mii = device_get_softc(sc->nge_miibus);
650
651		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
652		        NGE_SETBIT(sc, NGE_TX_CFG,
653			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
654			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
655		} else {
656			NGE_CLRBIT(sc, NGE_TX_CFG,
657			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
658			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
659		}
660
661		/* If we have a 1000Mbps link, set the mode_1000 bit. */
662		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
663		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
664			NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
665		} else {
666			NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
667		}
668	}
669	return;
670}
671
672static u_int32_t
673nge_crc(sc, addr)
674	struct nge_softc	*sc;
675	caddr_t			addr;
676{
677	u_int32_t		crc, carry;
678	int			i, j;
679	u_int8_t		c;
680
681	/* Compute CRC for the address value. */
682	crc = 0xFFFFFFFF; /* initial value */
683
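	/*
	 * This is the standard Ethernet CRC-32, computed one bit at a
	 * time.  XORing with 0x04c11db6 and then OR-ing in the carry
	 * is equivalent to XORing with the usual 0x04c11db7 polynomial,
	 * since bit 0 of crc is always clear right after the shift.
	 */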
684	for (i = 0; i < 6; i++) {
685		c = *(addr + i);
686		for (j = 0; j < 8; j++) {
687			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
688			crc <<= 1;
689			c >>= 1;
690			if (carry)
691				crc = (crc ^ 0x04c11db6) | carry;
692		}
693	}
694
695	/*
696	 * return the filter bit position
697	 */
698
699	return((crc >> 21) & 0x00000FFF);
700}
701
702static void
703nge_setmulti(sc)
704	struct nge_softc	*sc;
705{
706	struct ifnet		*ifp;
707	struct ifmultiaddr	*ifma;
708	u_int32_t		h = 0, i, filtsave;
709	int			bit, index;
710
711	ifp = &sc->arpcom.ac_if;
712
713	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
714		NGE_CLRBIT(sc, NGE_RXFILT_CTL,
715		    NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH);
716		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI);
717		return;
718	}
719
720	/*
721	 * We have to explicitly enable the multicast hash table
722	 * on the NatSemi chip if we want to use it, which we do.
723	 * We also have to tell it that we don't want to use the
724	 * hash table for matching unicast addresses.
725	 */
726	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH);
727	NGE_CLRBIT(sc, NGE_RXFILT_CTL,
728	    NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH);
729
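	/*
	 * RXFILT_CTL doubles as the address register for indirect
	 * access to the filter RAM, so save its current contents here
	 * and restore them once the hash table has been rewritten.
	 */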
730	filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL);
731
732	/* first, zot all the existing hash bits */
733	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
734		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
735		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
736	}
737
738	/*
739	 * From the 11 bits returned by the crc routine, the top 7
740	 * bits represent the 16-bit word in the mcast hash table
741	 * that needs to be updated, and the lower 4 bits represent
742	 * which bit within that word needs to be set.
743	 */
744	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
745		if (ifma->ifma_addr->sa_family != AF_LINK)
746			continue;
747		h = nge_crc(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
748		index = (h >> 4) & 0x7F;
749		bit = h & 0xF;
750		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
751		    NGE_FILTADDR_MCAST_LO + (index * 2));
752		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
753	}
754
755	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave);
756
757	return;
758}
759
760static void
761nge_reset(sc)
762	struct nge_softc	*sc;
763{
764	register int		i;
765
766	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);
767
768	for (i = 0; i < NGE_TIMEOUT; i++) {
769		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
770			break;
771	}
772
773	if (i == NGE_TIMEOUT)
774		printf("nge%d: reset never completed\n", sc->nge_unit);
775
776	/* Wait a little while for the chip to get its brains in order. */
777	DELAY(1000);
778
779	/*
780	 * If this is a NatSemi chip, make sure to clear
781	 * PME mode.
782	 */
783	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
784	CSR_WRITE_4(sc, NGE_CLKRUN, 0);
785
786        return;
787}
788
789/*
790 * Probe for a NatSemi chip. Check the PCI vendor and device
791 * IDs against our list and return a device name if we find a match.
792 */
793static int
794nge_probe(dev)
795	device_t		dev;
796{
797	struct nge_type		*t;
798
799	t = nge_devs;
800
801	while(t->nge_name != NULL) {
802		if ((pci_get_vendor(dev) == t->nge_vid) &&
803		    (pci_get_device(dev) == t->nge_did)) {
804			device_set_desc(dev, t->nge_name);
805			return(0);
806		}
807		t++;
808	}
809
810	return(ENXIO);
811}
812
813/*
814 * Attach the interface. Allocate softc structures, do ifmedia
815 * setup and ethernet/BPF attach.
816 */
817static int
818nge_attach(dev)
819	device_t		dev;
820{
821	int			s;
822	u_char			eaddr[ETHER_ADDR_LEN];
823	u_int32_t		command;
824	struct nge_softc	*sc;
825	struct ifnet		*ifp;
826	int			unit, error = 0, rid;
827	const char		*sep = "";
828
829	s = splimp();
830
831	sc = device_get_softc(dev);
832	unit = device_get_unit(dev);
833	bzero(sc, sizeof(struct nge_softc));
834
835	mtx_init(&sc->nge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
836	    MTX_DEF | MTX_RECURSE);
837
838	/*
839	 * Handle power management nonsense.
840	 */
841	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
842		u_int32_t		iobase, membase, irq;
843
844		/* Save important PCI config data. */
845		iobase = pci_read_config(dev, NGE_PCI_LOIO, 4);
846		membase = pci_read_config(dev, NGE_PCI_LOMEM, 4);
847		irq = pci_read_config(dev, NGE_PCI_INTLINE, 4);
848
849		/* Reset the power state. */
850		printf("nge%d: chip is in D%d power mode "
851		    "-- setting to D0\n", unit,
852		    pci_get_powerstate(dev));
853		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
854
855		/* Restore PCI config data. */
856		pci_write_config(dev, NGE_PCI_LOIO, iobase, 4);
857		pci_write_config(dev, NGE_PCI_LOMEM, membase, 4);
858		pci_write_config(dev, NGE_PCI_INTLINE, irq, 4);
859	}
860
861	/*
862	 * Map control/status registers.
863	 */
864	pci_enable_busmaster(dev);
865	pci_enable_io(dev, SYS_RES_IOPORT);
866	pci_enable_io(dev, SYS_RES_MEMORY);
867	command = pci_read_config(dev, PCIR_COMMAND, 4);
868
869#ifdef NGE_USEIOSPACE
870	if (!(command & PCIM_CMD_PORTEN)) {
871		printf("nge%d: failed to enable I/O ports!\n", unit);
872		error = ENXIO;
873		goto fail;
874	}
875#else
876	if (!(command & PCIM_CMD_MEMEN)) {
877		printf("nge%d: failed to enable memory mapping!\n", unit);
878		error = ENXIO;
879		goto fail;
880	}
881#endif
882
883	rid = NGE_RID;
884	sc->nge_res = bus_alloc_resource(dev, NGE_RES, &rid,
885	    0, ~0, 1, RF_ACTIVE);
886
887	if (sc->nge_res == NULL) {
888		printf("nge%d: couldn't map ports/memory\n", unit);
889		error = ENXIO;
890		goto fail;
891	}
892
893	sc->nge_btag = rman_get_bustag(sc->nge_res);
894	sc->nge_bhandle = rman_get_bushandle(sc->nge_res);
895
896	/* Allocate interrupt */
897	rid = 0;
898	sc->nge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
899	    RF_SHAREABLE | RF_ACTIVE);
900
901	if (sc->nge_irq == NULL) {
902		printf("nge%d: couldn't map interrupt\n", unit);
903		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
904		error = ENXIO;
905		goto fail;
906	}
907
908	error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET,
909	    nge_intr, sc, &sc->nge_intrhand);
910
911	if (error) {
912		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
913		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
914		printf("nge%d: couldn't set up irq\n", unit);
915		goto fail;
916	}
917
918	/* Reset the adapter. */
919	nge_reset(sc);
920
921	/*
922	 * Get station address from the EEPROM.
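	 * The three 16-bit words are filled in from the end of eaddr[]
	 * back to the start to compensate for the order in which they
	 * are stored in the EEPROM.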
923	 */
924	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
925	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
926	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);
927
928	/*
929	 * A NatSemi chip was detected. Inform the world.
930	 */
931	printf("nge%d: Ethernet address: %6D\n", unit, eaddr, ":");
932
933	sc->nge_unit = unit;
934	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
935
936	sc->nge_ldata = contigmalloc(sizeof(struct nge_list_data), M_DEVBUF,
937	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
938
939	if (sc->nge_ldata == NULL) {
940		printf("nge%d: no memory for list buffers!\n", unit);
941		bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
942		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
943		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
944		error = ENXIO;
945		goto fail;
946	}
947	bzero(sc->nge_ldata, sizeof(struct nge_list_data));
948
949	/* Try to allocate memory for jumbo buffers. */
950	if (nge_alloc_jumbo_mem(sc)) {
951		printf("nge%d: jumbo buffer allocation failed\n",
952                    sc->nge_unit);
953		contigfree(sc->nge_ldata,
954		    sizeof(struct nge_list_data), M_DEVBUF);
955		bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
956		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
957		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
958		error = ENXIO;
959		goto fail;
960	}
961
962	ifp = &sc->arpcom.ac_if;
963	ifp->if_softc = sc;
964	ifp->if_unit = unit;
965	ifp->if_name = "nge";
966	ifp->if_mtu = ETHERMTU;
967	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
968	ifp->if_ioctl = nge_ioctl;
969	ifp->if_output = ether_output;
970	ifp->if_start = nge_start;
971	ifp->if_watchdog = nge_watchdog;
972	ifp->if_init = nge_init;
973	ifp->if_baudrate = 1000000000;
974	ifp->if_snd.ifq_maxlen = NGE_TX_LIST_CNT - 1;
975	ifp->if_hwassist = NGE_CSUM_FEATURES;
976	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING;
977	ifp->if_capenable = ifp->if_capabilities;
978
979	/*
980	 * Do MII setup.
981	 */
982	if (mii_phy_probe(dev, &sc->nge_miibus,
983			  nge_ifmedia_upd, nge_ifmedia_sts)) {
984		if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) {
985			sc->nge_tbi = 1;
986			device_printf(dev, "Using TBI\n");
987
988			sc->nge_miibus = dev;
989
990			ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_upd,
991				nge_ifmedia_sts);
992#define	ADD(m, c)	ifmedia_add(&sc->nge_ifmedia, (m), (c), NULL)
993#define PRINT(s)	printf("%s%s", sep, s); sep = ", "
994			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, 0), 0);
995			device_printf(dev, " ");
996			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, 0, 0), 0);
997			PRINT("1000baseSX");
998			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX, 0),0);
999			PRINT("1000baseSX-FDX");
1000			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0), 0);
1001			PRINT("auto");
1002
1003			printf("\n");
1004#undef ADD
1005#undef PRINT
1006			ifmedia_set(&sc->nge_ifmedia,
1007				IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0));
1008
1009			CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1010				| NGE_GPIO_GP4_OUT
1011				| NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
1012				| NGE_GPIO_GP3_OUTENB
1013				| NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN);
1014
1015		} else {
1016			printf("nge%d: MII without any PHY!\n", sc->nge_unit);
1017			nge_free_jumbo_mem(sc);
1018			bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
1019			bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
1020			bus_release_resource(dev, NGE_RES, NGE_RID,
1021					 sc->nge_res);
1022			error = ENXIO;
1023			goto fail;
1024		}
1025	}
1026
1027	/*
1028	 * Call MI attach routine.
1029	 */
1030	ether_ifattach(ifp, eaddr);
1031	callout_handle_init(&sc->nge_stat_ch);
1032
1033fail:
1034
1035	splx(s);
1036	if (error)
		mtx_destroy(&sc->nge_mtx);
1037	return(error);
1038}
1039
1040static int
1041nge_detach(dev)
1042	device_t		dev;
1043{
1044	struct nge_softc	*sc;
1045	struct ifnet		*ifp;
1046	int			s;
1047
1048	s = splimp();
1049
1050	sc = device_get_softc(dev);
1051	ifp = &sc->arpcom.ac_if;
1052
1053	nge_reset(sc);
1054	nge_stop(sc);
1055	ether_ifdetach(ifp);
1056
1057	bus_generic_detach(dev);
1058	if (!sc->nge_tbi) {
1059		device_delete_child(dev, sc->nge_miibus);
1060	}
1061	bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
1062	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
1063	bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
1064
1065	contigfree(sc->nge_ldata, sizeof(struct nge_list_data), M_DEVBUF);
1066	nge_free_jumbo_mem(sc);
1067
1068	splx(s);
1069	mtx_destroy(&sc->nge_mtx);
1070
1071	return(0);
1072}
1073
1074/*
1075 * Initialize the transmit descriptors.
1076 */
1077static int
1078nge_list_tx_init(sc)
1079	struct nge_softc	*sc;
1080{
1081	struct nge_list_data	*ld;
1082	struct nge_ring_data	*cd;
1083	int			i;
1084
1085	cd = &sc->nge_cdata;
1086	ld = sc->nge_ldata;
1087
1088	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
1089		if (i == (NGE_TX_LIST_CNT - 1)) {
1090			ld->nge_tx_list[i].nge_nextdesc =
1091			    &ld->nge_tx_list[0];
1092			ld->nge_tx_list[i].nge_next =
1093			    vtophys(&ld->nge_tx_list[0]);
1094		} else {
1095			ld->nge_tx_list[i].nge_nextdesc =
1096			    &ld->nge_tx_list[i + 1];
1097			ld->nge_tx_list[i].nge_next =
1098			    vtophys(&ld->nge_tx_list[i + 1]);
1099		}
1100		ld->nge_tx_list[i].nge_mbuf = NULL;
1101		ld->nge_tx_list[i].nge_ptr = 0;
1102		ld->nge_tx_list[i].nge_ctl = 0;
1103	}
1104
1105	cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0;
1106
1107	return(0);
1108}
1109
1110
1111/*
1112 * Initialize the RX descriptors and allocate mbufs for them. Note that
1113 * we arrange the descriptors in a closed ring, so that the last descriptor
1114 * points back to the first.
1115 */
1116static int
1117nge_list_rx_init(sc)
1118	struct nge_softc	*sc;
1119{
1120	struct nge_list_data	*ld;
1121	struct nge_ring_data	*cd;
1122	int			i;
1123
1124	ld = sc->nge_ldata;
1125	cd = &sc->nge_cdata;
1126
1127	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
1128		if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS)
1129			return(ENOBUFS);
1130		if (i == (NGE_RX_LIST_CNT - 1)) {
1131			ld->nge_rx_list[i].nge_nextdesc =
1132			    &ld->nge_rx_list[0];
1133			ld->nge_rx_list[i].nge_next =
1134			    vtophys(&ld->nge_rx_list[0]);
1135		} else {
1136			ld->nge_rx_list[i].nge_nextdesc =
1137			    &ld->nge_rx_list[i + 1];
1138			ld->nge_rx_list[i].nge_next =
1139			    vtophys(&ld->nge_rx_list[i + 1]);
1140		}
1141	}
1142
1143	cd->nge_rx_prod = 0;
1144
1145	return(0);
1146}
1147
1148/*
1149 * Initialize an RX descriptor and attach an MBUF cluster.
1150 */
1151static int
1152nge_newbuf(sc, c, m)
1153	struct nge_softc	*sc;
1154	struct nge_desc		*c;
1155	struct mbuf		*m;
1156{
1157	struct mbuf		*m_new = NULL;
1158	caddr_t			*buf = NULL;
1159
1160	if (m == NULL) {
1161		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1162		if (m_new == NULL) {
1163			printf("nge%d: no memory for rx list "
1164			    "-- packet dropped!\n", sc->nge_unit);
1165			return(ENOBUFS);
1166		}
1167
1168		/* Allocate the jumbo buffer */
1169		buf = nge_jalloc(sc);
1170		if (buf == NULL) {
1171#ifdef NGE_VERBOSE
1172			printf("nge%d: jumbo allocation failed "
1173			    "-- packet dropped!\n", sc->nge_unit);
1174#endif
1175			m_freem(m_new);
1176			return(ENOBUFS);
1177		}
1178		/* Attach the buffer to the mbuf */
1179		m_new->m_data = (void *)buf;
1180		m_new->m_len = m_new->m_pkthdr.len = NGE_JUMBO_FRAMELEN;
1181		MEXTADD(m_new, buf, NGE_JUMBO_FRAMELEN, nge_jfree,
1182		    (struct nge_softc *)sc, 0, EXT_NET_DRV);
1183	} else {
1184		m_new = m;
1185		m_new->m_len = m_new->m_pkthdr.len = NGE_JUMBO_FRAMELEN;
1186		m_new->m_data = m_new->m_ext.ext_buf;
1187	}
1188
1189	m_adj(m_new, sizeof(u_int64_t));
1190
1191	c->nge_mbuf = m_new;
1192	c->nge_ptr = vtophys(mtod(m_new, caddr_t));
1193	c->nge_ctl = m_new->m_len;
1194	c->nge_extsts = 0;
1195
1196	return(0);
1197}
1198
1199static int
1200nge_alloc_jumbo_mem(sc)
1201	struct nge_softc	*sc;
1202{
1203	caddr_t			ptr;
1204	register int		i;
1205	struct nge_jpool_entry   *entry;
1206
1207	/* Grab a big chunk o' storage. */
1208	sc->nge_cdata.nge_jumbo_buf = contigmalloc(NGE_JMEM, M_DEVBUF,
1209	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1210
1211	if (sc->nge_cdata.nge_jumbo_buf == NULL) {
1212		printf("nge%d: no memory for jumbo buffers!\n", sc->nge_unit);
1213		return(ENOBUFS);
1214	}
1215
1216	SLIST_INIT(&sc->nge_jfree_listhead);
1217	SLIST_INIT(&sc->nge_jinuse_listhead);
1218
1219	/*
1220	 * Now divide it up into 9K pieces and save the addresses
1221	 * in an array.
1222	 */
1223	ptr = sc->nge_cdata.nge_jumbo_buf;
1224	for (i = 0; i < NGE_JSLOTS; i++) {
1225		sc->nge_cdata.nge_jslots[i] = ptr;
1226		ptr += NGE_JLEN;
1227		entry = malloc(sizeof(struct nge_jpool_entry),
1228		    M_DEVBUF, M_NOWAIT);
1229		if (entry == NULL) {
1230			printf("nge%d: no memory for jumbo "
1231			    "buffer queue!\n", sc->nge_unit);
1232			return(ENOBUFS);
1233		}
1234		entry->slot = i;
1235		SLIST_INSERT_HEAD(&sc->nge_jfree_listhead,
1236		    entry, jpool_entries);
1237	}
1238
1239	return(0);
1240}
1241
1242static void
1243nge_free_jumbo_mem(sc)
1244	struct nge_softc	*sc;
1245{
1246	register int		i;
1247	struct nge_jpool_entry   *entry;
1248
1249	for (i = 0; i < NGE_JSLOTS; i++) {
1250		entry = SLIST_FIRST(&sc->nge_jfree_listhead);
1251		SLIST_REMOVE_HEAD(&sc->nge_jfree_listhead, jpool_entries);
1252		free(entry, M_DEVBUF);
1253	}
1254
1255	contigfree(sc->nge_cdata.nge_jumbo_buf, NGE_JMEM, M_DEVBUF);
1256
1257	return;
1258}
1259
1260/*
1261 * Allocate a jumbo buffer.
1262 */
1263static void *
1264nge_jalloc(sc)
1265	struct nge_softc	*sc;
1266{
1267	struct nge_jpool_entry   *entry;
1268
1269	entry = SLIST_FIRST(&sc->nge_jfree_listhead);
1270
1271	if (entry == NULL) {
1272#ifdef NGE_VERBOSE
1273		printf("nge%d: no free jumbo buffers\n", sc->nge_unit);
1274#endif
1275		return(NULL);
1276	}
1277
1278	SLIST_REMOVE_HEAD(&sc->nge_jfree_listhead, jpool_entries);
1279	SLIST_INSERT_HEAD(&sc->nge_jinuse_listhead, entry, jpool_entries);
1280	return(sc->nge_cdata.nge_jslots[entry->slot]);
1281}
1282
1283/*
1284 * Release a jumbo buffer.
1285 */
1286static void
1287nge_jfree(buf, args)
1288	void			*buf;
1289	void			*args;
1290{
1291	struct nge_softc	*sc;
1292	int		        i;
1293	struct nge_jpool_entry   *entry;
1294
1295	/* Extract the softc struct pointer. */
1296	sc = args;
1297
1298	if (sc == NULL)
1299		panic("nge_jfree: can't find softc pointer!");
1300
1301	/* calculate the slot this buffer belongs to */
1302	i = ((vm_offset_t)buf
1303	     - (vm_offset_t)sc->nge_cdata.nge_jumbo_buf) / NGE_JLEN;
1304
1305	if ((i < 0) || (i >= NGE_JSLOTS))
1306		panic("nge_jfree: asked to free buffer that we don't manage!");
1307
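	/*
	 * Any entry on the in-use list will do as a container: it is
	 * simply relabeled with the slot number computed above and
	 * moved back onto the free list.
	 */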
1308	entry = SLIST_FIRST(&sc->nge_jinuse_listhead);
1309	if (entry == NULL)
1310		panic("nge_jfree: buffer not in use!");
1311	entry->slot = i;
1312	SLIST_REMOVE_HEAD(&sc->nge_jinuse_listhead, jpool_entries);
1313	SLIST_INSERT_HEAD(&sc->nge_jfree_listhead, entry, jpool_entries);
1314
1315	return;
1316}

1317/*
1318 * A frame has been uploaded: pass the resulting mbuf chain up to
1319 * the higher level protocols.
1320 */
1321static void
1322nge_rxeof(sc)
1323	struct nge_softc	*sc;
1324{
1325        struct mbuf		*m;
1326        struct ifnet		*ifp;
1327	struct nge_desc		*cur_rx;
1328	int			i, total_len = 0;
1329	u_int32_t		rxstat;
1330
1331	ifp = &sc->arpcom.ac_if;
1332	i = sc->nge_cdata.nge_rx_prod;
1333
1334	while(NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) {
1335		struct mbuf		*m0 = NULL;
1336		u_int32_t		extsts;
1337
1338#ifdef DEVICE_POLLING
1339		if (ifp->if_ipending & IFF_POLLING) {
1340			if (sc->rxcycles <= 0)
1341				break;
1342			sc->rxcycles--;
1343		}
1344#endif /* DEVICE_POLLING */
1345
1346		cur_rx = &sc->nge_ldata->nge_rx_list[i];
1347		rxstat = cur_rx->nge_rxstat;
1348		extsts = cur_rx->nge_extsts;
1349		m = cur_rx->nge_mbuf;
1350		cur_rx->nge_mbuf = NULL;
1351		total_len = NGE_RXBYTES(cur_rx);
1352		NGE_INC(i, NGE_RX_LIST_CNT);
1353		/*
1354		 * If an error occurs, update stats, clear the
1355		 * status word and leave the mbuf cluster in place:
1356		 * it should simply get re-used next time this descriptor
1357	 	 * comes up in the ring.
1358		 */
1359		if (!(rxstat & NGE_CMDSTS_PKT_OK)) {
1360			ifp->if_ierrors++;
1361			nge_newbuf(sc, cur_rx, m);
1362			continue;
1363		}
1364
1365		/*
1366		 * Ok. NatSemi really screwed up here. This is the
1367		 * only gigE chip I know of with alignment constraints
1368		 * on receive buffers. RX buffers must be 64-bit aligned.
1369		 */
1370#ifdef __i386__
1371		/*
1372		 * By popular demand, ignore the alignment problems
1373		 * on the Intel x86 platform. The performance hit
1374		 * incurred due to unaligned accesses is much smaller
1375		 * than the hit produced by forcing buffer copies all
1376		 * the time, especially with jumbo frames. We still
1377		 * need to fix up the alignment everywhere else though.
1378		 */
1379		if (nge_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
1380#endif
1381			m0 = m_devget(mtod(m, char *), total_len,
1382			    ETHER_ALIGN, ifp, NULL);
1383			nge_newbuf(sc, cur_rx, m);
1384			if (m0 == NULL) {
1385				printf("nge%d: no receive buffers "
1386				    "available -- packet dropped!\n",
1387				    sc->nge_unit);
1388				ifp->if_ierrors++;
1389				continue;
1390			}
1391			m = m0;
1392#ifdef __i386__
1393		} else {
1394			m->m_pkthdr.rcvif = ifp;
1395			m->m_pkthdr.len = m->m_len = total_len;
1396		}
1397#endif
1398
1399		ifp->if_ipackets++;
1400
1401		/* Do IP checksum checking. */
1402		if (extsts & NGE_RXEXTSTS_IPPKT)
1403			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1404		if (!(extsts & NGE_RXEXTSTS_IPCSUMERR))
1405			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1406		if ((extsts & NGE_RXEXTSTS_TCPPKT &&
1407		    !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) ||
1408		    (extsts & NGE_RXEXTSTS_UDPPKT &&
1409		    !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) {
1410			m->m_pkthdr.csum_flags |=
1411			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
1412			m->m_pkthdr.csum_data = 0xffff;
1413		}
1414
1415		/*
1416		 * If we received a packet with a vlan tag, pass it
1417		 * to vlan_input() instead of ether_input().
1418		 */
1419		if (extsts & NGE_RXEXTSTS_VLANPKT) {
1420			VLAN_INPUT_TAG(ifp, m,
1421				extsts & NGE_RXEXTSTS_VTCI, continue);
1422		}
1423
1424		(*ifp->if_input)(ifp, m);
1425	}
1426
1427	sc->nge_cdata.nge_rx_prod = i;
1428
1429	return;
1430}
1431
1432/*
1433 * A frame was downloaded to the chip. It's safe for us to clean up
1434 * the list buffers.
1435 */
1436
1437static void
1438nge_txeof(sc)
1439	struct nge_softc	*sc;
1440{
1441	struct nge_desc		*cur_tx = NULL;
1442	struct ifnet		*ifp;
1443	u_int32_t		idx;
1444
1445	ifp = &sc->arpcom.ac_if;
1446
1447	/* Clear the timeout timer. */
1448	ifp->if_timer = 0;
1449
1450	/*
1451	 * Go through our tx list and free mbufs for those
1452	 * frames that have been transmitted.
1453	 */
1454	idx = sc->nge_cdata.nge_tx_cons;
1455	while (idx != sc->nge_cdata.nge_tx_prod) {
1456		cur_tx = &sc->nge_ldata->nge_tx_list[idx];
1457
1458		if (NGE_OWNDESC(cur_tx))
1459			break;
1460
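		/*
		 * Descriptors with the MORE bit set are intermediate
		 * fragments of a frame; only the final descriptor
		 * carries the completion status and the mbuf pointer.
		 */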
1461		if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) {
1462			sc->nge_cdata.nge_tx_cnt--;
1463			NGE_INC(idx, NGE_TX_LIST_CNT);
1464			continue;
1465		}
1466
1467		if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) {
1468			ifp->if_oerrors++;
1469			if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS)
1470				ifp->if_collisions++;
1471			if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL)
1472				ifp->if_collisions++;
1473		}
1474
1475		ifp->if_collisions +=
1476		    (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16;
1477
1478		ifp->if_opackets++;
1479		if (cur_tx->nge_mbuf != NULL) {
1480			m_freem(cur_tx->nge_mbuf);
1481			cur_tx->nge_mbuf = NULL;
1482		}
1483
1484		sc->nge_cdata.nge_tx_cnt--;
1485		NGE_INC(idx, NGE_TX_LIST_CNT);
1486		ifp->if_timer = 0;
1487	}
1488
1489	sc->nge_cdata.nge_tx_cons = idx;
1490
1491	if (cur_tx != NULL)
1492		ifp->if_flags &= ~IFF_OACTIVE;
1493
1494	return;
1495}
1496
1497static void
1498nge_tick(xsc)
1499	void			*xsc;
1500{
1501	struct nge_softc	*sc;
1502	struct mii_data		*mii;
1503	struct ifnet		*ifp;
1504	int			s;
1505
1506	s = splimp();
1507
1508	sc = xsc;
1509	ifp = &sc->arpcom.ac_if;
1510
1511	if (sc->nge_tbi) {
1512		if (!sc->nge_link) {
1513			if (CSR_READ_4(sc, NGE_TBI_BMSR)
1514			    & NGE_TBIBMSR_ANEG_DONE) {
1515				printf("nge%d: gigabit link up\n",
1516				    sc->nge_unit);
1517				nge_miibus_statchg(sc->nge_miibus);
1518				sc->nge_link++;
1519				if (ifp->if_snd.ifq_head != NULL)
1520					nge_start(ifp);
1521			}
1522		}
1523	} else {
1524		mii = device_get_softc(sc->nge_miibus);
1525		mii_tick(mii);
1526
1527		if (!sc->nge_link) {
1528			if (mii->mii_media_status & IFM_ACTIVE &&
1529			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1530				sc->nge_link++;
1531				if (IFM_SUBTYPE(mii->mii_media_active)
1532				    == IFM_1000_T)
1533					printf("nge%d: gigabit link up\n",
1534					    sc->nge_unit);
1535				if (ifp->if_snd.ifq_head != NULL)
1536					nge_start(ifp);
1537			}
1538		}
1539	}
1540	sc->nge_stat_ch = timeout(nge_tick, sc, hz);
1541
1542	splx(s);
1543
1544	return;
1545}
1546
1547#ifdef DEVICE_POLLING
1548static poll_handler_t nge_poll;
1549
1550static void
1551nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1552{
1553	struct  nge_softc *sc = ifp->if_softc;
1554
1555	if (cmd == POLL_DEREGISTER) {	/* final call, enable interrupts */
1556		CSR_WRITE_4(sc, NGE_IER, 1);
1557		return;
1558	}
1559
1560	/*
1561	 * On the nge, reading the status register also clears it.
1562	 * So before returning to intr mode we must make sure that all
1563	 * possible pending sources of interrupts have been served.
1564	 * In practice this means run to completion the *eof routines,
1565	 * and then call the interrupt routine
1566	 */
1567	sc->rxcycles = count;
1568	nge_rxeof(sc);
1569	nge_txeof(sc);
1570	if (ifp->if_snd.ifq_head != NULL)
1571		nge_start(ifp);
1572
1573	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
1574		u_int32_t	status;
1575
1576		/* Reading the ISR register clears all interrupts. */
1577		status = CSR_READ_4(sc, NGE_ISR);
1578
1579		if (status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW))
1580			nge_rxeof(sc);
1581
1582		if (status & (NGE_ISR_RX_IDLE))
1583			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1584
1585		if (status & NGE_ISR_SYSERR) {
1586			nge_reset(sc);
1587			ifp->if_flags &= ~IFF_RUNNING;
			nge_init(sc);
1588		}
1589	}
1590}
1591#endif /* DEVICE_POLLING */
1592
1593static void
1594nge_intr(arg)
1595	void			*arg;
1596{
1597	struct nge_softc	*sc;
1598	struct ifnet		*ifp;
1599	u_int32_t		status;
1600
1601	sc = arg;
1602	ifp = &sc->arpcom.ac_if;
1603
1604#ifdef DEVICE_POLLING
1605	if (ifp->if_ipending & IFF_POLLING)
1606		return;
1607	if (ether_poll_register(nge_poll, ifp)) { /* ok, disable interrupts */
1608		CSR_WRITE_4(sc, NGE_IER, 0);
1609		nge_poll(ifp, 0, 1);
1610		return;
1611	}
1612#endif /* DEVICE_POLLING */
1613
1614	/* Suppress unwanted interrupts */
1615	if (!(ifp->if_flags & IFF_UP)) {
1616		nge_stop(sc);
1617		return;
1618	}
1619
1620	/* Disable interrupts. */
1621	CSR_WRITE_4(sc, NGE_IER, 0);
1622
1623	/* Data LED on for TBI mode */
1624	if(sc->nge_tbi)
1625		 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1626			     | NGE_GPIO_GP3_OUT);
1627
1628	for (;;) {
1629		/* Reading the ISR register clears all interrupts. */
1630		status = CSR_READ_4(sc, NGE_ISR);
1631
1632		if ((status & NGE_INTRS) == 0)
1633			break;
1634
1635		if ((status & NGE_ISR_TX_DESC_OK) ||
1636		    (status & NGE_ISR_TX_ERR) ||
1637		    (status & NGE_ISR_TX_OK) ||
1638		    (status & NGE_ISR_TX_IDLE))
1639			nge_txeof(sc);
1640
1641		if ((status & NGE_ISR_RX_DESC_OK) ||
1642		    (status & NGE_ISR_RX_ERR) ||
1643		    (status & NGE_ISR_RX_OFLOW) ||
1644		    (status & NGE_ISR_RX_FIFO_OFLOW) ||
1645		    (status & NGE_ISR_RX_IDLE) ||
1646		    (status & NGE_ISR_RX_OK))
1647			nge_rxeof(sc);
1648
1649		if ((status & NGE_ISR_RX_IDLE))
1650			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1651
1652		if (status & NGE_ISR_SYSERR) {
1653			nge_reset(sc);
1654			ifp->if_flags &= ~IFF_RUNNING;
1655			nge_init(sc);
1656		}
1657
1658#if 0
1659		/*
1660		 * XXX: nge_tick() is not ready to be called this way
1661		 * it screws up the aneg timeout because mii_tick() is
1662		 * only to be called once per second.
1663		 */
1664		if (status & NGE_IMR_PHY_INTR) {
1665			sc->nge_link = 0;
1666			nge_tick(sc);
1667		}
1668#endif
1669	}
1670
1671	/* Re-enable interrupts. */
1672	CSR_WRITE_4(sc, NGE_IER, 1);
1673
1674	if (ifp->if_snd.ifq_head != NULL)
1675		nge_start(ifp);
1676
1677	/* Data LED off for TBI mode */
1678
1679	if(sc->nge_tbi)
1680		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1681			    & ~NGE_GPIO_GP3_OUT);
1682
1683	return;
1684}
1685
1686/*
1687 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1688 * pointers to the fragment pointers.
1689 */
1690static int
1691nge_encap(sc, m_head, txidx)
1692	struct nge_softc	*sc;
1693	struct mbuf		*m_head;
1694	u_int32_t		*txidx;
1695{
1696	struct nge_desc		*f = NULL;
1697	struct mbuf		*m;
1698	int			frag, cur, cnt = 0;
1699	struct m_tag		*mtag;
1700
1701	/*
1702 	 * Start packing the mbufs in this chain into
1703	 * the fragment pointers. Stop when we run out
1704 	 * of fragments or hit the end of the mbuf chain.
1705	 */
1706	m = m_head;
1707	cur = frag = *txidx;
1708
1709	for (m = m_head; m != NULL; m = m->m_next) {
1710		if (m->m_len != 0) {
1711			if ((NGE_TX_LIST_CNT -
1712			    (sc->nge_cdata.nge_tx_cnt + cnt)) < 2)
1713				return(ENOBUFS);
1714			f = &sc->nge_ldata->nge_tx_list[frag];
1715			f->nge_ctl = NGE_CMDSTS_MORE | m->m_len;
1716			f->nge_ptr = vtophys(mtod(m, vm_offset_t));
1717			if (cnt != 0)
1718				f->nge_ctl |= NGE_CMDSTS_OWN;
1719			cur = frag;
1720			NGE_INC(frag, NGE_TX_LIST_CNT);
1721			cnt++;
1722		}
1723	}
1724
1725	if (m != NULL)
1726		return(ENOBUFS);
1727
1728	sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0;
1729	if (m_head->m_pkthdr.csum_flags) {
1730		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
1731			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1732			    NGE_TXEXTSTS_IPCSUM;
1733		if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
1734			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1735			    NGE_TXEXTSTS_TCPCSUM;
1736		if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
1737			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1738			    NGE_TXEXTSTS_UDPCSUM;
1739	}
1740
1741	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m);
1742	if (mtag != NULL) {
1743		sc->nge_ldata->nge_tx_list[cur].nge_extsts |=
1744			(NGE_TXEXTSTS_VLANPKT|VLAN_TAG_VALUE(mtag));
1745	}
1746
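	/*
	 * Attach the mbuf chain to the last descriptor and clear its
	 * MORE bit, then hand the chain to the chip by setting the
	 * OWN bit on the first descriptor last, so the chip never
	 * sees a partially built chain.
	 */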
1747	sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head;
1748	sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE;
1749	sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN;
1750	sc->nge_cdata.nge_tx_cnt += cnt;
1751	*txidx = frag;
1752
1753	return(0);
1754}
1755
1756/*
1757 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1758 * to the mbuf data regions directly in the transmit lists. We also save a
1759 * copy of the pointers since the transmit list fragment pointers are
1760 * physical addresses.
1761 */
1762
1763static void
1764nge_start(ifp)
1765	struct ifnet		*ifp;
1766{
1767	struct nge_softc	*sc;
1768	struct mbuf		*m_head = NULL;
1769	u_int32_t		idx;
1770
1771	sc = ifp->if_softc;
1772
1773	if (!sc->nge_link)
1774		return;
1775
1776	idx = sc->nge_cdata.nge_tx_prod;
1777
1778	if (ifp->if_flags & IFF_OACTIVE)
1779		return;
1780
1781	while(sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) {
1782		IF_DEQUEUE(&ifp->if_snd, m_head);
1783		if (m_head == NULL)
1784			break;
1785
1786		if (nge_encap(sc, m_head, &idx)) {
1787			IF_PREPEND(&ifp->if_snd, m_head);
1788			ifp->if_flags |= IFF_OACTIVE;
1789			break;
1790		}
1791
1792		/*
1793		 * If there's a BPF listener, bounce a copy of this frame
1794		 * to him.
1795		 */
1796		BPF_MTAP(ifp, m_head);
1797
1798	}
1799
1800	/* Transmit */
1801	sc->nge_cdata.nge_tx_prod = idx;
1802	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);
1803
1804	/*
1805	 * Set a timeout in case the chip goes out to lunch.
1806	 */
1807	ifp->if_timer = 5;
1808
1809	return;
1810}
1811
1812static void
1813nge_init(xsc)
1814	void			*xsc;
1815{
1816	struct nge_softc	*sc = xsc;
1817	struct ifnet		*ifp = &sc->arpcom.ac_if;
1818	struct mii_data		*mii;
1819	int			s;
1820
1821	if (ifp->if_flags & IFF_RUNNING)
1822		return;
1823
1824	s = splimp();
1825
1826	/*
1827	 * Cancel pending I/O and free all RX/TX buffers.
1828	 */
1829	nge_stop(sc);
1830
1831	if (sc->nge_tbi) {
1832		mii = NULL;
1833	} else {
1834		mii = device_get_softc(sc->nge_miibus);
1835	}
1836
1837	/* Set MAC address */
1838	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
1839	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1840	    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
1841	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
1842	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1843	    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
1844	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
1845	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1846	    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
1847
1848	/* Init circular RX list. */
1849	if (nge_list_rx_init(sc) == ENOBUFS) {
1850		printf("nge%d: initialization failed: no "
1851			"memory for rx buffers\n", sc->nge_unit);
1852		nge_stop(sc);
1853		(void)splx(s);
1854		return;
1855	}
1856
1857	/*
1858	 * Init tx descriptors.
1859	 */
1860	nge_list_tx_init(sc);
1861
1862	/*
1863	 * For the NatSemi chip, we have to explicitly enable the
1864	 * reception of ARP frames, as well as turn on the 'perfect
1865	 * match' filter where we store the station address, otherwise
1866	 * we won't receive unicasts meant for this host.
1867	 */
1868	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
1869	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);
1870
1871	 /* If we want promiscuous mode, set the allframes bit. */
1872	if (ifp->if_flags & IFF_PROMISC) {
1873		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1874	} else {
1875		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1876	}
1877
1878	/*
1879	 * Set the capture broadcast bit to capture broadcast frames.
1880	 */
1881	if (ifp->if_flags & IFF_BROADCAST) {
1882		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1883	} else {
1884		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1885	}
1886
1887	/*
1888	 * Load the multicast filter.
1889	 */
1890	nge_setmulti(sc);
1891
1892	/* Turn the receive filter on */
1893	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);
1894
1895	/*
1896	 * Load the address of the RX and TX lists.
1897	 */
1898	CSR_WRITE_4(sc, NGE_RX_LISTPTR,
1899	    vtophys(&sc->nge_ldata->nge_rx_list[0]));
1900	CSR_WRITE_4(sc, NGE_TX_LISTPTR,
1901	    vtophys(&sc->nge_ldata->nge_tx_list[0]));
1902
1903	/* Set RX configuration */
1904	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);
1905	/*
1906	 * Enable hardware checksum validation for all IPv4
1907	 * packets, do not reject packets with bad checksums.
1908	 */
1909	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);
1910
1911	/*
1912	 * Tell the chip to detect and strip VLAN tag info from
1913	 * received frames. The tag will be provided in the extsts
1914	 * field in the RX descriptors.
1915	 */
1916	NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL,
1917	    NGE_VIPRXCTL_TAG_DETECT_ENB|NGE_VIPRXCTL_TAG_STRIP_ENB);
1918
1919	/* Set TX configuration */
1920	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);
1921
1922	/*
1923	 * Enable TX IPv4 checksumming on a per-packet basis.
1924	 */
1925	CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT);
1926
1927	/*
1928	 * Tell the chip to insert VLAN tags on a per-packet basis as
1929	 * dictated by the code in the frame encapsulation routine.
1930	 */
1931	NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);
1932
1933	/* Set full/half duplex mode. */
1934	if (sc->nge_tbi) {
1935		if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
1936		    == IFM_FDX) {
1937			NGE_SETBIT(sc, NGE_TX_CFG,
1938			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1939			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1940		} else {
1941			NGE_CLRBIT(sc, NGE_TX_CFG,
1942			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1943			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1944		}
1945	} else {
1946		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1947			NGE_SETBIT(sc, NGE_TX_CFG,
1948			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1949			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1950		} else {
1951			NGE_CLRBIT(sc, NGE_TX_CFG,
1952			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1953			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1954		}
1955	}
1956
1957	nge_tick(sc);
1958
1959	/*
1960	 * Enable the delivery of PHY interrupts based on
1961	 * link/speed/duplex status changes. Also enable the
1962	 * extsts field in the DMA descriptors (needed for
1963	 * TCP/IP checksum offload on transmit).
1964	 */
1965	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD|
1966	    NGE_CFG_PHYINTR_LNK|NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB);
1967
1968	/*
1969	 * Configure interrupt holdoff (moderation). We can
1970	 * have the chip delay interrupt delivery for a certain
1971	 * period. Units are in 100us, and the max setting
1972	 * is 25500us (0xFF x 100us). Default is a 100us holdoff.
1973	 */
1974	CSR_WRITE_4(sc, NGE_IHR, 0x01);
1975
1976	/*
1977	 * Enable interrupts.
1978	 */
1979	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
1980#ifdef DEVICE_POLLING
1981	/*
1982	 * ... only enable interrupts if we are not polling, make sure
1983	 * they are off otherwise.
1984	 */
1985	if (ifp->if_ipending & IFF_POLLING)
1986		CSR_WRITE_4(sc, NGE_IER, 0);
1987	else
1988#endif /* DEVICE_POLLING */
1989	CSR_WRITE_4(sc, NGE_IER, 1);
1990
1991	/* Enable receiver and transmitter. */
1992	NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
1993	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1994
1995	nge_ifmedia_upd(ifp);
1996
1997	ifp->if_flags |= IFF_RUNNING;
1998	ifp->if_flags &= ~IFF_OACTIVE;
1999
2000	(void)splx(s);
2001
2002	return;
2003}
2004
2005/*
2006 * Set media options.
2007 */
2008static int
2009nge_ifmedia_upd(ifp)
2010	struct ifnet		*ifp;
2011{
2012	struct nge_softc	*sc;
2013	struct mii_data		*mii;
2014
2015	sc = ifp->if_softc;
2016
2017	if (sc->nge_tbi) {
2018		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
2019		     == IFM_AUTO) {
2020			CSR_WRITE_4(sc, NGE_TBI_ANAR,
2021				CSR_READ_4(sc, NGE_TBI_ANAR)
2022					| NGE_TBIANAR_HDX | NGE_TBIANAR_FDX
2023					| NGE_TBIANAR_PS1 | NGE_TBIANAR_PS2);
2024			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG
2025				| NGE_TBIBMCR_RESTART_ANEG);
2026			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG);
2027		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media
2028			    & IFM_GMASK) == IFM_FDX) {
2029			NGE_SETBIT(sc, NGE_TX_CFG,
2030			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
2031			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
2032
2033			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
2034			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
2035		} else {
2036			NGE_CLRBIT(sc, NGE_TX_CFG,
2037			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
2038			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
2039
2040			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
2041			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
2042		}
2043
2044		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
2045			    & ~NGE_GPIO_GP3_OUT);
2046	} else {
2047		mii = device_get_softc(sc->nge_miibus);
2048		sc->nge_link = 0;
2049		if (mii->mii_instance) {
2050			struct mii_softc	*miisc;
2051			for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2052			    miisc = LIST_NEXT(miisc, mii_list))
2053				mii_phy_reset(miisc);
2054		}
2055		mii_mediachg(mii);
2056	}
2057
2058	return(0);
2059}
2060
2061/*
2062 * Report current media status.
2063 */
2064static void
2065nge_ifmedia_sts(ifp, ifmr)
2066	struct ifnet		*ifp;
2067	struct ifmediareq	*ifmr;
2068{
2069	struct nge_softc	*sc;
2070	struct mii_data		*mii;
2071
2072	sc = ifp->if_softc;
2073
2074	if (sc->nge_tbi) {
2075		ifmr->ifm_status = IFM_AVALID;
2076		ifmr->ifm_active = IFM_ETHER;
2077
2078		if (CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) {
2079			ifmr->ifm_status |= IFM_ACTIVE;
2080		}
2081		if (CSR_READ_4(sc, NGE_TBI_BMCR) & NGE_TBIBMCR_LOOPBACK)
2082			ifmr->ifm_active |= IFM_LOOP;
2083		if (!(CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE)) {
2084			ifmr->ifm_active |= IFM_NONE;
2085			ifmr->ifm_status = 0;
2086			return;
2087		}
2088		ifmr->ifm_active |= IFM_1000_SX;
2089		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
2090		    == IFM_AUTO) {
2091			ifmr->ifm_active |= IFM_AUTO;
2092			if (CSR_READ_4(sc, NGE_TBI_ANLPAR)
2093			    & NGE_TBIANAR_FDX) {
2094				ifmr->ifm_active |= IFM_FDX;
2095			} else if (CSR_READ_4(sc, NGE_TBI_ANLPAR)
2096				  & NGE_TBIANAR_HDX) {
2097				ifmr->ifm_active |= IFM_HDX;
2098			}
2099		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
2100			== IFM_FDX)
2101			ifmr->ifm_active |= IFM_FDX;
2102		else
2103			ifmr->ifm_active |= IFM_HDX;
2104
2105	} else {
2106		mii = device_get_softc(sc->nge_miibus);
2107		mii_pollstat(mii);
2108		ifmr->ifm_active = mii->mii_media_active;
2109		ifmr->ifm_status = mii->mii_media_status;
2110	}
2111
2112	return;
2113}
2114
2115static int
2116nge_ioctl(ifp, command, data)
2117	struct ifnet		*ifp;
2118	u_long			command;
2119	caddr_t			data;
2120{
2121	struct nge_softc	*sc = ifp->if_softc;
2122	struct ifreq		*ifr = (struct ifreq *) data;
2123	struct mii_data		*mii;
2124	int			s, error = 0;
2125
2126	s = splimp();
2127
2128	switch(command) {
2129	case SIOCSIFMTU:
2130		if (ifr->ifr_mtu > NGE_JUMBO_MTU)
2131			error = EINVAL;
2132		else {
2133			ifp->if_mtu = ifr->ifr_mtu;
2134			/*
2135			 * Workaround: if the MTU is 8152 or larger
2136			 * (8170 less the 18 bytes of Ethernet header and
2137			 * CRC), turn off TX checksum offloading.
2138			 */
2139			if (ifr->ifr_mtu >= 8152)
2140				ifp->if_hwassist = 0;
2141			else
2142				ifp->if_hwassist = NGE_CSUM_FEATURES;
2143		}
2144		break;
2145	case SIOCSIFFLAGS:
2146		if (ifp->if_flags & IFF_UP) {
2147			if (ifp->if_flags & IFF_RUNNING &&
2148			    ifp->if_flags & IFF_PROMISC &&
2149			    !(sc->nge_if_flags & IFF_PROMISC)) {
2150				NGE_SETBIT(sc, NGE_RXFILT_CTL,
2151				    NGE_RXFILTCTL_ALLPHYS|
2152				    NGE_RXFILTCTL_ALLMULTI);
2153			} else if (ifp->if_flags & IFF_RUNNING &&
2154			    !(ifp->if_flags & IFF_PROMISC) &&
2155			    sc->nge_if_flags & IFF_PROMISC) {
2156				NGE_CLRBIT(sc, NGE_RXFILT_CTL,
2157				    NGE_RXFILTCTL_ALLPHYS);
2158				if (!(ifp->if_flags & IFF_ALLMULTI))
2159					NGE_CLRBIT(sc, NGE_RXFILT_CTL,
2160					    NGE_RXFILTCTL_ALLMULTI);
2161			} else {
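				/*
				 * Not just a promiscuous mode change:
				 * the interface is coming up or some
				 * other flag changed, so do a full
				 * reinitialization.
				 */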
2162				ifp->if_flags &= ~IFF_RUNNING;
2163				nge_init(sc);
2164			}
2165		} else {
2166			if (ifp->if_flags & IFF_RUNNING)
2167				nge_stop(sc);
2168		}
2169		sc->nge_if_flags = ifp->if_flags;
2170		error = 0;
2171		break;
2172	case SIOCADDMULTI:
2173	case SIOCDELMULTI:
2174		nge_setmulti(sc);
2175		error = 0;
2176		break;
2177	case SIOCGIFMEDIA:
2178	case SIOCSIFMEDIA:
2179		if (sc->nge_tbi) {
2180			error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia,
2181					      command);
2182		} else {
2183			mii = device_get_softc(sc->nge_miibus);
2184			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
2185					      command);
2186		}
2187		break;
2188	default:
2189		error = ether_ioctl(ifp, command, data);
2190		break;
2191	}
2192
2193	(void)splx(s);
2194
2195	return(error);
2196}
2197
2198static void
2199nge_watchdog(ifp)
2200	struct ifnet		*ifp;
2201{
2202	struct nge_softc	*sc;
2203
2204	sc = ifp->if_softc;
2205
2206	ifp->if_oerrors++;
2207	printf("nge%d: watchdog timeout\n", sc->nge_unit);
2208
2209	nge_stop(sc);
2210	nge_reset(sc);
2211	ifp->if_flags &= ~IFF_RUNNING;
2212	nge_init(sc);
2213
2214	if (ifp->if_snd.ifq_head != NULL)
2215		nge_start(ifp);
2216
2217	return;
2218}
2219
2220/*
2221 * Stop the adapter and free any mbufs allocated to the
2222 * RX and TX lists.
2223 */
2224static void
2225nge_stop(sc)
2226	struct nge_softc	*sc;
2227{
2228	register int		i;
2229	struct ifnet		*ifp;
2230	struct mii_data		*mii;
2231
2232	ifp = &sc->arpcom.ac_if;
2233	ifp->if_timer = 0;
2234	if (sc->nge_tbi) {
2235		mii = NULL;
2236	} else {
2237		mii = device_get_softc(sc->nge_miibus);
2238	}
2239
2240	untimeout(nge_tick, sc, sc->nge_stat_ch);
2241#ifdef DEVICE_POLLING
2242	ether_poll_deregister(ifp);
2243#endif
2244	CSR_WRITE_4(sc, NGE_IER, 0);
2245	CSR_WRITE_4(sc, NGE_IMR, 0);
2246	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
2247	DELAY(1000);
2248	CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0);
2249	CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0);
2250
2251	if (!sc->nge_tbi)
2252		mii_down(mii);
2253
2254	sc->nge_link = 0;
2255
2256	/*
2257	 * Free data in the RX lists.
2258	 */
2259	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
2260		if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) {
2261			m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf);
2262			sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL;
2263		}
2264	}
2265	bzero((char *)&sc->nge_ldata->nge_rx_list,
2266		sizeof(sc->nge_ldata->nge_rx_list));
2267
2268	/*
2269	 * Free the TX list buffers.
2270	 */
2271	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
2272		if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) {
2273			m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf);
2274			sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL;
2275		}
2276	}
2277
2278	bzero((char *)&sc->nge_ldata->nge_tx_list,
2279		sizeof(sc->nge_ldata->nge_tx_list));
2280
2281	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2282
2283	return;
2284}
2285
2286/*
2287 * Stop all chip I/O so that the kernel's probe routines don't
2288 * get confused by errant DMAs when rebooting.
2289 */
2290static void
2291nge_shutdown(dev)
2292	device_t		dev;
2293{
2294	struct nge_softc	*sc;
2295
2296	sc = device_get_softc(dev);
2297
2298	nge_reset(sc);
2299	nge_stop(sc);
2300
2301	return;
2302}
2303