1/*
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2000, 2001
4 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/nge/if_nge.c 128122 2004-04-11 16:26:39Z rwatson $");
36
37/*
38 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
39 * for FreeBSD. Datasheets are available from:
40 *
41 * http://www.national.com/ds/DP/DP83820.pdf
42 * http://www.national.com/ds/DP/DP83821.pdf
43 *
44 * These chips are used on several low cost gigabit ethernet NICs
45 * sold by D-Link, Addtron, SMC and Asante. Both parts are
46 * virtually the same, except the 83820 is a 64-bit/32-bit part,
47 * while the 83821 is 32-bit only.
48 *
49 * Many cards also use National gigE transceivers, such as the
50 * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
51 * contains a full register description that applies to all of these
52 * components:
53 *
54 * http://www.national.com/ds/DP/DP83861.pdf
55 *
56 * Written by Bill Paul <wpaul@bsdi.com>
57 * BSDi Open Source Solutions
58 */
59
60/*
61 * The NatSemi DP83820 and 83821 controllers are enhanced versions
62 * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
63 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
64 * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
65 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
66 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
67 * matching buffers, one perfect address filter buffer and interrupt
68 * moderation. The 83820 supports both 64-bit and 32-bit addressing
69 * and data transfers: the 64-bit support can be toggled on or off
70 * via software. This affects the size of certain fields in the DMA
71 * descriptors.
72 *
73 * There are two bugs/misfeatures in the 83820/83821 that I have
74 * discovered so far:
75 *
76 * - Receive buffers must be aligned on 64-bit boundaries, which means
77 *   you must resort to copying data in order to fix up the payload
78 *   alignment.
79 *
80 * - In order to transmit jumbo frames larger than 8170 bytes, you have
81 *   to turn off transmit checksum offloading, because the chip can't
82 *   compute the checksum on an outgoing frame unless it fits entirely
83 *   within the TX FIFO, which is only 8192 bytes in size. If you have
 *   TX checksum offload enabled and you attempt to transmit a frame
 *   larger than 8170 bytes, the transmitter will wedge.
86 *
87 * To work around the latter problem, TX checksum offload is disabled
88 * if the user selects an MTU larger than 8152 (8170 - 18).
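 * (8170 bytes is the largest frame the chip can checksum from the
 * FIFO; the 18-byte difference presumably covers the 14-byte Ethernet
 * header plus the 4-byte CRC, leaving an 8152-byte payload.)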
89 */
90
91#include <sys/param.h>
92#include <sys/systm.h>
93#include <sys/sockio.h>
94#include <sys/mbuf.h>
95#include <sys/malloc.h>
96#include <sys/kernel.h>
97#include <sys/socket.h>
98
99#include <net/if.h>
100#include <net/if_arp.h>
101#include <net/ethernet.h>
102#include <net/if_dl.h>
103#include <net/if_media.h>
104#include <net/if_types.h>
105#include <net/if_vlan_var.h>
106
107#include <net/bpf.h>
108
109#include <vm/vm.h>              /* for vtophys */
110#include <vm/pmap.h>            /* for vtophys */
111#include <machine/clock.h>      /* for DELAY */
112#include <machine/bus_pio.h>
113#include <machine/bus_memio.h>
114#include <machine/bus.h>
115#include <machine/resource.h>
116#include <sys/bus.h>
117#include <sys/rman.h>
118
119#include <dev/mii/mii.h>
120#include <dev/mii/miivar.h>
121
122#include <dev/pci/pcireg.h>
123#include <dev/pci/pcivar.h>
124
125#define NGE_USEIOSPACE
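/*
 * With NGE_USEIOSPACE defined, chip registers are accessed through PCI
 * I/O space (see the NGE_RES/NGE_RID selection below); undefine it to
 * map the registers through PCI memory space instead.
 */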
126
127#include <dev/nge/if_ngereg.h>
128
129MODULE_DEPEND(nge, pci, 1, 1, 1);
130MODULE_DEPEND(nge, ether, 1, 1, 1);
131MODULE_DEPEND(nge, miibus, 1, 1, 1);
132
133/* "controller miibus0" required.  See GENERIC if you get errors here. */
134#include "miibus_if.h"
135
136#define NGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
137
138/*
139 * Various supported device vendors/types and their names.
140 */
141static struct nge_type nge_devs[] = {
142	{ NGE_VENDORID, NGE_DEVICEID,
143	    "National Semiconductor Gigabit Ethernet" },
144	{ 0, 0, NULL }
145};
146
147static int nge_probe(device_t);
148static int nge_attach(device_t);
149static int nge_detach(device_t);
150
151static int nge_alloc_jumbo_mem(struct nge_softc *);
152static void nge_free_jumbo_mem(struct nge_softc *);
153static void *nge_jalloc(struct nge_softc *);
154static void nge_jfree(void *, void *);
155
156static int nge_newbuf(struct nge_softc *, struct nge_desc *, struct mbuf *);
157static int nge_encap(struct nge_softc *, struct mbuf *, u_int32_t *);
158static void nge_rxeof(struct nge_softc *);
159static void nge_txeof(struct nge_softc *);
160static void nge_intr(void *);
161static void nge_tick(void *);
162static void nge_start(struct ifnet *);
163static int nge_ioctl(struct ifnet *, u_long, caddr_t);
164static void nge_init(void *);
165static void nge_stop(struct nge_softc *);
166static void nge_watchdog(struct ifnet *);
167static void nge_shutdown(device_t);
168static int nge_ifmedia_upd(struct ifnet *);
169static void nge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
170
171static void nge_delay(struct nge_softc *);
172static void nge_eeprom_idle(struct nge_softc *);
173static void nge_eeprom_putbyte(struct nge_softc *, int);
174static void nge_eeprom_getword(struct nge_softc *, int, u_int16_t *);
175static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int, int);
176
177static void nge_mii_sync(struct nge_softc *);
178static void nge_mii_send(struct nge_softc *, u_int32_t, int);
179static int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
180static int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);
181
182static int nge_miibus_readreg(device_t, int, int);
183static int nge_miibus_writereg(device_t, int, int, int);
184static void nge_miibus_statchg(device_t);
185
186static void nge_setmulti(struct nge_softc *);
187static uint32_t nge_mchash(const uint8_t *);
188static void nge_reset(struct nge_softc *);
189static int nge_list_rx_init(struct nge_softc *);
190static int nge_list_tx_init(struct nge_softc *);
191
192#ifdef NGE_USEIOSPACE
193#define NGE_RES			SYS_RES_IOPORT
194#define NGE_RID			NGE_PCI_LOIO
195#else
196#define NGE_RES			SYS_RES_MEMORY
197#define NGE_RID			NGE_PCI_LOMEM
198#endif
199
200static device_method_t nge_methods[] = {
201	/* Device interface */
202	DEVMETHOD(device_probe,		nge_probe),
203	DEVMETHOD(device_attach,	nge_attach),
204	DEVMETHOD(device_detach,	nge_detach),
205	DEVMETHOD(device_shutdown,	nge_shutdown),
206
207	/* bus interface */
208	DEVMETHOD(bus_print_child,	bus_generic_print_child),
209	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
210
211	/* MII interface */
212	DEVMETHOD(miibus_readreg,	nge_miibus_readreg),
213	DEVMETHOD(miibus_writereg,	nge_miibus_writereg),
214	DEVMETHOD(miibus_statchg,	nge_miibus_statchg),
215
216	{ 0, 0 }
217};
218
219static driver_t nge_driver = {
220	"nge",
221	nge_methods,
222	sizeof(struct nge_softc)
223};
224
225static devclass_t nge_devclass;
226
227DRIVER_MODULE(nge, pci, nge_driver, nge_devclass, 0, 0);
228DRIVER_MODULE(miibus, nge, miibus_driver, miibus_devclass, 0, 0);
229
230#define NGE_SETBIT(sc, reg, x)				\
231	CSR_WRITE_4(sc, reg,				\
232		CSR_READ_4(sc, reg) | (x))
233
234#define NGE_CLRBIT(sc, reg, x)				\
235	CSR_WRITE_4(sc, reg,				\
236		CSR_READ_4(sc, reg) & ~(x))
237
238#define SIO_SET(x)					\
239	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))
240
241#define SIO_CLR(x)					\
242	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))
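
/*
 * SIO_SET and SIO_CLR toggle individual bits in the MEAR register,
 * which carries the bit-banged serial EEPROM and MII management lines.
 */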
243
244static void
245nge_delay(sc)
246	struct nge_softc	*sc;
247{
248	int			idx;
249
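	/*
	 * Issue roughly ten dummy CSR reads; each read has to complete
	 * on the PCI bus, so this yields a short delay for the serial
	 * EEPROM interface.  The 300/33 figure presumably reflects a
	 * ~300ns target at a 33MHz bus clock.
	 */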
250	for (idx = (300 / 33) + 1; idx > 0; idx--)
251		CSR_READ_4(sc, NGE_CSR);
252
253	return;
254}
255
256static void
257nge_eeprom_idle(sc)
258	struct nge_softc	*sc;
259{
260	register int		i;
261
262	SIO_SET(NGE_MEAR_EE_CSEL);
263	nge_delay(sc);
264	SIO_SET(NGE_MEAR_EE_CLK);
265	nge_delay(sc);
266
267	for (i = 0; i < 25; i++) {
268		SIO_CLR(NGE_MEAR_EE_CLK);
269		nge_delay(sc);
270		SIO_SET(NGE_MEAR_EE_CLK);
271		nge_delay(sc);
272	}
273
274	SIO_CLR(NGE_MEAR_EE_CLK);
275	nge_delay(sc);
276	SIO_CLR(NGE_MEAR_EE_CSEL);
277	nge_delay(sc);
278	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
279
280	return;
281}
282
283/*
284 * Send a read command and address to the EEPROM, check for ACK.
285 */
286static void
287nge_eeprom_putbyte(sc, addr)
288	struct nge_softc	*sc;
289	int			addr;
290{
291	register int		d, i;
292
293	d = addr | NGE_EECMD_READ;
294
295	/*
	 * Feed in each bit and strobe the clock.
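	 * Starting the scan at 0x400 clocks out 11 bits, MSB first: the
	 * read command bits from NGE_EECMD_READ followed by the word
	 * address in the low bits.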
297	 */
298	for (i = 0x400; i; i >>= 1) {
299		if (d & i) {
300			SIO_SET(NGE_MEAR_EE_DIN);
301		} else {
302			SIO_CLR(NGE_MEAR_EE_DIN);
303		}
304		nge_delay(sc);
305		SIO_SET(NGE_MEAR_EE_CLK);
306		nge_delay(sc);
307		SIO_CLR(NGE_MEAR_EE_CLK);
308		nge_delay(sc);
309	}
310
311	return;
312}
313
314/*
315 * Read a word of data stored in the EEPROM at address 'addr.'
316 */
317static void
318nge_eeprom_getword(sc, addr, dest)
319	struct nge_softc	*sc;
320	int			addr;
321	u_int16_t		*dest;
322{
323	register int		i;
324	u_int16_t		word = 0;
325
326	/* Force EEPROM to idle state. */
327	nge_eeprom_idle(sc);
328
329	/* Enter EEPROM access mode. */
330	nge_delay(sc);
331	SIO_CLR(NGE_MEAR_EE_CLK);
332	nge_delay(sc);
333	SIO_SET(NGE_MEAR_EE_CSEL);
334	nge_delay(sc);
335
336	/*
337	 * Send address of word we want to read.
338	 */
339	nge_eeprom_putbyte(sc, addr);
340
341	/*
342	 * Start reading bits from EEPROM.
343	 */
344	for (i = 0x8000; i; i >>= 1) {
345		SIO_SET(NGE_MEAR_EE_CLK);
346		nge_delay(sc);
347		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
348			word |= i;
349		nge_delay(sc);
350		SIO_CLR(NGE_MEAR_EE_CLK);
351		nge_delay(sc);
352	}
353
354	/* Turn off EEPROM access mode. */
355	nge_eeprom_idle(sc);
356
357	*dest = word;
358
359	return;
360}
361
362/*
363 * Read a sequence of words from the EEPROM.
364 */
365static void
366nge_read_eeprom(sc, dest, off, cnt, swap)
367	struct nge_softc	*sc;
368	caddr_t			dest;
369	int			off;
370	int			cnt;
371	int			swap;
372{
373	int			i;
374	u_int16_t		word = 0, *ptr;
375
376	for (i = 0; i < cnt; i++) {
377		nge_eeprom_getword(sc, off + i, &word);
378		ptr = (u_int16_t *)(dest + (i * 2));
379		if (swap)
380			*ptr = ntohs(word);
381		else
382			*ptr = word;
383	}
384
385	return;
386}
387
388/*
389 * Sync the PHYs by setting data bit and strobing the clock 32 times.
390 */
391static void
392nge_mii_sync(sc)
393	struct nge_softc		*sc;
394{
395	register int		i;
396
397	SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);
398
399	for (i = 0; i < 32; i++) {
400		SIO_SET(NGE_MEAR_MII_CLK);
401		DELAY(1);
402		SIO_CLR(NGE_MEAR_MII_CLK);
403		DELAY(1);
404	}
405
406	return;
407}
408
409/*
410 * Clock a series of bits through the MII.
411 */
412static void
413nge_mii_send(sc, bits, cnt)
414	struct nge_softc		*sc;
415	u_int32_t		bits;
416	int			cnt;
417{
418	int			i;
419
420	SIO_CLR(NGE_MEAR_MII_CLK);
421
422	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(NGE_MEAR_MII_DATA);
		} else {
			SIO_CLR(NGE_MEAR_MII_DATA);
		}
428		DELAY(1);
429		SIO_CLR(NGE_MEAR_MII_CLK);
430		DELAY(1);
431		SIO_SET(NGE_MEAR_MII_CLK);
432	}
433}
434
435/*
 * Read a PHY register through the MII.
437 */
438static int
439nge_mii_readreg(sc, frame)
440	struct nge_softc		*sc;
441	struct nge_mii_frame	*frame;
442
443{
444	int			i, ack, s;
445
446	s = splimp();
447
448	/*
449	 * Set up frame for RX.
450	 */
451	frame->mii_stdelim = NGE_MII_STARTDELIM;
452	frame->mii_opcode = NGE_MII_READOP;
453	frame->mii_turnaround = 0;
454	frame->mii_data = 0;
455
456	CSR_WRITE_4(sc, NGE_MEAR, 0);
457
458	/*
459 	 * Turn on data xmit.
460	 */
461	SIO_SET(NGE_MEAR_MII_DIR);
462
463	nge_mii_sync(sc);
464
465	/*
466	 * Send command/address info.
467	 */
468	nge_mii_send(sc, frame->mii_stdelim, 2);
469	nge_mii_send(sc, frame->mii_opcode, 2);
470	nge_mii_send(sc, frame->mii_phyaddr, 5);
471	nge_mii_send(sc, frame->mii_regaddr, 5);
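
	/*
	 * 14 bits of the management frame have now been clocked out:
	 * 2 start bits, 2 opcode bits, 5 bits of PHY address and 5 bits
	 * of register address.  For a read, the turnaround cycle and the
	 * 16 data bits that follow are driven by the PHY, which is why
	 * the data direction is released below.
	 */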
472
473	/* Idle bit */
474	SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
475	DELAY(1);
476	SIO_SET(NGE_MEAR_MII_CLK);
477	DELAY(1);
478
479	/* Turn off xmit. */
480	SIO_CLR(NGE_MEAR_MII_DIR);
481	/* Check for ack */
482	SIO_CLR(NGE_MEAR_MII_CLK);
483	DELAY(1);
484	ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
485	SIO_SET(NGE_MEAR_MII_CLK);
486	DELAY(1);
487
488	/*
489	 * Now try reading data bits. If the ack failed, we still
490	 * need to clock through 16 cycles to keep the PHY(s) in sync.
491	 */
492	if (ack) {
493		for(i = 0; i < 16; i++) {
494			SIO_CLR(NGE_MEAR_MII_CLK);
495			DELAY(1);
496			SIO_SET(NGE_MEAR_MII_CLK);
497			DELAY(1);
498		}
499		goto fail;
500	}
501
502	for (i = 0x8000; i; i >>= 1) {
503		SIO_CLR(NGE_MEAR_MII_CLK);
504		DELAY(1);
505		if (!ack) {
506			if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
507				frame->mii_data |= i;
508			DELAY(1);
509		}
510		SIO_SET(NGE_MEAR_MII_CLK);
511		DELAY(1);
512	}
513
514fail:
515
516	SIO_CLR(NGE_MEAR_MII_CLK);
517	DELAY(1);
518	SIO_SET(NGE_MEAR_MII_CLK);
519	DELAY(1);
520
521	splx(s);
522
523	if (ack)
524		return(1);
525	return(0);
526}
527
528/*
529 * Write to a PHY register through the MII.
530 */
531static int
532nge_mii_writereg(sc, frame)
533	struct nge_softc		*sc;
534	struct nge_mii_frame	*frame;
535
536{
537	int			s;
538
539	s = splimp();
540	/*
541	 * Set up frame for TX.
542	 */
543
544	frame->mii_stdelim = NGE_MII_STARTDELIM;
545	frame->mii_opcode = NGE_MII_WRITEOP;
546	frame->mii_turnaround = NGE_MII_TURNAROUND;
547
548	/*
549 	 * Turn on data output.
550	 */
551	SIO_SET(NGE_MEAR_MII_DIR);
552
553	nge_mii_sync(sc);
554
555	nge_mii_send(sc, frame->mii_stdelim, 2);
556	nge_mii_send(sc, frame->mii_opcode, 2);
557	nge_mii_send(sc, frame->mii_phyaddr, 5);
558	nge_mii_send(sc, frame->mii_regaddr, 5);
559	nge_mii_send(sc, frame->mii_turnaround, 2);
560	nge_mii_send(sc, frame->mii_data, 16);
561
562	/* Idle bit. */
563	SIO_SET(NGE_MEAR_MII_CLK);
564	DELAY(1);
565	SIO_CLR(NGE_MEAR_MII_CLK);
566	DELAY(1);
567
568	/*
569	 * Turn off xmit.
570	 */
571	SIO_CLR(NGE_MEAR_MII_DIR);
572
573	splx(s);
574
575	return(0);
576}
577
578static int
579nge_miibus_readreg(dev, phy, reg)
580	device_t		dev;
581	int			phy, reg;
582{
583	struct nge_softc	*sc;
584	struct nge_mii_frame	frame;
585
586	sc = device_get_softc(dev);
587
588	bzero((char *)&frame, sizeof(frame));
589
590	frame.mii_phyaddr = phy;
591	frame.mii_regaddr = reg;
592	nge_mii_readreg(sc, &frame);
593
594	return(frame.mii_data);
595}
596
597static int
598nge_miibus_writereg(dev, phy, reg, data)
599	device_t		dev;
600	int			phy, reg, data;
601{
602	struct nge_softc	*sc;
603	struct nge_mii_frame	frame;
604
605	sc = device_get_softc(dev);
606
607	bzero((char *)&frame, sizeof(frame));
608
609	frame.mii_phyaddr = phy;
610	frame.mii_regaddr = reg;
611	frame.mii_data = data;
612	nge_mii_writereg(sc, &frame);
613
614	return(0);
615}
616
617static void
618nge_miibus_statchg(dev)
619	device_t		dev;
620{
621	int			status;
622	struct nge_softc	*sc;
623	struct mii_data		*mii;
624
625	sc = device_get_softc(dev);
626	if (sc->nge_tbi) {
627		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
628		    == IFM_AUTO) {
629			status = CSR_READ_4(sc, NGE_TBI_ANLPAR);
630			if (status == 0 || status & NGE_TBIANAR_FDX) {
631				NGE_SETBIT(sc, NGE_TX_CFG,
632				    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
633				NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
634			} else {
635				NGE_CLRBIT(sc, NGE_TX_CFG,
636				    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
637				NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
638			}
639
640		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
641			!= IFM_FDX) {
642			NGE_CLRBIT(sc, NGE_TX_CFG,
643			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
644			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
645		} else {
646			NGE_SETBIT(sc, NGE_TX_CFG,
647			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
648			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
649		}
650	} else {
651		mii = device_get_softc(sc->nge_miibus);
652
653		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
654		        NGE_SETBIT(sc, NGE_TX_CFG,
655			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
656			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
657		} else {
658			NGE_CLRBIT(sc, NGE_TX_CFG,
659			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
660			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
661		}
662
663		/* If we have a 1000Mbps link, set the mode_1000 bit. */
664		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
665		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
666			NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
667		} else {
668			NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
669		}
670	}
671	return;
672}
673
674static u_int32_t
675nge_mchash(addr)
676	const uint8_t *addr;
677{
678	uint32_t crc, carry;
679	int idx, bit;
680	uint8_t data;
681
682	/* Compute CRC for the address value. */
683	crc = 0xFFFFFFFF; /* initial value */
684
685	for (idx = 0; idx < 6; idx++) {
686		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
687			carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
688			crc <<= 1;
689			if (carry)
690				crc = (crc ^ 0x04c11db6) | carry;
691		}
692	}
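	/*
	 * Note: since crc has just been shifted left (its LSB is 0), the
	 * (crc ^ 0x04c11db6) | carry step above is equivalent to XORing
	 * with the standard Ethernet CRC-32 polynomial 0x04c11db7.
	 */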
693
694	/*
695	 * return the filter bit position
696	 */
697
698	return((crc >> 21) & 0x00000FFF);
699}
700
701static void
702nge_setmulti(sc)
703	struct nge_softc	*sc;
704{
705	struct ifnet		*ifp;
706	struct ifmultiaddr	*ifma;
707	u_int32_t		h = 0, i, filtsave;
708	int			bit, index;
709
710	ifp = &sc->arpcom.ac_if;
711
712	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
713		NGE_CLRBIT(sc, NGE_RXFILT_CTL,
714		    NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH);
715		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI);
716		return;
717	}
718
719	/*
720	 * We have to explicitly enable the multicast hash table
721	 * on the NatSemi chip if we want to use it, which we do.
722	 * We also have to tell it that we don't want to use the
723	 * hash table for matching unicast addresses.
724	 */
725	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH);
726	NGE_CLRBIT(sc, NGE_RXFILT_CTL,
727	    NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH);
728
729	filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL);
730
731	/* first, zot all the existing hash bits */
732	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
733		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
734		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
735	}
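	/*
	 * (Assuming NGE_MCAST_FILTER_LEN is the table size in bytes, each
	 * pass above clears one 16-bit word, wiping all 2048 hash bits.)
	 */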
736
737	/*
738	 * From the 11 bits returned by the crc routine, the top 7
739	 * bits represent the 16-bit word in the mcast hash table
740	 * that needs to be updated, and the lower 4 bits represent
	 * which bit within that word needs to be set.
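	 *
	 * For example, h = 0x2d3 selects word index (h >> 4) & 0x7f = 0x2d
	 * (register offset NGE_FILTADDR_MCAST_LO + 0x5a) and sets bit
	 * h & 0xf = 3 within that word.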
742	 */
743	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
744		if (ifma->ifma_addr->sa_family != AF_LINK)
745			continue;
746		h = nge_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
747		index = (h >> 4) & 0x7F;
748		bit = h & 0xF;
749		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
750		    NGE_FILTADDR_MCAST_LO + (index * 2));
751		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
752	}
753
754	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave);
755
756	return;
757}
758
759static void
760nge_reset(sc)
761	struct nge_softc	*sc;
762{
763	register int		i;
764
765	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);
766
767	for (i = 0; i < NGE_TIMEOUT; i++) {
768		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
769			break;
770	}
771
772	if (i == NGE_TIMEOUT)
773		printf("nge%d: reset never completed\n", sc->nge_unit);
774
775	/* Wait a little while for the chip to get its brains in order. */
776	DELAY(1000);
777
778	/*
	 * If this is a NatSemi chip, make sure to clear
780	 * PME mode.
781	 */
782	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
783	CSR_WRITE_4(sc, NGE_CLKRUN, 0);
784
785        return;
786}
787
788/*
789 * Probe for a NatSemi chip. Check the PCI vendor and device
790 * IDs against our list and return a device name if we find a match.
791 */
792static int
793nge_probe(dev)
794	device_t		dev;
795{
796	struct nge_type		*t;
797
798	t = nge_devs;
799
800	while(t->nge_name != NULL) {
801		if ((pci_get_vendor(dev) == t->nge_vid) &&
802		    (pci_get_device(dev) == t->nge_did)) {
803			device_set_desc(dev, t->nge_name);
804			return(0);
805		}
806		t++;
807	}
808
809	return(ENXIO);
810}
811
812/*
813 * Attach the interface. Allocate softc structures, do ifmedia
814 * setup and ethernet/BPF attach.
815 */
816static int
817nge_attach(dev)
818	device_t		dev;
819{
820	int			s;
821	u_char			eaddr[ETHER_ADDR_LEN];
822	struct nge_softc	*sc;
823	struct ifnet		*ifp;
824	int			unit, error = 0, rid;
825	const char		*sep = "";
826
827	s = splimp();
828
829	sc = device_get_softc(dev);
830	unit = device_get_unit(dev);
831	bzero(sc, sizeof(struct nge_softc));
832
833	mtx_init(&sc->nge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
834	    MTX_DEF | MTX_RECURSE);
835#ifndef BURN_BRIDGES
836	/*
837	 * Handle power management nonsense.
838	 */
839	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
840		u_int32_t		iobase, membase, irq;
841
842		/* Save important PCI config data. */
843		iobase = pci_read_config(dev, NGE_PCI_LOIO, 4);
844		membase = pci_read_config(dev, NGE_PCI_LOMEM, 4);
845		irq = pci_read_config(dev, NGE_PCI_INTLINE, 4);
846
847		/* Reset the power state. */
848		printf("nge%d: chip is in D%d power mode "
849		    "-- setting to D0\n", unit,
850		    pci_get_powerstate(dev));
851		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
852
853		/* Restore PCI config data. */
854		pci_write_config(dev, NGE_PCI_LOIO, iobase, 4);
855		pci_write_config(dev, NGE_PCI_LOMEM, membase, 4);
856		pci_write_config(dev, NGE_PCI_INTLINE, irq, 4);
857	}
858#endif
859	/*
860	 * Map control/status registers.
861	 */
862	pci_enable_busmaster(dev);
863
864	rid = NGE_RID;
865	sc->nge_res = bus_alloc_resource_any(dev, NGE_RES, &rid, RF_ACTIVE);
866
867	if (sc->nge_res == NULL) {
868		printf("nge%d: couldn't map ports/memory\n", unit);
869		error = ENXIO;
870		goto fail;
871	}
872
873	sc->nge_btag = rman_get_bustag(sc->nge_res);
874	sc->nge_bhandle = rman_get_bushandle(sc->nge_res);
875
876	/* Allocate interrupt */
877	rid = 0;
878	sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
879	    RF_SHAREABLE | RF_ACTIVE);
880
881	if (sc->nge_irq == NULL) {
882		printf("nge%d: couldn't map interrupt\n", unit);
883		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
884		error = ENXIO;
885		goto fail;
886	}
887
888	error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET,
889	    nge_intr, sc, &sc->nge_intrhand);
890
891	if (error) {
892		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
893		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
894		printf("nge%d: couldn't set up irq\n", unit);
895		goto fail;
896	}
897
898	/* Reset the adapter. */
899	nge_reset(sc);
900
901	/*
902	 * Get station address from the EEPROM.
903	 */
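	/*
	 * The three 16-bit words are read in reverse order: the word at
	 * NGE_EE_NODEADDR fills eaddr[4..5], the next word eaddr[2..3],
	 * and the last one eaddr[0..1].
	 */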
904	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
905	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
906	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);
907
908	sc->nge_unit = unit;
909	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
910
911	sc->nge_ldata = contigmalloc(sizeof(struct nge_list_data), M_DEVBUF,
912	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
913
914	if (sc->nge_ldata == NULL) {
915		printf("nge%d: no memory for list buffers!\n", unit);
916		bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
917		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
918		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
919		error = ENXIO;
920		goto fail;
921	}
922	bzero(sc->nge_ldata, sizeof(struct nge_list_data));
923
924	/* Try to allocate memory for jumbo buffers. */
925	if (nge_alloc_jumbo_mem(sc)) {
926		printf("nge%d: jumbo buffer allocation failed\n",
927                    sc->nge_unit);
928		contigfree(sc->nge_ldata,
929		    sizeof(struct nge_list_data), M_DEVBUF);
930		bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
931		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
932		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
933		error = ENXIO;
934		goto fail;
935	}
936
937	ifp = &sc->arpcom.ac_if;
938	ifp->if_softc = sc;
939	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
940	ifp->if_mtu = ETHERMTU;
941	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
942	ifp->if_ioctl = nge_ioctl;
943	ifp->if_output = ether_output;
944	ifp->if_start = nge_start;
945	ifp->if_watchdog = nge_watchdog;
946	ifp->if_init = nge_init;
947	ifp->if_baudrate = 1000000000;
948	ifp->if_snd.ifq_maxlen = NGE_TX_LIST_CNT - 1;
949	ifp->if_hwassist = NGE_CSUM_FEATURES;
950	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING;
951	ifp->if_capenable = ifp->if_capabilities;
952
953	/*
954	 * Do MII setup.
955	 */
956	if (mii_phy_probe(dev, &sc->nge_miibus,
957			  nge_ifmedia_upd, nge_ifmedia_sts)) {
958		if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) {
959			sc->nge_tbi = 1;
960			device_printf(dev, "Using TBI\n");
961
962			sc->nge_miibus = dev;
963
964			ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_upd,
965				nge_ifmedia_sts);
966#define	ADD(m, c)	ifmedia_add(&sc->nge_ifmedia, (m), (c), NULL)
967#define PRINT(s)	printf("%s%s", sep, s); sep = ", "
968			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, 0), 0);
969			device_printf(dev, " ");
970			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, 0, 0), 0);
971			PRINT("1000baseSX");
972			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX, 0),0);
973			PRINT("1000baseSX-FDX");
974			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0), 0);
975			PRINT("auto");
976
977			printf("\n");
978#undef ADD
979#undef PRINT
980			ifmedia_set(&sc->nge_ifmedia,
981				IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0));
982
983			CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
984				| NGE_GPIO_GP4_OUT
985				| NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
986				| NGE_GPIO_GP3_OUTENB
987				| NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN);
988
989		} else {
990			printf("nge%d: MII without any PHY!\n", sc->nge_unit);
991			nge_free_jumbo_mem(sc);
992			bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
993			bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
994			bus_release_resource(dev, NGE_RES, NGE_RID,
995					 sc->nge_res);
996			error = ENXIO;
997			goto fail;
998		}
999	}
1000
1001	/*
1002	 * Call MI attach routine.
1003	 */
1004	ether_ifattach(ifp, eaddr);
1005	callout_handle_init(&sc->nge_stat_ch);
1006
1007fail:
1008
1009	splx(s);
	if (error)
		mtx_destroy(&sc->nge_mtx);
1011	return(error);
1012}
1013
1014static int
1015nge_detach(dev)
1016	device_t		dev;
1017{
1018	struct nge_softc	*sc;
1019	struct ifnet		*ifp;
1020	int			s;
1021
1022	s = splimp();
1023
1024	sc = device_get_softc(dev);
1025	ifp = &sc->arpcom.ac_if;
1026
1027	nge_reset(sc);
1028	nge_stop(sc);
1029	ether_ifdetach(ifp);
1030
1031	bus_generic_detach(dev);
1032	if (!sc->nge_tbi) {
1033		device_delete_child(dev, sc->nge_miibus);
1034	}
1035	bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
1036	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
1037	bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
1038
1039	contigfree(sc->nge_ldata, sizeof(struct nge_list_data), M_DEVBUF);
1040	nge_free_jumbo_mem(sc);
1041
1042	splx(s);
1043	mtx_destroy(&sc->nge_mtx);
1044
1045	return(0);
1046}
1047
1048/*
1049 * Initialize the transmit descriptors.
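 * Each descriptor records both a driver-visible pointer to its
 * successor (nge_nextdesc) and the successor's physical address
 * (nge_next); the last entry points back to the first, so the list
 * forms a ring just like the RX list below.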
1050 */
1051static int
1052nge_list_tx_init(sc)
1053	struct nge_softc	*sc;
1054{
1055	struct nge_list_data	*ld;
1056	struct nge_ring_data	*cd;
1057	int			i;
1058
1059	cd = &sc->nge_cdata;
1060	ld = sc->nge_ldata;
1061
1062	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
1063		if (i == (NGE_TX_LIST_CNT - 1)) {
1064			ld->nge_tx_list[i].nge_nextdesc =
1065			    &ld->nge_tx_list[0];
1066			ld->nge_tx_list[i].nge_next =
1067			    vtophys(&ld->nge_tx_list[0]);
1068		} else {
1069			ld->nge_tx_list[i].nge_nextdesc =
1070			    &ld->nge_tx_list[i + 1];
1071			ld->nge_tx_list[i].nge_next =
1072			    vtophys(&ld->nge_tx_list[i + 1]);
1073		}
1074		ld->nge_tx_list[i].nge_mbuf = NULL;
1075		ld->nge_tx_list[i].nge_ptr = 0;
1076		ld->nge_tx_list[i].nge_ctl = 0;
1077	}
1078
1079	cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0;
1080
1081	return(0);
1082}
1083
1084
1085/*
1086 * Initialize the RX descriptors and allocate mbufs for them. Note that
1087 * we arrange the descriptors in a closed ring, so that the last descriptor
1088 * points back to the first.
1089 */
1090static int
1091nge_list_rx_init(sc)
1092	struct nge_softc	*sc;
1093{
1094	struct nge_list_data	*ld;
1095	struct nge_ring_data	*cd;
1096	int			i;
1097
1098	ld = sc->nge_ldata;
1099	cd = &sc->nge_cdata;
1100
1101	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
1102		if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS)
1103			return(ENOBUFS);
1104		if (i == (NGE_RX_LIST_CNT - 1)) {
1105			ld->nge_rx_list[i].nge_nextdesc =
1106			    &ld->nge_rx_list[0];
1107			ld->nge_rx_list[i].nge_next =
1108			    vtophys(&ld->nge_rx_list[0]);
1109		} else {
1110			ld->nge_rx_list[i].nge_nextdesc =
1111			    &ld->nge_rx_list[i + 1];
1112			ld->nge_rx_list[i].nge_next =
1113			    vtophys(&ld->nge_rx_list[i + 1]);
1114		}
1115	}
1116
1117	cd->nge_rx_prod = 0;
1118
1119	return(0);
1120}
1121
1122/*
1123 * Initialize an RX descriptor and attach an MBUF cluster.
1124 */
1125static int
1126nge_newbuf(sc, c, m)
1127	struct nge_softc	*sc;
1128	struct nge_desc		*c;
1129	struct mbuf		*m;
1130{
1131	struct mbuf		*m_new = NULL;
1132	caddr_t			*buf = NULL;
1133
1134	if (m == NULL) {
1135		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1136		if (m_new == NULL) {
1137			printf("nge%d: no memory for rx list "
1138			    "-- packet dropped!\n", sc->nge_unit);
1139			return(ENOBUFS);
1140		}
1141
1142		/* Allocate the jumbo buffer */
1143		buf = nge_jalloc(sc);
1144		if (buf == NULL) {
1145#ifdef NGE_VERBOSE
1146			printf("nge%d: jumbo allocation failed "
1147			    "-- packet dropped!\n", sc->nge_unit);
1148#endif
1149			m_freem(m_new);
1150			return(ENOBUFS);
1151		}
1152		/* Attach the buffer to the mbuf */
1153		m_new->m_data = (void *)buf;
1154		m_new->m_len = m_new->m_pkthdr.len = NGE_JUMBO_FRAMELEN;
1155		MEXTADD(m_new, buf, NGE_JUMBO_FRAMELEN, nge_jfree,
1156		    (struct nge_softc *)sc, 0, EXT_NET_DRV);
1157	} else {
1158		m_new = m;
1159		m_new->m_len = m_new->m_pkthdr.len = NGE_JUMBO_FRAMELEN;
1160		m_new->m_data = m_new->m_ext.ext_buf;
1161	}
1162
1163	m_adj(m_new, sizeof(u_int64_t));
1164
1165	c->nge_mbuf = m_new;
1166	c->nge_ptr = vtophys(mtod(m_new, caddr_t));
1167	c->nge_ctl = m_new->m_len;
1168	c->nge_extsts = 0;
1169
1170	return(0);
1171}
1172
1173static int
1174nge_alloc_jumbo_mem(sc)
1175	struct nge_softc	*sc;
1176{
1177	caddr_t			ptr;
1178	register int		i;
1179	struct nge_jpool_entry   *entry;
1180
1181	/* Grab a big chunk o' storage. */
1182	sc->nge_cdata.nge_jumbo_buf = contigmalloc(NGE_JMEM, M_DEVBUF,
1183	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1184
1185	if (sc->nge_cdata.nge_jumbo_buf == NULL) {
1186		printf("nge%d: no memory for jumbo buffers!\n", sc->nge_unit);
1187		return(ENOBUFS);
1188	}
1189
1190	SLIST_INIT(&sc->nge_jfree_listhead);
1191	SLIST_INIT(&sc->nge_jinuse_listhead);
1192
1193	/*
1194	 * Now divide it up into 9K pieces and save the addresses
1195	 * in an array.
1196	 */
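	/*
	 * (This assumes NGE_JMEM is at least NGE_JSLOTS * NGE_JLEN, so
	 * all of the slots carved out below fit in the contiguous
	 * region allocated above.)
	 */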
1197	ptr = sc->nge_cdata.nge_jumbo_buf;
1198	for (i = 0; i < NGE_JSLOTS; i++) {
1199		sc->nge_cdata.nge_jslots[i] = ptr;
1200		ptr += NGE_JLEN;
1201		entry = malloc(sizeof(struct nge_jpool_entry),
1202		    M_DEVBUF, M_NOWAIT);
1203		if (entry == NULL) {
1204			printf("nge%d: no memory for jumbo "
1205			    "buffer queue!\n", sc->nge_unit);
1206			return(ENOBUFS);
1207		}
1208		entry->slot = i;
1209		SLIST_INSERT_HEAD(&sc->nge_jfree_listhead,
1210		    entry, jpool_entries);
1211	}
1212
1213	return(0);
1214}
1215
1216static void
1217nge_free_jumbo_mem(sc)
1218	struct nge_softc	*sc;
1219{
1220	register int		i;
1221	struct nge_jpool_entry   *entry;
1222
1223	for (i = 0; i < NGE_JSLOTS; i++) {
1224		entry = SLIST_FIRST(&sc->nge_jfree_listhead);
1225		SLIST_REMOVE_HEAD(&sc->nge_jfree_listhead, jpool_entries);
1226		free(entry, M_DEVBUF);
1227	}
1228
1229	contigfree(sc->nge_cdata.nge_jumbo_buf, NGE_JMEM, M_DEVBUF);
1230
1231	return;
1232}
1233
1234/*
1235 * Allocate a jumbo buffer.
1236 */
1237static void *
1238nge_jalloc(sc)
1239	struct nge_softc	*sc;
1240{
1241	struct nge_jpool_entry   *entry;
1242
1243	entry = SLIST_FIRST(&sc->nge_jfree_listhead);
1244
1245	if (entry == NULL) {
1246#ifdef NGE_VERBOSE
1247		printf("nge%d: no free jumbo buffers\n", sc->nge_unit);
1248#endif
1249		return(NULL);
1250	}
1251
1252	SLIST_REMOVE_HEAD(&sc->nge_jfree_listhead, jpool_entries);
1253	SLIST_INSERT_HEAD(&sc->nge_jinuse_listhead, entry, jpool_entries);
1254	return(sc->nge_cdata.nge_jslots[entry->slot]);
1255}
1256
1257/*
1258 * Release a jumbo buffer.
1259 */
1260static void
1261nge_jfree(buf, args)
1262	void			*buf;
1263	void			*args;
1264{
1265	struct nge_softc	*sc;
1266	int		        i;
1267	struct nge_jpool_entry   *entry;
1268
1269	/* Extract the softc struct pointer. */
1270	sc = args;
1271
1272	if (sc == NULL)
1273		panic("nge_jfree: can't find softc pointer!");
1274
1275	/* calculate the slot this buffer belongs to */
1276	i = ((vm_offset_t)buf
1277	     - (vm_offset_t)sc->nge_cdata.nge_jumbo_buf) / NGE_JLEN;
1278
1279	if ((i < 0) || (i >= NGE_JSLOTS))
1280		panic("nge_jfree: asked to free buffer that we don't manage!");
1281
1282	entry = SLIST_FIRST(&sc->nge_jinuse_listhead);
1283	if (entry == NULL)
1284		panic("nge_jfree: buffer not in use!");
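	/*
	 * The in-use list is just a pool of spare entry structures; it
	 * does not record which slot each buffer came from, which is why
	 * the slot number is recomputed from the buffer address above and
	 * stored in the entry before it is moved back to the free list.
	 */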
1285	entry->slot = i;
1286	SLIST_REMOVE_HEAD(&sc->nge_jinuse_listhead, jpool_entries);
1287	SLIST_INSERT_HEAD(&sc->nge_jfree_listhead, entry, jpool_entries);
1288
1289	return;
1290}
1291/*
1292 * A frame has been uploaded: pass the resulting mbuf chain up to
1293 * the higher level protocols.
1294 */
1295static void
1296nge_rxeof(sc)
1297	struct nge_softc	*sc;
1298{
1299        struct mbuf		*m;
1300        struct ifnet		*ifp;
1301	struct nge_desc		*cur_rx;
1302	int			i, total_len = 0;
1303	u_int32_t		rxstat;
1304
1305	ifp = &sc->arpcom.ac_if;
1306	i = sc->nge_cdata.nge_rx_prod;
1307
1308	while(NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) {
1309		struct mbuf		*m0 = NULL;
1310		u_int32_t		extsts;
1311
1312#ifdef DEVICE_POLLING
1313		if (ifp->if_flags & IFF_POLLING) {
1314			if (sc->rxcycles <= 0)
1315				break;
1316			sc->rxcycles--;
1317		}
1318#endif /* DEVICE_POLLING */
1319
1320		cur_rx = &sc->nge_ldata->nge_rx_list[i];
1321		rxstat = cur_rx->nge_rxstat;
1322		extsts = cur_rx->nge_extsts;
1323		m = cur_rx->nge_mbuf;
1324		cur_rx->nge_mbuf = NULL;
1325		total_len = NGE_RXBYTES(cur_rx);
1326		NGE_INC(i, NGE_RX_LIST_CNT);
1327		/*
1328		 * If an error occurs, update stats, clear the
1329		 * status word and leave the mbuf cluster in place:
1330		 * it should simply get re-used next time this descriptor
1331	 	 * comes up in the ring.
1332		 */
1333		if (!(rxstat & NGE_CMDSTS_PKT_OK)) {
1334			ifp->if_ierrors++;
1335			nge_newbuf(sc, cur_rx, m);
1336			continue;
1337		}
1338
1339		/*
1340		 * Ok. NatSemi really screwed up here. This is the
1341		 * only gigE chip I know of with alignment constraints
1342		 * on receive buffers. RX buffers must be 64-bit aligned.
1343		 */
1344#ifdef __i386__
1345		/*
1346		 * By popular demand, ignore the alignment problems
1347		 * on the Intel x86 platform. The performance hit
1348		 * incurred due to unaligned accesses is much smaller
1349		 * than the hit produced by forcing buffer copies all
1350		 * the time, especially with jumbo frames. We still
1351		 * need to fix up the alignment everywhere else though.
1352		 */
1353		if (nge_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
1354#endif
1355			m0 = m_devget(mtod(m, char *), total_len,
1356			    ETHER_ALIGN, ifp, NULL);
1357			nge_newbuf(sc, cur_rx, m);
1358			if (m0 == NULL) {
1359				printf("nge%d: no receive buffers "
1360				    "available -- packet dropped!\n",
1361				    sc->nge_unit);
1362				ifp->if_ierrors++;
1363				continue;
1364			}
1365			m = m0;
1366#ifdef __i386__
1367		} else {
1368			m->m_pkthdr.rcvif = ifp;
1369			m->m_pkthdr.len = m->m_len = total_len;
1370		}
1371#endif
1372
1373		ifp->if_ipackets++;
1374
1375		/* Do IP checksum checking. */
1376		if (extsts & NGE_RXEXTSTS_IPPKT)
1377			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1378		if (!(extsts & NGE_RXEXTSTS_IPCSUMERR))
1379			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1380		if ((extsts & NGE_RXEXTSTS_TCPPKT &&
1381		    !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) ||
1382		    (extsts & NGE_RXEXTSTS_UDPPKT &&
1383		    !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) {
1384			m->m_pkthdr.csum_flags |=
1385			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
1386			m->m_pkthdr.csum_data = 0xffff;
1387		}
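		/*
		 * csum_data = 0xffff together with CSUM_PSEUDO_HDR above
		 * tells the stack that the full TCP/UDP checksum, including
		 * the pseudo-header, has already been verified in hardware.
		 */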
1388
1389		/*
1390		 * If we received a packet with a vlan tag, pass it
1391		 * to vlan_input() instead of ether_input().
1392		 */
1393		if (extsts & NGE_RXEXTSTS_VLANPKT) {
1394			VLAN_INPUT_TAG(ifp, m,
1395			    ntohs(extsts & NGE_RXEXTSTS_VTCI), continue);
1396		}
1397
1398		(*ifp->if_input)(ifp, m);
1399	}
1400
1401	sc->nge_cdata.nge_rx_prod = i;
1402
1403	return;
1404}
1405
1406/*
1407 * A frame was downloaded to the chip. It's safe for us to clean up
1408 * the list buffers.
1409 */
1410
1411static void
1412nge_txeof(sc)
1413	struct nge_softc	*sc;
1414{
1415	struct nge_desc		*cur_tx = NULL;
1416	struct ifnet		*ifp;
1417	u_int32_t		idx;
1418
1419	ifp = &sc->arpcom.ac_if;
1420
1421	/* Clear the timeout timer. */
1422	ifp->if_timer = 0;
1423
1424	/*
1425	 * Go through our tx list and free mbufs for those
1426	 * frames that have been transmitted.
1427	 */
1428	idx = sc->nge_cdata.nge_tx_cons;
1429	while (idx != sc->nge_cdata.nge_tx_prod) {
1430		cur_tx = &sc->nge_ldata->nge_tx_list[idx];
1431
1432		if (NGE_OWNDESC(cur_tx))
1433			break;
1434
1435		if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) {
1436			sc->nge_cdata.nge_tx_cnt--;
1437			NGE_INC(idx, NGE_TX_LIST_CNT);
1438			continue;
1439		}
1440
1441		if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) {
1442			ifp->if_oerrors++;
1443			if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS)
1444				ifp->if_collisions++;
1445			if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL)
1446				ifp->if_collisions++;
1447		}
1448
1449		ifp->if_collisions +=
1450		    (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16;
1451
1452		ifp->if_opackets++;
1453		if (cur_tx->nge_mbuf != NULL) {
1454			m_freem(cur_tx->nge_mbuf);
1455			cur_tx->nge_mbuf = NULL;
1456		}
1457
1458		sc->nge_cdata.nge_tx_cnt--;
1459		NGE_INC(idx, NGE_TX_LIST_CNT);
1460		ifp->if_timer = 0;
1461	}
1462
1463	sc->nge_cdata.nge_tx_cons = idx;
1464
1465	if (cur_tx != NULL)
1466		ifp->if_flags &= ~IFF_OACTIVE;
1467
1468	return;
1469}
1470
1471static void
1472nge_tick(xsc)
1473	void			*xsc;
1474{
1475	struct nge_softc	*sc;
1476	struct mii_data		*mii;
1477	struct ifnet		*ifp;
1478	int			s;
1479
1480	s = splimp();
1481
1482	sc = xsc;
1483	ifp = &sc->arpcom.ac_if;
1484
1485	if (sc->nge_tbi) {
1486		if (!sc->nge_link) {
1487			if (CSR_READ_4(sc, NGE_TBI_BMSR)
1488			    & NGE_TBIBMSR_ANEG_DONE) {
1489				printf("nge%d: gigabit link up\n",
1490				    sc->nge_unit);
1491				nge_miibus_statchg(sc->nge_miibus);
1492				sc->nge_link++;
1493				if (ifp->if_snd.ifq_head != NULL)
1494					nge_start(ifp);
1495			}
1496		}
1497	} else {
1498		mii = device_get_softc(sc->nge_miibus);
1499		mii_tick(mii);
1500
1501		if (!sc->nge_link) {
1502			if (mii->mii_media_status & IFM_ACTIVE &&
1503			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1504				sc->nge_link++;
1505				if (IFM_SUBTYPE(mii->mii_media_active)
1506				    == IFM_1000_T)
1507					printf("nge%d: gigabit link up\n",
1508					    sc->nge_unit);
1509				if (ifp->if_snd.ifq_head != NULL)
1510					nge_start(ifp);
1511			}
1512		}
1513	}
1514	sc->nge_stat_ch = timeout(nge_tick, sc, hz);
1515
1516	splx(s);
1517
1518	return;
1519}
1520
1521#ifdef DEVICE_POLLING
1522static poll_handler_t nge_poll;
1523
1524static void
1525nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1526{
1527	struct  nge_softc *sc = ifp->if_softc;
1528
1529	if (cmd == POLL_DEREGISTER) {	/* final call, enable interrupts */
1530		CSR_WRITE_4(sc, NGE_IER, 1);
1531		return;
1532	}
1533
1534	/*
1535	 * On the nge, reading the status register also clears it.
1536	 * So before returning to intr mode we must make sure that all
1537	 * possible pending sources of interrupts have been served.
	 * In practice this means running the *eof routines to completion,
	 * and then handling any interrupts still flagged in the ISR.
1540	 */
1541	sc->rxcycles = count;
1542	nge_rxeof(sc);
1543	nge_txeof(sc);
1544	if (ifp->if_snd.ifq_head != NULL)
1545		nge_start(ifp);
1546
1547	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
1548		u_int32_t	status;
1549
1550		/* Reading the ISR register clears all interrupts. */
1551		status = CSR_READ_4(sc, NGE_ISR);
1552
1553		if (status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW))
1554			nge_rxeof(sc);
1555
1556		if (status & (NGE_ISR_RX_IDLE))
1557			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1558
1559		if (status & NGE_ISR_SYSERR) {
1560			nge_reset(sc);
1561			nge_init(sc);
1562		}
1563	}
1564}
1565#endif /* DEVICE_POLLING */
1566
1567static void
1568nge_intr(arg)
1569	void			*arg;
1570{
1571	struct nge_softc	*sc;
1572	struct ifnet		*ifp;
1573	u_int32_t		status;
1574
1575	sc = arg;
1576	ifp = &sc->arpcom.ac_if;
1577
1578#ifdef DEVICE_POLLING
1579	if (ifp->if_flags & IFF_POLLING)
1580		return;
1581	if (ether_poll_register(nge_poll, ifp)) { /* ok, disable interrupts */
1582		CSR_WRITE_4(sc, NGE_IER, 0);
1583		nge_poll(ifp, 0, 1);
1584		return;
1585	}
1586#endif /* DEVICE_POLLING */
1587
	/* Suppress unwanted interrupts */
1589	if (!(ifp->if_flags & IFF_UP)) {
1590		nge_stop(sc);
1591		return;
1592	}
1593
1594	/* Disable interrupts. */
1595	CSR_WRITE_4(sc, NGE_IER, 0);
1596
1597	/* Data LED on for TBI mode */
1598	if(sc->nge_tbi)
1599		 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1600			     | NGE_GPIO_GP3_OUT);
1601
1602	for (;;) {
1603		/* Reading the ISR register clears all interrupts. */
1604		status = CSR_READ_4(sc, NGE_ISR);
1605
1606		if ((status & NGE_INTRS) == 0)
1607			break;
1608
1609		if ((status & NGE_ISR_TX_DESC_OK) ||
1610		    (status & NGE_ISR_TX_ERR) ||
1611		    (status & NGE_ISR_TX_OK) ||
1612		    (status & NGE_ISR_TX_IDLE))
1613			nge_txeof(sc);
1614
1615		if ((status & NGE_ISR_RX_DESC_OK) ||
1616		    (status & NGE_ISR_RX_ERR) ||
1617		    (status & NGE_ISR_RX_OFLOW) ||
1618		    (status & NGE_ISR_RX_FIFO_OFLOW) ||
1619		    (status & NGE_ISR_RX_IDLE) ||
1620		    (status & NGE_ISR_RX_OK))
1621			nge_rxeof(sc);
1622
1623		if ((status & NGE_ISR_RX_IDLE))
1624			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1625
1626		if (status & NGE_ISR_SYSERR) {
1627			nge_reset(sc);
1628			ifp->if_flags &= ~IFF_RUNNING;
1629			nge_init(sc);
1630		}
1631
1632#if 0
1633		/*
		 * XXX: nge_tick() is not ready to be called this way;
		 * it screws up the aneg timeout because mii_tick() is
1636		 * only to be called once per second.
1637		 */
1638		if (status & NGE_IMR_PHY_INTR) {
1639			sc->nge_link = 0;
1640			nge_tick(sc);
1641		}
1642#endif
1643	}
1644
1645	/* Re-enable interrupts. */
1646	CSR_WRITE_4(sc, NGE_IER, 1);
1647
1648	if (ifp->if_snd.ifq_head != NULL)
1649		nge_start(ifp);
1650
1651	/* Data LED off for TBI mode */
1652
1653	if(sc->nge_tbi)
1654		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1655			    & ~NGE_GPIO_GP3_OUT);
1656
1657	return;
1658}
1659
1660/*
1661 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1662 * pointers to the fragment pointers.
1663 */
1664static int
1665nge_encap(sc, m_head, txidx)
1666	struct nge_softc	*sc;
1667	struct mbuf		*m_head;
1668	u_int32_t		*txidx;
1669{
1670	struct nge_desc		*f = NULL;
1671	struct mbuf		*m;
1672	int			frag, cur, cnt = 0;
1673	struct m_tag		*mtag;
1674
1675	/*
1676 	 * Start packing the mbufs in this chain into
1677	 * the fragment pointers. Stop when we run out
1678 	 * of fragments or hit the end of the mbuf chain.
1679	 */
1680	m = m_head;
1681	cur = frag = *txidx;
1682
1683	for (m = m_head; m != NULL; m = m->m_next) {
1684		if (m->m_len != 0) {
1685			if ((NGE_TX_LIST_CNT -
1686			    (sc->nge_cdata.nge_tx_cnt + cnt)) < 2)
1687				return(ENOBUFS);
1688			f = &sc->nge_ldata->nge_tx_list[frag];
1689			f->nge_ctl = NGE_CMDSTS_MORE | m->m_len;
1690			f->nge_ptr = vtophys(mtod(m, vm_offset_t));
1691			if (cnt != 0)
1692				f->nge_ctl |= NGE_CMDSTS_OWN;
1693			cur = frag;
1694			NGE_INC(frag, NGE_TX_LIST_CNT);
1695			cnt++;
1696		}
1697	}
1698
1699	if (m != NULL)
1700		return(ENOBUFS);
1701
1702	sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0;
1703	if (m_head->m_pkthdr.csum_flags) {
1704		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
1705			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1706			    NGE_TXEXTSTS_IPCSUM;
1707		if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
1708			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1709			    NGE_TXEXTSTS_TCPCSUM;
1710		if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
1711			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1712			    NGE_TXEXTSTS_UDPCSUM;
1713	}
1714
1715	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m);
1716	if (mtag != NULL) {
1717		sc->nge_ldata->nge_tx_list[cur].nge_extsts |=
1718		    (NGE_TXEXTSTS_VLANPKT|htons(VLAN_TAG_VALUE(mtag)));
1719	}
1720
1721	sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head;
1722	sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE;
1723	sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN;
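	/*
	 * Ownership of the first descriptor is handed to the chip last
	 * (above); the later fragments already had NGE_CMDSTS_OWN set in
	 * the loop, so the chip never sees a partially built chain.
	 */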
1724	sc->nge_cdata.nge_tx_cnt += cnt;
1725	*txidx = frag;
1726
1727	return(0);
1728}
1729
1730/*
1731 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1732 * to the mbuf data regions directly in the transmit lists. We also save a
1733 * copy of the pointers since the transmit list fragment pointers are
1734 * physical addresses.
1735 */
1736
1737static void
1738nge_start(ifp)
1739	struct ifnet		*ifp;
1740{
1741	struct nge_softc	*sc;
1742	struct mbuf		*m_head = NULL;
1743	u_int32_t		idx;
1744
1745	sc = ifp->if_softc;
1746
1747	if (!sc->nge_link)
1748		return;
1749
1750	idx = sc->nge_cdata.nge_tx_prod;
1751
1752	if (ifp->if_flags & IFF_OACTIVE)
1753		return;
1754
1755	while(sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) {
1756		IF_DEQUEUE(&ifp->if_snd, m_head);
1757		if (m_head == NULL)
1758			break;
1759
1760		if (nge_encap(sc, m_head, &idx)) {
1761			IF_PREPEND(&ifp->if_snd, m_head);
1762			ifp->if_flags |= IFF_OACTIVE;
1763			break;
1764		}
1765
1766		/*
1767		 * If there's a BPF listener, bounce a copy of this frame
1768		 * to him.
1769		 */
1770		BPF_MTAP(ifp, m_head);
1771
1772	}
1773
1774	/* Transmit */
1775	sc->nge_cdata.nge_tx_prod = idx;
1776	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);
1777
1778	/*
1779	 * Set a timeout in case the chip goes out to lunch.
1780	 */
1781	ifp->if_timer = 5;
1782
1783	return;
1784}
1785
1786static void
1787nge_init(xsc)
1788	void			*xsc;
1789{
1790	struct nge_softc	*sc = xsc;
1791	struct ifnet		*ifp = &sc->arpcom.ac_if;
1792	struct mii_data		*mii;
1793	int			s;
1794
1795	if (ifp->if_flags & IFF_RUNNING)
1796		return;
1797
1798	s = splimp();
1799
1800	/*
1801	 * Cancel pending I/O and free all RX/TX buffers.
1802	 */
1803	nge_stop(sc);
1804
1805	if (sc->nge_tbi) {
1806		mii = NULL;
1807	} else {
1808		mii = device_get_softc(sc->nge_miibus);
1809	}
1810
1811	/* Set MAC address */
1812	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
1813	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1814	    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
1815	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
1816	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1817	    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
1818	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
1819	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1820	    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
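	/*
	 * Each pair of writes above selects one of the perfect-match
	 * filter words (PAR0..PAR2) through NGE_RXFILT_CTL and then loads
	 * 16 bits of the station address through NGE_RXFILT_DATA.
	 */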
1821
1822	/* Init circular RX list. */
1823	if (nge_list_rx_init(sc) == ENOBUFS) {
1824		printf("nge%d: initialization failed: no "
1825			"memory for rx buffers\n", sc->nge_unit);
1826		nge_stop(sc);
1827		(void)splx(s);
1828		return;
1829	}
1830
1831	/*
1832	 * Init tx descriptors.
1833	 */
1834	nge_list_tx_init(sc);
1835
1836	/*
1837	 * For the NatSemi chip, we have to explicitly enable the
1838	 * reception of ARP frames, as well as turn on the 'perfect
1839	 * match' filter where we store the station address, otherwise
1840	 * we won't receive unicasts meant for this host.
1841	 */
1842	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
1843	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);
1844
1845	 /* If we want promiscuous mode, set the allframes bit. */
1846	if (ifp->if_flags & IFF_PROMISC) {
1847		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1848	} else {
1849		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1850	}
1851
1852	/*
1853	 * Set the capture broadcast bit to capture broadcast frames.
1854	 */
1855	if (ifp->if_flags & IFF_BROADCAST) {
1856		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1857	} else {
1858		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1859	}
1860
1861	/*
1862	 * Load the multicast filter.
1863	 */
1864	nge_setmulti(sc);
1865
1866	/* Turn the receive filter on */
1867	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);
1868
1869	/*
1870	 * Load the address of the RX and TX lists.
1871	 */
1872	CSR_WRITE_4(sc, NGE_RX_LISTPTR,
1873	    vtophys(&sc->nge_ldata->nge_rx_list[0]));
1874	CSR_WRITE_4(sc, NGE_TX_LISTPTR,
1875	    vtophys(&sc->nge_ldata->nge_tx_list[0]));
1876
1877	/* Set RX configuration */
1878	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);
1879	/*
1880	 * Enable hardware checksum validation for all IPv4
1881	 * packets, do not reject packets with bad checksums.
1882	 */
1883	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);
1884
1885	/*
1886	 * Tell the chip to detect and strip VLAN tag info from
1887	 * received frames. The tag will be provided in the extsts
1888	 * field in the RX descriptors.
1889	 */
1890	NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL,
1891	    NGE_VIPRXCTL_TAG_DETECT_ENB|NGE_VIPRXCTL_TAG_STRIP_ENB);
1892
1893	/* Set TX configuration */
1894	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);
1895
1896	/*
1897	 * Enable TX IPv4 checksumming on a per-packet basis.
1898	 */
1899	CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT);
1900
1901	/*
1902	 * Tell the chip to insert VLAN tags on a per-packet basis as
1903	 * dictated by the code in the frame encapsulation routine.
1904	 */
1905	NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);
1906
1907	/* Set full/half duplex mode. */
1908	if (sc->nge_tbi) {
1909		if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
1910		    == IFM_FDX) {
1911			NGE_SETBIT(sc, NGE_TX_CFG,
1912			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1913			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1914		} else {
1915			NGE_CLRBIT(sc, NGE_TX_CFG,
1916			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1917			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1918		}
1919	} else {
1920		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1921			NGE_SETBIT(sc, NGE_TX_CFG,
1922			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1923			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1924		} else {
1925			NGE_CLRBIT(sc, NGE_TX_CFG,
1926			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1927			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1928		}
1929	}
1930
1931	nge_tick(sc);
1932
1933	/*
1934	 * Enable the delivery of PHY interrupts based on
1935	 * link/speed/duplex status changes. Also enable the
1936	 * extsts field in the DMA descriptors (needed for
1937	 * TCP/IP checksum offload on transmit).
1938	 */
1939	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD|
1940	    NGE_CFG_PHYINTR_LNK|NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB);
1941
1942	/*
1943	 * Configure interrupt holdoff (moderation). We can
1944	 * have the chip delay interrupt delivery for a certain
1945	 * period. Units are in 100us, and the max setting
1946	 * is 25500us (0xFF x 100us). Default is a 100us holdoff.
1947	 */
1948	CSR_WRITE_4(sc, NGE_IHR, 0x01);
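	/*
	 * For example, writing 0x64 here would delay interrupt delivery
	 * by up to 10ms (100 ticks x 100us); the 0x01 above keeps the
	 * default 100us holdoff.
	 */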
1949
1950	/*
1951	 * Enable interrupts.
1952	 */
1953	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
1954#ifdef DEVICE_POLLING
1955	/*
1956	 * ... only enable interrupts if we are not polling, make sure
1957	 * they are off otherwise.
1958	 */
1959	if (ifp->if_flags & IFF_POLLING)
1960		CSR_WRITE_4(sc, NGE_IER, 0);
1961	else
1962#endif /* DEVICE_POLLING */
1963	CSR_WRITE_4(sc, NGE_IER, 1);
1964
1965	/* Enable receiver and transmitter. */
1966	NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
1967	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1968
1969	nge_ifmedia_upd(ifp);
1970
1971	ifp->if_flags |= IFF_RUNNING;
1972	ifp->if_flags &= ~IFF_OACTIVE;
1973
1974	(void)splx(s);
1975
1976	return;
1977}
1978
1979/*
1980 * Set media options.
1981 */
1982static int
1983nge_ifmedia_upd(ifp)
1984	struct ifnet		*ifp;
1985{
1986	struct nge_softc	*sc;
1987	struct mii_data		*mii;
1988
1989	sc = ifp->if_softc;
1990
1991	if (sc->nge_tbi) {
1992		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
1993		     == IFM_AUTO) {
1994			CSR_WRITE_4(sc, NGE_TBI_ANAR,
1995				CSR_READ_4(sc, NGE_TBI_ANAR)
1996					| NGE_TBIANAR_HDX | NGE_TBIANAR_FDX
1997					| NGE_TBIANAR_PS1 | NGE_TBIANAR_PS2);
1998			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG
1999				| NGE_TBIBMCR_RESTART_ANEG);
2000			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG);
2001		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media
2002			    & IFM_GMASK) == IFM_FDX) {
2003			NGE_SETBIT(sc, NGE_TX_CFG,
2004			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
2005			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
2006
2007			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
2008			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
2009		} else {
2010			NGE_CLRBIT(sc, NGE_TX_CFG,
2011			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
2012			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
2013
2014			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
2015			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
2016		}
2017
2018		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
2019			    & ~NGE_GPIO_GP3_OUT);
2020	} else {
2021		mii = device_get_softc(sc->nge_miibus);
2022		sc->nge_link = 0;
2023		if (mii->mii_instance) {
2024			struct mii_softc	*miisc;
2025			for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2026			    miisc = LIST_NEXT(miisc, mii_list))
2027				mii_phy_reset(miisc);
2028		}
2029		mii_mediachg(mii);
2030	}
2031
2032	return(0);
2033}
2034
2035/*
2036 * Report current media status.
2037 */
2038static void
2039nge_ifmedia_sts(ifp, ifmr)
2040	struct ifnet		*ifp;
2041	struct ifmediareq	*ifmr;
2042{
2043	struct nge_softc	*sc;
2044	struct mii_data		*mii;
2045
2046	sc = ifp->if_softc;
2047
2048	if (sc->nge_tbi) {
2049		ifmr->ifm_status = IFM_AVALID;
2050		ifmr->ifm_active = IFM_ETHER;
2051
2052		if (CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) {
2053			ifmr->ifm_status |= IFM_ACTIVE;
2054		}
2055		if (CSR_READ_4(sc, NGE_TBI_BMCR) & NGE_TBIBMCR_LOOPBACK)
2056			ifmr->ifm_active |= IFM_LOOP;
		if (!(CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE)) {
2058			ifmr->ifm_active |= IFM_NONE;
2059			ifmr->ifm_status = 0;
2060			return;
2061		}
2062		ifmr->ifm_active |= IFM_1000_SX;
2063		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
2064		    == IFM_AUTO) {
2065			ifmr->ifm_active |= IFM_AUTO;
2066			if (CSR_READ_4(sc, NGE_TBI_ANLPAR)
2067			    & NGE_TBIANAR_FDX) {
2068				ifmr->ifm_active |= IFM_FDX;
			} else if (CSR_READ_4(sc, NGE_TBI_ANLPAR)
2070				  & NGE_TBIANAR_HDX) {
2071				ifmr->ifm_active |= IFM_HDX;
2072			}
2073		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
2074			== IFM_FDX)
2075			ifmr->ifm_active |= IFM_FDX;
2076		else
2077			ifmr->ifm_active |= IFM_HDX;
2078
2079	} else {
2080		mii = device_get_softc(sc->nge_miibus);
2081		mii_pollstat(mii);
2082		ifmr->ifm_active = mii->mii_media_active;
2083		ifmr->ifm_status = mii->mii_media_status;
2084	}
2085
2086	return;
2087}
2088
2089static int
2090nge_ioctl(ifp, command, data)
2091	struct ifnet		*ifp;
2092	u_long			command;
2093	caddr_t			data;
2094{
2095	struct nge_softc	*sc = ifp->if_softc;
2096	struct ifreq		*ifr = (struct ifreq *) data;
2097	struct mii_data		*mii;
2098	int			s, error = 0;
2099
2100	s = splimp();
2101
2102	switch(command) {
2103	case SIOCSIFMTU:
2104		if (ifr->ifr_mtu > NGE_JUMBO_MTU)
2105			error = EINVAL;
2106		else {
2107			ifp->if_mtu = ifr->ifr_mtu;
2108			/*
			 * Workaround: if the MTU is larger than
			 * 8152 (8170 - 18), turn off TX checksum
			 * offloading: the chip can't checksum frames
			 * that don't fit entirely in the 8K TX FIFO.
2112			 */
2113			if (ifr->ifr_mtu >= 8152)
2114				ifp->if_hwassist = 0;
2115			else
2116				ifp->if_hwassist = NGE_CSUM_FEATURES;
2117		}
2118		break;
2119	case SIOCSIFFLAGS:
2120		if (ifp->if_flags & IFF_UP) {
2121			if (ifp->if_flags & IFF_RUNNING &&
2122			    ifp->if_flags & IFF_PROMISC &&
2123			    !(sc->nge_if_flags & IFF_PROMISC)) {
2124				NGE_SETBIT(sc, NGE_RXFILT_CTL,
2125				    NGE_RXFILTCTL_ALLPHYS|
2126				    NGE_RXFILTCTL_ALLMULTI);
2127			} else if (ifp->if_flags & IFF_RUNNING &&
2128			    !(ifp->if_flags & IFF_PROMISC) &&
2129			    sc->nge_if_flags & IFF_PROMISC) {
2130				NGE_CLRBIT(sc, NGE_RXFILT_CTL,
2131				    NGE_RXFILTCTL_ALLPHYS);
2132				if (!(ifp->if_flags & IFF_ALLMULTI))
2133					NGE_CLRBIT(sc, NGE_RXFILT_CTL,
2134					    NGE_RXFILTCTL_ALLMULTI);
2135			} else {
2136				ifp->if_flags &= ~IFF_RUNNING;
2137				nge_init(sc);
2138			}
2139		} else {
2140			if (ifp->if_flags & IFF_RUNNING)
2141				nge_stop(sc);
2142		}
2143		sc->nge_if_flags = ifp->if_flags;
2144		error = 0;
2145		break;
2146	case SIOCADDMULTI:
2147	case SIOCDELMULTI:
2148		nge_setmulti(sc);
2149		error = 0;
2150		break;
2151	case SIOCGIFMEDIA:
2152	case SIOCSIFMEDIA:
2153		if (sc->nge_tbi) {
2154			error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia,
2155					      command);
2156		} else {
2157			mii = device_get_softc(sc->nge_miibus);
2158			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
2159					      command);
2160		}
2161		break;
2162	default:
2163		error = ether_ioctl(ifp, command, data);
2164		break;
2165	}
2166
2167	(void)splx(s);
2168
2169	return(error);
2170}
2171
2172static void
2173nge_watchdog(ifp)
2174	struct ifnet		*ifp;
2175{
2176	struct nge_softc	*sc;
2177
2178	sc = ifp->if_softc;
2179
2180	ifp->if_oerrors++;
2181	printf("nge%d: watchdog timeout\n", sc->nge_unit);
2182
2183	nge_stop(sc);
2184	nge_reset(sc);
2185	ifp->if_flags &= ~IFF_RUNNING;
2186	nge_init(sc);
2187
2188	if (ifp->if_snd.ifq_head != NULL)
2189		nge_start(ifp);
2190
2191	return;
2192}
2193
2194/*
2195 * Stop the adapter and free any mbufs allocated to the
2196 * RX and TX lists.
2197 */
2198static void
2199nge_stop(sc)
2200	struct nge_softc	*sc;
2201{
2202	register int		i;
2203	struct ifnet		*ifp;
2204	struct mii_data		*mii;
2205
2206	ifp = &sc->arpcom.ac_if;
2207	ifp->if_timer = 0;
2208	if (sc->nge_tbi) {
2209		mii = NULL;
2210	} else {
2211		mii = device_get_softc(sc->nge_miibus);
2212	}
2213
2214	untimeout(nge_tick, sc, sc->nge_stat_ch);
2215#ifdef DEVICE_POLLING
2216	ether_poll_deregister(ifp);
2217#endif
2218	CSR_WRITE_4(sc, NGE_IER, 0);
2219	CSR_WRITE_4(sc, NGE_IMR, 0);
2220	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
2221	DELAY(1000);
2222	CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0);
2223	CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0);
2224
2225	if (!sc->nge_tbi)
2226		mii_down(mii);
2227
2228	sc->nge_link = 0;
2229
2230	/*
2231	 * Free data in the RX lists.
2232	 */
2233	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
2234		if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) {
2235			m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf);
2236			sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL;
2237		}
2238	}
2239	bzero((char *)&sc->nge_ldata->nge_rx_list,
2240		sizeof(sc->nge_ldata->nge_rx_list));
2241
2242	/*
2243	 * Free the TX list buffers.
2244	 */
2245	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
2246		if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) {
2247			m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf);
2248			sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL;
2249		}
2250	}
2251
2252	bzero((char *)&sc->nge_ldata->nge_tx_list,
2253		sizeof(sc->nge_ldata->nge_tx_list));
2254
2255	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2256
2257	return;
2258}
2259
2260/*
2261 * Stop all chip I/O so that the kernel's probe routines don't
2262 * get confused by errant DMAs when rebooting.
2263 */
2264static void
2265nge_shutdown(dev)
2266	device_t		dev;
2267{
2268	struct nge_softc	*sc;
2269
2270	sc = device_get_softc(dev);
2271
2272	nge_reset(sc);
2273	nge_stop(sc);
2274
2275	return;
2276}
2277