if_nge.c revision 128130
1/*
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2000, 2001
4 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/nge/if_nge.c 128130 2004-04-11 18:28:14Z ru $");
36
37/*
38 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
39 * for FreeBSD. Datasheets are available from:
40 *
41 * http://www.national.com/ds/DP/DP83820.pdf
42 * http://www.national.com/ds/DP/DP83821.pdf
43 *
44 * These chips are used on several low cost gigabit ethernet NICs
45 * sold by D-Link, Addtron, SMC and Asante. Both parts are
46 * virtually the same, except the 83820 is a 64-bit/32-bit part,
47 * while the 83821 is 32-bit only.
48 *
49 * Many cards also use National gigE transceivers, such as the
50 * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
51 * contains a full register description that applies to all of these
52 * components:
53 *
54 * http://www.national.com/ds/DP/DP83861.pdf
55 *
56 * Written by Bill Paul <wpaul@bsdi.com>
57 * BSDi Open Source Solutions
58 */
59
60/*
61 * The NatSemi DP83820 and 83821 controllers are enhanced versions
62 * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
63 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
64 * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
65 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
66 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
67 * matching buffers, one perfect address filter buffer and interrupt
68 * moderation. The 83820 supports both 64-bit and 32-bit addressing
69 * and data transfers: the 64-bit support can be toggled on or off
70 * via software. This affects the size of certain fields in the DMA
71 * descriptors.
72 *
73 * There are two bugs/misfeatures in the 83820/83821 that I have
74 * discovered so far:
75 *
76 * - Receive buffers must be aligned on 64-bit boundaries, which means
77 *   you must resort to copying data in order to fix up the payload
78 *   alignment.
79 *
80 * - In order to transmit jumbo frames larger than 8170 bytes, you have
81 *   to turn off transmit checksum offloading, because the chip can't
82 *   compute the checksum on an outgoing frame unless it fits entirely
83 *   within the TX FIFO, which is only 8192 bytes in size. If you have
84 *   TX checksum offload enabled and you transmit attempt to transmit a
85 *   frame larger than 8170 bytes, the transmitter will wedge.
86 *
87 * To work around the latter problem, TX checksum offload is disabled
88 * if the user selects an MTU larger than 8152 (8170 - 18).
89 */
90
91#include <sys/param.h>
92#include <sys/systm.h>
93#include <sys/sockio.h>
94#include <sys/mbuf.h>
95#include <sys/malloc.h>
96#include <sys/kernel.h>
97#include <sys/socket.h>
98
99#include <net/if.h>
100#include <net/if_arp.h>
101#include <net/ethernet.h>
102#include <net/if_dl.h>
103#include <net/if_media.h>
104#include <net/if_types.h>
105#include <net/if_vlan_var.h>
106
107#include <net/bpf.h>
108
109#include <vm/vm.h>              /* for vtophys */
110#include <vm/pmap.h>            /* for vtophys */
111#include <machine/clock.h>      /* for DELAY */
112#include <machine/bus_pio.h>
113#include <machine/bus_memio.h>
114#include <machine/bus.h>
115#include <machine/resource.h>
116#include <sys/bus.h>
117#include <sys/rman.h>
118
119#include <dev/mii/mii.h>
120#include <dev/mii/miivar.h>
121
122#include <dev/pci/pcireg.h>
123#include <dev/pci/pcivar.h>
124
125#define NGE_USEIOSPACE
126
127#include <dev/nge/if_ngereg.h>
128
129MODULE_DEPEND(nge, pci, 1, 1, 1);
130MODULE_DEPEND(nge, ether, 1, 1, 1);
131MODULE_DEPEND(nge, miibus, 1, 1, 1);
132
133/* "controller miibus0" required.  See GENERIC if you get errors here. */
134#include "miibus_if.h"
135
136#define NGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
137
138/*
139 * Various supported device vendors/types and their names.
140 */
141static struct nge_type nge_devs[] = {
142	{ NGE_VENDORID, NGE_DEVICEID,
143	    "National Semiconductor Gigabit Ethernet" },
144	{ 0, 0, NULL }
145};
146
147static int nge_probe(device_t);
148static int nge_attach(device_t);
149static int nge_detach(device_t);
150
151static int nge_alloc_jumbo_mem(struct nge_softc *);
152static void nge_free_jumbo_mem(struct nge_softc *);
153static void *nge_jalloc(struct nge_softc *);
154static void nge_jfree(void *, void *);
155
156static int nge_newbuf(struct nge_softc *, struct nge_desc *, struct mbuf *);
157static int nge_encap(struct nge_softc *, struct mbuf *, u_int32_t *);
158static void nge_rxeof(struct nge_softc *);
159static void nge_txeof(struct nge_softc *);
160static void nge_intr(void *);
161static void nge_tick(void *);
162static void nge_start(struct ifnet *);
163static int nge_ioctl(struct ifnet *, u_long, caddr_t);
164static void nge_init(void *);
165static void nge_stop(struct nge_softc *);
166static void nge_watchdog(struct ifnet *);
167static void nge_shutdown(device_t);
168static int nge_ifmedia_upd(struct ifnet *);
169static void nge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
170
171static void nge_delay(struct nge_softc *);
172static void nge_eeprom_idle(struct nge_softc *);
173static void nge_eeprom_putbyte(struct nge_softc *, int);
174static void nge_eeprom_getword(struct nge_softc *, int, u_int16_t *);
175static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int, int);
176
177static void nge_mii_sync(struct nge_softc *);
178static void nge_mii_send(struct nge_softc *, u_int32_t, int);
179static int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
180static int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);
181
182static int nge_miibus_readreg(device_t, int, int);
183static int nge_miibus_writereg(device_t, int, int, int);
184static void nge_miibus_statchg(device_t);
185
186static void nge_setmulti(struct nge_softc *);
187static uint32_t nge_mchash(const uint8_t *);
188static void nge_reset(struct nge_softc *);
189static int nge_list_rx_init(struct nge_softc *);
190static int nge_list_tx_init(struct nge_softc *);
191
192#ifdef NGE_USEIOSPACE
193#define NGE_RES			SYS_RES_IOPORT
194#define NGE_RID			NGE_PCI_LOIO
195#else
196#define NGE_RES			SYS_RES_MEMORY
197#define NGE_RID			NGE_PCI_LOMEM
198#endif
199
200static device_method_t nge_methods[] = {
201	/* Device interface */
202	DEVMETHOD(device_probe,		nge_probe),
203	DEVMETHOD(device_attach,	nge_attach),
204	DEVMETHOD(device_detach,	nge_detach),
205	DEVMETHOD(device_shutdown,	nge_shutdown),
206
207	/* bus interface */
208	DEVMETHOD(bus_print_child,	bus_generic_print_child),
209	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
210
211	/* MII interface */
212	DEVMETHOD(miibus_readreg,	nge_miibus_readreg),
213	DEVMETHOD(miibus_writereg,	nge_miibus_writereg),
214	DEVMETHOD(miibus_statchg,	nge_miibus_statchg),
215
216	{ 0, 0 }
217};
218
219static driver_t nge_driver = {
220	"nge",
221	nge_methods,
222	sizeof(struct nge_softc)
223};
224
225static devclass_t nge_devclass;
226
227DRIVER_MODULE(nge, pci, nge_driver, nge_devclass, 0, 0);
228DRIVER_MODULE(miibus, nge, miibus_driver, miibus_devclass, 0, 0);
229
230#define NGE_SETBIT(sc, reg, x)				\
231	CSR_WRITE_4(sc, reg,				\
232		CSR_READ_4(sc, reg) | (x))
233
234#define NGE_CLRBIT(sc, reg, x)				\
235	CSR_WRITE_4(sc, reg,				\
236		CSR_READ_4(sc, reg) & ~(x))
237
238#define SIO_SET(x)					\
239	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))
240
241#define SIO_CLR(x)					\
242	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))
243
244static void
245nge_delay(sc)
246	struct nge_softc	*sc;
247{
248	int			idx;
249
250	for (idx = (300 / 33) + 1; idx > 0; idx--)
251		CSR_READ_4(sc, NGE_CSR);
252
253	return;
254}
255
256static void
257nge_eeprom_idle(sc)
258	struct nge_softc	*sc;
259{
260	register int		i;
261
262	SIO_SET(NGE_MEAR_EE_CSEL);
263	nge_delay(sc);
264	SIO_SET(NGE_MEAR_EE_CLK);
265	nge_delay(sc);
266
267	for (i = 0; i < 25; i++) {
268		SIO_CLR(NGE_MEAR_EE_CLK);
269		nge_delay(sc);
270		SIO_SET(NGE_MEAR_EE_CLK);
271		nge_delay(sc);
272	}
273
274	SIO_CLR(NGE_MEAR_EE_CLK);
275	nge_delay(sc);
276	SIO_CLR(NGE_MEAR_EE_CSEL);
277	nge_delay(sc);
278	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
279
280	return;
281}
282
283/*
284 * Send a read command and address to the EEPROM, check for ACK.
285 */
286static void
287nge_eeprom_putbyte(sc, addr)
288	struct nge_softc	*sc;
289	int			addr;
290{
291	register int		d, i;
292
293	d = addr | NGE_EECMD_READ;
294
295	/*
296	 * Feed in each bit and stobe the clock.
297	 */
298	for (i = 0x400; i; i >>= 1) {
299		if (d & i) {
300			SIO_SET(NGE_MEAR_EE_DIN);
301		} else {
302			SIO_CLR(NGE_MEAR_EE_DIN);
303		}
304		nge_delay(sc);
305		SIO_SET(NGE_MEAR_EE_CLK);
306		nge_delay(sc);
307		SIO_CLR(NGE_MEAR_EE_CLK);
308		nge_delay(sc);
309	}
310
311	return;
312}
313
314/*
315 * Read a word of data stored in the EEPROM at address 'addr.'
316 */
317static void
318nge_eeprom_getword(sc, addr, dest)
319	struct nge_softc	*sc;
320	int			addr;
321	u_int16_t		*dest;
322{
323	register int		i;
324	u_int16_t		word = 0;
325
326	/* Force EEPROM to idle state. */
327	nge_eeprom_idle(sc);
328
329	/* Enter EEPROM access mode. */
330	nge_delay(sc);
331	SIO_CLR(NGE_MEAR_EE_CLK);
332	nge_delay(sc);
333	SIO_SET(NGE_MEAR_EE_CSEL);
334	nge_delay(sc);
335
336	/*
337	 * Send address of word we want to read.
338	 */
339	nge_eeprom_putbyte(sc, addr);
340
341	/*
342	 * Start reading bits from EEPROM.
343	 */
344	for (i = 0x8000; i; i >>= 1) {
345		SIO_SET(NGE_MEAR_EE_CLK);
346		nge_delay(sc);
347		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
348			word |= i;
349		nge_delay(sc);
350		SIO_CLR(NGE_MEAR_EE_CLK);
351		nge_delay(sc);
352	}
353
354	/* Turn off EEPROM access mode. */
355	nge_eeprom_idle(sc);
356
357	*dest = word;
358
359	return;
360}
361
362/*
363 * Read a sequence of words from the EEPROM.
364 */
365static void
366nge_read_eeprom(sc, dest, off, cnt, swap)
367	struct nge_softc	*sc;
368	caddr_t			dest;
369	int			off;
370	int			cnt;
371	int			swap;
372{
373	int			i;
374	u_int16_t		word = 0, *ptr;
375
376	for (i = 0; i < cnt; i++) {
377		nge_eeprom_getword(sc, off + i, &word);
378		ptr = (u_int16_t *)(dest + (i * 2));
379		if (swap)
380			*ptr = ntohs(word);
381		else
382			*ptr = word;
383	}
384
385	return;
386}
387
388/*
389 * Sync the PHYs by setting data bit and strobing the clock 32 times.
390 */
391static void
392nge_mii_sync(sc)
393	struct nge_softc		*sc;
394{
395	register int		i;
396
397	SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);
398
399	for (i = 0; i < 32; i++) {
400		SIO_SET(NGE_MEAR_MII_CLK);
401		DELAY(1);
402		SIO_CLR(NGE_MEAR_MII_CLK);
403		DELAY(1);
404	}
405
406	return;
407}
408
409/*
410 * Clock a series of bits through the MII.
411 */
412static void
413nge_mii_send(sc, bits, cnt)
414	struct nge_softc		*sc;
415	u_int32_t		bits;
416	int			cnt;
417{
418	int			i;
419
420	SIO_CLR(NGE_MEAR_MII_CLK);
421
422	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
423                if (bits & i) {
424			SIO_SET(NGE_MEAR_MII_DATA);
425                } else {
426			SIO_CLR(NGE_MEAR_MII_DATA);
427                }
428		DELAY(1);
429		SIO_CLR(NGE_MEAR_MII_CLK);
430		DELAY(1);
431		SIO_SET(NGE_MEAR_MII_CLK);
432	}
433}
434
435/*
436 * Read an PHY register through the MII.
437 */
438static int
439nge_mii_readreg(sc, frame)
440	struct nge_softc		*sc;
441	struct nge_mii_frame	*frame;
442
443{
444	int			i, ack, s;
445
446	s = splimp();
447
448	/*
449	 * Set up frame for RX.
450	 */
451	frame->mii_stdelim = NGE_MII_STARTDELIM;
452	frame->mii_opcode = NGE_MII_READOP;
453	frame->mii_turnaround = 0;
454	frame->mii_data = 0;
455
456	CSR_WRITE_4(sc, NGE_MEAR, 0);
457
458	/*
459 	 * Turn on data xmit.
460	 */
461	SIO_SET(NGE_MEAR_MII_DIR);
462
463	nge_mii_sync(sc);
464
465	/*
466	 * Send command/address info.
467	 */
468	nge_mii_send(sc, frame->mii_stdelim, 2);
469	nge_mii_send(sc, frame->mii_opcode, 2);
470	nge_mii_send(sc, frame->mii_phyaddr, 5);
471	nge_mii_send(sc, frame->mii_regaddr, 5);
472
473	/* Idle bit */
474	SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
475	DELAY(1);
476	SIO_SET(NGE_MEAR_MII_CLK);
477	DELAY(1);
478
479	/* Turn off xmit. */
480	SIO_CLR(NGE_MEAR_MII_DIR);
481	/* Check for ack */
482	SIO_CLR(NGE_MEAR_MII_CLK);
483	DELAY(1);
484	ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
485	SIO_SET(NGE_MEAR_MII_CLK);
486	DELAY(1);
487
488	/*
489	 * Now try reading data bits. If the ack failed, we still
490	 * need to clock through 16 cycles to keep the PHY(s) in sync.
491	 */
492	if (ack) {
493		for(i = 0; i < 16; i++) {
494			SIO_CLR(NGE_MEAR_MII_CLK);
495			DELAY(1);
496			SIO_SET(NGE_MEAR_MII_CLK);
497			DELAY(1);
498		}
499		goto fail;
500	}
501
502	for (i = 0x8000; i; i >>= 1) {
503		SIO_CLR(NGE_MEAR_MII_CLK);
504		DELAY(1);
505		if (!ack) {
506			if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
507				frame->mii_data |= i;
508			DELAY(1);
509		}
510		SIO_SET(NGE_MEAR_MII_CLK);
511		DELAY(1);
512	}
513
514fail:
515
516	SIO_CLR(NGE_MEAR_MII_CLK);
517	DELAY(1);
518	SIO_SET(NGE_MEAR_MII_CLK);
519	DELAY(1);
520
521	splx(s);
522
523	if (ack)
524		return(1);
525	return(0);
526}
527
528/*
529 * Write to a PHY register through the MII.
530 */
531static int
532nge_mii_writereg(sc, frame)
533	struct nge_softc		*sc;
534	struct nge_mii_frame	*frame;
535
536{
537	int			s;
538
539	s = splimp();
540	/*
541	 * Set up frame for TX.
542	 */
543
544	frame->mii_stdelim = NGE_MII_STARTDELIM;
545	frame->mii_opcode = NGE_MII_WRITEOP;
546	frame->mii_turnaround = NGE_MII_TURNAROUND;
547
548	/*
549 	 * Turn on data output.
550	 */
551	SIO_SET(NGE_MEAR_MII_DIR);
552
553	nge_mii_sync(sc);
554
555	nge_mii_send(sc, frame->mii_stdelim, 2);
556	nge_mii_send(sc, frame->mii_opcode, 2);
557	nge_mii_send(sc, frame->mii_phyaddr, 5);
558	nge_mii_send(sc, frame->mii_regaddr, 5);
559	nge_mii_send(sc, frame->mii_turnaround, 2);
560	nge_mii_send(sc, frame->mii_data, 16);
561
562	/* Idle bit. */
563	SIO_SET(NGE_MEAR_MII_CLK);
564	DELAY(1);
565	SIO_CLR(NGE_MEAR_MII_CLK);
566	DELAY(1);
567
568	/*
569	 * Turn off xmit.
570	 */
571	SIO_CLR(NGE_MEAR_MII_DIR);
572
573	splx(s);
574
575	return(0);
576}
577
578static int
579nge_miibus_readreg(dev, phy, reg)
580	device_t		dev;
581	int			phy, reg;
582{
583	struct nge_softc	*sc;
584	struct nge_mii_frame	frame;
585
586	sc = device_get_softc(dev);
587
588	bzero((char *)&frame, sizeof(frame));
589
590	frame.mii_phyaddr = phy;
591	frame.mii_regaddr = reg;
592	nge_mii_readreg(sc, &frame);
593
594	return(frame.mii_data);
595}
596
597static int
598nge_miibus_writereg(dev, phy, reg, data)
599	device_t		dev;
600	int			phy, reg, data;
601{
602	struct nge_softc	*sc;
603	struct nge_mii_frame	frame;
604
605	sc = device_get_softc(dev);
606
607	bzero((char *)&frame, sizeof(frame));
608
609	frame.mii_phyaddr = phy;
610	frame.mii_regaddr = reg;
611	frame.mii_data = data;
612	nge_mii_writereg(sc, &frame);
613
614	return(0);
615}
616
617static void
618nge_miibus_statchg(dev)
619	device_t		dev;
620{
621	int			status;
622	struct nge_softc	*sc;
623	struct mii_data		*mii;
624
625	sc = device_get_softc(dev);
626	if (sc->nge_tbi) {
627		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
628		    == IFM_AUTO) {
629			status = CSR_READ_4(sc, NGE_TBI_ANLPAR);
630			if (status == 0 || status & NGE_TBIANAR_FDX) {
631				NGE_SETBIT(sc, NGE_TX_CFG,
632				    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
633				NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
634			} else {
635				NGE_CLRBIT(sc, NGE_TX_CFG,
636				    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
637				NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
638			}
639
640		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
641			!= IFM_FDX) {
642			NGE_CLRBIT(sc, NGE_TX_CFG,
643			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
644			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
645		} else {
646			NGE_SETBIT(sc, NGE_TX_CFG,
647			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
648			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
649		}
650	} else {
651		mii = device_get_softc(sc->nge_miibus);
652
653		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
654		        NGE_SETBIT(sc, NGE_TX_CFG,
655			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
656			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
657		} else {
658			NGE_CLRBIT(sc, NGE_TX_CFG,
659			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
660			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
661		}
662
663		/* If we have a 1000Mbps link, set the mode_1000 bit. */
664		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
665		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
666			NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
667		} else {
668			NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
669		}
670	}
671	return;
672}
673
674static u_int32_t
675nge_mchash(addr)
676	const uint8_t *addr;
677{
678	uint32_t crc, carry;
679	int idx, bit;
680	uint8_t data;
681
682	/* Compute CRC for the address value. */
683	crc = 0xFFFFFFFF; /* initial value */
684
685	for (idx = 0; idx < 6; idx++) {
686		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
687			carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
688			crc <<= 1;
689			if (carry)
690				crc = (crc ^ 0x04c11db6) | carry;
691		}
692	}
693
694	/*
695	 * return the filter bit position
696	 */
697
698	return((crc >> 21) & 0x00000FFF);
699}
700
701static void
702nge_setmulti(sc)
703	struct nge_softc	*sc;
704{
705	struct ifnet		*ifp;
706	struct ifmultiaddr	*ifma;
707	u_int32_t		h = 0, i, filtsave;
708	int			bit, index;
709
710	ifp = &sc->arpcom.ac_if;
711
712	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
713		NGE_CLRBIT(sc, NGE_RXFILT_CTL,
714		    NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH);
715		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI);
716		return;
717	}
718
719	/*
720	 * We have to explicitly enable the multicast hash table
721	 * on the NatSemi chip if we want to use it, which we do.
722	 * We also have to tell it that we don't want to use the
723	 * hash table for matching unicast addresses.
724	 */
725	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH);
726	NGE_CLRBIT(sc, NGE_RXFILT_CTL,
727	    NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH);
728
729	filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL);
730
731	/* first, zot all the existing hash bits */
732	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
733		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
734		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
735	}
736
737	/*
738	 * From the 11 bits returned by the crc routine, the top 7
739	 * bits represent the 16-bit word in the mcast hash table
740	 * that needs to be updated, and the lower 4 bits represent
741	 * which bit within that byte needs to be set.
742	 */
743	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
744		if (ifma->ifma_addr->sa_family != AF_LINK)
745			continue;
746		h = nge_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
747		index = (h >> 4) & 0x7F;
748		bit = h & 0xF;
749		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
750		    NGE_FILTADDR_MCAST_LO + (index * 2));
751		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
752	}
753
754	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave);
755
756	return;
757}
758
759static void
760nge_reset(sc)
761	struct nge_softc	*sc;
762{
763	register int		i;
764
765	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);
766
767	for (i = 0; i < NGE_TIMEOUT; i++) {
768		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
769			break;
770	}
771
772	if (i == NGE_TIMEOUT)
773		printf("nge%d: reset never completed\n", sc->nge_unit);
774
775	/* Wait a little while for the chip to get its brains in order. */
776	DELAY(1000);
777
778	/*
779	 * If this is a NetSemi chip, make sure to clear
780	 * PME mode.
781	 */
782	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
783	CSR_WRITE_4(sc, NGE_CLKRUN, 0);
784
785        return;
786}
787
788/*
789 * Probe for a NatSemi chip. Check the PCI vendor and device
790 * IDs against our list and return a device name if we find a match.
791 */
792static int
793nge_probe(dev)
794	device_t		dev;
795{
796	struct nge_type		*t;
797
798	t = nge_devs;
799
800	while(t->nge_name != NULL) {
801		if ((pci_get_vendor(dev) == t->nge_vid) &&
802		    (pci_get_device(dev) == t->nge_did)) {
803			device_set_desc(dev, t->nge_name);
804			return(0);
805		}
806		t++;
807	}
808
809	return(ENXIO);
810}
811
812/*
813 * Attach the interface. Allocate softc structures, do ifmedia
814 * setup and ethernet/BPF attach.
815 */
816static int
817nge_attach(dev)
818	device_t		dev;
819{
820	int			s;
821	u_char			eaddr[ETHER_ADDR_LEN];
822	struct nge_softc	*sc;
823	struct ifnet		*ifp;
824	int			unit, error = 0, rid;
825	const char		*sep = "";
826
827	s = splimp();
828
829	sc = device_get_softc(dev);
830	unit = device_get_unit(dev);
831	bzero(sc, sizeof(struct nge_softc));
832
833	mtx_init(&sc->nge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
834	    MTX_DEF | MTX_RECURSE);
835#ifndef BURN_BRIDGES
836	/*
837	 * Handle power management nonsense.
838	 */
839	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
840		u_int32_t		iobase, membase, irq;
841
842		/* Save important PCI config data. */
843		iobase = pci_read_config(dev, NGE_PCI_LOIO, 4);
844		membase = pci_read_config(dev, NGE_PCI_LOMEM, 4);
845		irq = pci_read_config(dev, NGE_PCI_INTLINE, 4);
846
847		/* Reset the power state. */
848		printf("nge%d: chip is in D%d power mode "
849		    "-- setting to D0\n", unit,
850		    pci_get_powerstate(dev));
851		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
852
853		/* Restore PCI config data. */
854		pci_write_config(dev, NGE_PCI_LOIO, iobase, 4);
855		pci_write_config(dev, NGE_PCI_LOMEM, membase, 4);
856		pci_write_config(dev, NGE_PCI_INTLINE, irq, 4);
857	}
858#endif
859	/*
860	 * Map control/status registers.
861	 */
862	pci_enable_busmaster(dev);
863
864	rid = NGE_RID;
865	sc->nge_res = bus_alloc_resource_any(dev, NGE_RES, &rid, RF_ACTIVE);
866
867	if (sc->nge_res == NULL) {
868		printf("nge%d: couldn't map ports/memory\n", unit);
869		error = ENXIO;
870		goto fail;
871	}
872
873	sc->nge_btag = rman_get_bustag(sc->nge_res);
874	sc->nge_bhandle = rman_get_bushandle(sc->nge_res);
875
876	/* Allocate interrupt */
877	rid = 0;
878	sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
879	    RF_SHAREABLE | RF_ACTIVE);
880
881	if (sc->nge_irq == NULL) {
882		printf("nge%d: couldn't map interrupt\n", unit);
883		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
884		error = ENXIO;
885		goto fail;
886	}
887
888	error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET,
889	    nge_intr, sc, &sc->nge_intrhand);
890
891	if (error) {
892		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
893		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
894		printf("nge%d: couldn't set up irq\n", unit);
895		goto fail;
896	}
897
898	/* Reset the adapter. */
899	nge_reset(sc);
900
901	/*
902	 * Get station address from the EEPROM.
903	 */
904	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
905	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
906	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);
907
908	sc->nge_unit = unit;
909	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
910
911	sc->nge_ldata = contigmalloc(sizeof(struct nge_list_data), M_DEVBUF,
912	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
913
914	if (sc->nge_ldata == NULL) {
915		printf("nge%d: no memory for list buffers!\n", unit);
916		bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
917		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
918		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
919		error = ENXIO;
920		goto fail;
921	}
922	bzero(sc->nge_ldata, sizeof(struct nge_list_data));
923
924	/* Try to allocate memory for jumbo buffers. */
925	if (nge_alloc_jumbo_mem(sc)) {
926		printf("nge%d: jumbo buffer allocation failed\n",
927                    sc->nge_unit);
928		contigfree(sc->nge_ldata,
929		    sizeof(struct nge_list_data), M_DEVBUF);
930		bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
931		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
932		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
933		error = ENXIO;
934		goto fail;
935	}
936
937	ifp = &sc->arpcom.ac_if;
938	ifp->if_softc = sc;
939	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
940	ifp->if_mtu = ETHERMTU;
941	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
942	ifp->if_ioctl = nge_ioctl;
943	ifp->if_output = ether_output;
944	ifp->if_start = nge_start;
945	ifp->if_watchdog = nge_watchdog;
946	ifp->if_init = nge_init;
947	ifp->if_baudrate = 1000000000;
948	ifp->if_snd.ifq_maxlen = NGE_TX_LIST_CNT - 1;
949	ifp->if_hwassist = NGE_CSUM_FEATURES;
950	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING;
951	ifp->if_capenable = ifp->if_capabilities;
952
953	/*
954	 * Do MII setup.
955	 */
956	if (mii_phy_probe(dev, &sc->nge_miibus,
957			  nge_ifmedia_upd, nge_ifmedia_sts)) {
958		if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) {
959			sc->nge_tbi = 1;
960			device_printf(dev, "Using TBI\n");
961
962			sc->nge_miibus = dev;
963
964			ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_upd,
965				nge_ifmedia_sts);
966#define	ADD(m, c)	ifmedia_add(&sc->nge_ifmedia, (m), (c), NULL)
967#define PRINT(s)	printf("%s%s", sep, s); sep = ", "
968			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, 0), 0);
969			device_printf(dev, " ");
970			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, 0, 0), 0);
971			PRINT("1000baseSX");
972			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX, 0),0);
973			PRINT("1000baseSX-FDX");
974			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0), 0);
975			PRINT("auto");
976
977			printf("\n");
978#undef ADD
979#undef PRINT
980			ifmedia_set(&sc->nge_ifmedia,
981				IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0));
982
983			CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
984				| NGE_GPIO_GP4_OUT
985				| NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
986				| NGE_GPIO_GP3_OUTENB
987				| NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN);
988
989		} else {
990			printf("nge%d: MII without any PHY!\n", sc->nge_unit);
991			nge_free_jumbo_mem(sc);
992			bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
993			bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
994			bus_release_resource(dev, NGE_RES, NGE_RID,
995					 sc->nge_res);
996			error = ENXIO;
997			goto fail;
998		}
999	}
1000
1001	/*
1002	 * Call MI attach routine.
1003	 */
1004	ether_ifattach(ifp, eaddr);
1005	callout_handle_init(&sc->nge_stat_ch);
1006
1007fail:
1008
1009	splx(s);
1010	mtx_destroy(&sc->nge_mtx);
1011	return(error);
1012}
1013
1014static int
1015nge_detach(dev)
1016	device_t		dev;
1017{
1018	struct nge_softc	*sc;
1019	struct ifnet		*ifp;
1020	int			s;
1021
1022	s = splimp();
1023
1024	sc = device_get_softc(dev);
1025	ifp = &sc->arpcom.ac_if;
1026
1027	nge_reset(sc);
1028	nge_stop(sc);
1029	ether_ifdetach(ifp);
1030
1031	bus_generic_detach(dev);
1032	if (!sc->nge_tbi) {
1033		device_delete_child(dev, sc->nge_miibus);
1034	}
1035	bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
1036	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
1037	bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
1038
1039	contigfree(sc->nge_ldata, sizeof(struct nge_list_data), M_DEVBUF);
1040	nge_free_jumbo_mem(sc);
1041
1042	splx(s);
1043	mtx_destroy(&sc->nge_mtx);
1044
1045	return(0);
1046}
1047
1048/*
1049 * Initialize the transmit descriptors.
1050 */
1051static int
1052nge_list_tx_init(sc)
1053	struct nge_softc	*sc;
1054{
1055	struct nge_list_data	*ld;
1056	struct nge_ring_data	*cd;
1057	int			i;
1058
1059	cd = &sc->nge_cdata;
1060	ld = sc->nge_ldata;
1061
1062	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
1063		if (i == (NGE_TX_LIST_CNT - 1)) {
1064			ld->nge_tx_list[i].nge_nextdesc =
1065			    &ld->nge_tx_list[0];
1066			ld->nge_tx_list[i].nge_next =
1067			    vtophys(&ld->nge_tx_list[0]);
1068		} else {
1069			ld->nge_tx_list[i].nge_nextdesc =
1070			    &ld->nge_tx_list[i + 1];
1071			ld->nge_tx_list[i].nge_next =
1072			    vtophys(&ld->nge_tx_list[i + 1]);
1073		}
1074		ld->nge_tx_list[i].nge_mbuf = NULL;
1075		ld->nge_tx_list[i].nge_ptr = 0;
1076		ld->nge_tx_list[i].nge_ctl = 0;
1077	}
1078
1079	cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0;
1080
1081	return(0);
1082}
1083
1084
1085/*
1086 * Initialize the RX descriptors and allocate mbufs for them. Note that
1087 * we arrange the descriptors in a closed ring, so that the last descriptor
1088 * points back to the first.
1089 */
1090static int
1091nge_list_rx_init(sc)
1092	struct nge_softc	*sc;
1093{
1094	struct nge_list_data	*ld;
1095	struct nge_ring_data	*cd;
1096	int			i;
1097
1098	ld = sc->nge_ldata;
1099	cd = &sc->nge_cdata;
1100
1101	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
1102		if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS)
1103			return(ENOBUFS);
1104		if (i == (NGE_RX_LIST_CNT - 1)) {
1105			ld->nge_rx_list[i].nge_nextdesc =
1106			    &ld->nge_rx_list[0];
1107			ld->nge_rx_list[i].nge_next =
1108			    vtophys(&ld->nge_rx_list[0]);
1109		} else {
1110			ld->nge_rx_list[i].nge_nextdesc =
1111			    &ld->nge_rx_list[i + 1];
1112			ld->nge_rx_list[i].nge_next =
1113			    vtophys(&ld->nge_rx_list[i + 1]);
1114		}
1115	}
1116
1117	cd->nge_rx_prod = 0;
1118
1119	return(0);
1120}
1121
1122/*
1123 * Initialize an RX descriptor and attach an MBUF cluster.
1124 */
1125static int
1126nge_newbuf(sc, c, m)
1127	struct nge_softc	*sc;
1128	struct nge_desc		*c;
1129	struct mbuf		*m;
1130{
1131	struct mbuf		*m_new = NULL;
1132	caddr_t			*buf = NULL;
1133
1134	if (m == NULL) {
1135		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1136		if (m_new == NULL) {
1137			printf("nge%d: no memory for rx list "
1138			    "-- packet dropped!\n", sc->nge_unit);
1139			return(ENOBUFS);
1140		}
1141
1142		/* Allocate the jumbo buffer */
1143		buf = nge_jalloc(sc);
1144		if (buf == NULL) {
1145#ifdef NGE_VERBOSE
1146			printf("nge%d: jumbo allocation failed "
1147			    "-- packet dropped!\n", sc->nge_unit);
1148#endif
1149			m_freem(m_new);
1150			return(ENOBUFS);
1151		}
1152		/* Attach the buffer to the mbuf */
1153		m_new->m_data = (void *)buf;
1154		m_new->m_len = m_new->m_pkthdr.len = NGE_JUMBO_FRAMELEN;
1155		MEXTADD(m_new, buf, NGE_JUMBO_FRAMELEN, nge_jfree,
1156		    (struct nge_softc *)sc, 0, EXT_NET_DRV);
1157	} else {
1158		m_new = m;
1159		m_new->m_len = m_new->m_pkthdr.len = NGE_JUMBO_FRAMELEN;
1160		m_new->m_data = m_new->m_ext.ext_buf;
1161	}
1162
1163	m_adj(m_new, sizeof(u_int64_t));
1164
1165	c->nge_mbuf = m_new;
1166	c->nge_ptr = vtophys(mtod(m_new, caddr_t));
1167	c->nge_ctl = m_new->m_len;
1168	c->nge_extsts = 0;
1169
1170	return(0);
1171}
1172
1173static int
1174nge_alloc_jumbo_mem(sc)
1175	struct nge_softc	*sc;
1176{
1177	caddr_t			ptr;
1178	register int		i;
1179	struct nge_jpool_entry   *entry;
1180
1181	/* Grab a big chunk o' storage. */
1182	sc->nge_cdata.nge_jumbo_buf = contigmalloc(NGE_JMEM, M_DEVBUF,
1183	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1184
1185	if (sc->nge_cdata.nge_jumbo_buf == NULL) {
1186		printf("nge%d: no memory for jumbo buffers!\n", sc->nge_unit);
1187		return(ENOBUFS);
1188	}
1189
1190	SLIST_INIT(&sc->nge_jfree_listhead);
1191	SLIST_INIT(&sc->nge_jinuse_listhead);
1192
1193	/*
1194	 * Now divide it up into 9K pieces and save the addresses
1195	 * in an array.
1196	 */
1197	ptr = sc->nge_cdata.nge_jumbo_buf;
1198	for (i = 0; i < NGE_JSLOTS; i++) {
1199		sc->nge_cdata.nge_jslots[i] = ptr;
1200		ptr += NGE_JLEN;
1201		entry = malloc(sizeof(struct nge_jpool_entry),
1202		    M_DEVBUF, M_NOWAIT);
1203		if (entry == NULL) {
1204			printf("nge%d: no memory for jumbo "
1205			    "buffer queue!\n", sc->nge_unit);
1206			return(ENOBUFS);
1207		}
1208		entry->slot = i;
1209		SLIST_INSERT_HEAD(&sc->nge_jfree_listhead,
1210		    entry, jpool_entries);
1211	}
1212
1213	return(0);
1214}
1215
1216static void
1217nge_free_jumbo_mem(sc)
1218	struct nge_softc	*sc;
1219{
1220	register int		i;
1221	struct nge_jpool_entry   *entry;
1222
1223	for (i = 0; i < NGE_JSLOTS; i++) {
1224		entry = SLIST_FIRST(&sc->nge_jfree_listhead);
1225		SLIST_REMOVE_HEAD(&sc->nge_jfree_listhead, jpool_entries);
1226		free(entry, M_DEVBUF);
1227	}
1228
1229	contigfree(sc->nge_cdata.nge_jumbo_buf, NGE_JMEM, M_DEVBUF);
1230
1231	return;
1232}
1233
1234/*
1235 * Allocate a jumbo buffer.
1236 */
1237static void *
1238nge_jalloc(sc)
1239	struct nge_softc	*sc;
1240{
1241	struct nge_jpool_entry   *entry;
1242
1243	entry = SLIST_FIRST(&sc->nge_jfree_listhead);
1244
1245	if (entry == NULL) {
1246#ifdef NGE_VERBOSE
1247		printf("nge%d: no free jumbo buffers\n", sc->nge_unit);
1248#endif
1249		return(NULL);
1250	}
1251
1252	SLIST_REMOVE_HEAD(&sc->nge_jfree_listhead, jpool_entries);
1253	SLIST_INSERT_HEAD(&sc->nge_jinuse_listhead, entry, jpool_entries);
1254	return(sc->nge_cdata.nge_jslots[entry->slot]);
1255}
1256
1257/*
1258 * Release a jumbo buffer.
1259 */
1260static void
1261nge_jfree(buf, args)
1262	void			*buf;
1263	void			*args;
1264{
1265	struct nge_softc	*sc;
1266	int		        i;
1267	struct nge_jpool_entry   *entry;
1268
1269	/* Extract the softc struct pointer. */
1270	sc = args;
1271
1272	if (sc == NULL)
1273		panic("nge_jfree: can't find softc pointer!");
1274
1275	/* calculate the slot this buffer belongs to */
1276	i = ((vm_offset_t)buf
1277	     - (vm_offset_t)sc->nge_cdata.nge_jumbo_buf) / NGE_JLEN;
1278
1279	if ((i < 0) || (i >= NGE_JSLOTS))
1280		panic("nge_jfree: asked to free buffer that we don't manage!");
1281
1282	entry = SLIST_FIRST(&sc->nge_jinuse_listhead);
1283	if (entry == NULL)
1284		panic("nge_jfree: buffer not in use!");
1285	entry->slot = i;
1286	SLIST_REMOVE_HEAD(&sc->nge_jinuse_listhead, jpool_entries);
1287	SLIST_INSERT_HEAD(&sc->nge_jfree_listhead, entry, jpool_entries);
1288
1289	return;
1290}
1291/*
1292 * A frame has been uploaded: pass the resulting mbuf chain up to
1293 * the higher level protocols.
1294 */
1295static void
1296nge_rxeof(sc)
1297	struct nge_softc	*sc;
1298{
1299        struct mbuf		*m;
1300        struct ifnet		*ifp;
1301	struct nge_desc		*cur_rx;
1302	int			i, total_len = 0;
1303	u_int32_t		rxstat;
1304
1305	ifp = &sc->arpcom.ac_if;
1306	i = sc->nge_cdata.nge_rx_prod;
1307
1308	while(NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) {
1309		struct mbuf		*m0 = NULL;
1310		u_int32_t		extsts;
1311
1312#ifdef DEVICE_POLLING
1313		if (ifp->if_flags & IFF_POLLING) {
1314			if (sc->rxcycles <= 0)
1315				break;
1316			sc->rxcycles--;
1317		}
1318#endif /* DEVICE_POLLING */
1319
1320		cur_rx = &sc->nge_ldata->nge_rx_list[i];
1321		rxstat = cur_rx->nge_rxstat;
1322		extsts = cur_rx->nge_extsts;
1323		m = cur_rx->nge_mbuf;
1324		cur_rx->nge_mbuf = NULL;
1325		total_len = NGE_RXBYTES(cur_rx);
1326		NGE_INC(i, NGE_RX_LIST_CNT);
1327		/*
1328		 * If an error occurs, update stats, clear the
1329		 * status word and leave the mbuf cluster in place:
1330		 * it should simply get re-used next time this descriptor
1331	 	 * comes up in the ring.
1332		 */
1333		if (!(rxstat & NGE_CMDSTS_PKT_OK)) {
1334			ifp->if_ierrors++;
1335			nge_newbuf(sc, cur_rx, m);
1336			continue;
1337		}
1338
1339		/*
1340		 * Ok. NatSemi really screwed up here. This is the
1341		 * only gigE chip I know of with alignment constraints
1342		 * on receive buffers. RX buffers must be 64-bit aligned.
1343		 */
1344#ifdef __i386__
1345		/*
1346		 * By popular demand, ignore the alignment problems
1347		 * on the Intel x86 platform. The performance hit
1348		 * incurred due to unaligned accesses is much smaller
1349		 * than the hit produced by forcing buffer copies all
1350		 * the time, especially with jumbo frames. We still
1351		 * need to fix up the alignment everywhere else though.
1352		 */
1353		if (nge_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
1354#endif
1355			m0 = m_devget(mtod(m, char *), total_len,
1356			    ETHER_ALIGN, ifp, NULL);
1357			nge_newbuf(sc, cur_rx, m);
1358			if (m0 == NULL) {
1359				printf("nge%d: no receive buffers "
1360				    "available -- packet dropped!\n",
1361				    sc->nge_unit);
1362				ifp->if_ierrors++;
1363				continue;
1364			}
1365			m = m0;
1366#ifdef __i386__
1367		} else {
1368			m->m_pkthdr.rcvif = ifp;
1369			m->m_pkthdr.len = m->m_len = total_len;
1370		}
1371#endif
1372
1373		ifp->if_ipackets++;
1374
1375		/* Do IP checksum checking. */
1376		if (extsts & NGE_RXEXTSTS_IPPKT)
1377			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1378		if (!(extsts & NGE_RXEXTSTS_IPCSUMERR))
1379			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1380		if ((extsts & NGE_RXEXTSTS_TCPPKT &&
1381		    !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) ||
1382		    (extsts & NGE_RXEXTSTS_UDPPKT &&
1383		    !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) {
1384			m->m_pkthdr.csum_flags |=
1385			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
1386			m->m_pkthdr.csum_data = 0xffff;
1387		}
1388
1389		/*
1390		 * If we received a packet with a vlan tag, pass it
1391		 * to vlan_input() instead of ether_input().
1392		 */
1393		if (extsts & NGE_RXEXTSTS_VLANPKT) {
1394			VLAN_INPUT_TAG(ifp, m,
1395			    ntohs(extsts & NGE_RXEXTSTS_VTCI), continue);
1396		}
1397
1398		(*ifp->if_input)(ifp, m);
1399	}
1400
1401	sc->nge_cdata.nge_rx_prod = i;
1402
1403	return;
1404}
1405
1406/*
1407 * A frame was downloaded to the chip. It's safe for us to clean up
1408 * the list buffers.
1409 */
1410
1411static void
1412nge_txeof(sc)
1413	struct nge_softc	*sc;
1414{
1415	struct nge_desc		*cur_tx;
1416	struct ifnet		*ifp;
1417	u_int32_t		idx;
1418
1419	ifp = &sc->arpcom.ac_if;
1420
1421	/*
1422	 * Go through our tx list and free mbufs for those
1423	 * frames that have been transmitted.
1424	 */
1425	idx = sc->nge_cdata.nge_tx_cons;
1426	while (idx != sc->nge_cdata.nge_tx_prod) {
1427		cur_tx = &sc->nge_ldata->nge_tx_list[idx];
1428
1429		if (NGE_OWNDESC(cur_tx))
1430			break;
1431
1432		if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) {
1433			sc->nge_cdata.nge_tx_cnt--;
1434			NGE_INC(idx, NGE_TX_LIST_CNT);
1435			continue;
1436		}
1437
1438		if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) {
1439			ifp->if_oerrors++;
1440			if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS)
1441				ifp->if_collisions++;
1442			if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL)
1443				ifp->if_collisions++;
1444		}
1445
1446		ifp->if_collisions +=
1447		    (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16;
1448
1449		ifp->if_opackets++;
1450		if (cur_tx->nge_mbuf != NULL) {
1451			m_freem(cur_tx->nge_mbuf);
1452			cur_tx->nge_mbuf = NULL;
1453			ifp->if_flags &= ~IFF_OACTIVE;
1454		}
1455
1456		sc->nge_cdata.nge_tx_cnt--;
1457		NGE_INC(idx, NGE_TX_LIST_CNT);
1458	}
1459
1460	sc->nge_cdata.nge_tx_cons = idx;
1461
1462	if (idx == sc->nge_cdata.nge_tx_prod)
1463		ifp->if_timer = 0;
1464
1465	return;
1466}
1467
1468static void
1469nge_tick(xsc)
1470	void			*xsc;
1471{
1472	struct nge_softc	*sc;
1473	struct mii_data		*mii;
1474	struct ifnet		*ifp;
1475	int			s;
1476
1477	s = splimp();
1478
1479	sc = xsc;
1480	ifp = &sc->arpcom.ac_if;
1481
1482	if (sc->nge_tbi) {
1483		if (!sc->nge_link) {
1484			if (CSR_READ_4(sc, NGE_TBI_BMSR)
1485			    & NGE_TBIBMSR_ANEG_DONE) {
1486				printf("nge%d: gigabit link up\n",
1487				    sc->nge_unit);
1488				nge_miibus_statchg(sc->nge_miibus);
1489				sc->nge_link++;
1490				if (ifp->if_snd.ifq_head != NULL)
1491					nge_start(ifp);
1492			}
1493		}
1494	} else {
1495		mii = device_get_softc(sc->nge_miibus);
1496		mii_tick(mii);
1497
1498		if (!sc->nge_link) {
1499			if (mii->mii_media_status & IFM_ACTIVE &&
1500			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1501				sc->nge_link++;
1502				if (IFM_SUBTYPE(mii->mii_media_active)
1503				    == IFM_1000_T)
1504					printf("nge%d: gigabit link up\n",
1505					    sc->nge_unit);
1506				if (ifp->if_snd.ifq_head != NULL)
1507					nge_start(ifp);
1508			}
1509		}
1510	}
1511	sc->nge_stat_ch = timeout(nge_tick, sc, hz);
1512
1513	splx(s);
1514
1515	return;
1516}
1517
1518#ifdef DEVICE_POLLING
1519static poll_handler_t nge_poll;
1520
1521static void
1522nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1523{
1524	struct  nge_softc *sc = ifp->if_softc;
1525
1526	if (cmd == POLL_DEREGISTER) {	/* final call, enable interrupts */
1527		CSR_WRITE_4(sc, NGE_IER, 1);
1528		return;
1529	}
1530
1531	/*
1532	 * On the nge, reading the status register also clears it.
1533	 * So before returning to intr mode we must make sure that all
1534	 * possible pending sources of interrupts have been served.
1535	 * In practice this means run to completion the *eof routines,
1536	 * and then call the interrupt routine
1537	 */
1538	sc->rxcycles = count;
1539	nge_rxeof(sc);
1540	nge_txeof(sc);
1541	if (ifp->if_snd.ifq_head != NULL)
1542		nge_start(ifp);
1543
1544	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
1545		u_int32_t	status;
1546
1547		/* Reading the ISR register clears all interrupts. */
1548		status = CSR_READ_4(sc, NGE_ISR);
1549
1550		if (status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW))
1551			nge_rxeof(sc);
1552
1553		if (status & (NGE_ISR_RX_IDLE))
1554			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1555
1556		if (status & NGE_ISR_SYSERR) {
1557			nge_reset(sc);
1558			nge_init(sc);
1559		}
1560	}
1561}
1562#endif /* DEVICE_POLLING */
1563
1564static void
1565nge_intr(arg)
1566	void			*arg;
1567{
1568	struct nge_softc	*sc;
1569	struct ifnet		*ifp;
1570	u_int32_t		status;
1571
1572	sc = arg;
1573	ifp = &sc->arpcom.ac_if;
1574
1575#ifdef DEVICE_POLLING
1576	if (ifp->if_flags & IFF_POLLING)
1577		return;
1578	if (ether_poll_register(nge_poll, ifp)) { /* ok, disable interrupts */
1579		CSR_WRITE_4(sc, NGE_IER, 0);
1580		nge_poll(ifp, 0, 1);
1581		return;
1582	}
1583#endif /* DEVICE_POLLING */
1584
1585	/* Supress unwanted interrupts */
1586	if (!(ifp->if_flags & IFF_UP)) {
1587		nge_stop(sc);
1588		return;
1589	}
1590
1591	/* Disable interrupts. */
1592	CSR_WRITE_4(sc, NGE_IER, 0);
1593
1594	/* Data LED on for TBI mode */
1595	if(sc->nge_tbi)
1596		 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1597			     | NGE_GPIO_GP3_OUT);
1598
1599	for (;;) {
1600		/* Reading the ISR register clears all interrupts. */
1601		status = CSR_READ_4(sc, NGE_ISR);
1602
1603		if ((status & NGE_INTRS) == 0)
1604			break;
1605
1606		if ((status & NGE_ISR_TX_DESC_OK) ||
1607		    (status & NGE_ISR_TX_ERR) ||
1608		    (status & NGE_ISR_TX_OK) ||
1609		    (status & NGE_ISR_TX_IDLE))
1610			nge_txeof(sc);
1611
1612		if ((status & NGE_ISR_RX_DESC_OK) ||
1613		    (status & NGE_ISR_RX_ERR) ||
1614		    (status & NGE_ISR_RX_OFLOW) ||
1615		    (status & NGE_ISR_RX_FIFO_OFLOW) ||
1616		    (status & NGE_ISR_RX_IDLE) ||
1617		    (status & NGE_ISR_RX_OK))
1618			nge_rxeof(sc);
1619
1620		if ((status & NGE_ISR_RX_IDLE))
1621			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1622
1623		if (status & NGE_ISR_SYSERR) {
1624			nge_reset(sc);
1625			ifp->if_flags &= ~IFF_RUNNING;
1626			nge_init(sc);
1627		}
1628
1629#if 0
1630		/*
1631		 * XXX: nge_tick() is not ready to be called this way
1632		 * it screws up the aneg timeout because mii_tick() is
1633		 * only to be called once per second.
1634		 */
1635		if (status & NGE_IMR_PHY_INTR) {
1636			sc->nge_link = 0;
1637			nge_tick(sc);
1638		}
1639#endif
1640	}
1641
1642	/* Re-enable interrupts. */
1643	CSR_WRITE_4(sc, NGE_IER, 1);
1644
1645	if (ifp->if_snd.ifq_head != NULL)
1646		nge_start(ifp);
1647
1648	/* Data LED off for TBI mode */
1649
1650	if(sc->nge_tbi)
1651		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1652			    & ~NGE_GPIO_GP3_OUT);
1653
1654	return;
1655}
1656
1657/*
1658 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1659 * pointers to the fragment pointers.
1660 */
1661static int
1662nge_encap(sc, m_head, txidx)
1663	struct nge_softc	*sc;
1664	struct mbuf		*m_head;
1665	u_int32_t		*txidx;
1666{
1667	struct nge_desc		*f = NULL;
1668	struct mbuf		*m;
1669	int			frag, cur, cnt = 0;
1670	struct m_tag		*mtag;
1671
1672	/*
1673 	 * Start packing the mbufs in this chain into
1674	 * the fragment pointers. Stop when we run out
1675 	 * of fragments or hit the end of the mbuf chain.
1676	 */
1677	m = m_head;
1678	cur = frag = *txidx;
1679
1680	for (m = m_head; m != NULL; m = m->m_next) {
1681		if (m->m_len != 0) {
1682			if ((NGE_TX_LIST_CNT -
1683			    (sc->nge_cdata.nge_tx_cnt + cnt)) < 2)
1684				return(ENOBUFS);
1685			f = &sc->nge_ldata->nge_tx_list[frag];
1686			f->nge_ctl = NGE_CMDSTS_MORE | m->m_len;
1687			f->nge_ptr = vtophys(mtod(m, vm_offset_t));
1688			if (cnt != 0)
1689				f->nge_ctl |= NGE_CMDSTS_OWN;
1690			cur = frag;
1691			NGE_INC(frag, NGE_TX_LIST_CNT);
1692			cnt++;
1693		}
1694	}
1695
1696	if (m != NULL)
1697		return(ENOBUFS);
1698
1699	sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0;
1700	if (m_head->m_pkthdr.csum_flags) {
1701		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
1702			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1703			    NGE_TXEXTSTS_IPCSUM;
1704		if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
1705			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1706			    NGE_TXEXTSTS_TCPCSUM;
1707		if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
1708			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1709			    NGE_TXEXTSTS_UDPCSUM;
1710	}
1711
1712	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m);
1713	if (mtag != NULL) {
1714		sc->nge_ldata->nge_tx_list[cur].nge_extsts |=
1715		    (NGE_TXEXTSTS_VLANPKT|htons(VLAN_TAG_VALUE(mtag)));
1716	}
1717
1718	sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head;
1719	sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE;
1720	sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN;
1721	sc->nge_cdata.nge_tx_cnt += cnt;
1722	*txidx = frag;
1723
1724	return(0);
1725}
1726
1727/*
1728 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1729 * to the mbuf data regions directly in the transmit lists. We also save a
1730 * copy of the pointers since the transmit list fragment pointers are
1731 * physical addresses.
1732 */
1733
1734static void
1735nge_start(ifp)
1736	struct ifnet		*ifp;
1737{
1738	struct nge_softc	*sc;
1739	struct mbuf		*m_head = NULL;
1740	u_int32_t		idx;
1741
1742	sc = ifp->if_softc;
1743
1744	if (!sc->nge_link)
1745		return;
1746
1747	idx = sc->nge_cdata.nge_tx_prod;
1748
1749	if (ifp->if_flags & IFF_OACTIVE)
1750		return;
1751
1752	while(sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) {
1753		IF_DEQUEUE(&ifp->if_snd, m_head);
1754		if (m_head == NULL)
1755			break;
1756
1757		if (nge_encap(sc, m_head, &idx)) {
1758			IF_PREPEND(&ifp->if_snd, m_head);
1759			ifp->if_flags |= IFF_OACTIVE;
1760			break;
1761		}
1762
1763		/*
1764		 * If there's a BPF listener, bounce a copy of this frame
1765		 * to him.
1766		 */
1767		BPF_MTAP(ifp, m_head);
1768
1769	}
1770
1771	/* Transmit */
1772	sc->nge_cdata.nge_tx_prod = idx;
1773	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);
1774
1775	/*
1776	 * Set a timeout in case the chip goes out to lunch.
1777	 */
1778	ifp->if_timer = 5;
1779
1780	return;
1781}
1782
1783static void
1784nge_init(xsc)
1785	void			*xsc;
1786{
1787	struct nge_softc	*sc = xsc;
1788	struct ifnet		*ifp = &sc->arpcom.ac_if;
1789	struct mii_data		*mii;
1790	int			s;
1791
1792	if (ifp->if_flags & IFF_RUNNING)
1793		return;
1794
1795	s = splimp();
1796
1797	/*
1798	 * Cancel pending I/O and free all RX/TX buffers.
1799	 */
1800	nge_stop(sc);
1801
1802	if (sc->nge_tbi) {
1803		mii = NULL;
1804	} else {
1805		mii = device_get_softc(sc->nge_miibus);
1806	}
1807
1808	/* Set MAC address */
1809	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
1810	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1811	    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
1812	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
1813	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1814	    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
1815	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
1816	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1817	    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
1818
1819	/* Init circular RX list. */
1820	if (nge_list_rx_init(sc) == ENOBUFS) {
1821		printf("nge%d: initialization failed: no "
1822			"memory for rx buffers\n", sc->nge_unit);
1823		nge_stop(sc);
1824		(void)splx(s);
1825		return;
1826	}
1827
1828	/*
1829	 * Init tx descriptors.
1830	 */
1831	nge_list_tx_init(sc);
1832
1833	/*
1834	 * For the NatSemi chip, we have to explicitly enable the
1835	 * reception of ARP frames, as well as turn on the 'perfect
1836	 * match' filter where we store the station address, otherwise
1837	 * we won't receive unicasts meant for this host.
1838	 */
1839	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
1840	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);
1841
1842	 /* If we want promiscuous mode, set the allframes bit. */
1843	if (ifp->if_flags & IFF_PROMISC) {
1844		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1845	} else {
1846		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1847	}
1848
1849	/*
1850	 * Set the capture broadcast bit to capture broadcast frames.
1851	 */
1852	if (ifp->if_flags & IFF_BROADCAST) {
1853		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1854	} else {
1855		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1856	}
1857
1858	/*
1859	 * Load the multicast filter.
1860	 */
1861	nge_setmulti(sc);
1862
1863	/* Turn the receive filter on */
1864	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);
1865
1866	/*
1867	 * Load the address of the RX and TX lists.
1868	 */
1869	CSR_WRITE_4(sc, NGE_RX_LISTPTR,
1870	    vtophys(&sc->nge_ldata->nge_rx_list[0]));
1871	CSR_WRITE_4(sc, NGE_TX_LISTPTR,
1872	    vtophys(&sc->nge_ldata->nge_tx_list[0]));
1873
1874	/* Set RX configuration */
1875	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);
1876	/*
1877	 * Enable hardware checksum validation for all IPv4
1878	 * packets, do not reject packets with bad checksums.
1879	 */
1880	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);
1881
1882	/*
1883	 * Tell the chip to detect and strip VLAN tag info from
1884	 * received frames. The tag will be provided in the extsts
1885	 * field in the RX descriptors.
1886	 */
1887	NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL,
1888	    NGE_VIPRXCTL_TAG_DETECT_ENB|NGE_VIPRXCTL_TAG_STRIP_ENB);
1889
1890	/* Set TX configuration */
1891	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);
1892
1893	/*
1894	 * Enable TX IPv4 checksumming on a per-packet basis.
1895	 */
1896	CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT);
1897
1898	/*
1899	 * Tell the chip to insert VLAN tags on a per-packet basis as
1900	 * dictated by the code in the frame encapsulation routine.
1901	 */
1902	NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);
1903
1904	/* Set full/half duplex mode. */
1905	if (sc->nge_tbi) {
1906		if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
1907		    == IFM_FDX) {
1908			NGE_SETBIT(sc, NGE_TX_CFG,
1909			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1910			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1911		} else {
1912			NGE_CLRBIT(sc, NGE_TX_CFG,
1913			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1914			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1915		}
1916	} else {
1917		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1918			NGE_SETBIT(sc, NGE_TX_CFG,
1919			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1920			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1921		} else {
1922			NGE_CLRBIT(sc, NGE_TX_CFG,
1923			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1924			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1925		}
1926	}
1927
1928	nge_tick(sc);
1929
1930	/*
1931	 * Enable the delivery of PHY interrupts based on
1932	 * link/speed/duplex status changes. Also enable the
1933	 * extsts field in the DMA descriptors (needed for
1934	 * TCP/IP checksum offload on transmit).
1935	 */
1936	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD|
1937	    NGE_CFG_PHYINTR_LNK|NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB);
1938
1939	/*
1940	 * Configure interrupt holdoff (moderation). We can
1941	 * have the chip delay interrupt delivery for a certain
1942	 * period. Units are in 100us, and the max setting
1943	 * is 25500us (0xFF x 100us). Default is a 100us holdoff.
1944	 */
1945	CSR_WRITE_4(sc, NGE_IHR, 0x01);
1946
1947	/*
1948	 * Enable interrupts.
1949	 */
1950	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
1951#ifdef DEVICE_POLLING
1952	/*
1953	 * ... only enable interrupts if we are not polling, make sure
1954	 * they are off otherwise.
1955	 */
1956	if (ifp->if_flags & IFF_POLLING)
1957		CSR_WRITE_4(sc, NGE_IER, 0);
1958	else
1959#endif /* DEVICE_POLLING */
1960	CSR_WRITE_4(sc, NGE_IER, 1);
1961
1962	/* Enable receiver and transmitter. */
1963	NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
1964	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1965
1966	nge_ifmedia_upd(ifp);
1967
1968	ifp->if_flags |= IFF_RUNNING;
1969	ifp->if_flags &= ~IFF_OACTIVE;
1970
1971	(void)splx(s);
1972
1973	return;
1974}
1975
1976/*
1977 * Set media options.
1978 */
1979static int
1980nge_ifmedia_upd(ifp)
1981	struct ifnet		*ifp;
1982{
1983	struct nge_softc	*sc;
1984	struct mii_data		*mii;
1985
1986	sc = ifp->if_softc;
1987
1988	if (sc->nge_tbi) {
1989		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
1990		     == IFM_AUTO) {
1991			CSR_WRITE_4(sc, NGE_TBI_ANAR,
1992				CSR_READ_4(sc, NGE_TBI_ANAR)
1993					| NGE_TBIANAR_HDX | NGE_TBIANAR_FDX
1994					| NGE_TBIANAR_PS1 | NGE_TBIANAR_PS2);
1995			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG
1996				| NGE_TBIBMCR_RESTART_ANEG);
1997			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG);
1998		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media
1999			    & IFM_GMASK) == IFM_FDX) {
2000			NGE_SETBIT(sc, NGE_TX_CFG,
2001			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
2002			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
2003
2004			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
2005			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
2006		} else {
2007			NGE_CLRBIT(sc, NGE_TX_CFG,
2008			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
2009			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
2010
2011			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
2012			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
2013		}
2014
2015		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
2016			    & ~NGE_GPIO_GP3_OUT);
2017	} else {
2018		mii = device_get_softc(sc->nge_miibus);
2019		sc->nge_link = 0;
2020		if (mii->mii_instance) {
2021			struct mii_softc	*miisc;
2022			for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2023			    miisc = LIST_NEXT(miisc, mii_list))
2024				mii_phy_reset(miisc);
2025		}
2026		mii_mediachg(mii);
2027	}
2028
2029	return(0);
2030}
2031
2032/*
2033 * Report current media status.
2034 */
2035static void
2036nge_ifmedia_sts(ifp, ifmr)
2037	struct ifnet		*ifp;
2038	struct ifmediareq	*ifmr;
2039{
2040	struct nge_softc	*sc;
2041	struct mii_data		*mii;
2042
2043	sc = ifp->if_softc;
2044
2045	if (sc->nge_tbi) {
2046		ifmr->ifm_status = IFM_AVALID;
2047		ifmr->ifm_active = IFM_ETHER;
2048
2049		if (CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) {
2050			ifmr->ifm_status |= IFM_ACTIVE;
2051		}
2052		if (CSR_READ_4(sc, NGE_TBI_BMCR) & NGE_TBIBMCR_LOOPBACK)
2053			ifmr->ifm_active |= IFM_LOOP;
2054		if (!CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) {
2055			ifmr->ifm_active |= IFM_NONE;
2056			ifmr->ifm_status = 0;
2057			return;
2058		}
2059		ifmr->ifm_active |= IFM_1000_SX;
2060		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
2061		    == IFM_AUTO) {
2062			ifmr->ifm_active |= IFM_AUTO;
2063			if (CSR_READ_4(sc, NGE_TBI_ANLPAR)
2064			    & NGE_TBIANAR_FDX) {
2065				ifmr->ifm_active |= IFM_FDX;
2066			}else if (CSR_READ_4(sc, NGE_TBI_ANLPAR)
2067				  & NGE_TBIANAR_HDX) {
2068				ifmr->ifm_active |= IFM_HDX;
2069			}
2070		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
2071			== IFM_FDX)
2072			ifmr->ifm_active |= IFM_FDX;
2073		else
2074			ifmr->ifm_active |= IFM_HDX;
2075
2076	} else {
2077		mii = device_get_softc(sc->nge_miibus);
2078		mii_pollstat(mii);
2079		ifmr->ifm_active = mii->mii_media_active;
2080		ifmr->ifm_status = mii->mii_media_status;
2081	}
2082
2083	return;
2084}
2085
2086static int
2087nge_ioctl(ifp, command, data)
2088	struct ifnet		*ifp;
2089	u_long			command;
2090	caddr_t			data;
2091{
2092	struct nge_softc	*sc = ifp->if_softc;
2093	struct ifreq		*ifr = (struct ifreq *) data;
2094	struct mii_data		*mii;
2095	int			s, error = 0;
2096
2097	s = splimp();
2098
2099	switch(command) {
2100	case SIOCSIFMTU:
2101		if (ifr->ifr_mtu > NGE_JUMBO_MTU)
2102			error = EINVAL;
2103		else {
2104			ifp->if_mtu = ifr->ifr_mtu;
2105			/*
2106			 * Workaround: if the MTU is larger than
2107			 * 8152 (TX FIFO size minus 64 minus 18), turn off
2108			 * TX checksum offloading.
2109			 */
2110			if (ifr->ifr_mtu >= 8152)
2111				ifp->if_hwassist = 0;
2112			else
2113				ifp->if_hwassist = NGE_CSUM_FEATURES;
2114		}
2115		break;
2116	case SIOCSIFFLAGS:
2117		if (ifp->if_flags & IFF_UP) {
2118			if (ifp->if_flags & IFF_RUNNING &&
2119			    ifp->if_flags & IFF_PROMISC &&
2120			    !(sc->nge_if_flags & IFF_PROMISC)) {
2121				NGE_SETBIT(sc, NGE_RXFILT_CTL,
2122				    NGE_RXFILTCTL_ALLPHYS|
2123				    NGE_RXFILTCTL_ALLMULTI);
2124			} else if (ifp->if_flags & IFF_RUNNING &&
2125			    !(ifp->if_flags & IFF_PROMISC) &&
2126			    sc->nge_if_flags & IFF_PROMISC) {
2127				NGE_CLRBIT(sc, NGE_RXFILT_CTL,
2128				    NGE_RXFILTCTL_ALLPHYS);
2129				if (!(ifp->if_flags & IFF_ALLMULTI))
2130					NGE_CLRBIT(sc, NGE_RXFILT_CTL,
2131					    NGE_RXFILTCTL_ALLMULTI);
2132			} else {
2133				ifp->if_flags &= ~IFF_RUNNING;
2134				nge_init(sc);
2135			}
2136		} else {
2137			if (ifp->if_flags & IFF_RUNNING)
2138				nge_stop(sc);
2139		}
2140		sc->nge_if_flags = ifp->if_flags;
2141		error = 0;
2142		break;
2143	case SIOCADDMULTI:
2144	case SIOCDELMULTI:
2145		nge_setmulti(sc);
2146		error = 0;
2147		break;
2148	case SIOCGIFMEDIA:
2149	case SIOCSIFMEDIA:
2150		if (sc->nge_tbi) {
2151			error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia,
2152					      command);
2153		} else {
2154			mii = device_get_softc(sc->nge_miibus);
2155			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
2156					      command);
2157		}
2158		break;
2159	default:
2160		error = ether_ioctl(ifp, command, data);
2161		break;
2162	}
2163
2164	(void)splx(s);
2165
2166	return(error);
2167}
2168
2169static void
2170nge_watchdog(ifp)
2171	struct ifnet		*ifp;
2172{
2173	struct nge_softc	*sc;
2174
2175	sc = ifp->if_softc;
2176
2177	ifp->if_oerrors++;
2178	printf("nge%d: watchdog timeout\n", sc->nge_unit);
2179
2180	nge_stop(sc);
2181	nge_reset(sc);
2182	ifp->if_flags &= ~IFF_RUNNING;
2183	nge_init(sc);
2184
2185	if (ifp->if_snd.ifq_head != NULL)
2186		nge_start(ifp);
2187
2188	return;
2189}
2190
2191/*
2192 * Stop the adapter and free any mbufs allocated to the
2193 * RX and TX lists.
2194 */
2195static void
2196nge_stop(sc)
2197	struct nge_softc	*sc;
2198{
2199	register int		i;
2200	struct ifnet		*ifp;
2201	struct mii_data		*mii;
2202
2203	ifp = &sc->arpcom.ac_if;
2204	ifp->if_timer = 0;
2205	if (sc->nge_tbi) {
2206		mii = NULL;
2207	} else {
2208		mii = device_get_softc(sc->nge_miibus);
2209	}
2210
2211	untimeout(nge_tick, sc, sc->nge_stat_ch);
2212#ifdef DEVICE_POLLING
2213	ether_poll_deregister(ifp);
2214#endif
2215	CSR_WRITE_4(sc, NGE_IER, 0);
2216	CSR_WRITE_4(sc, NGE_IMR, 0);
2217	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
2218	DELAY(1000);
2219	CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0);
2220	CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0);
2221
2222	if (!sc->nge_tbi)
2223		mii_down(mii);
2224
2225	sc->nge_link = 0;
2226
2227	/*
2228	 * Free data in the RX lists.
2229	 */
2230	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
2231		if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) {
2232			m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf);
2233			sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL;
2234		}
2235	}
2236	bzero((char *)&sc->nge_ldata->nge_rx_list,
2237		sizeof(sc->nge_ldata->nge_rx_list));
2238
2239	/*
2240	 * Free the TX list buffers.
2241	 */
2242	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
2243		if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) {
2244			m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf);
2245			sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL;
2246		}
2247	}
2248
2249	bzero((char *)&sc->nge_ldata->nge_tx_list,
2250		sizeof(sc->nge_ldata->nge_tx_list));
2251
2252	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2253
2254	return;
2255}
2256
2257/*
2258 * Stop all chip I/O so that the kernel's probe routines don't
2259 * get confused by errant DMAs when rebooting.
2260 */
2261static void
2262nge_shutdown(dev)
2263	device_t		dev;
2264{
2265	struct nge_softc	*sc;
2266
2267	sc = device_get_softc(dev);
2268
2269	nge_reset(sc);
2270	nge_stop(sc);
2271
2272	return;
2273}
2274