if_nge.c revision 192506
1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2000, 2001
4 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/nge/if_nge.c 192506 2009-05-21 02:12:10Z yongari $");
36
37/*
38 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
39 * for FreeBSD. Datasheets are available from:
40 *
41 * http://www.national.com/ds/DP/DP83820.pdf
42 * http://www.national.com/ds/DP/DP83821.pdf
43 *
44 * These chips are used on several low cost gigabit ethernet NICs
45 * sold by D-Link, Addtron, SMC and Asante. Both parts are
46 * virtually the same, except the 83820 is a 64-bit/32-bit part,
47 * while the 83821 is 32-bit only.
48 *
49 * Many cards also use National gigE transceivers, such as the
50 * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
51 * contains a full register description that applies to all of these
52 * components:
53 *
54 * http://www.national.com/ds/DP/DP83861.pdf
55 *
56 * Written by Bill Paul <wpaul@bsdi.com>
57 * BSDi Open Source Solutions
58 */
59
60/*
61 * The NatSemi DP83820 and 83821 controllers are enhanced versions
62 * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
63 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
64 * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
65 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
66 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
67 * matching buffers, one perfect address filter buffer and interrupt
68 * moderation. The 83820 supports both 64-bit and 32-bit addressing
69 * and data transfers: the 64-bit support can be toggled on or off
70 * via software. This affects the size of certain fields in the DMA
71 * descriptors.
72 *
73 * There are two bugs/misfeatures in the 83820/83821 that I have
74 * discovered so far:
75 *
76 * - Receive buffers must be aligned on 64-bit boundaries, which means
77 *   you must resort to copying data in order to fix up the payload
78 *   alignment.
79 *
80 * - In order to transmit jumbo frames larger than 8170 bytes, you have
81 *   to turn off transmit checksum offloading, because the chip can't
82 *   compute the checksum on an outgoing frame unless it fits entirely
83 *   within the TX FIFO, which is only 8192 bytes in size. If you have
84 * TX checksum offload enabled and you attempt to transmit a
85 *   frame larger than 8170 bytes, the transmitter will wedge.
86 *
87 * To work around the latter problem, TX checksum offload is disabled
88 * if the user selects an MTU larger than 8152 (8170 minus the 18 bytes of Ethernet header and CRC).
89 */
90
91#ifdef HAVE_KERNEL_OPTION_HEADERS
92#include "opt_device_polling.h"
93#endif
94
95#include <sys/param.h>
96#include <sys/systm.h>
97#include <sys/bus.h>
98#include <sys/endian.h>
99#include <sys/kernel.h>
100#include <sys/lock.h>
101#include <sys/malloc.h>
102#include <sys/mbuf.h>
103#include <sys/module.h>
104#include <sys/mutex.h>
105#include <sys/rman.h>
106#include <sys/socket.h>
107#include <sys/sockio.h>
108#include <sys/sysctl.h>
109
110#include <net/bpf.h>
111#include <net/if.h>
112#include <net/if_arp.h>
113#include <net/ethernet.h>
114#include <net/if_dl.h>
115#include <net/if_media.h>
116#include <net/if_types.h>
117#include <net/if_vlan_var.h>
118
119#include <dev/mii/mii.h>
120#include <dev/mii/miivar.h>
121
122#include <dev/pci/pcireg.h>
123#include <dev/pci/pcivar.h>
124
125#include <machine/bus.h>
126
127#include <dev/nge/if_ngereg.h>
128
129/* "device miibus" required.  See GENERIC if you get errors here. */
130#include "miibus_if.h"
131
132MODULE_DEPEND(nge, pci, 1, 1, 1);
133MODULE_DEPEND(nge, ether, 1, 1, 1);
134MODULE_DEPEND(nge, miibus, 1, 1, 1);
135
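/* Tx checksum offload features advertised to the stack via if_hwassist. */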
136#define NGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
137
138/*
139 * Various supported device vendors/types and their names.
140 */
141static struct nge_type nge_devs[] = {
142	{ NGE_VENDORID, NGE_DEVICEID,
143	    "National Semiconductor Gigabit Ethernet" },
144	{ 0, 0, NULL }
145};
146
147static int nge_probe(device_t);
148static int nge_attach(device_t);
149static int nge_detach(device_t);
150static int nge_shutdown(device_t);
151static int nge_suspend(device_t);
152static int nge_resume(device_t);
153
154static __inline void nge_discard_rxbuf(struct nge_softc *, int);
155static int nge_newbuf(struct nge_softc *, int);
156static int nge_encap(struct nge_softc *, struct mbuf **);
157#ifndef __NO_STRICT_ALIGNMENT
158static __inline void nge_fixup_rx(struct mbuf *);
159#endif
160static void nge_rxeof(struct nge_softc *);
161static void nge_txeof(struct nge_softc *);
162static void nge_intr(void *);
163static void nge_tick(void *);
164static void nge_stats_update(struct nge_softc *);
165static void nge_start(struct ifnet *);
166static void nge_start_locked(struct ifnet *);
167static int nge_ioctl(struct ifnet *, u_long, caddr_t);
168static void nge_init(void *);
169static void nge_init_locked(struct nge_softc *);
170static int nge_stop_mac(struct nge_softc *);
171static void nge_stop(struct nge_softc *);
172static void nge_wol(struct nge_softc *);
173static void nge_watchdog(struct nge_softc *);
174static int nge_mediachange(struct ifnet *);
175static void nge_mediastatus(struct ifnet *, struct ifmediareq *);
176
177static void nge_delay(struct nge_softc *);
178static void nge_eeprom_idle(struct nge_softc *);
179static void nge_eeprom_putbyte(struct nge_softc *, int);
180static void nge_eeprom_getword(struct nge_softc *, int, uint16_t *);
181static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int);
182
183static void nge_mii_sync(struct nge_softc *);
184static void nge_mii_send(struct nge_softc *, uint32_t, int);
185static int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
186static int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);
187
188static int nge_miibus_readreg(device_t, int, int);
189static int nge_miibus_writereg(device_t, int, int, int);
190static void nge_miibus_statchg(device_t);
191
192static void nge_rxfilter(struct nge_softc *);
193static void nge_reset(struct nge_softc *);
194static void nge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
195static int nge_dma_alloc(struct nge_softc *);
196static void nge_dma_free(struct nge_softc *);
197static int nge_list_rx_init(struct nge_softc *);
198static int nge_list_tx_init(struct nge_softc *);
199static void nge_sysctl_node(struct nge_softc *);
200static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
201static int sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS);
202
203static device_method_t nge_methods[] = {
204	/* Device interface */
205	DEVMETHOD(device_probe,		nge_probe),
206	DEVMETHOD(device_attach,	nge_attach),
207	DEVMETHOD(device_detach,	nge_detach),
208	DEVMETHOD(device_shutdown,	nge_shutdown),
209	DEVMETHOD(device_suspend,	nge_suspend),
210	DEVMETHOD(device_resume,	nge_resume),
211
212	/* bus interface */
213	DEVMETHOD(bus_print_child,	bus_generic_print_child),
214	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
215
216	/* MII interface */
217	DEVMETHOD(miibus_readreg,	nge_miibus_readreg),
218	DEVMETHOD(miibus_writereg,	nge_miibus_writereg),
219	DEVMETHOD(miibus_statchg,	nge_miibus_statchg),
220
221	{ NULL, NULL }
222};
223
224static driver_t nge_driver = {
225	"nge",
226	nge_methods,
227	sizeof(struct nge_softc)
228};
229
230static devclass_t nge_devclass;
231
232DRIVER_MODULE(nge, pci, nge_driver, nge_devclass, 0, 0);
233DRIVER_MODULE(miibus, nge, miibus_driver, miibus_devclass, 0, 0);
234
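/*
 * Register access helpers: NGE_SETBIT/NGE_CLRBIT read-modify-write a
 * CSR, while SIO_SET/SIO_CLR toggle bits in the MEAR register that
 * drives the EEPROM and MII serial interfaces.
 */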
235#define NGE_SETBIT(sc, reg, x)				\
236	CSR_WRITE_4(sc, reg,				\
237		CSR_READ_4(sc, reg) | (x))
238
239#define NGE_CLRBIT(sc, reg, x)				\
240	CSR_WRITE_4(sc, reg,				\
241		CSR_READ_4(sc, reg) & ~(x))
242
243#define SIO_SET(x)					\
244	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))
245
246#define SIO_CLR(x)					\
247	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))
248
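/*
 * Busy-wait by issuing dummy CSR reads: each read must complete a PCI
 * bus transaction, so a handful of back-to-back reads gives the serial
 * EEPROM on the order of the 300 ns of timing margin it needs.
 */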
249static void
250nge_delay(struct nge_softc *sc)
251{
252	int idx;
253
254	for (idx = (300 / 33) + 1; idx > 0; idx--)
255		CSR_READ_4(sc, NGE_CSR);
256}
257
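/*
 * Force the EEPROM into a known idle state: strobe the clock with chip
 * select asserted, then deselect the part and clear the MEAR register.
 */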
258static void
259nge_eeprom_idle(struct nge_softc *sc)
260{
261	int i;
262
263	SIO_SET(NGE_MEAR_EE_CSEL);
264	nge_delay(sc);
265	SIO_SET(NGE_MEAR_EE_CLK);
266	nge_delay(sc);
267
268	for (i = 0; i < 25; i++) {
269		SIO_CLR(NGE_MEAR_EE_CLK);
270		nge_delay(sc);
271		SIO_SET(NGE_MEAR_EE_CLK);
272		nge_delay(sc);
273	}
274
275	SIO_CLR(NGE_MEAR_EE_CLK);
276	nge_delay(sc);
277	SIO_CLR(NGE_MEAR_EE_CSEL);
278	nge_delay(sc);
279	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
280}
281
282/*
283 * Send a read command and address to the EEPROM, check for ACK.
284 */
285static void
286nge_eeprom_putbyte(struct nge_softc *sc, int addr)
287{
288	int d, i;
289
290	d = addr | NGE_EECMD_READ;
291
292	/*
293	 * Feed in each bit and strobe the clock.
294	 */
295	for (i = 0x400; i; i >>= 1) {
296		if (d & i) {
297			SIO_SET(NGE_MEAR_EE_DIN);
298		} else {
299			SIO_CLR(NGE_MEAR_EE_DIN);
300		}
301		nge_delay(sc);
302		SIO_SET(NGE_MEAR_EE_CLK);
303		nge_delay(sc);
304		SIO_CLR(NGE_MEAR_EE_CLK);
305		nge_delay(sc);
306	}
307}
308
309/*
310 * Read a word of data stored in the EEPROM at address 'addr.'
311 */
312static void
313nge_eeprom_getword(struct nge_softc *sc, int addr, uint16_t *dest)
314{
315	int i;
316	uint16_t word = 0;
317
318	/* Force EEPROM to idle state. */
319	nge_eeprom_idle(sc);
320
321	/* Enter EEPROM access mode. */
322	nge_delay(sc);
323	SIO_CLR(NGE_MEAR_EE_CLK);
324	nge_delay(sc);
325	SIO_SET(NGE_MEAR_EE_CSEL);
326	nge_delay(sc);
327
328	/*
329	 * Send address of word we want to read.
330	 */
331	nge_eeprom_putbyte(sc, addr);
332
333	/*
334	 * Start reading bits from EEPROM.
335	 */
336	for (i = 0x8000; i; i >>= 1) {
337		SIO_SET(NGE_MEAR_EE_CLK);
338		nge_delay(sc);
339		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
340			word |= i;
341		nge_delay(sc);
342		SIO_CLR(NGE_MEAR_EE_CLK);
343		nge_delay(sc);
344	}
345
346	/* Turn off EEPROM access mode. */
347	nge_eeprom_idle(sc);
348
349	*dest = word;
350}
351
352/*
353 * Read a sequence of words from the EEPROM.
354 */
355static void
356nge_read_eeprom(struct nge_softc *sc, caddr_t dest, int off, int cnt)
357{
358	int i;
359	uint16_t word = 0, *ptr;
360
361	for (i = 0; i < cnt; i++) {
362		nge_eeprom_getword(sc, off + i, &word);
363		ptr = (uint16_t *)(dest + (i * 2));
364		*ptr = word;
365	}
366}
367
368/*
369 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
370 */
371static void
372nge_mii_sync(struct nge_softc *sc)
373{
374	int i;
375
376	SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);
377
378	for (i = 0; i < 32; i++) {
379		SIO_SET(NGE_MEAR_MII_CLK);
380		DELAY(1);
381		SIO_CLR(NGE_MEAR_MII_CLK);
382		DELAY(1);
383	}
384}
385
386/*
387 * Clock a series of bits through the MII.
388 */
389static void
390nge_mii_send(struct nge_softc *sc, uint32_t bits, int cnt)
391{
392	int i;
393
394	SIO_CLR(NGE_MEAR_MII_CLK);
395
396	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
397		if (bits & i) {
398			SIO_SET(NGE_MEAR_MII_DATA);
399		} else {
400			SIO_CLR(NGE_MEAR_MII_DATA);
401		}
402		DELAY(1);
403		SIO_CLR(NGE_MEAR_MII_CLK);
404		DELAY(1);
405		SIO_SET(NGE_MEAR_MII_CLK);
406	}
407}
408
409/*
410 * Read a PHY register through the MII.
411 */
412static int
413nge_mii_readreg(struct nge_softc *sc, struct nge_mii_frame *frame)
414{
415	int i, ack;
416
417	/*
418	 * Set up frame for RX.
419	 */
420	frame->mii_stdelim = NGE_MII_STARTDELIM;
421	frame->mii_opcode = NGE_MII_READOP;
422	frame->mii_turnaround = 0;
423	frame->mii_data = 0;
424
425	CSR_WRITE_4(sc, NGE_MEAR, 0);
426
427	/*
428 	 * Turn on data xmit.
429	 */
430	SIO_SET(NGE_MEAR_MII_DIR);
431
432	nge_mii_sync(sc);
433
434	/*
435	 * Send command/address info.
436	 */
437	nge_mii_send(sc, frame->mii_stdelim, 2);
438	nge_mii_send(sc, frame->mii_opcode, 2);
439	nge_mii_send(sc, frame->mii_phyaddr, 5);
440	nge_mii_send(sc, frame->mii_regaddr, 5);
441
442	/* Idle bit */
443	SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
444	DELAY(1);
445	SIO_SET(NGE_MEAR_MII_CLK);
446	DELAY(1);
447
448	/* Turn off xmit. */
449	SIO_CLR(NGE_MEAR_MII_DIR);
450	/* Check for ack */
451	SIO_CLR(NGE_MEAR_MII_CLK);
452	DELAY(1);
453	ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
454	SIO_SET(NGE_MEAR_MII_CLK);
455	DELAY(1);
456
457	/*
458	 * Now try reading data bits. If the ack failed, we still
459	 * need to clock through 16 cycles to keep the PHY(s) in sync.
460	 */
461	if (ack) {
462		for (i = 0; i < 16; i++) {
463			SIO_CLR(NGE_MEAR_MII_CLK);
464			DELAY(1);
465			SIO_SET(NGE_MEAR_MII_CLK);
466			DELAY(1);
467		}
468		goto fail;
469	}
470
471	for (i = 0x8000; i; i >>= 1) {
472		SIO_CLR(NGE_MEAR_MII_CLK);
473		DELAY(1);
474		if (!ack) {
475			if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
476				frame->mii_data |= i;
477			DELAY(1);
478		}
479		SIO_SET(NGE_MEAR_MII_CLK);
480		DELAY(1);
481	}
482
483fail:
484
485	SIO_CLR(NGE_MEAR_MII_CLK);
486	DELAY(1);
487	SIO_SET(NGE_MEAR_MII_CLK);
488	DELAY(1);
489
490	if (ack)
491		return (1);
492	return (0);
493}
494
495/*
496 * Write to a PHY register through the MII.
497 */
498static int
499nge_mii_writereg(struct nge_softc *sc, struct nge_mii_frame *frame)
500{
501
502	/*
503	 * Set up frame for TX.
504	 */
505
506	frame->mii_stdelim = NGE_MII_STARTDELIM;
507	frame->mii_opcode = NGE_MII_WRITEOP;
508	frame->mii_turnaround = NGE_MII_TURNAROUND;
509
510	/*
511 	 * Turn on data output.
512	 */
513	SIO_SET(NGE_MEAR_MII_DIR);
514
515	nge_mii_sync(sc);
516
517	nge_mii_send(sc, frame->mii_stdelim, 2);
518	nge_mii_send(sc, frame->mii_opcode, 2);
519	nge_mii_send(sc, frame->mii_phyaddr, 5);
520	nge_mii_send(sc, frame->mii_regaddr, 5);
521	nge_mii_send(sc, frame->mii_turnaround, 2);
522	nge_mii_send(sc, frame->mii_data, 16);
523
524	/* Idle bit. */
525	SIO_SET(NGE_MEAR_MII_CLK);
526	DELAY(1);
527	SIO_CLR(NGE_MEAR_MII_CLK);
528	DELAY(1);
529
530	/*
531	 * Turn off xmit.
532	 */
533	SIO_CLR(NGE_MEAR_MII_DIR);
534
535	return (0);
536}
537
538static int
539nge_miibus_readreg(device_t dev, int phy, int reg)
540{
541	struct nge_softc *sc;
542	struct nge_mii_frame frame;
543	int rv;
544
545	sc = device_get_softc(dev);
546	if ((sc->nge_flags & NGE_FLAG_TBI) != 0) {
547		/* Pretend PHY is at address 0. */
548		if (phy != 0)
549			return (0);
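		/* Map the standard MII registers onto the chip's TBI register space. */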
550		switch (reg) {
551		case MII_BMCR:
552			reg = NGE_TBI_BMCR;
553			break;
554		case MII_BMSR:
555			/* The 83820/83821 have a different bit layout for BMSR. */
556			rv = BMSR_ANEG | BMSR_EXTCAP | BMSR_EXTSTAT;
557			reg = CSR_READ_4(sc, NGE_TBI_BMSR);
558			if ((reg & NGE_TBIBMSR_ANEG_DONE) != 0)
559				rv |= BMSR_ACOMP;
560			if ((reg & NGE_TBIBMSR_LINKSTAT) != 0)
561				rv |= BMSR_LINK;
562			return (rv);
563		case MII_ANAR:
564			reg = NGE_TBI_ANAR;
565			break;
566		case MII_ANLPAR:
567			reg = NGE_TBI_ANLPAR;
568			break;
569		case MII_ANER:
570			reg = NGE_TBI_ANER;
571			break;
572		case MII_EXTSR:
573			reg = NGE_TBI_ESR;
574			break;
575		case MII_PHYIDR1:
576		case MII_PHYIDR2:
577			return (0);
578		default:
579			device_printf(sc->nge_dev,
580			    "bad phy register read : %d\n", reg);
581			return (0);
582		}
583		return (CSR_READ_4(sc, reg));
584	}
585
586	bzero((char *)&frame, sizeof(frame));
587
588	frame.mii_phyaddr = phy;
589	frame.mii_regaddr = reg;
590	nge_mii_readreg(sc, &frame);
591
592	return (frame.mii_data);
593}
594
595static int
596nge_miibus_writereg(device_t dev, int phy, int reg, int data)
597{
598	struct nge_softc *sc;
599	struct nge_mii_frame frame;
600
601	sc = device_get_softc(dev);
602	if ((sc->nge_flags & NGE_FLAG_TBI) != 0) {
603		/* Pretend PHY is at address 0. */
604		if (phy != 0)
605			return (0);
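		/* Map the standard MII registers onto the chip's TBI register space. */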
606		switch (reg) {
607		case MII_BMCR:
608			reg = NGE_TBI_BMCR;
609			break;
610		case MII_BMSR:
611			return (0);
612		case MII_ANAR:
613			reg = NGE_TBI_ANAR;
614			break;
615		case MII_ANLPAR:
616			reg = NGE_TBI_ANLPAR;
617			break;
618		case MII_ANER:
619			reg = NGE_TBI_ANER;
620			break;
621		case MII_EXTSR:
622			reg = NGE_TBI_ESR;
623			break;
624		case MII_PHYIDR1:
625		case MII_PHYIDR2:
626			return (0);
627		default:
628			device_printf(sc->nge_dev,
629			    "bad phy register write : %d\n", reg);
630			return (0);
631		}
632		CSR_WRITE_4(sc, reg, data);
633		return (0);
634	}
635
636	bzero((char *)&frame, sizeof(frame));
637
638	frame.mii_phyaddr = phy;
639	frame.mii_regaddr = reg;
640	frame.mii_data = data;
641	nge_mii_writereg(sc, &frame);
642
643	return (0);
644}
645
646/*
647 * Media status/link state change handler.
648 */
649static void
650nge_miibus_statchg(device_t dev)
651{
652	struct nge_softc *sc;
653	struct mii_data *mii;
654	struct ifnet *ifp;
655	struct nge_txdesc *txd;
656	uint32_t done, reg, status;
657	int i;
658
659	sc = device_get_softc(dev);
660	NGE_LOCK_ASSERT(sc);
661
662	mii = device_get_softc(sc->nge_miibus);
663	ifp = sc->nge_ifp;
664	if (mii == NULL || ifp == NULL ||
665	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
666		return;
667
668	sc->nge_flags &= ~NGE_FLAG_LINK;
669	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
670	    (IFM_AVALID | IFM_ACTIVE)) {
671		switch (IFM_SUBTYPE(mii->mii_media_active)) {
672		case IFM_10_T:
673		case IFM_100_TX:
674		case IFM_1000_T:
675		case IFM_1000_SX:
676		case IFM_1000_LX:
677		case IFM_1000_CX:
678			sc->nge_flags |= NGE_FLAG_LINK;
679			break;
680		default:
681			break;
682		}
683	}
684
685	/* Stop Tx/Rx MACs. */
686	if (nge_stop_mac(sc) == ETIMEDOUT)
687		device_printf(sc->nge_dev,
688		    "%s: unable to stop Tx/Rx MAC\n", __func__);
689	nge_txeof(sc);
690	nge_rxeof(sc);
691	if (sc->nge_head != NULL) {
692		m_freem(sc->nge_head);
693		sc->nge_head = sc->nge_tail = NULL;
694	}
695
696	/* Release queued frames. */
697	for (i = 0; i < NGE_TX_RING_CNT; i++) {
698		txd = &sc->nge_cdata.nge_txdesc[i];
699		if (txd->tx_m != NULL) {
700			bus_dmamap_sync(sc->nge_cdata.nge_tx_tag,
701			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
702			bus_dmamap_unload(sc->nge_cdata.nge_tx_tag,
703			    txd->tx_dmamap);
704			m_freem(txd->tx_m);
705			txd->tx_m = NULL;
706		}
707	}
708
709	/* Program MAC with resolved speed/duplex. */
710	if ((sc->nge_flags & NGE_FLAG_LINK) != 0) {
711		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
712			NGE_SETBIT(sc, NGE_TX_CFG,
713			    (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR));
714			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
715#ifdef notyet
716			/* Enable flow-control. */
717			if ((IFM_OPTIONS(mii->mii_media_active) &
718			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) != 0)
719				NGE_SETBIT(sc, NGE_PAUSECSR,
720				    NGE_PAUSECSR_PAUSE_ENB);
721#endif
722		} else {
723			NGE_CLRBIT(sc, NGE_TX_CFG,
724			    (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR));
725			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
726			NGE_CLRBIT(sc, NGE_PAUSECSR, NGE_PAUSECSR_PAUSE_ENB);
727		}
728		/* If we have a 1000Mbps link, set the mode_1000 bit. */
729		reg = CSR_READ_4(sc, NGE_CFG);
730		switch (IFM_SUBTYPE(mii->mii_media_active)) {
731		case IFM_1000_SX:
732		case IFM_1000_LX:
733		case IFM_1000_CX:
734		case IFM_1000_T:
735			reg |= NGE_CFG_MODE_1000;
736			break;
737		default:
738			reg &= ~NGE_CFG_MODE_1000;
739			break;
740		}
741		CSR_WRITE_4(sc, NGE_CFG, reg);
742
743		/* Reset Tx/Rx MAC. */
744		reg = CSR_READ_4(sc, NGE_CSR);
745		reg |= NGE_CSR_TX_RESET | NGE_CSR_RX_RESET;
746		CSR_WRITE_4(sc, NGE_CSR, reg);
747		/* Check the completion of reset. */
748		done = 0;
749		for (i = 0; i < NGE_TIMEOUT; i++) {
750			DELAY(1);
751			status = CSR_READ_4(sc, NGE_ISR);
752			if ((status & NGE_ISR_RX_RESET_DONE) != 0)
753				done |= NGE_ISR_RX_RESET_DONE;
754			if ((status & NGE_ISR_TX_RESET_DONE) != 0)
755				done |= NGE_ISR_TX_RESET_DONE;
756			if (done ==
757			    (NGE_ISR_TX_RESET_DONE | NGE_ISR_RX_RESET_DONE))
758				break;
759		}
760		if (i == NGE_TIMEOUT)
761			device_printf(sc->nge_dev,
762			    "%s: unable to reset Tx/Rx MAC\n", __func__);
763		/* Reuse Rx buffer and reset consumer pointer. */
764		sc->nge_cdata.nge_rx_cons = 0;
765		/*
766		 * It seems that resetting the Rx/Tx MAC also resets the
767		 * Tx/Rx descriptor pointer registers, so the Tx/Rx list
768		 * addresses have to be reloaded.
769		 */
770		CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI,
771		    NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr));
772		CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO,
773		    NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr));
774		CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI,
775		    NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr));
776		CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO,
777		    NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr));
778		/* Reinitialize Tx buffers. */
779		nge_list_tx_init(sc);
780
781		/* Restart Rx MAC. */
782		reg = CSR_READ_4(sc, NGE_CSR);
783		reg |= NGE_CSR_RX_ENABLE;
784		CSR_WRITE_4(sc, NGE_CSR, reg);
785		for (i = 0; i < NGE_TIMEOUT; i++) {
786			if ((CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RX_ENABLE) != 0)
787				break;
788			DELAY(1);
789		}
790		if (i == NGE_TIMEOUT)
791			device_printf(sc->nge_dev,
792			    "%s: unable to restart Rx MAC\n", __func__);
793	}
794
795	/* Data LED off for TBI mode */
796	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
797		CSR_WRITE_4(sc, NGE_GPIO,
798		    CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT);
799}
800
801static void
802nge_rxfilter(struct nge_softc *sc)
803{
804	struct ifnet *ifp;
805	struct ifmultiaddr *ifma;
806	uint32_t h, i, rxfilt;
807	int bit, index;
808
809	NGE_LOCK_ASSERT(sc);
810	ifp = sc->nge_ifp;
811
812	/* Make sure to stop Rx filtering. */
813	rxfilt = CSR_READ_4(sc, NGE_RXFILT_CTL);
814	rxfilt &= ~NGE_RXFILTCTL_ENABLE;
815	CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
816	CSR_BARRIER_WRITE_4(sc, NGE_RXFILT_CTL);
817
818	rxfilt &= ~(NGE_RXFILTCTL_ALLMULTI | NGE_RXFILTCTL_ALLPHYS);
819	rxfilt &= ~NGE_RXFILTCTL_BROAD;
820	/*
821	 * We don't want to use the hash table for matching unicast
822	 * addresses.
823	 */
824	rxfilt &= ~(NGE_RXFILTCTL_MCHASH | NGE_RXFILTCTL_UCHASH);
825
826	/*
827	 * For the NatSemi chip, we have to explicitly enable the
828	 * reception of ARP frames, as well as turn on the 'perfect
829	 * match' filter where we store the station address, otherwise
830	 * we won't receive unicasts meant for this host.
831	 */
832	rxfilt |= NGE_RXFILTCTL_ARP | NGE_RXFILTCTL_PERFECT;
833
834	/*
835	 * Set the capture broadcast bit to capture broadcast frames.
836	 */
837	if ((ifp->if_flags & IFF_BROADCAST) != 0)
838		rxfilt |= NGE_RXFILTCTL_BROAD;
839
840	if ((ifp->if_flags & IFF_PROMISC) != 0 ||
841	    (ifp->if_flags & IFF_ALLMULTI) != 0) {
842		rxfilt |= NGE_RXFILTCTL_ALLMULTI;
843		if ((ifp->if_flags & IFF_PROMISC) != 0)
844			rxfilt |= NGE_RXFILTCTL_ALLPHYS;
845		goto done;
846	}
847
848	/*
849	 * We have to explicitly enable the multicast hash table
850	 * on the NatSemi chip if we want to use it, which we do.
851	 */
852	rxfilt |= NGE_RXFILTCTL_MCHASH;
853
854	/* first, zot all the existing hash bits */
855	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
856		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
857		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
858	}
859
860	/*
861	 * From the 11 bits returned by the crc routine, the top 7
862	 * bits represent the 16-bit word in the mcast hash table
863	 * that needs to be updated, and the lower 4 bits represent
864	 * which bit within that word needs to be set.
865	 */
866	IF_ADDR_LOCK(ifp);
867	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
868		if (ifma->ifma_addr->sa_family != AF_LINK)
869			continue;
870		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
871		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 21;
872		index = (h >> 4) & 0x7F;
873		bit = h & 0xF;
874		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
875		    NGE_FILTADDR_MCAST_LO + (index * 2));
876		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
877	}
878	IF_ADDR_UNLOCK(ifp);
879
880done:
881	CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
882	/* Turn the receive filter on. */
883	rxfilt |= NGE_RXFILTCTL_ENABLE;
884	CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
885	CSR_BARRIER_WRITE_4(sc, NGE_RXFILT_CTL);
886}
887
888static void
889nge_reset(struct nge_softc *sc)
890{
891	uint32_t v;
892	int i;
893
894	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);
895
896	for (i = 0; i < NGE_TIMEOUT; i++) {
897		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
898			break;
899		DELAY(1);
900	}
901
902	if (i == NGE_TIMEOUT)
903		device_printf(sc->nge_dev, "reset never completed\n");
904
905	/* Wait a little while for the chip to get its brains in order. */
906	DELAY(1000);
907
908	/*
909	 * If this is a NatSemi chip, make sure to clear
910	 * PME mode.
911	 */
912	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
913	CSR_WRITE_4(sc, NGE_CLKRUN, 0);
914
915	/* Clear WOL events which may interfere with normal Rx filter operation. */
916	CSR_WRITE_4(sc, NGE_WOLCSR, 0);
917
918	/*
919	 * Only the DP83820 supports 64-bit addressing/data transfers, and
920	 * 64-bit addressing requires different descriptor structures.
921	 * To keep it simple, disable 64-bit addressing/data transfers.
922	 */
923	v = CSR_READ_4(sc, NGE_CFG);
924	v &= ~(NGE_CFG_64BIT_ADDR_ENB | NGE_CFG_64BIT_DATA_ENB);
925	CSR_WRITE_4(sc, NGE_CFG, v);
926}
927
928/*
929 * Probe for a NatSemi chip. Check the PCI vendor and device
930 * IDs against our list and return a device name if we find a match.
931 */
932static int
933nge_probe(device_t dev)
934{
935	struct nge_type *t;
936
937	t = nge_devs;
938
939	while (t->nge_name != NULL) {
940		if ((pci_get_vendor(dev) == t->nge_vid) &&
941		    (pci_get_device(dev) == t->nge_did)) {
942			device_set_desc(dev, t->nge_name);
943			return (BUS_PROBE_DEFAULT);
944		}
945		t++;
946	}
947
948	return (ENXIO);
949}
950
951/*
952 * Attach the interface. Allocate softc structures, do ifmedia
953 * setup and ethernet/BPF attach.
954 */
955static int
956nge_attach(device_t dev)
957{
958	uint8_t eaddr[ETHER_ADDR_LEN];
959	uint16_t ea[ETHER_ADDR_LEN/2], ea_temp, reg;
960	struct nge_softc *sc;
961	struct ifnet *ifp;
962	int error, i, rid;
963
964	error = 0;
965	sc = device_get_softc(dev);
966	sc->nge_dev = dev;
967
968	NGE_LOCK_INIT(sc, device_get_nameunit(dev));
969	callout_init_mtx(&sc->nge_stat_ch, &sc->nge_mtx, 0);
970
971	/*
972	 * Map control/status registers.
973	 */
974	pci_enable_busmaster(dev);
975
976#ifdef NGE_USEIOSPACE
977	sc->nge_res_type = SYS_RES_IOPORT;
978	sc->nge_res_id = PCIR_BAR(0);
979#else
980	sc->nge_res_type = SYS_RES_MEMORY;
981	sc->nge_res_id = PCIR_BAR(1);
982#endif
983	sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type,
984	    &sc->nge_res_id, RF_ACTIVE);
985
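	/*
	 * If the preferred BAR could not be mapped, retry with the
	 * other resource type (memory vs. I/O) before giving up.
	 */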
986	if (sc->nge_res == NULL) {
987		if (sc->nge_res_type == SYS_RES_MEMORY) {
988			sc->nge_res_type = SYS_RES_IOPORT;
989			sc->nge_res_id = PCIR_BAR(0);
990		} else {
991			sc->nge_res_type = SYS_RES_MEMORY;
992			sc->nge_res_id = PCIR_BAR(1);
993		}
994		sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type,
995		    &sc->nge_res_id, RF_ACTIVE);
996		if (sc->nge_res == NULL) {
997			device_printf(dev, "couldn't allocate %s resources\n",
998			    sc->nge_res_type == SYS_RES_MEMORY ? "memory" :
999			    "I/O");
1000			NGE_LOCK_DESTROY(sc);
1001			return (ENXIO);
1002		}
1003	}
1004
1005	/* Allocate interrupt */
1006	rid = 0;
1007	sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1008	    RF_SHAREABLE | RF_ACTIVE);
1009
1010	if (sc->nge_irq == NULL) {
1011		device_printf(dev, "couldn't map interrupt\n");
1012		error = ENXIO;
1013		goto fail;
1014	}
1015
1016	/* Enable MWI. */
1017	reg = pci_read_config(dev, PCIR_COMMAND, 2);
1018	reg |= PCIM_CMD_MWRICEN;
1019	pci_write_config(dev, PCIR_COMMAND, reg, 2);
1020
1021	/* Reset the adapter. */
1022	nge_reset(sc);
1023
1024	/*
1025	 * Get station address from the EEPROM.
1026	 */
1027	nge_read_eeprom(sc, (caddr_t)ea, NGE_EE_NODEADDR, 3);
1028	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
1029		ea[i] = le16toh(ea[i]);
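	/* The EEPROM stores the station address words in reverse order; swap them. */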
1030	ea_temp = ea[0];
1031	ea[0] = ea[2];
1032	ea[2] = ea_temp;
1033	bcopy(ea, eaddr, sizeof(eaddr));
1034
1035	if (nge_dma_alloc(sc) != 0) {
1036		error = ENXIO;
1037		goto fail;
1038	}
1039
1040	nge_sysctl_node(sc);
1041
1042	ifp = sc->nge_ifp = if_alloc(IFT_ETHER);
1043	if (ifp == NULL) {
1044		device_printf(dev, "can not allocate ifnet structure\n");
1045		error = ENOSPC;
1046		goto fail;
1047	}
1048	ifp->if_softc = sc;
1049	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1050	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1051	ifp->if_ioctl = nge_ioctl;
1052	ifp->if_start = nge_start;
1053	ifp->if_init = nge_init;
1054	ifp->if_snd.ifq_drv_maxlen = NGE_TX_RING_CNT - 1;
1055	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1056	IFQ_SET_READY(&ifp->if_snd);
1057	ifp->if_hwassist = NGE_CSUM_FEATURES;
1058	ifp->if_capabilities = IFCAP_HWCSUM;
1059	/*
1060	 * It seems that some hardware doesn't provide the 3.3V auxiliary
1061	 * supply (3VAUX) needed to drive PME, so checking the PCI power
1062	 * management capability is necessary.
1063	 */
1064	if (pci_find_extcap(sc->nge_dev, PCIY_PMG, &i) == 0)
1065		ifp->if_capabilities |= IFCAP_WOL;
1066	ifp->if_capenable = ifp->if_capabilities;
1067
1068	if ((CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) != 0) {
1069		sc->nge_flags |= NGE_FLAG_TBI;
1070		device_printf(dev, "Using TBI\n");
1071		/* Configure GPIO. */
1072		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1073		    | NGE_GPIO_GP4_OUT
1074		    | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
1075		    | NGE_GPIO_GP3_OUTENB
1076		    | NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN);
1077	}
1078
1079	/*
1080	 * Do MII setup.
1081	 */
1082	error = mii_phy_probe(dev, &sc->nge_miibus, nge_mediachange,
1083	    nge_mediastatus);
1084	if (error != 0) {
1085		device_printf(dev, "no PHY found!\n");
1086		goto fail;
1087	}
1088
1089	/*
1090	 * Call MI attach routine.
1091	 */
1092	ether_ifattach(ifp, eaddr);
1093
1094	/* VLAN capability setup. */
1095	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1096	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1097	ifp->if_capenable = ifp->if_capabilities;
1098#ifdef DEVICE_POLLING
1099	ifp->if_capabilities |= IFCAP_POLLING;
1100#endif
1101	/*
1102	 * Tell the upper layer(s) we support long frames.
1103	 * Must appear after the call to ether_ifattach() because
1104	 * ether_ifattach() sets ifi_hdrlen to the default value.
1105	 */
1106	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1107
1108	/*
1109	 * Hookup IRQ last.
1110	 */
1111	error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET | INTR_MPSAFE,
1112	    NULL, nge_intr, sc, &sc->nge_intrhand);
1113	if (error) {
1114		device_printf(dev, "couldn't set up irq\n");
1115		goto fail;
1116	}
1117
1118fail:
1119	if (error != 0)
1120		nge_detach(dev);
1121	return (error);
1122}
1123
1124static int
1125nge_detach(device_t dev)
1126{
1127	struct nge_softc *sc;
1128	struct ifnet *ifp;
1129
1130	sc = device_get_softc(dev);
1131	ifp = sc->nge_ifp;
1132
1133#ifdef DEVICE_POLLING
1134	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
1135		ether_poll_deregister(ifp);
1136#endif
1137
1138	if (device_is_attached(dev)) {
1139		NGE_LOCK(sc);
1140		sc->nge_flags |= NGE_FLAG_DETACH;
1141		nge_stop(sc);
1142		NGE_UNLOCK(sc);
1143		callout_drain(&sc->nge_stat_ch);
1144		if (ifp != NULL)
1145			ether_ifdetach(ifp);
1146	}
1147
1148	if (sc->nge_miibus != NULL) {
1149		device_delete_child(dev, sc->nge_miibus);
1150		sc->nge_miibus = NULL;
1151	}
1152	bus_generic_detach(dev);
1153	if (sc->nge_intrhand != NULL)
1154		bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
1155	if (sc->nge_irq != NULL)
1156		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
1157	if (sc->nge_res != NULL)
1158		bus_release_resource(dev, sc->nge_res_type, sc->nge_res_id,
1159		    sc->nge_res);
1160
1161	nge_dma_free(sc);
1162	if (ifp != NULL)
1163		if_free(ifp);
1164
1165	NGE_LOCK_DESTROY(sc);
1166
1167	return (0);
1168}
1169
1170struct nge_dmamap_arg {
1171	bus_addr_t	nge_busaddr;
1172};
1173
1174static void
1175nge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1176{
1177	struct nge_dmamap_arg *ctx;
1178
1179	if (error != 0)
1180		return;
1181	ctx = arg;
1182	ctx->nge_busaddr = segs[0].ds_addr;
1183}
1184
1185static int
1186nge_dma_alloc(struct nge_softc *sc)
1187{
1188	struct nge_dmamap_arg ctx;
1189	struct nge_txdesc *txd;
1190	struct nge_rxdesc *rxd;
1191	int error, i;
1192
1193	/* Create parent DMA tag. */
1194	error = bus_dma_tag_create(
1195	    bus_get_dma_tag(sc->nge_dev),	/* parent */
1196	    1, 0,			/* alignment, boundary */
1197	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1198	    BUS_SPACE_MAXADDR,		/* highaddr */
1199	    NULL, NULL,			/* filter, filterarg */
1200	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1201	    0,				/* nsegments */
1202	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1203	    0,				/* flags */
1204	    NULL, NULL,			/* lockfunc, lockarg */
1205	    &sc->nge_cdata.nge_parent_tag);
1206	if (error != 0) {
1207		device_printf(sc->nge_dev, "failed to create parent DMA tag\n");
1208		goto fail;
1209	}
1210	/* Create tag for Tx ring. */
1211	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
1212	    NGE_RING_ALIGN, 0,		/* alignment, boundary */
1213	    BUS_SPACE_MAXADDR,		/* lowaddr */
1214	    BUS_SPACE_MAXADDR,		/* highaddr */
1215	    NULL, NULL,			/* filter, filterarg */
1216	    NGE_TX_RING_SIZE,		/* maxsize */
1217	    1,				/* nsegments */
1218	    NGE_TX_RING_SIZE,		/* maxsegsize */
1219	    0,				/* flags */
1220	    NULL, NULL,			/* lockfunc, lockarg */
1221	    &sc->nge_cdata.nge_tx_ring_tag);
1222	if (error != 0) {
1223		device_printf(sc->nge_dev, "failed to create Tx ring DMA tag\n");
1224		goto fail;
1225	}
1226
1227	/* Create tag for Rx ring. */
1228	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
1229	    NGE_RING_ALIGN, 0,		/* alignment, boundary */
1230	    BUS_SPACE_MAXADDR,		/* lowaddr */
1231	    BUS_SPACE_MAXADDR,		/* highaddr */
1232	    NULL, NULL,			/* filter, filterarg */
1233	    NGE_RX_RING_SIZE,		/* maxsize */
1234	    1,				/* nsegments */
1235	    NGE_RX_RING_SIZE,		/* maxsegsize */
1236	    0,				/* flags */
1237	    NULL, NULL,			/* lockfunc, lockarg */
1238	    &sc->nge_cdata.nge_rx_ring_tag);
1239	if (error != 0) {
1240		device_printf(sc->nge_dev,
1241		    "failed to create Rx ring DMA tag\n");
1242		goto fail;
1243	}
1244
1245	/* Create tag for Tx buffers. */
1246	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
1247	    1, 0,			/* alignment, boundary */
1248	    BUS_SPACE_MAXADDR,		/* lowaddr */
1249	    BUS_SPACE_MAXADDR,		/* highaddr */
1250	    NULL, NULL,			/* filter, filterarg */
1251	    MCLBYTES * NGE_MAXTXSEGS,	/* maxsize */
1252	    NGE_MAXTXSEGS,		/* nsegments */
1253	    MCLBYTES,			/* maxsegsize */
1254	    0,				/* flags */
1255	    NULL, NULL,			/* lockfunc, lockarg */
1256	    &sc->nge_cdata.nge_tx_tag);
1257	if (error != 0) {
1258		device_printf(sc->nge_dev, "failed to create Tx DMA tag\n");
1259		goto fail;
1260	}
1261
1262	/* Create tag for Rx buffers. */
1263	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
1264	    NGE_RX_ALIGN, 0,		/* alignment, boundary */
1265	    BUS_SPACE_MAXADDR,		/* lowaddr */
1266	    BUS_SPACE_MAXADDR,		/* highaddr */
1267	    NULL, NULL,			/* filter, filterarg */
1268	    MCLBYTES,			/* maxsize */
1269	    1,				/* nsegments */
1270	    MCLBYTES,			/* maxsegsize */
1271	    0,				/* flags */
1272	    NULL, NULL,			/* lockfunc, lockarg */
1273	    &sc->nge_cdata.nge_rx_tag);
1274	if (error != 0) {
1275		device_printf(sc->nge_dev, "failed to create Rx DMA tag\n");
1276		goto fail;
1277	}
1278
1279	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
1280	error = bus_dmamem_alloc(sc->nge_cdata.nge_tx_ring_tag,
1281	    (void **)&sc->nge_rdata.nge_tx_ring, BUS_DMA_WAITOK |
1282	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_tx_ring_map);
1283	if (error != 0) {
1284		device_printf(sc->nge_dev,
1285		    "failed to allocate DMA'able memory for Tx ring\n");
1286		goto fail;
1287	}
1288
1289	ctx.nge_busaddr = 0;
1290	error = bus_dmamap_load(sc->nge_cdata.nge_tx_ring_tag,
1291	    sc->nge_cdata.nge_tx_ring_map, sc->nge_rdata.nge_tx_ring,
1292	    NGE_TX_RING_SIZE, nge_dmamap_cb, &ctx, 0);
1293	if (error != 0 || ctx.nge_busaddr == 0) {
1294		device_printf(sc->nge_dev,
1295		    "failed to load DMA'able memory for Tx ring\n");
1296		goto fail;
1297	}
1298	sc->nge_rdata.nge_tx_ring_paddr = ctx.nge_busaddr;
1299
1300	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
1301	error = bus_dmamem_alloc(sc->nge_cdata.nge_rx_ring_tag,
1302	    (void **)&sc->nge_rdata.nge_rx_ring, BUS_DMA_WAITOK |
1303	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_rx_ring_map);
1304	if (error != 0) {
1305		device_printf(sc->nge_dev,
1306		    "failed to allocate DMA'able memory for Rx ring\n");
1307		goto fail;
1308	}
1309
1310	ctx.nge_busaddr = 0;
1311	error = bus_dmamap_load(sc->nge_cdata.nge_rx_ring_tag,
1312	    sc->nge_cdata.nge_rx_ring_map, sc->nge_rdata.nge_rx_ring,
1313	    NGE_RX_RING_SIZE, nge_dmamap_cb, &ctx, 0);
1314	if (error != 0 || ctx.nge_busaddr == 0) {
1315		device_printf(sc->nge_dev,
1316		    "failed to load DMA'able memory for Rx ring\n");
1317		goto fail;
1318	}
1319	sc->nge_rdata.nge_rx_ring_paddr = ctx.nge_busaddr;
1320
1321	/* Create DMA maps for Tx buffers. */
1322	for (i = 0; i < NGE_TX_RING_CNT; i++) {
1323		txd = &sc->nge_cdata.nge_txdesc[i];
1324		txd->tx_m = NULL;
1325		txd->tx_dmamap = NULL;
1326		error = bus_dmamap_create(sc->nge_cdata.nge_tx_tag, 0,
1327		    &txd->tx_dmamap);
1328		if (error != 0) {
1329			device_printf(sc->nge_dev,
1330			    "failed to create Tx dmamap\n");
1331			goto fail;
1332		}
1333	}
1334	/* Create DMA maps for Rx buffers. */
1335	if ((error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0,
1336	    &sc->nge_cdata.nge_rx_sparemap)) != 0) {
1337		device_printf(sc->nge_dev,
1338		    "failed to create spare Rx dmamap\n");
1339		goto fail;
1340	}
1341	for (i = 0; i < NGE_RX_RING_CNT; i++) {
1342		rxd = &sc->nge_cdata.nge_rxdesc[i];
1343		rxd->rx_m = NULL;
1344		rxd->rx_dmamap = NULL;
1345		error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0,
1346		    &rxd->rx_dmamap);
1347		if (error != 0) {
1348			device_printf(sc->nge_dev,
1349			    "failed to create Rx dmamap\n");
1350			goto fail;
1351		}
1352	}
1353
1354fail:
1355	return (error);
1356}
1357
1358static void
1359nge_dma_free(struct nge_softc *sc)
1360{
1361	struct nge_txdesc *txd;
1362	struct nge_rxdesc *rxd;
1363	int i;
1364
1365	/* Tx ring. */
1366	if (sc->nge_cdata.nge_tx_ring_tag) {
1367		if (sc->nge_cdata.nge_tx_ring_map)
1368			bus_dmamap_unload(sc->nge_cdata.nge_tx_ring_tag,
1369			    sc->nge_cdata.nge_tx_ring_map);
1370		if (sc->nge_cdata.nge_tx_ring_map &&
1371		    sc->nge_rdata.nge_tx_ring)
1372			bus_dmamem_free(sc->nge_cdata.nge_tx_ring_tag,
1373			    sc->nge_rdata.nge_tx_ring,
1374			    sc->nge_cdata.nge_tx_ring_map);
1375		sc->nge_rdata.nge_tx_ring = NULL;
1376		sc->nge_cdata.nge_tx_ring_map = NULL;
1377		bus_dma_tag_destroy(sc->nge_cdata.nge_tx_ring_tag);
1378		sc->nge_cdata.nge_tx_ring_tag = NULL;
1379	}
1380	/* Rx ring. */
1381	if (sc->nge_cdata.nge_rx_ring_tag) {
1382		if (sc->nge_cdata.nge_rx_ring_map)
1383			bus_dmamap_unload(sc->nge_cdata.nge_rx_ring_tag,
1384			    sc->nge_cdata.nge_rx_ring_map);
1385		if (sc->nge_cdata.nge_rx_ring_map &&
1386		    sc->nge_rdata.nge_rx_ring)
1387			bus_dmamem_free(sc->nge_cdata.nge_rx_ring_tag,
1388			    sc->nge_rdata.nge_rx_ring,
1389			    sc->nge_cdata.nge_rx_ring_map);
1390		sc->nge_rdata.nge_rx_ring = NULL;
1391		sc->nge_cdata.nge_rx_ring_map = NULL;
1392		bus_dma_tag_destroy(sc->nge_cdata.nge_rx_ring_tag);
1393		sc->nge_cdata.nge_rx_ring_tag = NULL;
1394	}
1395	/* Tx buffers. */
1396	if (sc->nge_cdata.nge_tx_tag) {
1397		for (i = 0; i < NGE_TX_RING_CNT; i++) {
1398			txd = &sc->nge_cdata.nge_txdesc[i];
1399			if (txd->tx_dmamap) {
1400				bus_dmamap_destroy(sc->nge_cdata.nge_tx_tag,
1401				    txd->tx_dmamap);
1402				txd->tx_dmamap = NULL;
1403			}
1404		}
1405		bus_dma_tag_destroy(sc->nge_cdata.nge_tx_tag);
1406		sc->nge_cdata.nge_tx_tag = NULL;
1407	}
1408	/* Rx buffers. */
1409	if (sc->nge_cdata.nge_rx_tag) {
1410		for (i = 0; i < NGE_RX_RING_CNT; i++) {
1411			rxd = &sc->nge_cdata.nge_rxdesc[i];
1412			if (rxd->rx_dmamap) {
1413				bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag,
1414				    rxd->rx_dmamap);
1415				rxd->rx_dmamap = NULL;
1416			}
1417		}
1418		if (sc->nge_cdata.nge_rx_sparemap) {
1419			bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag,
1420			    sc->nge_cdata.nge_rx_sparemap);
1421			sc->nge_cdata.nge_rx_sparemap = 0;
1422		}
1423		bus_dma_tag_destroy(sc->nge_cdata.nge_rx_tag);
1424		sc->nge_cdata.nge_rx_tag = NULL;
1425	}
1426
1427	if (sc->nge_cdata.nge_parent_tag) {
1428		bus_dma_tag_destroy(sc->nge_cdata.nge_parent_tag);
1429		sc->nge_cdata.nge_parent_tag = NULL;
1430	}
1431}
1432
1433/*
1434 * Initialize the transmit descriptors.
1435 */
1436static int
1437nge_list_tx_init(struct nge_softc *sc)
1438{
1439	struct nge_ring_data *rd;
1440	struct nge_txdesc *txd;
1441	bus_addr_t addr;
1442	int i;
1443
1444	sc->nge_cdata.nge_tx_prod = 0;
1445	sc->nge_cdata.nge_tx_cons = 0;
1446	sc->nge_cdata.nge_tx_cnt = 0;
1447
1448	rd = &sc->nge_rdata;
1449	bzero(rd->nge_tx_ring, sizeof(struct nge_desc) * NGE_TX_RING_CNT);
1450	for (i = 0; i < NGE_TX_RING_CNT; i++) {
1451		if (i == NGE_TX_RING_CNT - 1)
1452			addr = NGE_TX_RING_ADDR(sc, 0);
1453		else
1454			addr = NGE_TX_RING_ADDR(sc, i + 1);
1455		rd->nge_tx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr));
1456		txd = &sc->nge_cdata.nge_txdesc[i];
1457		txd->tx_m = NULL;
1458	}
1459
1460	bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
1461	    sc->nge_cdata.nge_tx_ring_map,
1462	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1463
1464	return (0);
1465}
1466
1467/*
1468 * Initialize the RX descriptors and allocate mbufs for them. Note that
1469 * we arrange the descriptors in a closed ring, so that the last descriptor
1470 * points back to the first.
1471 */
1472static int
1473nge_list_rx_init(struct nge_softc *sc)
1474{
1475	struct nge_ring_data *rd;
1476	bus_addr_t addr;
1477	int i;
1478
1479	sc->nge_cdata.nge_rx_cons = 0;
1480	sc->nge_head = sc->nge_tail = NULL;
1481
1482	rd = &sc->nge_rdata;
1483	bzero(rd->nge_rx_ring, sizeof(struct nge_desc) * NGE_RX_RING_CNT);
1484	for (i = 0; i < NGE_RX_RING_CNT; i++) {
1485		if (nge_newbuf(sc, i) != 0)
1486			return (ENOBUFS);
1487		if (i == NGE_RX_RING_CNT - 1)
1488			addr = NGE_RX_RING_ADDR(sc, 0);
1489		else
1490			addr = NGE_RX_RING_ADDR(sc, i + 1);
1491		rd->nge_rx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr));
1492	}
1493
1494	bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
1495	    sc->nge_cdata.nge_rx_ring_map,
1496	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1497
1498	return (0);
1499}
1500
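/*
 * Reuse the mbuf already attached to a ring slot: reinitialize the
 * descriptor's command/status word so the chip can fill the buffer again.
 */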
1501static __inline void
1502nge_discard_rxbuf(struct nge_softc *sc, int idx)
1503{
1504	struct nge_desc *desc;
1505
1506	desc = &sc->nge_rdata.nge_rx_ring[idx];
1507	desc->nge_cmdsts = htole32(MCLBYTES - sizeof(uint64_t));
1508	desc->nge_extsts = 0;
1509}
1510
1511/*
1512 * Initialize an RX descriptor and attach an MBUF cluster.
1513 */
1514static int
1515nge_newbuf(struct nge_softc *sc, int idx)
1516{
1517	struct nge_desc *desc;
1518	struct nge_rxdesc *rxd;
1519	struct mbuf *m;
1520	bus_dma_segment_t segs[1];
1521	bus_dmamap_t map;
1522	int nsegs;
1523
1524	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1525	if (m == NULL)
1526		return (ENOBUFS);
1527	m->m_len = m->m_pkthdr.len = MCLBYTES;
1528	m_adj(m, sizeof(uint64_t));
1529
1530	if (bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_rx_tag,
1531	    sc->nge_cdata.nge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1532		m_freem(m);
1533		return (ENOBUFS);
1534	}
1535	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1536
1537	rxd = &sc->nge_cdata.nge_rxdesc[idx];
1538	if (rxd->rx_m != NULL) {
1539		bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap,
1540		    BUS_DMASYNC_POSTREAD);
1541		bus_dmamap_unload(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap);
1542	}
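	/*
	 * The new mbuf was loaded into the spare map above; swap it into
	 * this ring slot and keep the old map as the next spare.
	 */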
1543	map = rxd->rx_dmamap;
1544	rxd->rx_dmamap = sc->nge_cdata.nge_rx_sparemap;
1545	sc->nge_cdata.nge_rx_sparemap = map;
1546	bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap,
1547	    BUS_DMASYNC_PREREAD);
1548	rxd->rx_m = m;
1549	desc = &sc->nge_rdata.nge_rx_ring[idx];
1550	desc->nge_ptr = htole32(NGE_ADDR_LO(segs[0].ds_addr));
1551	desc->nge_cmdsts = htole32(segs[0].ds_len);
1552	desc->nge_extsts = 0;
1553
1554	return (0);
1555}
1556
1557#ifndef __NO_STRICT_ALIGNMENT
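/*
 * The chip requires 64-bit aligned Rx buffers, which leaves the IP header
 * misaligned on strict-alignment machines; shift the frame back by
 * ETHER_ALIGN (2) bytes so the payload ends up 32-bit aligned.
 */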
1558static __inline void
1559nge_fixup_rx(struct mbuf *m)
1560{
1561	int			i;
1562	uint16_t		*src, *dst;
1563
1564	src = mtod(m, uint16_t *);
1565	dst = src - 1;
1566
1567	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1568		*dst++ = *src++;
1569
1570	m->m_data -= ETHER_ALIGN;
1571}
1572#endif
1573
1574/*
1575 * A frame has been uploaded: pass the resulting mbuf chain up to
1576 * the higher level protocols.
1577 */
1578static void
1579nge_rxeof(struct nge_softc *sc)
1580{
1581	struct mbuf *m;
1582	struct ifnet *ifp;
1583	struct nge_desc *cur_rx;
1584	struct nge_rxdesc *rxd;
1585	int cons, prog, total_len;
1586	uint32_t cmdsts, extsts;
1587
1588	NGE_LOCK_ASSERT(sc);
1589
1590	ifp = sc->nge_ifp;
1591	cons = sc->nge_cdata.nge_rx_cons;
1592
1593	bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
1594	    sc->nge_cdata.nge_rx_ring_map,
1595	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1596
1597	for (prog = 0; prog < NGE_RX_RING_CNT &&
1598	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1599	    NGE_INC(cons, NGE_RX_RING_CNT)) {
1600#ifdef DEVICE_POLLING
1601		if (ifp->if_capenable & IFCAP_POLLING) {
1602			if (sc->rxcycles <= 0)
1603				break;
1604			sc->rxcycles--;
1605		}
1606#endif
1607		cur_rx = &sc->nge_rdata.nge_rx_ring[cons];
1608		cmdsts = le32toh(cur_rx->nge_cmdsts);
1609		extsts = le32toh(cur_rx->nge_extsts);
1610		if ((cmdsts & NGE_CMDSTS_OWN) == 0)
1611			break;
1612		prog++;
1613		rxd = &sc->nge_cdata.nge_rxdesc[cons];
1614		m = rxd->rx_m;
1615		total_len = cmdsts & NGE_CMDSTS_BUFLEN;
1616
1617		if ((cmdsts & NGE_CMDSTS_MORE) != 0) {
1618			if (nge_newbuf(sc, cons) != 0) {
1619				ifp->if_iqdrops++;
1620				if (sc->nge_head != NULL) {
1621					m_freem(sc->nge_head);
1622					sc->nge_head = sc->nge_tail = NULL;
1623				}
1624				nge_discard_rxbuf(sc, cons);
1625				continue;
1626			}
1627			m->m_len = total_len;
1628			if (sc->nge_head == NULL) {
1629				m->m_pkthdr.len = total_len;
1630				sc->nge_head = sc->nge_tail = m;
1631			} else {
1632				m->m_flags &= ~M_PKTHDR;
1633				sc->nge_head->m_pkthdr.len += total_len;
1634				sc->nge_tail->m_next = m;
1635				sc->nge_tail = m;
1636			}
1637			continue;
1638		}
1639
1640		/*
1641		 * If an error occurs, update stats, clear the
1642		 * status word and leave the mbuf cluster in place:
1643		 * it should simply get re-used next time this descriptor
1644	 	 * comes up in the ring.
1645		 */
1646		if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) {
1647			if ((cmdsts & NGE_RXSTAT_RUNT) &&
1648			    total_len >= (ETHER_MIN_LEN - ETHER_CRC_LEN - 4)) {
1649				/*
1650				 * Work around a hardware bug: accept runt frames
1651				 * whose length is larger than or equal to 56.
1652				 */
1653			} else {
1654				/*
1655				 * Input error counters are updated by hardware.
1656				 */
1657				if (sc->nge_head != NULL) {
1658					m_freem(sc->nge_head);
1659					sc->nge_head = sc->nge_tail = NULL;
1660				}
1661				nge_discard_rxbuf(sc, cons);
1662				continue;
1663			}
1664		}
1665
1666		/* Try to conjure up a replacement mbuf. */
1667
1668		if (nge_newbuf(sc, cons) != 0) {
1669			ifp->if_iqdrops++;
1670			if (sc->nge_head != NULL) {
1671				m_freem(sc->nge_head);
1672				sc->nge_head = sc->nge_tail = NULL;
1673			}
1674			nge_discard_rxbuf(sc, cons);
1675			continue;
1676		}
1677
1678		/* Chain received mbufs. */
1679		if (sc->nge_head != NULL) {
1680			m->m_len = total_len;
1681			m->m_flags &= ~M_PKTHDR;
1682			sc->nge_tail->m_next = m;
1683			m = sc->nge_head;
1684			m->m_pkthdr.len += total_len;
1685			sc->nge_head = sc->nge_tail = NULL;
1686		} else
1687			m->m_pkthdr.len = m->m_len = total_len;
1688
1689		/*
1690		 * Ok. NatSemi really screwed up here. This is the
1691		 * only gigE chip I know of with alignment constraints
1692		 * on receive buffers. RX buffers must be 64-bit aligned.
1693		 */
1694		/*
1695		 * By popular demand, ignore the alignment problems
1696		 * on the non-strict alignment platform. The performance hit
1697		 * incurred due to unaligned accesses is much smaller
1698		 * than the hit produced by forcing buffer copies all
1699		 * the time, especially with jumbo frames. We still
1700		 * need to fix up the alignment everywhere else though.
1701		 */
1702#ifndef __NO_STRICT_ALIGNMENT
1703		nge_fixup_rx(m);
1704#endif
1705		m->m_pkthdr.rcvif = ifp;
1706		ifp->if_ipackets++;
1707
1708		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1709			/* Do IP checksum checking. */
1710			if ((extsts & NGE_RXEXTSTS_IPPKT) != 0)
1711				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1712			if ((extsts & NGE_RXEXTSTS_IPCSUMERR) == 0)
1713				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1714			if ((extsts & NGE_RXEXTSTS_TCPPKT &&
1715			    !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) ||
1716			    (extsts & NGE_RXEXTSTS_UDPPKT &&
1717			    !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) {
1718				m->m_pkthdr.csum_flags |=
1719				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1720				m->m_pkthdr.csum_data = 0xffff;
1721			}
1722		}
1723
1724		/*
1725		 * If we received a packet with a vlan tag, pass it
1726		 * to vlan_input() instead of ether_input().
1727		 */
1728		if ((extsts & NGE_RXEXTSTS_VLANPKT) != 0 &&
1729		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
1730			m->m_pkthdr.ether_vtag =
1731			    bswap16(extsts & NGE_RXEXTSTS_VTCI);
1732			m->m_flags |= M_VLANTAG;
1733		}
1734		NGE_UNLOCK(sc);
1735		(*ifp->if_input)(ifp, m);
1736		NGE_LOCK(sc);
1737	}
1738
1739	if (prog > 0) {
1740		sc->nge_cdata.nge_rx_cons = cons;
1741		bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
1742		    sc->nge_cdata.nge_rx_ring_map,
1743		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1744	}
1745}
1746
1747/*
1748 * A frame was downloaded to the chip. It's safe for us to clean up
1749 * the list buffers.
1750 */
1751static void
1752nge_txeof(struct nge_softc *sc)
1753{
1754	struct nge_desc	*cur_tx;
1755	struct nge_txdesc *txd;
1756	struct ifnet *ifp;
1757	uint32_t cmdsts;
1758	int cons, prod;
1759
1760	NGE_LOCK_ASSERT(sc);
1761	ifp = sc->nge_ifp;
1762
1763	cons = sc->nge_cdata.nge_tx_cons;
1764	prod = sc->nge_cdata.nge_tx_prod;
1765	if (cons == prod)
1766		return;
1767
1768	bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
1769	    sc->nge_cdata.nge_tx_ring_map,
1770	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1771
1772	/*
1773	 * Go through our tx list and free mbufs for those
1774	 * frames that have been transmitted.
1775	 */
1776	for (; cons != prod; NGE_INC(cons, NGE_TX_RING_CNT)) {
1777		cur_tx = &sc->nge_rdata.nge_tx_ring[cons];
1778		cmdsts = le32toh(cur_tx->nge_cmdsts);
1779		if ((cmdsts & NGE_CMDSTS_OWN) != 0)
1780			break;
1781		sc->nge_cdata.nge_tx_cnt--;
1782		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1783		if ((cmdsts & NGE_CMDSTS_MORE) != 0)
1784			continue;
1785
1786		txd = &sc->nge_cdata.nge_txdesc[cons];
1787		bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap,
1788		    BUS_DMASYNC_POSTWRITE);
1789		bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap);
1790		if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) {
1791			ifp->if_oerrors++;
1792			if ((cmdsts & NGE_TXSTAT_EXCESSCOLLS) != 0)
1793				ifp->if_collisions++;
1794			if ((cmdsts & NGE_TXSTAT_OUTOFWINCOLL) != 0)
1795				ifp->if_collisions++;
1796		} else
1797			ifp->if_opackets++;
1798
1799		ifp->if_collisions += (cmdsts & NGE_TXSTAT_COLLCNT) >> 16;
1800		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
1801		    __func__));
1802		m_freem(txd->tx_m);
1803		txd->tx_m = NULL;
1804	}
1805
1806	sc->nge_cdata.nge_tx_cons = cons;
1807	if (sc->nge_cdata.nge_tx_cnt == 0)
1808		sc->nge_watchdog_timer = 0;
1809}
1810
1811static void
1812nge_tick(void *xsc)
1813{
1814	struct nge_softc *sc;
1815	struct mii_data *mii;
1816
1817	sc = xsc;
1818	NGE_LOCK_ASSERT(sc);
1819	mii = device_get_softc(sc->nge_miibus);
1820	mii_tick(mii);
1821	/*
1822	 * For PHYs that do not reset an established link, it is
1823	 * necessary to check whether the driver still has a valid
1824	 * link (e.g. the link state change callback is not called).
1825	 * Otherwise, the driver thinks it lost the link because the
1826	 * initialization routine clears the link state flag.
1827	 */
1828	if ((sc->nge_flags & NGE_FLAG_LINK) == 0)
1829		nge_miibus_statchg(sc->nge_dev);
1830	nge_stats_update(sc);
1831	nge_watchdog(sc);
1832	callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc);
1833}
1834
1835static void
1836nge_stats_update(struct nge_softc *sc)
1837{
1838	struct ifnet *ifp;
1839	struct nge_stats now, *stats, *nstats;
1840
1841	NGE_LOCK_ASSERT(sc);
1842
1843	ifp = sc->nge_ifp;
1844	stats = &now;
1845	stats->rx_pkts_errs =
1846	    CSR_READ_4(sc, NGE_MIB_RXERRPKT) & 0xFFFF;
1847	stats->rx_crc_errs =
1848	    CSR_READ_4(sc, NGE_MIB_RXERRFCS) & 0xFFFF;
1849	stats->rx_fifo_oflows =
1850	    CSR_READ_4(sc, NGE_MIB_RXERRMISSEDPKT) & 0xFFFF;
1851	stats->rx_align_errs =
1852	    CSR_READ_4(sc, NGE_MIB_RXERRALIGN) & 0xFFFF;
1853	stats->rx_sym_errs =
1854	    CSR_READ_4(sc, NGE_MIB_RXERRSYM) & 0xFFFF;
1855	stats->rx_pkts_jumbos =
1856	    CSR_READ_4(sc, NGE_MIB_RXERRGIANT) & 0xFFFF;
1857	stats->rx_len_errs =
1858	    CSR_READ_4(sc, NGE_MIB_RXERRRANGLEN) & 0xFFFF;
1859	stats->rx_unctl_frames =
1860	    CSR_READ_4(sc, NGE_MIB_RXBADOPCODE) & 0xFFFF;
1861	stats->rx_pause =
1862	    CSR_READ_4(sc, NGE_MIB_RXPAUSEPKTS) & 0xFFFF;
1863	stats->tx_pause =
1864	    CSR_READ_4(sc, NGE_MIB_TXPAUSEPKTS) & 0xFFFF;
1865	stats->tx_seq_errs =
1866	    CSR_READ_4(sc, NGE_MIB_TXERRSQE) & 0xFF;
1867
1868	/*
1869	 * Since we accept errored frames, exclude Rx length errors.
1870	 */
1871	ifp->if_ierrors += stats->rx_pkts_errs + stats->rx_crc_errs +
1872	    stats->rx_fifo_oflows + stats->rx_sym_errs;
1873
1874	nstats = &sc->nge_stats;
1875	nstats->rx_pkts_errs += stats->rx_pkts_errs;
1876	nstats->rx_crc_errs += stats->rx_crc_errs;
1877	nstats->rx_fifo_oflows += stats->rx_fifo_oflows;
1878	nstats->rx_align_errs += stats->rx_align_errs;
1879	nstats->rx_sym_errs += stats->rx_sym_errs;
1880	nstats->rx_pkts_jumbos += stats->rx_pkts_jumbos;
1881	nstats->rx_len_errs += stats->rx_len_errs;
1882	nstats->rx_unctl_frames += stats->rx_unctl_frames;
1883	nstats->rx_pause += stats->rx_pause;
1884	nstats->tx_pause += stats->tx_pause;
1885	nstats->tx_seq_errs += stats->tx_seq_errs;
1886}
1887
1888#ifdef DEVICE_POLLING
1889static poll_handler_t nge_poll;
1890
1891static void
1892nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1893{
1894	struct nge_softc *sc;
1895
1896	sc = ifp->if_softc;
1897
1898	NGE_LOCK(sc);
1899	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1900		NGE_UNLOCK(sc);
1901		return;
1902	}
1903
1904	/*
1905	 * On the nge, reading the status register also clears it.
1906	 * So before returning to intr mode we must make sure that all
1907	 * possible pending sources of interrupts have been served.
1908	 * In practice this means running the *eof routines to completion,
1909	 * and then call the interrupt routine.
1910	 */
1911	sc->rxcycles = count;
1912	nge_rxeof(sc);
1913	nge_txeof(sc);
1914	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1915		nge_start_locked(ifp);
1916
1917	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
1918		uint32_t	status;
1919
1920		/* Reading the ISR register clears all interrupts. */
1921		status = CSR_READ_4(sc, NGE_ISR);
1922
1923		if ((status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW)) != 0)
1924			nge_rxeof(sc);
1925
1926		if ((status & NGE_ISR_RX_IDLE) != 0)
1927			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1928
1929		if ((status & NGE_ISR_SYSERR) != 0) {
1930			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1931			nge_init_locked(sc);
1932		}
1933	}
1934	NGE_UNLOCK(sc);
1935}
1936#endif /* DEVICE_POLLING */
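
/*
 * The rule the polling comment above describes, as a minimal sketch:
 * because reading NGE_ISR clears it, whoever reads the register owns
 * every event it reported and must service all of them before
 * interrupts are re-armed, or those events are silently lost.
 * Generic names; illustrative only.
 */
#if 0
	status = CSR_READ_4(sc, NGE_ISR);	/* read-to-clear */
	if ((status & RX_EVENTS) != 0)
		rx_handler(sc);			/* must run to completion */
	if ((status & TX_EVENTS) != 0)
		tx_handler(sc);
	/* only now is it safe to re-enable the interrupt */
#endif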
1937
1938static void
1939nge_intr(void *arg)
1940{
1941	struct nge_softc *sc;
1942	struct ifnet *ifp;
1943	uint32_t status;
1944
1945	sc = (struct nge_softc *)arg;
1946	ifp = sc->nge_ifp;
1947
1948	NGE_LOCK(sc);
1949
1950	if ((sc->nge_flags & NGE_FLAG_SUSPENDED) != 0)
1951		goto done_locked;
1952
1953	/* Reading the ISR register clears all interrupts. */
1954	status = CSR_READ_4(sc, NGE_ISR);
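	/*
	 * An all-ones read means the device no longer responds (e.g.
	 * it was powered down or pulled); and since the interrupt line
	 * may be shared, a status with none of our sources pending
	 * simply belongs to another device.  Either way there is
	 * nothing to do.
	 */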
1955	if (status == 0xffffffff || (status & NGE_INTRS) == 0)
1956		goto done_locked;
1957#ifdef DEVICE_POLLING
1958	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1959		goto done_locked;
1960#endif
1961	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1962		goto done_locked;
1963
1964	/* Disable interrupts. */
1965	CSR_WRITE_4(sc, NGE_IER, 0);
1966
1967	/* Data LED on for TBI mode */
1968	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
1969		CSR_WRITE_4(sc, NGE_GPIO,
1970		    CSR_READ_4(sc, NGE_GPIO) | NGE_GPIO_GP3_OUT);
1971
1972	while ((status & NGE_INTRS) != 0) {
1973		if ((status & (NGE_ISR_TX_DESC_OK | NGE_ISR_TX_ERR |
1974		    NGE_ISR_TX_OK | NGE_ISR_TX_IDLE)) != 0)
1975			nge_txeof(sc);
1976
1977		if ((status & (NGE_ISR_RX_DESC_OK | NGE_ISR_RX_ERR |
1978		    NGE_ISR_RX_OFLOW | NGE_ISR_RX_FIFO_OFLOW |
1979		    NGE_ISR_RX_IDLE | NGE_ISR_RX_OK)) != 0)
1980			nge_rxeof(sc);
1981
1982		if ((status & NGE_ISR_RX_IDLE) != 0)
1983			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1984
1985		if ((status & NGE_ISR_SYSERR) != 0) {
1986			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1987			nge_init_locked(sc);
1988		}
1989		/* Reading the ISR register clears all interrupts. */
1990		status = CSR_READ_4(sc, NGE_ISR);
1991	}
1992
1993	/* Re-enable interrupts. */
1994	CSR_WRITE_4(sc, NGE_IER, 1);
1995
1996	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1997		nge_start_locked(ifp);
1998
1999	/* Data LED off for TBI mode */
2000	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
2001		CSR_WRITE_4(sc, NGE_GPIO,
2002		    CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT);
2003
2004done_locked:
2005	NGE_UNLOCK(sc);
2006}
2007
2008/*
2009 * Encapsulate an mbuf chain in descriptors by coupling the mbuf data
2010 * pointers to the descriptor fragment pointers.
2011 */
2012static int
2013nge_encap(struct nge_softc *sc, struct mbuf **m_head)
2014{
2015	struct nge_txdesc *txd, *txd_last;
2016	struct nge_desc *desc;
2017	struct mbuf *m;
2018	bus_dmamap_t map;
2019	bus_dma_segment_t txsegs[NGE_MAXTXSEGS];
2020	int error, i, nsegs, prod, si;
2021
2022	NGE_LOCK_ASSERT(sc);
2023
2024	m = *m_head;
2025	prod = sc->nge_cdata.nge_tx_prod;
2026	txd = &sc->nge_cdata.nge_txdesc[prod];
2027	txd_last = txd;
2028	map = txd->tx_dmamap;
2029	error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag, map,
2030	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2031	if (error == EFBIG) {
2032		m = m_collapse(*m_head, M_DONTWAIT, NGE_MAXTXSEGS);
2033		if (m == NULL) {
2034			m_freem(*m_head);
2035			*m_head = NULL;
2036			return (ENOBUFS);
2037		}
2038		*m_head = m;
2039		error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag,
2040		    map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2041		if (error != 0) {
2042			m_freem(*m_head);
2043			*m_head = NULL;
2044			return (error);
2045		}
2046	} else if (error != 0)
2047		return (error);
2048	if (nsegs == 0) {
2049		m_freem(*m_head);
2050		*m_head = NULL;
2051		return (EIO);
2052	}
2053
2054	/* Check number of available descriptors. */
2055	if (sc->nge_cdata.nge_tx_cnt + nsegs >= (NGE_TX_RING_CNT - 1)) {
2056		bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, map);
2057		return (ENOBUFS);
2058	}
2059
2060	bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, map, BUS_DMASYNC_PREWRITE);
2061
2062	si = prod;
2063	for (i = 0; i < nsegs; i++) {
2064		desc = &sc->nge_rdata.nge_tx_ring[prod];
2065		desc->nge_ptr = htole32(NGE_ADDR_LO(txsegs[i].ds_addr));
2066		if (i == 0)
2067			desc->nge_cmdsts = htole32(txsegs[i].ds_len |
2068			    NGE_CMDSTS_MORE);
2069		else
2070			desc->nge_cmdsts = htole32(txsegs[i].ds_len |
2071			    NGE_CMDSTS_MORE | NGE_CMDSTS_OWN);
2072		desc->nge_extsts = 0;
2073		sc->nge_cdata.nge_tx_cnt++;
2074		NGE_INC(prod, NGE_TX_RING_CNT);
2075	}
2076	/* Update producer index. */
2077	sc->nge_cdata.nge_tx_prod = prod;
2078
2079	prod = (prod + NGE_TX_RING_CNT - 1) % NGE_TX_RING_CNT;
2080	desc = &sc->nge_rdata.nge_tx_ring[prod];
2081	/* Check if we have a VLAN tag to insert. */
2082	if ((m->m_flags & M_VLANTAG) != 0)
2083		desc->nge_extsts |= htole32(NGE_TXEXTSTS_VLANPKT |
2084		    bswap16(m->m_pkthdr.ether_vtag));
2085	/* Set EOP on the last descriptor. */
2086	desc->nge_cmdsts &= htole32(~NGE_CMDSTS_MORE);
2087
2088	/* Set checksum offload in the first descriptor. */
2089	desc = &sc->nge_rdata.nge_tx_ring[si];
2090	if ((m->m_pkthdr.csum_flags & NGE_CSUM_FEATURES) != 0) {
2091		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2092			desc->nge_extsts |= htole32(NGE_TXEXTSTS_IPCSUM);
2093		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2094			desc->nge_extsts |= htole32(NGE_TXEXTSTS_TCPCSUM);
2095		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2096			desc->nge_extsts |= htole32(NGE_TXEXTSTS_UDPCSUM);
2097	}
2098	/* Lastly, turn the first descriptor ownership to hardware. */
2099	desc->nge_cmdsts |= htole32(NGE_CMDSTS_OWN);
2100
2101	txd = &sc->nge_cdata.nge_txdesc[prod];
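	/*
	 * Swap DMA maps between the first and the last descriptor of
	 * the chain: the map that was actually loaded above stays with
	 * the descriptor that records the mbuf, so nge_txeof() can
	 * unload and free both from the same slot when the frame
	 * completes.
	 */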
2102	map = txd_last->tx_dmamap;
2103	txd_last->tx_dmamap = txd->tx_dmamap;
2104	txd->tx_dmamap = map;
2105	txd->tx_m = m;
2106
2107	return (0);
2108}
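
/*
 * Ownership handoff in nge_encap() above, restated as a tiny sketch:
 * every descriptor but the first is marked NGE_CMDSTS_OWN while the
 * chain is built, and the first descriptor is released last, after
 * its checksum and VLAN bits are final, so the chip can never pick up
 * a half-built frame once nge_start_locked() syncs the ring and kicks
 * the transmitter.  Pseudo-code with placeholder names:
 */
#if 0
	for (i = 1; i < nsegs; i++)
		desc[i].cmdsts |= OWN;	/* chip stops at desc[0] anyway */
	desc[0].extsts |= csum_bits;	/* finalize the first descriptor */
	desc[0].cmdsts |= OWN;		/* release the whole chain */
#endif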
2109
2110/*
2111 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2112 * to the mbuf data regions directly in the transmit lists. We also save a
2113 * copy of the pointers since the transmit list fragment pointers are
2114 * physical addresses.
2115 */
2116
2117static void
2118nge_start(struct ifnet *ifp)
2119{
2120	struct nge_softc *sc;
2121
2122	sc = ifp->if_softc;
2123	NGE_LOCK(sc);
2124	nge_start_locked(ifp);
2125	NGE_UNLOCK(sc);
2126}
2127
2128static void
2129nge_start_locked(struct ifnet *ifp)
2130{
2131	struct nge_softc *sc;
2132	struct mbuf *m_head;
2133	int enq;
2134
2135	sc = ifp->if_softc;
2136
2137	NGE_LOCK_ASSERT(sc);
2138
2139	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2140	    IFF_DRV_RUNNING || (sc->nge_flags & NGE_FLAG_LINK) == 0)
2141		return;
2142
2143	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2144	    sc->nge_cdata.nge_tx_cnt < NGE_TX_RING_CNT - 2; ) {
2145		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2146		if (m_head == NULL)
2147			break;
2148		/*
2149		 * Pack the data into the transmit ring. If we
2150		 * don't have room, set the OACTIVE flag and wait
2151		 * for the NIC to drain the ring.
2152		 */
2153		if (nge_encap(sc, &m_head)) {
2154			if (m_head == NULL)
2155				break;
2156			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2157			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2158			break;
2159		}
2160
2161		enq++;
2162		/*
2163		 * If there's a BPF listener, bounce a copy of this frame
2164		 * to him.
2165		 */
2166		ETHER_BPF_MTAP(ifp, m_head);
2167	}
2168
2169	if (enq > 0) {
2170		bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
2171		    sc->nge_cdata.nge_tx_ring_map,
2172		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2173		/* Transmit */
2174		NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);
2175
2176		/* Set a timeout in case the chip goes out to lunch. */
2177		sc->nge_watchdog_timer = 5;
2178	}
2179}
2180
2181static void
2182nge_init(void *xsc)
2183{
2184	struct nge_softc *sc = xsc;
2185
2186	NGE_LOCK(sc);
2187	nge_init_locked(sc);
2188	NGE_UNLOCK(sc);
2189}
2190
2191static void
2192nge_init_locked(struct nge_softc *sc)
2193{
2194	struct ifnet *ifp = sc->nge_ifp;
2195	struct mii_data *mii;
2196	uint8_t *eaddr;
2197	uint32_t reg;
2198
2199	NGE_LOCK_ASSERT(sc);
2200
2201	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2202		return;
2203
2204	/*
2205	 * Cancel pending I/O and free all RX/TX buffers.
2206	 */
2207	nge_stop(sc);
2208
2209	/* Reset the adapter. */
2210	nge_reset(sc);
2211
2212	/* Disable Rx filter prior to programming Rx filter. */
2213	CSR_WRITE_4(sc, NGE_RXFILT_CTL, 0);
2214	CSR_BARRIER_WRITE_4(sc, NGE_RXFILT_CTL);
2215
2216	mii = device_get_softc(sc->nge_miibus);
2217
2218	/* Set MAC address. */
2219	eaddr = IF_LLADDR(sc->nge_ifp);
2220	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
2221	CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[1] << 8) | eaddr[0]);
2222	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
2223	CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[3] << 8) | eaddr[2]);
2224	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
2225	CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[5] << 8) | eaddr[4]);
2226
2227	/* Init circular RX list. */
2228	if (nge_list_rx_init(sc) == ENOBUFS) {
2229		device_printf(sc->nge_dev, "initialization failed: no "
2230			"memory for rx buffers\n");
2231		nge_stop(sc);
2232		return;
2233	}
2234
2235	/*
2236	 * Init tx descriptors.
2237	 */
2238	nge_list_tx_init(sc);
2239
2240	/*
2241	 * For the NatSemi chip, we have to explicitly enable the
2242	 * reception of ARP frames, as well as turn on the 'perfect
2243	 * match' filter where we store the station address, otherwise
2244	 * we won't receive unicasts meant for this host.
2245	 */
2246	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
2247	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);
2248
2249	/*
2250	 * Set the capture broadcast bit to capture broadcast frames.
2251	 */
2252	if (ifp->if_flags & IFF_BROADCAST) {
2253		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
2254	} else {
2255		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
2256	}
2257
2258	/* Turn the receive filter on. */
2259	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);
2260
2261	/* Set Rx filter. */
2262	nge_rxfilter(sc);
2263
2264	/* Disable PRIQ ctl. */
2265	CSR_WRITE_4(sc, NGE_PRIOQCTL, 0);
2266
2267	/*
2268	 * Set pause frame parameters.
2269	 *  Rx stat FIFO hi-threshold : 2 or more packets
2270	 *  Rx stat FIFO lo-threshold : less than 2 packets
2271	 *  Rx data FIFO hi-threshold : 2K or more bytes
2272	 *  Rx data FIFO lo-threshold : less than 2K bytes
2273	 *  pause time : (512ns * 0xffff) -> 33.55ms
2274	 */
2275	CSR_WRITE_4(sc, NGE_PAUSECSR,
2276	    NGE_PAUSECSR_PAUSE_ON_MCAST |
2277	    NGE_PAUSECSR_PAUSE_ON_DA |
2278	    ((1 << 24) & NGE_PAUSECSR_RX_STATFIFO_THR_HI) |
2279	    ((1 << 22) & NGE_PAUSECSR_RX_STATFIFO_THR_LO) |
2280	    ((1 << 20) & NGE_PAUSECSR_RX_DATAFIFO_THR_HI) |
2281	    ((1 << 18) & NGE_PAUSECSR_RX_DATAFIFO_THR_LO) |
2282	    NGE_PAUSECSR_CNT);
2283
2284	/*
2285	 * Load the address of the RX and TX lists.
2286	 */
2287	CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI,
2288	    NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr));
2289	CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO,
2290	    NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr));
2291	CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI,
2292	    NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr));
2293	CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO,
2294	    NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr));
2295
2296	/* Set RX configuration. */
2297	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);
2298
2299	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, 0);
2300	/*
2301	 * Enable hardware checksum validation for all IPv4
2302	 * packets, do not reject packets with bad checksums.
2303	 */
2304	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2305		NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);
2306
2307	/*
2308	 * Tell the chip to detect and strip VLAN tag info from
2309	 * received frames. The tag will be provided in the extsts
2310	 * field in the RX descriptors.
2311	 */
2312	NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_DETECT_ENB);
2313	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2314		NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_STRIP_ENB);
2315
2316	/* Set TX configuration. */
2317	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);
2318
2319	/*
2320	 * Enable TX IPv4 checksumming on a per-packet basis.
2321	 */
2322	CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT);
2323
2324	/*
2325	 * Tell the chip to insert VLAN tags on a per-packet basis as
2326	 * dictated by the code in the frame encapsulation routine.
2327	 */
2328	NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);
2329
2330	/*
2331	 * Enable the delivery of PHY interrupts based on
2332	 * link/speed/duplex status changes. Also enable the
2333	 * extsts field in the DMA descriptors (needed for
2334	 * TCP/IP checksum offload on transmit).
2335	 */
2336	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD |
2337	    NGE_CFG_PHYINTR_LNK | NGE_CFG_PHYINTR_DUP | NGE_CFG_EXTSTS_ENB);
2338
2339	/*
2340	 * Configure interrupt holdoff (moderation). We can
2341	 * have the chip delay interrupt delivery for a certain
2342	 * period. Units are in 100us, and the max setting
2343	 * is 25500us (0xFF x 100us). Default is a 100us holdoff.
2344	 */
2345	CSR_WRITE_4(sc, NGE_IHR, sc->nge_int_holdoff);
2346
2347	/*
2348	 * Enable MAC statistics counters and clear.
2349	 */
2350	reg = CSR_READ_4(sc, NGE_MIBCTL);
2351	reg &= ~NGE_MIBCTL_FREEZE_CNT;
2352	reg |= NGE_MIBCTL_CLEAR_CNT;
2353	CSR_WRITE_4(sc, NGE_MIBCTL, reg);
2354
2355	/*
2356	 * Enable interrupts.
2357	 */
2358	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
2359#ifdef DEVICE_POLLING
2360	/*
2361	 * ... only enable interrupts if we are not polling; make sure
2362	 * they are off otherwise.
2363	 */
2364	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2365		CSR_WRITE_4(sc, NGE_IER, 0);
2366	else
2367#endif
2368	CSR_WRITE_4(sc, NGE_IER, 1);
2369
2370	sc->nge_flags &= ~NGE_FLAG_LINK;
2371	mii_mediachg(mii);
2372
2373	sc->nge_watchdog_timer = 0;
2374	callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc);
2375
2376	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2377	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2378}
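
/*
 * A small helper sketch (not part of the driver) for the NGE_IHR
 * encoding described in nge_init_locked() above: the register counts
 * interrupt holdoff in 100us units, 0x00..0xFF, so 0xFF gives the
 * 25500us maximum and the default of 1 gives 100us.
 */
#if 0
static inline uint32_t
nge_us_to_holdoff(u_int usecs)
{
	u_int ticks;

	ticks = (usecs + 99) / 100;	/* round up to 100us units */
	if (ticks > 0xFF)
		ticks = 0xFF;		/* clamp at 25500us */
	return (ticks);
}
#endif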
2379
2380/*
2381 * Set media options.
2382 */
2383static int
2384nge_mediachange(struct ifnet *ifp)
2385{
2386	struct nge_softc *sc;
2387	struct mii_data	*mii;
2388	struct mii_softc *miisc;
2389	int error;
2390
2391	sc = ifp->if_softc;
2392	NGE_LOCK(sc);
2393	mii = device_get_softc(sc->nge_miibus);
2394	if (mii->mii_instance) {
2395		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2396			mii_phy_reset(miisc);
2397	}
2398	error = mii_mediachg(mii);
2399	NGE_UNLOCK(sc);
2400
2401	return (error);
2402}
2403
2404/*
2405 * Report current media status.
2406 */
2407static void
2408nge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2409{
2410	struct nge_softc *sc;
2411	struct mii_data *mii;
2412
2413	sc = ifp->if_softc;
2414	NGE_LOCK(sc);
2415	mii = device_get_softc(sc->nge_miibus);
2416	mii_pollstat(mii);
2417	NGE_UNLOCK(sc);
2418	ifmr->ifm_active = mii->mii_media_active;
2419	ifmr->ifm_status = mii->mii_media_status;
2420}
2421
2422static int
2423nge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2424{
2425	struct nge_softc *sc = ifp->if_softc;
2426	struct ifreq *ifr = (struct ifreq *) data;
2427	struct mii_data *mii;
2428	int error = 0, mask;
2429
2430	switch (command) {
2431	case SIOCSIFMTU:
2432		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NGE_JUMBO_MTU)
2433			error = EINVAL;
2434		else {
2435			NGE_LOCK(sc);
2436			ifp->if_mtu = ifr->ifr_mtu;
2437			/*
2438			 * Workaround: if the MTU is 8152 or larger
2439			 * (TX FIFO size minus 64 minus 18), turn off
2440			 * TX checksum offloading.
2441			 */
2442			if (ifr->ifr_mtu >= 8152) {
2443				ifp->if_capenable &= ~IFCAP_TXCSUM;
2444				ifp->if_hwassist &= ~NGE_CSUM_FEATURES;
2445			} else {
2446				ifp->if_capenable |= IFCAP_TXCSUM;
2447				ifp->if_hwassist |= NGE_CSUM_FEATURES;
2448			}
2449			NGE_UNLOCK(sc);
2450			VLAN_CAPABILITIES(ifp);
2451		}
2452		break;
2453	case SIOCSIFFLAGS:
2454		NGE_LOCK(sc);
2455		if ((ifp->if_flags & IFF_UP) != 0) {
2456			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2457				if ((ifp->if_flags ^ sc->nge_if_flags) &
2458				    (IFF_PROMISC | IFF_ALLMULTI))
2459					nge_rxfilter(sc);
2460			} else {
2461				if ((sc->nge_flags & NGE_FLAG_DETACH) == 0)
2462					nge_init_locked(sc);
2463			}
2464		} else {
2465			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2466				nge_stop(sc);
2467		}
2468		sc->nge_if_flags = ifp->if_flags;
2469		NGE_UNLOCK(sc);
2470		error = 0;
2471		break;
2472	case SIOCADDMULTI:
2473	case SIOCDELMULTI:
2474		NGE_LOCK(sc);
2475		nge_rxfilter(sc);
2476		NGE_UNLOCK(sc);
2477		error = 0;
2478		break;
2479	case SIOCGIFMEDIA:
2480	case SIOCSIFMEDIA:
2481		mii = device_get_softc(sc->nge_miibus);
2482		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2483		break;
2484	case SIOCSIFCAP:
2485		NGE_LOCK(sc);
2486		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2487#ifdef DEVICE_POLLING
2488		if ((mask & IFCAP_POLLING) != 0 &&
2489		    (IFCAP_POLLING & ifp->if_capabilities) != 0) {
2490			ifp->if_capenable ^= IFCAP_POLLING;
2491			if ((IFCAP_POLLING & ifp->if_capenable) != 0) {
2492				error = ether_poll_register(nge_poll, ifp);
2493				if (error != 0) {
2494					NGE_UNLOCK(sc);
2495					break;
2496				}
2497				/* Disable interrupts. */
2498				CSR_WRITE_4(sc, NGE_IER, 0);
2499			} else {
2500				error = ether_poll_deregister(ifp);
2501				/* Enable interrupts. */
2502				CSR_WRITE_4(sc, NGE_IER, 1);
2503			}
2504		}
2505#endif /* DEVICE_POLLING */
2506		if ((mask & IFCAP_TXCSUM) != 0 &&
2507		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
2508			ifp->if_capenable ^= IFCAP_TXCSUM;
2509			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
2510				ifp->if_hwassist |= NGE_CSUM_FEATURES;
2511			else
2512				ifp->if_hwassist &= ~NGE_CSUM_FEATURES;
2513		}
2514		if ((mask & IFCAP_RXCSUM) != 0 &&
2515		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
2516			ifp->if_capenable ^= IFCAP_RXCSUM;
2517
2518		if ((mask & IFCAP_WOL) != 0 &&
2519		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
2520			if ((mask & IFCAP_WOL_UCAST) != 0)
2521				ifp->if_capenable ^= IFCAP_WOL_UCAST;
2522			if ((mask & IFCAP_WOL_MCAST) != 0)
2523				ifp->if_capenable ^= IFCAP_WOL_MCAST;
2524			if ((mask & IFCAP_WOL_MAGIC) != 0)
2525				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2526		}
2527
2528		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2529		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2530			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2531		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2532		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2533			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2534			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2535				if ((ifp->if_capenable &
2536				    IFCAP_VLAN_HWTAGGING) != 0)
2537					NGE_SETBIT(sc,
2538					    NGE_VLAN_IP_RXCTL,
2539					    NGE_VIPRXCTL_TAG_STRIP_ENB);
2540				else
2541					NGE_CLRBIT(sc,
2542					    NGE_VLAN_IP_RXCTL,
2543					    NGE_VIPRXCTL_TAG_STRIP_ENB);
2544			}
2545		}
2546		/*
2547		 * Both VLAN hardware tagging and checksum offload are
2548		 * required to do checksum offload on a VLAN interface.
2549		 */
2550		if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
2551			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
2552		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
2553			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
2554		NGE_UNLOCK(sc);
2555		VLAN_CAPABILITIES(ifp);
2556		break;
2557	default:
2558		error = ether_ioctl(ifp, command, data);
2559		break;
2560	}
2561
2562	return (error);
2563}
2564
2565static void
2566nge_watchdog(struct nge_softc *sc)
2567{
2568	struct ifnet *ifp;
2569
2570	NGE_LOCK_ASSERT(sc);
2571
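	/*
	 * A timer value of zero means the watchdog is disarmed;
	 * otherwise count down and fire only when the decrement
	 * reaches zero, i.e. the 5-second deadline armed in
	 * nge_start_locked() expired without nge_txeof() clearing it.
	 */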
2572	if (sc->nge_watchdog_timer == 0 || --sc->nge_watchdog_timer)
2573		return;
2574
2575	ifp = sc->nge_ifp;
2576	ifp->if_oerrors++;
2577	if_printf(ifp, "watchdog timeout\n");
2578
2579	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2580	nge_init_locked(sc);
2581
2582	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2583		nge_start_locked(ifp);
2584}
2585
2586static int
2587nge_stop_mac(struct nge_softc *sc)
2588{
2589	uint32_t reg;
2590	int i;
2591
2592	NGE_LOCK_ASSERT(sc);
2593
2594	reg = CSR_READ_4(sc, NGE_CSR);
2595	if ((reg & (NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE)) != 0) {
2596		reg &= ~(NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE);
2597		reg |= NGE_CSR_TX_DISABLE | NGE_CSR_RX_DISABLE;
2598		CSR_WRITE_4(sc, NGE_CSR, reg);
2599		for (i = 0; i < NGE_TIMEOUT; i++) {
2600			DELAY(1);
2601			if ((CSR_READ_4(sc, NGE_CSR) &
2602			    (NGE_CSR_RX_ENABLE | NGE_CSR_TX_ENABLE)) == 0)
2603				break;
2604		}
2605		if (i == NGE_TIMEOUT)
2606			return (ETIMEDOUT);
2607	}
2608
2609	return (0);
2610}
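
/*
 * nge_stop_mac() above is the standard bounded busy-wait: issue the
 * disable command, then poll until the hardware acknowledges or the
 * budget runs out.  The same shape, with placeholder names:
 */
#if 0
	CSR_WRITE_4(sc, REG, DISABLE_BITS);
	for (i = 0; i < TIMEOUT; i++) {
		DELAY(1);			/* 1us between polls */
		if ((CSR_READ_4(sc, REG) & BUSY_BITS) == 0)
			break;			/* acknowledged */
	}
	if (i == TIMEOUT)
		return (ETIMEDOUT);		/* never acknowledged */
#endif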
2611
2612/*
2613 * Stop the adapter and free any mbufs allocated to the
2614 * RX and TX lists.
2615 */
2616static void
2617nge_stop(struct nge_softc *sc)
2618{
2619	struct nge_txdesc *txd;
2620	struct nge_rxdesc *rxd;
2621	int i;
2622	struct ifnet *ifp;
2623
2624	NGE_LOCK_ASSERT(sc);
2625	ifp = sc->nge_ifp;
2626
2627	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2628	sc->nge_flags &= ~NGE_FLAG_LINK;
2629	callout_stop(&sc->nge_stat_ch);
2630	sc->nge_watchdog_timer = 0;
2631
2632	CSR_WRITE_4(sc, NGE_IER, 0);
2633	CSR_WRITE_4(sc, NGE_IMR, 0);
2634	if (nge_stop_mac(sc) == ETIMEDOUT)
2635		device_printf(sc->nge_dev,
2636		    "%s: unable to stop Tx/Rx MAC\n", __func__);
2637	CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI, 0);
2638	CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO, 0);
2639	CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0);
2640	CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0);
2641	nge_stats_update(sc);
2642	if (sc->nge_head != NULL) {
2643		m_freem(sc->nge_head);
2644		sc->nge_head = sc->nge_tail = NULL;
2645	}
2646
2647	/*
2648	 * Free RX and TX mbufs still in the queues.
2649	 */
2650	for (i = 0; i < NGE_RX_RING_CNT; i++) {
2651		rxd = &sc->nge_cdata.nge_rxdesc[i];
2652		if (rxd->rx_m != NULL) {
2653			bus_dmamap_sync(sc->nge_cdata.nge_rx_tag,
2654			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2655			bus_dmamap_unload(sc->nge_cdata.nge_rx_tag,
2656			    rxd->rx_dmamap);
2657			m_freem(rxd->rx_m);
2658			rxd->rx_m = NULL;
2659		}
2660	}
2661	for (i = 0; i < NGE_TX_RING_CNT; i++) {
2662		txd = &sc->nge_cdata.nge_txdesc[i];
2663		if (txd->tx_m != NULL) {
2664			bus_dmamap_sync(sc->nge_cdata.nge_tx_tag,
2665			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2666			bus_dmamap_unload(sc->nge_cdata.nge_tx_tag,
2667			    txd->tx_dmamap);
2668			m_freem(txd->tx_m);
2669			txd->tx_m = NULL;
2670		}
2671	}
2672}
2673
2674/*
2675 * Before setting WOL bits, caller should have stopped Receiver.
2676 */
2677static void
2678nge_wol(struct nge_softc *sc)
2679{
2680	struct ifnet *ifp;
2681	uint32_t reg;
2682	uint16_t pmstat;
2683	int pmc;
2684
2685	NGE_LOCK_ASSERT(sc);
2686
2687	if (pci_find_extcap(sc->nge_dev, PCIY_PMG, &pmc) != 0)
2688		return;
2689
2690	ifp = sc->nge_ifp;
2691	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
2692		/* Disable WOL & disconnect CLKRUN to save power. */
2693		CSR_WRITE_4(sc, NGE_WOLCSR, 0);
2694		CSR_WRITE_4(sc, NGE_CLKRUN, 0);
2695	} else {
2696		if (nge_stop_mac(sc) == ETIMEDOUT)
2697			device_printf(sc->nge_dev,
2698			    "%s: unable to stop Tx/Rx MAC\n", __func__);
2699		/*
2700		 * Make sure wake frames will be buffered in the Rx FIFO.
2701		 * (i.e. Silent Rx mode.)
2702		 */
2703		CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0);
2704		CSR_BARRIER_WRITE_4(sc, NGE_RX_LISTPTR_HI);
2705		CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0);
2706		CSR_BARRIER_WRITE_4(sc, NGE_RX_LISTPTR_LO);
2707		/* Enable Rx again. */
2708		NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
2709		CSR_BARRIER_WRITE_4(sc, NGE_CSR);
2710
2711		/* Configure WOL events. */
2712		reg = 0;
2713		if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
2714			reg |= NGE_WOLCSR_WAKE_ON_UNICAST;
2715		if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
2716			reg |= NGE_WOLCSR_WAKE_ON_MULTICAST;
2717		if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
2718			reg |= NGE_WOLCSR_WAKE_ON_MAGICPKT;
2719		CSR_WRITE_4(sc, NGE_WOLCSR, reg);
2720
2721		/* Activate CLKRUN. */
2722		reg = CSR_READ_4(sc, NGE_CLKRUN);
2723		reg |= NGE_CLKRUN_PMEENB | NGE_CLNRUN_CLKRUN_ENB;
2724		CSR_WRITE_4(sc, NGE_CLKRUN, reg);
2725	}
2726
2727	/* Request PME. */
2728	pmstat = pci_read_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, 2);
2729	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
2730	if ((ifp->if_capenable & IFCAP_WOL) != 0)
2731		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2732	pci_write_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
2733}
2734
2735/*
2736 * Stop all chip I/O so that the kernel's probe routines don't
2737 * get confused by errant DMAs when rebooting.
2738 */
2739static int
2740nge_shutdown(device_t dev)
2741{
2742
2743	return (nge_suspend(dev));
2744}
2745
2746static int
2747nge_suspend(device_t dev)
2748{
2749	struct nge_softc *sc;
2750
2751	sc = device_get_softc(dev);
2752
2753	NGE_LOCK(sc);
2754	nge_stop(sc);
2755	nge_wol(sc);
2756	sc->nge_flags |= NGE_FLAG_SUSPENDED;
2757	NGE_UNLOCK(sc);
2758
2759	return (0);
2760}
2761
2762static int
2763nge_resume(device_t dev)
2764{
2765	struct nge_softc *sc;
2766	struct ifnet *ifp;
2767	uint16_t pmstat;
2768	int pmc;
2769
2770	sc = device_get_softc(dev);
2771
2772	NGE_LOCK(sc);
2773	ifp = sc->nge_ifp;
2774	if (pci_find_extcap(sc->nge_dev, PCIY_PMG, &pmc) == 0) {
2775		/* Disable PME and clear PME status. */
2776		pmstat = pci_read_config(sc->nge_dev,
2777		    pmc + PCIR_POWER_STATUS, 2);
2778		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
2779			pmstat &= ~PCIM_PSTAT_PMEENABLE;
2780			pci_write_config(sc->nge_dev,
2781			    pmc + PCIR_POWER_STATUS, pmstat, 2);
2782		}
2783	}
2784	if (ifp->if_flags & IFF_UP) {
2785		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2786		nge_init_locked(sc);
2787	}
2788
2789	sc->nge_flags &= ~NGE_FLAG_SUSPENDED;
2790	NGE_UNLOCK(sc);
2791
2792	return (0);
2793}
2794
2795#define	NGE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
2796	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2797
2798static void
2799nge_sysctl_node(struct nge_softc *sc)
2800{
2801	struct sysctl_ctx_list *ctx;
2802	struct sysctl_oid_list *child, *parent;
2803	struct sysctl_oid *tree;
2804	struct nge_stats *stats;
2805	int error;
2806
2807	ctx = device_get_sysctl_ctx(sc->nge_dev);
2808	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nge_dev));
2809	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_holdoff",
2810	    CTLTYPE_INT | CTLFLAG_RW, &sc->nge_int_holdoff, 0,
2811	    sysctl_hw_nge_int_holdoff, "I", "NGE interrupt moderation");
2812	/* Pull in device tunables. */
2813	sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT;
2814	error = resource_int_value(device_get_name(sc->nge_dev),
2815	    device_get_unit(sc->nge_dev), "int_holdoff", &sc->nge_int_holdoff);
2816	if (error == 0) {
2817		if (sc->nge_int_holdoff < NGE_INT_HOLDOFF_MIN ||
2818		    sc->nge_int_holdoff > NGE_INT_HOLDOFF_MAX) {
2819			device_printf(sc->nge_dev,
2820			    "int_holdoff value out of range; "
2821			    "using default: %d(%d us)\n",
2822			    NGE_INT_HOLDOFF_DEFAULT,
2823			    NGE_INT_HOLDOFF_DEFAULT * 100);
2824			sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT;
2825		}
2826	}
2827
2828	stats = &sc->nge_stats;
2829	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
2830	    NULL, "NGE statistics");
2831	parent = SYSCTL_CHILDREN(tree);
2832
2833	/* Rx statistics. */
2834	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2835	    NULL, "Rx MAC statistics");
2836	child = SYSCTL_CHILDREN(tree);
2837	NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_errs",
2838	    &stats->rx_pkts_errs,
2839	    "Packet errors including both wire errors and FIFO overruns");
2840	NGE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
2841	    &stats->rx_crc_errs, "CRC errors");
2842	NGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2843	    &stats->rx_fifo_oflows, "FIFO overflows");
2844	NGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
2845	    &stats->rx_align_errs, "Frame alignment errors");
2846	NGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
2847	    &stats->rx_sym_errs, "One or more symbol errors");
2848	NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_jumbos",
2849	    &stats->rx_pkts_jumbos,
2850	    "Packets received with length greater than 1518 bytes");
2851	NGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
2852	    &stats->rx_len_errs, "In Range Length errors");
2853	NGE_SYSCTL_STAT_ADD32(ctx, child, "unctl_frames",
2854	    &stats->rx_unctl_frames, "Control frames with unsupported opcode");
2855	NGE_SYSCTL_STAT_ADD32(ctx, child, "pause",
2856	    &stats->rx_pause, "Pause frames");
2857
2858	/* Tx statistics. */
2859	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2860	    NULL, "Tx MAC statistics");
2861	child = SYSCTL_CHILDREN(tree);
2862	NGE_SYSCTL_STAT_ADD32(ctx, child, "pause",
2863	    &stats->tx_pause, "Pause frames");
2864	NGE_SYSCTL_STAT_ADD32(ctx, child, "seq_errs",
2865	    &stats->tx_seq_errs,
2866	    "Loss of collision heartbeat during transmission");
2867}
2868
2869#undef NGE_SYSCTL_STAT_ADD32
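
/*
 * The nodes created above land under the device's sysctl tree; for
 * unit 0 that would look like, from userland:
 *
 *	sysctl dev.nge.0.int_holdoff
 *	sysctl dev.nge.0.stats.rx.crc_errs
 *	sysctl dev.nge.0.stats.tx.pause
 */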
2870
2871static int
2872sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2873{
2874	int error, value;
2875
2876	if (arg1 == NULL)
2877		return (EINVAL);
2878	value = *(int *)arg1;
2879	error = sysctl_handle_int(oidp, &value, 0, req);
2880	if (error != 0 || req->newptr == NULL)
2881		return (error);
2882	if (value < low || value > high)
2883		return (EINVAL);
2884	*(int *)arg1 = value;
2885
2886	return (0);
2887}
2888
2889static int
2890sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS)
2891{
2892
2893	return (sysctl_int_range(oidp, arg1, arg2, req, NGE_INT_HOLDOFF_MIN,
2894	    NGE_INT_HOLDOFF_MAX));
2895}
2896