/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2000, 2001
 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/nge/if_nge.c 192297 2009-05-18 07:04:03Z yongari $");

/*
 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
 * for FreeBSD. Datasheets are available from:
 *
 * http://www.national.com/ds/DP/DP83820.pdf
 * http://www.national.com/ds/DP/DP83821.pdf
 *
 * These chips are used on several low cost gigabit ethernet NICs
 * sold by D-Link, Addtron, SMC and Asante. Both parts are
 * virtually the same, except the 83820 is a 64-bit/32-bit part,
 * while the 83821 is 32-bit only.
 *
 * Many cards also use National gigE transceivers, such as the
 * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
 * contains a full register description that applies to all of these
 * components:
 *
 * http://www.national.com/ds/DP/DP83861.pdf
 *
 * Written by Bill Paul <wpaul@bsdi.com>
 * BSDi Open Source Solutions
 */

/*
 * The NatSemi DP83820 and 83821 controllers are enhanced versions
 * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
 * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
 * matching buffers, one perfect address filter buffer and interrupt
 * moderation. The 83820 supports both 64-bit and 32-bit addressing
 * and data transfers: the 64-bit support can be toggled on or off
 * via software. This affects the size of certain fields in the DMA
 * descriptors.
 *
 * There are two bugs/misfeatures in the 83820/83821 that I have
 * discovered so far:
 *
 * - Receive buffers must be aligned on 64-bit boundaries, which means
 *   you must resort to copying data in order to fix up the payload
 *   alignment.
 *
 * - In order to transmit jumbo frames larger than 8170 bytes, you have
 *   to turn off transmit checksum offloading, because the chip can't
 *   compute the checksum on an outgoing frame unless it fits entirely
 *   within the TX FIFO, which is only 8192 bytes in size. If you have
 *   TX checksum offload enabled and you attempt to transmit a
 *   frame larger than 8170 bytes, the transmitter will wedge.
 *
 * To work around the latter problem, TX checksum offload is disabled
 * if the user selects an MTU larger than 8152 (8170 - 18).
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <vm/vm.h>              /* for vtophys */
#include <vm/pmap.h>            /* for vtophys */
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#define NGE_USEIOSPACE

#include <dev/nge/if_ngereg.h>

MODULE_DEPEND(nge, pci, 1, 1, 1);
MODULE_DEPEND(nge, ether, 1, 1, 1);
MODULE_DEPEND(nge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define NGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static struct nge_type nge_devs[] = {
	{ NGE_VENDORID, NGE_DEVICEID,
	    "National Semiconductor Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int nge_probe(device_t);
static int nge_attach(device_t);
static int nge_detach(device_t);

static int nge_newbuf(struct nge_softc *, struct nge_desc *, struct mbuf *);
static int nge_encap(struct nge_softc *, struct mbuf *, uint32_t *);
#ifdef NGE_FIXUP_RX
static __inline void nge_fixup_rx (struct mbuf *);
#endif
static void nge_rxeof(struct nge_softc *);
static void nge_txeof(struct nge_softc *);
static void nge_intr(void *);
static void nge_tick(void *);
static void nge_start(struct ifnet *);
static void nge_start_locked(struct ifnet *);
static int nge_ioctl(struct ifnet *, u_long, caddr_t);
static void nge_init(void *);
static void nge_init_locked(struct nge_softc *);
static void nge_stop(struct nge_softc *);
static void nge_watchdog(struct ifnet *);
static int nge_shutdown(device_t);
static int nge_ifmedia_upd(struct ifnet *);
static void nge_ifmedia_upd_locked(struct ifnet *);
static void nge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void nge_delay(struct nge_softc *);
static void nge_eeprom_idle(struct nge_softc *);
static void nge_eeprom_putbyte(struct nge_softc *, int);
static void nge_eeprom_getword(struct nge_softc *, int, uint16_t *);
static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int, int);

static void nge_mii_sync(struct nge_softc *);
static void nge_mii_send(struct nge_softc *, uint32_t, int);
static int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
static int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);

static int nge_miibus_readreg(device_t, int, int);
static int nge_miibus_writereg(device_t, int, int, int);
static void nge_miibus_statchg(device_t);

static void nge_setmulti(struct nge_softc *);
static void nge_reset(struct nge_softc *);
static int nge_list_rx_init(struct nge_softc *);
static int nge_list_tx_init(struct nge_softc *);

#ifdef NGE_USEIOSPACE
#define NGE_RES			SYS_RES_IOPORT
#define NGE_RID			NGE_PCI_LOIO
#else
#define NGE_RES			SYS_RES_MEMORY
#define NGE_RID			NGE_PCI_LOMEM
#endif

static device_method_t nge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nge_probe),
	DEVMETHOD(device_attach,	nge_attach),
	DEVMETHOD(device_detach,	nge_detach),
	DEVMETHOD(device_shutdown,	nge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nge_miibus_statchg),

	{ 0, 0 }
};

static driver_t nge_driver = {
	"nge",
	nge_methods,
	sizeof(struct nge_softc)
};

static devclass_t nge_devclass;

DRIVER_MODULE(nge, pci, nge_driver, nge_devclass, 0, 0);
DRIVER_MODULE(miibus, nge, miibus_driver, miibus_devclass, 0, 0);

#define NGE_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define NGE_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))

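/*
 * Crude busy-wait: issue a burst of dummy register reads to pace the
 * bit-banged EEPROM access routines below.
 */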
static void
nge_delay(struct nge_softc *sc)
{
	int idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, NGE_CSR);
}

static void
nge_eeprom_idle(struct nge_softc *sc)
{
	int i;

	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CLK);
	nge_delay(sc);

	for (i = 0; i < 25; i++) {
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
nge_eeprom_putbyte(struct nge_softc *sc, int addr)
{
	int d, i;

	d = addr | NGE_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(NGE_MEAR_EE_DIN);
		} else {
			SIO_CLR(NGE_MEAR_EE_DIN);
		}
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
nge_eeprom_getword(struct nge_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint16_t word = 0;

	/* Force EEPROM to idle state. */
	nge_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	nge_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
			word |= i;
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	nge_eeprom_idle(sc);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
nge_read_eeprom(struct nge_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int i;
	uint16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		nge_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}
367
368/*
369 * Sync the PHYs by setting data bit and strobing the clock 32 times.
370 */
371static void
372nge_mii_sync(struct nge_softc *sc)
373{
374	int i;
375
376	SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);
377
378	for (i = 0; i < 32; i++) {
379		SIO_SET(NGE_MEAR_MII_CLK);
380		DELAY(1);
381		SIO_CLR(NGE_MEAR_MII_CLK);
382		DELAY(1);
383	}
384}
385
386/*
387 * Clock a series of bits through the MII.
388 */
389static void
390nge_mii_send(struct nge_softc *sc, uint32_t bits, int cnt)
391{
392	int i;
393
394	SIO_CLR(NGE_MEAR_MII_CLK);
395
396	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
397                if (bits & i) {
398			SIO_SET(NGE_MEAR_MII_DATA);
399                } else {
400			SIO_CLR(NGE_MEAR_MII_DATA);
401                }
402		DELAY(1);
403		SIO_CLR(NGE_MEAR_MII_CLK);
404		DELAY(1);
405		SIO_SET(NGE_MEAR_MII_CLK);
406	}
407}
408
/*
 * Read a PHY register through the MII.
 */
412static int
413nge_mii_readreg(struct nge_softc *sc, struct nge_mii_frame *frame)
414{
415	int i, ack;
416
417	/*
418	 * Set up frame for RX.
419	 */
420	frame->mii_stdelim = NGE_MII_STARTDELIM;
421	frame->mii_opcode = NGE_MII_READOP;
422	frame->mii_turnaround = 0;
423	frame->mii_data = 0;
424
425	CSR_WRITE_4(sc, NGE_MEAR, 0);
426
427	/*
428 	 * Turn on data xmit.
429	 */
430	SIO_SET(NGE_MEAR_MII_DIR);
431
432	nge_mii_sync(sc);
433
434	/*
435	 * Send command/address info.
436	 */
437	nge_mii_send(sc, frame->mii_stdelim, 2);
438	nge_mii_send(sc, frame->mii_opcode, 2);
439	nge_mii_send(sc, frame->mii_phyaddr, 5);
440	nge_mii_send(sc, frame->mii_regaddr, 5);
441
442	/* Idle bit */
443	SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
444	DELAY(1);
445	SIO_SET(NGE_MEAR_MII_CLK);
446	DELAY(1);
447
448	/* Turn off xmit. */
449	SIO_CLR(NGE_MEAR_MII_DIR);
450	/* Check for ack */
451	SIO_CLR(NGE_MEAR_MII_CLK);
452	DELAY(1);
453	ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
454	SIO_SET(NGE_MEAR_MII_CLK);
455	DELAY(1);
456
457	/*
458	 * Now try reading data bits. If the ack failed, we still
459	 * need to clock through 16 cycles to keep the PHY(s) in sync.
460	 */
461	if (ack) {
462		for (i = 0; i < 16; i++) {
463			SIO_CLR(NGE_MEAR_MII_CLK);
464			DELAY(1);
465			SIO_SET(NGE_MEAR_MII_CLK);
466			DELAY(1);
467		}
468		goto fail;
469	}
470
471	for (i = 0x8000; i; i >>= 1) {
472		SIO_CLR(NGE_MEAR_MII_CLK);
473		DELAY(1);
474		if (!ack) {
475			if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
476				frame->mii_data |= i;
477			DELAY(1);
478		}
479		SIO_SET(NGE_MEAR_MII_CLK);
480		DELAY(1);
481	}
482
483fail:
484
485	SIO_CLR(NGE_MEAR_MII_CLK);
486	DELAY(1);
487	SIO_SET(NGE_MEAR_MII_CLK);
488	DELAY(1);
489
490	if (ack)
491		return (1);
492	return (0);
493}
494
495/*
496 * Write to a PHY register through the MII.
497 */
498static int
499nge_mii_writereg(struct nge_softc *sc, struct nge_mii_frame *frame)
500{
501
502	/*
503	 * Set up frame for TX.
504	 */
505
506	frame->mii_stdelim = NGE_MII_STARTDELIM;
507	frame->mii_opcode = NGE_MII_WRITEOP;
508	frame->mii_turnaround = NGE_MII_TURNAROUND;
509
510	/*
511 	 * Turn on data output.
512	 */
513	SIO_SET(NGE_MEAR_MII_DIR);
514
515	nge_mii_sync(sc);
516
517	nge_mii_send(sc, frame->mii_stdelim, 2);
518	nge_mii_send(sc, frame->mii_opcode, 2);
519	nge_mii_send(sc, frame->mii_phyaddr, 5);
520	nge_mii_send(sc, frame->mii_regaddr, 5);
521	nge_mii_send(sc, frame->mii_turnaround, 2);
522	nge_mii_send(sc, frame->mii_data, 16);
523
524	/* Idle bit. */
525	SIO_SET(NGE_MEAR_MII_CLK);
526	DELAY(1);
527	SIO_CLR(NGE_MEAR_MII_CLK);
528	DELAY(1);
529
530	/*
531	 * Turn off xmit.
532	 */
533	SIO_CLR(NGE_MEAR_MII_DIR);
534
535	return (0);
536}
537
538static int
539nge_miibus_readreg(device_t dev, int phy, int reg)
540{
541	struct nge_softc *sc;
542	struct nge_mii_frame frame;
543
544	sc = device_get_softc(dev);
545
546	bzero((char *)&frame, sizeof(frame));
547
548	frame.mii_phyaddr = phy;
549	frame.mii_regaddr = reg;
550	nge_mii_readreg(sc, &frame);
551
552	return (frame.mii_data);
553}
554
555static int
556nge_miibus_writereg(device_t dev, int phy, int reg, int data)
557{
558	struct nge_softc *sc;
559	struct nge_mii_frame frame;
560
561	sc = device_get_softc(dev);
562
563	bzero((char *)&frame, sizeof(frame));
564
565	frame.mii_phyaddr = phy;
566	frame.mii_regaddr = reg;
567	frame.mii_data = data;
568	nge_mii_writereg(sc, &frame);
569
570	return (0);
571}
572
573static void
574nge_miibus_statchg(device_t dev)
575{
576	int status;
577	struct nge_softc *sc;
578	struct mii_data *mii;
579
580	sc = device_get_softc(dev);
581	if (sc->nge_tbi) {
582		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
583		    == IFM_AUTO) {
584			status = CSR_READ_4(sc, NGE_TBI_ANLPAR);
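			/*
			 * Run full duplex if the link partner advertises
			 * it, or if its ability word reads back as zero.
			 */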
585			if (status == 0 || status & NGE_TBIANAR_FDX) {
586				NGE_SETBIT(sc, NGE_TX_CFG,
587				    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
588				NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
589			} else {
590				NGE_CLRBIT(sc, NGE_TX_CFG,
591				    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
592				NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
593			}
594
595		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
596			!= IFM_FDX) {
597			NGE_CLRBIT(sc, NGE_TX_CFG,
598			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
599			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
600		} else {
601			NGE_SETBIT(sc, NGE_TX_CFG,
602			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
603			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
604		}
605	} else {
606		mii = device_get_softc(sc->nge_miibus);
607
608		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
609		        NGE_SETBIT(sc, NGE_TX_CFG,
610			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
611			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
612		} else {
613			NGE_CLRBIT(sc, NGE_TX_CFG,
614			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
615			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
616		}
617
618		/* If we have a 1000Mbps link, set the mode_1000 bit. */
619		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
620		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
621			NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
622		} else {
623			NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
624		}
625	}
626}
627
628static void
629nge_setmulti(struct nge_softc *sc)
630{
631	struct ifnet *ifp;
632	struct ifmultiaddr *ifma;
633	uint32_t h = 0, i, filtsave;
634	int bit, index;
635
636	NGE_LOCK_ASSERT(sc);
637	ifp = sc->nge_ifp;
638
639	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
640		NGE_CLRBIT(sc, NGE_RXFILT_CTL,
641		    NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH);
642		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI);
643		return;
644	}
645
646	/*
647	 * We have to explicitly enable the multicast hash table
648	 * on the NatSemi chip if we want to use it, which we do.
649	 * We also have to tell it that we don't want to use the
650	 * hash table for matching unicast addresses.
651	 */
652	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH);
653	NGE_CLRBIT(sc, NGE_RXFILT_CTL,
654	    NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH);
655
656	filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL);
657
658	/* first, zot all the existing hash bits */
659	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
660		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
661		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
662	}
663
	/*
	 * From the 11 bits returned by the CRC routine, the top 7
	 * bits represent the 16-bit word in the mcast hash table
	 * that needs to be updated, and the lower 4 bits represent
	 * which bit within that word needs to be set.
	 */
670	IF_ADDR_LOCK(ifp);
671	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
672		if (ifma->ifma_addr->sa_family != AF_LINK)
673			continue;
674		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
675		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 21;
676		index = (h >> 4) & 0x7F;
677		bit = h & 0xF;
678		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
679		    NGE_FILTADDR_MCAST_LO + (index * 2));
680		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
681	}
682	IF_ADDR_UNLOCK(ifp);
683
684	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave);
685}
686
687static void
688nge_reset(struct nge_softc *sc)
689{
690	int i;
691
692	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);
693
694	for (i = 0; i < NGE_TIMEOUT; i++) {
695		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
696			break;
697	}
698
699	if (i == NGE_TIMEOUT)
700		device_printf(sc->nge_dev, "reset never completed\n");
701
702	/* Wait a little while for the chip to get its brains in order. */
703	DELAY(1000);
704
	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
709	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
710	CSR_WRITE_4(sc, NGE_CLKRUN, 0);
711}
712
713/*
714 * Probe for a NatSemi chip. Check the PCI vendor and device
715 * IDs against our list and return a device name if we find a match.
716 */
717static int
718nge_probe(device_t dev)
719{
720	struct nge_type *t;
721
722	t = nge_devs;
723
724	while (t->nge_name != NULL) {
725		if ((pci_get_vendor(dev) == t->nge_vid) &&
726		    (pci_get_device(dev) == t->nge_did)) {
727			device_set_desc(dev, t->nge_name);
728			return (BUS_PROBE_DEFAULT);
729		}
730		t++;
731	}
732
733	return (ENXIO);
734}
735
736/*
737 * Attach the interface. Allocate softc structures, do ifmedia
738 * setup and ethernet/BPF attach.
739 */
740static int
741nge_attach(device_t dev)
742{
743	u_char eaddr[ETHER_ADDR_LEN];
744	struct nge_softc *sc;
745	struct ifnet *ifp = NULL;
746	int error = 0, rid;
747
748	sc = device_get_softc(dev);
749	sc->nge_dev = dev;
750
751	NGE_LOCK_INIT(sc, device_get_nameunit(dev));
752	callout_init_mtx(&sc->nge_stat_ch, &sc->nge_mtx, 0);
753
754	/*
755	 * Map control/status registers.
756	 */
757	pci_enable_busmaster(dev);
758
759	rid = NGE_RID;
760	sc->nge_res = bus_alloc_resource_any(dev, NGE_RES, &rid, RF_ACTIVE);
761
762	if (sc->nge_res == NULL) {
763		device_printf(dev, "couldn't map ports/memory\n");
764		error = ENXIO;
765		goto fail;
766	}
767
768	sc->nge_btag = rman_get_bustag(sc->nge_res);
769	sc->nge_bhandle = rman_get_bushandle(sc->nge_res);
770
771	/* Allocate interrupt */
772	rid = 0;
773	sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
774	    RF_SHAREABLE | RF_ACTIVE);
775
776	if (sc->nge_irq == NULL) {
777		device_printf(dev, "couldn't map interrupt\n");
778		error = ENXIO;
779		goto fail;
780	}
781
782	/* Reset the adapter. */
783	nge_reset(sc);
784
785	/*
786	 * Get station address from the EEPROM.
787	 */
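	/* The station address is stored as three 16-bit words, in reverse word order. */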
788	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
789	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
790	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);
791
792	sc->nge_ldata = contigmalloc(sizeof(struct nge_list_data), M_DEVBUF,
793	    M_NOWAIT|M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);
794
795	if (sc->nge_ldata == NULL) {
796		device_printf(dev, "no memory for list buffers!\n");
797		error = ENXIO;
798		goto fail;
799	}
800
801	ifp = sc->nge_ifp = if_alloc(IFT_ETHER);
802	if (ifp == NULL) {
803		device_printf(dev, "can not if_alloc()\n");
804		error = ENOSPC;
805		goto fail;
806	}
807	ifp->if_softc = sc;
808	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
809	ifp->if_mtu = ETHERMTU;
810	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
811	ifp->if_ioctl = nge_ioctl;
812	ifp->if_start = nge_start;
813	ifp->if_watchdog = nge_watchdog;
814	ifp->if_init = nge_init;
815	ifp->if_snd.ifq_maxlen = NGE_TX_LIST_CNT - 1;
816	ifp->if_hwassist = NGE_CSUM_FEATURES;
817	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING;
818	ifp->if_capenable = ifp->if_capabilities;
819#ifdef DEVICE_POLLING
820	ifp->if_capabilities |= IFCAP_POLLING;
821#endif
822
823	/*
824	 * Do MII setup.
825	 */
826	/* XXX: leaked on error */
827	if (mii_phy_probe(dev, &sc->nge_miibus,
828			  nge_ifmedia_upd, nge_ifmedia_sts)) {
829		if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) {
830			sc->nge_tbi = 1;
831			device_printf(dev, "Using TBI\n");
832
833			sc->nge_miibus = dev;
834
835			ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_upd,
836				nge_ifmedia_sts);
837#define	ADD(m, c)	ifmedia_add(&sc->nge_ifmedia, (m), (c), NULL)
838			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, 0), 0);
839			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, 0, 0), 0);
840			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX, 0),0);
841			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0), 0);
842#undef ADD
843			device_printf(dev, " 1000baseSX, 1000baseSX-FDX, auto\n");
844
845			ifmedia_set(&sc->nge_ifmedia,
846				IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0));
847
848			CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
849				| NGE_GPIO_GP4_OUT
850				| NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
851				| NGE_GPIO_GP3_OUTENB
852				| NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN);
853
854		} else {
855			device_printf(dev, "MII without any PHY!\n");
856			error = ENXIO;
857			goto fail;
858		}
859	}
860
861	/*
862	 * Call MI attach routine.
863	 */
864	ether_ifattach(ifp, eaddr);
865
866	/*
867	 * Hookup IRQ last.
868	 */
869	error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET | INTR_MPSAFE,
870	    NULL, nge_intr, sc, &sc->nge_intrhand);
871	if (error) {
872		device_printf(dev, "couldn't set up irq\n");
873		goto fail;
874	}
875
876	return (0);
877
878fail:
879	if (sc->nge_ldata)
880		contigfree(sc->nge_ldata,
881		  sizeof(struct nge_list_data), M_DEVBUF);
882	if (ifp)
883		if_free(ifp);
884	if (sc->nge_irq)
885		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
886	if (sc->nge_res)
887		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
888	NGE_LOCK_DESTROY(sc);
889	return (error);
890}
891
892static int
893nge_detach(device_t dev)
894{
895	struct nge_softc *sc;
896	struct ifnet *ifp;
897
898	sc = device_get_softc(dev);
899	ifp = sc->nge_ifp;
900
901#ifdef DEVICE_POLLING
902	if (ifp->if_capenable & IFCAP_POLLING)
903		ether_poll_deregister(ifp);
904#endif
905	NGE_LOCK(sc);
906	nge_reset(sc);
907	nge_stop(sc);
908	NGE_UNLOCK(sc);
909	callout_drain(&sc->nge_stat_ch);
910	ether_ifdetach(ifp);
911
912	bus_generic_detach(dev);
913	if (!sc->nge_tbi) {
914		device_delete_child(dev, sc->nge_miibus);
915	}
916	bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
917	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
918	bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
919
920	contigfree(sc->nge_ldata, sizeof(struct nge_list_data), M_DEVBUF);
921	if_free(ifp);
922
923	NGE_LOCK_DESTROY(sc);
924
925	return (0);
926}
927
928/*
929 * Initialize the transmit descriptors.
930 */
931static int
932nge_list_tx_init(struct nge_softc *sc)
933{
934	struct nge_list_data *ld;
935	struct nge_ring_data *cd;
936	int i;
937
938	cd = &sc->nge_cdata;
939	ld = sc->nge_ldata;
940
941	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
942		if (i == (NGE_TX_LIST_CNT - 1)) {
943			ld->nge_tx_list[i].nge_nextdesc =
944			    &ld->nge_tx_list[0];
945			ld->nge_tx_list[i].nge_next =
946			    vtophys(&ld->nge_tx_list[0]);
947		} else {
948			ld->nge_tx_list[i].nge_nextdesc =
949			    &ld->nge_tx_list[i + 1];
950			ld->nge_tx_list[i].nge_next =
951			    vtophys(&ld->nge_tx_list[i + 1]);
952		}
953		ld->nge_tx_list[i].nge_mbuf = NULL;
954		ld->nge_tx_list[i].nge_ptr = 0;
955		ld->nge_tx_list[i].nge_ctl = 0;
956	}
957
958	cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0;
959
960	return (0);
961}
962
963
964/*
965 * Initialize the RX descriptors and allocate mbufs for them. Note that
966 * we arrange the descriptors in a closed ring, so that the last descriptor
967 * points back to the first.
968 */
969static int
970nge_list_rx_init(struct nge_softc *sc)
971{
972	struct nge_list_data *ld;
973	struct nge_ring_data *cd;
974	int i;
975
976	ld = sc->nge_ldata;
977	cd = &sc->nge_cdata;
978
979	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
980		if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS)
981			return (ENOBUFS);
982		if (i == (NGE_RX_LIST_CNT - 1)) {
983			ld->nge_rx_list[i].nge_nextdesc =
984			    &ld->nge_rx_list[0];
985			ld->nge_rx_list[i].nge_next =
986			    vtophys(&ld->nge_rx_list[0]);
987		} else {
988			ld->nge_rx_list[i].nge_nextdesc =
989			    &ld->nge_rx_list[i + 1];
990			ld->nge_rx_list[i].nge_next =
991			    vtophys(&ld->nge_rx_list[i + 1]);
992		}
993	}
994
995	cd->nge_rx_prod = 0;
996	sc->nge_head = sc->nge_tail = NULL;
997
998	return (0);
999}
1000
1001/*
1002 * Initialize an RX descriptor and attach an MBUF cluster.
1003 */
1004static int
1005nge_newbuf(struct nge_softc *sc, struct nge_desc *c, struct mbuf *m)
1006{
1007
1008	if (m == NULL) {
1009		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1010		if (m == NULL)
1011			return (ENOBUFS);
1012	} else
1013		m->m_data = m->m_ext.ext_buf;
1014
1015	m->m_len = m->m_pkthdr.len = MCLBYTES;
1016
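	/*
	 * Trim 8 bytes from the front of the cluster; the buffer stays
	 * 64-bit aligned as the chip requires, and the headroom gives
	 * nge_fixup_rx() room to shift the frame into.
	 */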
1017	m_adj(m, sizeof(uint64_t));
1018
1019	c->nge_mbuf = m;
1020	c->nge_ptr = vtophys(mtod(m, caddr_t));
1021	c->nge_ctl = m->m_len;
1022	c->nge_extsts = 0;
1023
1024	return (0);
1025}
1026
1027#ifdef NGE_FIXUP_RX
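/*
 * Shift a received frame back by ETHER_ALIGN (2) bytes, one 16-bit word
 * at a time, so that the IP header ends up 32-bit aligned.  This is the
 * copy forced on us by the chip's 64-bit RX buffer alignment requirement.
 */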
1028static __inline void
1029nge_fixup_rx(struct mbuf *m)
1030{
1031        int i;
1032        uint16_t *src, *dst;
1033
1034	src = mtod(m, uint16_t *);
1035	dst = src - 1;
1036
1037	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1038		*dst++ = *src++;
1039
1040	m->m_data -= ETHER_ALIGN;
1041
1042	return;
1043}
1044#endif
1045
1046/*
1047 * A frame has been uploaded: pass the resulting mbuf chain up to
1048 * the higher level protocols.
1049 */
1050static void
1051nge_rxeof(struct nge_softc *sc)
1052{
1053        struct mbuf *m;
1054        struct ifnet *ifp;
1055	struct nge_desc *cur_rx;
1056	int i, total_len = 0;
1057	uint32_t rxstat;
1058
1059	NGE_LOCK_ASSERT(sc);
1060	ifp = sc->nge_ifp;
1061	i = sc->nge_cdata.nge_rx_prod;
1062
1063	while (NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) {
1064		uint32_t		extsts;
1065
1066#ifdef DEVICE_POLLING
1067		if (ifp->if_capenable & IFCAP_POLLING) {
1068			if (sc->rxcycles <= 0)
1069				break;
1070			sc->rxcycles--;
1071		}
1072#endif
1073
1074		cur_rx = &sc->nge_ldata->nge_rx_list[i];
1075		rxstat = cur_rx->nge_rxstat;
1076		extsts = cur_rx->nge_extsts;
1077		m = cur_rx->nge_mbuf;
1078		cur_rx->nge_mbuf = NULL;
1079		total_len = NGE_RXBYTES(cur_rx);
1080		NGE_INC(i, NGE_RX_LIST_CNT);
1081
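		/*
		 * A frame that spans multiple descriptors has the MORE
		 * bit set in every descriptor except the last; collect
		 * the pieces on the nge_head/nge_tail chain until the
		 * final descriptor arrives.
		 */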
1082		if (rxstat & NGE_CMDSTS_MORE) {
1083			m->m_len = total_len;
1084			if (sc->nge_head == NULL) {
1085				m->m_pkthdr.len = total_len;
1086				sc->nge_head = sc->nge_tail = m;
1087			} else {
1088				m->m_flags &= ~M_PKTHDR;
1089				sc->nge_head->m_pkthdr.len += total_len;
1090				sc->nge_tail->m_next = m;
1091				sc->nge_tail = m;
1092			}
1093			nge_newbuf(sc, cur_rx, NULL);
1094			continue;
1095		}
1096
1097		/*
1098		 * If an error occurs, update stats, clear the
1099		 * status word and leave the mbuf cluster in place:
1100		 * it should simply get re-used next time this descriptor
1101	 	 * comes up in the ring.
1102		 */
1103		if (!(rxstat & NGE_CMDSTS_PKT_OK)) {
1104			ifp->if_ierrors++;
1105			if (sc->nge_head != NULL) {
1106				m_freem(sc->nge_head);
1107				sc->nge_head = sc->nge_tail = NULL;
1108			}
1109			nge_newbuf(sc, cur_rx, m);
1110			continue;
1111		}
1112
		/* Try to conjure up a replacement mbuf. */
1114
1115		if (nge_newbuf(sc, cur_rx, NULL)) {
1116			ifp->if_ierrors++;
1117			if (sc->nge_head != NULL) {
1118				m_freem(sc->nge_head);
1119				sc->nge_head = sc->nge_tail = NULL;
1120			}
1121			nge_newbuf(sc, cur_rx, m);
1122			continue;
1123		}
1124
1125		if (sc->nge_head != NULL) {
1126			m->m_len = total_len;
1127			m->m_flags &= ~M_PKTHDR;
1128			sc->nge_tail->m_next = m;
1129			m = sc->nge_head;
1130			m->m_pkthdr.len += total_len;
1131			sc->nge_head = sc->nge_tail = NULL;
1132		} else
1133			m->m_pkthdr.len = m->m_len = total_len;
1134
1135		/*
1136		 * Ok. NatSemi really screwed up here. This is the
1137		 * only gigE chip I know of with alignment constraints
1138		 * on receive buffers. RX buffers must be 64-bit aligned.
1139		 */
1140		/*
1141		 * By popular demand, ignore the alignment problems
1142		 * on the Intel x86 platform. The performance hit
1143		 * incurred due to unaligned accesses is much smaller
1144		 * than the hit produced by forcing buffer copies all
1145		 * the time, especially with jumbo frames. We still
1146		 * need to fix up the alignment everywhere else though.
1147		 */
1148#ifdef NGE_FIXUP_RX
1149		nge_fixup_rx(m);
1150#endif
1151
1152		ifp->if_ipackets++;
1153		m->m_pkthdr.rcvif = ifp;
1154
1155		/* Do IP checksum checking. */
1156		if (extsts & NGE_RXEXTSTS_IPPKT)
1157			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1158		if (!(extsts & NGE_RXEXTSTS_IPCSUMERR))
1159			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1160		if ((extsts & NGE_RXEXTSTS_TCPPKT &&
1161		    !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) ||
1162		    (extsts & NGE_RXEXTSTS_UDPPKT &&
1163		    !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) {
1164			m->m_pkthdr.csum_flags |=
1165			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
1166			m->m_pkthdr.csum_data = 0xffff;
1167		}
1168
		/*
		 * If we received a packet with a vlan tag, record the
		 * tag in the mbuf so the vlan layer can demultiplex it.
		 */
1173		if (extsts & NGE_RXEXTSTS_VLANPKT) {
1174			m->m_pkthdr.ether_vtag =
1175			    ntohs(extsts & NGE_RXEXTSTS_VTCI);
1176			m->m_flags |= M_VLANTAG;
1177		}
1178		NGE_UNLOCK(sc);
1179		(*ifp->if_input)(ifp, m);
1180		NGE_LOCK(sc);
1181	}
1182
1183	sc->nge_cdata.nge_rx_prod = i;
1184}
1185
1186/*
1187 * A frame was downloaded to the chip. It's safe for us to clean up
1188 * the list buffers.
1189 */
1190
1191static void
1192nge_txeof(struct nge_softc *sc)
1193{
1194	struct nge_desc *cur_tx;
1195	struct ifnet *ifp;
1196	uint32_t idx;
1197
1198	NGE_LOCK_ASSERT(sc);
1199	ifp = sc->nge_ifp;
1200
1201	/*
1202	 * Go through our tx list and free mbufs for those
1203	 * frames that have been transmitted.
1204	 */
1205	idx = sc->nge_cdata.nge_tx_cons;
1206	while (idx != sc->nge_cdata.nge_tx_prod) {
1207		cur_tx = &sc->nge_ldata->nge_tx_list[idx];
1208
1209		if (NGE_OWNDESC(cur_tx))
1210			break;
1211
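		/*
		 * Intermediate fragments of a frame have the MORE bit
		 * set; only the last descriptor carries the completion
		 * status and the mbuf chain to free.
		 */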
1212		if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) {
1213			sc->nge_cdata.nge_tx_cnt--;
1214			NGE_INC(idx, NGE_TX_LIST_CNT);
1215			continue;
1216		}
1217
1218		if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) {
1219			ifp->if_oerrors++;
1220			if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS)
1221				ifp->if_collisions++;
1222			if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL)
1223				ifp->if_collisions++;
1224		}
1225
1226		ifp->if_collisions +=
1227		    (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16;
1228
1229		ifp->if_opackets++;
1230		if (cur_tx->nge_mbuf != NULL) {
1231			m_freem(cur_tx->nge_mbuf);
1232			cur_tx->nge_mbuf = NULL;
1233			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1234		}
1235
1236		sc->nge_cdata.nge_tx_cnt--;
1237		NGE_INC(idx, NGE_TX_LIST_CNT);
1238	}
1239
1240	sc->nge_cdata.nge_tx_cons = idx;
1241
1242	if (idx == sc->nge_cdata.nge_tx_prod)
1243		ifp->if_timer = 0;
1244}
1245
1246static void
1247nge_tick(void *xsc)
1248{
1249	struct nge_softc *sc;
1250	struct mii_data *mii;
1251	struct ifnet *ifp;
1252
1253	sc = xsc;
1254	NGE_LOCK_ASSERT(sc);
1255	ifp = sc->nge_ifp;
1256
1257	if (sc->nge_tbi) {
1258		if (!sc->nge_link) {
1259			if (CSR_READ_4(sc, NGE_TBI_BMSR)
1260			    & NGE_TBIBMSR_ANEG_DONE) {
1261				if (bootverbose)
1262					device_printf(sc->nge_dev,
1263					    "gigabit link up\n");
1264				nge_miibus_statchg(sc->nge_miibus);
1265				sc->nge_link++;
1266				if (ifp->if_snd.ifq_head != NULL)
1267					nge_start_locked(ifp);
1268			}
1269		}
1270	} else {
1271		mii = device_get_softc(sc->nge_miibus);
1272		mii_tick(mii);
1273
1274		if (!sc->nge_link) {
1275			if (mii->mii_media_status & IFM_ACTIVE &&
1276			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1277				sc->nge_link++;
1278				if (IFM_SUBTYPE(mii->mii_media_active)
1279				    == IFM_1000_T && bootverbose)
1280					device_printf(sc->nge_dev,
1281					    "gigabit link up\n");
1282				if (ifp->if_snd.ifq_head != NULL)
1283					nge_start_locked(ifp);
1284			}
1285		}
1286	}
1287	callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc);
1288}
1289
1290#ifdef DEVICE_POLLING
1291static poll_handler_t nge_poll;
1292
1293static void
1294nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1295{
1296	struct nge_softc *sc = ifp->if_softc;
1297
1298	NGE_LOCK(sc);
1299	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1300		NGE_UNLOCK(sc);
1301		return;
1302	}
1303
1304	/*
1305	 * On the nge, reading the status register also clears it.
1306	 * So before returning to intr mode we must make sure that all
1307	 * possible pending sources of interrupts have been served.
1308	 * In practice this means run to completion the *eof routines,
1309	 * and then call the interrupt routine
1310	 */
1311	sc->rxcycles = count;
1312	nge_rxeof(sc);
1313	nge_txeof(sc);
1314	if (ifp->if_snd.ifq_head != NULL)
1315		nge_start_locked(ifp);
1316
1317	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
1318		uint32_t	status;
1319
1320		/* Reading the ISR register clears all interrupts. */
1321		status = CSR_READ_4(sc, NGE_ISR);
1322
1323		if (status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW))
1324			nge_rxeof(sc);
1325
1326		if (status & (NGE_ISR_RX_IDLE))
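		/* If the receiver has gone idle, kick it to resume. */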
1327			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1328
1329		if (status & NGE_ISR_SYSERR) {
1330			nge_reset(sc);
1331			nge_init_locked(sc);
1332		}
1333	}
1334	NGE_UNLOCK(sc);
1335}
1336#endif /* DEVICE_POLLING */
1337
1338static void
1339nge_intr(void *arg)
1340{
1341	struct nge_softc *sc;
1342	struct ifnet *ifp;
1343	uint32_t status;
1344
1345	sc = arg;
1346	ifp = sc->nge_ifp;
1347
1348	NGE_LOCK(sc);
1349#ifdef DEVICE_POLLING
1350	if (ifp->if_capenable & IFCAP_POLLING) {
1351		NGE_UNLOCK(sc);
1352		return;
1353	}
1354#endif
1355
	/* Suppress unwanted interrupts. */
1357	if (!(ifp->if_flags & IFF_UP)) {
1358		nge_stop(sc);
1359		NGE_UNLOCK(sc);
1360		return;
1361	}
1362
1363	/* Disable interrupts. */
1364	CSR_WRITE_4(sc, NGE_IER, 0);
1365
1366	/* Data LED on for TBI mode */
1367	if (sc->nge_tbi)
1368		 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1369			     | NGE_GPIO_GP3_OUT);
1370
1371	for (;;) {
1372		/* Reading the ISR register clears all interrupts. */
1373		status = CSR_READ_4(sc, NGE_ISR);
1374
1375		if ((status & NGE_INTRS) == 0)
1376			break;
1377
1378		if ((status & NGE_ISR_TX_DESC_OK) ||
1379		    (status & NGE_ISR_TX_ERR) ||
1380		    (status & NGE_ISR_TX_OK) ||
1381		    (status & NGE_ISR_TX_IDLE))
1382			nge_txeof(sc);
1383
1384		if ((status & NGE_ISR_RX_DESC_OK) ||
1385		    (status & NGE_ISR_RX_ERR) ||
1386		    (status & NGE_ISR_RX_OFLOW) ||
1387		    (status & NGE_ISR_RX_FIFO_OFLOW) ||
1388		    (status & NGE_ISR_RX_IDLE) ||
1389		    (status & NGE_ISR_RX_OK))
1390			nge_rxeof(sc);
1391
1392		if ((status & NGE_ISR_RX_IDLE))
1393			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1394
1395		if (status & NGE_ISR_SYSERR) {
1396			nge_reset(sc);
1397			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1398			nge_init_locked(sc);
1399		}
1400
1401#if 0
1402		/*
1403		 * XXX: nge_tick() is not ready to be called this way
1404		 * it screws up the aneg timeout because mii_tick() is
1405		 * only to be called once per second.
1406		 */
1407		if (status & NGE_IMR_PHY_INTR) {
1408			sc->nge_link = 0;
1409			nge_tick(sc);
1410		}
1411#endif
1412	}
1413
1414	/* Re-enable interrupts. */
1415	CSR_WRITE_4(sc, NGE_IER, 1);
1416
1417	if (ifp->if_snd.ifq_head != NULL)
1418		nge_start_locked(ifp);
1419
1420	/* Data LED off for TBI mode */
1421
1422	if (sc->nge_tbi)
1423		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1424			    & ~NGE_GPIO_GP3_OUT);
1425
1426	NGE_UNLOCK(sc);
1427}
1428
1429/*
1430 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1431 * pointers to the fragment pointers.
1432 */
1433static int
1434nge_encap(struct nge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
1435{
1436	struct nge_desc *f = NULL;
1437	struct mbuf *m;
1438	int frag, cur, cnt = 0;
1439
1440	/*
1441 	 * Start packing the mbufs in this chain into
1442	 * the fragment pointers. Stop when we run out
1443 	 * of fragments or hit the end of the mbuf chain.
1444	 */
1445	m = m_head;
1446	cur = frag = *txidx;
1447
1448	for (m = m_head; m != NULL; m = m->m_next) {
1449		if (m->m_len != 0) {
1450			if ((NGE_TX_LIST_CNT -
1451			    (sc->nge_cdata.nge_tx_cnt + cnt)) < 2)
1452				return (ENOBUFS);
1453			f = &sc->nge_ldata->nge_tx_list[frag];
1454			f->nge_ctl = NGE_CMDSTS_MORE | m->m_len;
1455			f->nge_ptr = vtophys(mtod(m, vm_offset_t));
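			/*
			 * Hand every descriptor except the first one to
			 * the chip right away; the first descriptor's OWN
			 * bit is set only after the whole chain has been
			 * built, so the chip never sees a partial frame.
			 */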
1456			if (cnt != 0)
1457				f->nge_ctl |= NGE_CMDSTS_OWN;
1458			cur = frag;
1459			NGE_INC(frag, NGE_TX_LIST_CNT);
1460			cnt++;
1461		}
1462	}
1463
1464	if (m != NULL)
1465		return (ENOBUFS);
1466
1467	sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0;
1468	if (m_head->m_pkthdr.csum_flags) {
1469		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
1470			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1471			    NGE_TXEXTSTS_IPCSUM;
1472		if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
1473			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1474			    NGE_TXEXTSTS_TCPCSUM;
1475		if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
1476			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1477			    NGE_TXEXTSTS_UDPCSUM;
1478	}
1479
1480	if (m_head->m_flags & M_VLANTAG) {
1481		sc->nge_ldata->nge_tx_list[cur].nge_extsts |=
1482		    (NGE_TXEXTSTS_VLANPKT|htons(m_head->m_pkthdr.ether_vtag));
1483	}
1484
1485	sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head;
1486	sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE;
1487	sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN;
1488	sc->nge_cdata.nge_tx_cnt += cnt;
1489	*txidx = frag;
1490
1491	return (0);
1492}
1493
1494/*
1495 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1496 * to the mbuf data regions directly in the transmit lists. We also save a
1497 * copy of the pointers since the transmit list fragment pointers are
1498 * physical addresses.
1499 */
1500
1501static void
1502nge_start(struct ifnet *ifp)
1503{
1504	struct nge_softc *sc;
1505
1506	sc = ifp->if_softc;
1507	NGE_LOCK(sc);
1508	nge_start_locked(ifp);
1509	NGE_UNLOCK(sc);
1510}
1511
1512static void
1513nge_start_locked(struct ifnet *ifp)
1514{
1515	struct nge_softc *sc;
1516	struct mbuf *m_head = NULL;
1517	uint32_t idx;
1518
1519	sc = ifp->if_softc;
1520
1521	if (!sc->nge_link)
1522		return;
1523
1524	idx = sc->nge_cdata.nge_tx_prod;
1525
1526	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
1527		return;
1528
1529	while (sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) {
1530		IF_DEQUEUE(&ifp->if_snd, m_head);
1531		if (m_head == NULL)
1532			break;
1533
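		/*
		 * If the TX ring is too full to hold this frame, put it
		 * back at the head of the send queue and mark the
		 * interface busy until descriptors are reclaimed.
		 */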
1534		if (nge_encap(sc, m_head, &idx)) {
1535			IF_PREPEND(&ifp->if_snd, m_head);
1536			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1537			break;
1538		}
1539
1540		/*
1541		 * If there's a BPF listener, bounce a copy of this frame
1542		 * to him.
1543		 */
1544		ETHER_BPF_MTAP(ifp, m_head);
1545
1546	}
1547
1548	/* Transmit */
1549	sc->nge_cdata.nge_tx_prod = idx;
1550	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);
1551
1552	/*
1553	 * Set a timeout in case the chip goes out to lunch.
1554	 */
1555	ifp->if_timer = 5;
1556}
1557
1558static void
1559nge_init(void *xsc)
1560{
1561	struct nge_softc *sc = xsc;
1562
1563	NGE_LOCK(sc);
1564	nge_init_locked(sc);
1565	NGE_UNLOCK(sc);
1566}
1567
1568static void
1569nge_init_locked(struct nge_softc *sc)
1570{
1571	struct ifnet *ifp = sc->nge_ifp;
1572	struct mii_data *mii;
1573
1574	NGE_LOCK_ASSERT(sc);
1575
1576	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1577		return;
1578
1579	/*
1580	 * Cancel pending I/O and free all RX/TX buffers.
1581	 */
1582	nge_stop(sc);
1583
1584	if (sc->nge_tbi) {
1585		mii = NULL;
1586	} else {
1587		mii = device_get_softc(sc->nge_miibus);
1588	}
1589
1590	/* Set MAC address */
1591	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
1592	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1593	    ((uint16_t *)IF_LLADDR(sc->nge_ifp))[0]);
1594	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
1595	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1596	    ((uint16_t *)IF_LLADDR(sc->nge_ifp))[1]);
1597	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
1598	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1599	    ((uint16_t *)IF_LLADDR(sc->nge_ifp))[2]);
1600
1601	/* Init circular RX list. */
1602	if (nge_list_rx_init(sc) == ENOBUFS) {
1603		device_printf(sc->nge_dev, "initialization failed: no "
1604			"memory for rx buffers\n");
1605		nge_stop(sc);
1606		return;
1607	}
1608
1609	/*
1610	 * Init tx descriptors.
1611	 */
1612	nge_list_tx_init(sc);
1613
1614	/*
1615	 * For the NatSemi chip, we have to explicitly enable the
1616	 * reception of ARP frames, as well as turn on the 'perfect
1617	 * match' filter where we store the station address, otherwise
1618	 * we won't receive unicasts meant for this host.
1619	 */
1620	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
1621	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);
1622
1623	 /* If we want promiscuous mode, set the allframes bit. */
1624	if (ifp->if_flags & IFF_PROMISC) {
1625		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1626	} else {
1627		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1628	}
1629
1630	/*
1631	 * Set the capture broadcast bit to capture broadcast frames.
1632	 */
1633	if (ifp->if_flags & IFF_BROADCAST) {
1634		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1635	} else {
1636		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1637	}
1638
1639	/*
1640	 * Load the multicast filter.
1641	 */
1642	nge_setmulti(sc);
1643
1644	/* Turn the receive filter on */
1645	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);
1646
1647	/*
1648	 * Load the address of the RX and TX lists.
1649	 */
1650	CSR_WRITE_4(sc, NGE_RX_LISTPTR,
1651	    vtophys(&sc->nge_ldata->nge_rx_list[0]));
1652	CSR_WRITE_4(sc, NGE_TX_LISTPTR,
1653	    vtophys(&sc->nge_ldata->nge_tx_list[0]));
1654
1655	/* Set RX configuration */
1656	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);
1657	/*
1658	 * Enable hardware checksum validation for all IPv4
1659	 * packets, do not reject packets with bad checksums.
1660	 */
1661	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);
1662
1663	/*
1664	 * Tell the chip to detect and strip VLAN tag info from
1665	 * received frames. The tag will be provided in the extsts
1666	 * field in the RX descriptors.
1667	 */
1668	NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL,
1669	    NGE_VIPRXCTL_TAG_DETECT_ENB|NGE_VIPRXCTL_TAG_STRIP_ENB);
1670
1671	/* Set TX configuration */
1672	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);
1673
1674	/*
1675	 * Enable TX IPv4 checksumming on a per-packet basis.
1676	 */
1677	CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT);
1678
1679	/*
1680	 * Tell the chip to insert VLAN tags on a per-packet basis as
1681	 * dictated by the code in the frame encapsulation routine.
1682	 */
1683	NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);
1684
1685	/* Set full/half duplex mode. */
1686	if (sc->nge_tbi) {
1687		if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
1688		    == IFM_FDX) {
1689			NGE_SETBIT(sc, NGE_TX_CFG,
1690			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1691			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1692		} else {
1693			NGE_CLRBIT(sc, NGE_TX_CFG,
1694			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1695			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1696		}
1697	} else {
1698		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1699			NGE_SETBIT(sc, NGE_TX_CFG,
1700			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1701			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1702		} else {
1703			NGE_CLRBIT(sc, NGE_TX_CFG,
1704			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1705			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1706		}
1707	}
1708
1709	nge_tick(sc);
1710
1711	/*
1712	 * Enable the delivery of PHY interrupts based on
1713	 * link/speed/duplex status changes. Also enable the
1714	 * extsts field in the DMA descriptors (needed for
1715	 * TCP/IP checksum offload on transmit).
1716	 */
1717	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD|
1718	    NGE_CFG_PHYINTR_LNK|NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB);
1719
1720	/*
1721	 * Configure interrupt holdoff (moderation). We can
1722	 * have the chip delay interrupt delivery for a certain
1723	 * period. Units are in 100us, and the max setting
1724	 * is 25500us (0xFF x 100us). Default is a 100us holdoff.
1725	 */
1726	CSR_WRITE_4(sc, NGE_IHR, 0x01);
1727
1728	/*
1729	 * Enable interrupts.
1730	 */
1731	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
1732#ifdef DEVICE_POLLING
1733	/*
1734	 * ... only enable interrupts if we are not polling, make sure
1735	 * they are off otherwise.
1736	 */
1737	if (ifp->if_capenable & IFCAP_POLLING)
1738		CSR_WRITE_4(sc, NGE_IER, 0);
1739	else
1740#endif
1741	CSR_WRITE_4(sc, NGE_IER, 1);
1742
1743	/* Enable receiver and transmitter. */
1744	NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
1745	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1746
1747	nge_ifmedia_upd_locked(ifp);
1748
1749	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1750	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1751}
1752
1753/*
1754 * Set media options.
1755 */
1756static int
1757nge_ifmedia_upd(struct ifnet *ifp)
1758{
1759	struct nge_softc *sc;
1760
1761	sc = ifp->if_softc;
1762	NGE_LOCK(sc);
1763	nge_ifmedia_upd_locked(ifp);
1764	NGE_UNLOCK(sc);
1765	return (0);
1766}
1767
1768static void
1769nge_ifmedia_upd_locked(struct ifnet *ifp)
1770{
1771	struct nge_softc *sc;
1772	struct mii_data *mii;
1773
1774	sc = ifp->if_softc;
1775	NGE_LOCK_ASSERT(sc);
1776
1777	if (sc->nge_tbi) {
1778		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
1779		     == IFM_AUTO) {
1780			CSR_WRITE_4(sc, NGE_TBI_ANAR,
1781				CSR_READ_4(sc, NGE_TBI_ANAR)
1782					| NGE_TBIANAR_HDX | NGE_TBIANAR_FDX
1783					| NGE_TBIANAR_PS1 | NGE_TBIANAR_PS2);
1784			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG
1785				| NGE_TBIBMCR_RESTART_ANEG);
1786			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG);
1787		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media
1788			    & IFM_GMASK) == IFM_FDX) {
1789			NGE_SETBIT(sc, NGE_TX_CFG,
1790			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1791			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1792
1793			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
1794			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
1795		} else {
1796			NGE_CLRBIT(sc, NGE_TX_CFG,
1797			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
1798			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
1799
1800			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
1801			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
1802		}
1803
1804		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1805			    & ~NGE_GPIO_GP3_OUT);
1806	} else {
1807		mii = device_get_softc(sc->nge_miibus);
1808		sc->nge_link = 0;
1809		if (mii->mii_instance) {
1810			struct mii_softc	*miisc;
1811
1812			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1813				mii_phy_reset(miisc);
1814		}
1815		mii_mediachg(mii);
1816	}
1817}
1818
1819/*
1820 * Report current media status.
1821 */
1822static void
1823nge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1824{
1825	struct nge_softc *sc;
1826	struct mii_data *mii;
1827
1828	sc = ifp->if_softc;
1829
1830	NGE_LOCK(sc);
1831	if (sc->nge_tbi) {
1832		ifmr->ifm_status = IFM_AVALID;
1833		ifmr->ifm_active = IFM_ETHER;
1834
1835		if (CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) {
1836			ifmr->ifm_status |= IFM_ACTIVE;
1837		}
1838		if (CSR_READ_4(sc, NGE_TBI_BMCR) & NGE_TBIBMCR_LOOPBACK)
1839			ifmr->ifm_active |= IFM_LOOP;
		if (!(CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE)) {
1841			ifmr->ifm_active |= IFM_NONE;
1842			ifmr->ifm_status = 0;
1843			NGE_UNLOCK(sc);
1844			return;
1845		}
1846		ifmr->ifm_active |= IFM_1000_SX;
1847		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
1848		    == IFM_AUTO) {
1849			ifmr->ifm_active |= IFM_AUTO;
1850			if (CSR_READ_4(sc, NGE_TBI_ANLPAR)
1851			    & NGE_TBIANAR_FDX) {
1852				ifmr->ifm_active |= IFM_FDX;
		} else if (CSR_READ_4(sc, NGE_TBI_ANLPAR)
1854				  & NGE_TBIANAR_HDX) {
1855				ifmr->ifm_active |= IFM_HDX;
1856			}
1857		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
1858			== IFM_FDX)
1859			ifmr->ifm_active |= IFM_FDX;
1860		else
1861			ifmr->ifm_active |= IFM_HDX;
1862
1863	} else {
1864		mii = device_get_softc(sc->nge_miibus);
1865		mii_pollstat(mii);
1866		ifmr->ifm_active = mii->mii_media_active;
1867		ifmr->ifm_status = mii->mii_media_status;
1868	}
1869	NGE_UNLOCK(sc);
1870}
1871
1872static int
1873nge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1874{
1875	struct nge_softc *sc = ifp->if_softc;
1876	struct ifreq *ifr = (struct ifreq *) data;
1877	struct mii_data *mii;
1878	int error = 0;
1879
1880	switch (command) {
1881	case SIOCSIFMTU:
1882		if (ifr->ifr_mtu > NGE_JUMBO_MTU)
1883			error = EINVAL;
1884		else {
1885			NGE_LOCK(sc);
1886			ifp->if_mtu = ifr->ifr_mtu;
			/*
			 * Workaround: if the MTU is larger than 8152
			 * (8170, the largest frame the chip can checksum,
			 * minus an 18 byte ethernet/VLAN header), turn off
			 * TX checksum offloading.
			 */
1892			if (ifr->ifr_mtu >= 8152) {
1893				ifp->if_capenable &= ~IFCAP_TXCSUM;
1894				ifp->if_hwassist = 0;
1895			} else {
1896				ifp->if_capenable |= IFCAP_TXCSUM;
1897				ifp->if_hwassist = NGE_CSUM_FEATURES;
1898			}
1899			NGE_UNLOCK(sc);
1900		}
1901		break;
1902	case SIOCSIFFLAGS:
1903		NGE_LOCK(sc);
1904		if (ifp->if_flags & IFF_UP) {
1905			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1906			    ifp->if_flags & IFF_PROMISC &&
1907			    !(sc->nge_if_flags & IFF_PROMISC)) {
1908				NGE_SETBIT(sc, NGE_RXFILT_CTL,
1909				    NGE_RXFILTCTL_ALLPHYS|
1910				    NGE_RXFILTCTL_ALLMULTI);
1911			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1912			    !(ifp->if_flags & IFF_PROMISC) &&
1913			    sc->nge_if_flags & IFF_PROMISC) {
1914				NGE_CLRBIT(sc, NGE_RXFILT_CTL,
1915				    NGE_RXFILTCTL_ALLPHYS);
1916				if (!(ifp->if_flags & IFF_ALLMULTI))
1917					NGE_CLRBIT(sc, NGE_RXFILT_CTL,
1918					    NGE_RXFILTCTL_ALLMULTI);
1919			} else {
1920				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1921				nge_init_locked(sc);
1922			}
1923		} else {
1924			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1925				nge_stop(sc);
1926		}
1927		sc->nge_if_flags = ifp->if_flags;
1928		NGE_UNLOCK(sc);
1929		error = 0;
1930		break;
1931	case SIOCADDMULTI:
1932	case SIOCDELMULTI:
1933		NGE_LOCK(sc);
1934		nge_setmulti(sc);
1935		NGE_UNLOCK(sc);
1936		error = 0;
1937		break;
1938	case SIOCGIFMEDIA:
1939	case SIOCSIFMEDIA:
1940		if (sc->nge_tbi) {
1941			error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia,
1942					      command);
1943		} else {
1944			mii = device_get_softc(sc->nge_miibus);
1945			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
1946					      command);
1947		}
1948		break;
1949	case SIOCSIFCAP:
1950#ifdef DEVICE_POLLING
1951		if (ifr->ifr_reqcap & IFCAP_POLLING &&
1952		    !(ifp->if_capenable & IFCAP_POLLING)) {
1953			error = ether_poll_register(nge_poll, ifp);
1954			if (error)
1955				return (error);
1956			NGE_LOCK(sc);
1957			/* Disable interrupts */
1958			CSR_WRITE_4(sc, NGE_IER, 0);
1959			ifp->if_capenable |= IFCAP_POLLING;
1960			NGE_UNLOCK(sc);
1961			return (error);
1962
1963		}
1964		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
1965		    ifp->if_capenable & IFCAP_POLLING) {
1966			error = ether_poll_deregister(ifp);
1967			/* Enable interrupts. */
1968			NGE_LOCK(sc);
1969			CSR_WRITE_4(sc, NGE_IER, 1);
1970			ifp->if_capenable &= ~IFCAP_POLLING;
1971			NGE_UNLOCK(sc);
1972			return (error);
1973		}
1974#endif /* DEVICE_POLLING */
1975		break;
1976	default:
1977		error = ether_ioctl(ifp, command, data);
1978		break;
1979	}
1980
1981	return (error);
1982}
1983
1984static void
1985nge_watchdog(struct ifnet *ifp)
1986{
1987	struct nge_softc *sc;
1988
1989	sc = ifp->if_softc;
1990
1991	ifp->if_oerrors++;
1992	if_printf(ifp, "watchdog timeout\n");
1993
1994	NGE_LOCK(sc);
1995	nge_stop(sc);
1996	nge_reset(sc);
1997	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1998	nge_init_locked(sc);
1999
2000	if (ifp->if_snd.ifq_head != NULL)
2001		nge_start_locked(ifp);
2002
2003	NGE_UNLOCK(sc);
2004}
2005
2006/*
2007 * Stop the adapter and free any mbufs allocated to the
2008 * RX and TX lists.
2009 */
2010static void
2011nge_stop(struct nge_softc *sc)
2012{
2013	int i;
2014	struct ifnet *ifp;
2015	struct mii_data *mii;
2016
2017	NGE_LOCK_ASSERT(sc);
2018	ifp = sc->nge_ifp;
2019	ifp->if_timer = 0;
2020	if (sc->nge_tbi) {
2021		mii = NULL;
2022	} else {
2023		mii = device_get_softc(sc->nge_miibus);
2024	}
2025
2026	callout_stop(&sc->nge_stat_ch);
2027	CSR_WRITE_4(sc, NGE_IER, 0);
2028	CSR_WRITE_4(sc, NGE_IMR, 0);
2029	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
2030	DELAY(1000);
2031	CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0);
2032	CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0);
2033
2034	if (!sc->nge_tbi)
2035		mii_down(mii);
2036
2037	sc->nge_link = 0;
2038
2039	/*
2040	 * Free data in the RX lists.
2041	 */
2042	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
2043		if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) {
2044			m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf);
2045			sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL;
2046		}
2047	}
2048	bzero((char *)&sc->nge_ldata->nge_rx_list,
2049		sizeof(sc->nge_ldata->nge_rx_list));
2050
2051	/*
2052	 * Free the TX list buffers.
2053	 */
2054	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
2055		if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) {
2056			m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf);
2057			sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL;
2058		}
2059	}
2060
2061	bzero((char *)&sc->nge_ldata->nge_tx_list,
2062		sizeof(sc->nge_ldata->nge_tx_list));
2063
2064	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2065}
2066
2067/*
2068 * Stop all chip I/O so that the kernel's probe routines don't
2069 * get confused by errant DMAs when rebooting.
2070 */
2071static int
2072nge_shutdown(device_t dev)
2073{
2074	struct nge_softc *sc;
2075
2076	sc = device_get_softc(dev);
2077
2078	NGE_LOCK(sc);
2079	nge_reset(sc);
2080	nge_stop(sc);
2081	NGE_UNLOCK(sc);
2082
2083	return (0);
2084}
2085