if_nge.c revision 151545
1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2000, 2001
4 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/nge/if_nge.c 151545 2005-10-22 05:06:55Z imp $");
36
37/*
38 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
39 * for FreeBSD. Datasheets are available from:
40 *
41 * http://www.national.com/ds/DP/DP83820.pdf
42 * http://www.national.com/ds/DP/DP83821.pdf
43 *
44 * These chips are used on several low cost gigabit ethernet NICs
45 * sold by D-Link, Addtron, SMC and Asante. Both parts are
46 * virtually the same, except the 83820 is a 64-bit/32-bit part,
47 * while the 83821 is 32-bit only.
48 *
49 * Many cards also use National gigE transceivers, such as the
50 * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
51 * contains a full register description that applies to all of these
52 * components:
53 *
54 * http://www.national.com/ds/DP/DP83861.pdf
55 *
56 * Written by Bill Paul <wpaul@bsdi.com>
57 * BSDi Open Source Solutions
58 */
59
60/*
61 * The NatSemi DP83820 and 83821 controllers are enhanced versions
62 * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
63 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
64 * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
65 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
66 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
67 * matching buffers, one perfect address filter buffer and interrupt
68 * moderation. The 83820 supports both 64-bit and 32-bit addressing
69 * and data transfers: the 64-bit support can be toggled on or off
70 * via software. This affects the size of certain fields in the DMA
71 * descriptors.
72 *
73 * There are two bugs/misfeatures in the 83820/83821 that I have
74 * discovered so far:
75 *
76 * - Receive buffers must be aligned on 64-bit boundaries, which means
77 *   you must resort to copying data in order to fix up the payload
78 *   alignment.
79 *
80 * - In order to transmit jumbo frames larger than 8170 bytes, you have
81 *   to turn off transmit checksum offloading, because the chip can't
82 *   compute the checksum on an outgoing frame unless it fits entirely
83 *   within the TX FIFO, which is only 8192 bytes in size. If you have
84 *   TX checksum offload enabled and you attempt to transmit a
85 *   frame larger than 8170 bytes, the transmitter will wedge.
86 *
87 * To work around the latter problem, TX checksum offload is disabled
88 * if the user selects an MTU larger than 8152 (8170 - 18).
89 */
90
91#ifdef HAVE_KERNEL_OPTION_HEADERS
92#include "opt_device_polling.h"
93#endif
94
95#include <sys/param.h>
96#include <sys/systm.h>
97#include <sys/sockio.h>
98#include <sys/mbuf.h>
99#include <sys/malloc.h>
100#include <sys/module.h>
101#include <sys/kernel.h>
102#include <sys/socket.h>
103
104#include <net/if.h>
105#include <net/if_arp.h>
106#include <net/ethernet.h>
107#include <net/if_dl.h>
108#include <net/if_media.h>
109#include <net/if_types.h>
110#include <net/if_vlan_var.h>
111
112#include <net/bpf.h>
113
114#include <vm/vm.h>              /* for vtophys */
115#include <vm/pmap.h>            /* for vtophys */
116#include <machine/clock.h>      /* for DELAY */
117#include <machine/bus.h>
118#include <machine/resource.h>
119#include <sys/bus.h>
120#include <sys/rman.h>
121
122#include <dev/mii/mii.h>
123#include <dev/mii/miivar.h>
124
125#include <dev/pci/pcireg.h>
126#include <dev/pci/pcivar.h>
127
128#define NGE_USEIOSPACE
129
130#include <dev/nge/if_ngereg.h>
131
132MODULE_DEPEND(nge, pci, 1, 1, 1);
133MODULE_DEPEND(nge, ether, 1, 1, 1);
134MODULE_DEPEND(nge, miibus, 1, 1, 1);
135
136/* "device miibus" required.  See GENERIC if you get errors here. */
137#include "miibus_if.h"
138
139#define NGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
140
141/*
142 * Various supported device vendors/types and their names.
143 */
144static struct nge_type nge_devs[] = {
145	{ NGE_VENDORID, NGE_DEVICEID,
146	    "National Semiconductor Gigabit Ethernet" },
147	{ 0, 0, NULL }
148};
149
150static int nge_probe(device_t);
151static int nge_attach(device_t);
152static int nge_detach(device_t);
153
154static int nge_newbuf(struct nge_softc *, struct nge_desc *, struct mbuf *);
155static int nge_encap(struct nge_softc *, struct mbuf *, u_int32_t *);
156#ifdef NGE_FIXUP_RX
157static __inline void nge_fixup_rx (struct mbuf *);
158#endif
159static void nge_rxeof(struct nge_softc *);
160static void nge_txeof(struct nge_softc *);
161static void nge_intr(void *);
162static void nge_tick(void *);
163static void nge_start(struct ifnet *);
164static void nge_start_locked(struct ifnet *);
165static int nge_ioctl(struct ifnet *, u_long, caddr_t);
166static void nge_init(void *);
167static void nge_init_locked(struct nge_softc *);
168static void nge_stop(struct nge_softc *);
169static void nge_watchdog(struct ifnet *);
170static void nge_shutdown(device_t);
171static int nge_ifmedia_upd(struct ifnet *);
172static void nge_ifmedia_upd_locked(struct ifnet *);
173static void nge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
174
175static void nge_delay(struct nge_softc *);
176static void nge_eeprom_idle(struct nge_softc *);
177static void nge_eeprom_putbyte(struct nge_softc *, int);
178static void nge_eeprom_getword(struct nge_softc *, int, u_int16_t *);
179static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int, int);
180
181static void nge_mii_sync(struct nge_softc *);
182static void nge_mii_send(struct nge_softc *, u_int32_t, int);
183static int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
184static int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);
185
186static int nge_miibus_readreg(device_t, int, int);
187static int nge_miibus_writereg(device_t, int, int, int);
188static void nge_miibus_statchg(device_t);
189
190static void nge_setmulti(struct nge_softc *);
191static void nge_reset(struct nge_softc *);
192static int nge_list_rx_init(struct nge_softc *);
193static int nge_list_tx_init(struct nge_softc *);
194
195#ifdef NGE_USEIOSPACE
196#define NGE_RES			SYS_RES_IOPORT
197#define NGE_RID			NGE_PCI_LOIO
198#else
199#define NGE_RES			SYS_RES_MEMORY
200#define NGE_RID			NGE_PCI_LOMEM
201#endif
202
203static device_method_t nge_methods[] = {
204	/* Device interface */
205	DEVMETHOD(device_probe,		nge_probe),
206	DEVMETHOD(device_attach,	nge_attach),
207	DEVMETHOD(device_detach,	nge_detach),
208	DEVMETHOD(device_shutdown,	nge_shutdown),
209
210	/* bus interface */
211	DEVMETHOD(bus_print_child,	bus_generic_print_child),
212	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
213
214	/* MII interface */
215	DEVMETHOD(miibus_readreg,	nge_miibus_readreg),
216	DEVMETHOD(miibus_writereg,	nge_miibus_writereg),
217	DEVMETHOD(miibus_statchg,	nge_miibus_statchg),
218
219	{ 0, 0 }
220};
221
222static driver_t nge_driver = {
223	"nge",
224	nge_methods,
225	sizeof(struct nge_softc)
226};
227
228static devclass_t nge_devclass;
229
230DRIVER_MODULE(nge, pci, nge_driver, nge_devclass, 0, 0);
231DRIVER_MODULE(miibus, nge, miibus_driver, miibus_devclass, 0, 0);
232
233#define NGE_SETBIT(sc, reg, x)				\
234	CSR_WRITE_4(sc, reg,				\
235		CSR_READ_4(sc, reg) | (x))
236
237#define NGE_CLRBIT(sc, reg, x)				\
238	CSR_WRITE_4(sc, reg,				\
239		CSR_READ_4(sc, reg) & ~(x))
240
241#define SIO_SET(x)					\
242	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))
243
244#define SIO_CLR(x)					\
245	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))
246
247static void
248nge_delay(sc)
249	struct nge_softc	*sc;
250{
251	int			idx;
252
253	for (idx = (300 / 33) + 1; idx > 0; idx--)
254		CSR_READ_4(sc, NGE_CSR);
255
256	return;
257}
258
259static void
260nge_eeprom_idle(sc)
261	struct nge_softc	*sc;
262{
263	register int		i;
264
265	SIO_SET(NGE_MEAR_EE_CSEL);
266	nge_delay(sc);
267	SIO_SET(NGE_MEAR_EE_CLK);
268	nge_delay(sc);
269
270	for (i = 0; i < 25; i++) {
271		SIO_CLR(NGE_MEAR_EE_CLK);
272		nge_delay(sc);
273		SIO_SET(NGE_MEAR_EE_CLK);
274		nge_delay(sc);
275	}
276
277	SIO_CLR(NGE_MEAR_EE_CLK);
278	nge_delay(sc);
279	SIO_CLR(NGE_MEAR_EE_CSEL);
280	nge_delay(sc);
281	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
282
283	return;
284}
285
286/*
287 * Send a read command and address to the EEPROM, check for ACK.
288 */
289static void
290nge_eeprom_putbyte(sc, addr)
291	struct nge_softc	*sc;
292	int			addr;
293{
294	register int		d, i;
295
296	d = addr | NGE_EECMD_READ;
297
298	/*
299	 * Feed in each bit and stobe the clock.
300	 */
301	for (i = 0x400; i; i >>= 1) {
302		if (d & i) {
303			SIO_SET(NGE_MEAR_EE_DIN);
304		} else {
305			SIO_CLR(NGE_MEAR_EE_DIN);
306		}
307		nge_delay(sc);
308		SIO_SET(NGE_MEAR_EE_CLK);
309		nge_delay(sc);
310		SIO_CLR(NGE_MEAR_EE_CLK);
311		nge_delay(sc);
312	}
313
314	return;
315}
316
317/*
318 * Read a word of data stored in the EEPROM at address 'addr.'
319 */
320static void
321nge_eeprom_getword(sc, addr, dest)
322	struct nge_softc	*sc;
323	int			addr;
324	u_int16_t		*dest;
325{
326	register int		i;
327	u_int16_t		word = 0;
328
329	/* Force EEPROM to idle state. */
330	nge_eeprom_idle(sc);
331
332	/* Enter EEPROM access mode. */
333	nge_delay(sc);
334	SIO_CLR(NGE_MEAR_EE_CLK);
335	nge_delay(sc);
336	SIO_SET(NGE_MEAR_EE_CSEL);
337	nge_delay(sc);
338
339	/*
340	 * Send address of word we want to read.
341	 */
342	nge_eeprom_putbyte(sc, addr);
343
344	/*
345	 * Start reading bits from EEPROM.
346	 */
347	for (i = 0x8000; i; i >>= 1) {
348		SIO_SET(NGE_MEAR_EE_CLK);
349		nge_delay(sc);
350		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
351			word |= i;
352		nge_delay(sc);
353		SIO_CLR(NGE_MEAR_EE_CLK);
354		nge_delay(sc);
355	}
356
357	/* Turn off EEPROM access mode. */
358	nge_eeprom_idle(sc);
359
360	*dest = word;
361
362	return;
363}
364
365/*
366 * Read a sequence of words from the EEPROM.
367 */
368static void
369nge_read_eeprom(sc, dest, off, cnt, swap)
370	struct nge_softc	*sc;
371	caddr_t			dest;
372	int			off;
373	int			cnt;
374	int			swap;
375{
376	int			i;
377	u_int16_t		word = 0, *ptr;
378
379	for (i = 0; i < cnt; i++) {
380		nge_eeprom_getword(sc, off + i, &word);
381		ptr = (u_int16_t *)(dest + (i * 2));
382		if (swap)
383			*ptr = ntohs(word);
384		else
385			*ptr = word;
386	}
387
388	return;
389}
390
391/*
392 * Sync the PHYs by setting data bit and strobing the clock 32 times.
393 */
394static void
395nge_mii_sync(sc)
396	struct nge_softc		*sc;
397{
398	register int		i;
399
400	SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);
401
402	for (i = 0; i < 32; i++) {
403		SIO_SET(NGE_MEAR_MII_CLK);
404		DELAY(1);
405		SIO_CLR(NGE_MEAR_MII_CLK);
406		DELAY(1);
407	}
408
409	return;
410}
411
412/*
413 * Clock a series of bits through the MII.
414 */
415static void
416nge_mii_send(sc, bits, cnt)
417	struct nge_softc		*sc;
418	u_int32_t		bits;
419	int			cnt;
420{
421	int			i;
422
423	SIO_CLR(NGE_MEAR_MII_CLK);
424
425	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
426                if (bits & i) {
427			SIO_SET(NGE_MEAR_MII_DATA);
428                } else {
429			SIO_CLR(NGE_MEAR_MII_DATA);
430                }
431		DELAY(1);
432		SIO_CLR(NGE_MEAR_MII_CLK);
433		DELAY(1);
434		SIO_SET(NGE_MEAR_MII_CLK);
435	}
436}
437
/*
 * Read a PHY register through the MII, bit-banged via the MEAR
 * register. Fills in frame->mii_data and returns 0 on success,
 * or 1 if the PHY did not acknowledge the command.
 */
static int
nge_mii_readreg(sc, frame)
	struct nge_softc		*sc;
	struct nge_mii_frame	*frame;

{
	int			i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = NGE_MII_STARTDELIM;
	frame->mii_opcode = NGE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/* Park all MEAR control bits low before starting. */
	CSR_WRITE_4(sc, NGE_MEAR, 0);

	/*
 	 * Turn on data xmit.
	 */
	SIO_SET(NGE_MEAR_MII_DIR);

	nge_mii_sync(sc);

	/*
	 * Send command/address info: start delimiter, read opcode,
	 * 5-bit PHY address, 5-bit register address.
	 */
	nge_mii_send(sc, frame->mii_stdelim, 2);
	nge_mii_send(sc, frame->mii_opcode, 2);
	nge_mii_send(sc, frame->mii_phyaddr, 5);
	nge_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
	DELAY(1);
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(NGE_MEAR_MII_DIR);
	/* Check for ack: a responding PHY drives the data line low. */
	SIO_CLR(NGE_MEAR_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(NGE_MEAR_MII_CLK);
			DELAY(1);
			SIO_SET(NGE_MEAR_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(NGE_MEAR_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(NGE_MEAR_MII_CLK);
		DELAY(1);
	}

fail:

	/* One final clock cycle to leave the bus in a clean state. */
	SIO_CLR(NGE_MEAR_MII_CLK);
	DELAY(1);
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);

	if (ack)
		return(1);
	return(0);
}
526
/*
 * Write to a PHY register through the MII. The write is
 * fire-and-forget (no ACK is checked); always returns 0.
 */
static int
nge_mii_writereg(sc, frame)
	struct nge_softc		*sc;
	struct nge_mii_frame	*frame;

{

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = NGE_MII_STARTDELIM;
	frame->mii_opcode = NGE_MII_WRITEOP;
	frame->mii_turnaround = NGE_MII_TURNAROUND;

	/*
 	 * Turn on data output.
	 */
	SIO_SET(NGE_MEAR_MII_DIR);

	nge_mii_sync(sc);

	/*
	 * Clock out start delimiter, write opcode, PHY and register
	 * addresses, the turnaround bits and the 16 data bits.
	 */
	nge_mii_send(sc, frame->mii_stdelim, 2);
	nge_mii_send(sc, frame->mii_opcode, 2);
	nge_mii_send(sc, frame->mii_phyaddr, 5);
	nge_mii_send(sc, frame->mii_regaddr, 5);
	nge_mii_send(sc, frame->mii_turnaround, 2);
	nge_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);
	SIO_CLR(NGE_MEAR_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(NGE_MEAR_MII_DIR);

	return(0);
}
572
573static int
574nge_miibus_readreg(dev, phy, reg)
575	device_t		dev;
576	int			phy, reg;
577{
578	struct nge_softc	*sc;
579	struct nge_mii_frame	frame;
580
581	sc = device_get_softc(dev);
582
583	bzero((char *)&frame, sizeof(frame));
584
585	frame.mii_phyaddr = phy;
586	frame.mii_regaddr = reg;
587	nge_mii_readreg(sc, &frame);
588
589	return(frame.mii_data);
590}
591
592static int
593nge_miibus_writereg(dev, phy, reg, data)
594	device_t		dev;
595	int			phy, reg, data;
596{
597	struct nge_softc	*sc;
598	struct nge_mii_frame	frame;
599
600	sc = device_get_softc(dev);
601
602	bzero((char *)&frame, sizeof(frame));
603
604	frame.mii_phyaddr = phy;
605	frame.mii_regaddr = reg;
606	frame.mii_data = data;
607	nge_mii_writereg(sc, &frame);
608
609	return(0);
610}
611
/*
 * MII status-change callback: reprogram the MAC's duplex and speed
 * configuration to match the newly negotiated media, for both TBI
 * (fiber) and copper (MII PHY) attachments.
 */
static void
nge_miibus_statchg(dev)
	device_t		dev;
{
	int			status;
	struct nge_softc	*sc;
	struct mii_data		*mii;

	sc = device_get_softc(dev);
	if (sc->nge_tbi) {
		/* TBI media: consult our own ifmedia, not an MII PHY. */
		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
		    == IFM_AUTO) {
			/*
			 * Autonegotiation: check the link partner's
			 * advertisement for full duplex.
			 * NOTE(review): status == 0 is treated as full
			 * duplex — presumably for partners that advertise
			 * nothing; confirm against the DP83820 datasheet.
			 */
			status = CSR_READ_4(sc, NGE_TBI_ANLPAR);
			if (status == 0 || status & NGE_TBIANAR_FDX) {
				NGE_SETBIT(sc, NGE_TX_CFG,
				    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
				NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
			} else {
				NGE_CLRBIT(sc, NGE_TX_CFG,
				    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
				NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
			}

		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
			!= IFM_FDX) {
			/* Manually selected half-duplex media. */
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		} else {
			/* Manually selected full-duplex media. */
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		}
	} else {
		/* Copper: take duplex from the attached PHY's state. */
		mii = device_get_softc(sc->nge_miibus);

		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		        NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		} else {
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		}

		/* If we have a 1000Mbps link, set the mode_1000 bit. */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
			NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
		} else {
			NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
		}
	}
	return;
}
668
/*
 * Program the chip's receive filter to match the interface's current
 * multicast membership. Must be called with the softc lock held.
 */
static void
nge_setmulti(sc)
	struct nge_softc	*sc;
{
	struct ifnet		*ifp;
	struct ifmultiaddr	*ifma;
	u_int32_t		h = 0, i, filtsave;
	int			bit, index;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;

	/* Promiscuous/allmulti: accept all multicast, skip the hash. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		NGE_CLRBIT(sc, NGE_RXFILT_CTL,
		    NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH);
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI);
		return;
	}

	/*
	 * We have to explicitly enable the multicast hash table
	 * on the NatSemi chip if we want to use it, which we do.
	 * We also have to tell it that we don't want to use the
	 * hash table for matching unicast addresses.
	 */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH);
	NGE_CLRBIT(sc, NGE_RXFILT_CTL,
	    NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH);

	/* Save the filter control word; restored after table updates. */
	filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL);

	/* first, zot all the existing hash bits */
	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
	}

	/*
	 * From the 11 bits returned by the crc routine, the top 7
	 * bits represent the 16-bit word in the mcast hash table
	 * that needs to be updated, and the lower 4 bits represent
	 * which bit within that word needs to be set.
	 */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 21;
		index = (h >> 4) & 0x7F;
		bit = h & 0xF;
		/* Select the table word, then OR in the hash bit. */
		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
		    NGE_FILTADDR_MCAST_LO + (index * 2));
		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
	}
	IF_ADDR_UNLOCK(ifp);

	/* Restore the saved filter configuration. */
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave);

	return;
}
730
731static void
732nge_reset(sc)
733	struct nge_softc	*sc;
734{
735	register int		i;
736
737	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);
738
739	for (i = 0; i < NGE_TIMEOUT; i++) {
740		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
741			break;
742	}
743
744	if (i == NGE_TIMEOUT)
745		if_printf(sc->nge_ifp, "reset never completed\n");
746
747	/* Wait a little while for the chip to get its brains in order. */
748	DELAY(1000);
749
750	/*
751	 * If this is a NetSemi chip, make sure to clear
752	 * PME mode.
753	 */
754	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
755	CSR_WRITE_4(sc, NGE_CLKRUN, 0);
756
757        return;
758}
759
760/*
761 * Probe for a NatSemi chip. Check the PCI vendor and device
762 * IDs against our list and return a device name if we find a match.
763 */
764static int
765nge_probe(dev)
766	device_t		dev;
767{
768	struct nge_type		*t;
769
770	t = nge_devs;
771
772	while(t->nge_name != NULL) {
773		if ((pci_get_vendor(dev) == t->nge_vid) &&
774		    (pci_get_device(dev) == t->nge_did)) {
775			device_set_desc(dev, t->nge_name);
776			return(BUS_PROBE_DEFAULT);
777		}
778		t++;
779	}
780
781	return(ENXIO);
782}
783
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach. On failure, releases whatever
 * resources were acquired and returns a non-zero errno.
 */
static int
nge_attach(dev)
	device_t		dev;
{
	u_char			eaddr[ETHER_ADDR_LEN];
	struct nge_softc	*sc;
	struct ifnet		*ifp = NULL;
	int			error = 0, rid;

	sc = device_get_softc(dev);

	NGE_LOCK_INIT(sc, device_get_nameunit(dev));
	callout_init_mtx(&sc->nge_stat_ch, &sc->nge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = NGE_RID;
	sc->nge_res = bus_alloc_resource_any(dev, NGE_RES, &rid, RF_ACTIVE);

	if (sc->nge_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->nge_btag = rman_get_bustag(sc->nge_res);
	sc->nge_bhandle = rman_get_bushandle(sc->nge_res);

	/* Allocate interrupt */
	rid = 0;
	sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->nge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	nge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 * NOTE(review): the three words are read into eaddr back to
	 * front — presumably the EEPROM stores the MAC in reversed
	 * word order; confirm against the DP83820 datasheet.
	 */
	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);

	/*
	 * Descriptor rings are addressed via vtophys(), so they must
	 * be physically contiguous and below 4GB.
	 */
	sc->nge_ldata = contigmalloc(sizeof(struct nge_list_data), M_DEVBUF,
	    M_NOWAIT|M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->nge_ldata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}

	ifp = sc->nge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nge_ioctl;
	ifp->if_start = nge_start;
	ifp->if_watchdog = nge_watchdog;
	ifp->if_init = nge_init;
	ifp->if_baudrate = 1000000000;
	ifp->if_snd.ifq_maxlen = NGE_TX_LIST_CNT - 1;
	ifp->if_hwassist = NGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING;
	/* Enable all capabilities by default (polling stays opt-in). */
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Do MII setup.
	 */
	/* XXX: leaked on error */
	if (mii_phy_probe(dev, &sc->nge_miibus,
			  nge_ifmedia_upd, nge_ifmedia_sts)) {
		/* No copper PHY found; check for a TBI fiber interface. */
		if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) {
			sc->nge_tbi = 1;
			device_printf(dev, "Using TBI\n");

			/* In TBI mode, nge_miibus points at ourselves. */
			sc->nge_miibus = dev;

			ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_upd,
				nge_ifmedia_sts);
#define	ADD(m, c)	ifmedia_add(&sc->nge_ifmedia, (m), (c), NULL)
			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, 0), 0);
			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, 0, 0), 0);
			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX, 0),0);
			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0), 0);
#undef ADD
			device_printf(dev, " 1000baseSX, 1000baseSX-FDX, auto\n");

			ifmedia_set(&sc->nge_ifmedia,
				IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0));

			/* Configure GPIO pins for the fiber interface. */
			CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
				| NGE_GPIO_GP4_OUT
				| NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
				| NGE_GPIO_GP3_OUTENB
				| NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN);

		} else {
			device_printf(dev, "MII without any PHY!\n");
			error = ENXIO;
			goto fail;
		}
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/*
	 * Hookup IRQ last.
	 */
	error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    nge_intr, sc, &sc->nge_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

	return (0);

fail:
	/* Unwind whatever was allocated before the failure. */
	if (sc->nge_ldata)
		contigfree(sc->nge_ldata,
		  sizeof(struct nge_list_data), M_DEVBUF);
	if (ifp)
		if_free(ifp);
	if (sc->nge_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
	if (sc->nge_res)
		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
	NGE_LOCK_DESTROY(sc);
	return(error);
}
940
/*
 * Detach the device: stop the hardware, detach from the network
 * stack, then release all resources acquired in nge_attach().
 * Teardown order mirrors attach in reverse.
 */
static int
nge_detach(dev)
	device_t		dev;
{
	struct nge_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	ifp = sc->nge_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	/* Quiesce the hardware before tearing anything down. */
	NGE_LOCK(sc);
	nge_reset(sc);
	nge_stop(sc);
	NGE_UNLOCK(sc);
	/* Wait for the stats callout to finish before freeing state. */
	callout_drain(&sc->nge_stat_ch);
	ether_ifdetach(ifp);

	bus_generic_detach(dev);
	if (!sc->nge_tbi) {
		/* In TBI mode nge_miibus is this device, not a child. */
		device_delete_child(dev, sc->nge_miibus);
	}
	bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
	bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);

	contigfree(sc->nge_ldata, sizeof(struct nge_list_data), M_DEVBUF);
	if_free(ifp);

	NGE_LOCK_DESTROY(sc);

	return(0);
}
977
978/*
979 * Initialize the transmit descriptors.
980 */
981static int
982nge_list_tx_init(sc)
983	struct nge_softc	*sc;
984{
985	struct nge_list_data	*ld;
986	struct nge_ring_data	*cd;
987	int			i;
988
989	cd = &sc->nge_cdata;
990	ld = sc->nge_ldata;
991
992	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
993		if (i == (NGE_TX_LIST_CNT - 1)) {
994			ld->nge_tx_list[i].nge_nextdesc =
995			    &ld->nge_tx_list[0];
996			ld->nge_tx_list[i].nge_next =
997			    vtophys(&ld->nge_tx_list[0]);
998		} else {
999			ld->nge_tx_list[i].nge_nextdesc =
1000			    &ld->nge_tx_list[i + 1];
1001			ld->nge_tx_list[i].nge_next =
1002			    vtophys(&ld->nge_tx_list[i + 1]);
1003		}
1004		ld->nge_tx_list[i].nge_mbuf = NULL;
1005		ld->nge_tx_list[i].nge_ptr = 0;
1006		ld->nge_tx_list[i].nge_ctl = 0;
1007	}
1008
1009	cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0;
1010
1011	return(0);
1012}
1013
1014
1015/*
1016 * Initialize the RX descriptors and allocate mbufs for them. Note that
1017 * we arrange the descriptors in a closed ring, so that the last descriptor
1018 * points back to the first.
1019 */
1020static int
1021nge_list_rx_init(sc)
1022	struct nge_softc	*sc;
1023{
1024	struct nge_list_data	*ld;
1025	struct nge_ring_data	*cd;
1026	int			i;
1027
1028	ld = sc->nge_ldata;
1029	cd = &sc->nge_cdata;
1030
1031	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
1032		if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS)
1033			return(ENOBUFS);
1034		if (i == (NGE_RX_LIST_CNT - 1)) {
1035			ld->nge_rx_list[i].nge_nextdesc =
1036			    &ld->nge_rx_list[0];
1037			ld->nge_rx_list[i].nge_next =
1038			    vtophys(&ld->nge_rx_list[0]);
1039		} else {
1040			ld->nge_rx_list[i].nge_nextdesc =
1041			    &ld->nge_rx_list[i + 1];
1042			ld->nge_rx_list[i].nge_next =
1043			    vtophys(&ld->nge_rx_list[i + 1]);
1044		}
1045	}
1046
1047	cd->nge_rx_prod = 0;
1048	sc->nge_head = sc->nge_tail = NULL;
1049
1050	return(0);
1051}
1052
1053/*
1054 * Initialize an RX descriptor and attach an MBUF cluster.
1055 */
1056static int
1057nge_newbuf(sc, c, m)
1058	struct nge_softc	*sc;
1059	struct nge_desc		*c;
1060	struct mbuf		*m;
1061{
1062
1063	if (m == NULL) {
1064		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1065		if (m == NULL)
1066			return (ENOBUFS);
1067	} else
1068		m->m_data = m->m_ext.ext_buf;
1069
1070	m->m_len = m->m_pkthdr.len = MCLBYTES;
1071
1072	m_adj(m, sizeof(u_int64_t));
1073
1074	c->nge_mbuf = m;
1075	c->nge_ptr = vtophys(mtod(m, caddr_t));
1076	c->nge_ctl = m->m_len;
1077	c->nge_extsts = 0;
1078
1079	return(0);
1080}
1081
#ifdef NGE_FIXUP_RX
/*
 * Shift a received frame back by ETHER_ALIGN (2) bytes so the IP
 * header ends up naturally aligned. Copies word by word; one extra
 * word is copied to cover a trailing odd byte.
 */
static __inline void
nge_fixup_rx(struct mbuf *m)
{
	uint16_t	*src, *dst;
	int		i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif
1101
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols. Walks the RX ring from the current
 * producer index, reassembling multi-descriptor frames, translating
 * hardware checksum/VLAN status, and handing completed packets to
 * the stack. Called with the softc lock held; the lock is dropped
 * around if_input().
 */
static void
nge_rxeof(sc)
	struct nge_softc	*sc;
{
        struct mbuf		*m;
        struct ifnet		*ifp;
	struct nge_desc		*cur_rx;
	int			i, total_len = 0;
	u_int32_t		rxstat;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;
	i = sc->nge_cdata.nge_rx_prod;

	/* Process descriptors until we hit one the chip still owns. */
	while(NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) {
		u_int32_t		extsts;

#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			/* Respect the per-poll packet budget. */
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif

		cur_rx = &sc->nge_ldata->nge_rx_list[i];
		rxstat = cur_rx->nge_rxstat;
		extsts = cur_rx->nge_extsts;
		m = cur_rx->nge_mbuf;
		cur_rx->nge_mbuf = NULL;
		total_len = NGE_RXBYTES(cur_rx);
		NGE_INC(i, NGE_RX_LIST_CNT);

		/*
		 * Frame spans multiple descriptors: append this buffer
		 * to the pending chain in nge_head/nge_tail and keep
		 * collecting until the final descriptor arrives.
		 */
		if (rxstat & NGE_CMDSTS_MORE) {
			m->m_len = total_len;
			if (sc->nge_head == NULL) {
				m->m_pkthdr.len = total_len;
				sc->nge_head = sc->nge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->nge_head->m_pkthdr.len += total_len;
				sc->nge_tail->m_next = m;
				sc->nge_tail = m;
			}
			nge_newbuf(sc, cur_rx, NULL);
			continue;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
	 	 * comes up in the ring.
		 */
		if (!(rxstat & NGE_CMDSTS_PKT_OK)) {
			ifp->if_ierrors++;
			if (sc->nge_head != NULL) {
				m_freem(sc->nge_head);
				sc->nge_head = sc->nge_tail = NULL;
			}
			nge_newbuf(sc, cur_rx, m);
			continue;
		}

		/* Try conjure up a replacement mbuf. */

		if (nge_newbuf(sc, cur_rx, NULL)) {
			/* Allocation failed: drop frame, recycle mbuf. */
			ifp->if_ierrors++;
			if (sc->nge_head != NULL) {
				m_freem(sc->nge_head);
				sc->nge_head = sc->nge_tail = NULL;
			}
			nge_newbuf(sc, cur_rx, m);
			continue;
		}

		/* Attach the final buffer to any pending chain. */
		if (sc->nge_head != NULL) {
			m->m_len = total_len;
			m->m_flags &= ~M_PKTHDR;
			sc->nge_tail->m_next = m;
			m = sc->nge_head;
			m->m_pkthdr.len += total_len;
			sc->nge_head = sc->nge_tail = NULL;
		} else
			m->m_pkthdr.len = m->m_len = total_len;

		/*
		 * Ok. NatSemi really screwed up here. This is the
		 * only gigE chip I know of with alignment constraints
		 * on receive buffers. RX buffers must be 64-bit aligned.
		 */
		/*
		 * By popular demand, ignore the alignment problems
		 * on the Intel x86 platform. The performance hit
		 * incurred due to unaligned accesses is much smaller
		 * than the hit produced by forcing buffer copies all
		 * the time, especially with jumbo frames. We still
		 * need to fix up the alignment everywhere else though.
		 */
#ifdef NGE_FIXUP_RX
		nge_fixup_rx(m);
#endif

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do IP checksum checking. */
		if (extsts & NGE_RXEXTSTS_IPPKT)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (!(extsts & NGE_RXEXTSTS_IPCSUMERR))
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		/* TCP/UDP checksum: mark valid if the chip saw no error. */
		if ((extsts & NGE_RXEXTSTS_TCPPKT &&
		    !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) ||
		    (extsts & NGE_RXEXTSTS_UDPPKT &&
		    !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (extsts & NGE_RXEXTSTS_VLANPKT) {
			VLAN_INPUT_TAG(ifp, m,
			    ntohs(extsts & NGE_RXEXTSTS_VTCI), continue);
		}
		/* Drop the lock across the stack call to avoid recursion. */
		NGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NGE_LOCK(sc);
	}

	sc->nge_cdata.nge_rx_prod = i;

	return;
}
1243
1244/*
1245 * A frame was downloaded to the chip. It's safe for us to clean up
1246 * the list buffers.
1247 */
1248
static void
nge_txeof(sc)
	struct nge_softc	*sc;
{
	struct nge_desc		*cur_tx;
	struct ifnet		*ifp;
	u_int32_t		idx;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->nge_cdata.nge_tx_cons;
	while (idx != sc->nge_cdata.nge_tx_prod) {
		cur_tx = &sc->nge_ldata->nge_tx_list[idx];

		/* Chip still owns this descriptor: stop reclaiming here. */
		if (NGE_OWNDESC(cur_tx))
			break;

		/*
		 * Intermediate fragment of a multi-descriptor frame:
		 * just reclaim it and move on.  Status and the mbuf
		 * chain are handled on the final fragment below.
		 */
		if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) {
			sc->nge_cdata.nge_tx_cnt--;
			NGE_INC(idx, NGE_TX_LIST_CNT);
			continue;
		}

		if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) {
			ifp->if_oerrors++;
			if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS)
				ifp->if_collisions++;
			if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL)
				ifp->if_collisions++;
		}

		/* Per-frame collision count lives in the upper bits. */
		ifp->if_collisions +=
		    (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16;

		ifp->if_opackets++;
		if (cur_tx->nge_mbuf != NULL) {
			m_freem(cur_tx->nge_mbuf);
			cur_tx->nge_mbuf = NULL;
			/* A slot was freed: allow queuing again. */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}

		sc->nge_cdata.nge_tx_cnt--;
		NGE_INC(idx, NGE_TX_LIST_CNT);
	}

	sc->nge_cdata.nge_tx_cons = idx;

	/* Ring fully drained: cancel the transmit watchdog. */
	if (idx == sc->nge_cdata.nge_tx_prod)
		ifp->if_timer = 0;

	return;
}
1306
/*
 * Once-per-second callout, run with the softc lock held.  Watches for
 * the link coming up: in TBI (fiber) mode the chip's TBI BMSR register
 * is polled for autonegotiation completion, otherwise the MII layer is
 * ticked and its media status consulted.  When the link first comes up,
 * any output pending on the send queue is kicked.  Reschedules itself
 * at the end.
 */
static void
nge_tick(xsc)
	void			*xsc;
{
	struct nge_softc	*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;

	sc = xsc;
	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;

	if (sc->nge_tbi) {
		if (!sc->nge_link) {
			if (CSR_READ_4(sc, NGE_TBI_BMSR)
			    & NGE_TBIBMSR_ANEG_DONE) {
				if (bootverbose)
					if_printf(sc->nge_ifp,
					    "gigabit link up\n");
				nge_miibus_statchg(sc->nge_miibus);
				sc->nge_link++;
				/* Link is up: restart pending output. */
				if (ifp->if_snd.ifq_head != NULL)
					nge_start_locked(ifp);
			}
		}
	} else {
		mii = device_get_softc(sc->nge_miibus);
		mii_tick(mii);

		if (!sc->nge_link) {
			if (mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->nge_link++;
				if (IFM_SUBTYPE(mii->mii_media_active)
				    == IFM_1000_T && bootverbose)
					if_printf(sc->nge_ifp,
					    "gigabit link up\n");
				/* Link is up: restart pending output. */
				if (ifp->if_snd.ifq_head != NULL)
					nge_start_locked(ifp);
			}
		}
	}
	callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc);

	return;
}
1353
1354#ifdef DEVICE_POLLING
1355static poll_handler_t nge_poll;
1356
/*
 * DEVICE_POLLING entry point, called periodically in place of the
 * interrupt handler.  'count' bounds the number of RX descriptors
 * processed per invocation (budget stored in sc->rxcycles and consumed
 * by nge_rxeof()).
 */
static void
nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct  nge_softc *sc = ifp->if_softc;

	NGE_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		NGE_UNLOCK(sc);
		return;
	}

	/*
	 * On the nge, reading the status register also clears it.
	 * So before returning to intr mode we must make sure that all
	 * possible pending sources of interrupts have been served.
	 * In practice this means run to completion the *eof routines,
	 * and then call the interrupt routine
	 */
	sc->rxcycles = count;
	nge_rxeof(sc);
	nge_txeof(sc);
	if (ifp->if_snd.ifq_head != NULL)
		nge_start_locked(ifp);

	/* Leftover budget means the RX ring was fully drained. */
	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
		u_int32_t	status;

		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);

		if (status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW))
			nge_rxeof(sc);

		/* Restart the receiver if it went idle. */
		if (status & (NGE_ISR_RX_IDLE))
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		/* Fatal system error: reset and reinitialize the chip. */
		if (status & NGE_ISR_SYSERR) {
			nge_reset(sc);
			nge_init_locked(sc);
		}
	}
	NGE_UNLOCK(sc);
}
1400#endif /* DEVICE_POLLING */
1401
/*
 * Interrupt handler.  Masks interrupt delivery via IER for the
 * duration, then reads ISR (which acknowledges all pending sources)
 * in a loop, dispatching the TX/RX completion handlers until no
 * interesting status bits remain.  In TBI mode the GPIO3 data LED is
 * lit while servicing.  Runs with the softc lock held throughout.
 */
static void
nge_intr(arg)
	void			*arg;
{
	struct nge_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	ifp = sc->nge_ifp;

	NGE_LOCK(sc);
#ifdef DEVICE_POLLING
	/* In polling mode nge_poll() does all the work. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		NGE_UNLOCK(sc);
		return;
	}
#endif

	/* Suppress unwanted interrupts while the interface is down. */
	if (!(ifp->if_flags & IFF_UP)) {
		nge_stop(sc);
		NGE_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 0);

	/* Data LED on for TBI mode */
	if(sc->nge_tbi)
		 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
			     | NGE_GPIO_GP3_OUT);

	for (;;) {
		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);

		if ((status & NGE_INTRS) == 0)
			break;

		if ((status & NGE_ISR_TX_DESC_OK) ||
		    (status & NGE_ISR_TX_ERR) ||
		    (status & NGE_ISR_TX_OK) ||
		    (status & NGE_ISR_TX_IDLE))
			nge_txeof(sc);

		if ((status & NGE_ISR_RX_DESC_OK) ||
		    (status & NGE_ISR_RX_ERR) ||
		    (status & NGE_ISR_RX_OFLOW) ||
		    (status & NGE_ISR_RX_FIFO_OFLOW) ||
		    (status & NGE_ISR_RX_IDLE) ||
		    (status & NGE_ISR_RX_OK))
			nge_rxeof(sc);

		/* Restart the receiver if it went idle. */
		if ((status & NGE_ISR_RX_IDLE))
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		/* Fatal system error: full reset and reinitialization. */
		if (status & NGE_ISR_SYSERR) {
			nge_reset(sc);
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			nge_init_locked(sc);
		}

#if 0
		/*
		 * XXX: nge_tick() is not ready to be called this way
		 * it screws up the aneg timeout because mii_tick() is
		 * only to be called once per second.
		 */
		if (status & NGE_IMR_PHY_INTR) {
			sc->nge_link = 0;
			nge_tick(sc);
		}
#endif
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 1);

	if (ifp->if_snd.ifq_head != NULL)
		nge_start_locked(ifp);

	/* Data LED off for TBI mode */

	if(sc->nge_tbi)
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
			    & ~NGE_GPIO_GP3_OUT);

	NGE_UNLOCK(sc);

	return;
}
1495
1496/*
1497 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1498 * pointers to the fragment pointers.
1499 */
static int
nge_encap(sc, m_head, txidx)
	struct nge_softc	*sc;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct nge_desc		*f = NULL;
	struct mbuf		*m;
	int			frag, cur, cnt = 0;
	struct m_tag		*mtag;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur = frag = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Always keep at least two descriptors free. */
			if ((NGE_TX_LIST_CNT -
			    (sc->nge_cdata.nge_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc->nge_ldata->nge_tx_list[frag];
			f->nge_ctl = NGE_CMDSTS_MORE | m->m_len;
			f->nge_ptr = vtophys(mtod(m, vm_offset_t));
			/*
			 * Hand all but the first descriptor to the chip
			 * now; the first one is released last (below) so
			 * the chip cannot start on a half-built frame.
			 */
			if (cnt != 0)
				f->nge_ctl |= NGE_CMDSTS_OWN;
			cur = frag;
			NGE_INC(frag, NGE_TX_LIST_CNT);
			cnt++;
		}
	}

	/*
	 * NOTE(review): the for loop above always terminates with
	 * m == NULL, so this check can never fire; apparently a
	 * leftover from an earlier loop structure.
	 */
	if (m != NULL)
		return(ENOBUFS);

	/* Request hardware checksum insertion as flagged on the packet. */
	sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0;
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
			    NGE_TXEXTSTS_IPCSUM;
		if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
			    NGE_TXEXTSTS_TCPCSUM;
		if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
			    NGE_TXEXTSTS_UDPCSUM;
	}

	/* If the frame carries a VLAN tag, have the chip insert it. */
	mtag = VLAN_OUTPUT_TAG(sc->nge_ifp, m_head);
	if (mtag != NULL) {
		sc->nge_ldata->nge_tx_list[cur].nge_extsts |=
		    (NGE_TXEXTSTS_VLANPKT|htons(VLAN_TAG_VALUE(mtag)));
	}

	/*
	 * Terminate the frame on the last descriptor, then flip the
	 * OWN bit on the first one to release the whole frame to the
	 * chip.  The mbuf chain is stored on the last descriptor so
	 * nge_txeof() can free it when the frame completes.
	 */
	sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head;
	sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE;
	sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN;
	sc->nge_cdata.nge_tx_cnt += cnt;
	*txidx = frag;

	return(0);
}
1565
1566/*
1567 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1568 * to the mbuf data regions directly in the transmit lists. We also save a
1569 * copy of the pointers since the transmit list fragment pointers are
1570 * physical addresses.
1571 */
1572
1573static void
1574nge_start(ifp)
1575	struct ifnet		*ifp;
1576{
1577	struct nge_softc	*sc;
1578
1579	sc = ifp->if_softc;
1580	NGE_LOCK(sc);
1581	nge_start_locked(ifp);
1582	NGE_UNLOCK(sc);
1583}
1584
/*
 * Transmit path proper, called with the softc lock held.  Drains the
 * interface send queue into free TX descriptors until either runs out,
 * then kicks the transmitter and arms the watchdog.
 */
static void
nge_start_locked(ifp)
	struct ifnet		*ifp;
{
	struct nge_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;

	sc = ifp->if_softc;

	/* No link yet: leave frames queued until nge_tick() sees one. */
	if (!sc->nge_link)
		return;

	idx = sc->nge_cdata.nge_tx_prod;

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while(sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Ring full: put the frame back at the head of the
		 * queue and mark the interface busy until nge_txeof()
		 * frees some descriptors.
		 */
		if (nge_encap(sc, m_head, &idx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);

	}

	/* Transmit */
	sc->nge_cdata.nge_tx_prod = idx;
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}
1633
/* Locking wrapper around nge_init_locked(). */
static void
nge_init(void *xsc)
{
	struct nge_softc	*sc = xsc;

	NGE_LOCK(sc);
	nge_init_locked(sc);
	NGE_UNLOCK(sc);
}
1644
/*
 * Bring the interface up, with the softc lock held.  Stops any pending
 * I/O, programs the station address and RX filter, initializes the
 * descriptor rings, configures checksum/VLAN offload and duplex, then
 * enables interrupts and starts the receiver and transmitter.  A no-op
 * if the interface is already running.
 */
static void
nge_init_locked(sc)
	struct nge_softc	*sc;
{
	struct ifnet		*ifp = sc->nge_ifp;
	struct mii_data		*mii;

	NGE_LOCK_ASSERT(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	nge_stop(sc);

	/* In TBI (fiber) mode there is no MII PHY to consult. */
	if (sc->nge_tbi) {
		mii = NULL;
	} else {
		mii = device_get_softc(sc->nge_miibus);
	}

	/* Set MAC address (perfect-match filter, 16 bits at a time). */
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)IFP2ENADDR(sc->nge_ifp))[0]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)IFP2ENADDR(sc->nge_ifp))[1]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)IFP2ENADDR(sc->nge_ifp))[2]);

	/* Init circular RX list. */
	if (nge_list_rx_init(sc) == ENOBUFS) {
		if_printf(sc->nge_ifp, "initialization failed: no "
			"memory for rx buffers\n");
		nge_stop(sc);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	nge_list_tx_init(sc);

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);

	 /* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
	} else {
		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
	} else {
		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
	}

	/*
	 * Load the multicast filter.
	 */
	nge_setmulti(sc);

	/* Turn the receive filter on */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, NGE_RX_LISTPTR,
	    vtophys(&sc->nge_ldata->nge_rx_list[0]));
	CSR_WRITE_4(sc, NGE_TX_LISTPTR,
	    vtophys(&sc->nge_ldata->nge_tx_list[0]));

	/* Set RX configuration */
	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);
	/*
	 * Enable hardware checksum validation for all IPv4
	 * packets, do not reject packets with bad checksums.
	 */
	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);

	/*
	 * Tell the chip to detect and strip VLAN tag info from
	 * received frames. The tag will be provided in the extsts
	 * field in the RX descriptors.
	 */
	NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL,
	    NGE_VIPRXCTL_TAG_DETECT_ENB|NGE_VIPRXCTL_TAG_STRIP_ENB);

	/* Set TX configuration */
	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);

	/*
	 * Enable TX IPv4 checksumming on a per-packet basis.
	 */
	CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT);

	/*
	 * Tell the chip to insert VLAN tags on a per-packet basis as
	 * dictated by the code in the frame encapsulation routine.
	 */
	NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);

	/* Set full/half duplex mode. */
	if (sc->nge_tbi) {
		if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
		    == IFM_FDX) {
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		} else {
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		}
	} else {
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		} else {
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		}
	}

	/* Start the periodic link-state callout. */
	nge_tick(sc);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes. Also enable the
	 * extsts field in the DMA descriptors (needed for
	 * TCP/IP checksum offload on transmit).
	 */
	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD|
	    NGE_CFG_PHYINTR_LNK|NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB);

	/*
	 * Configure interrupt holdoff (moderation). We can
	 * have the chip delay interrupt delivery for a certain
	 * period. Units are in 100us, and the max setting
	 * is 25500us (0xFF x 100us). Default is a 100us holdoff.
	 */
	CSR_WRITE_4(sc, NGE_IHR, 0x01);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
#ifdef DEVICE_POLLING
	/*
	 * ... only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_4(sc, NGE_IER, 0);
	else
#endif
	CSR_WRITE_4(sc, NGE_IER, 1);

	/* Enable receiver and transmitter. */
	NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

	nge_ifmedia_upd_locked(ifp);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}
1832
1833/*
1834 * Set media options.
1835 */
1836static int
1837nge_ifmedia_upd(ifp)
1838	struct ifnet		*ifp;
1839{
1840	struct nge_softc	*sc;
1841
1842	sc = ifp->if_softc;
1843	NGE_LOCK(sc);
1844	nge_ifmedia_upd_locked(ifp);
1845	NGE_UNLOCK(sc);
1846	return (0);
1847}
1848
/*
 * Apply the currently selected media settings, with the softc lock
 * held.  In TBI (fiber) mode the chip's TBI autonegotiation and
 * duplex registers are programmed directly; otherwise any attached
 * PHYs are reset and the request is handed to the MII layer.
 */
static void
nge_ifmedia_upd_locked(ifp)
	struct ifnet		*ifp;
{
	struct nge_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	NGE_LOCK_ASSERT(sc);

	if (sc->nge_tbi) {
		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
		     == IFM_AUTO) {
			/* Advertise both duplexes and restart aneg. */
			CSR_WRITE_4(sc, NGE_TBI_ANAR,
				CSR_READ_4(sc, NGE_TBI_ANAR)
					| NGE_TBIANAR_HDX | NGE_TBIANAR_FDX
					| NGE_TBIANAR_PS1 | NGE_TBIANAR_PS2);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG
				| NGE_TBIBMCR_RESTART_ANEG);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG);
		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media
			    & IFM_GMASK) == IFM_FDX) {
			/* Forced full duplex: no autonegotiation. */
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);

			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
		} else {
			/* Forced half duplex: no autonegotiation. */
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);

			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
		}

		/* Data LED (GPIO3) off. */
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
			    & ~NGE_GPIO_GP3_OUT);
	} else {
		mii = device_get_softc(sc->nge_miibus);
		sc->nge_link = 0;
		/* Reset every attached PHY before changing media. */
		if (mii->mii_instance) {
			struct mii_softc	*miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
				mii_phy_reset(miisc);
		}
		mii_mediachg(mii);
	}
}
1900
1901/*
1902 * Report current media status.
1903 */
1904static void
1905nge_ifmedia_sts(ifp, ifmr)
1906	struct ifnet		*ifp;
1907	struct ifmediareq	*ifmr;
1908{
1909	struct nge_softc	*sc;
1910	struct mii_data		*mii;
1911
1912	sc = ifp->if_softc;
1913
1914	NGE_LOCK(sc);
1915	if (sc->nge_tbi) {
1916		ifmr->ifm_status = IFM_AVALID;
1917		ifmr->ifm_active = IFM_ETHER;
1918
1919		if (CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) {
1920			ifmr->ifm_status |= IFM_ACTIVE;
1921		}
1922		if (CSR_READ_4(sc, NGE_TBI_BMCR) & NGE_TBIBMCR_LOOPBACK)
1923			ifmr->ifm_active |= IFM_LOOP;
1924		if (!CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) {
1925			ifmr->ifm_active |= IFM_NONE;
1926			ifmr->ifm_status = 0;
1927			NGE_UNLOCK(sc);
1928			return;
1929		}
1930		ifmr->ifm_active |= IFM_1000_SX;
1931		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
1932		    == IFM_AUTO) {
1933			ifmr->ifm_active |= IFM_AUTO;
1934			if (CSR_READ_4(sc, NGE_TBI_ANLPAR)
1935			    & NGE_TBIANAR_FDX) {
1936				ifmr->ifm_active |= IFM_FDX;
1937			}else if (CSR_READ_4(sc, NGE_TBI_ANLPAR)
1938				  & NGE_TBIANAR_HDX) {
1939				ifmr->ifm_active |= IFM_HDX;
1940			}
1941		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
1942			== IFM_FDX)
1943			ifmr->ifm_active |= IFM_FDX;
1944		else
1945			ifmr->ifm_active |= IFM_HDX;
1946
1947	} else {
1948		mii = device_get_softc(sc->nge_miibus);
1949		mii_pollstat(mii);
1950		ifmr->ifm_active = mii->mii_media_active;
1951		ifmr->ifm_status = mii->mii_media_status;
1952	}
1953	NGE_UNLOCK(sc);
1954
1955	return;
1956}
1957
/*
 * Process interface ioctls: MTU changes (with a TX checksum offload
 * workaround for large MTUs), up/down and promiscuous flag changes,
 * multicast filter reloads, media requests and, under DEVICE_POLLING,
 * polling mode transitions.  Returns 0 or an errno value.
 */
static int
nge_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct nge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error = 0;

	switch(command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > NGE_JUMBO_MTU)
			error = EINVAL;
		else {
			NGE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			/*
			 * Workaround: if the MTU is larger than
			 * 8152 (TX FIFO size minus 64 minus 18), turn off
			 * TX checksum offloading.
			 */
			if (ifr->ifr_mtu >= 8152) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist = 0;
			} else {
				ifp->if_capenable |= IFCAP_TXCSUM;
				ifp->if_hwassist = NGE_CSUM_FEATURES;
			}
			NGE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		NGE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC flag changed while the
			 * interface is running, just adjust the RX
			 * filter instead of reinitializing the chip.
			 */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->nge_if_flags & IFF_PROMISC)) {
				NGE_SETBIT(sc, NGE_RXFILT_CTL,
				    NGE_RXFILTCTL_ALLPHYS|
				    NGE_RXFILTCTL_ALLMULTI);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->nge_if_flags & IFF_PROMISC) {
				NGE_CLRBIT(sc, NGE_RXFILT_CTL,
				    NGE_RXFILTCTL_ALLPHYS);
				if (!(ifp->if_flags & IFF_ALLMULTI))
					NGE_CLRBIT(sc, NGE_RXFILT_CTL,
					    NGE_RXFILTCTL_ALLMULTI);
			} else {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				nge_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				nge_stop(sc);
		}
		/* Remember flags so the next call can detect changes. */
		sc->nge_if_flags = ifp->if_flags;
		NGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		NGE_LOCK(sc);
		nge_setmulti(sc);
		NGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* TBI mode keeps its own ifmedia; otherwise ask the MII. */
		if (sc->nge_tbi) {
			error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia,
					      command);
		} else {
			mii = device_get_softc(sc->nge_miibus);
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
					      command);
		}
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(nge_poll, ifp);
			if (error)
				return(error);
			NGE_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_4(sc, NGE_IER, 0);
			ifp->if_capenable |= IFCAP_POLLING;
			NGE_UNLOCK(sc);
			return (error);

		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			NGE_LOCK(sc);
			CSR_WRITE_4(sc, NGE_IER, 1);
			ifp->if_capenable &= ~IFCAP_POLLING;
			NGE_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}
2072
2073static void
2074nge_watchdog(ifp)
2075	struct ifnet		*ifp;
2076{
2077	struct nge_softc	*sc;
2078
2079	sc = ifp->if_softc;
2080
2081	ifp->if_oerrors++;
2082	if_printf(sc->nge_ifp, "watchdog timeout\n");
2083
2084	NGE_LOCK(sc);
2085	nge_stop(sc);
2086	nge_reset(sc);
2087	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2088	nge_init_locked(sc);
2089
2090	if (ifp->if_snd.ifq_head != NULL)
2091		nge_start_locked(ifp);
2092
2093	NGE_UNLOCK(sc);
2094
2095	return;
2096}
2097
2098/*
2099 * Stop the adapter and free any mbufs allocated to the
2100 * RX and TX lists.
2101 */
static void
nge_stop(sc)
	struct nge_softc	*sc;
{
	register int		i;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;
	/* Cancel the transmit watchdog. */
	ifp->if_timer = 0;
	/* In TBI (fiber) mode there is no MII layer to shut down. */
	if (sc->nge_tbi) {
		mii = NULL;
	} else {
		mii = device_get_softc(sc->nge_miibus);
	}

	callout_stop(&sc->nge_stat_ch);
	/* Mask/disable interrupts, halt the DMA engines, and clear
	 * the descriptor list pointers. */
	CSR_WRITE_4(sc, NGE_IER, 0);
	CSR_WRITE_4(sc, NGE_IMR, 0);
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
	DELAY(1000);
	CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0);
	CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0);

	if (!sc->nge_tbi)
		mii_down(mii);

	sc->nge_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
		if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) {
			m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf);
			sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL;
		}
	}
	bzero((char *)&sc->nge_ldata->nge_rx_list,
		sizeof(sc->nge_ldata->nge_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
		if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) {
			m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf);
			sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL;
		}
	}

	bzero((char *)&sc->nge_ldata->nge_tx_list,
		sizeof(sc->nge_ldata->nge_tx_list));

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	return;
}
2161
2162/*
2163 * Stop all chip I/O so that the kernel's probe routines don't
2164 * get confused by errant DMAs when rebooting.
2165 */
2166static void
2167nge_shutdown(dev)
2168	device_t		dev;
2169{
2170	struct nge_softc	*sc;
2171
2172	sc = device_get_softc(dev);
2173
2174	NGE_LOCK(sc);
2175	nge_reset(sc);
2176	nge_stop(sc);
2177	NGE_UNLOCK(sc);
2178
2179	return;
2180}
2181