if_nge.c revision 143158
1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2000, 2001
4 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/nge/if_nge.c 143158 2005-03-05 18:10:49Z imp $");
36
37/*
38 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
39 * for FreeBSD. Datasheets are available from:
40 *
41 * http://www.national.com/ds/DP/DP83820.pdf
42 * http://www.national.com/ds/DP/DP83821.pdf
43 *
44 * These chips are used on several low cost gigabit ethernet NICs
45 * sold by D-Link, Addtron, SMC and Asante. Both parts are
46 * virtually the same, except the 83820 is a 64-bit/32-bit part,
47 * while the 83821 is 32-bit only.
48 *
49 * Many cards also use National gigE transceivers, such as the
50 * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
51 * contains a full register description that applies to all of these
52 * components:
53 *
54 * http://www.national.com/ds/DP/DP83861.pdf
55 *
56 * Written by Bill Paul <wpaul@bsdi.com>
57 * BSDi Open Source Solutions
58 */
59
60/*
61 * The NatSemi DP83820 and 83821 controllers are enhanced versions
62 * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
63 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
64 * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
65 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
66 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
67 * matching buffers, one perfect address filter buffer and interrupt
68 * moderation. The 83820 supports both 64-bit and 32-bit addressing
69 * and data transfers: the 64-bit support can be toggled on or off
70 * via software. This affects the size of certain fields in the DMA
71 * descriptors.
72 *
73 * There are two bugs/misfeatures in the 83820/83821 that I have
74 * discovered so far:
75 *
76 * - Receive buffers must be aligned on 64-bit boundaries, which means
77 *   you must resort to copying data in order to fix up the payload
78 *   alignment.
79 *
80 * - In order to transmit jumbo frames larger than 8170 bytes, you have
81 *   to turn off transmit checksum offloading, because the chip can't
82 *   compute the checksum on an outgoing frame unless it fits entirely
83 *   within the TX FIFO, which is only 8192 bytes in size. If you have
84 *   TX checksum offload enabled and you transmit attempt to transmit a
85 *   frame larger than 8170 bytes, the transmitter will wedge.
86 *
87 * To work around the latter problem, TX checksum offload is disabled
88 * if the user selects an MTU larger than 8152 (8170 - 18).
89 */
90
91#include <sys/param.h>
92#include <sys/systm.h>
93#include <sys/sockio.h>
94#include <sys/mbuf.h>
95#include <sys/malloc.h>
96#include <sys/module.h>
97#include <sys/kernel.h>
98#include <sys/socket.h>
99
100#include <net/if.h>
101#include <net/if_arp.h>
102#include <net/ethernet.h>
103#include <net/if_dl.h>
104#include <net/if_media.h>
105#include <net/if_types.h>
106#include <net/if_vlan_var.h>
107
108#include <net/bpf.h>
109
110#include <vm/vm.h>              /* for vtophys */
111#include <vm/pmap.h>            /* for vtophys */
112#include <machine/clock.h>      /* for DELAY */
113#include <machine/bus_pio.h>
114#include <machine/bus_memio.h>
115#include <machine/bus.h>
116#include <machine/resource.h>
117#include <sys/bus.h>
118#include <sys/rman.h>
119
120#include <dev/mii/mii.h>
121#include <dev/mii/miivar.h>
122
123#include <dev/pci/pcireg.h>
124#include <dev/pci/pcivar.h>
125
126#define NGE_USEIOSPACE
127
128#include <dev/nge/if_ngereg.h>
129
130MODULE_DEPEND(nge, pci, 1, 1, 1);
131MODULE_DEPEND(nge, ether, 1, 1, 1);
132MODULE_DEPEND(nge, miibus, 1, 1, 1);
133
134/* "controller miibus0" required.  See GENERIC if you get errors here. */
135#include "miibus_if.h"
136
137#define NGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
138
139/*
140 * Various supported device vendors/types and their names.
141 */
static struct nge_type nge_devs[] = {
	{ NGE_VENDORID, NGE_DEVICEID,
	    "National Semiconductor Gigabit Ethernet" },
	{ 0, 0, NULL }		/* list terminator */
};
147
148static int nge_probe(device_t);
149static int nge_attach(device_t);
150static int nge_detach(device_t);
151
152static int nge_newbuf(struct nge_softc *, struct nge_desc *, struct mbuf *);
153static int nge_encap(struct nge_softc *, struct mbuf *, u_int32_t *);
154#ifdef NGE_FIXUP_RX
155static __inline void nge_fixup_rx (struct mbuf *);
156#endif
157static void nge_rxeof(struct nge_softc *);
158static void nge_txeof(struct nge_softc *);
159static void nge_intr(void *);
160static void nge_tick(void *);
161static void nge_tick_locked(struct nge_softc *);
162static void nge_start(struct ifnet *);
163static void nge_start_locked(struct ifnet *);
164static int nge_ioctl(struct ifnet *, u_long, caddr_t);
165static void nge_init(void *);
166static void nge_init_locked(struct nge_softc *);
167static void nge_stop(struct nge_softc *);
168static void nge_watchdog(struct ifnet *);
169static void nge_shutdown(device_t);
170static int nge_ifmedia_upd(struct ifnet *);
171static void nge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
172
173static void nge_delay(struct nge_softc *);
174static void nge_eeprom_idle(struct nge_softc *);
175static void nge_eeprom_putbyte(struct nge_softc *, int);
176static void nge_eeprom_getword(struct nge_softc *, int, u_int16_t *);
177static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int, int);
178
179static void nge_mii_sync(struct nge_softc *);
180static void nge_mii_send(struct nge_softc *, u_int32_t, int);
181static int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
182static int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);
183
184static int nge_miibus_readreg(device_t, int, int);
185static int nge_miibus_writereg(device_t, int, int, int);
186static void nge_miibus_statchg(device_t);
187
188static void nge_setmulti(struct nge_softc *);
189static void nge_reset(struct nge_softc *);
190static int nge_list_rx_init(struct nge_softc *);
191static int nge_list_tx_init(struct nge_softc *);
192
/*
 * Select I/O-port vs. memory-mapped register access depending on
 * NGE_USEIOSPACE (defined above); NGE_RID is the matching PCI BAR.
 */
#ifdef NGE_USEIOSPACE
#define NGE_RES			SYS_RES_IOPORT
#define NGE_RID			NGE_PCI_LOIO
#else
#define NGE_RES			SYS_RES_MEMORY
#define NGE_RID			NGE_PCI_LOMEM
#endif

static device_method_t nge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nge_probe),
	DEVMETHOD(device_attach,	nge_attach),
	DEVMETHOD(device_detach,	nge_detach),
	DEVMETHOD(device_shutdown,	nge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nge_miibus_statchg),

	{ 0, 0 }
};

static driver_t nge_driver = {
	"nge",
	nge_methods,
	sizeof(struct nge_softc)
};

static devclass_t nge_devclass;

DRIVER_MODULE(nge, pci, nge_driver, nge_devclass, 0, 0);
DRIVER_MODULE(miibus, nge, miibus_driver, miibus_devclass, 0, 0);

/* Read-modify-write helpers for setting/clearing register bits. */
#define NGE_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define NGE_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

/*
 * EEPROM/MII bit-bang helpers operating on the MEAR register.
 * NB: these expand to references to a local variable named 'sc'.
 */
#define SIO_SET(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))
244
245static void
246nge_delay(sc)
247	struct nge_softc	*sc;
248{
249	int			idx;
250
251	for (idx = (300 / 33) + 1; idx > 0; idx--)
252		CSR_READ_4(sc, NGE_CSR);
253
254	return;
255}
256
257static void
258nge_eeprom_idle(sc)
259	struct nge_softc	*sc;
260{
261	register int		i;
262
263	SIO_SET(NGE_MEAR_EE_CSEL);
264	nge_delay(sc);
265	SIO_SET(NGE_MEAR_EE_CLK);
266	nge_delay(sc);
267
268	for (i = 0; i < 25; i++) {
269		SIO_CLR(NGE_MEAR_EE_CLK);
270		nge_delay(sc);
271		SIO_SET(NGE_MEAR_EE_CLK);
272		nge_delay(sc);
273	}
274
275	SIO_CLR(NGE_MEAR_EE_CLK);
276	nge_delay(sc);
277	SIO_CLR(NGE_MEAR_EE_CSEL);
278	nge_delay(sc);
279	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
280
281	return;
282}
283
284/*
285 * Send a read command and address to the EEPROM, check for ACK.
286 */
287static void
288nge_eeprom_putbyte(sc, addr)
289	struct nge_softc	*sc;
290	int			addr;
291{
292	register int		d, i;
293
294	d = addr | NGE_EECMD_READ;
295
296	/*
297	 * Feed in each bit and stobe the clock.
298	 */
299	for (i = 0x400; i; i >>= 1) {
300		if (d & i) {
301			SIO_SET(NGE_MEAR_EE_DIN);
302		} else {
303			SIO_CLR(NGE_MEAR_EE_DIN);
304		}
305		nge_delay(sc);
306		SIO_SET(NGE_MEAR_EE_CLK);
307		nge_delay(sc);
308		SIO_CLR(NGE_MEAR_EE_CLK);
309		nge_delay(sc);
310	}
311
312	return;
313}
314
315/*
316 * Read a word of data stored in the EEPROM at address 'addr.'
317 */
318static void
319nge_eeprom_getword(sc, addr, dest)
320	struct nge_softc	*sc;
321	int			addr;
322	u_int16_t		*dest;
323{
324	register int		i;
325	u_int16_t		word = 0;
326
327	/* Force EEPROM to idle state. */
328	nge_eeprom_idle(sc);
329
330	/* Enter EEPROM access mode. */
331	nge_delay(sc);
332	SIO_CLR(NGE_MEAR_EE_CLK);
333	nge_delay(sc);
334	SIO_SET(NGE_MEAR_EE_CSEL);
335	nge_delay(sc);
336
337	/*
338	 * Send address of word we want to read.
339	 */
340	nge_eeprom_putbyte(sc, addr);
341
342	/*
343	 * Start reading bits from EEPROM.
344	 */
345	for (i = 0x8000; i; i >>= 1) {
346		SIO_SET(NGE_MEAR_EE_CLK);
347		nge_delay(sc);
348		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
349			word |= i;
350		nge_delay(sc);
351		SIO_CLR(NGE_MEAR_EE_CLK);
352		nge_delay(sc);
353	}
354
355	/* Turn off EEPROM access mode. */
356	nge_eeprom_idle(sc);
357
358	*dest = word;
359
360	return;
361}
362
363/*
364 * Read a sequence of words from the EEPROM.
365 */
366static void
367nge_read_eeprom(sc, dest, off, cnt, swap)
368	struct nge_softc	*sc;
369	caddr_t			dest;
370	int			off;
371	int			cnt;
372	int			swap;
373{
374	int			i;
375	u_int16_t		word = 0, *ptr;
376
377	for (i = 0; i < cnt; i++) {
378		nge_eeprom_getword(sc, off + i, &word);
379		ptr = (u_int16_t *)(dest + (i * 2));
380		if (swap)
381			*ptr = ntohs(word);
382		else
383			*ptr = word;
384	}
385
386	return;
387}
388
389/*
390 * Sync the PHYs by setting data bit and strobing the clock 32 times.
391 */
392static void
393nge_mii_sync(sc)
394	struct nge_softc		*sc;
395{
396	register int		i;
397
398	SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);
399
400	for (i = 0; i < 32; i++) {
401		SIO_SET(NGE_MEAR_MII_CLK);
402		DELAY(1);
403		SIO_CLR(NGE_MEAR_MII_CLK);
404		DELAY(1);
405	}
406
407	return;
408}
409
410/*
411 * Clock a series of bits through the MII.
412 */
413static void
414nge_mii_send(sc, bits, cnt)
415	struct nge_softc		*sc;
416	u_int32_t		bits;
417	int			cnt;
418{
419	int			i;
420
421	SIO_CLR(NGE_MEAR_MII_CLK);
422
423	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
424                if (bits & i) {
425			SIO_SET(NGE_MEAR_MII_DATA);
426                } else {
427			SIO_CLR(NGE_MEAR_MII_DATA);
428                }
429		DELAY(1);
430		SIO_CLR(NGE_MEAR_MII_CLK);
431		DELAY(1);
432		SIO_SET(NGE_MEAR_MII_CLK);
433	}
434}
435
436/*
437 * Read an PHY register through the MII.
438 */
439static int
440nge_mii_readreg(sc, frame)
441	struct nge_softc		*sc;
442	struct nge_mii_frame	*frame;
443
444{
445	int			i, ack;
446
447	/*
448	 * Set up frame for RX.
449	 */
450	frame->mii_stdelim = NGE_MII_STARTDELIM;
451	frame->mii_opcode = NGE_MII_READOP;
452	frame->mii_turnaround = 0;
453	frame->mii_data = 0;
454
455	CSR_WRITE_4(sc, NGE_MEAR, 0);
456
457	/*
458 	 * Turn on data xmit.
459	 */
460	SIO_SET(NGE_MEAR_MII_DIR);
461
462	nge_mii_sync(sc);
463
464	/*
465	 * Send command/address info.
466	 */
467	nge_mii_send(sc, frame->mii_stdelim, 2);
468	nge_mii_send(sc, frame->mii_opcode, 2);
469	nge_mii_send(sc, frame->mii_phyaddr, 5);
470	nge_mii_send(sc, frame->mii_regaddr, 5);
471
472	/* Idle bit */
473	SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
474	DELAY(1);
475	SIO_SET(NGE_MEAR_MII_CLK);
476	DELAY(1);
477
478	/* Turn off xmit. */
479	SIO_CLR(NGE_MEAR_MII_DIR);
480	/* Check for ack */
481	SIO_CLR(NGE_MEAR_MII_CLK);
482	DELAY(1);
483	ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
484	SIO_SET(NGE_MEAR_MII_CLK);
485	DELAY(1);
486
487	/*
488	 * Now try reading data bits. If the ack failed, we still
489	 * need to clock through 16 cycles to keep the PHY(s) in sync.
490	 */
491	if (ack) {
492		for(i = 0; i < 16; i++) {
493			SIO_CLR(NGE_MEAR_MII_CLK);
494			DELAY(1);
495			SIO_SET(NGE_MEAR_MII_CLK);
496			DELAY(1);
497		}
498		goto fail;
499	}
500
501	for (i = 0x8000; i; i >>= 1) {
502		SIO_CLR(NGE_MEAR_MII_CLK);
503		DELAY(1);
504		if (!ack) {
505			if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
506				frame->mii_data |= i;
507			DELAY(1);
508		}
509		SIO_SET(NGE_MEAR_MII_CLK);
510		DELAY(1);
511	}
512
513fail:
514
515	SIO_CLR(NGE_MEAR_MII_CLK);
516	DELAY(1);
517	SIO_SET(NGE_MEAR_MII_CLK);
518	DELAY(1);
519
520	if (ack)
521		return(1);
522	return(0);
523}
524
525/*
526 * Write to a PHY register through the MII.
527 */
528static int
529nge_mii_writereg(sc, frame)
530	struct nge_softc		*sc;
531	struct nge_mii_frame	*frame;
532
533{
534
535	/*
536	 * Set up frame for TX.
537	 */
538
539	frame->mii_stdelim = NGE_MII_STARTDELIM;
540	frame->mii_opcode = NGE_MII_WRITEOP;
541	frame->mii_turnaround = NGE_MII_TURNAROUND;
542
543	/*
544 	 * Turn on data output.
545	 */
546	SIO_SET(NGE_MEAR_MII_DIR);
547
548	nge_mii_sync(sc);
549
550	nge_mii_send(sc, frame->mii_stdelim, 2);
551	nge_mii_send(sc, frame->mii_opcode, 2);
552	nge_mii_send(sc, frame->mii_phyaddr, 5);
553	nge_mii_send(sc, frame->mii_regaddr, 5);
554	nge_mii_send(sc, frame->mii_turnaround, 2);
555	nge_mii_send(sc, frame->mii_data, 16);
556
557	/* Idle bit. */
558	SIO_SET(NGE_MEAR_MII_CLK);
559	DELAY(1);
560	SIO_CLR(NGE_MEAR_MII_CLK);
561	DELAY(1);
562
563	/*
564	 * Turn off xmit.
565	 */
566	SIO_CLR(NGE_MEAR_MII_DIR);
567
568	return(0);
569}
570
571static int
572nge_miibus_readreg(dev, phy, reg)
573	device_t		dev;
574	int			phy, reg;
575{
576	struct nge_softc	*sc;
577	struct nge_mii_frame	frame;
578
579	sc = device_get_softc(dev);
580
581	bzero((char *)&frame, sizeof(frame));
582
583	frame.mii_phyaddr = phy;
584	frame.mii_regaddr = reg;
585	nge_mii_readreg(sc, &frame);
586
587	return(frame.mii_data);
588}
589
590static int
591nge_miibus_writereg(dev, phy, reg, data)
592	device_t		dev;
593	int			phy, reg, data;
594{
595	struct nge_softc	*sc;
596	struct nge_mii_frame	frame;
597
598	sc = device_get_softc(dev);
599
600	bzero((char *)&frame, sizeof(frame));
601
602	frame.mii_phyaddr = phy;
603	frame.mii_regaddr = reg;
604	frame.mii_data = data;
605	nge_mii_writereg(sc, &frame);
606
607	return(0);
608}
609
/*
 * Handle a link/media status change.  Adjusts the duplex-related
 * TX/RX config bits, and for the MII/GMII case also sets or clears
 * the chip's gigabit mode bit.  In TBI mode the duplex setting is
 * derived from the chip's TBI autonegotiation registers instead of
 * from miibus.
 */
static void
nge_miibus_statchg(dev)
	device_t		dev;
{
	int			status;
	struct nge_softc	*sc;
	struct mii_data		*mii;

	sc = device_get_softc(dev);
	if (sc->nge_tbi) {
		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
		    == IFM_AUTO) {
			/*
			 * Autoneg: read the link partner's abilities.
			 * A value of 0 (no ability info received) is
			 * treated the same as full-duplex.
			 */
			status = CSR_READ_4(sc, NGE_TBI_ANLPAR);
			if (status == 0 || status & NGE_TBIANAR_FDX) {
				NGE_SETBIT(sc, NGE_TX_CFG,
				    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
				NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
			} else {
				NGE_CLRBIT(sc, NGE_TX_CFG,
				    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
				NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
			}

		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
			!= IFM_FDX) {
			/* Manually selected half-duplex. */
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		} else {
			/* Manually selected full-duplex. */
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		}
	} else {
		mii = device_get_softc(sc->nge_miibus);

		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		        NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		} else {
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		}

		/* If we have a 1000Mbps link, set the mode_1000 bit. */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
			NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
		} else {
			NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
		}
	}
	return;
}
666
/*
 * Program the receive multicast filter.  In ALLMULTI/PROMISC mode
 * the hash table is bypassed and all multicast frames are accepted;
 * otherwise the 2048-bit multicast hash table is cleared and
 * refilled from the interface's current multicast address list.
 * Must be called with the driver lock held (asserted below).
 */
static void
nge_setmulti(sc)
	struct nge_softc	*sc;
{
	struct ifnet		*ifp;
	struct ifmultiaddr	*ifma;
	u_int32_t		h = 0, i, filtsave;
	int			bit, index;

	NGE_LOCK_ASSERT(sc);
	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		NGE_CLRBIT(sc, NGE_RXFILT_CTL,
		    NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH);
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI);
		return;
	}

	/*
	 * We have to explicitly enable the multicast hash table
	 * on the NatSemi chip if we want to use it, which we do.
	 * We also have to tell it that we don't want to use the
	 * hash table for matching unicast addresses.
	 */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH);
	NGE_CLRBIT(sc, NGE_RXFILT_CTL,
	    NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH);

	/* Save the filter control bits so they can be restored below. */
	filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL);

	/* first, zot all the existing hash bits */
	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
	}

	/*
	 * From the 11 bits returned by the crc routine, the top 7
	 * bits represent the 16-bit word in the mcast hash table
	 * that needs to be updated, and the lower 4 bits represent
	 * which bit within that word needs to be set.
	 */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 21;
		index = (h >> 4) & 0x7F;
		bit = h & 0xF;
		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
		    NGE_FILTADDR_MCAST_LO + (index * 2));
		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
	}

	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave);

	return;
}
726
727static void
728nge_reset(sc)
729	struct nge_softc	*sc;
730{
731	register int		i;
732
733	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);
734
735	for (i = 0; i < NGE_TIMEOUT; i++) {
736		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
737			break;
738	}
739
740	if (i == NGE_TIMEOUT)
741		printf("nge%d: reset never completed\n", sc->nge_unit);
742
743	/* Wait a little while for the chip to get its brains in order. */
744	DELAY(1000);
745
746	/*
747	 * If this is a NetSemi chip, make sure to clear
748	 * PME mode.
749	 */
750	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
751	CSR_WRITE_4(sc, NGE_CLKRUN, 0);
752
753        return;
754}
755
756/*
757 * Probe for a NatSemi chip. Check the PCI vendor and device
758 * IDs against our list and return a device name if we find a match.
759 */
760static int
761nge_probe(dev)
762	device_t		dev;
763{
764	struct nge_type		*t;
765
766	t = nge_devs;
767
768	while(t->nge_name != NULL) {
769		if ((pci_get_vendor(dev) == t->nge_vid) &&
770		    (pci_get_device(dev) == t->nge_did)) {
771			device_set_desc(dev, t->nge_name);
772			return(BUS_PROBE_DEFAULT);
773		}
774		t++;
775	}
776
777	return(ENXIO);
778}
779
780/*
781 * Attach the interface. Allocate softc structures, do ifmedia
782 * setup and ethernet/BPF attach.
783 */
784static int
785nge_attach(dev)
786	device_t		dev;
787{
788	u_char			eaddr[ETHER_ADDR_LEN];
789	struct nge_softc	*sc;
790	struct ifnet		*ifp;
791	int			unit, error = 0, rid;
792	const char		*sep = "";
793
794	sc = device_get_softc(dev);
795	unit = device_get_unit(dev);
796	bzero(sc, sizeof(struct nge_softc));
797
798	NGE_LOCK_INIT(sc, device_get_nameunit(dev));
799	/*
800	 * Map control/status registers.
801	 */
802	pci_enable_busmaster(dev);
803
804	rid = NGE_RID;
805	sc->nge_res = bus_alloc_resource_any(dev, NGE_RES, &rid, RF_ACTIVE);
806
807	if (sc->nge_res == NULL) {
808		printf("nge%d: couldn't map ports/memory\n", unit);
809		error = ENXIO;
810		goto fail;
811	}
812
813	sc->nge_btag = rman_get_bustag(sc->nge_res);
814	sc->nge_bhandle = rman_get_bushandle(sc->nge_res);
815
816	/* Allocate interrupt */
817	rid = 0;
818	sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
819	    RF_SHAREABLE | RF_ACTIVE);
820
821	if (sc->nge_irq == NULL) {
822		printf("nge%d: couldn't map interrupt\n", unit);
823		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
824		error = ENXIO;
825		goto fail;
826	}
827
828	/* Reset the adapter. */
829	nge_reset(sc);
830
831	/*
832	 * Get station address from the EEPROM.
833	 */
834	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
835	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
836	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);
837
838	sc->nge_unit = unit;
839	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
840
841	sc->nge_ldata = contigmalloc(sizeof(struct nge_list_data), M_DEVBUF,
842	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
843
844	if (sc->nge_ldata == NULL) {
845		printf("nge%d: no memory for list buffers!\n", unit);
846		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
847		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
848		error = ENXIO;
849		goto fail;
850	}
851	bzero(sc->nge_ldata, sizeof(struct nge_list_data));
852
853	ifp = &sc->arpcom.ac_if;
854	ifp->if_softc = sc;
855	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
856	ifp->if_mtu = ETHERMTU;
857	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
858	ifp->if_ioctl = nge_ioctl;
859	ifp->if_start = nge_start;
860	ifp->if_watchdog = nge_watchdog;
861	ifp->if_init = nge_init;
862	ifp->if_baudrate = 1000000000;
863	ifp->if_snd.ifq_maxlen = NGE_TX_LIST_CNT - 1;
864	ifp->if_hwassist = NGE_CSUM_FEATURES;
865	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING;
866#ifdef DEVICE_POLLING
867	ifp->if_capabilities |= IFCAP_POLLING;
868#endif
869	ifp->if_capenable = ifp->if_capabilities;
870
871	/*
872	 * Do MII setup.
873	 */
874	if (mii_phy_probe(dev, &sc->nge_miibus,
875			  nge_ifmedia_upd, nge_ifmedia_sts)) {
876		if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) {
877			sc->nge_tbi = 1;
878			device_printf(dev, "Using TBI\n");
879
880			sc->nge_miibus = dev;
881
882			ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_upd,
883				nge_ifmedia_sts);
884#define	ADD(m, c)	ifmedia_add(&sc->nge_ifmedia, (m), (c), NULL)
885#define PRINT(s)	printf("%s%s", sep, s); sep = ", "
886			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, 0), 0);
887			device_printf(dev, " ");
888			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, 0, 0), 0);
889			PRINT("1000baseSX");
890			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX, 0),0);
891			PRINT("1000baseSX-FDX");
892			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0), 0);
893			PRINT("auto");
894
895			printf("\n");
896#undef ADD
897#undef PRINT
898			ifmedia_set(&sc->nge_ifmedia,
899				IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0));
900
901			CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
902				| NGE_GPIO_GP4_OUT
903				| NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
904				| NGE_GPIO_GP3_OUTENB
905				| NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN);
906
907		} else {
908			printf("nge%d: MII without any PHY!\n", sc->nge_unit);
909			bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
910			bus_release_resource(dev, NGE_RES, NGE_RID,
911					 sc->nge_res);
912			error = ENXIO;
913			goto fail;
914		}
915	}
916
917	/*
918	 * Call MI attach routine.
919	 */
920	ether_ifattach(ifp, eaddr);
921	callout_init(&sc->nge_stat_ch, CALLOUT_MPSAFE);
922
923	/*
924	 * Hookup IRQ last.
925	 */
926	error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET | INTR_MPSAFE,
927	    nge_intr, sc, &sc->nge_intrhand);
928	if (error) {
929		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
930		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
931		printf("nge%d: couldn't set up irq\n", unit);
932	}
933
934fail:
935
936	if (error)
937		NGE_LOCK_DESTROY(sc);
938	return(error);
939}
940
/*
 * Detach the interface: quiesce the hardware, detach from the
 * network stack, then release interrupt, bus resources, descriptor
 * memory and the driver lock — in that order.
 */
static int
nge_detach(dev)
	device_t		dev;
{
	struct nge_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	/* Stop the chip under the driver lock before tearing down. */
	NGE_LOCK(sc);
	nge_reset(sc);
	nge_stop(sc);
	NGE_UNLOCK(sc);
	ether_ifdetach(ifp);

	bus_generic_detach(dev);
	if (!sc->nge_tbi) {
		/* In TBI mode nge_miibus is this device, not a child. */
		device_delete_child(dev, sc->nge_miibus);
	}
	bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
	bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);

	contigfree(sc->nge_ldata, sizeof(struct nge_list_data), M_DEVBUF);

	NGE_LOCK_DESTROY(sc);

	return(0);
}
971
972/*
973 * Initialize the transmit descriptors.
974 */
975static int
976nge_list_tx_init(sc)
977	struct nge_softc	*sc;
978{
979	struct nge_list_data	*ld;
980	struct nge_ring_data	*cd;
981	int			i;
982
983	cd = &sc->nge_cdata;
984	ld = sc->nge_ldata;
985
986	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
987		if (i == (NGE_TX_LIST_CNT - 1)) {
988			ld->nge_tx_list[i].nge_nextdesc =
989			    &ld->nge_tx_list[0];
990			ld->nge_tx_list[i].nge_next =
991			    vtophys(&ld->nge_tx_list[0]);
992		} else {
993			ld->nge_tx_list[i].nge_nextdesc =
994			    &ld->nge_tx_list[i + 1];
995			ld->nge_tx_list[i].nge_next =
996			    vtophys(&ld->nge_tx_list[i + 1]);
997		}
998		ld->nge_tx_list[i].nge_mbuf = NULL;
999		ld->nge_tx_list[i].nge_ptr = 0;
1000		ld->nge_tx_list[i].nge_ctl = 0;
1001	}
1002
1003	cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0;
1004
1005	return(0);
1006}
1007
1008
1009/*
1010 * Initialize the RX descriptors and allocate mbufs for them. Note that
1011 * we arrange the descriptors in a closed ring, so that the last descriptor
1012 * points back to the first.
1013 */
1014static int
1015nge_list_rx_init(sc)
1016	struct nge_softc	*sc;
1017{
1018	struct nge_list_data	*ld;
1019	struct nge_ring_data	*cd;
1020	int			i;
1021
1022	ld = sc->nge_ldata;
1023	cd = &sc->nge_cdata;
1024
1025	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
1026		if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS)
1027			return(ENOBUFS);
1028		if (i == (NGE_RX_LIST_CNT - 1)) {
1029			ld->nge_rx_list[i].nge_nextdesc =
1030			    &ld->nge_rx_list[0];
1031			ld->nge_rx_list[i].nge_next =
1032			    vtophys(&ld->nge_rx_list[0]);
1033		} else {
1034			ld->nge_rx_list[i].nge_nextdesc =
1035			    &ld->nge_rx_list[i + 1];
1036			ld->nge_rx_list[i].nge_next =
1037			    vtophys(&ld->nge_rx_list[i + 1]);
1038		}
1039	}
1040
1041	cd->nge_rx_prod = 0;
1042	sc->nge_head = sc->nge_tail = NULL;
1043
1044	return(0);
1045}
1046
1047/*
1048 * Initialize an RX descriptor and attach an MBUF cluster.
1049 */
1050static int
1051nge_newbuf(sc, c, m)
1052	struct nge_softc	*sc;
1053	struct nge_desc		*c;
1054	struct mbuf		*m;
1055{
1056	struct mbuf		*m_new = NULL;
1057
1058	if (m == NULL) {
1059		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1060		if (m_new == NULL)
1061			return (ENOBUFS);
1062		m = m_new;
1063	} else
1064		m->m_data = m->m_ext.ext_buf;
1065
1066	m->m_len = m->m_pkthdr.len = MCLBYTES;
1067
1068	m_adj(m_new, sizeof(u_int64_t));
1069
1070	c->nge_mbuf = m_new;
1071	c->nge_ptr = vtophys(mtod(m_new, caddr_t));
1072	c->nge_ctl = m_new->m_len;
1073	c->nge_extsts = 0;
1074
1075	return(0);
1076}
1077
1078#ifdef NGE_FIXUP_RX
1079static __inline void
1080nge_fixup_rx(m)
1081	struct mbuf		*m;
1082{
1083        int			i;
1084        uint16_t		*src, *dst;
1085
1086	src = mtod(m, uint16_t *);
1087	dst = src - 1;
1088
1089	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1090		*dst++ = *src++;
1091
1092	m->m_data -= ETHER_ALIGN;
1093
1094	return;
1095}
1096#endif
1097
1098/*
1099 * A frame has been uploaded: pass the resulting mbuf chain up to
1100 * the higher level protocols.
1101 */
static void
nge_rxeof(sc)
	struct nge_softc	*sc;
{
        struct mbuf		*m;
        struct ifnet		*ifp;
	struct nge_desc		*cur_rx;
	int			i, total_len = 0;
	u_int32_t		rxstat;

	NGE_LOCK_ASSERT(sc);
	ifp = &sc->arpcom.ac_if;
	i = sc->nge_cdata.nge_rx_prod;

	/*
	 * Walk the RX ring from the producer index until we reach a
	 * descriptor the chip still owns.
	 */
	while(NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) {
		u_int32_t		extsts;

#ifdef DEVICE_POLLING
		/* In polling mode, honor the per-call packet budget. */
		if (ifp->if_flags & IFF_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif /* DEVICE_POLLING */

		cur_rx = &sc->nge_ldata->nge_rx_list[i];
		rxstat = cur_rx->nge_rxstat;
		extsts = cur_rx->nge_extsts;
		m = cur_rx->nge_mbuf;
		cur_rx->nge_mbuf = NULL;
		total_len = NGE_RXBYTES(cur_rx);
		NGE_INC(i, NGE_RX_LIST_CNT);

		/*
		 * MORE means this descriptor holds only part of a frame
		 * that spans several descriptors: stash the fragment on
		 * the nge_head/nge_tail chain and keep collecting until
		 * the final descriptor arrives.
		 */
		if (rxstat & NGE_CMDSTS_MORE) {
			m->m_len = total_len;
			if (sc->nge_head == NULL) {
				m->m_pkthdr.len = total_len;
				sc->nge_head = sc->nge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->nge_head->m_pkthdr.len += total_len;
				sc->nge_tail->m_next = m;
				sc->nge_tail = m;
			}
			nge_newbuf(sc, cur_rx, NULL);
			continue;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
	 	 * comes up in the ring.
		 */
		if (!(rxstat & NGE_CMDSTS_PKT_OK)) {
			ifp->if_ierrors++;
			/* Discard any partially assembled chain as well. */
			if (sc->nge_head != NULL) {
				m_freem(sc->nge_head);
				sc->nge_head = sc->nge_tail = NULL;
			}
			nge_newbuf(sc, cur_rx, m);
			continue;
		}

		/* Try conjure up a replacement mbuf. */

		if (nge_newbuf(sc, cur_rx, NULL)) {
			/*
			 * No replacement available: drop the frame and
			 * recycle the old cluster into the descriptor.
			 */
			ifp->if_ierrors++;
			if (sc->nge_head != NULL) {
				m_freem(sc->nge_head);
				sc->nge_head = sc->nge_tail = NULL;
			}
			nge_newbuf(sc, cur_rx, m);
			continue;
		}

		/*
		 * Attach the final fragment to a pending chain, or treat
		 * this mbuf as a complete single-descriptor frame.
		 */
		if (sc->nge_head != NULL) {
			m->m_len = total_len;
			m->m_flags &= ~M_PKTHDR;
			sc->nge_tail->m_next = m;
			m = sc->nge_head;
			m->m_pkthdr.len += total_len;
			sc->nge_head = sc->nge_tail = NULL;
		} else
			m->m_pkthdr.len = m->m_len = total_len;

		/*
		 * Ok. NatSemi really screwed up here. This is the
		 * only gigE chip I know of with alignment constraints
		 * on receive buffers. RX buffers must be 64-bit aligned.
		 */
		/*
		 * By popular demand, ignore the alignment problems
		 * on the Intel x86 platform. The performance hit
		 * incurred due to unaligned accesses is much smaller
		 * than the hit produced by forcing buffer copies all
		 * the time, especially with jumbo frames. We still
		 * need to fix up the alignment everywhere else though.
		 */
#ifdef NGE_FIXUP_RX
		nge_fixup_rx(m);
#endif

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do IP checksum checking. */
		if (extsts & NGE_RXEXTSTS_IPPKT)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (!(extsts & NGE_RXEXTSTS_IPCSUMERR))
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		/* Mark TCP/UDP payload checksums the chip verified as good. */
		if ((extsts & NGE_RXEXTSTS_TCPPKT &&
		    !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) ||
		    (extsts & NGE_RXEXTSTS_UDPPKT &&
		    !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (extsts & NGE_RXEXTSTS_VLANPKT) {
			VLAN_INPUT_TAG(ifp, m,
			    ntohs(extsts & NGE_RXEXTSTS_VTCI), continue);
		}
		/* Hand the frame to the stack without holding our lock. */
		NGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NGE_LOCK(sc);
	}

	sc->nge_cdata.nge_rx_prod = i;

	return;
}
1239
1240/*
1241 * A frame was downloaded to the chip. It's safe for us to clean up
1242 * the list buffers.
1243 */
1244
static void
nge_txeof(sc)
	struct nge_softc	*sc;
{
	struct nge_desc		*cur_tx;
	struct ifnet		*ifp;
	u_int32_t		idx;

	NGE_LOCK_ASSERT(sc);
	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->nge_cdata.nge_tx_cons;
	while (idx != sc->nge_cdata.nge_tx_prod) {
		cur_tx = &sc->nge_ldata->nge_tx_list[idx];

		/* Stop at the first descriptor the chip still owns. */
		if (NGE_OWNDESC(cur_tx))
			break;

		/*
		 * Intermediate fragment of a multi-descriptor frame:
		 * status and the mbuf pointer live on the last
		 * descriptor, so just reclaim the slot and move on.
		 */
		if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) {
			sc->nge_cdata.nge_tx_cnt--;
			NGE_INC(idx, NGE_TX_LIST_CNT);
			continue;
		}

		if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) {
			ifp->if_oerrors++;
			if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS)
				ifp->if_collisions++;
			if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL)
				ifp->if_collisions++;
		}

		/* Add in the collision count reported by the chip. */
		ifp->if_collisions +=
		    (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16;

		ifp->if_opackets++;
		if (cur_tx->nge_mbuf != NULL) {
			m_freem(cur_tx->nge_mbuf);
			cur_tx->nge_mbuf = NULL;
			/* A slot was reclaimed; allow queueing again. */
			ifp->if_flags &= ~IFF_OACTIVE;
		}

		sc->nge_cdata.nge_tx_cnt--;
		NGE_INC(idx, NGE_TX_LIST_CNT);
	}

	sc->nge_cdata.nge_tx_cons = idx;

	/* If the ring is fully drained, cancel the watchdog timer. */
	if (idx == sc->nge_cdata.nge_tx_prod)
		ifp->if_timer = 0;

	return;
}
1302
/*
 * Periodic timer callout entry point: acquire the driver lock and
 * defer to nge_tick_locked() for the actual work.
 */
static void
nge_tick(void *xsc)
{
	struct nge_softc	*sc = xsc;

	NGE_LOCK(sc);
	nge_tick_locked(sc);
	NGE_UNLOCK(sc);
}
1315
/*
 * Once-a-second housekeeping (driver lock held): watch for a link
 * coming up and restart transmission of any queued frames when it does.
 */
static void
nge_tick_locked(sc)
	struct nge_softc	*sc;
{
	struct mii_data		*mii;
	struct ifnet		*ifp;

	NGE_LOCK_ASSERT(sc);
	ifp = &sc->arpcom.ac_if;

	if (sc->nge_tbi) {
		/* TBI (fiber) media: poll autoneg status registers directly. */
		if (!sc->nge_link) {
			if (CSR_READ_4(sc, NGE_TBI_BMSR)
			    & NGE_TBIBMSR_ANEG_DONE) {
				if (bootverbose)
					printf("nge%d: gigabit link up\n",
					    sc->nge_unit);
				nge_miibus_statchg(sc->nge_miibus);
				sc->nge_link++;
				/* Flush sends queued while link was down. */
				if (ifp->if_snd.ifq_head != NULL)
					nge_start_locked(ifp);
			}
		}
	} else {
		/* Copper media: let the MII layer refresh PHY state. */
		mii = device_get_softc(sc->nge_miibus);
		mii_tick(mii);

		if (!sc->nge_link) {
			if (mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->nge_link++;
				if (IFM_SUBTYPE(mii->mii_media_active)
				    == IFM_1000_T && bootverbose)
					printf("nge%d: gigabit link up\n",
					    sc->nge_unit);
				/* Flush sends queued while link was down. */
				if (ifp->if_snd.ifq_head != NULL)
					nge_start_locked(ifp);
			}
		}
	}
	/* Rearm ourselves to run again in one second. */
	callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc);

	return;
}
1360
1361#ifdef DEVICE_POLLING
1362static poll_handler_t nge_poll;
1363
/*
 * DEVICE_POLLING entry point: service the RX/TX rings without
 * interrupts.  "count" bounds the number of RX packets processed
 * per call (enforced via sc->rxcycles in nge_rxeof()).
 */
static void
nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct  nge_softc *sc = ifp->if_softc;

	NGE_LOCK(sc);
	/* Polling capability was switched off: force deregistration. */
	if (!(ifp->if_capenable & IFCAP_POLLING)) {
		ether_poll_deregister(ifp);
		cmd = POLL_DEREGISTER;
	}
	if (cmd == POLL_DEREGISTER) {	/* final call, enable interrupts */
		CSR_WRITE_4(sc, NGE_IER, 1);
		NGE_UNLOCK(sc);
		return;
	}

	/*
	 * On the nge, reading the status register also clears it.
	 * So before returning to intr mode we must make sure that all
	 * possible pending sources of interrupts have been served.
	 * In practice this means run to completion the *eof routines,
	 * and then call the interrupt routine
	 */
	sc->rxcycles = count;
	nge_rxeof(sc);
	nge_txeof(sc);
	if (ifp->if_snd.ifq_head != NULL)
		nge_start_locked(ifp);

	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
		u_int32_t	status;

		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);

		if (status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW))
			nge_rxeof(sc);

		/* Receiver went idle: restart it. */
		if (status & (NGE_ISR_RX_IDLE))
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		/* Fatal chip error: reset and reinitialize. */
		if (status & NGE_ISR_SYSERR) {
			nge_reset(sc);
			nge_init_locked(sc);
		}
	}
	NGE_UNLOCK(sc);
}
1412#endif /* DEVICE_POLLING */
1413
/*
 * Interrupt handler: drain all pending interrupt sources (reading
 * NGE_ISR clears them), servicing the RX/TX rings as needed, then
 * re-enable interrupts and restart transmission if work is queued.
 */
static void
nge_intr(arg)
	void			*arg;
{
	struct nge_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	NGE_LOCK(sc);
#ifdef DEVICE_POLLING
	/* If polling is active, nge_poll() does all the work. */
	if (ifp->if_flags & IFF_POLLING) {
		NGE_UNLOCK(sc);
		return;
	}
	if ((ifp->if_capenable & IFCAP_POLLING) &&
	    ether_poll_register(nge_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_4(sc, NGE_IER, 0);
		NGE_UNLOCK(sc);
		nge_poll(ifp, 0, 1);
		return;
	}
#endif /* DEVICE_POLLING */

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		nge_stop(sc);
		NGE_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 0);

	/* Data LED on for TBI mode */
	if(sc->nge_tbi)
		 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
			     | NGE_GPIO_GP3_OUT);

	for (;;) {
		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);

		if ((status & NGE_INTRS) == 0)
			break;

		if ((status & NGE_ISR_TX_DESC_OK) ||
		    (status & NGE_ISR_TX_ERR) ||
		    (status & NGE_ISR_TX_OK) ||
		    (status & NGE_ISR_TX_IDLE))
			nge_txeof(sc);

		if ((status & NGE_ISR_RX_DESC_OK) ||
		    (status & NGE_ISR_RX_ERR) ||
		    (status & NGE_ISR_RX_OFLOW) ||
		    (status & NGE_ISR_RX_FIFO_OFLOW) ||
		    (status & NGE_ISR_RX_IDLE) ||
		    (status & NGE_ISR_RX_OK))
			nge_rxeof(sc);

		/* Receiver went idle: restart it. */
		if ((status & NGE_ISR_RX_IDLE))
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		/* Fatal chip error: full reset and reinitialization. */
		if (status & NGE_ISR_SYSERR) {
			nge_reset(sc);
			ifp->if_flags &= ~IFF_RUNNING;
			nge_init_locked(sc);
		}

#if 0
		/*
		 * XXX: nge_tick() is not ready to be called this way
		 * it screws up the aneg timeout because mii_tick() is
		 * only to be called once per second.
		 */
		if (status & NGE_IMR_PHY_INTR) {
			sc->nge_link = 0;
			nge_tick_locked(sc);
		}
#endif
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 1);

	if (ifp->if_snd.ifq_head != NULL)
		nge_start_locked(ifp);

	/* Data LED off for TBI mode */

	if(sc->nge_tbi)
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
			    & ~NGE_GPIO_GP3_OUT);

	NGE_UNLOCK(sc);

	return;
}
1514
1515/*
1516 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1517 * pointers to the fragment pointers.
1518 */
1519static int
1520nge_encap(sc, m_head, txidx)
1521	struct nge_softc	*sc;
1522	struct mbuf		*m_head;
1523	u_int32_t		*txidx;
1524{
1525	struct nge_desc		*f = NULL;
1526	struct mbuf		*m;
1527	int			frag, cur, cnt = 0;
1528	struct m_tag		*mtag;
1529
1530	/*
1531 	 * Start packing the mbufs in this chain into
1532	 * the fragment pointers. Stop when we run out
1533 	 * of fragments or hit the end of the mbuf chain.
1534	 */
1535	m = m_head;
1536	cur = frag = *txidx;
1537
1538	for (m = m_head; m != NULL; m = m->m_next) {
1539		if (m->m_len != 0) {
1540			if ((NGE_TX_LIST_CNT -
1541			    (sc->nge_cdata.nge_tx_cnt + cnt)) < 2)
1542				return(ENOBUFS);
1543			f = &sc->nge_ldata->nge_tx_list[frag];
1544			f->nge_ctl = NGE_CMDSTS_MORE | m->m_len;
1545			f->nge_ptr = vtophys(mtod(m, vm_offset_t));
1546			if (cnt != 0)
1547				f->nge_ctl |= NGE_CMDSTS_OWN;
1548			cur = frag;
1549			NGE_INC(frag, NGE_TX_LIST_CNT);
1550			cnt++;
1551		}
1552	}
1553
1554	if (m != NULL)
1555		return(ENOBUFS);
1556
1557	sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0;
1558	if (m_head->m_pkthdr.csum_flags) {
1559		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
1560			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1561			    NGE_TXEXTSTS_IPCSUM;
1562		if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
1563			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1564			    NGE_TXEXTSTS_TCPCSUM;
1565		if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
1566			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1567			    NGE_TXEXTSTS_UDPCSUM;
1568	}
1569
1570	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m);
1571	if (mtag != NULL) {
1572		sc->nge_ldata->nge_tx_list[cur].nge_extsts |=
1573		    (NGE_TXEXTSTS_VLANPKT|htons(VLAN_TAG_VALUE(mtag)));
1574	}
1575
1576	sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head;
1577	sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE;
1578	sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN;
1579	sc->nge_cdata.nge_tx_cnt += cnt;
1580	*txidx = frag;
1581
1582	return(0);
1583}
1584
1585/*
1586 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1587 * to the mbuf data regions directly in the transmit lists. We also save a
1588 * copy of the pointers since the transmit list fragment pointers are
1589 * physical addresses.
1590 */
1591
1592static void
1593nge_start(ifp)
1594	struct ifnet		*ifp;
1595{
1596	struct nge_softc	*sc;
1597
1598	sc = ifp->if_softc;
1599	NGE_LOCK(sc);
1600	nge_start_locked(ifp);
1601	NGE_UNLOCK(sc);
1602}
1603
/*
 * Dequeue frames from the interface send queue and hand them to the
 * chip (driver lock held).  Frames that do not fit back in the TX
 * ring are re-queued and the interface is marked OACTIVE.
 */
static void
nge_start_locked(ifp)
	struct ifnet		*ifp;
{
	struct nge_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;

	sc = ifp->if_softc;

	/* Nothing can be sent until the link is up. */
	if (!sc->nge_link)
		return;

	idx = sc->nge_cdata.nge_tx_prod;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	while(sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Ring full: put the frame back and stall the queue. */
		if (nge_encap(sc, m_head, &idx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);

	}

	/* Transmit */
	sc->nge_cdata.nge_tx_prod = idx;
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}
1652
/*
 * Locked wrapper around nge_init_locked(); registered as the
 * interface's if_init routine.
 */
static void
nge_init(void *xsc)
{
	struct nge_softc	*sc = xsc;

	NGE_LOCK(sc);
	nge_init_locked(sc);
	NGE_UNLOCK(sc);
}
1663
/*
 * Bring the hardware from reset to a fully operational state (driver
 * lock held): program the MAC address and RX filter, set up the
 * descriptor rings, configure checksum/VLAN offload and duplex, then
 * enable the receiver, transmitter and interrupts.  A no-op if the
 * interface is already marked IFF_RUNNING.
 */
static void
nge_init_locked(sc)
	struct nge_softc	*sc;
{
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii;

	NGE_LOCK_ASSERT(sc);

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	nge_stop(sc);

	/* TBI (fiber) cards have no MII; mii stays NULL in that case. */
	if (sc->nge_tbi) {
		mii = NULL;
	} else {
		mii = device_get_softc(sc->nge_miibus);
	}

	/* Set MAC address */
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);

	/* Init circular RX list. */
	if (nge_list_rx_init(sc) == ENOBUFS) {
		printf("nge%d: initialization failed: no "
			"memory for rx buffers\n", sc->nge_unit);
		nge_stop(sc);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	nge_list_tx_init(sc);

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);

	 /* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
	} else {
		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
	} else {
		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
	}

	/*
	 * Load the multicast filter.
	 */
	nge_setmulti(sc);

	/* Turn the receive filter on */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, NGE_RX_LISTPTR,
	    vtophys(&sc->nge_ldata->nge_rx_list[0]));
	CSR_WRITE_4(sc, NGE_TX_LISTPTR,
	    vtophys(&sc->nge_ldata->nge_tx_list[0]));

	/* Set RX configuration */
	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);
	/*
	 * Enable hardware checksum validation for all IPv4
	 * packets, do not reject packets with bad checksums.
	 */
	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);

	/*
	 * Tell the chip to detect and strip VLAN tag info from
	 * received frames. The tag will be provided in the extsts
	 * field in the RX descriptors.
	 */
	NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL,
	    NGE_VIPRXCTL_TAG_DETECT_ENB|NGE_VIPRXCTL_TAG_STRIP_ENB);

	/* Set TX configuration */
	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);

	/*
	 * Enable TX IPv4 checksumming on a per-packet basis.
	 */
	CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT);

	/*
	 * Tell the chip to insert VLAN tags on a per-packet basis as
	 * dictated by the code in the frame encapsulation routine.
	 */
	NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);

	/* Set full/half duplex mode. */
	if (sc->nge_tbi) {
		if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
		    == IFM_FDX) {
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		} else {
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		}
	} else {
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		} else {
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		}
	}

	nge_tick_locked(sc);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes. Also enable the
	 * extsts field in the DMA descriptors (needed for
	 * TCP/IP checksum offload on transmit).
	 */
	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD|
	    NGE_CFG_PHYINTR_LNK|NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB);

	/*
	 * Configure interrupt holdoff (moderation). We can
	 * have the chip delay interrupt delivery for a certain
	 * period. Units are in 100us, and the max setting
	 * is 25500us (0xFF x 100us). Default is a 100us holdoff.
	 */
	CSR_WRITE_4(sc, NGE_IHR, 0x01);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
#ifdef DEVICE_POLLING
	/*
	 * ... only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_4(sc, NGE_IER, 0);
	else
#endif /* DEVICE_POLLING */
	CSR_WRITE_4(sc, NGE_IER, 1);

	/* Enable receiver and transmitter. */
	NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

	nge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
}
1851
1852/*
1853 * Set media options.
1854 */
static int
nge_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct nge_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;

	if (sc->nge_tbi) {
		/* TBI (fiber) media: program the TBI registers directly. */
		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
		     == IFM_AUTO) {
			/* Advertise both duplex modes and restart autoneg. */
			CSR_WRITE_4(sc, NGE_TBI_ANAR,
				CSR_READ_4(sc, NGE_TBI_ANAR)
					| NGE_TBIANAR_HDX | NGE_TBIANAR_FDX
					| NGE_TBIANAR_PS1 | NGE_TBIANAR_PS2);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG
				| NGE_TBIBMCR_RESTART_ANEG);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG);
		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media
			    & IFM_GMASK) == IFM_FDX) {
			/* Forced full duplex: disable autonegotiation. */
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);

			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
		} else {
			/* Forced half duplex: disable autonegotiation. */
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);

			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
		}

		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
			    & ~NGE_GPIO_GP3_OUT);
	} else {
		/* Copper media: reset the PHY(s) and let MII renegotiate. */
		mii = device_get_softc(sc->nge_miibus);
		sc->nge_link = 0;
		if (mii->mii_instance) {
			struct mii_softc	*miisc;
			for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
			    miisc = LIST_NEXT(miisc, mii_list))
				mii_phy_reset(miisc);
		}
		mii_mediachg(mii);
	}

	return(0);
}
1907
1908/*
1909 * Report current media status.
1910 */
1911static void
1912nge_ifmedia_sts(ifp, ifmr)
1913	struct ifnet		*ifp;
1914	struct ifmediareq	*ifmr;
1915{
1916	struct nge_softc	*sc;
1917	struct mii_data		*mii;
1918
1919	sc = ifp->if_softc;
1920
1921	if (sc->nge_tbi) {
1922		ifmr->ifm_status = IFM_AVALID;
1923		ifmr->ifm_active = IFM_ETHER;
1924
1925		if (CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) {
1926			ifmr->ifm_status |= IFM_ACTIVE;
1927		}
1928		if (CSR_READ_4(sc, NGE_TBI_BMCR) & NGE_TBIBMCR_LOOPBACK)
1929			ifmr->ifm_active |= IFM_LOOP;
1930		if (!CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) {
1931			ifmr->ifm_active |= IFM_NONE;
1932			ifmr->ifm_status = 0;
1933			return;
1934		}
1935		ifmr->ifm_active |= IFM_1000_SX;
1936		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
1937		    == IFM_AUTO) {
1938			ifmr->ifm_active |= IFM_AUTO;
1939			if (CSR_READ_4(sc, NGE_TBI_ANLPAR)
1940			    & NGE_TBIANAR_FDX) {
1941				ifmr->ifm_active |= IFM_FDX;
1942			}else if (CSR_READ_4(sc, NGE_TBI_ANLPAR)
1943				  & NGE_TBIANAR_HDX) {
1944				ifmr->ifm_active |= IFM_HDX;
1945			}
1946		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
1947			== IFM_FDX)
1948			ifmr->ifm_active |= IFM_FDX;
1949		else
1950			ifmr->ifm_active |= IFM_HDX;
1951
1952	} else {
1953		mii = device_get_softc(sc->nge_miibus);
1954		mii_pollstat(mii);
1955		ifmr->ifm_active = mii->mii_media_active;
1956		ifmr->ifm_status = mii->mii_media_status;
1957	}
1958
1959	return;
1960}
1961
/*
 * Interface ioctl handler: MTU, flag changes (promiscuous mode,
 * up/down), multicast filter updates, media selection and
 * capability toggles; everything else goes to ether_ioctl().
 */
static int
nge_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct nge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error = 0;

	switch(command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > NGE_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			/*
			 * Workaround: if the MTU is larger than
			 * 8152 (TX FIFO size minus 64 minus 18), turn off
			 * TX checksum offloading.
			 */
			if (ifr->ifr_mtu >= 8152) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist = 0;
			} else {
				ifp->if_capenable |= IFCAP_TXCSUM;
				ifp->if_hwassist = NGE_CSUM_FEATURES;
			}
		}
		break;
	case SIOCSIFFLAGS:
		NGE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the promiscuous bit changed while the
			 * interface is running, just twiddle the RX filter
			 * instead of doing a full reinitialization.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->nge_if_flags & IFF_PROMISC)) {
				NGE_SETBIT(sc, NGE_RXFILT_CTL,
				    NGE_RXFILTCTL_ALLPHYS|
				    NGE_RXFILTCTL_ALLMULTI);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->nge_if_flags & IFF_PROMISC) {
				NGE_CLRBIT(sc, NGE_RXFILT_CTL,
				    NGE_RXFILTCTL_ALLPHYS);
				if (!(ifp->if_flags & IFF_ALLMULTI))
					NGE_CLRBIT(sc, NGE_RXFILT_CTL,
					    NGE_RXFILTCTL_ALLMULTI);
			} else {
				ifp->if_flags &= ~IFF_RUNNING;
				nge_init_locked(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nge_stop(sc);
		}
		/* Remember the flags so we can detect changes next time. */
		sc->nge_if_flags = ifp->if_flags;
		NGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		NGE_LOCK(sc);
		nge_setmulti(sc);
		NGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* TBI cards use the driver's own ifmedia, others the MII's. */
		if (sc->nge_tbi) {
			error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia,
					      command);
		} else {
			mii = device_get_softc(sc->nge_miibus);
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
					      command);
		}
		break;
	case SIOCSIFCAP:
		ifp->if_capenable &= ~IFCAP_POLLING;
		ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}
2051
2052static void
2053nge_watchdog(ifp)
2054	struct ifnet		*ifp;
2055{
2056	struct nge_softc	*sc;
2057
2058	sc = ifp->if_softc;
2059
2060	ifp->if_oerrors++;
2061	printf("nge%d: watchdog timeout\n", sc->nge_unit);
2062
2063	NGE_LOCK(sc);
2064	nge_stop(sc);
2065	nge_reset(sc);
2066	ifp->if_flags &= ~IFF_RUNNING;
2067	nge_init_locked(sc);
2068
2069	if (ifp->if_snd.ifq_head != NULL)
2070		nge_start_locked(ifp);
2071
2072	NGE_UNLOCK(sc);
2073
2074	return;
2075}
2076
2077/*
2078 * Stop the adapter and free any mbufs allocated to the
2079 * RX and TX lists.
2080 */
static void
nge_stop(sc)
	struct nge_softc	*sc;
{
	register int		i;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	NGE_LOCK_ASSERT(sc);
	ifp = &sc->arpcom.ac_if;
	/* Cancel the transmit watchdog. */
	ifp->if_timer = 0;
	/* TBI (fiber) cards have no MII. */
	if (sc->nge_tbi) {
		mii = NULL;
	} else {
		mii = device_get_softc(sc->nge_miibus);
	}

	/* Stop the periodic tick callout. */
	callout_stop(&sc->nge_stat_ch);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif
	/* Mask and disable interrupts, then halt the RX/TX engines. */
	CSR_WRITE_4(sc, NGE_IER, 0);
	CSR_WRITE_4(sc, NGE_IMR, 0);
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
	DELAY(1000);
	CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0);
	CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0);

	if (!sc->nge_tbi)
		mii_down(mii);

	sc->nge_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
		if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) {
			m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf);
			sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL;
		}
	}
	bzero((char *)&sc->nge_ldata->nge_rx_list,
		sizeof(sc->nge_ldata->nge_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
		if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) {
			m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf);
			sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL;
		}
	}

	bzero((char *)&sc->nge_ldata->nge_tx_list,
		sizeof(sc->nge_ldata->nge_tx_list));

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}
2143
2144/*
2145 * Stop all chip I/O so that the kernel's probe routines don't
2146 * get confused by errant DMAs when rebooting.
2147 */
2148static void
2149nge_shutdown(dev)
2150	device_t		dev;
2151{
2152	struct nge_softc	*sc;
2153
2154	sc = device_get_softc(dev);
2155
2156	NGE_LOCK(sc);
2157	nge_reset(sc);
2158	nge_stop(sc);
2159	NGE_UNLOCK(sc);
2160
2161	return;
2162}
2163