if_re.c revision 134750
1231990Smp/*
259243Sobrien * Copyright (c) 1997, 1998-2003
359243Sobrien *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
459243Sobrien *
559243Sobrien * Redistribution and use in source and binary forms, with or without
659243Sobrien * modification, are permitted provided that the following conditions
759243Sobrien * are met:
859243Sobrien * 1. Redistributions of source code must retain the above copyright
959243Sobrien *    notice, this list of conditions and the following disclaimer.
1059243Sobrien * 2. Redistributions in binary form must reproduce the above copyright
1159243Sobrien *    notice, this list of conditions and the following disclaimer in the
1259243Sobrien *    documentation and/or other materials provided with the distribution.
1359243Sobrien * 3. All advertising materials mentioning features or use of this software
1459243Sobrien *    must display the following acknowledgement:
1559243Sobrien *	This product includes software developed by Bill Paul.
1659243Sobrien * 4. Neither the name of the author nor the names of any co-contributors
1759243Sobrien *    may be used to endorse or promote products derived from this software
18100616Smp *    without specific prior written permission.
1959243Sobrien *
2059243Sobrien * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
2159243Sobrien * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2259243Sobrien * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2359243Sobrien * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
2459243Sobrien * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2559243Sobrien * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2659243Sobrien * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2759243Sobrien * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2859243Sobrien * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2959243Sobrien * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
3059243Sobrien * THE POSSIBILITY OF SUCH DAMAGE.
3159243Sobrien */
3259243Sobrien
3359243Sobrien#include <sys/cdefs.h>
3459243Sobrien__FBSDID("$FreeBSD: head/sys/dev/re/if_re.c 134750 2004-09-04 07:54:05Z ru $");
3559243Sobrien
36231990Smp/*
37145479Smp * RealTek 8139C+/8169/8169S/8110S PCI NIC driver
3859243Sobrien *
39231990Smp * Written by Bill Paul <wpaul@windriver.com>
40145479Smp * Senior Networking Software Engineer
4159243Sobrien * Wind River Systems
42145479Smp */
43145479Smp
44145479Smp/*
45145479Smp * This driver is designed to support RealTek's next generation of
46145479Smp * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
4759243Sobrien * four devices in this family: the RTL8139C+, the RTL8169, the RTL8169S
48145479Smp * and the RTL8110S.
49145479Smp *
50231990Smp * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
51145479Smp * with the older 8139 family, however it also supports a special
52145479Smp * C+ mode of operation that provides several new performance enhancing
53145479Smp * features. These include:
54145479Smp *
55145479Smp *	o Descriptor based DMA mechanism. Each descriptor represents
56195609Smp *	  a single packet fragment. Data buffers may be aligned on
57145479Smp *	  any byte boundary.
58145479Smp *
59145479Smp *	o 64-bit DMA
60145479Smp *
61145479Smp *	o TCP/IP checksum offload for both RX and TX
62145479Smp *
63145479Smp *	o High and normal priority transmit DMA rings
64145479Smp *
65231990Smp *	o VLAN tag insertion and extraction
66145479Smp *
67145479Smp *	o TCP large send (segmentation offload)
68145479Smp *
69145479Smp * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
70145479Smp * programming API is fairly straightforward. The RX filtering, EEPROM
71145479Smp * access and PHY access is the same as it is on the older 8139 series
72145479Smp * chips.
73231990Smp *
74231990Smp * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
75231990Smp * same programming API and feature set as the 8139C+ with the following
76231990Smp * differences and additions:
77231990Smp *
78231990Smp *	o 1000Mbps mode
79231990Smp *
80231990Smp *	o Jumbo frames
81231990Smp *
82231990Smp *	o GMII and TBI ports/registers for interfacing with copper
83231990Smp *	  or fiber PHYs
84231990Smp *
85231990Smp *	o RX and TX DMA rings can have up to 1024 descriptors
86231990Smp *	  (the 8139C+ allows a maximum of 64)
87231990Smp *
88231990Smp *	o Slight differences in register layout from the 8139C+
89231990Smp *
90145479Smp * The TX start and timer interrupt registers are at different locations
91145479Smp * on the 8169 than they are on the 8139C+. Also, the status word in the
92145479Smp * RX descriptor has a slightly different bit layout. The 8169 does not
93145479Smp * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
94145479Smp * copper gigE PHY.
95145479Smp *
96145479Smp * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
97167465Smp * (the 'S' stands for 'single-chip'). These devices have the same
98145479Smp * programming API as the older 8169, but also have some vendor-specific
99231990Smp * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
100145479Smp * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
101145479Smp *
102145479Smp * This driver takes advantage of the RX and TX checksum offload and
103231990Smp * VLAN tag insertion/extraction features. It also implements TX
104231990Smp * interrupt moderation using the timer interrupt registers, which
105231990Smp * significantly reduces TX interrupt load. There is also support
106231990Smp * for jumbo frames, however the 8169/8169S/8110S can not transmit
107231990Smp * jumbo frames larger than 7.5K, so the max MTU possible with this
108231990Smp * driver is 7500 bytes.
109231990Smp */
110231990Smp
111231990Smp#include <sys/param.h>
112231990Smp#include <sys/endian.h>
113231990Smp#include <sys/systm.h>
114231990Smp#include <sys/sockio.h>
115231990Smp#include <sys/mbuf.h>
116231990Smp#include <sys/malloc.h>
117231990Smp#include <sys/module.h>
118231990Smp#include <sys/kernel.h>
119145479Smp#include <sys/socket.h>
120231990Smp
121231990Smp#include <net/if.h>
122231990Smp#include <net/if_arp.h>
123231990Smp#include <net/ethernet.h>
124231990Smp#include <net/if_dl.h>
125231990Smp#include <net/if_media.h>
126231990Smp#include <net/if_vlan_var.h>
127231990Smp
128231990Smp#include <net/bpf.h>
129231990Smp
130231990Smp#include <machine/bus_pio.h>
131231990Smp#include <machine/bus_memio.h>
132231990Smp#include <machine/bus.h>
133231990Smp#include <machine/resource.h>
134231990Smp#include <sys/bus.h>
135231990Smp#include <sys/rman.h>
136231990Smp
137231990Smp#include <dev/mii/mii.h>
138231990Smp#include <dev/mii/miivar.h>
139231990Smp
140231990Smp#include <dev/pci/pcireg.h>
141231990Smp#include <dev/pci/pcivar.h>
142231990Smp
143231990SmpMODULE_DEPEND(re, pci, 1, 1, 1);
144145479SmpMODULE_DEPEND(re, ether, 1, 1, 1);
145231990SmpMODULE_DEPEND(re, miibus, 1, 1, 1);
146231990Smp
147231990Smp/* "controller miibus0" required.  See GENERIC if you get errors here. */
148145479Smp#include "miibus_if.h"
149145479Smp
150167465Smp/*
151145479Smp * Default to using PIO access for this driver.
152167465Smp */
15359243Sobrien#define RE_USEIOSPACE
154167465Smp
15559243Sobrien#include <pci/if_rlreg.h>
15659243Sobrien
157145479Smp#define RE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
15859243Sobrien
15959243Sobrien/*
16059243Sobrien * Various supported device vendors/types and their names.
16159243Sobrien */
16259243Sobrienstatic struct rl_type re_devs[] = {
16359243Sobrien	{ RT_VENDORID, RT_DEVICEID_8139, RL_HWREV_8139CPLUS,
164167465Smp		"RealTek 8139C+ 10/100BaseTX" },
16559243Sobrien	{ RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169,
16659243Sobrien		"RealTek 8169 Gigabit Ethernet" },
16759243Sobrien	{ RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169S,
16859243Sobrien		"RealTek 8169S Single-chip Gigabit Ethernet" },
16959243Sobrien	{ RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8110S,
17059243Sobrien		"RealTek 8110S Single-chip Gigabit Ethernet" },
17159243Sobrien	{ COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, RL_HWREV_8169S,
17259243Sobrien		"Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
173167465Smp	{ 0, 0, 0, NULL }
17459243Sobrien};
17559243Sobrien
176145479Smpstatic struct rl_hwrev re_hwrevs[] = {
17759243Sobrien	{ RL_HWREV_8139, RL_8139,  "" },
17859243Sobrien	{ RL_HWREV_8139A, RL_8139, "A" },
17959243Sobrien	{ RL_HWREV_8139AG, RL_8139, "A-G" },
18059243Sobrien	{ RL_HWREV_8139B, RL_8139, "B" },
18159243Sobrien	{ RL_HWREV_8130, RL_8139, "8130" },
18259243Sobrien	{ RL_HWREV_8139C, RL_8139, "C" },
183167465Smp	{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C" },
18459243Sobrien	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+"},
18559243Sobrien	{ RL_HWREV_8169, RL_8169, "8169"},
18659243Sobrien	{ RL_HWREV_8169S, RL_8169, "8169S"},
18759243Sobrien	{ RL_HWREV_8110S, RL_8169, "8110S"},
18859243Sobrien	{ RL_HWREV_8100, RL_8139, "8100"},
18959243Sobrien	{ RL_HWREV_8101, RL_8139, "8101"},
19059243Sobrien	{ 0, 0, NULL }
19159243Sobrien};
192167465Smp
19359243Sobrienstatic int re_probe		(device_t);
194167465Smpstatic int re_attach		(device_t);
19559243Sobrienstatic int re_detach		(device_t);
19659243Sobrien
19759243Sobrienstatic int re_encap		(struct rl_softc *, struct mbuf *, int *);
19859243Sobrien
199167465Smpstatic void re_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
200167465Smpstatic void re_dma_map_desc	(void *, bus_dma_segment_t *, int,
201167465Smp				    bus_size_t, int);
20259243Sobrienstatic int re_allocmem		(device_t, struct rl_softc *);
203167465Smpstatic int re_newbuf		(struct rl_softc *, int, struct mbuf *);
204167465Smpstatic int re_rx_list_init	(struct rl_softc *);
20559243Sobrienstatic int re_tx_list_init	(struct rl_softc *);
206167465Smpstatic void re_rxeof		(struct rl_softc *);
207167465Smpstatic void re_txeof		(struct rl_softc *);
20859243Sobrien#ifdef DEVICE_POLLING
20959243Sobrienstatic void re_poll		(struct ifnet *, enum poll_cmd, int);
21059243Sobrienstatic void re_poll_locked	(struct ifnet *, enum poll_cmd, int);
211167465Smp#endif
21259243Sobrienstatic void re_intr		(void *);
21359243Sobrienstatic void re_tick		(void *);
21459243Sobrienstatic void re_tick_locked	(struct rl_softc *);
215145479Smpstatic void re_start		(struct ifnet *);
21659243Sobrienstatic void re_start_locked	(struct ifnet *);
21759243Sobrienstatic int re_ioctl		(struct ifnet *, u_long, caddr_t);
21859243Sobrienstatic void re_init		(void *);
21959243Sobrienstatic void re_init_locked	(struct rl_softc *);
22059243Sobrienstatic void re_stop		(struct rl_softc *);
22159243Sobrienstatic void re_watchdog		(struct ifnet *);
222167465Smpstatic int re_suspend		(device_t);
22359243Sobrienstatic int re_resume		(device_t);
22459243Sobrienstatic void re_shutdown		(device_t);
22559243Sobrienstatic int re_ifmedia_upd	(struct ifnet *);
22659243Sobrienstatic void re_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
227145479Smp
228145479Smpstatic void re_eeprom_putbyte	(struct rl_softc *, int);
229145479Smpstatic void re_eeprom_getword	(struct rl_softc *, int, u_int16_t *);
230195609Smpstatic void re_read_eeprom	(struct rl_softc *, caddr_t, int, int, int);
231195609Smpstatic int re_gmii_readreg	(device_t, int, int);
232195609Smpstatic int re_gmii_writereg	(device_t, int, int, int);
23359243Sobrien
234167465Smpstatic int re_miibus_readreg	(device_t, int, int);
23559243Sobrienstatic int re_miibus_writereg	(device_t, int, int, int);
23659243Sobrienstatic void re_miibus_statchg	(device_t);
237195609Smp
238195609Smpstatic void re_setmulti		(struct rl_softc *);
239195609Smpstatic void re_reset		(struct rl_softc *);
240195609Smp
24159243Sobrienstatic int re_diag		(struct rl_softc *);
24259243Sobrien
24359243Sobrien#ifdef RE_USEIOSPACE
24459243Sobrien#define RL_RES			SYS_RES_IOPORT
24559243Sobrien#define RL_RID			RL_PCI_LOIO
24659243Sobrien#else
247231990Smp#define RL_RES			SYS_RES_MEMORY
24859243Sobrien#define RL_RID			RL_PCI_LOMEM
249167465Smp#endif
25059243Sobrien
251145479Smpstatic device_method_t re_methods[] = {
25259243Sobrien	/* Device interface */
25359243Sobrien	DEVMETHOD(device_probe,		re_probe),
25459243Sobrien	DEVMETHOD(device_attach,	re_attach),
25559243Sobrien	DEVMETHOD(device_detach,	re_detach),
25659243Sobrien	DEVMETHOD(device_suspend,	re_suspend),
25759243Sobrien	DEVMETHOD(device_resume,	re_resume),
25859243Sobrien	DEVMETHOD(device_shutdown,	re_shutdown),
25959243Sobrien
260167465Smp	/* bus interface */
26159243Sobrien	DEVMETHOD(bus_print_child,	bus_generic_print_child),
262145479Smp	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
26359243Sobrien
26459243Sobrien	/* MII interface */
26559243Sobrien	DEVMETHOD(miibus_readreg,	re_miibus_readreg),
26659243Sobrien	DEVMETHOD(miibus_writereg,	re_miibus_writereg),
26759243Sobrien	DEVMETHOD(miibus_statchg,	re_miibus_statchg),
26859243Sobrien
26959243Sobrien	{ 0, 0 }
27059243Sobrien};
27159243Sobrien
27259243Sobrienstatic driver_t re_driver = {
27359243Sobrien	"re",
27459243Sobrien	re_methods,
27559243Sobrien	sizeof(struct rl_softc)
27659243Sobrien};
27759243Sobrien
27859243Sobrienstatic devclass_t re_devclass;
279167465Smp
28059243SobrienDRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
281167465SmpDRIVER_MODULE(re, cardbus, re_driver, re_devclass, 0, 0);
282167465SmpDRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);
28359243Sobrien
28459243Sobrien#define EE_SET(x)					\
28559243Sobrien	CSR_WRITE_1(sc, RL_EECMD,			\
28659243Sobrien		CSR_READ_1(sc, RL_EECMD) | x)
287167465Smp
28859243Sobrien#define EE_CLR(x)					\
289145479Smp	CSR_WRITE_1(sc, RL_EECMD,			\
29059243Sobrien		CSR_READ_1(sc, RL_EECMD) & ~x)
29159243Sobrien
29259243Sobrien/*
29359243Sobrien * Send a read command and address to the EEPROM, check for ACK.
29459243Sobrien */
29559243Sobrienstatic void
296167465Smpre_eeprom_putbyte(sc, addr)
297167465Smp	struct rl_softc		*sc;
29859243Sobrien	int			addr;
29959243Sobrien{
30059243Sobrien	register int		d, i;
30159243Sobrien
30259243Sobrien	d = addr | sc->rl_eecmd_read;
30359243Sobrien
30459243Sobrien	/*
30559243Sobrien	 * Feed in each bit and strobe the clock.
30659243Sobrien	 */
30759243Sobrien	for (i = 0x400; i; i >>= 1) {
30859243Sobrien		if (d & i) {
30959243Sobrien			EE_SET(RL_EE_DATAIN);
31059243Sobrien		} else {
31159243Sobrien			EE_CLR(RL_EE_DATAIN);
312167465Smp		}
31359243Sobrien		DELAY(100);
31459243Sobrien		EE_SET(RL_EE_CLK);
31559243Sobrien		DELAY(150);
316145479Smp		EE_CLR(RL_EE_CLK);
31759243Sobrien		DELAY(100);
31859243Sobrien	}
31959243Sobrien}
32059243Sobrien
32159243Sobrien/*
322167465Smp * Read a word of data stored in the EEPROM at address 'addr.'
32359243Sobrien */
324145479Smpstatic void
32559243Sobrienre_eeprom_getword(sc, addr, dest)
32659243Sobrien	struct rl_softc		*sc;
32759243Sobrien	int			addr;
32859243Sobrien	u_int16_t		*dest;
32959243Sobrien{
33059243Sobrien	register int		i;
331145479Smp	u_int16_t		word = 0;
33259243Sobrien
33359243Sobrien	/* Enter EEPROM access mode. */
33459243Sobrien	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
335167465Smp
33659243Sobrien	/*
337145479Smp	 * Send address of word we want to read.
33859243Sobrien	 */
33959243Sobrien	re_eeprom_putbyte(sc, addr);
34059243Sobrien
34159243Sobrien	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
34259243Sobrien
34359243Sobrien	/*
34459243Sobrien	 * Start reading bits from EEPROM.
345167465Smp	 */
34659243Sobrien	for (i = 0x8000; i; i >>= 1) {
34759243Sobrien		EE_SET(RL_EE_CLK);
34859243Sobrien		DELAY(100);
34959243Sobrien		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
35059243Sobrien			word |= i;
35159243Sobrien		EE_CLR(RL_EE_CLK);
35259243Sobrien		DELAY(100);
35359243Sobrien	}
35459243Sobrien
35559243Sobrien	/* Turn off EEPROM access mode. */
35659243Sobrien	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
35759243Sobrien
35859243Sobrien	*dest = word;
35959243Sobrien}
36059243Sobrien
36159243Sobrien/*
36259243Sobrien * Read a sequence of words from the EEPROM.
36359243Sobrien */
36459243Sobrienstatic void
365167465Smpre_read_eeprom(sc, dest, off, cnt, swap)
36659243Sobrien	struct rl_softc		*sc;
36759243Sobrien	caddr_t			dest;
36859243Sobrien	int			off;
36959243Sobrien	int			cnt;
37059243Sobrien	int			swap;
37159243Sobrien{
37259243Sobrien	int			i;
37359243Sobrien	u_int16_t		word = 0, *ptr;
37459243Sobrien
37559243Sobrien	for (i = 0; i < cnt; i++) {
37659243Sobrien		re_eeprom_getword(sc, off + i, &word);
37759243Sobrien		ptr = (u_int16_t *)(dest + (i * 2));
37859243Sobrien		if (swap)
37959243Sobrien			*ptr = ntohs(word);
38059243Sobrien		else
38159243Sobrien			*ptr = word;
38259243Sobrien	}
38359243Sobrien}
38459243Sobrien
38559243Sobrienstatic int
38659243Sobrienre_gmii_readreg(dev, phy, reg)
38759243Sobrien	device_t		dev;
38859243Sobrien	int			phy, reg;
389145479Smp{
39059243Sobrien	struct rl_softc		*sc;
391131962Smp	u_int32_t		rval;
392167465Smp	int			i;
393131962Smp
394145479Smp	if (phy != 1)
395231990Smp		return (0);
396231990Smp
397231990Smp	sc = device_get_softc(dev);
398231990Smp
399231990Smp	/* Let the rgephy driver read the GMEDIASTAT register */
400231990Smp
401145479Smp	if (reg == RL_GMEDIASTAT) {
402231990Smp		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
403231990Smp		return (rval);
404231990Smp	}
405231990Smp
406231990Smp	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
407231990Smp	DELAY(1000);
408231990Smp
409145479Smp	for (i = 0; i < RL_TIMEOUT; i++) {
410131962Smp		rval = CSR_READ_4(sc, RL_PHYAR);
411131962Smp		if (rval & RL_PHYAR_BUSY)
412131962Smp			break;
413131962Smp		DELAY(100);
414131962Smp	}
415131962Smp
416131962Smp	if (i == RL_TIMEOUT) {
417131962Smp		printf ("re%d: PHY read failed\n", sc->rl_unit);
418131962Smp		return (0);
419131962Smp	}
420131962Smp
421131962Smp	return (rval & RL_PHYAR_PHYDATA);
422131962Smp}
423131962Smp
424131962Smpstatic int
425131962Smpre_gmii_writereg(dev, phy, reg, data)
426131962Smp	device_t		dev;
42759243Sobrien	int			phy, reg, data;
428167465Smp{
42959243Sobrien	struct rl_softc		*sc;
430167465Smp	u_int32_t		rval;
431167465Smp	int			i;
432167465Smp
433167465Smp	sc = device_get_softc(dev);
434167465Smp
435167465Smp	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
436167465Smp	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
437167465Smp	DELAY(1000);
438167465Smp
439167465Smp	for (i = 0; i < RL_TIMEOUT; i++) {
440167465Smp		rval = CSR_READ_4(sc, RL_PHYAR);
44159243Sobrien		if (!(rval & RL_PHYAR_BUSY))
442167465Smp			break;
44359243Sobrien		DELAY(100);
444167465Smp	}
44559243Sobrien
446167465Smp	if (i == RL_TIMEOUT) {
447167465Smp		printf ("re%d: PHY write failed\n", sc->rl_unit);
448167465Smp		return (0);
44959243Sobrien	}
45059243Sobrien
45159243Sobrien	return (0);
45259243Sobrien}
453167465Smp
45459243Sobrienstatic int
455167465Smpre_miibus_readreg(dev, phy, reg)
456167465Smp	device_t		dev;
45759243Sobrien	int			phy, reg;
45859243Sobrien{
45959243Sobrien	struct rl_softc		*sc;
46059243Sobrien	u_int16_t		rval = 0;
46159243Sobrien	u_int16_t		re8139_reg = 0;
462167465Smp
46359243Sobrien	sc = device_get_softc(dev);
464167465Smp
46559243Sobrien	if (sc->rl_type == RL_8169) {
466167465Smp		rval = re_gmii_readreg(dev, phy, reg);
467167465Smp		return (rval);
46859243Sobrien	}
469167465Smp
47059243Sobrien	/* Pretend the internal PHY is only at address 0 */
471167465Smp	if (phy) {
47259243Sobrien		return (0);
47359243Sobrien	}
47459243Sobrien	switch (reg) {
475167465Smp	case MII_BMCR:
47659243Sobrien		re8139_reg = RL_BMCR;
47759243Sobrien		break;
478145479Smp	case MII_BMSR:
47959243Sobrien		re8139_reg = RL_BMSR;
48059243Sobrien		break;
481145479Smp	case MII_ANAR:
48259243Sobrien		re8139_reg = RL_ANAR;
48359243Sobrien		break;
48459243Sobrien	case MII_ANER:
485167465Smp		re8139_reg = RL_ANER;
48659243Sobrien		break;
48759243Sobrien	case MII_ANLPAR:
488145479Smp		re8139_reg = RL_LPAR;
489145479Smp		break;
49059243Sobrien	case MII_PHYIDR1:
49159243Sobrien	case MII_PHYIDR2:
49259243Sobrien		return (0);
493145479Smp	/*
49459243Sobrien	 * Allow the rlphy driver to read the media status
49559243Sobrien	 * register. If we have a link partner which does not
49659243Sobrien	 * support NWAY, this is the register which will tell
49759243Sobrien	 * us the results of parallel detection.
49859243Sobrien	 */
499167465Smp	case RL_MEDIASTAT:
500167465Smp		rval = CSR_READ_1(sc, RL_MEDIASTAT);
501167465Smp		return (rval);
502167465Smp	default:
503167465Smp		printf("re%d: bad phy register\n", sc->rl_unit);
504167465Smp		return (0);
505167465Smp	}
50659243Sobrien	rval = CSR_READ_2(sc, re8139_reg);
507167465Smp	return (rval);
508167465Smp}
509167465Smp
510167465Smpstatic int
511167465Smpre_miibus_writereg(dev, phy, reg, data)
512167465Smp	device_t		dev;
513167465Smp	int			phy, reg, data;
514167465Smp{
515167465Smp	struct rl_softc		*sc;
516167465Smp	u_int16_t		re8139_reg = 0;
517167465Smp	int			rval = 0;
518167465Smp
519167465Smp	sc = device_get_softc(dev);
52059243Sobrien
521167465Smp	if (sc->rl_type == RL_8169) {
52259243Sobrien		rval = re_gmii_writereg(dev, phy, reg, data);
52359243Sobrien		return (rval);
52459243Sobrien	}
525145479Smp
52659243Sobrien	/* Pretend the internal PHY is only at address 0 */
52759243Sobrien	if (phy)
52859243Sobrien		return (0);
52959243Sobrien
53059243Sobrien	switch (reg) {
53159243Sobrien	case MII_BMCR:
532167465Smp		re8139_reg = RL_BMCR;
53359243Sobrien		break;
53459243Sobrien	case MII_BMSR:
53559243Sobrien		re8139_reg = RL_BMSR;
53659243Sobrien		break;
53759243Sobrien	case MII_ANAR:
53859243Sobrien		re8139_reg = RL_ANAR;
53959243Sobrien		break;
54059243Sobrien	case MII_ANER:
541167465Smp		re8139_reg = RL_ANER;
542167465Smp		break;
54359243Sobrien	case MII_ANLPAR:
54459243Sobrien		re8139_reg = RL_LPAR;
54559243Sobrien		break;
54659243Sobrien	case MII_PHYIDR1:
547145479Smp	case MII_PHYIDR2:
548145479Smp		return (0);
549145479Smp		break;
550195609Smp	default:
55159243Sobrien		printf("re%d: bad phy register\n", sc->rl_unit);
552167465Smp		return (0);
55359243Sobrien	}
554195609Smp	CSR_WRITE_2(sc, re8139_reg, data);
55559243Sobrien	return (0);
55659243Sobrien}
55759243Sobrien
55859243Sobrienstatic void
55959243Sobrienre_miibus_statchg(dev)
560167465Smp	device_t		dev;
561195609Smp{
562195609Smp
563195609Smp}
564195609Smp
565195609Smp/*
566195609Smp * Program the 64-bit multicast hash filter.
567167465Smp */
568167465Smpstatic void
569167465Smpre_setmulti(sc)
570167465Smp	struct rl_softc		*sc;
571167465Smp{
572167465Smp	struct ifnet		*ifp;
573167465Smp	int			h = 0;
574167465Smp	u_int32_t		hashes[2] = { 0, 0 };
575167465Smp	struct ifmultiaddr	*ifma;
576167465Smp	u_int32_t		rxfilt;
577167465Smp	int			mcnt = 0;
578167465Smp
579167465Smp	RL_LOCK_ASSERT(sc);
580167465Smp
581167465Smp	ifp = &sc->arpcom.ac_if;
582167465Smp
583167465Smp	rxfilt = CSR_READ_4(sc, RL_RXCFG);
584167465Smp
585167465Smp	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
586167465Smp		rxfilt |= RL_RXCFG_RX_MULTI;
587167465Smp		CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
588167465Smp		CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF);
589167465Smp		CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF);
590167465Smp		return;
591167465Smp	}
592167465Smp
593167465Smp	/* first, zot all the existing hash bits */
594167465Smp	CSR_WRITE_4(sc, RL_MAR0, 0);
595167465Smp	CSR_WRITE_4(sc, RL_MAR4, 0);
596167465Smp
597167465Smp	/* now program new ones */
598167465Smp	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
599195609Smp		if (ifma->ifma_addr->sa_family != AF_LINK)
600195609Smp			continue;
601195609Smp		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
602195609Smp		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
603195609Smp		if (h < 32)
604195609Smp			hashes[0] |= (1 << h);
605195609Smp		else
606167465Smp			hashes[1] |= (1 << (h - 32));
607167465Smp		mcnt++;
608167465Smp	}
609167465Smp
610167465Smp	if (mcnt)
611167465Smp		rxfilt |= RL_RXCFG_RX_MULTI;
612167465Smp	else
613167465Smp		rxfilt &= ~RL_RXCFG_RX_MULTI;
614195609Smp
615195609Smp	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
616195609Smp	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
617195609Smp	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
618195609Smp}
619195609Smp
620195609Smpstatic void
621167465Smpre_reset(sc)
622167465Smp	struct rl_softc		*sc;
623167465Smp{
624167465Smp	register int		i;
625167465Smp
626167465Smp	RL_LOCK_ASSERT(sc);
627167465Smp
628167465Smp	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
629167465Smp
630167465Smp	for (i = 0; i < RL_TIMEOUT; i++) {
631231990Smp		DELAY(10);
632167465Smp		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
633167465Smp			break;
634167465Smp	}
635167465Smp	if (i == RL_TIMEOUT)
636167465Smp		printf("re%d: reset never completed!\n", sc->rl_unit);
637167465Smp
638167465Smp	CSR_WRITE_1(sc, 0x82, 1);
639167465Smp}
640167465Smp
641167465Smp/*
642167465Smp * The following routine is designed to test for a defect on some
643167465Smp * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
644167465Smp * lines connected to the bus, however for a 32-bit only card, they
645167465Smp * should be pulled high. The result of this defect is that the
646167465Smp * NIC will not work right if you plug it into a 64-bit slot: DMA
647167465Smp * operations will be done with 64-bit transfers, which will fail
648167465Smp * because the 64-bit data lines aren't connected.
649167465Smp *
650167465Smp * There's no way to work around this (short of talking a soldering
651167465Smp * iron to the board), however we can detect it. The method we use
652167465Smp * here is to put the NIC into digital loopback mode, set the receiver
653167465Smp * to promiscuous mode, and then try to send a frame. We then compare
654167465Smp * the frame data we sent to what was received. If the data matches,
655167465Smp * then the NIC is working correctly, otherwise we know the user has
656167465Smp * a defective NIC which has been mistakenly plugged into a 64-bit PCI
657167465Smp * slot. In the latter case, there's no way the NIC can work correctly,
658167465Smp * so we print out a message on the console and abort the device attach.
659167465Smp */
660167465Smp
661167465Smpstatic int
662167465Smpre_diag(sc)
663167465Smp	struct rl_softc		*sc;
664167465Smp{
665167465Smp	struct ifnet		*ifp = &sc->arpcom.ac_if;
666167465Smp	struct mbuf		*m0;
667167465Smp	struct ether_header	*eh;
668167465Smp	struct rl_desc		*cur_rx;
669167465Smp	u_int16_t		status;
670167465Smp	u_int32_t		rxstat;
671167465Smp	int			total_len, i, error = 0;
672167465Smp	u_int8_t		dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
673167465Smp	u_int8_t		src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
674167465Smp
675167465Smp	/* Allocate a single mbuf */
676167465Smp	MGETHDR(m0, M_DONTWAIT, MT_DATA);
677167465Smp	if (m0 == NULL)
678167465Smp		return (ENOBUFS);
679167465Smp
680167465Smp	RL_LOCK(sc);
681167465Smp
682167465Smp	/*
683167465Smp	 * Initialize the NIC in test mode. This sets the chip up
684167465Smp	 * so that it can send and receive frames, but performs the
685195609Smp	 * following special functions:
686195609Smp	 * - Puts receiver in promiscuous mode
687195609Smp	 * - Enables digital loopback mode
688195609Smp	 * - Leaves interrupts turned off
689195609Smp	 */
690195609Smp
691195609Smp	ifp->if_flags |= IFF_PROMISC;
692167465Smp	sc->rl_testmode = 1;
693167465Smp	re_init_locked(sc);
694167465Smp	re_stop(sc);
695167465Smp	DELAY(100000);
696	re_init_locked(sc);
697
698	/* Put some data in the mbuf */
699
700	eh = mtod(m0, struct ether_header *);
701	bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
702	bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
703	eh->ether_type = htons(ETHERTYPE_IP);
704	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;
705
706	/*
707	 * Queue the packet, start transmission.
708	 * Note: IF_HANDOFF() ultimately calls re_start() for us.
709	 */
710
711	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
712	RL_UNLOCK(sc);
713	IF_HANDOFF(&ifp->if_snd, m0, ifp);
714	RL_LOCK(sc);
715	m0 = NULL;
716
717	/* Wait for it to propagate through the chip */
718
719	DELAY(100000);
720	for (i = 0; i < RL_TIMEOUT; i++) {
721		status = CSR_READ_2(sc, RL_ISR);
722		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
723		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
724			break;
725		DELAY(10);
726	}
727
728	if (i == RL_TIMEOUT) {
729		printf("re%d: diagnostic failed, failed to receive packet "
730		    "in loopback mode\n", sc->rl_unit);
731		error = EIO;
732		goto done;
733	}
734
735	/*
736	 * The packet should have been dumped into the first
737	 * entry in the RX DMA ring. Grab it from there.
738	 */
739
740	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
741	    sc->rl_ldata.rl_rx_list_map,
742	    BUS_DMASYNC_POSTREAD);
743	bus_dmamap_sync(sc->rl_ldata.rl_mtag,
744	    sc->rl_ldata.rl_rx_dmamap[0],
745	    BUS_DMASYNC_POSTWRITE);
746	bus_dmamap_unload(sc->rl_ldata.rl_mtag,
747	    sc->rl_ldata.rl_rx_dmamap[0]);
748
749	m0 = sc->rl_ldata.rl_rx_mbuf[0];
750	sc->rl_ldata.rl_rx_mbuf[0] = NULL;
751	eh = mtod(m0, struct ether_header *);
752
753	cur_rx = &sc->rl_ldata.rl_rx_list[0];
754	total_len = RL_RXBYTES(cur_rx);
755	rxstat = le32toh(cur_rx->rl_cmdstat);
756
757	if (total_len != ETHER_MIN_LEN) {
758		printf("re%d: diagnostic failed, received short packet\n",
759		    sc->rl_unit);
760		error = EIO;
761		goto done;
762	}
763
764	/* Test that the received packet data matches what we sent. */
765
766	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
767	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
768	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
769		printf("re%d: WARNING, DMA FAILURE!\n", sc->rl_unit);
770		printf("re%d: expected TX data: %6D/%6D/0x%x\n", sc->rl_unit,
771		    dst, ":", src, ":", ETHERTYPE_IP);
772		printf("re%d: received RX data: %6D/%6D/0x%x\n", sc->rl_unit,
773		    eh->ether_dhost, ":",  eh->ether_shost, ":",
774		    ntohs(eh->ether_type));
775		printf("re%d: You may have a defective 32-bit NIC plugged "
776		    "into a 64-bit PCI slot.\n", sc->rl_unit);
777		printf("re%d: Please re-install the NIC in a 32-bit slot "
778		    "for proper operation.\n", sc->rl_unit);
779		printf("re%d: Read the re(4) man page for more details.\n",
780		    sc->rl_unit);
781		error = EIO;
782	}
783
784done:
785	/* Turn interface off, release resources */
786
787	sc->rl_testmode = 0;
788	ifp->if_flags &= ~IFF_PROMISC;
789	re_stop(sc);
790	if (m0 != NULL)
791		m_freem(m0);
792
793	RL_UNLOCK(sc);
794
795	return (error);
796}
797
798/*
799 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
800 * IDs against our list and return a device name if we find a match.
801 */
802static int
803re_probe(dev)
804	device_t		dev;
805{
806	struct rl_type		*t;
807	struct rl_softc		*sc;
808	int			rid;
809	u_int32_t		hwrev;
810
811	t = re_devs;
812	sc = device_get_softc(dev);
813
814	while (t->rl_name != NULL) {
815		if ((pci_get_vendor(dev) == t->rl_vid) &&
816		    (pci_get_device(dev) == t->rl_did)) {
817
818			/*
819			 * Temporarily map the I/O space
820			 * so we can read the chip ID register.
821			 */
822			rid = RL_RID;
823			sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid,
824			    RF_ACTIVE);
825			if (sc->rl_res == NULL) {
826				device_printf(dev,
827				    "couldn't map ports/memory\n");
828				return (ENXIO);
829			}
830			sc->rl_btag = rman_get_bustag(sc->rl_res);
831			sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
832			hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
833			bus_release_resource(dev, RL_RES,
834			    RL_RID, sc->rl_res);
835			if (t->rl_basetype == hwrev) {
836				device_set_desc(dev, t->rl_name);
837				return (0);
838			}
839		}
840		t++;
841	}
842
843	return (ENXIO);
844}
845
846/*
847 * This routine takes the segment list provided as the result of
848 * a bus_dma_map_load() operation and assigns the addresses/lengths
849 * to RealTek DMA descriptors. This can be called either by the RX
850 * code or the TX code. In the RX case, we'll probably wind up mapping
851 * at most one segment. For the TX case, there could be any number of
852 * segments since TX packets may span multiple mbufs. In either case,
853 * if the number of segments is larger than the rl_maxsegs limit
854 * specified by the caller, we abort the mapping operation. Sadly,
855 * whoever designed the buffer mapping API did not provide a way to
856 * return an error from here, so we have to fake it a bit.
857 */
858
859static void
860re_dma_map_desc(arg, segs, nseg, mapsize, error)
861	void			*arg;
862	bus_dma_segment_t	*segs;
863	int			nseg;
864	bus_size_t		mapsize;
865	int			error;
866{
867	struct rl_dmaload_arg	*ctx;
868	struct rl_desc		*d = NULL;
869	int			i = 0, idx;
870
871	if (error)
872		return;
873
874	ctx = arg;
875
876	/* Signal error to caller if there's too many segments */
877	if (nseg > ctx->rl_maxsegs) {
878		ctx->rl_maxsegs = 0;
879		return;
880	}
881
882	/*
883	 * Map the segment array into descriptors. Note that we set the
884	 * start-of-frame and end-of-frame markers for either TX or RX, but
885	 * they really only have meaning in the TX case. (In the RX case,
886	 * it's the chip that tells us where packets begin and end.)
887	 * We also keep track of the end of the ring and set the
888	 * end-of-ring bits as needed, and we set the ownership bits
889	 * in all except the very first descriptor. (The caller will
890	 * set this descriptor later when it start transmission or
891	 * reception.)
892	 */
893	idx = ctx->rl_idx;
894	for (;;) {
895		u_int32_t		cmdstat;
896		d = &ctx->rl_ring[idx];
897		if (le32toh(d->rl_cmdstat) & RL_RDESC_STAT_OWN) {
898			ctx->rl_maxsegs = 0;
899			return;
900		}
901		cmdstat = segs[i].ds_len;
902		d->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
903		d->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
904		if (i == 0)
905			cmdstat |= RL_TDESC_CMD_SOF;
906		else
907			cmdstat |= RL_TDESC_CMD_OWN;
908		if (idx == (RL_RX_DESC_CNT - 1))
909			cmdstat |= RL_TDESC_CMD_EOR;
910		d->rl_cmdstat = htole32(cmdstat | ctx->rl_flags);
911		i++;
912		if (i == nseg)
913			break;
914		RL_DESC_INC(idx);
915	}
916
917	d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
918	ctx->rl_maxsegs = nseg;
919	ctx->rl_idx = idx;
920}
921
922/*
923 * Map a single buffer address.
924 */
925
926static void
927re_dma_map_addr(arg, segs, nseg, error)
928	void			*arg;
929	bus_dma_segment_t	*segs;
930	int			nseg;
931	int			error;
932{
933	u_int32_t		*addr;
934
935	if (error)
936		return;
937
938	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
939	addr = arg;
940	*addr = segs->ds_addr;
941}
942
943static int
944re_allocmem(dev, sc)
945	device_t		dev;
946	struct rl_softc		*sc;
947{
948	int			error;
949	int			nseg;
950	int			i;
951
952	/*
953	 * Allocate map for RX mbufs.
954	 */
955	nseg = 32;
956	error = bus_dma_tag_create(sc->rl_parent_tag, ETHER_ALIGN, 0,
957	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
958	    NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW,
959	    NULL, NULL, &sc->rl_ldata.rl_mtag);
960	if (error) {
961		device_printf(dev, "could not allocate dma tag\n");
962		return (ENOMEM);
963	}
964
965	/*
966	 * Allocate map for TX descriptor list.
967	 */
968	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
969	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
970	    NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
971	    NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
972	if (error) {
973		device_printf(dev, "could not allocate dma tag\n");
974		return (ENOMEM);
975	}
976
977	/* Allocate DMA'able memory for the TX ring */
978
979	error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
980	    (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
981	    &sc->rl_ldata.rl_tx_list_map);
982	if (error)
983		return (ENOMEM);
984
985	/* Load the map for the TX ring. */
986
987	error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
988	     sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
989	     RL_TX_LIST_SZ, re_dma_map_addr,
990	     &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
991
992	/* Create DMA maps for TX buffers */
993
994	for (i = 0; i < RL_TX_DESC_CNT; i++) {
995		error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0,
996			    &sc->rl_ldata.rl_tx_dmamap[i]);
997		if (error) {
998			device_printf(dev, "can't create DMA map for TX\n");
999			return (ENOMEM);
1000		}
1001	}
1002
1003	/*
1004	 * Allocate map for RX descriptor list.
1005	 */
1006	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1007	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1008	    NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
1009	    NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
1010	if (error) {
1011		device_printf(dev, "could not allocate dma tag\n");
1012		return (ENOMEM);
1013	}
1014
1015	/* Allocate DMA'able memory for the RX ring */
1016
1017	error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
1018	    (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1019	    &sc->rl_ldata.rl_rx_list_map);
1020	if (error)
1021		return (ENOMEM);
1022
1023	/* Load the map for the RX ring. */
1024
1025	error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
1026	     sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
1027	     RL_TX_LIST_SZ, re_dma_map_addr,
1028	     &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
1029
1030	/* Create DMA maps for RX buffers */
1031
1032	for (i = 0; i < RL_RX_DESC_CNT; i++) {
1033		error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0,
1034			    &sc->rl_ldata.rl_rx_dmamap[i]);
1035		if (error) {
1036			device_printf(dev, "can't create DMA map for RX\n");
1037			return (ENOMEM);
1038		}
1039	}
1040
1041	return (0);
1042}
1043
1044/*
1045 * Attach the interface. Allocate softc structures, do ifmedia
1046 * setup and ethernet/BPF attach.
1047 */
1048static int
1049re_attach(dev)
1050	device_t		dev;
1051{
1052	u_char			eaddr[ETHER_ADDR_LEN];
1053	u_int16_t		as[3];
1054	struct rl_softc		*sc;
1055	struct ifnet		*ifp;
1056	struct rl_hwrev		*hw_rev;
1057	int			hwrev;
1058	u_int16_t		re_did = 0;
1059	int			unit, error = 0, rid, i;
1060
1061	sc = device_get_softc(dev);
1062	unit = device_get_unit(dev);
1063
1064	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1065	    MTX_DEF);
1066	/*
1067	 * Map control/status registers.
1068	 */
1069	pci_enable_busmaster(dev);
1070
1071	rid = RL_RID;
1072	sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid,
1073	    RF_ACTIVE);
1074
1075	if (sc->rl_res == NULL) {
1076		printf ("re%d: couldn't map ports/memory\n", unit);
1077		error = ENXIO;
1078		goto fail;
1079	}
1080
1081	sc->rl_btag = rman_get_bustag(sc->rl_res);
1082	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
1083
1084	/* Allocate interrupt */
1085	rid = 0;
1086	sc->rl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1087	    RF_SHAREABLE | RF_ACTIVE);
1088
1089	if (sc->rl_irq == NULL) {
1090		printf("re%d: couldn't map interrupt\n", unit);
1091		error = ENXIO;
1092		goto fail;
1093	}
1094
1095	/* Reset the adapter. */
1096	RL_LOCK(sc);
1097	re_reset(sc);
1098	RL_UNLOCK(sc);
1099
1100	hw_rev = re_hwrevs;
1101	hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
1102	while (hw_rev->rl_desc != NULL) {
1103		if (hw_rev->rl_rev == hwrev) {
1104			sc->rl_type = hw_rev->rl_type;
1105			break;
1106		}
1107		hw_rev++;
1108	}
1109
1110	if (sc->rl_type == RL_8169) {
1111
1112		/* Set RX length mask */
1113
1114		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
1115
1116		/* Force station address autoload from the EEPROM */
1117
1118		CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_AUTOLOAD);
1119		for (i = 0; i < RL_TIMEOUT; i++) {
1120			if (!(CSR_READ_1(sc, RL_EECMD) & RL_EEMODE_AUTOLOAD))
1121				break;
1122			DELAY(100);
1123		}
1124		if (i == RL_TIMEOUT)
1125			printf ("re%d: eeprom autoload timed out\n", unit);
1126
1127			for (i = 0; i < ETHER_ADDR_LEN; i++)
1128				eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
1129	} else {
1130
1131		/* Set RX length mask */
1132
1133		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
1134
1135		sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
1136		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1, 0);
1137		if (re_did != 0x8129)
1138			sc->rl_eecmd_read = RL_EECMD_READ_8BIT;
1139
1140		/*
1141		 * Get station address from the EEPROM.
1142		 */
1143		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3, 0);
1144		for (i = 0; i < 3; i++) {
1145			eaddr[(i * 2) + 0] = as[i] & 0xff;
1146			eaddr[(i * 2) + 1] = as[i] >> 8;
1147		}
1148	}
1149
1150	sc->rl_unit = unit;
1151	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1152
1153	/*
1154	 * Allocate the parent bus DMA tag appropriate for PCI.
1155	 */
1156#define RL_NSEG_NEW 32
1157	error = bus_dma_tag_create(NULL,	/* parent */
1158			1, 0,			/* alignment, boundary */
1159			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1160			BUS_SPACE_MAXADDR,	/* highaddr */
1161			NULL, NULL,		/* filter, filterarg */
1162			MAXBSIZE, RL_NSEG_NEW,	/* maxsize, nsegments */
1163			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1164			BUS_DMA_ALLOCNOW,	/* flags */
1165			NULL, NULL,		/* lockfunc, lockarg */
1166			&sc->rl_parent_tag);
1167	if (error)
1168		goto fail;
1169
1170	error = re_allocmem(dev, sc);
1171
1172	if (error)
1173		goto fail;
1174
1175	/* Do MII setup */
1176	if (mii_phy_probe(dev, &sc->rl_miibus,
1177	    re_ifmedia_upd, re_ifmedia_sts)) {
1178		printf("re%d: MII without any phy!\n", sc->rl_unit);
1179		error = ENXIO;
1180		goto fail;
1181	}
1182
1183	ifp = &sc->arpcom.ac_if;
1184	ifp->if_softc = sc;
1185	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1186	ifp->if_mtu = ETHERMTU;
1187	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1188	ifp->if_ioctl = re_ioctl;
1189	ifp->if_capabilities = IFCAP_VLAN_MTU;
1190	ifp->if_start = re_start;
1191	ifp->if_hwassist = RE_CSUM_FEATURES;
1192	ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
1193#ifdef DEVICE_POLLING
1194	ifp->if_capabilities |= IFCAP_POLLING;
1195#endif
1196	ifp->if_watchdog = re_watchdog;
1197	ifp->if_init = re_init;
1198	if (sc->rl_type == RL_8169)
1199		ifp->if_baudrate = 1000000000;
1200	else
1201		ifp->if_baudrate = 100000000;
1202	ifp->if_snd.ifq_maxlen = RL_IFQ_MAXLEN;
1203	ifp->if_capenable = ifp->if_capabilities;
1204
1205	callout_handle_init(&sc->rl_stat_ch);
1206
1207	/*
1208	 * Call MI attach routine.
1209	 */
1210	ether_ifattach(ifp, eaddr);
1211
1212	/* Perform hardware diagnostic. */
1213	error = re_diag(sc);
1214
1215	if (error) {
1216		printf("re%d: attach aborted due to hardware diag failure\n",
1217		    unit);
1218		ether_ifdetach(ifp);
1219		goto fail;
1220	}
1221
1222	/* Hook interrupt last to avoid having to lock softc */
1223	error = bus_setup_intr(dev, sc->rl_irq, INTR_TYPE_NET | INTR_MPSAFE,
1224	    re_intr, sc, &sc->rl_intrhand);
1225	if (error) {
1226		printf("re%d: couldn't set up irq\n", unit);
1227		ether_ifdetach(ifp);
1228	}
1229
1230fail:
1231	if (error)
1232		re_detach(dev);
1233
1234	return (error);
1235}
1236
1237/*
1238 * Shutdown hardware and free up resources. This can be called any
1239 * time after the mutex has been initialized. It is called in both
1240 * the error case in attach and the normal detach case so it needs
1241 * to be careful about only freeing resources that have actually been
1242 * allocated.
1243 */
1244static int
1245re_detach(dev)
1246	device_t		dev;
1247{
1248	struct rl_softc		*sc;
1249	struct ifnet		*ifp;
1250	int			i;
1251	int			attached;
1252
1253	sc = device_get_softc(dev);
1254	ifp = &sc->arpcom.ac_if;
1255	KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));
1256
1257	attached = device_is_attached(dev);
1258	/* These should only be active if attach succeeded */
1259	if (attached)
1260		ether_ifdetach(ifp);
1261
1262	RL_LOCK(sc);
1263#if 0
1264	sc->suspended = 1;
1265#endif
1266
1267	/* These should only be active if attach succeeded */
1268	if (attached) {
1269		re_stop(sc);
1270		/*
1271		 * Force off the IFF_UP flag here, in case someone
1272		 * still had a BPF descriptor attached to this
1273		 * interface. If they do, ether_ifdetach() will cause
1274		 * the BPF code to try and clear the promisc mode
1275		 * flag, which will bubble down to re_ioctl(),
1276		 * which will try to call re_init() again. This will
1277		 * turn the NIC back on and restart the MII ticker,
1278		 * which will panic the system when the kernel tries
1279		 * to invoke the re_tick() function that isn't there
1280		 * anymore.
1281		 */
1282		ifp->if_flags &= ~IFF_UP;
1283		ether_ifdetach(ifp);
1284	}
1285	if (sc->rl_miibus)
1286		device_delete_child(dev, sc->rl_miibus);
1287	bus_generic_detach(dev);
1288
1289	/*
1290	 * The rest is resource deallocation, so we should already be
1291	 * stopped here.
1292	 */
1293	RL_UNLOCK(sc);
1294
1295	if (sc->rl_intrhand)
1296		bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand);
1297	if (sc->rl_irq)
1298		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq);
1299	if (sc->rl_res)
1300		bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res);
1301
1302
1303	/* Unload and free the RX DMA ring memory and map */
1304
1305	if (sc->rl_ldata.rl_rx_list_tag) {
1306		bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
1307		    sc->rl_ldata.rl_rx_list_map);
1308		bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
1309		    sc->rl_ldata.rl_rx_list,
1310		    sc->rl_ldata.rl_rx_list_map);
1311		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
1312	}
1313
1314	/* Unload and free the TX DMA ring memory and map */
1315
1316	if (sc->rl_ldata.rl_tx_list_tag) {
1317		bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
1318		    sc->rl_ldata.rl_tx_list_map);
1319		bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
1320		    sc->rl_ldata.rl_tx_list,
1321		    sc->rl_ldata.rl_tx_list_map);
1322		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
1323	}
1324
1325	/* Destroy all the RX and TX buffer maps */
1326
1327	if (sc->rl_ldata.rl_mtag) {
1328		for (i = 0; i < RL_TX_DESC_CNT; i++)
1329			bus_dmamap_destroy(sc->rl_ldata.rl_mtag,
1330			    sc->rl_ldata.rl_tx_dmamap[i]);
1331		for (i = 0; i < RL_RX_DESC_CNT; i++)
1332			bus_dmamap_destroy(sc->rl_ldata.rl_mtag,
1333			    sc->rl_ldata.rl_rx_dmamap[i]);
1334		bus_dma_tag_destroy(sc->rl_ldata.rl_mtag);
1335	}
1336
1337	/* Unload and free the stats buffer and map */
1338
1339	if (sc->rl_ldata.rl_stag) {
1340		bus_dmamap_unload(sc->rl_ldata.rl_stag,
1341		    sc->rl_ldata.rl_rx_list_map);
1342		bus_dmamem_free(sc->rl_ldata.rl_stag,
1343		    sc->rl_ldata.rl_stats,
1344		    sc->rl_ldata.rl_smap);
1345		bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
1346	}
1347
1348	if (sc->rl_parent_tag)
1349		bus_dma_tag_destroy(sc->rl_parent_tag);
1350
1351	mtx_destroy(&sc->rl_mtx);
1352
1353	return (0);
1354}
1355
1356static int
1357re_newbuf(sc, idx, m)
1358	struct rl_softc		*sc;
1359	int			idx;
1360	struct mbuf		*m;
1361{
1362	struct rl_dmaload_arg	arg;
1363	struct mbuf		*n = NULL;
1364	int			error;
1365
1366	if (m == NULL) {
1367		n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1368		if (n == NULL)
1369			return (ENOBUFS);
1370		m = n;
1371	} else
1372		m->m_data = m->m_ext.ext_buf;
1373
1374	/*
1375	 * Initialize mbuf length fields and fixup
1376	 * alignment so that the frame payload is
1377	 * longword aligned.
1378	 */
1379	m->m_len = m->m_pkthdr.len = MCLBYTES;
1380	m_adj(m, ETHER_ALIGN);
1381
1382	arg.sc = sc;
1383	arg.rl_idx = idx;
1384	arg.rl_maxsegs = 1;
1385	arg.rl_flags = 0;
1386	arg.rl_ring = sc->rl_ldata.rl_rx_list;
1387
1388	error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag,
1389	    sc->rl_ldata.rl_rx_dmamap[idx], m, re_dma_map_desc,
1390	    &arg, BUS_DMA_NOWAIT);
1391	if (error || arg.rl_maxsegs != 1) {
1392		if (n != NULL)
1393			m_freem(n);
1394		return (ENOMEM);
1395	}
1396
1397	sc->rl_ldata.rl_rx_list[idx].rl_cmdstat |= htole32(RL_RDESC_CMD_OWN);
1398	sc->rl_ldata.rl_rx_mbuf[idx] = m;
1399
1400	bus_dmamap_sync(sc->rl_ldata.rl_mtag,
1401	    sc->rl_ldata.rl_rx_dmamap[idx],
1402	    BUS_DMASYNC_PREREAD);
1403
1404	return (0);
1405}
1406
1407static int
1408re_tx_list_init(sc)
1409	struct rl_softc		*sc;
1410{
1411
1412	RL_LOCK_ASSERT(sc);
1413
1414	bzero ((char *)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ);
1415	bzero ((char *)&sc->rl_ldata.rl_tx_mbuf,
1416	    (RL_TX_DESC_CNT * sizeof(struct mbuf *)));
1417
1418	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
1419	    sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE);
1420	sc->rl_ldata.rl_tx_prodidx = 0;
1421	sc->rl_ldata.rl_tx_considx = 0;
1422	sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT;
1423
1424	return (0);
1425}
1426
1427static int
1428re_rx_list_init(sc)
1429	struct rl_softc		*sc;
1430{
1431	int			i;
1432
1433	bzero ((char *)sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ);
1434	bzero ((char *)&sc->rl_ldata.rl_rx_mbuf,
1435	    (RL_RX_DESC_CNT * sizeof(struct mbuf *)));
1436
1437	for (i = 0; i < RL_RX_DESC_CNT; i++) {
1438		if (re_newbuf(sc, i, NULL) == ENOBUFS)
1439			return (ENOBUFS);
1440	}
1441
1442	/* Flush the RX descriptors */
1443
1444	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
1445	    sc->rl_ldata.rl_rx_list_map,
1446	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1447
1448	sc->rl_ldata.rl_rx_prodidx = 0;
1449	sc->rl_head = sc->rl_tail = NULL;
1450
1451	return (0);
1452}
1453
1454/*
1455 * RX handler for C+ and 8169. For the gigE chips, we support
1456 * the reception of jumbo frames that have been fragmented
1457 * across multiple 2K mbuf cluster buffers.
1458 */
1459static void
1460re_rxeof(sc)
1461	struct rl_softc		*sc;
1462{
1463	struct mbuf		*m;
1464	struct ifnet		*ifp;
1465	int			i, total_len;
1466	struct rl_desc		*cur_rx;
1467	u_int32_t		rxstat, rxvlan;
1468
1469	RL_LOCK_ASSERT(sc);
1470
1471	ifp = &sc->arpcom.ac_if;
1472	i = sc->rl_ldata.rl_rx_prodidx;
1473
1474	/* Invalidate the descriptor memory */
1475
1476	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
1477	    sc->rl_ldata.rl_rx_list_map,
1478	    BUS_DMASYNC_POSTREAD);
1479
1480	while (!RL_OWN(&sc->rl_ldata.rl_rx_list[i])) {
1481
1482		cur_rx = &sc->rl_ldata.rl_rx_list[i];
1483		m = sc->rl_ldata.rl_rx_mbuf[i];
1484		total_len = RL_RXBYTES(cur_rx);
1485		rxstat = le32toh(cur_rx->rl_cmdstat);
1486		rxvlan = le32toh(cur_rx->rl_vlanctl);
1487
1488		/* Invalidate the RX mbuf and unload its map */
1489
1490		bus_dmamap_sync(sc->rl_ldata.rl_mtag,
1491		    sc->rl_ldata.rl_rx_dmamap[i],
1492		    BUS_DMASYNC_POSTWRITE);
1493		bus_dmamap_unload(sc->rl_ldata.rl_mtag,
1494		    sc->rl_ldata.rl_rx_dmamap[i]);
1495
1496		if (!(rxstat & RL_RDESC_STAT_EOF)) {
1497			m->m_len = MCLBYTES - ETHER_ALIGN;
1498			if (sc->rl_head == NULL)
1499				sc->rl_head = sc->rl_tail = m;
1500			else {
1501				m->m_flags &= ~M_PKTHDR;
1502				sc->rl_tail->m_next = m;
1503				sc->rl_tail = m;
1504			}
1505			re_newbuf(sc, i, NULL);
1506			RL_DESC_INC(i);
1507			continue;
1508		}
1509
1510		/*
1511		 * NOTE: for the 8139C+, the frame length field
1512		 * is always 12 bits in size, but for the gigE chips,
1513		 * it is 13 bits (since the max RX frame length is 16K).
1514		 * Unfortunately, all 32 bits in the status word
1515		 * were already used, so to make room for the extra
1516		 * length bit, RealTek took out the 'frame alignment
1517		 * error' bit and shifted the other status bits
1518		 * over one slot. The OWN, EOR, FS and LS bits are
1519		 * still in the same places. We have already extracted
1520		 * the frame length and checked the OWN bit, so rather
1521		 * than using an alternate bit mapping, we shift the
1522		 * status bits one space to the right so we can evaluate
1523		 * them using the 8169 status as though it was in the
1524		 * same format as that of the 8139C+.
1525		 */
1526		if (sc->rl_type == RL_8169)
1527			rxstat >>= 1;
1528
1529		if (rxstat & RL_RDESC_STAT_RXERRSUM) {
1530			ifp->if_ierrors++;
1531			/*
1532			 * If this is part of a multi-fragment packet,
1533			 * discard all the pieces.
1534			 */
1535			if (sc->rl_head != NULL) {
1536				m_freem(sc->rl_head);
1537				sc->rl_head = sc->rl_tail = NULL;
1538			}
1539			re_newbuf(sc, i, m);
1540			RL_DESC_INC(i);
1541			continue;
1542		}
1543
1544		/*
1545		 * If allocating a replacement mbuf fails,
1546		 * reload the current one.
1547		 */
1548
1549		if (re_newbuf(sc, i, NULL)) {
1550			ifp->if_ierrors++;
1551			if (sc->rl_head != NULL) {
1552				m_freem(sc->rl_head);
1553				sc->rl_head = sc->rl_tail = NULL;
1554			}
1555			re_newbuf(sc, i, m);
1556			RL_DESC_INC(i);
1557			continue;
1558		}
1559
1560		RL_DESC_INC(i);
1561
1562		if (sc->rl_head != NULL) {
1563			m->m_len = total_len % (MCLBYTES - ETHER_ALIGN);
1564			/*
1565			 * Special case: if there's 4 bytes or less
1566			 * in this buffer, the mbuf can be discarded:
1567			 * the last 4 bytes is the CRC, which we don't
1568			 * care about anyway.
1569			 */
1570			if (m->m_len <= ETHER_CRC_LEN) {
1571				sc->rl_tail->m_len -=
1572				    (ETHER_CRC_LEN - m->m_len);
1573				m_freem(m);
1574			} else {
1575				m->m_len -= ETHER_CRC_LEN;
1576				m->m_flags &= ~M_PKTHDR;
1577				sc->rl_tail->m_next = m;
1578			}
1579			m = sc->rl_head;
1580			sc->rl_head = sc->rl_tail = NULL;
1581			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1582		} else
1583			m->m_pkthdr.len = m->m_len =
1584			    (total_len - ETHER_CRC_LEN);
1585
1586		ifp->if_ipackets++;
1587		m->m_pkthdr.rcvif = ifp;
1588
1589		/* Do RX checksumming if enabled */
1590
1591		if (ifp->if_capenable & IFCAP_RXCSUM) {
1592
1593			/* Check IP header checksum */
1594			if (rxstat & RL_RDESC_STAT_PROTOID)
1595				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1596			if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
1597				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1598
1599			/* Check TCP/UDP checksum */
1600			if ((RL_TCPPKT(rxstat) &&
1601			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
1602			    (RL_UDPPKT(rxstat) &&
1603			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
1604				m->m_pkthdr.csum_flags |=
1605				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
1606				m->m_pkthdr.csum_data = 0xffff;
1607			}
1608		}
1609
1610		if (rxvlan & RL_RDESC_VLANCTL_TAG)
1611			VLAN_INPUT_TAG(ifp, m,
1612			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA)), continue);
1613		RL_UNLOCK(sc);
1614		(*ifp->if_input)(ifp, m);
1615		RL_LOCK(sc);
1616	}
1617
1618	/* Flush the RX DMA ring */
1619
1620	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
1621	    sc->rl_ldata.rl_rx_list_map,
1622	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1623
1624	sc->rl_ldata.rl_rx_prodidx = i;
1625}
1626
1627static void
1628re_txeof(sc)
1629	struct rl_softc		*sc;
1630{
1631	struct ifnet		*ifp;
1632	u_int32_t		txstat;
1633	int			idx;
1634
1635	ifp = &sc->arpcom.ac_if;
1636	idx = sc->rl_ldata.rl_tx_considx;
1637
1638	/* Invalidate the TX descriptor list */
1639
1640	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
1641	    sc->rl_ldata.rl_tx_list_map,
1642	    BUS_DMASYNC_POSTREAD);
1643
1644	while (idx != sc->rl_ldata.rl_tx_prodidx) {
1645
1646		txstat = le32toh(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat);
1647		if (txstat & RL_TDESC_CMD_OWN)
1648			break;
1649
1650		/*
1651		 * We only stash mbufs in the last descriptor
1652		 * in a fragment chain, which also happens to
1653		 * be the only place where the TX status bits
1654		 * are valid.
1655		 */
1656
1657		if (txstat & RL_TDESC_CMD_EOF) {
1658			m_freem(sc->rl_ldata.rl_tx_mbuf[idx]);
1659			sc->rl_ldata.rl_tx_mbuf[idx] = NULL;
1660			bus_dmamap_unload(sc->rl_ldata.rl_mtag,
1661			    sc->rl_ldata.rl_tx_dmamap[idx]);
1662			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
1663			    RL_TDESC_STAT_COLCNT))
1664				ifp->if_collisions++;
1665			if (txstat & RL_TDESC_STAT_TXERRSUM)
1666				ifp->if_oerrors++;
1667			else
1668				ifp->if_opackets++;
1669		}
1670		sc->rl_ldata.rl_tx_free++;
1671		RL_DESC_INC(idx);
1672	}
1673
1674	/* No changes made to the TX ring, so no flush needed */
1675
1676	if (idx != sc->rl_ldata.rl_tx_considx) {
1677		sc->rl_ldata.rl_tx_considx = idx;
1678		ifp->if_flags &= ~IFF_OACTIVE;
1679		ifp->if_timer = 0;
1680	}
1681
1682	/*
1683	 * If not all descriptors have been released reaped yet,
1684	 * reload the timer so that we will eventually get another
1685	 * interrupt that will cause us to re-enter this routine.
1686	 * This is done in case the transmitter has gone idle.
1687	 */
1688	if (sc->rl_ldata.rl_tx_free != RL_TX_DESC_CNT)
1689		CSR_WRITE_4(sc, RL_TIMERCNT, 1);
1690}
1691
1692static void
1693re_tick(xsc)
1694	void			*xsc;
1695{
1696	struct rl_softc		*sc;
1697
1698	sc = xsc;
1699	RL_LOCK(sc);
1700	re_tick_locked(sc);
1701	RL_UNLOCK(sc);
1702}
1703
1704static void
1705re_tick_locked(sc)
1706	struct rl_softc		*sc;
1707{
1708	struct mii_data		*mii;
1709
1710	RL_LOCK_ASSERT(sc);
1711
1712	mii = device_get_softc(sc->rl_miibus);
1713
1714	mii_tick(mii);
1715
1716	sc->rl_stat_ch = timeout(re_tick, sc, hz);
1717}
1718
1719#ifdef DEVICE_POLLING
1720static void
1721re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1722{
1723	struct rl_softc *sc = ifp->if_softc;
1724
1725	RL_LOCK(sc);
1726	re_poll_locked(ifp, cmd, count);
1727	RL_UNLOCK(sc);
1728}
1729
1730static void
1731re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1732{
1733	struct rl_softc *sc = ifp->if_softc;
1734
1735	RL_LOCK_ASSERT(sc);
1736
1737	if (!(ifp->if_capenable & IFCAP_POLLING)) {
1738		ether_poll_deregister(ifp);
1739		cmd = POLL_DEREGISTER;
1740	}
1741	if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
1742		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
1743		return;
1744	}
1745
1746	sc->rxcycles = count;
1747	re_rxeof(sc);
1748	re_txeof(sc);
1749
1750	if (ifp->if_snd.ifq_head != NULL)
1751		re_start_locked(ifp);
1752
1753	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1754		u_int16_t       status;
1755
1756		status = CSR_READ_2(sc, RL_ISR);
1757		if (status == 0xffff)
1758			return;
1759		if (status)
1760			CSR_WRITE_2(sc, RL_ISR, status);
1761
1762		/*
1763		 * XXX check behaviour on receiver stalls.
1764		 */
1765
1766		if (status & RL_ISR_SYSTEM_ERR) {
1767			re_reset(sc);
1768			re_init_locked(sc);
1769		}
1770	}
1771}
1772#endif /* DEVICE_POLLING */
1773
1774static void
1775re_intr(arg)
1776	void			*arg;
1777{
1778	struct rl_softc		*sc;
1779	struct ifnet		*ifp;
1780	u_int16_t		status;
1781
1782	sc = arg;
1783
1784	RL_LOCK(sc);
1785
1786	ifp = &sc->arpcom.ac_if;
1787
1788	if (sc->suspended || !(ifp->if_flags & IFF_UP))
1789		goto done_locked;
1790
1791#ifdef DEVICE_POLLING
1792	if  (ifp->if_flags & IFF_POLLING)
1793		goto done_locked;
1794	if ((ifp->if_capenable & IFCAP_POLLING) &&
1795	    ether_poll_register(re_poll, ifp)) { /* ok, disable interrupts */
1796		CSR_WRITE_2(sc, RL_IMR, 0x0000);
1797		re_poll_locked(ifp, 0, 1);
1798		goto done_locked;
1799	}
1800#endif /* DEVICE_POLLING */
1801
1802	for (;;) {
1803
1804		status = CSR_READ_2(sc, RL_ISR);
1805		/* If the card has gone away the read returns 0xffff. */
1806		if (status == 0xffff)
1807			break;
1808		if (status)
1809			CSR_WRITE_2(sc, RL_ISR, status);
1810
1811		if ((status & RL_INTRS_CPLUS) == 0)
1812			break;
1813
1814		if (status & RL_ISR_RX_OK)
1815			re_rxeof(sc);
1816
1817		if (status & RL_ISR_RX_ERR)
1818			re_rxeof(sc);
1819
1820		if ((status & RL_ISR_TIMEOUT_EXPIRED) ||
1821		    (status & RL_ISR_TX_ERR) ||
1822		    (status & RL_ISR_TX_DESC_UNAVAIL))
1823			re_txeof(sc);
1824
1825		if (status & RL_ISR_SYSTEM_ERR) {
1826			re_reset(sc);
1827			re_init_locked(sc);
1828		}
1829
1830		if (status & RL_ISR_LINKCHG) {
1831			untimeout(re_tick, sc, sc->rl_stat_ch);
1832			re_tick_locked(sc);
1833		}
1834	}
1835
1836	if (ifp->if_snd.ifq_head != NULL)
1837		re_start_locked(ifp);
1838
1839done_locked:
1840	RL_UNLOCK(sc);
1841}
1842
1843static int
1844re_encap(sc, m_head, idx)
1845	struct rl_softc		*sc;
1846	struct mbuf		*m_head;
1847	int			*idx;
1848{
1849	struct mbuf		*m_new = NULL;
1850	struct rl_dmaload_arg	arg;
1851	bus_dmamap_t		map;
1852	int			error;
1853	struct m_tag		*mtag;
1854
1855	RL_LOCK_ASSERT(sc);
1856
1857	if (sc->rl_ldata.rl_tx_free <= 4)
1858		return (EFBIG);
1859
1860	/*
1861	 * Set up checksum offload. Note: checksum offload bits must
1862	 * appear in all descriptors of a multi-descriptor transmit
1863	 * attempt. (This is according to testing done with an 8169
1864	 * chip. I'm not sure if this is a requirement or a bug.)
1865	 */
1866
1867	arg.rl_flags = 0;
1868
1869	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
1870		arg.rl_flags |= RL_TDESC_CMD_IPCSUM;
1871	if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
1872		arg.rl_flags |= RL_TDESC_CMD_TCPCSUM;
1873	if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
1874		arg.rl_flags |= RL_TDESC_CMD_UDPCSUM;
1875
1876	arg.sc = sc;
1877	arg.rl_idx = *idx;
1878	arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
1879	if (arg.rl_maxsegs > 4)
1880		arg.rl_maxsegs -= 4;
1881	arg.rl_ring = sc->rl_ldata.rl_tx_list;
1882
1883	map = sc->rl_ldata.rl_tx_dmamap[*idx];
1884	error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
1885	    m_head, re_dma_map_desc, &arg, BUS_DMA_NOWAIT);
1886
1887	if (error && error != EFBIG) {
1888		printf("re%d: can't map mbuf (error %d)\n", sc->rl_unit, error);
1889		return (ENOBUFS);
1890	}
1891
1892	/* Too many segments to map, coalesce into a single mbuf */
1893
1894	if (error || arg.rl_maxsegs == 0) {
1895		m_new = m_defrag(m_head, M_DONTWAIT);
1896		if (m_new == NULL)
1897			return (1);
1898		else
1899			m_head = m_new;
1900
1901		arg.sc = sc;
1902		arg.rl_idx = *idx;
1903		arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
1904		arg.rl_ring = sc->rl_ldata.rl_tx_list;
1905
1906		error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
1907		    m_head, re_dma_map_desc, &arg, BUS_DMA_NOWAIT);
1908		if (error) {
1909			printf("re%d: can't map mbuf (error %d)\n",
1910			    sc->rl_unit, error);
1911			return (EFBIG);
1912		}
1913	}
1914
1915	/*
1916	 * Insure that the map for this transmission
1917	 * is placed at the array index of the last descriptor
1918	 * in this chain.
1919	 */
1920	sc->rl_ldata.rl_tx_dmamap[*idx] =
1921	    sc->rl_ldata.rl_tx_dmamap[arg.rl_idx];
1922	sc->rl_ldata.rl_tx_dmamap[arg.rl_idx] = map;
1923
1924	sc->rl_ldata.rl_tx_mbuf[arg.rl_idx] = m_head;
1925	sc->rl_ldata.rl_tx_free -= arg.rl_maxsegs;
1926
1927	/*
1928	 * Set up hardware VLAN tagging. Note: vlan tag info must
1929	 * appear in the first descriptor of a multi-descriptor
1930	 * transmission attempt.
1931	 */
1932
1933	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
1934	if (mtag != NULL)
1935		sc->rl_ldata.rl_tx_list[*idx].rl_vlanctl =
1936		    htole32(htons(VLAN_TAG_VALUE(mtag)) | RL_TDESC_VLANCTL_TAG);
1937
1938	/* Transfer ownership of packet to the chip. */
1939
1940	sc->rl_ldata.rl_tx_list[arg.rl_idx].rl_cmdstat |=
1941	    htole32(RL_TDESC_CMD_OWN);
1942	if (*idx != arg.rl_idx)
1943		sc->rl_ldata.rl_tx_list[*idx].rl_cmdstat |=
1944		    htole32(RL_TDESC_CMD_OWN);
1945
1946	RL_DESC_INC(arg.rl_idx);
1947	*idx = arg.rl_idx;
1948
1949	return (0);
1950}
1951
1952static void
1953re_start(ifp)
1954	struct ifnet		*ifp;
1955{
1956	struct rl_softc		*sc;
1957
1958	sc = ifp->if_softc;
1959	RL_LOCK(sc);
1960	re_start_locked(ifp);
1961	RL_UNLOCK(sc);
1962}
1963
1964/*
1965 * Main transmit routine for C+ and gigE NICs.
1966 */
1967static void
1968re_start_locked(ifp)
1969	struct ifnet		*ifp;
1970{
1971	struct rl_softc		*sc;
1972	struct mbuf		*m_head = NULL;
1973	int			idx;
1974
1975	sc = ifp->if_softc;
1976
1977	RL_LOCK_ASSERT(sc);
1978
1979	idx = sc->rl_ldata.rl_tx_prodidx;
1980
1981	while (sc->rl_ldata.rl_tx_mbuf[idx] == NULL) {
1982		IF_DEQUEUE(&ifp->if_snd, m_head);
1983		if (m_head == NULL)
1984			break;
1985
1986		if (re_encap(sc, m_head, &idx)) {
1987			IF_PREPEND(&ifp->if_snd, m_head);
1988			ifp->if_flags |= IFF_OACTIVE;
1989			break;
1990		}
1991
1992		/*
1993		 * If there's a BPF listener, bounce a copy of this frame
1994		 * to him.
1995		 */
1996		BPF_MTAP(ifp, m_head);
1997	}
1998
1999	/* Flush the TX descriptors */
2000
2001	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2002	    sc->rl_ldata.rl_tx_list_map,
2003	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2004
2005	sc->rl_ldata.rl_tx_prodidx = idx;
2006
2007	/*
2008	 * RealTek put the TX poll request register in a different
2009	 * location on the 8169 gigE chip. I don't know why.
2010	 */
2011
2012	if (sc->rl_type == RL_8169)
2013		CSR_WRITE_2(sc, RL_GTXSTART, RL_TXSTART_START);
2014	else
2015		CSR_WRITE_2(sc, RL_TXSTART, RL_TXSTART_START);
2016
2017	/*
2018	 * Use the countdown timer for interrupt moderation.
2019	 * 'TX done' interrupts are disabled. Instead, we reset the
2020	 * countdown timer, which will begin counting until it hits
2021	 * the value in the TIMERINT register, and then trigger an
2022	 * interrupt. Each time we write to the TIMERCNT register,
2023	 * the timer count is reset to 0.
2024	 */
2025	CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2026
2027	/*
2028	 * Set a timeout in case the chip goes out to lunch.
2029	 */
2030	ifp->if_timer = 5;
2031}
2032
2033static void
2034re_init(xsc)
2035	void			*xsc;
2036{
2037	struct rl_softc		*sc = xsc;
2038
2039	RL_LOCK(sc);
2040	re_init_locked(sc);
2041	RL_UNLOCK(sc);
2042}
2043
2044static void
2045re_init_locked(sc)
2046	struct rl_softc		*sc;
2047{
2048	struct ifnet		*ifp = &sc->arpcom.ac_if;
2049	struct mii_data		*mii;
2050	u_int32_t		rxcfg = 0;
2051
2052	RL_LOCK_ASSERT(sc);
2053
2054	mii = device_get_softc(sc->rl_miibus);
2055
2056	/*
2057	 * Cancel pending I/O and free all RX/TX buffers.
2058	 */
2059	re_stop(sc);
2060
2061	/*
2062	 * Enable C+ RX and TX mode, as well as VLAN stripping and
2063	 * RX checksum offload. We must configure the C+ register
2064	 * before all others.
2065	 */
2066	CSR_WRITE_2(sc, RL_CPLUS_CMD, RL_CPLUSCMD_RXENB|
2067	    RL_CPLUSCMD_TXENB|RL_CPLUSCMD_PCI_MRW|
2068	    RL_CPLUSCMD_VLANSTRIP|
2069	    (ifp->if_capenable & IFCAP_RXCSUM ?
2070	    RL_CPLUSCMD_RXCSUM_ENB : 0));
2071
2072	/*
2073	 * Init our MAC address.  Even though the chipset
2074	 * documentation doesn't mention it, we need to enter "Config
2075	 * register write enable" mode to modify the ID registers.
2076	 */
2077	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
2078	CSR_WRITE_STREAM_4(sc, RL_IDR0,
2079	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
2080	CSR_WRITE_STREAM_4(sc, RL_IDR4,
2081	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));
2082	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2083
2084	/*
2085	 * For C+ mode, initialize the RX descriptors and mbufs.
2086	 */
2087	re_rx_list_init(sc);
2088	re_tx_list_init(sc);
2089
2090	/*
2091	 * Enable transmit and receive.
2092	 */
2093	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
2094
2095	/*
2096	 * Set the initial TX and RX configuration.
2097	 */
2098	if (sc->rl_testmode) {
2099		if (sc->rl_type == RL_8169)
2100			CSR_WRITE_4(sc, RL_TXCFG,
2101			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
2102		else
2103			CSR_WRITE_4(sc, RL_TXCFG,
2104			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
2105	} else
2106		CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
2107	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);
2108
2109	/* Set the individual bit to receive frames for this host only. */
2110	rxcfg = CSR_READ_4(sc, RL_RXCFG);
2111	rxcfg |= RL_RXCFG_RX_INDIV;
2112
2113	/* If we want promiscuous mode, set the allframes bit. */
2114	if (ifp->if_flags & IFF_PROMISC) {
2115		rxcfg |= RL_RXCFG_RX_ALLPHYS;
2116		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
2117	} else {
2118		rxcfg &= ~RL_RXCFG_RX_ALLPHYS;
2119		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
2120	}
2121
2122	/*
2123	 * Set capture broadcast bit to capture broadcast frames.
2124	 */
2125	if (ifp->if_flags & IFF_BROADCAST) {
2126		rxcfg |= RL_RXCFG_RX_BROAD;
2127		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
2128	} else {
2129		rxcfg &= ~RL_RXCFG_RX_BROAD;
2130		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
2131	}
2132
2133	/*
2134	 * Program the multicast filter, if necessary.
2135	 */
2136	re_setmulti(sc);
2137
2138#ifdef DEVICE_POLLING
2139	/*
2140	 * Disable interrupts if we are polling.
2141	 */
2142	if (ifp->if_flags & IFF_POLLING)
2143		CSR_WRITE_2(sc, RL_IMR, 0);
2144	else	/* otherwise ... */
2145#endif /* DEVICE_POLLING */
2146	/*
2147	 * Enable interrupts.
2148	 */
2149	if (sc->rl_testmode)
2150		CSR_WRITE_2(sc, RL_IMR, 0);
2151	else
2152		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
2153
2154	/* Set initial TX threshold */
2155	sc->rl_txthresh = RL_TX_THRESH_INIT;
2156
2157	/* Start RX/TX process. */
2158	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
2159#ifdef notdef
2160	/* Enable receiver and transmitter. */
2161	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
2162#endif
2163	/*
2164	 * Load the addresses of the RX and TX lists into the chip.
2165	 */
2166
2167	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
2168	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
2169	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
2170	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));
2171
2172	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
2173	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
2174	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
2175	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));
2176
2177	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
2178
2179	/*
2180	 * Initialize the timer interrupt register so that
2181	 * a timer interrupt will be generated once the timer
2182	 * reaches a certain number of ticks. The timer is
2183	 * reloaded on each transmit. This gives us TX interrupt
2184	 * moderation, which dramatically improves TX frame rate.
2185	 */
2186
2187	if (sc->rl_type == RL_8169)
2188		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
2189	else
2190		CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
2191
2192	/*
2193	 * For 8169 gigE NICs, set the max allowed RX packet
2194	 * size so we can receive jumbo frames.
2195	 */
2196	if (sc->rl_type == RL_8169)
2197		CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
2198
2199	if (sc->rl_testmode)
2200		return;
2201
2202	mii_mediachg(mii);
2203
2204	CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);
2205
2206	ifp->if_flags |= IFF_RUNNING;
2207	ifp->if_flags &= ~IFF_OACTIVE;
2208
2209	sc->rl_stat_ch = timeout(re_tick, sc, hz);
2210}
2211
2212/*
2213 * Set media options.
2214 */
2215static int
2216re_ifmedia_upd(ifp)
2217	struct ifnet		*ifp;
2218{
2219	struct rl_softc		*sc;
2220	struct mii_data		*mii;
2221
2222	sc = ifp->if_softc;
2223	mii = device_get_softc(sc->rl_miibus);
2224	mii_mediachg(mii);
2225
2226	return (0);
2227}
2228
2229/*
2230 * Report current media status.
2231 */
2232static void
2233re_ifmedia_sts(ifp, ifmr)
2234	struct ifnet		*ifp;
2235	struct ifmediareq	*ifmr;
2236{
2237	struct rl_softc		*sc;
2238	struct mii_data		*mii;
2239
2240	sc = ifp->if_softc;
2241	mii = device_get_softc(sc->rl_miibus);
2242
2243	mii_pollstat(mii);
2244	ifmr->ifm_active = mii->mii_media_active;
2245	ifmr->ifm_status = mii->mii_media_status;
2246}
2247
2248static int
2249re_ioctl(ifp, command, data)
2250	struct ifnet		*ifp;
2251	u_long			command;
2252	caddr_t			data;
2253{
2254	struct rl_softc		*sc = ifp->if_softc;
2255	struct ifreq		*ifr = (struct ifreq *) data;
2256	struct mii_data		*mii;
2257	int			error = 0;
2258
2259	switch (command) {
2260	case SIOCSIFMTU:
2261		if (ifr->ifr_mtu > RL_JUMBO_MTU)
2262			error = EINVAL;
2263		ifp->if_mtu = ifr->ifr_mtu;
2264		break;
2265	case SIOCSIFFLAGS:
2266		RL_LOCK(sc);
2267		if (ifp->if_flags & IFF_UP)
2268			re_init_locked(sc);
2269		else if (ifp->if_flags & IFF_RUNNING)
2270			re_stop(sc);
2271		RL_UNLOCK(sc);
2272		error = 0;
2273		break;
2274	case SIOCADDMULTI:
2275	case SIOCDELMULTI:
2276		RL_LOCK(sc);
2277		re_setmulti(sc);
2278		RL_UNLOCK(sc);
2279		error = 0;
2280		break;
2281	case SIOCGIFMEDIA:
2282	case SIOCSIFMEDIA:
2283		mii = device_get_softc(sc->rl_miibus);
2284		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2285		break;
2286	case SIOCSIFCAP:
2287		ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_POLLING);
2288		ifp->if_capenable |=
2289		    ifr->ifr_reqcap & (IFCAP_HWCSUM | IFCAP_POLLING);
2290		if (ifp->if_capenable & IFCAP_TXCSUM)
2291			ifp->if_hwassist = RE_CSUM_FEATURES;
2292		else
2293			ifp->if_hwassist = 0;
2294		if (ifp->if_flags & IFF_RUNNING)
2295			re_init(sc);
2296		break;
2297	default:
2298		error = ether_ioctl(ifp, command, data);
2299		break;
2300	}
2301
2302	return (error);
2303}
2304
2305static void
2306re_watchdog(ifp)
2307	struct ifnet		*ifp;
2308{
2309	struct rl_softc		*sc;
2310
2311	sc = ifp->if_softc;
2312	RL_LOCK(sc);
2313	printf("re%d: watchdog timeout\n", sc->rl_unit);
2314	ifp->if_oerrors++;
2315
2316	re_txeof(sc);
2317	re_rxeof(sc);
2318	re_init_locked(sc);
2319
2320	RL_UNLOCK(sc);
2321}
2322
2323/*
2324 * Stop the adapter and free any mbufs allocated to the
2325 * RX and TX lists.
2326 */
2327static void
2328re_stop(sc)
2329	struct rl_softc		*sc;
2330{
2331	register int		i;
2332	struct ifnet		*ifp;
2333
2334	RL_LOCK_ASSERT(sc);
2335
2336	ifp = &sc->arpcom.ac_if;
2337	ifp->if_timer = 0;
2338
2339	untimeout(re_tick, sc, sc->rl_stat_ch);
2340	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2341#ifdef DEVICE_POLLING
2342	ether_poll_deregister(ifp);
2343#endif /* DEVICE_POLLING */
2344
2345	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
2346	CSR_WRITE_2(sc, RL_IMR, 0x0000);
2347
2348	if (sc->rl_head != NULL) {
2349		m_freem(sc->rl_head);
2350		sc->rl_head = sc->rl_tail = NULL;
2351	}
2352
2353	/* Free the TX list buffers. */
2354
2355	for (i = 0; i < RL_TX_DESC_CNT; i++) {
2356		if (sc->rl_ldata.rl_tx_mbuf[i] != NULL) {
2357			bus_dmamap_unload(sc->rl_ldata.rl_mtag,
2358			    sc->rl_ldata.rl_tx_dmamap[i]);
2359			m_freem(sc->rl_ldata.rl_tx_mbuf[i]);
2360			sc->rl_ldata.rl_tx_mbuf[i] = NULL;
2361		}
2362	}
2363
2364	/* Free the RX list buffers. */
2365
2366	for (i = 0; i < RL_RX_DESC_CNT; i++) {
2367		if (sc->rl_ldata.rl_rx_mbuf[i] != NULL) {
2368			bus_dmamap_unload(sc->rl_ldata.rl_mtag,
2369			    sc->rl_ldata.rl_rx_dmamap[i]);
2370			m_freem(sc->rl_ldata.rl_rx_mbuf[i]);
2371			sc->rl_ldata.rl_rx_mbuf[i] = NULL;
2372		}
2373	}
2374}
2375
2376/*
2377 * Device suspend routine.  Stop the interface and save some PCI
2378 * settings in case the BIOS doesn't restore them properly on
2379 * resume.
2380 */
2381static int
2382re_suspend(dev)
2383	device_t		dev;
2384{
2385	struct rl_softc		*sc;
2386
2387	sc = device_get_softc(dev);
2388
2389	RL_LOCK(sc);
2390	re_stop(sc);
2391	sc->suspended = 1;
2392	RL_UNLOCK(sc);
2393
2394	return (0);
2395}
2396
2397/*
2398 * Device resume routine.  Restore some PCI settings in case the BIOS
2399 * doesn't, re-enable busmastering, and restart the interface if
2400 * appropriate.
2401 */
2402static int
2403re_resume(dev)
2404	device_t		dev;
2405{
2406	struct rl_softc		*sc;
2407	struct ifnet		*ifp;
2408
2409	sc = device_get_softc(dev);
2410
2411	RL_LOCK(sc);
2412
2413	ifp = &sc->arpcom.ac_if;
2414
2415	/* reinitialize interface if necessary */
2416	if (ifp->if_flags & IFF_UP)
2417		re_init_locked(sc);
2418
2419	sc->suspended = 0;
2420	RL_UNLOCK(sc);
2421
2422	return (0);
2423}
2424
2425/*
2426 * Stop all chip I/O so that the kernel's probe routines don't
2427 * get confused by errant DMAs when rebooting.
2428 */
2429static void
2430re_shutdown(dev)
2431	device_t		dev;
2432{
2433	struct rl_softc		*sc;
2434
2435	sc = device_get_softc(dev);
2436
2437	RL_LOCK(sc);
2438	re_stop(sc);
2439	RL_UNLOCK(sc);
2440}
2441