/*-
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/re/if_re.c 218289 2011-02-04 17:49:55Z yongari $");

/*
 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * This driver is designed to support RealTek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
 * the RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
 *
 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
 * with the older 8139 family, however it also supports a special
 * C+ mode of operation that provides several new performance enhancing
 * features. These include:
 *
 *	o Descriptor based DMA mechanism. Each descriptor represents
 *	  a single packet fragment. Data buffers may be aligned on
 *	  any byte boundary.
 *
 *	o 64-bit DMA
 *
 *	o TCP/IP checksum offload for both RX and TX
 *
 *	o High and normal priority transmit DMA rings
 *
 *	o VLAN tag insertion and extraction
 *
 *	o TCP large send (segmentation offload)
 *
 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
 * programming API is fairly straightforward. The RX filtering, EEPROM
 * access and PHY access are the same as they are on the older 8139 series
 * chips.
 *
 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
 * same programming API and feature set as the 8139C+ with the following
 * differences and additions:
 *
 *	o 1000Mbps mode
 *
 *	o Jumbo frames
 *
 *	o GMII and TBI ports/registers for interfacing with copper
 *	  or fiber PHYs
 *
 *	o RX and TX DMA rings can have up to 1024 descriptors
 *	  (the 8139C+ allows a maximum of 64)
 *
 *	o Slight differences in register layout from the 8139C+
 *
 * The TX start and timer interrupt registers are at different locations
 * on the 8169 than they are on the 8139C+. Also, the status word in the
 * RX descriptor has a slightly different bit layout. The 8169 does not
 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
 * copper gigE PHY.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip'). These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features. It also implements TX
 * interrupt moderation using the timer interrupt registers, which
 * significantly reduces TX interrupt load. There is also support
 * for jumbo frames, however the 8169/8169S/8110S cannot transmit
 * jumbo frames larger than 7440 bytes, so the maximum MTU possible
 * with this driver is 7422 bytes.
 */
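
/*
 * For reference, the 7422 byte figure above is just the 7440 byte
 * hardware limit minus the 14 byte ethernet header and the 4 byte CRC
 * (ETHER_HDR_LEN + ETHER_CRC_LEN = 18): 7440 - 18 = 7422.
 */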

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <pci/if_rlreg.h>

MODULE_DEPEND(re, pci, 1, 1, 1);
MODULE_DEPEND(re, ether, 1, 1, 1);
MODULE_DEPEND(re, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Tunables. */
static int intr_filter = 0;
TUNABLE_INT("hw.re.intr_filter", &intr_filter);
static int msi_disable = 0;
TUNABLE_INT("hw.re.msi_disable", &msi_disable);
static int msix_disable = 0;
TUNABLE_INT("hw.re.msix_disable", &msix_disable);
static int prefer_iomap = 0;
TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);
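
/*
 * Example (a sketch, not from the original source): as loader
 * tunables these can be set from /boot/loader.conf before the
 * driver initializes, e.g.:
 *
 *	hw.re.msi_disable="1"		# fall back to INTx interrupts
 *	hw.re.prefer_iomap="1"		# use I/O space register mapping
 */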

#define RE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static struct rl_type re_devs[] = {
	{ DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
	    "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
	{ RT_VENDORID, RT_DEVICEID_8139, 0,
	    "RealTek 8139C+ 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8101E, 0,
	    "RealTek 810xE PCIe 10/100baseTX" },
	{ RT_VENDORID, RT_DEVICEID_8168, 0,
	    "RealTek 8168/8111 B/C/CP/D/DP/E PCIe Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169, 0,
	    "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169SC, 0,
	    "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
	{ COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
	    "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
	{ LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
	    "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
	{ USR_VENDORID, USR_DEVICEID_997902, 0,
	    "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
};

static struct rl_hwrev re_hwrevs[] = {
	{ RL_HWREV_8139, RL_8139, "", RL_MTU },
	{ RL_HWREV_8139A, RL_8139, "A", RL_MTU },
	{ RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU },
	{ RL_HWREV_8139B, RL_8139, "B", RL_MTU },
	{ RL_HWREV_8130, RL_8139, "8130", RL_MTU },
	{ RL_HWREV_8139C, RL_8139, "C", RL_MTU },
	{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU },
	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU },
	{ RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU },
	{ RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU },
	{ RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU },
	{ RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
	{ RL_HWREV_8100, RL_8139, "8100", RL_MTU },
	{ RL_HWREV_8101, RL_8139, "8101", RL_MTU },
	{ RL_HWREV_8100E, RL_8169, "8100E", RL_MTU },
	{ RL_HWREV_8101E, RL_8169, "8101E", RL_MTU },
	{ RL_HWREV_8102E, RL_8169, "8102E", RL_MTU },
	{ RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU },
	{ RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU },
	{ RL_HWREV_8103E, RL_8169, "8103E", RL_MTU },
	{ RL_HWREV_8105E, RL_8169, "8105E", RL_MTU },
	{ RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU },
	{ RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU },
	{ RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K },
	{ 0, 0, NULL, 0 }
};

static int re_probe		(device_t);
static int re_attach		(device_t);
static int re_detach		(device_t);

static int re_encap		(struct rl_softc *, struct mbuf **);

static void re_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static int re_allocmem		(device_t, struct rl_softc *);
static __inline void re_discard_rxbuf
				(struct rl_softc *, int);
static int re_newbuf		(struct rl_softc *, int);
static int re_jumbo_newbuf	(struct rl_softc *, int);
static int re_rx_list_init	(struct rl_softc *);
static int re_jrx_list_init	(struct rl_softc *);
static int re_tx_list_init	(struct rl_softc *);
#ifdef RE_FIXUP_RX
static __inline void re_fixup_rx
				(struct mbuf *);
#endif
static int re_rxeof		(struct rl_softc *, int *);
static void re_txeof		(struct rl_softc *);
#ifdef DEVICE_POLLING
static int re_poll		(struct ifnet *, enum poll_cmd, int);
static int re_poll_locked	(struct ifnet *, enum poll_cmd, int);
#endif
static int re_intr		(void *);
static void re_intr_msi		(void *);
static void re_tick		(void *);
static void re_int_task		(void *, int);
static void re_start		(struct ifnet *);
static void re_start_locked	(struct ifnet *);
static int re_ioctl		(struct ifnet *, u_long, caddr_t);
static void re_init		(void *);
static void re_init_locked	(struct rl_softc *);
static void re_stop		(struct rl_softc *);
static void re_watchdog		(struct rl_softc *);
static int re_suspend		(device_t);
static int re_resume		(device_t);
static int re_shutdown		(device_t);
static int re_ifmedia_upd	(struct ifnet *);
static void re_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

static void re_eeprom_putbyte	(struct rl_softc *, int);
static void re_eeprom_getword	(struct rl_softc *, int, u_int16_t *);
static void re_read_eeprom	(struct rl_softc *, caddr_t, int, int);
static int re_gmii_readreg	(device_t, int, int);
static int re_gmii_writereg	(device_t, int, int, int);

static int re_miibus_readreg	(device_t, int, int);
static int re_miibus_writereg	(device_t, int, int, int);
static void re_miibus_statchg	(device_t);

static void re_set_jumbo	(struct rl_softc *, int);
static void re_set_rxmode	(struct rl_softc *);
static void re_reset		(struct rl_softc *);
static void re_setwol		(struct rl_softc *);
static void re_clrwol		(struct rl_softc *);

#ifdef RE_DIAG
static int re_diag		(struct rl_softc *);
#endif

static void re_add_sysctls	(struct rl_softc *);
static int re_sysctl_stats	(SYSCTL_HANDLER_ARGS);
static int sysctl_int_range	(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_re_int_mod	(SYSCTL_HANDLER_ARGS);

static device_method_t re_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		re_probe),
	DEVMETHOD(device_attach,	re_attach),
	DEVMETHOD(device_detach,	re_detach),
	DEVMETHOD(device_suspend,	re_suspend),
	DEVMETHOD(device_resume,	re_resume),
	DEVMETHOD(device_shutdown,	re_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	re_miibus_readreg),
	DEVMETHOD(miibus_writereg,	re_miibus_writereg),
	DEVMETHOD(miibus_statchg,	re_miibus_statchg),

	{ 0, 0 }
};

static driver_t re_driver = {
	"re",
	re_methods,
	sizeof(struct rl_softc)
};

static devclass_t re_devclass;

DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);

#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)

/*
 * Send a read command and address to the EEPROM.
 */
static void
re_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	int			d, i;

	d = addr | (RL_9346_READ << sc->rl_eewidth);

	/*
	 * Feed in each bit and strobe the clock.
	 */

	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}
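
/*
 * Illustration (assuming a 93C46-style serial EEPROM): the loop above
 * shifts the command word out MSB first -- the start bit and READ
 * opcode (RL_9346_READ) followed by the word address -- raising
 * RL_EE_DATAIN for 1 bits and strobing RL_EE_CLK once per bit.
 * Wider parts simply use a longer address field via rl_eewidth.
 */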

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
{
	int			i;
	u_int16_t		word = 0;

	/*
	 * Send address of word we want to read.
	 */
	re_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
{
	int			i;
	u_int16_t		word = 0, *ptr;

	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);

	DELAY(100);

	for (i = 0; i < cnt; i++) {
		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
		re_eeprom_getword(sc, off + i, &word);
		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
		ptr = (u_int16_t *)(dest + (i * 2));
		*ptr = word;
	}

	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
}
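
/*
 * Usage sketch (mirroring re_attach() below): the station address is
 * read as three little-endian words starting at RL_EE_EADDR:
 *
 *	u_int16_t as[ETHER_ADDR_LEN / 2];
 *	re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
 *
 * and each word is then byte-swapped with le16toh() before being
 * copied into the ethernet address.
 */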

static int
re_gmii_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc		*sc;
	u_int32_t		rval;
	int			i;

	sc = device_get_softc(dev);

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RL_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
		return (rval);
	}

	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (rval & RL_PHYAR_BUSY)
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT) {
		device_printf(sc->rl_dev, "PHY read failed\n");
		return (0);
	}

	/*
	 * Controller requires a 20us delay to process next MDIO request.
	 */
	DELAY(20);

	return (rval & RL_PHYAR_PHYDATA);
}

static int
re_gmii_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc		*sc;
	u_int32_t		rval;
	int			i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (!(rval & RL_PHYAR_BUSY))
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT) {
		device_printf(sc->rl_dev, "PHY write failed\n");
		return (0);
	}

	/*
	 * Controller requires a 20us delay to process next MDIO request.
	 */
	DELAY(20);

	return (0);
}

static int
re_miibus_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc		*sc;
	u_int16_t		rval = 0;
	u_int16_t		re8139_reg = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_readreg(dev, phy, reg);
		return (rval);
	}

	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return (0);
	/*
	 * Allow the rlphy driver to read the media status
	 * register. If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RL_MEDIASTAT:
		rval = CSR_READ_1(sc, RL_MEDIASTAT);
		return (rval);
	default:
		device_printf(sc->rl_dev, "bad phy register\n");
		return (0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) {
		/* 8139C+ has different bit layout. */
		rval &= ~(BMCR_LOOP | BMCR_ISO);
	}
	return (rval);
}

static int
re_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc		*sc;
	u_int16_t		re8139_reg = 0;
	int			rval = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_writereg(dev, phy, reg, data);
		return (rval);
	}

	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		if (sc->rl_type == RL_8139CPLUS) {
			/* 8139C+ has different bit layout. */
			data &= ~(BMCR_LOOP | BMCR_ISO);
		}
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return (0);
	default:
		device_printf(sc->rl_dev, "bad phy register\n");
		return (0);
	}
	CSR_WRITE_2(sc, re8139_reg, data);
	return (0);
}

static void
re_miibus_statchg(device_t dev)
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->rl_miibus);
	ifp = sc->rl_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->rl_flags &= ~RL_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
				break;
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/*
	 * RealTek controllers do not provide any interface to the
	 * Tx/Rx MACs for the resolved speed, duplex and flow-control
	 * parameters.
	 */
}

/*
 * Set the RX configuration and 64-bit multicast hash filter.
 */
static void
re_set_rxmode(struct rl_softc *sc)
{
	struct ifnet		*ifp;
	struct ifmultiaddr	*ifma;
	uint32_t		hashes[2] = { 0, 0 };
	uint32_t		h, rxfilt;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;

	rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;

	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		/*
		 * Unlike other hardware, we have to explicitly set
		 * RL_RXCFG_RX_MULTI to receive multicast frames in
		 * promiscuous mode.
		 */
		rxfilt |= RL_RXCFG_RX_MULTI;
		hashes[0] = hashes[1] = 0xffffffff;
		goto done;
	}

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	if_maddr_runlock(ifp);

	if (hashes[0] != 0 || hashes[1] != 0) {
		/*
		 * For some unfathomable reason, RealTek decided to
		 * reverse the order of the multicast hash registers
		 * in the PCI Express parts.  This means we have to
		 * write the hash pattern in reverse order for those
		 * devices.
		 */
		if ((sc->rl_flags & RL_FLAG_PCIE) != 0) {
			h = bswap32(hashes[0]);
			hashes[0] = bswap32(hashes[1]);
			hashes[1] = h;
		}
		rxfilt |= RL_RXCFG_RX_MULTI;
	}

done:
	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}
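
/*
 * Worked example (illustrative): a multicast address whose big-endian
 * CRC32 gives h = 40 after the >> 26 shift sets bit 8 (40 - 32) in
 * hashes[1], i.e. in RL_MAR4, while h = 5 would set bit 5 of RL_MAR0.
 * On PCIe parts the two words are byte-swapped and exchanged before
 * being written, as noted above.
 */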

static void
re_reset(struct rl_softc *sc)
{
	int			i;

	RL_LOCK_ASSERT(sc);

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "reset never completed!\n");

	if ((sc->rl_flags & RL_FLAG_MACRESET) != 0)
		CSR_WRITE_1(sc, 0x82, 1);
	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S)
		re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0);
}

#ifdef RE_DIAG

/*
 * The following routine is designed to test for a defect on some
 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
 * lines connected to the bus, however for a 32-bit only card, they
 * should be pulled high. The result of this defect is that the
 * NIC will not work right if you plug it into a 64-bit slot: DMA
 * operations will be done with 64-bit transfers, which will fail
 * because the 64-bit data lines aren't connected.
 *
 * There's no way to work around this (short of taking a soldering
 * iron to the board), however we can detect it. The method we use
 * here is to put the NIC into digital loopback mode, set the receiver
 * to promiscuous mode, and then try to send a frame. We then compare
 * the frame data we sent to what was received. If the data matches,
 * then the NIC is working correctly, otherwise we know the user has
 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
 * slot. In the latter case, there's no way the NIC can work correctly,
 * so we print out a message on the console and abort the device attach.
 */

static int
re_diag(struct rl_softc *sc)
{
	struct ifnet		*ifp = sc->rl_ifp;
	struct mbuf		*m0;
	struct ether_header	*eh;
	struct rl_desc		*cur_rx;
	u_int16_t		status;
	u_int32_t		rxstat;
	int			total_len, i, error = 0, phyaddr;
	u_int8_t		dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	u_int8_t		src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	/* Allocate a single mbuf */
	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return (ENOBUFS);

	RL_LOCK(sc);

	/*
	 * Initialize the NIC in test mode. This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->rl_testmode = 1;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	re_init_locked(sc);
	sc->rl_flags |= RL_FLAG_LINK;
	if (sc->rl_type == RL_8169)
		phyaddr = 1;
	else
		phyaddr = 0;

	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR);
		if (!(status & BMCR_RESET))
			break;
	}

	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP);
	CSR_WRITE_2(sc, RL_ISR, RL_INTRS);

	DELAY(100000);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	bcopy((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 * Note: IF_HANDOFF() ultimately calls re_start() for us.
	 */

	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
	RL_UNLOCK(sc);
	/* XXX: re_diag must not be called when in ALTQ mode */
	IF_HANDOFF(&ifp->if_snd, m0, ifp);
	RL_LOCK(sc);
	m0 = NULL;

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RL_ISR);
		CSR_WRITE_2(sc, RL_ISR, status);
		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
			break;
		DELAY(10);
	}

	if (i == RL_TIMEOUT) {
		device_printf(sc->rl_dev,
		    "diagnostic failed, failed to receive packet in"
		    " loopback mode\n");
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring. Grab it from there.
	 */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap);

	m0 = sc->rl_ldata.rl_rx_desc[0].rx_m;
	sc->rl_ldata.rl_rx_desc[0].rx_m = NULL;
	eh = mtod(m0, struct ether_header *);

	cur_rx = &sc->rl_ldata.rl_rx_list[0];
	total_len = RL_RXBYTES(cur_rx);
	rxstat = le32toh(cur_rx->rl_cmdstat);

	if (total_len != ETHER_MIN_LEN) {
		device_printf(sc->rl_dev,
		    "diagnostic failed, received short packet\n");
		error = EIO;
		goto done;
	}

	/* Test that the received packet data matches what we sent. */

	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n");
		device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
		    dst, ":", src, ":", ETHERTYPE_IP);
		device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
		    eh->ether_dhost, ":", eh->ether_shost, ":",
		    ntohs(eh->ether_type));
		device_printf(sc->rl_dev, "You may have a defective 32-bit "
		    "NIC plugged into a 64-bit PCI slot.\n");
		device_printf(sc->rl_dev, "Please re-install the NIC in a "
		    "32-bit slot for proper operation.\n");
		device_printf(sc->rl_dev, "Read the re(4) man page for more "
		    "details.\n");
		error = EIO;
	}

done:
	/* Turn interface off, release resources */

	sc->rl_testmode = 0;
	sc->rl_flags &= ~RL_FLAG_LINK;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(sc);
	if (m0 != NULL)
		m_freem(m0);

	RL_UNLOCK(sc);

	return (error);
}

#endif

/*
 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
re_probe(device_t dev)
{
	struct rl_type		*t;
	uint16_t		devid, vendor;
	uint16_t		revid, sdevid;
	int			i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	revid = pci_get_revid(dev);
	sdevid = pci_get_subdevice(dev);

	if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) {
		if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) {
			/*
			 * Only attach to rev. 3 of the Linksys EG1032 adapter.
			 * Rev. 2 is supported by sk(4).
			 */
			return (ENXIO);
		}
	}

	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
		if (revid != 0x20) {
			/* 8139, let rl(4) take care of this device. */
			return (ENXIO);
		}
	}

	t = re_devs;
	for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) {
		if (vendor == t->rl_vid && devid == t->rl_did) {
			device_set_desc(dev, t->rl_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

/*
 * Map a single buffer address.
 */

static void
re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t		*addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;
}
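
/*
 * Usage sketch: this callback is passed to bus_dmamap_load() for the
 * descriptor rings and the stats block, with 'arg' pointing to a
 * bus_addr_t that receives the physical address, e.g.:
 *
 *	bus_dmamap_load(tag, map, ring, size, re_dma_map_addr,
 *	    &ring_addr, BUS_DMA_NOWAIT);
 *
 * Each such load maps one contiguous region, hence the nseg == 1
 * assertion.
 */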

static int
re_allocmem(device_t dev, struct rl_softc *sc)
{
	bus_addr_t		lowaddr;
	bus_size_t		rx_list_size, tx_list_size;
	int			error;
	int			i;

	rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc);
	tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 * In order to use DAC, the RL_CPLUSCMD_PCI_DAC bit of the
	 * RL_CPLUS_CMD register must be set. However, some RealTek
	 * chips are known to be buggy with DAC handling, so disable
	 * DAC by limiting the DMA address space to 32 bits. PCIe
	 * variants of the RealTek chips may not have this limitation.
	 */
	lowaddr = BUS_SPACE_MAXADDR;
	if ((sc->rl_flags & RL_FLAG_PCIE) == 0)
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    NULL, NULL, &sc->rl_parent_tag);
	if (error) {
		device_printf(dev, "could not allocate parent DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for TX mbufs.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0,
	    NULL, NULL, &sc->rl_ldata.rl_tx_mtag);
	if (error) {
		device_printf(dev, "could not allocate TX DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for RX mbufs.
	 */

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
		error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t),
		    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL,
		    &sc->rl_ldata.rl_jrx_mtag);
		if (error) {
			device_printf(dev,
			    "could not allocate jumbo RX DMA tag\n");
			return (error);
		}
	}
	error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
	if (error) {
		device_printf(dev, "could not allocate RX DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, tx_list_size, 1, tx_list_size, 0,
	    NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate TX DMA ring tag\n");
		return (error);
	}

	/* Allocate DMA'able memory for the TX ring */

	error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
	    (void **)&sc->rl_ldata.rl_tx_list,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_tx_list_map);
	if (error) {
		device_printf(dev, "could not allocate TX DMA ring\n");
		return (error);
	}

	/* Load the map for the TX ring. */

	sc->rl_ldata.rl_tx_list_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
	    tx_list_size, re_dma_map_addr,
	    &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
		device_printf(dev, "could not load TX DMA ring\n");
		return (ENOMEM);
	}

	/* Create DMA maps for TX buffers */

	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
		    &sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
		if (error) {
			device_printf(dev, "could not create DMA map for TX\n");
			return (error);
		}
	}

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, rx_list_size, 1, rx_list_size, 0,
	    NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
	if (error) {
		device_printf(dev, "could not create RX DMA ring tag\n");
		return (error);
	}

	/* Allocate DMA'able memory for the RX ring */

	error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
	    (void **)&sc->rl_ldata.rl_rx_list,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_rx_list_map);
	if (error) {
		device_printf(dev, "could not allocate RX DMA ring\n");
		return (error);
	}

	/* Load the map for the RX ring. */

	sc->rl_ldata.rl_rx_list_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
	    rx_list_size, re_dma_map_addr,
	    &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
		device_printf(dev, "could not load RX DMA ring\n");
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers */

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
		error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
		    &sc->rl_ldata.rl_jrx_sparemap);
		if (error) {
			device_printf(dev,
			    "could not create spare DMA map for jumbo RX\n");
			return (error);
		}
		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
			error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
			    &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
			if (error) {
				device_printf(dev,
				    "could not create DMA map for jumbo RX\n");
				return (error);
			}
		}
	}
	error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
	    &sc->rl_ldata.rl_rx_sparemap);
	if (error) {
		device_printf(dev, "could not create spare DMA map for RX\n");
		return (error);
	}
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
		    &sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
		if (error) {
			device_printf(dev, "could not create DMA map for RX\n");
			return (error);
		}
	}

	/* Create DMA map for statistics. */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL,
	    &sc->rl_ldata.rl_stag);
	if (error) {
		device_printf(dev, "could not create statistics DMA tag\n");
		return (error);
	}
	/* Allocate DMA'able memory for statistics. */
	error = bus_dmamem_alloc(sc->rl_ldata.rl_stag,
	    (void **)&sc->rl_ldata.rl_stats,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_smap);
	if (error) {
		device_printf(dev,
		    "could not allocate statistics DMA memory\n");
		return (error);
	}
	/* Load the map for statistics. */
	sc->rl_ldata.rl_stats_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap,
	    sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr,
	    &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) {
		device_printf(dev, "could not load statistics DMA memory\n");
		return (ENOMEM);
	}

	return (0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
re_attach(device_t dev)
{
	u_char			eaddr[ETHER_ADDR_LEN];
	u_int16_t		as[ETHER_ADDR_LEN / 2];
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	struct rl_hwrev		*hw_rev;
	int			hwrev;
	u_int16_t		devid, re_did = 0;
	int			error = 0, i, phy, rid;
	int			msic, msixc, reg;
	uint8_t			cfg;

	sc = device_get_softc(dev);
	sc->rl_dev = dev;

	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	devid = pci_get_device(dev);
	/*
	 * Prefer memory space register mapping over I/O space.
	 * However, the RTL8169SC does not seem to work when memory
	 * mapping is used, so always activate I/O mapping for it.
	 */
	if (devid == RT_DEVICEID_8169SC)
		prefer_iomap = 1;
	if (prefer_iomap == 0) {
		sc->rl_res_id = PCIR_BAR(1);
		sc->rl_res_type = SYS_RES_MEMORY;
		/* RTL8168/8101E seems to use different BARs. */
		if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
			sc->rl_res_id = PCIR_BAR(2);
	} else {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
	}
	sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
	    &sc->rl_res_id, RF_ACTIVE);
	if (sc->rl_res == NULL && prefer_iomap == 0) {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
		    &sc->rl_res_id, RF_ACTIVE);
	}
	if (sc->rl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->rl_btag = rman_get_bustag(sc->rl_res);
	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);

	msic = pci_msi_count(dev);
	msixc = pci_msix_count(dev);
	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0)
		sc->rl_flags |= RL_FLAG_PCIE;
	if (bootverbose) {
		device_printf(dev, "MSI count : %d\n", msic);
		device_printf(dev, "MSI-X count : %d\n", msixc);
	}
	if (msix_disable > 0)
		msixc = 0;
	if (msi_disable > 0)
		msic = 0;
	/* Prefer MSI-X to MSI. */
	if (msixc > 0) {
		msixc = 1;
		rid = PCIR_BAR(4);
		sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);
		if (sc->rl_res_pba == NULL) {
			device_printf(sc->rl_dev,
			    "could not allocate MSI-X PBA resource\n");
		}
		if (sc->rl_res_pba != NULL &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == 1) {
				device_printf(dev, "Using %d MSI-X message\n",
				    msixc);
				sc->rl_flags |= RL_FLAG_MSIX;
			} else
				pci_release_msi(dev);
		}
		if ((sc->rl_flags & RL_FLAG_MSIX) == 0) {
			if (sc->rl_res_pba != NULL)
				bus_release_resource(dev, SYS_RES_MEMORY, rid,
				    sc->rl_res_pba);
			sc->rl_res_pba = NULL;
			msixc = 0;
		}
	}
	/* Prefer MSI to INTx. */
	if (msixc == 0 && msic > 0) {
		msic = 1;
		if (pci_alloc_msi(dev, &msic) == 0) {
			if (msic == RL_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI message\n",
				    msic);
				sc->rl_flags |= RL_FLAG_MSI;
				/* Explicitly set MSI enable bit. */
				CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
				cfg = CSR_READ_1(sc, RL_CFG2);
				cfg |= RL_CFG2_MSI;
				CSR_WRITE_1(sc, RL_CFG2, cfg);
				CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
			} else
				pci_release_msi(dev);
		}
		if ((sc->rl_flags & RL_FLAG_MSI) == 0)
			msic = 0;
	}

	/* Allocate interrupt */
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) {
		rid = 0;
		sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->rl_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
			sc->rl_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->rl_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
	}

	if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
		CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
		cfg = CSR_READ_1(sc, RL_CFG2);
		if ((cfg & RL_CFG2_MSI) != 0) {
			device_printf(dev, "turning off MSI enable bit.\n");
			cfg &= ~RL_CFG2_MSI;
			CSR_WRITE_1(sc, RL_CFG2, cfg);
		}
		CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
	}

	hw_rev = re_hwrevs;
	hwrev = CSR_READ_4(sc, RL_TXCFG);
	switch (hwrev & 0x70000000) {
	case 0x00000000:
	case 0x10000000:
		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000);
		hwrev &= (RL_TXCFG_HWREV | 0x80000000);
		break;
	default:
		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
		hwrev &= RL_TXCFG_HWREV;
		break;
	}
	device_printf(dev, "MAC rev. 0x%08x\n", hwrev & 0x00700000);
	while (hw_rev->rl_desc != NULL) {
		if (hw_rev->rl_rev == hwrev) {
			sc->rl_type = hw_rev->rl_type;
			sc->rl_hwrev = hw_rev;
			break;
		}
		hw_rev++;
	}
	if (hw_rev->rl_desc == NULL) {
		device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
		error = ENXIO;
		goto fail;
	}

	switch (hw_rev->rl_rev) {
	case RL_HWREV_8139CPLUS:
		sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8100E:
	case RL_HWREV_8101E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
		break;
	case RL_HWREV_8102E:
	case RL_HWREV_8102EL:
	case RL_HWREV_8102EL_SPIN1:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8103E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP;
		break;
	case RL_HWREV_8105E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8168B_SPIN1:
	case RL_HWREV_8168B_SPIN2:
		sc->rl_flags |= RL_FLAG_WOLRXENB;
		/* FALLTHROUGH */
	case RL_HWREV_8168B_SPIN3:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
		break;
	case RL_HWREV_8168C_SPIN2:
		sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168C:
		if ((hwrev & 0x00700000) == 0x00200000)
			sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168CP:
	case RL_HWREV_8168D:
	case RL_HWREV_8168DP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2;
		break;
	case RL_HWREV_8168E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2;
		break;
	case RL_HWREV_8168E_VL:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2;
		break;
	case RL_HWREV_8169_8110SB:
	case RL_HWREV_8169_8110SBL:
	case RL_HWREV_8169_8110SC:
	case RL_HWREV_8169_8110SCE:
		sc->rl_flags |= RL_FLAG_PHYWAKE;
		/* FALLTHROUGH */
	case RL_HWREV_8169:
	case RL_HWREV_8169S:
	case RL_HWREV_8110S:
		sc->rl_flags |= RL_FLAG_MACRESET;
		break;
	default:
		break;
	}

	/* Reset the adapter. */
	RL_LOCK(sc);
	re_reset(sc);
	RL_UNLOCK(sc);

	/* Enable PME. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
	cfg = CSR_READ_1(sc, RL_CFG1);
	cfg |= RL_CFG1_PME;
	CSR_WRITE_1(sc, RL_CFG1, cfg);
	cfg = CSR_READ_1(sc, RL_CFG5);
	cfg &= RL_CFG5_PME_STS;
	CSR_WRITE_1(sc, RL_CFG5, cfg);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
		/*
		 * XXX Should have a better way to extract station
		 * address from EEPROM.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	} else {
		sc->rl_eewidth = RL_9356_ADDR_LEN;
		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
		if (re_did != 0x8129)
			sc->rl_eewidth = RL_9346_ADDR_LEN;

		/*
		 * Get station address from the EEPROM.
		 */
		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
			as[i] = le16toh(as[i]);
		bcopy(as, eaddr, sizeof(eaddr));
	}

	if (sc->rl_type == RL_8169) {
		/* Set RX length mask and number of descriptors. */
		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
		sc->rl_txstart = RL_GTXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
	} else {
		/* Set RX length mask and number of descriptors. */
		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
		sc->rl_txstart = RL_TXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
	}

	error = re_allocmem(dev, sc);
	if (error)
		goto fail;
	re_add_sysctls(sc);

	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Take controller out of deep sleep mode. */
	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) | 0x01);
		else
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
	}

	/* Take PHY out of power down mode. */
	if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0)
		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
	if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
		re_gmii_writereg(dev, 1, 0x1f, 0);
		re_gmii_writereg(dev, 1, 0x0e, 0);
	}

#define	RE_PHYAD_INTERNAL	 0

	/* Do MII setup. */
	phy = RE_PHYAD_INTERNAL;
	if (sc->rl_type == RL_8169)
		phy = 1;
	error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd,
	    re_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = re_ioctl;
	ifp->if_start = re_start;
	/*
	 * The RTL8168/8111C generates frames with a wrong IP checksum
	 * if the packet has IP options, so disable TX IP checksum
	 * offloading on those revisions.
	 */
	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C ||
	    sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2)
		ifp->if_hwassist = CSUM_TCP | CSUM_UDP;
	else
		ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
	ifp->if_hwassist |= CSUM_TSO;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_init = re_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	if (ifp->if_capabilities & IFCAP_HWCSUM)
		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	/* Enable WOL if PM is supported. */
	if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &reg) == 0)
		ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Don't enable TSO by default.  It is known to generate
	 * corrupted TCP segments (bad TCP options) under certain
	 * circumstances.
	 */
	ifp->if_hwassist &= ~CSUM_TSO;
	ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO);
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#ifdef RE_DIAG
	/*
	 * Perform hardware diagnostic on the original RTL8169.
	 * Some 32-bit cards were incorrectly wired and would
	 * malfunction if plugged into a 64-bit slot.
	 */

	if (hwrev == RL_HWREV_8169) {
		error = re_diag(sc);
		if (error) {
			device_printf(dev,
			    "attach aborted due to hardware diag failure\n");
			ether_ifdetach(ifp);
			goto fail;
		}
	}
#endif

#ifdef RE_TX_MODERATION
	intr_filter = 1;
#endif
	/* Hook interrupt last to avoid having to lock softc */
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
	    intr_filter == 0) {
		error = bus_setup_intr(dev, sc->rl_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc,
		    &sc->rl_intrhand[0]);
	} else {
		error = bus_setup_intr(dev, sc->rl_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
		    &sc->rl_intrhand[0]);
	}
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
	}

fail:

	if (error)
		re_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
re_detach(device_t dev)
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	int			i, rid;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;
	KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING)
			ether_poll_deregister(ifp);
#endif
		RL_LOCK(sc);
#if 0
		sc->suspended = 1;
#endif
		re_stop(sc);
		RL_UNLOCK(sc);
		callout_drain(&sc->rl_stat_callout);
		taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
		/*
		 * Force off the IFF_UP flag here, in case someone
		 * still had a BPF descriptor attached to this
		 * interface. If they do, ether_ifdetach() will cause
		 * the BPF code to try and clear the promisc mode
		 * flag, which will bubble down to re_ioctl(),
		 * which will try to call re_init() again. This will
		 * turn the NIC back on and restart the MII ticker,
		 * which will panic the system when the kernel tries
		 * to invoke the re_tick() function that isn't there
		 * anymore.
		 */
		ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(ifp);
	}
	if (sc->rl_miibus)
		device_delete_child(dev, sc->rl_miibus);
	bus_generic_detach(dev);

	/*
	 * The rest is resource deallocation, so we should already be
	 * stopped here.
	 */

	if (sc->rl_intrhand[0] != NULL) {
		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
		sc->rl_intrhand[0] = NULL;
	}
	if (ifp != NULL)
		if_free(ifp);
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
		rid = 0;
	else
		rid = 1;
	if (sc->rl_irq[0] != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]);
		sc->rl_irq[0] = NULL;
	}
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0)
		pci_release_msi(dev);
	if (sc->rl_res_pba) {
		rid = PCIR_BAR(4);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba);
	}
	if (sc->rl_res)
		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
		    sc->rl_res);

	/* Unload and free the RX DMA ring memory and map */

	if (sc->rl_ldata.rl_rx_list_tag) {
		if (sc->rl_ldata.rl_rx_list_map)
			bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
			    sc->rl_ldata.rl_rx_list_map);
		if (sc->rl_ldata.rl_rx_list_map && sc->rl_ldata.rl_rx_list)
			bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
			    sc->rl_ldata.rl_rx_list,
			    sc->rl_ldata.rl_rx_list_map);
		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
	}

	/* Unload and free the TX DMA ring memory and map */

	if (sc->rl_ldata.rl_tx_list_tag) {
		if (sc->rl_ldata.rl_tx_list_map)
			bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
			    sc->rl_ldata.rl_tx_list_map);
		if (sc->rl_ldata.rl_tx_list_map && sc->rl_ldata.rl_tx_list)
			bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
			    sc->rl_ldata.rl_tx_list,
			    sc->rl_ldata.rl_tx_list_map);
		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
	}

	/* Destroy all the RX and TX buffer maps */

	if (sc->rl_ldata.rl_tx_mtag) {
		for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
			if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap)
				bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
				    sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
		}
		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
	}
	if (sc->rl_ldata.rl_rx_mtag) {
		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
			if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap)
				bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
				    sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
		}
		if (sc->rl_ldata.rl_rx_sparemap)
			bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
			    sc->rl_ldata.rl_rx_sparemap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
	}
	if (sc->rl_ldata.rl_jrx_mtag) {
		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
			if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap)
				bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
				    sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
		}
		if (sc->rl_ldata.rl_jrx_sparemap)
			bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
			    sc->rl_ldata.rl_jrx_sparemap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag);
	}
	/* Unload and free the stats buffer and map */

	if (sc->rl_ldata.rl_stag) {
		if (sc->rl_ldata.rl_smap)
			bus_dmamap_unload(sc->rl_ldata.rl_stag,
			    sc->rl_ldata.rl_smap);
		if (sc->rl_ldata.rl_smap && sc->rl_ldata.rl_stats)
			bus_dmamem_free(sc->rl_ldata.rl_stag,
			    sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
	}

	if (sc->rl_parent_tag)
		bus_dma_tag_destroy(sc->rl_parent_tag);

	mtx_destroy(&sc->rl_mtx);

	return (0);
}
1781
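/*
 * Hand the RX descriptor at 'idx' back to the chip without
 * replacing its mbuf. This is used to recycle the current buffer
 * when a received frame has to be dropped (error frames, failed
 * buffer allocations, discarded fragments).
 */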
1782static __inline void
1783re_discard_rxbuf(struct rl_softc *sc, int idx)
1784{
1785	struct rl_desc		*desc;
1786	struct rl_rxdesc	*rxd;
1787	uint32_t		cmdstat;
1788
1789	if (sc->rl_ifp->if_mtu > RL_MTU &&
1790	    (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
1791		rxd = &sc->rl_ldata.rl_jrx_desc[idx];
1792	else
1793		rxd = &sc->rl_ldata.rl_rx_desc[idx];
1794	desc = &sc->rl_ldata.rl_rx_list[idx];
1795	desc->rl_vlanctl = 0;
1796	cmdstat = rxd->rx_size;
1797	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1798		cmdstat |= RL_RDESC_CMD_EOR;
1799	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1800}
1801
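/*
 * Attach a fresh 2K mbuf cluster to the RX descriptor at 'idx'.
 * The new mbuf is loaded into the spare DMA map first and the maps
 * are then swapped, so if the load fails the old mbuf and mapping
 * are left untouched and ENOBUFS is returned.
 */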
1802static int
1803re_newbuf(struct rl_softc *sc, int idx)
1804{
1805	struct mbuf		*m;
1806	struct rl_rxdesc	*rxd;
1807	bus_dma_segment_t	segs[1];
1808	bus_dmamap_t		map;
1809	struct rl_desc		*desc;
1810	uint32_t		cmdstat;
1811	int			error, nsegs;
1812
1813	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1814	if (m == NULL)
1815		return (ENOBUFS);
1816
1817	m->m_len = m->m_pkthdr.len = MCLBYTES;
1818#ifdef RE_FIXUP_RX
1819	/*
1820	 * This is part of an evil trick to deal with non-x86 platforms.
1821	 * The RealTek chip requires RX buffers to be aligned on 64-bit
1822	 * boundaries, but that will hose non-x86 machines. To get around
1823	 * this, we leave some empty space at the start of each buffer
1824	 * and for non-x86 hosts, we copy the buffer back six bytes
1825	 * to achieve word alignment. This is slightly more efficient
1826	 * than allocating a new buffer, copying the contents, and
1827	 * discarding the old buffer.
1828	 */
1829	m_adj(m, RE_ETHER_ALIGN);
1830#endif
1831	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag,
1832	    sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
1833	if (error != 0) {
1834		m_freem(m);
1835		return (ENOBUFS);
1836	}
1837	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1838
1839	rxd = &sc->rl_ldata.rl_rx_desc[idx];
1840	if (rxd->rx_m != NULL) {
1841		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1842		    BUS_DMASYNC_POSTREAD);
1843		bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap);
1844	}
1845
1846	rxd->rx_m = m;
1847	map = rxd->rx_dmamap;
1848	rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap;
1849	rxd->rx_size = segs[0].ds_len;
1850	sc->rl_ldata.rl_rx_sparemap = map;
1851	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1852	    BUS_DMASYNC_PREREAD);
1853
1854	desc = &sc->rl_ldata.rl_rx_list[idx];
1855	desc->rl_vlanctl = 0;
1856	desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
1857	desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
1858	cmdstat = segs[0].ds_len;
1859	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1860		cmdstat |= RL_RDESC_CMD_EOR;
1861	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1862
1863	return (0);
1864}
1865
1866static int
1867re_jumbo_newbuf(struct rl_softc *sc, int idx)
1868{
1869	struct mbuf		*m;
1870	struct rl_rxdesc	*rxd;
1871	bus_dma_segment_t	segs[1];
1872	bus_dmamap_t		map;
1873	struct rl_desc		*desc;
1874	uint32_t		cmdstat;
1875	int			error, nsegs;
1876
1877	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1878	if (m == NULL)
1879		return (ENOBUFS);
1880	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1881#ifdef RE_FIXUP_RX
1882	m_adj(m, RE_ETHER_ALIGN);
1883#endif
1884	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag,
1885	    sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
1886	if (error != 0) {
1887		m_freem(m);
1888		return (ENOBUFS);
1889	}
1890	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1891
1892	rxd = &sc->rl_ldata.rl_jrx_desc[idx];
1893	if (rxd->rx_m != NULL) {
1894		bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
1895		    BUS_DMASYNC_POSTREAD);
1896		bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap);
1897	}
1898
1899	rxd->rx_m = m;
1900	map = rxd->rx_dmamap;
1901	rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap;
1902	rxd->rx_size = segs[0].ds_len;
1903	sc->rl_ldata.rl_jrx_sparemap = map;
1904	bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
1905	    BUS_DMASYNC_PREREAD);
1906
1907	desc = &sc->rl_ldata.rl_rx_list[idx];
1908	desc->rl_vlanctl = 0;
1909	desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
1910	desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
1911	cmdstat = segs[0].ds_len;
1912	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1913		cmdstat |= RL_RDESC_CMD_EOR;
1914	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1915
1916	return (0);
1917}
1918
1919#ifdef RE_FIXUP_RX
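/*
 * Re-align a received frame for strict-alignment hosts. re_newbuf()
 * reserved RE_ETHER_ALIGN bytes of headroom for the chip's 64-bit
 * buffer alignment requirement; here we copy the data back by
 * RE_ETHER_ALIGN - ETHER_ALIGN (6) bytes, which leaves the 14-byte
 * ethernet header at a 2-byte offset so that the IP payload behind
 * it ends up word aligned.
 */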
1920static __inline void
1921re_fixup_rx(struct mbuf *m)
1922{
1923	int                     i;
1924	uint16_t                *src, *dst;
1925
1926	src = mtod(m, uint16_t *);
1927	dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src;
1928
1929	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1930		*dst++ = *src++;
1931
1932	m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
1933}
1934#endif
1935
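/*
 * Initialize the TX descriptor ring: clear all descriptors, mark
 * the last one with the EOR (end of ring) bit so the chip wraps,
 * and reset the producer/consumer indices and free count.
 */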
1936static int
1937re_tx_list_init(struct rl_softc *sc)
1938{
1939	struct rl_desc		*desc;
1940	int			i;
1941
1942	RL_LOCK_ASSERT(sc);
1943
1944	bzero(sc->rl_ldata.rl_tx_list,
1945	    sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
1946	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
1947		sc->rl_ldata.rl_tx_desc[i].tx_m = NULL;
1948	/* Set EOR. */
1949	desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
1950	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);
1951
1952	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
1953	    sc->rl_ldata.rl_tx_list_map,
1954	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1955
1956	sc->rl_ldata.rl_tx_prodidx = 0;
1957	sc->rl_ldata.rl_tx_considx = 0;
1958	sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
1959
1960	return (0);
1961}
1962
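/*
 * Initialize the RX descriptor ring, populating each slot with a
 * standard 2K cluster via re_newbuf().
 */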
1963static int
1964re_rx_list_init(struct rl_softc *sc)
1965{
1966	int			error, i;
1967
1968	bzero(sc->rl_ldata.rl_rx_list,
1969	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
1970	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1971		sc->rl_ldata.rl_rx_desc[i].rx_m = NULL;
1972		if ((error = re_newbuf(sc, i)) != 0)
1973			return (error);
1974	}
1975
1976	/* Flush the RX descriptors */
1977
1978	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
1979	    sc->rl_ldata.rl_rx_list_map,
1980	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1981
1982	sc->rl_ldata.rl_rx_prodidx = 0;
1983	sc->rl_head = sc->rl_tail = NULL;
1984	sc->rl_int_rx_act = 0;
1985
1986	return (0);
1987}
1988
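/*
 * Jumbo variant of re_rx_list_init(): the same descriptor ring is
 * populated with 9K (MJUM9BYTES) clusters via re_jumbo_newbuf().
 */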
1989static int
1990re_jrx_list_init(struct rl_softc *sc)
1991{
1992	int			error, i;
1993
1994	bzero(sc->rl_ldata.rl_rx_list,
1995	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
1996	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1997		sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL;
1998		if ((error = re_jumbo_newbuf(sc, i)) != 0)
1999			return (error);
2000	}
2001
2002	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2003	    sc->rl_ldata.rl_rx_list_map,
2004	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2005
2006	sc->rl_ldata.rl_rx_prodidx = 0;
2007	sc->rl_head = sc->rl_tail = NULL;
2008	sc->rl_int_rx_act = 0;
2009
2010	return (0);
2011}
2012
2013/*
2014 * RX handler for C+ and 8169. For the gigE chips, we support
2015 * the reception of jumbo frames that have been fragmented
2016 * across multiple 2K mbuf cluster buffers.
2017 */
2018static int
2019re_rxeof(struct rl_softc *sc, int *rx_npktsp)
2020{
2021	struct mbuf		*m;
2022	struct ifnet		*ifp;
2023	int			i, rxerr, total_len;
2024	struct rl_desc		*cur_rx;
2025	u_int32_t		rxstat, rxvlan;
2026	int			jumbo, maxpkt = 16, rx_npkts = 0;
2027
2028	RL_LOCK_ASSERT(sc);
2029
2030	ifp = sc->rl_ifp;
2031	if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
2032		jumbo = 1;
2033	else
2034		jumbo = 0;
2035
2036	/* Invalidate the descriptor memory */
2037
2038	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2039	    sc->rl_ldata.rl_rx_list_map,
2040	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2041
2042	for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
2043	    i = RL_RX_DESC_NXT(sc, i)) {
2044		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2045			break;
2046		cur_rx = &sc->rl_ldata.rl_rx_list[i];
2047		rxstat = le32toh(cur_rx->rl_cmdstat);
2048		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
2049			break;
2050		total_len = rxstat & sc->rl_rxlenmask;
2051		rxvlan = le32toh(cur_rx->rl_vlanctl);
2052		if (jumbo != 0)
2053			m = sc->rl_ldata.rl_jrx_desc[i].rx_m;
2054		else
2055			m = sc->rl_ldata.rl_rx_desc[i].rx_m;
2056
2057		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
2058		    (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
2059		    (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
2060			/*
2061			 * RTL8168C and later controllers do not
2062			 * support multi-fragment packets.
2063			 */
2064			re_discard_rxbuf(sc, i);
2065			continue;
2066		} else if ((rxstat & RL_RDESC_STAT_EOF) == 0) {
2067			if (re_newbuf(sc, i) != 0) {
2068				/*
2069				 * If this is part of a multi-fragment packet,
2070				 * discard all the pieces.
2071				 */
2072				if (sc->rl_head != NULL) {
2073					m_freem(sc->rl_head);
2074					sc->rl_head = sc->rl_tail = NULL;
2075				}
2076				re_discard_rxbuf(sc, i);
2077				continue;
2078			}
2079			m->m_len = RE_RX_DESC_BUFLEN;
2080			if (sc->rl_head == NULL)
2081				sc->rl_head = sc->rl_tail = m;
2082			else {
2083				m->m_flags &= ~M_PKTHDR;
2084				sc->rl_tail->m_next = m;
2085				sc->rl_tail = m;
2086			}
2087			continue;
2088		}
2089
2090		/*
2091		 * NOTE: for the 8139C+, the frame length field
2092		 * is always 12 bits in size, but for the gigE chips,
2093		 * it is 13 bits (since the max RX frame length is 16K).
2094		 * Unfortunately, all 32 bits in the status word
2095		 * were already used, so to make room for the extra
2096		 * length bit, RealTek took out the 'frame alignment
2097		 * error' bit and shifted the other status bits
2098		 * over one slot. The OWN, EOR, FS and LS bits are
2099		 * still in the same places. We have already extracted
2100		 * the frame length and checked the OWN bit, so rather
2101		 * than using an alternate bit mapping, we shift the
2102		 * status bits one space to the right so we can evaluate
2103		 * them using the 8169 status as though it was in the
2104		 * same format as that of the 8139C+.
2105		 */
2106		if (sc->rl_type == RL_8169)
2107			rxstat >>= 1;
2108
2109		/*
2110		 * If total_len > 2^13-1, both _RXERRSUM and _GIANT will be
2111		 * set, but if the CRC error bit is clear, it is still a valid frame.
2112		 */
2113		if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) {
2114			rxerr = 1;
2115			if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 &&
2116			    total_len > 8191 &&
2117			    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)
2118				rxerr = 0;
2119			if (rxerr != 0) {
2120				ifp->if_ierrors++;
2121				/*
2122				 * If this is part of a multi-fragment packet,
2123				 * discard all the pieces.
2124				 */
2125				if (sc->rl_head != NULL) {
2126					m_freem(sc->rl_head);
2127					sc->rl_head = sc->rl_tail = NULL;
2128				}
2129				re_discard_rxbuf(sc, i);
2130				continue;
2131			}
2132		}
2133
2134		/*
2135		 * If allocating a replacement mbuf fails,
2136		 * reload the current one.
2137		 */
2138		if (jumbo != 0)
2139			rxerr = re_jumbo_newbuf(sc, i);
2140		else
2141			rxerr = re_newbuf(sc, i);
2142		if (rxerr != 0) {
2143			ifp->if_iqdrops++;
2144			if (sc->rl_head != NULL) {
2145				m_freem(sc->rl_head);
2146				sc->rl_head = sc->rl_tail = NULL;
2147			}
2148			re_discard_rxbuf(sc, i);
2149			continue;
2150		}
2151
2152		if (sc->rl_head != NULL) {
2153			if (jumbo != 0)
2154				m->m_len = total_len;
2155			else {
2156				m->m_len = total_len % RE_RX_DESC_BUFLEN;
2157				if (m->m_len == 0)
2158					m->m_len = RE_RX_DESC_BUFLEN;
2159			}
2160			/*
2161			 * Special case: if there are 4 bytes or fewer
2162			 * in this buffer, the mbuf can be discarded:
2163			 * the last 4 bytes are the CRC, which we don't
2164			 * care about anyway.
2165			 */
2166			if (m->m_len <= ETHER_CRC_LEN) {
2167				sc->rl_tail->m_len -=
2168				    (ETHER_CRC_LEN - m->m_len);
2169				m_freem(m);
2170			} else {
2171				m->m_len -= ETHER_CRC_LEN;
2172				m->m_flags &= ~M_PKTHDR;
2173				sc->rl_tail->m_next = m;
2174			}
2175			m = sc->rl_head;
2176			sc->rl_head = sc->rl_tail = NULL;
2177			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
2178		} else
2179			m->m_pkthdr.len = m->m_len =
2180			    (total_len - ETHER_CRC_LEN);
2181
2182#ifdef RE_FIXUP_RX
2183		re_fixup_rx(m);
2184#endif
2185		ifp->if_ipackets++;
2186		m->m_pkthdr.rcvif = ifp;
2187
2188		/* Do RX checksumming if enabled */
2189
2190		if (ifp->if_capenable & IFCAP_RXCSUM) {
2191			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2192				/* Check IP header checksum */
2193				if (rxstat & RL_RDESC_STAT_PROTOID)
2194					m->m_pkthdr.csum_flags |=
2195					    CSUM_IP_CHECKED;
2196				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
2197					m->m_pkthdr.csum_flags |=
2198					    CSUM_IP_VALID;
2199
2200				/* Check TCP/UDP checksum */
2201				if ((RL_TCPPKT(rxstat) &&
2202				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2203				    (RL_UDPPKT(rxstat) &&
2204				     !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2205					m->m_pkthdr.csum_flags |=
2206						CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2207					m->m_pkthdr.csum_data = 0xffff;
2208				}
2209			} else {
2210				/*
2211				 * RTL8168C/RTL8168CP/RTL8111C/RTL8111CP
2212				 */
2213				if ((rxstat & RL_RDESC_STAT_PROTOID) &&
2214				    (rxvlan & RL_RDESC_IPV4))
2215					m->m_pkthdr.csum_flags |=
2216					    CSUM_IP_CHECKED;
2217				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) &&
2218				    (rxvlan & RL_RDESC_IPV4))
2219					m->m_pkthdr.csum_flags |=
2220					    CSUM_IP_VALID;
2221				if (((rxstat & RL_RDESC_STAT_TCP) &&
2222				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2223				    ((rxstat & RL_RDESC_STAT_UDP) &&
2224				    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2225					m->m_pkthdr.csum_flags |=
2226						CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2227					m->m_pkthdr.csum_data = 0xffff;
2228				}
2229			}
2230		}
2231		maxpkt--;
2232		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
2233			m->m_pkthdr.ether_vtag =
2234			    bswap16((rxvlan & RL_RDESC_VLANCTL_DATA));
2235			m->m_flags |= M_VLANTAG;
2236		}
2237		RL_UNLOCK(sc);
2238		(*ifp->if_input)(ifp, m);
2239		RL_LOCK(sc);
2240		rx_npkts++;
2241	}
2242
2243	/* Flush the RX DMA ring */
2244
2245	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2246	    sc->rl_ldata.rl_rx_list_map,
2247	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2248
2249	sc->rl_ldata.rl_rx_prodidx = i;
2250
2251	if (rx_npktsp != NULL)
2252		*rx_npktsp = rx_npkts;
2253	if (maxpkt)
2254		return (EAGAIN);
2255
2256	return (0);
2257}
2258
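/*
 * TX completion handler: walk the ring from the consumer index,
 * reclaiming descriptors the chip has released (OWN bit clear).
 * Mbufs are stashed in (and freed from) the last descriptor of each
 * frame, which is also the only descriptor with valid status bits.
 */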
2259static void
2260re_txeof(struct rl_softc *sc)
2261{
2262	struct ifnet		*ifp;
2263	struct rl_txdesc	*txd;
2264	u_int32_t		txstat;
2265	int			cons;
2266
2267	cons = sc->rl_ldata.rl_tx_considx;
2268	if (cons == sc->rl_ldata.rl_tx_prodidx)
2269		return;
2270
2271	ifp = sc->rl_ifp;
2272	/* Invalidate the TX descriptor list */
2273	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2274	    sc->rl_ldata.rl_tx_list_map,
2275	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2276
2277	for (; cons != sc->rl_ldata.rl_tx_prodidx;
2278	    cons = RL_TX_DESC_NXT(sc, cons)) {
2279		txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat);
2280		if (txstat & RL_TDESC_STAT_OWN)
2281			break;
2282		/*
2283		 * We only stash mbufs in the last descriptor
2284		 * in a fragment chain, which also happens to
2285		 * be the only place where the TX status bits
2286		 * are valid.
2287		 */
2288		if (txstat & RL_TDESC_CMD_EOF) {
2289			txd = &sc->rl_ldata.rl_tx_desc[cons];
2290			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
2291			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2292			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
2293			    txd->tx_dmamap);
2294			KASSERT(txd->tx_m != NULL,
2295			    ("%s: freeing NULL mbufs!", __func__));
2296			m_freem(txd->tx_m);
2297			txd->tx_m = NULL;
2298			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
2299			    RL_TDESC_STAT_COLCNT))
2300				ifp->if_collisions++;
2301			if (txstat & RL_TDESC_STAT_TXERRSUM)
2302				ifp->if_oerrors++;
2303			else
2304				ifp->if_opackets++;
2305		}
2306		sc->rl_ldata.rl_tx_free++;
2307		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2308	}
2309	sc->rl_ldata.rl_tx_considx = cons;
2310
2311	/* No changes made to the TX ring, so no flush needed */
2312
2313	if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) {
2314#ifdef RE_TX_MODERATION
2315		/*
2316		 * If not all descriptors have been reaped yet, reload
2317		 * the timer so that we will eventually get another
2318		 * interrupt that will cause us to re-enter this routine.
2319		 * This is done in case the transmitter has gone idle.
2320		 */
2321		CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2322#endif
2323	} else
2324		sc->rl_watchdog_timer = 0;
2325}
2326
2327static void
2328re_tick(void *xsc)
2329{
2330	struct rl_softc		*sc;
2331	struct mii_data		*mii;
2332
2333	sc = xsc;
2334
2335	RL_LOCK_ASSERT(sc);
2336
2337	mii = device_get_softc(sc->rl_miibus);
2338	mii_tick(mii);
2339	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
2340		re_miibus_statchg(sc->rl_dev);
2341	/*
2342	 * Reclaim transmitted frames here. Technically this is not
2343	 * necessary, but it ensures periodic reclamation regardless
2344	 * of the Tx completion interrupt, which seems to be lost on
2345	 * PCIe based controllers under certain situations.
2346	 */
2347	re_txeof(sc);
2348	re_watchdog(sc);
2349	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
2350}
2351
2352#ifdef DEVICE_POLLING
2353static int
2354re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2355{
2356	struct rl_softc *sc = ifp->if_softc;
2357	int rx_npkts = 0;
2358
2359	RL_LOCK(sc);
2360	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2361		rx_npkts = re_poll_locked(ifp, cmd, count);
2362	RL_UNLOCK(sc);
2363	return (rx_npkts);
2364}
2365
2366static int
2367re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2368{
2369	struct rl_softc *sc = ifp->if_softc;
2370	int rx_npkts;
2371
2372	RL_LOCK_ASSERT(sc);
2373
2374	sc->rxcycles = count;
2375	re_rxeof(sc, &rx_npkts);
2376	re_txeof(sc);
2377
2378	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2379		re_start_locked(ifp);
2380
2381	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
2382		u_int16_t       status;
2383
2384		status = CSR_READ_2(sc, RL_ISR);
2385		if (status == 0xffff)
2386			return (rx_npkts);
2387		if (status)
2388			CSR_WRITE_2(sc, RL_ISR, status);
2389		if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2390		    (sc->rl_flags & RL_FLAG_PCIE))
2391			CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2392
2393		/*
2394		 * XXX check behaviour on receiver stalls.
2395		 */
2396
2397		if (status & RL_ISR_SYSTEM_ERR) {
2398			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2399			re_init_locked(sc);
2400		}
2401	}
2402	return (rx_npkts);
2403}
2404#endif /* DEVICE_POLLING */
2405
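/*
 * Interrupt filter used in INTx mode. It runs in primary interrupt
 * context, so it only checks whether the interrupt is ours, masks
 * further interrupts and defers the real work to re_int_task() via
 * the fast taskqueue.
 */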
2406static int
2407re_intr(void *arg)
2408{
2409	struct rl_softc		*sc;
2410	uint16_t		status;
2411
2412	sc = arg;
2413
2414	status = CSR_READ_2(sc, RL_ISR);
2415	if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
2416		return (FILTER_STRAY);
2417	CSR_WRITE_2(sc, RL_IMR, 0);
2418
2419	taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
2420
2421	return (FILTER_HANDLED);
2422}
2423
2424static void
2425re_int_task(void *arg, int npending)
2426{
2427	struct rl_softc		*sc;
2428	struct ifnet		*ifp;
2429	u_int16_t		status;
2430	int			rval = 0;
2431
2432	sc = arg;
2433	ifp = sc->rl_ifp;
2434
2435	RL_LOCK(sc);
2436
2437	status = CSR_READ_2(sc, RL_ISR);
2438	CSR_WRITE_2(sc, RL_ISR, status);
2439
2440	if (sc->suspended ||
2441	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2442		RL_UNLOCK(sc);
2443		return;
2444	}
2445
2446#ifdef DEVICE_POLLING
2447	if (ifp->if_capenable & IFCAP_POLLING) {
2448		RL_UNLOCK(sc);
2449		return;
2450	}
2451#endif
2452
2453	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
2454		rval = re_rxeof(sc, NULL);
2455
2456	/*
2457	 * Some chips will ignore a second TX request issued
2458	 * while an existing transmission is in progress. If
2459	 * the transmitter goes idle but there are still
2460	 * packets waiting to be sent, we need to restart the
2461	 * channel here to flush them out. This only seems to
2462	 * be required with the PCIe devices.
2463	 */
2464	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2465	    (sc->rl_flags & RL_FLAG_PCIE))
2466		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2467	if (status & (
2468#ifdef RE_TX_MODERATION
2469	    RL_ISR_TIMEOUT_EXPIRED|
2470#else
2471	    RL_ISR_TX_OK|
2472#endif
2473	    RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
2474		re_txeof(sc);
2475
2476	if (status & RL_ISR_SYSTEM_ERR) {
2477		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2478		re_init_locked(sc);
2479	}
2480
2481	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2482		re_start_locked(ifp);
2483
2484	RL_UNLOCK(sc);
2485
2486	if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
2487		taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
2488		return;
2489	}
2490
2491	CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
2492}
2493
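/*
 * Interrupt handler used in MSI/MSI-X mode. Unlike the INTx path
 * this does all of its work directly in the handler; it also drives
 * the one-shot timer based RX interrupt moderation scheme
 * (rl_int_rx_mod/rl_int_rx_act) set up via the int_rx_mod sysctl.
 */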
2494static void
2495re_intr_msi(void *xsc)
2496{
2497	struct rl_softc		*sc;
2498	struct ifnet		*ifp;
2499	uint16_t		intrs, status;
2500
2501	sc = xsc;
2502	RL_LOCK(sc);
2503
2504	ifp = sc->rl_ifp;
2505#ifdef DEVICE_POLLING
2506	if (ifp->if_capenable & IFCAP_POLLING) {
2507		RL_UNLOCK(sc);
2508		return;
2509	}
2510#endif
2511	/* Disable interrupts. */
2512	CSR_WRITE_2(sc, RL_IMR, 0);
2513	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2514		RL_UNLOCK(sc);
2515		return;
2516	}
2517
2518	intrs = RL_INTRS_CPLUS;
2519	status = CSR_READ_2(sc, RL_ISR);
2520	CSR_WRITE_2(sc, RL_ISR, status);
2521	if (sc->rl_int_rx_act > 0) {
2522		intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2523		    RL_ISR_RX_OVERRUN);
2524		status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2525		    RL_ISR_RX_OVERRUN);
2526	}
2527
2528	if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR |
2529	    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) {
2530		re_rxeof(sc, NULL);
2531		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2532			if (sc->rl_int_rx_mod != 0 &&
2533			    (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR |
2534			    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) {
2535				/* Rearm one-shot timer. */
2536				CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2537				intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR |
2538				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN);
2539				sc->rl_int_rx_act = 1;
2540			} else {
2541				intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR |
2542				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN;
2543				sc->rl_int_rx_act = 0;
2544			}
2545		}
2546	}
2547
2548	/*
2549	 * Some chips will ignore a second TX request issued
2550	 * while an existing transmission is in progress. If
2551	 * the transmitter goes idle but there are still
2552	 * packets waiting to be sent, we need to restart the
2553	 * channel here to flush them out. This only seems to
2554	 * be required with the PCIe devices.
2555	 */
2556	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2557	    (sc->rl_flags & RL_FLAG_PCIE))
2558		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2559	if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL))
2560		re_txeof(sc);
2561
2562	if (status & RL_ISR_SYSTEM_ERR) {
2563		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2564		re_init_locked(sc);
2565	}
2566
2567	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2568		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2569			re_start_locked(ifp);
2570		CSR_WRITE_2(sc, RL_IMR, intrs);
2571	}
2572	RL_UNLOCK(sc);
2573}
2574
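/*
 * Encapsulate an mbuf chain into the TX ring. A frame may occupy up
 * to RL_NTXSEGS descriptors; the OWN bit of the first descriptor is
 * set last so the chip never sees a partially built chain.
 */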
2575static int
2576re_encap(struct rl_softc *sc, struct mbuf **m_head)
2577{
2578	struct rl_txdesc	*txd, *txd_last;
2579	bus_dma_segment_t	segs[RL_NTXSEGS];
2580	bus_dmamap_t		map;
2581	struct mbuf		*m_new;
2582	struct rl_desc		*desc;
2583	int			nsegs, prod;
2584	int			i, error, ei, si;
2585	int			padlen;
2586	uint32_t		cmdstat, csum_flags, vlanctl;
2587
2588	RL_LOCK_ASSERT(sc);
2589	M_ASSERTPKTHDR((*m_head));
2590
2591	/*
2592	 * With some of the RealTek chips, using the checksum offload
2593	 * support in conjunction with the autopadding feature results
2594	 * in the transmission of corrupt frames. For example, if we
2595	 * need to send a really small IP fragment that's less than 60
2596	 * bytes in size, and IP header checksumming is enabled, the
2597	 * resulting ethernet frame that appears on the wire will
2598	 * have garbled payload. To work around this, if TX IP checksum
2599	 * have a garbled payload. To work around this, if TX IP checksum
2600	 * to the minimum ethernet frame size.
2601	 */
2602	if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
2603	    (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
2604	    ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) {
2605		padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
2606		if (M_WRITABLE(*m_head) == 0) {
2607			/* Get a writable copy. */
2608			m_new = m_dup(*m_head, M_DONTWAIT);
2609			m_freem(*m_head);
2610			if (m_new == NULL) {
2611				*m_head = NULL;
2612				return (ENOBUFS);
2613			}
2614			*m_head = m_new;
2615		}
2616		if ((*m_head)->m_next != NULL ||
2617		    M_TRAILINGSPACE(*m_head) < padlen) {
2618			m_new = m_defrag(*m_head, M_DONTWAIT);
2619			if (m_new == NULL) {
2620				m_freem(*m_head);
2621				*m_head = NULL;
2622				return (ENOBUFS);
2623			}
2624		} else
2625			m_new = *m_head;
2626
2627		/*
2628		 * Manually pad short frames, and zero the pad space
2629		 * to avoid leaking data.
2630		 */
2631		bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
2632		m_new->m_pkthdr.len += padlen;
2633		m_new->m_len = m_new->m_pkthdr.len;
2634		*m_head = m_new;
2635	}
2636
2637	prod = sc->rl_ldata.rl_tx_prodidx;
2638	txd = &sc->rl_ldata.rl_tx_desc[prod];
2639	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2640	    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2641	if (error == EFBIG) {
2642		m_new = m_collapse(*m_head, M_DONTWAIT, RL_NTXSEGS);
2643		if (m_new == NULL) {
2644			m_freem(*m_head);
2645			*m_head = NULL;
2646			return (ENOBUFS);
2647		}
2648		*m_head = m_new;
2649		error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
2650		    txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2651		if (error != 0) {
2652			m_freem(*m_head);
2653			*m_head = NULL;
2654			return (error);
2655		}
2656	} else if (error != 0)
2657		return (error);
2658	if (nsegs == 0) {
2659		m_freem(*m_head);
2660		*m_head = NULL;
2661		return (EIO);
2662	}
2663
2664	/* Check for number of available descriptors. */
2665	if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
2666		bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
2667		return (ENOBUFS);
2668	}
2669
2670	bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2671	    BUS_DMASYNC_PREWRITE);
2672
2673	/*
2674	 * Set up checksum offload. Note: checksum offload bits must
2675	 * appear in all descriptors of a multi-descriptor transmit
2676	 * attempt; testing done with an 8169 chip shows this is a
2677	 * hard requirement.
2678	 */
2679	vlanctl = 0;
2680	csum_flags = 0;
2681	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2682		if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) {
2683			csum_flags |= RL_TDESC_CMD_LGSEND;
2684			vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2685			    RL_TDESC_CMD_MSSVALV2_SHIFT);
2686		} else {
2687			csum_flags |= RL_TDESC_CMD_LGSEND |
2688			    ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2689			    RL_TDESC_CMD_MSSVAL_SHIFT);
2690		}
2691	} else {
2692		/*
2693		 * Unconditionally enable IP checksum if TCP or UDP
2694		 * checksum is required; otherwise, the TCP/UDP checksum
2695		 * offload does not take effect.
2696		 */
2697		if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) {
2698			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2699				csum_flags |= RL_TDESC_CMD_IPCSUM;
2700				if (((*m_head)->m_pkthdr.csum_flags &
2701				    CSUM_TCP) != 0)
2702					csum_flags |= RL_TDESC_CMD_TCPCSUM;
2703				if (((*m_head)->m_pkthdr.csum_flags &
2704				    CSUM_UDP) != 0)
2705					csum_flags |= RL_TDESC_CMD_UDPCSUM;
2706			} else {
2707				vlanctl |= RL_TDESC_CMD_IPCSUMV2;
2708				if (((*m_head)->m_pkthdr.csum_flags &
2709				    CSUM_TCP) != 0)
2710					vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
2711				if (((*m_head)->m_pkthdr.csum_flags &
2712				    CSUM_UDP) != 0)
2713					vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
2714			}
2715		}
2716	}
2717
2718	/*
2719	 * Set up hardware VLAN tagging. Note: vlan tag info must
2720	 * appear in all descriptors of a multi-descriptor
2721	 * transmission attempt.
2722	 */
2723	if ((*m_head)->m_flags & M_VLANTAG)
2724		vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) |
2725		    RL_TDESC_VLANCTL_TAG;
2726
2727	si = prod;
2728	for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) {
2729		desc = &sc->rl_ldata.rl_tx_list[prod];
2730		desc->rl_vlanctl = htole32(vlanctl);
2731		desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
2732		desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
2733		cmdstat = segs[i].ds_len;
2734		if (i != 0)
2735			cmdstat |= RL_TDESC_CMD_OWN;
2736		if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1)
2737			cmdstat |= RL_TDESC_CMD_EOR;
2738		desc->rl_cmdstat = htole32(cmdstat | csum_flags);
2739		sc->rl_ldata.rl_tx_free--;
2740	}
2741	/* Update producer index. */
2742	sc->rl_ldata.rl_tx_prodidx = prod;
2743
2744	/* Set EOF on the last descriptor. */
2745	ei = RL_TX_DESC_PRV(sc, prod);
2746	desc = &sc->rl_ldata.rl_tx_list[ei];
2747	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
2748
2749	desc = &sc->rl_ldata.rl_tx_list[si];
2750	/* Set SOF and transfer ownership of packet to the chip. */
2751	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF);
2752
2753	/*
2754	 * Ensure that the map for this transmission
2755	 * is placed at the array index of the last descriptor
2756	 * in this chain.  (Swap last and first dmamaps.)
2757	 */
2758	txd_last = &sc->rl_ldata.rl_tx_desc[ei];
2759	map = txd->tx_dmamap;
2760	txd->tx_dmamap = txd_last->tx_dmamap;
2761	txd_last->tx_dmamap = map;
2762	txd_last->tx_m = *m_head;
2763
2764	return (0);
2765}
2766
2767static void
2768re_start(struct ifnet *ifp)
2769{
2770	struct rl_softc		*sc;
2771
2772	sc = ifp->if_softc;
2773	RL_LOCK(sc);
2774	re_start_locked(ifp);
2775	RL_UNLOCK(sc);
2776}
2777
2778/*
2779 * Main transmit routine for C+ and gigE NICs.
2780 */
2781static void
2782re_start_locked(struct ifnet *ifp)
2783{
2784	struct rl_softc		*sc;
2785	struct mbuf		*m_head;
2786	int			queued;
2787
2788	sc = ifp->if_softc;
2789
2790	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2791	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
2792		return;
2793
2794	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2795	    sc->rl_ldata.rl_tx_free > 1;) {
2796		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2797		if (m_head == NULL)
2798			break;
2799
2800		if (re_encap(sc, &m_head) != 0) {
2801			if (m_head == NULL)
2802				break;
2803			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2804			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2805			break;
2806		}
2807
2808		/*
2809		 * If there's a BPF listener, bounce a copy of this frame
2810		 * to him.
2811		 * to it.
2812		ETHER_BPF_MTAP(ifp, m_head);
2813
2814		queued++;
2815	}
2816
2817	if (queued == 0) {
2818#ifdef RE_TX_MODERATION
2819		if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt)
2820			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2821#endif
2822		return;
2823	}
2824
2825	/* Flush the TX descriptors */
2826
2827	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2828	    sc->rl_ldata.rl_tx_list_map,
2829	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2830
2831	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2832
2833#ifdef RE_TX_MODERATION
2834	/*
2835	 * Use the countdown timer for interrupt moderation.
2836	 * 'TX done' interrupts are disabled. Instead, we reset the
2837	 * countdown timer, which will begin counting until it hits
2838	 * the value in the TIMERINT register, and then trigger an
2839	 * interrupt. Each time we write to the TIMERCNT register,
2840	 * the timer count is reset to 0.
2841	 */
2842	CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2843#endif
2844
2845	/*
2846	 * Set a timeout in case the chip goes out to lunch.
2847	 */
2848	sc->rl_watchdog_timer = 5;
2849}
2850
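/*
 * Toggle jumbo frame support on RL_FLAG_JUMBOV2 controllers via the
 * CFG3/CFG4 jumbo enable bits, and adjust the PCIe maximum read
 * request size, which has to be lowered to 512 bytes while jumbo
 * frames are enabled on most revisions.
 */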
2851static void
2852re_set_jumbo(struct rl_softc *sc, int jumbo)
2853{
2854
2855	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) {
2856		pci_set_max_read_req(sc->rl_dev, 4096);
2857		return;
2858	}
2859
2860	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
2861	if (jumbo != 0) {
2862		CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) |
2863		    RL_CFG3_JUMBO_EN0);
2864		switch (sc->rl_hwrev->rl_rev) {
2865		case RL_HWREV_8168DP:
2866			break;
2867		case RL_HWREV_8168E:
2868			CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
2869			    0x01);
2870			break;
2871		default:
2872			CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
2873			    RL_CFG4_JUMBO_EN1);
2874		}
2875	} else {
2876		CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) &
2877		    ~RL_CFG3_JUMBO_EN0);
2878		switch (sc->rl_hwrev->rl_rev) {
2879		case RL_HWREV_8168DP:
2880			break;
2881		case RL_HWREV_8168E:
2882			CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) &
2883			    ~0x01);
2884			break;
2885		default:
2886			CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) &
2887			    ~RL_CFG4_JUMBO_EN1);
2888		}
2889	}
2890	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2891
2892	switch (sc->rl_hwrev->rl_rev) {
2893	case RL_HWREV_8168DP:
2894		pci_set_max_read_req(sc->rl_dev, 4096);
2895		break;
2896	default:
2897		if (jumbo != 0)
2898			pci_set_max_read_req(sc->rl_dev, 512);
2899		else
2900			pci_set_max_read_req(sc->rl_dev, 4096);
2901	}
2902}
2903
2904static void
2905re_init(void *xsc)
2906{
2907	struct rl_softc		*sc = xsc;
2908
2909	RL_LOCK(sc);
2910	re_init_locked(sc);
2911	RL_UNLOCK(sc);
2912}
2913
2914static void
2915re_init_locked(struct rl_softc *sc)
2916{
2917	struct ifnet		*ifp = sc->rl_ifp;
2918	struct mii_data		*mii;
2919	uint32_t		reg;
2920	uint16_t		cfg;
2921	union {
2922		uint32_t align_dummy;
2923		u_char eaddr[ETHER_ADDR_LEN];
2924	} eaddr;
2925
2926	RL_LOCK_ASSERT(sc);
2927
2928	mii = device_get_softc(sc->rl_miibus);
2929
2930	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2931		return;
2932
2933	/*
2934	 * Cancel pending I/O and free all RX/TX buffers.
2935	 */
2936	re_stop(sc);
2937
2938	/* Put controller into known state. */
2939	re_reset(sc);
2940
2941	/*
2942	 * For C+ mode, initialize the RX descriptors and mbufs.
2943	 */
2944	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
2945		if (ifp->if_mtu > RL_MTU) {
2946			if (re_jrx_list_init(sc) != 0) {
2947				device_printf(sc->rl_dev,
2948				    "no memory for jumbo RX buffers\n");
2949				re_stop(sc);
2950				return;
2951			}
2952			/* Disable checksum offloading for jumbo frames. */
2953			ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4);
2954			ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO);
2955		} else {
2956			if (re_rx_list_init(sc) != 0) {
2957				device_printf(sc->rl_dev,
2958				    "no memory for RX buffers\n");
2959				re_stop(sc);
2960				return;
2961			}
2962		}
2963		re_set_jumbo(sc, ifp->if_mtu > RL_MTU);
2964	} else {
2965		if (re_rx_list_init(sc) != 0) {
2966			device_printf(sc->rl_dev, "no memory for RX buffers\n");
2967			re_stop(sc);
2968			return;
2969		}
2970		if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
2971		    pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) {
2972			if (ifp->if_mtu > RL_MTU)
2973				pci_set_max_read_req(sc->rl_dev, 512);
2974			else
2975				pci_set_max_read_req(sc->rl_dev, 4096);
2976		}
2977	}
2978	re_tx_list_init(sc);
2979
2980	/*
2981	 * Enable C+ RX and TX mode, as well as VLAN stripping and
2982	 * RX checksum offload. We must configure the C+ register
2983	 * before all others.
2984	 */
2985	cfg = RL_CPLUSCMD_PCI_MRW;
2986	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2987		cfg |= RL_CPLUSCMD_RXCSUM_ENB;
2988	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2989		cfg |= RL_CPLUSCMD_VLANSTRIP;
2990	if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) {
2991		cfg |= RL_CPLUSCMD_MACSTAT_DIS;
2992		/* XXX magic. */
2993		cfg |= 0x0001;
2994	} else
2995		cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB;
2996	CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
2997	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC ||
2998	    sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) {
2999		reg = 0x000fff00;
3000		if ((CSR_READ_1(sc, RL_CFG2) & RL_CFG2_PCI66MHZ) != 0)
3001			reg |= 0x000000ff;
3002		if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE)
3003			reg |= 0x00f00000;
3004		CSR_WRITE_4(sc, 0x7c, reg);
3005		/* Disable interrupt mitigation. */
3006		CSR_WRITE_2(sc, 0xe2, 0);
3007	}
3008	/*
3009	 * Disable TSO if interface MTU size is greater than MSS
3010	 * allowed in controller.
3011	 */
3012	if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) {
3013		ifp->if_capenable &= ~IFCAP_TSO4;
3014		ifp->if_hwassist &= ~CSUM_TSO;
3015	}
3016
3017	/*
3018	 * Init our MAC address.  Even though the chipset
3019	 * documentation doesn't mention it, we need to enter "Config
3020	 * register write enable" mode to modify the ID registers.
3021	 */
3022	/* Copy MAC address on stack to align. */
3023	bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN);
3024	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
3025	CSR_WRITE_4(sc, RL_IDR0,
3026	    htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
3027	CSR_WRITE_4(sc, RL_IDR4,
3028	    htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
3029	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3030
3031	/*
3032	 * Load the addresses of the RX and TX lists into the chip.
3033	 */
3034
3035	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
3036	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
3037	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
3038	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));
3039
3040	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
3041	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
3042	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
3043	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));
3044
3045	/*
3046	 * Enable transmit and receive.
3047	 */
3048	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
3049
3050	/*
3051	 * Set the initial TX configuration.
3052	 */
3053	if (sc->rl_testmode) {
3054		if (sc->rl_type == RL_8169)
3055			CSR_WRITE_4(sc, RL_TXCFG,
3056			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
3057		else
3058			CSR_WRITE_4(sc, RL_TXCFG,
3059			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
3060	} else
3061		CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
3062
3063	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
3064
3065	/*
3066	 * Set the initial RX configuration.
3067	 */
3068	re_set_rxmode(sc);
3069
3070	/* Configure interrupt moderation. */
3071	if (sc->rl_type == RL_8169) {
3072		/* Magic from vendor. */
3073		CSR_WRITE_2(sc, RL_INTRMOD, 0x5100);
3074	}
3075
3076#ifdef DEVICE_POLLING
3077	/*
3078	 * Disable interrupts if we are polling.
3079	 */
3080	if (ifp->if_capenable & IFCAP_POLLING)
3081		CSR_WRITE_2(sc, RL_IMR, 0);
3082	else	/* otherwise ... */
3083#endif
3084
3085	/*
3086	 * Enable interrupts.
3087	 */
3088	if (sc->rl_testmode)
3089		CSR_WRITE_2(sc, RL_IMR, 0);
3090	else
3091		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3092	CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS);
3093
3094	/* Set initial TX threshold */
3095	sc->rl_txthresh = RL_TX_THRESH_INIT;
3096
3097	/* Start RX/TX process. */
3098	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
3099#ifdef notdef
3100	/* Enable receiver and transmitter. */
3101	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
3102#endif
3103
3104	/*
3105	 * Initialize the timer interrupt register so that
3106	 * a timer interrupt will be generated once the timer
3107	 * reaches a certain number of ticks. The timer is
3108	 * reloaded on each transmit.
3109	 */
3110#ifdef RE_TX_MODERATION
3111	/*
3112	 * Use the timer interrupt register for TX interrupt
3113	 * moderation, which dramatically improves the TX frame rate.
3114	 */
3115	if (sc->rl_type == RL_8169)
3116		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
3117	else
3118		CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
3119#else
3120	/*
3121	 * Use the timer interrupt register for RX interrupt
3122	 * moderation.
3123	 */
3124	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
3125	    intr_filter == 0) {
3126		if (sc->rl_type == RL_8169)
3127			CSR_WRITE_4(sc, RL_TIMERINT_8169,
3128			    RL_USECS(sc->rl_int_rx_mod));
3129	} else {
3130		if (sc->rl_type == RL_8169)
3131			CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0));
3132	}
3133#endif
3134
3135	/*
3136	 * For 8169 gigE NICs, set the max allowed RX packet
3137	 * size so we can receive jumbo frames.
3138	 */
3139	if (sc->rl_type == RL_8169) {
3140		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3141			/*
3142			 * For controllers that use the new jumbo frame scheme,
3143			 * set the maximum jumbo frame size depending on the
3144			 * controller revision.
3145			 */
3146			if (ifp->if_mtu > RL_MTU)
3147				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3148				    sc->rl_hwrev->rl_max_mtu +
3149				    ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN +
3150				    ETHER_CRC_LEN);
3151			else
3152				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3153				    RE_RX_DESC_BUFLEN);
3154		} else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3155		    sc->rl_hwrev->rl_max_mtu == RL_MTU) {
3156			/* RTL810x has no jumbo frame support. */
3157			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
3158		} else
3159			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
3160	}
3161
3162	if (sc->rl_testmode)
3163		return;
3164
3165	mii_mediachg(mii);
3166
3167	CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD);
3168
3169	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3170	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3171
3172	sc->rl_flags &= ~RL_FLAG_LINK;
3173	sc->rl_watchdog_timer = 0;
3174	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
3175}
3176
3177/*
3178 * Set media options.
3179 */
3180static int
3181re_ifmedia_upd(struct ifnet *ifp)
3182{
3183	struct rl_softc		*sc;
3184	struct mii_data		*mii;
3185	int			error;
3186
3187	sc = ifp->if_softc;
3188	mii = device_get_softc(sc->rl_miibus);
3189	RL_LOCK(sc);
3190	error = mii_mediachg(mii);
3191	RL_UNLOCK(sc);
3192
3193	return (error);
3194}
3195
3196/*
3197 * Report current media status.
3198 */
3199static void
3200re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3201{
3202	struct rl_softc		*sc;
3203	struct mii_data		*mii;
3204
3205	sc = ifp->if_softc;
3206	mii = device_get_softc(sc->rl_miibus);
3207
3208	RL_LOCK(sc);
3209	mii_pollstat(mii);
3210	RL_UNLOCK(sc);
3211	ifmr->ifm_active = mii->mii_media_active;
3212	ifmr->ifm_status = mii->mii_media_status;
3213}
3214
3215static int
3216re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3217{
3218	struct rl_softc		*sc = ifp->if_softc;
3219	struct ifreq		*ifr = (struct ifreq *) data;
3220	struct mii_data		*mii;
3221	uint32_t		rev;
3222	int			error = 0;
3223
3224	switch (command) {
3225	case SIOCSIFMTU:
3226		if (ifr->ifr_mtu < ETHERMIN ||
3227		    ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu) {
3228			error = EINVAL;
3229			break;
3230		}
3231		RL_LOCK(sc);
3232		if (ifp->if_mtu != ifr->ifr_mtu) {
3233			ifp->if_mtu = ifr->ifr_mtu;
3234			if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3235			    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3236				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3237				re_init_locked(sc);
3238			}
3239			if (ifp->if_mtu > RL_TSO_MTU &&
3240			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
3241				ifp->if_capenable &= ~(IFCAP_TSO4 |
3242				    IFCAP_VLAN_HWTSO);
3243				ifp->if_hwassist &= ~CSUM_TSO;
3244			}
3245			VLAN_CAPABILITIES(ifp);
3246		}
3247		RL_UNLOCK(sc);
3248		break;
3249	case SIOCSIFFLAGS:
3250		RL_LOCK(sc);
3251		if ((ifp->if_flags & IFF_UP) != 0) {
3252			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3253				if (((ifp->if_flags ^ sc->rl_if_flags)
3254				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3255					re_set_rxmode(sc);
3256			} else
3257				re_init_locked(sc);
3258		} else {
3259			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3260				re_stop(sc);
3261		}
3262		sc->rl_if_flags = ifp->if_flags;
3263		RL_UNLOCK(sc);
3264		break;
3265	case SIOCADDMULTI:
3266	case SIOCDELMULTI:
3267		RL_LOCK(sc);
3268		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3269			re_set_rxmode(sc);
3270		RL_UNLOCK(sc);
3271		break;
3272	case SIOCGIFMEDIA:
3273	case SIOCSIFMEDIA:
3274		mii = device_get_softc(sc->rl_miibus);
3275		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
3276		break;
3277	case SIOCSIFCAP:
3278	    {
3279		int mask, reinit;
3280
3281		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3282		reinit = 0;
3283#ifdef DEVICE_POLLING
3284		if (mask & IFCAP_POLLING) {
3285			if (ifr->ifr_reqcap & IFCAP_POLLING) {
3286				error = ether_poll_register(re_poll, ifp);
3287				if (error)
3288					return (error);
3289				RL_LOCK(sc);
3290				/* Disable interrupts */
3291				CSR_WRITE_2(sc, RL_IMR, 0x0000);
3292				ifp->if_capenable |= IFCAP_POLLING;
3293				RL_UNLOCK(sc);
3294			} else {
3295				error = ether_poll_deregister(ifp);
3296				/* Enable interrupts. */
3297				RL_LOCK(sc);
3298				CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3299				ifp->if_capenable &= ~IFCAP_POLLING;
3300				RL_UNLOCK(sc);
3301			}
3302		}
3303#endif /* DEVICE_POLLING */
3304		if ((mask & IFCAP_TXCSUM) != 0 &&
3305		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
3306			ifp->if_capenable ^= IFCAP_TXCSUM;
3307			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) {
3308				rev = sc->rl_hwrev->rl_rev;
3309				if (rev == RL_HWREV_8168C ||
3310				    rev == RL_HWREV_8168C_SPIN2)
3311					ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
3312				else
3313					ifp->if_hwassist |= RE_CSUM_FEATURES;
3314			} else
3315				ifp->if_hwassist &= ~RE_CSUM_FEATURES;
3316			reinit = 1;
3317		}
3318		if ((mask & IFCAP_RXCSUM) != 0 &&
3319		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
3320			ifp->if_capenable ^= IFCAP_RXCSUM;
3321			reinit = 1;
3322		}
3323		if ((mask & IFCAP_TSO4) != 0 &&
3324		    (ifp->if_capabilities & IFCAP_TSO) != 0) {
3325			ifp->if_capenable ^= IFCAP_TSO4;
3326			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
3327				ifp->if_hwassist |= CSUM_TSO;
3328			else
3329				ifp->if_hwassist &= ~CSUM_TSO;
3330			if (ifp->if_mtu > RL_TSO_MTU &&
3331			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
3332				ifp->if_capenable &= ~IFCAP_TSO4;
3333				ifp->if_hwassist &= ~CSUM_TSO;
3334			}
3335		}
3336		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
3337		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
3338			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3339		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
3340		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
3341			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3342			/* TSO over VLAN requires VLAN hardware tagging. */
3343			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
3344				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
3345			reinit = 1;
3346		}
3347		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3348		    (mask & (IFCAP_HWCSUM | IFCAP_TSO4 |
3349		    IFCAP_VLAN_HWTSO)) != 0)
3350			reinit = 1;
3351		if ((mask & IFCAP_WOL) != 0 &&
3352		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
3353			if ((mask & IFCAP_WOL_UCAST) != 0)
3354				ifp->if_capenable ^= IFCAP_WOL_UCAST;
3355			if ((mask & IFCAP_WOL_MCAST) != 0)
3356				ifp->if_capenable ^= IFCAP_WOL_MCAST;
3357			if ((mask & IFCAP_WOL_MAGIC) != 0)
3358				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
3359		}
3360		if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3361			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3362			re_init(sc);
3363		}
3364		VLAN_CAPABILITIES(ifp);
3365	    }
3366		break;
3367	default:
3368		error = ether_ioctl(ifp, command, data);
3369		break;
3370	}
3371
3372	return (error);
3373}
3374
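/*
 * TX watchdog, driven once a second from re_tick(). If the timer
 * expires but re_txeof() finds the ring fully reclaimed, only a Tx
 * completion interrupt was lost and transmission is simply
 * restarted; otherwise the controller is reinitialized.
 */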
3375static void
3376re_watchdog(struct rl_softc *sc)
3377{
3378	struct ifnet		*ifp;
3379
3380	RL_LOCK_ASSERT(sc);
3381
3382	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0)
3383		return;
3384
3385	ifp = sc->rl_ifp;
3386	re_txeof(sc);
3387	if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) {
3388		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
3389		    "-- recovering\n");
3390		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3391			re_start_locked(ifp);
3392		return;
3393	}
3394
3395	if_printf(ifp, "watchdog timeout\n");
3396	ifp->if_oerrors++;
3397
3398	re_rxeof(sc, NULL);
3399	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3400	re_init_locked(sc);
3401	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3402		re_start_locked(ifp);
3403}
3404
3405/*
3406 * Stop the adapter and free any mbufs allocated to the
3407 * RX and TX lists.
3408 */
3409static void
3410re_stop(struct rl_softc *sc)
3411{
3412	int			i;
3413	struct ifnet		*ifp;
3414	struct rl_txdesc	*txd;
3415	struct rl_rxdesc	*rxd;
3416
3417	RL_LOCK_ASSERT(sc);
3418
3419	ifp = sc->rl_ifp;
3420
3421	sc->rl_watchdog_timer = 0;
3422	callout_stop(&sc->rl_stat_callout);
3423	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3424
3425	if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0)
3426		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
3427		    RL_CMD_RX_ENB);
3428	else
3429		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3430	DELAY(1000);
3431	CSR_WRITE_2(sc, RL_IMR, 0x0000);
3432	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
3433
3434	if (sc->rl_head != NULL) {
3435		m_freem(sc->rl_head);
3436		sc->rl_head = sc->rl_tail = NULL;
3437	}
3438
3439	/* Free the TX list buffers. */
3440
3441	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
3442		txd = &sc->rl_ldata.rl_tx_desc[i];
3443		if (txd->tx_m != NULL) {
3444			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
3445			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3446			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
3447			    txd->tx_dmamap);
3448			m_freem(txd->tx_m);
3449			txd->tx_m = NULL;
3450		}
3451	}
3452
3453	/* Free the RX list buffers. */
3454
3455	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3456		rxd = &sc->rl_ldata.rl_rx_desc[i];
3457		if (rxd->rx_m != NULL) {
3458			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
3459			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3460			bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
3461			    rxd->rx_dmamap);
3462			m_freem(rxd->rx_m);
3463			rxd->rx_m = NULL;
3464		}
3465	}
3466}
3467
3468/*
3469 * Device suspend routine.  Stop the interface and save some PCI
3470 * settings in case the BIOS doesn't restore them properly on
3471 * resume.
3472 */
3473static int
3474re_suspend(device_t dev)
3475{
3476	struct rl_softc		*sc;
3477
3478	sc = device_get_softc(dev);
3479
3480	RL_LOCK(sc);
3481	re_stop(sc);
3482	re_setwol(sc);
3483	sc->suspended = 1;
3484	RL_UNLOCK(sc);
3485
3486	return (0);
3487}
3488
3489/*
3490 * Device resume routine.  Restore some PCI settings in case the BIOS
3491 * doesn't, re-enable busmastering, and restart the interface if
3492 * appropriate.
3493 */
3494static int
3495re_resume(device_t dev)
3496{
3497	struct rl_softc		*sc;
3498	struct ifnet		*ifp;
3499
3500	sc = device_get_softc(dev);
3501
3502	RL_LOCK(sc);
3503
3504	ifp = sc->rl_ifp;
3505	/* Take controller out of sleep mode. */
3506	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3507		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3508			CSR_WRITE_1(sc, RL_GPIO,
3509			    CSR_READ_1(sc, RL_GPIO) | 0x01);
3510	}
3511
3512	/*
3513	 * Clear WOL matching such that normal Rx filtering
3514	 * wouldn't interfere with WOL patterns.
3515	 */
3516	re_clrwol(sc);
3517
3518	/* reinitialize interface if necessary */
3519	if (ifp->if_flags & IFF_UP)
3520		re_init_locked(sc);
3521
3522	sc->suspended = 0;
3523	RL_UNLOCK(sc);
3524
3525	return (0);
3526}
3527
3528/*
3529 * Stop all chip I/O so that the kernel's probe routines don't
3530 * get confused by errant DMAs when rebooting.
3531 */
3532static int
3533re_shutdown(device_t dev)
3534{
3535	struct rl_softc		*sc;
3536
3537	sc = device_get_softc(dev);
3538
3539	RL_LOCK(sc);
3540	re_stop(sc);
3541	/*
3542	 * Mark interface as down since otherwise we will panic if
3543	 * an interrupt comes in later on, which can happen in some
3544	 * cases.
3545	 */
3546	sc->rl_ifp->if_flags &= ~IFF_UP;
3547	re_setwol(sc);
3548	RL_UNLOCK(sc);
3549
3550	return (0);
3551}
3552
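/*
 * Program Wake On LAN: set the CFG1/CFG3/CFG5 wake-up bits
 * according to the interface WOL capabilities and request PME
 * through the PCI power management registers.
 */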
3553static void
3554re_setwol(struct rl_softc *sc)
3555{
3556	struct ifnet		*ifp;
3557	int			pmc;
3558	uint16_t		pmstat;
3559	uint8_t			v;
3560
3561	RL_LOCK_ASSERT(sc);
3562
3563	if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3564		return;
3565
3566	ifp = sc->rl_ifp;
3567	/* Put controller into sleep mode. */
3568	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3569		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3570			CSR_WRITE_1(sc, RL_GPIO,
3571			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
3572	}
3573	if ((ifp->if_capenable & IFCAP_WOL) != 0 &&
3574	    (sc->rl_flags & RL_FLAG_WOLRXENB) != 0)
3575		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB);
3576	/* Enable config register write. */
3577	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3578
3579	/* Enable PME. */
3580	v = CSR_READ_1(sc, RL_CFG1);
3581	v &= ~RL_CFG1_PME;
3582	if ((ifp->if_capenable & IFCAP_WOL) != 0)
3583		v |= RL_CFG1_PME;
3584	CSR_WRITE_1(sc, RL_CFG1, v);
3585
3586	v = CSR_READ_1(sc, RL_CFG3);
3587	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3588	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
3589		v |= RL_CFG3_WOL_MAGIC;
3590	CSR_WRITE_1(sc, RL_CFG3, v);
3591
3592	/* Config register write done. */
3593	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3594
3595	v = CSR_READ_1(sc, RL_CFG5);
3596	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
3597	v &= ~RL_CFG5_WOL_LANWAKE;
3598	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
3599		v |= RL_CFG5_WOL_UCAST;
3600	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
3601		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
3602	if ((ifp->if_capenable & IFCAP_WOL) != 0)
3603		v |= RL_CFG5_WOL_LANWAKE;
3604	CSR_WRITE_1(sc, RL_CFG5, v);
3605
3606	if ((ifp->if_capenable & IFCAP_WOL) != 0 &&
3607	    (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0)
3608		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80);
3609	/*
3610	 * The hardware seems to reset its link speed to 100Mbps in
3611	 * power down mode, so switching to 100Mbps in the driver is
3612	 * not needed.
3613	 */
3614
3615	/* Request PME if WOL is requested. */
3616	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
3617	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3618	if ((ifp->if_capenable & IFCAP_WOL) != 0)
3619		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3620	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
3621}
3622
3623static void
3624re_clrwol(struct rl_softc *sc)
3625{
3626	int			pmc;
3627	uint8_t			v;
3628
3629	RL_LOCK_ASSERT(sc);
3630
3631	if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3632		return;
3633
3634	/* Enable config register write. */
3635	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3636
3637	v = CSR_READ_1(sc, RL_CFG3);
3638	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3639	CSR_WRITE_1(sc, RL_CFG3, v);
3640
3641	/* Config register write done. */
3642	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3643
3644	v = CSR_READ_1(sc, RL_CFG5);
3645	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
3646	v &= ~RL_CFG5_WOL_LANWAKE;
3647	CSR_WRITE_1(sc, RL_CFG5, v);
3648}
3649
3650static void
3651re_add_sysctls(struct rl_softc *sc)
3652{
3653	struct sysctl_ctx_list	*ctx;
3654	struct sysctl_oid_list	*children;
3655	int			error;
3656
3657	ctx = device_get_sysctl_ctx(sc->rl_dev);
3658	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
3659
3660	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats",
3661	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, re_sysctl_stats, "I",
3662	    "Statistics Information");
3663	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
3664		return;
3665
3666	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod",
3667	    CTLTYPE_INT | CTLFLAG_RW, &sc->rl_int_rx_mod, 0,
3668	    sysctl_hw_re_int_mod, "I", "re RX interrupt moderation");
3669	/* Pull in device tunables. */
3670	sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3671	error = resource_int_value(device_get_name(sc->rl_dev),
3672	    device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod);
3673	if (error == 0) {
3674		if (sc->rl_int_rx_mod < RL_TIMER_MIN ||
3675		    sc->rl_int_rx_mod > RL_TIMER_MAX) {
3676			device_printf(sc->rl_dev, "int_rx_mod value out of "
3677			    "range; using default: %d\n",
3678			    RL_TIMER_DEFAULT);
3679			sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3680		}
3681	}
3683}
3684
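/*
 * Sysctl handler for the "stats" node: writing 1 asks the chip to
 * DMA its hardware statistics block into rl_stats and then prints
 * the counters.
 */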
3685static int
3686re_sysctl_stats(SYSCTL_HANDLER_ARGS)
3687{
3688	struct rl_softc		*sc;
3689	struct rl_stats		*stats;
3690	int			error, i, result;
3691
3692	result = -1;
3693	error = sysctl_handle_int(oidp, &result, 0, req);
3694	if (error || req->newptr == NULL)
3695		return (error);
3696
3697	if (result == 1) {
3698		sc = (struct rl_softc *)arg1;
3699		RL_LOCK(sc);
3700		if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3701			RL_UNLOCK(sc);
3702			goto done;
3703		}
3704		bus_dmamap_sync(sc->rl_ldata.rl_stag,
3705		    sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD);
3706		CSR_WRITE_4(sc, RL_DUMPSTATS_HI,
3707		    RL_ADDR_HI(sc->rl_ldata.rl_stats_addr));
3708		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
3709		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr));
3710		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
3711		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr |
3712		    RL_DUMPSTATS_START));
3713		for (i = RL_TIMEOUT; i > 0; i--) {
3714			if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) &
3715			    RL_DUMPSTATS_START) == 0)
3716				break;
3717			DELAY(1000);
3718		}
3719		bus_dmamap_sync(sc->rl_ldata.rl_stag,
3720		    sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD);
3721		RL_UNLOCK(sc);
3722		if (i == 0) {
3723			device_printf(sc->rl_dev,
3724			    "DUMP statistics request timed out\n");
3725			return (ETIMEDOUT);
3726		}
3727done:
3728		stats = sc->rl_ldata.rl_stats;
3729		printf("%s statistics:\n", device_get_nameunit(sc->rl_dev));
3730		printf("Tx frames : %ju\n",
3731		    (uintmax_t)le64toh(stats->rl_tx_pkts));
3732		printf("Rx frames : %ju\n",
3733		    (uintmax_t)le64toh(stats->rl_rx_pkts));
3734		printf("Tx errors : %ju\n",
3735		    (uintmax_t)le64toh(stats->rl_tx_errs));
3736		printf("Rx errors : %u\n",
3737		    le32toh(stats->rl_rx_errs));
3738		printf("Rx missed frames : %u\n",
3739		    (uint32_t)le16toh(stats->rl_missed_pkts));
3740		printf("Rx frame alignment errs : %u\n",
3741		    (uint32_t)le16toh(stats->rl_rx_framealign_errs));
3742		printf("Tx single collisions : %u\n",
3743		    le32toh(stats->rl_tx_onecoll));
3744		printf("Tx multiple collisions : %u\n",
3745		    le32toh(stats->rl_tx_multicolls));
3746		printf("Rx unicast frames : %ju\n",
3747		    (uintmax_t)le64toh(stats->rl_rx_ucasts));
3748		printf("Rx broadcast frames : %ju\n",
3749		    (uintmax_t)le64toh(stats->rl_rx_bcasts));
3750		printf("Rx multicast frames : %u\n",
3751		    le32toh(stats->rl_rx_mcasts));
3752		printf("Tx aborts : %u\n",
3753		    (uint32_t)le16toh(stats->rl_tx_aborts));
3754		printf("Tx underruns : %u\n",
3755		    (uint32_t)le16toh(stats->rl_rx_underruns));
3756	}
3757
3758	return (error);
3759}
3760
3761static int
3762sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3763{
3764	int error, value;
3765
3766	if (arg1 == NULL)
3767		return (EINVAL);
3768	value = *(int *)arg1;
3769	error = sysctl_handle_int(oidp, &value, 0, req);
3770	if (error || req->newptr == NULL)
3771		return (error);
3772	if (value < low || value > high)
3773		return (EINVAL);
3774	*(int *)arg1 = value;
3775
3776	return (0);
3777}
3778
3779static int
3780sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS)
3781{
3782
3783	return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN,
3784	    RL_TIMER_MAX));
3785}
3786